repo_name | ref | path | copies | content
|---|---|---|---|---|
tmm1/home-assistant
|
refs/heads/dev
|
homeassistant/components/device_tracker/thomson.py
|
9
|
"""
homeassistant.components.device_tracker.thomson
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a THOMSON router for device
presence.
This device tracker needs telnet to be enabled on the router.
Configuration:
To use the THOMSON tracker you will need to add something like the following
to your configuration.yaml file.
device_tracker:
platform: thomson
host: YOUR_ROUTER_IP
username: YOUR_ADMIN_USERNAME
password: YOUR_ADMIN_PASSWORD
Variables:
host
*Required
The IP address of your router, e.g. 192.168.1.1.
username
*Required
The username of a user with administrative privileges, usually 'admin'.
password
*Required
The password for your given admin account.
"""
import logging
from datetime import timedelta
import re
import threading
import telnetlib
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if the last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+' +
r'(?P<status>([^\s]+))\s+' +
r'(?P<type>([^\s]+))\s+' +
r'(?P<intf>([^\s]+))\s+' +
r'(?P<hwintf>([^\s]+))\s+' +
r'(?P<host>([^\s]+))')
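# Illustrative only (not from the original source): a sample 'hostmgr list'
# line in roughly the shape this pattern expects -- MAC, IP, status, type,
# interface, hardware interface and hostname, separated by whitespace:
#
#   00:11:22:33:44:55 192.168.1.10 C DHCP LocalNetwork ethport1 my-laptop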
# pylint: disable=unused-argument
def get_scanner(hass, config):
""" Validates config and returns a THOMSON scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = ThomsonDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ThomsonDeviceScanner(object):
"""
This class queries a router running THOMSON firmware
for connected devices. Adapted from ASUSWRT scanner.
"""
def __init__(self, config):
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.lock = threading.Lock()
self.last_results = {}
# Test the router is accessible
data = self.get_thomson_data()
self.success_init = data is not None
def scan_devices(self):
""" Scans for new devices and return a
list containing found device ids. """
self._update_info()
return [client['mac'] for client in self.last_results]
def get_device_name(self, device):
""" Returns the name of the given device
or None if we don't know. """
if not self.last_results:
return None
for client in self.last_results:
if client['mac'] == device:
return client['host']
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Ensures the information from the THOMSON router is up to date.
Returns a boolean indicating whether scanning was successful.
"""
if not self.success_init:
return False
with self.lock:
_LOGGER.info("Checking ARP")
data = self.get_thomson_data()
if not data:
return False
# flag C stands for CONNECTED
active_clients = [client for client in data.values() if
client['status'].find('C') != -1]
self.last_results = active_clients
return True
def get_thomson_data(self):
""" Retrieve data from THOMSON and return parsed result. """
try:
telnet = telnetlib.Telnet(self.host)
telnet.read_until(b'Username : ')
telnet.write((self.username + '\r\n').encode('ascii'))
telnet.read_until(b'Password : ')
telnet.write((self.password + '\r\n').encode('ascii'))
telnet.read_until(b'=>')
telnet.write(('hostmgr list\r\n').encode('ascii'))
devices_result = telnet.read_until(b'=>').split(b'\r\n')
telnet.write('exit\r\n'.encode('ascii'))
except EOFError:
_LOGGER.exception("Unexpected response from router")
return
except ConnectionRefusedError:
_LOGGER.exception("Connection refused by router," +
" is telnet enabled?")
return
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode('utf-8'))
if match:
devices[match.group('ip')] = {
'ip': match.group('ip'),
'mac': match.group('mac').upper(),
'host': match.group('host'),
'status': match.group('status')
}
return devices
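# --- Usage sketch (not part of the original component) ----------------------
# A minimal, hypothetical way to exercise the scanner outside Home Assistant;
# the host and credentials below are placeholders.
if __name__ == '__main__':
    scanner = ThomsonDeviceScanner({
        CONF_HOST: '192.168.1.1',
        CONF_USERNAME: 'admin',
        CONF_PASSWORD: 'secret',
    })
    if scanner.success_init:
        print(scanner.scan_devices())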
|
BennyRe/rosbridge_suite
|
refs/heads/develop
|
rosbridge_server/src/tornado/iostream.py
|
15
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import bytes_type, errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
#######################################################
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information), or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``).
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
self._try_inline_read()
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes_type)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum read buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
for future in futures:
if (isinstance(self.error, (socket.error, IOError)) and
errno_from_exception(self.error) in _ERRNO_CONNRESET):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert self._read_future is None
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
# If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if e.args[0] in _ERRNO_CONNRESET:
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if e.args[0] not in _ERRNO_CONNRESET:
# Broken pipe errors are usually caused by connection
# reset, and it's better not to log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops does not end in a slow-path op
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print(data)
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either a dictionary
of options or an `ssl.SSLContext`. If a ``server_hostname``
is given, it will be used for certificate verification
(as configured in the ``ssl_options``).
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = {}
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options, server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
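# Usage sketch for start_tls (illustrative only, not part of the original
# source). Inside a tornado.gen coroutine, assuming ``ssl_options`` is a dict
# or ssl.SSLContext prepared by the caller:
#
#     yield stream.write(b"STARTTLS\r\n")
#     yield stream.read_until(b"\r\n")
#     ssl_stream = yield stream.start_tls(False, ssl_options=ssl_options)
#
# Once the returned Future resolves, use ``ssl_stream`` and drop the original.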
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno not in (errno.EINVAL, errno.ECONNRESET):
raise
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be a dictionary
of keywords arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
object.
"""
self._ssl_options = kwargs.pop('ssl_options', {})
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
if err.args[0] in _ERRNO_CONNRESET:
return self.close(exc_info=True)
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
# Save the user's callback and run it after the ssl handshake
# has completed.
self._ssl_connect_callback = stack_context.wrap(callback)
self._server_hostname = server_hostname
# Note: Since we don't pass our callback argument along to
# super.connect(), this will always return a Future.
# This is harmless, but a bit less efficient than it could be.
return super(SSLIOStream, self).connect(address, callback=None)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
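# --- Usage sketch (not part of the original module) --------------------------
# Demonstrates the PipeIOStream described above on a POSIX os.pipe(): write a
# few bytes to the pipe, then read them back asynchronously. Runs only when
# this file is executed directly; the payload is an arbitrary placeholder.
if __name__ == "__main__":
    read_fd, write_fd = os.pipe()
    stream = PipeIOStream(read_fd)

    def on_data(data):
        print(data)  # expected: b'hello'
        stream.close()
        ioloop.IOLoop.instance().stop()

    os.write(write_fd, b"hello")
    stream.read_bytes(5, on_data)
    ioloop.IOLoop.instance().start()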
|
tempbottle/pycapnp
|
refs/heads/develop
|
test/test_struct.py
|
2
|
import pytest
import capnp
import os
import tempfile
import sys
this_dir = os.path.dirname(__file__)
@pytest.fixture
def addressbook():
return capnp.load(os.path.join(this_dir, 'addressbook.capnp'))
@pytest.fixture
def all_types():
return capnp.load(os.path.join(this_dir, 'all_types.capnp'))
def test_which_builder(addressbook):
addresses = addressbook.AddressBook.new_message()
people = addresses.init('people', 2)
alice = people[0]
alice.employment.school = "MIT"
assert alice.employment.which == addressbook.Person.Employment.school
assert alice.employment.which == "school"
bob = people[1]
assert bob.employment.which == addressbook.Person.Employment.unemployed
assert bob.employment.which == "unemployed"
bob.employment.unemployed = None
assert bob.employment.which == addressbook.Person.Employment.unemployed
assert bob.employment.which == "unemployed"
with pytest.raises(Exception):
addresses.which
with pytest.raises(Exception):
addresses.which
def test_which_reader(addressbook):
def writeAddressBook(fd):
message = capnp._MallocMessageBuilder()
addressBook = message.init_root(addressbook.AddressBook)
people = addressBook.init('people', 2)
alice = people[0]
alice.employment.school = "MIT"
bob = people[1]
bob.employment.unemployed = None
capnp._write_packed_message_to_fd(fd, message)
f = tempfile.TemporaryFile()
writeAddressBook(f.fileno())
f.seek(0)
addresses = addressbook.AddressBook.read_packed(f)
people = addresses.people
alice = people[0]
assert alice.employment.which == "school"
bob = people[1]
assert bob.employment.which == "unemployed"
with pytest.raises(Exception):
addresses.which
with pytest.raises(Exception):
addresses.which
@pytest.mark.skipif(capnp.version.LIBCAPNP_VERSION < 5000, reason="Using ints as enums requires v0.5.0+ of the C++ capnp library")
def test_enum(addressbook):
addresses = addressbook.AddressBook.new_message()
people = addresses.init('people', 2)
alice = people[0]
phones = alice.init('phones', 2)
assert phones[0].type == phones[1].type
phones[0].type = addressbook.Person.PhoneNumber.Type.home
assert phones[0].type != phones[1].type
phones[1].type = 'home'
assert phones[0].type == phones[1].type
def test_builder_set(addressbook):
person = addressbook.Person.new_message()
person.name = 'test'
assert person.name == 'test'
with pytest.raises(AttributeError):
person.foo = 'test'
def test_builder_set_from_list(all_types):
msg = all_types.TestAllTypes.new_message()
msg.int32List = [0, 1, 2]
assert list(msg.int32List) == [0, 1, 2]
def test_null_str(all_types):
msg = all_types.TestAllTypes.new_message()
msg.textField = "f\x00oo"
msg.dataField = b"b\x00ar"
assert msg.textField == "f\x00oo"
assert msg.dataField == b"b\x00ar"
def test_unicode_str(all_types):
msg = all_types.TestAllTypes.new_message()
if sys.version_info[0] == 2:
msg.textField = u"f\u00e6oo".encode('utf-8')
assert msg.textField.decode('utf-8') == u"f\u00e6oo"
else:
msg.textField = "f\u00e6oo"
assert msg.textField == "f\u00e6oo"
def test_new_message(all_types):
msg = all_types.TestAllTypes.new_message(int32Field=100)
assert msg.int32Field == 100
msg = all_types.TestAllTypes.new_message(structField={'int32Field': 100})
assert msg.structField.int32Field == 100
msg = all_types.TestAllTypes.new_message(structList=[{'int32Field': 100}, {'int32Field': 101}])
assert msg.structList[0].int32Field == 100
assert msg.structList[1].int32Field == 101
msg = all_types.TestAllTypes.new_message(int32Field=100)
assert msg.int32Field == 100
msg = all_types.TestAllTypes.new_message(**{'int32Field': 100, 'int64Field': 101})
assert msg.int32Field == 100
assert msg.int64Field == 101
def test_set_dict(all_types):
msg = all_types.TestAllTypes.new_message()
msg.structField = {'int32Field': 100}
assert msg.structField.int32Field == 100
msg.init('structList', 2)
msg.structList[0] = {'int32Field': 102}
assert msg.structList[0].int32Field == 102
def test_set_dict_union(addressbook):
person = addressbook.Person.new_message(**{'employment': {'employer': {'name': 'foo'}}})
assert person.employment.employer.name == 'foo'
try:
basestring # attempt to evaluate basestring
def isstr(s):
return isinstance(s, basestring)
except NameError:
def isstr(s):
return isinstance(s, str)
def test_to_dict_enum(addressbook):
person = addressbook.Person.new_message(**{'phones': [{'number': '999-9999', 'type': 'mobile'}]})
field = person.to_dict()['phones'][0]['type']
assert isstr(field)
assert field == 'mobile'
def test_explicit_field(addressbook):
person = addressbook.Person.new_message(**{'name': 'Test'})
name_field = addressbook.Person.schema.fields['name']
assert person.name == person._get_by_field(name_field)
assert person.name == person.as_reader()._get_by_field(name_field)
def test_to_dict_verbose(addressbook):
person = addressbook.Person.new_message(**{'name': 'Test'})
assert person.to_dict(verbose=True)['phones'] == []
if sys.version_info >= (2, 7):
assert person.to_dict(verbose=True, ordered=True)['phones'] == []
with pytest.raises(KeyError):
assert person.to_dict()['phones'] == []
def test_to_dict_ordered(addressbook):
person = addressbook.Person.new_message(**{'name': 'Alice', 'phones': [{'type': 'mobile', 'number': '555-1212'}], 'id': 123, 'employment': {'school': 'MIT'}, 'email': 'alice@example.com'})
if sys.version_info >= (2, 7):
assert list(person.to_dict(ordered=True).keys()) == ['id', 'name', 'email', 'phones', 'employment']
else:
with pytest.raises(Exception):
person.to_dict(ordered=True)
def test_nested_list(addressbook):
struct = addressbook.NestedList.new_message()
struct.init('list', 2)
struct.list.init(0, 1)
struct.list.init(1, 2)
struct.list[0][0] = 1
struct.list[1][0] = 2
struct.list[1][1] = 3
assert struct.to_dict()["list"] == [[1], [2,3]]
|
blacklin/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/asyncio/base_events.py
|
61
|
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import inspect
import logging
import os
import socket
import subprocess
import time
import traceback
import sys
from . import coroutines
from . import events
from . import futures
from . import tasks
from .coroutines import coroutine
from .log import logger
__all__ = ['BaseEventLoop', 'Server']
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
def _format_handle(handle):
cb = handle._callback
if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
class _StopError(BaseException):
"""Raised to stop the event loop."""
def _check_resolved_address(sock, address):
# Ensure that the address is already resolved to avoid the trap of hanging
# the entire event loop when the address requires doing a DNS lookup.
family = sock.family
if family == socket.AF_INET:
host, port = address
elif family == socket.AF_INET6:
host, port = address[:2]
else:
return
type_mask = 0
if hasattr(socket, 'SOCK_NONBLOCK'):
type_mask |= socket.SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
type_mask |= socket.SOCK_CLOEXEC
# Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
# already resolved.
try:
socket.getaddrinfo(host, port,
family=family,
type=(sock.type & ~type_mask),
proto=sock.proto,
flags=socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
def _raise_stop_error(*args):
raise _StopError
class Server(events.AbstractServer):
def __init__(self, loop, sockets):
self._loop = loop
self.sockets = sockets
self._active_count = 0
self._waiters = []
def __repr__(self):
return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)
def _attach(self):
assert self.sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self.sockets is None:
self._wakeup()
def close(self):
sockets = self.sockets
if sockets is None:
return
self.sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
if self._active_count == 0:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
@coroutine
def wait_closed(self):
if self.sockets is None or self._waiters is None:
return
waiter = futures.Future(loop=self._loop)
self._waiters.append(waiter)
yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._closed = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
self._running = False
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self._debug = (not sys.flags.ignore_environment
and bool(os.environ.get('PYTHONASYNCIODEBUG')))
# In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
def __repr__(self):
return ('<%s running=%s closed=%s debug=%s>'
% (self.__class__.__name__, self.is_running(),
self.is_closed(), self.get_debug()))
def create_task(self, coro):
"""Schedule a coroutine object.
Return a task object.
"""
task = tasks.Task(coro, loop=self)
if task._source_traceback:
del task._source_traceback[-1]
return task
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self._running:
raise RuntimeError('Event loop is running.')
self._running = True
try:
while True:
try:
self._run_once()
except _StopError:
break
finally:
self._running = False
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
new_task = not isinstance(future, futures.Future)
future = tasks.async(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_raise_stop_error)
self.run_forever()
future.remove_done_callback(_raise_stop_error)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback scheduled before stop() is called will run. Callbacks
scheduled after stop() is called will not run. However, those callbacks
will run if run_forever is called again later.
"""
self.call_soon(_raise_stop_error)
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self._running:
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def is_running(self):
"""Returns True if the event loop is running."""
return self._running
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
if coroutines.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_at()")
if self._debug:
self._assert_is_current_event_loop()
timer = events.TimerHandle(when, callback, args, self)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
return timer
def call_soon(self, callback, *args):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
handle = self._call_soon(callback, args, check_loop=True)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _call_soon(self, callback, args, check_loop):
if coroutines.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_soon()")
if self._debug and check_loop:
self._assert_is_current_event_loop()
handle = events.Handle(callback, args, self)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _assert_is_current_event_loop(self):
"""Asserts that this event loop is the current event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
try:
current = events.get_event_loop()
except AssertionError:
return
if current is not self:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args):
"""Like call_soon(), but thread-safe."""
handle = self._call_soon(callback, args, check_loop=False)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, callback, *args):
if coroutines.iscoroutinefunction(callback):
raise TypeError("Coroutines cannot be used with run_in_executor()")
if isinstance(callback, events.Handle):
assert not args
assert not isinstance(callback, events.TimerHandle)
if callback._cancelled:
f = futures.Future(loop=self)
f.set_result(None)
return f
callback, args = callback._callback, callback._args
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
self._default_executor = executor
return futures.wrap_future(executor.submit(callback, *args), loop=self)
def set_default_executor(self, executor):
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = ["%s:%r" % (host, port)]
if family:
msg.append('family=%r' % family)
if type:
msg.append('type=%r' % type)
if proto:
msg.append('proto=%r' % proto)
if flags:
msg.append('flags=%r' % flags)
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = ('Getting address info %s took %.3f ms: %r'
% (msg, dt * 1e3, addrinfo))
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
return self.run_in_executor(None, self._getaddrinfo_debug,
host, port, family, type, proto, flags)
else:
return self.run_in_executor(None, socket.getaddrinfo,
host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@coroutine
def create_connection(self, protocol_factory, host=None, port=None, *,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
f1 = self.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
fs = [f1]
if local_addr is not None:
f2 = self.getaddrinfo(
*local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
fs.append(f2)
else:
f2 = None
yield from tasks.wait(fs, loop=self)
infos = f1.result()
if not infos:
raise OSError('getaddrinfo() returned empty list')
if f2 is not None:
laddr_infos = f2.result()
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
if f2 is not None:
for _, _, _, _, laddr in laddr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
exc = OSError(
exc.errno, 'error while '
'attempting to bind on address '
'{!r}: {}'.format(
laddr, exc.strerror.lower()))
exceptions.append(exc)
else:
sock.close()
sock = None
continue
if self._debug:
logger.debug("connect %r to %r", sock, address)
yield from self.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
elif sock is None:
raise ValueError(
'host and port was not specified and no sock specified')
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
server_hostname):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=False, server_hostname=server_hostname)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
yield from waiter
return transport, protocol
@coroutine
def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0):
"""Create datagram connection."""
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
else:
# join address by (family, protocol)
addr_infos = collections.OrderedDict()
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = yield from self.getaddrinfo(
*addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
yield from self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_datagram_transport(sock, protocol, r_addr,
waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
yield from waiter
return transport, protocol
@coroutine
def create_server(self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None):
"""Create a TCP server bound to host and port.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
AF_INET6 = getattr(socket, 'AF_INET6', 0)
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
host = None
infos = yield from self.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=0, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
True)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower()))
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
sockets = [sock]
server = Server(self, sockets)
for sock in sockets:
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server)
if self._debug:
logger.info("%r is serving", server)
return server
@coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
yield from waiter
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
yield from waiter
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append('stdin=%s' % _format_pipe(stdin))
if stdout is not None and stderr == subprocess.STDOUT:
info.append('stdout=stderr=%s' % _format_pipe(stdout))
else:
if stdout is not None:
info.append('stdout=%s' % _format_pipe(stdout))
if stderr is not None:
info.append('stderr=%s' % _format_pipe(stderr))
logger.debug(' '.join(info))
@coroutine
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
return transport, protocol
@coroutine
def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'execute program %r' % program
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
return transport, protocol
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
'got {!r}'.format(handler))
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append('{}: {}'.format(key, value))
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
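
        For example, a failing callback might be reported with a context like
        the following (purely illustrative; 'exc' and 'handle' are
        placeholders):

            loop.call_exception_handler({
                'message': 'Exception in callback',
                'exception': exc,
                'handle': handle,
            })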
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except Exception:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except Exception as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except Exception:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
if isinstance(handle, events.TimerHandle):
heapq.heappush(self._scheduled, handle)
else:
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
heapq.heappop(self._scheduled)
timeout = None
if self._ready:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = max(0, when - self.time())
if self._debug and timeout != 0:
t0 = self.time()
event_list = self._selector.select(timeout)
dt = self.time() - t0
if dt >= 1.0:
level = logging.INFO
else:
level = logging.DEBUG
nevent = len(event_list)
if timeout is None:
logger.log(level, 'poll took %.3f ms: %s events',
dt * 1e3, nevent)
elif nevent:
logger.log(level,
'poll %.3f ms took %.3f ms: %s events',
timeout * 1e3, dt * 1e3, nevent)
elif dt >= 1.0:
logger.log(level,
'poll %.3f ms took %.3f ms: timeout',
timeout * 1e3, dt * 1e3)
else:
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
|
danielchalef/gensim
|
refs/heads/develop
|
gensim/summarization/pagerank_weighted.py
|
21
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from numpy import empty as empty_matrix
from scipy.sparse import csr_matrix
from scipy.linalg import eig
from six.moves import xrange
try:
from numpy import VisibleDeprecationWarning
import warnings
warnings.filterwarnings("ignore", category=VisibleDeprecationWarning)
except ImportError:
pass
def pagerank_weighted(graph, damping=0.85):
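    # Damped PageRank: combine the row-normalised adjacency matrix with a
    # uniform 1/N teleportation matrix, then read the node scores off a left
    # eigenvector of the result (see process_results below).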
adjacency_matrix = build_adjacency_matrix(graph)
probability_matrix = build_probability_matrix(graph)
pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix
vals, vecs = eig(pagerank_matrix, left=True, right=False)
return process_results(graph, vecs)
def build_adjacency_matrix(graph):
row = []
col = []
data = []
nodes = graph.nodes()
length = len(nodes)
for i in xrange(length):
current_node = nodes[i]
neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in graph.neighbors(current_node))
for j in xrange(length):
edge_weight = float(graph.edge_weight((current_node, nodes[j])))
if i != j and edge_weight != 0:
row.append(i)
col.append(j)
data.append(edge_weight / neighbors_sum)
return csr_matrix((data, (row, col)), shape=(length, length))
def build_probability_matrix(graph):
dimension = len(graph.nodes())
matrix = empty_matrix((dimension, dimension))
probability = 1 / float(dimension)
matrix.fill(probability)
return matrix
def process_results(graph, vecs):
scores = {}
for i, node in enumerate(graph.nodes()):
scores[node] = abs(vecs[i][0])
return scores
|
babble/babble
|
refs/heads/master
|
include/jython/Lib/encodings/mac_iceland.py
|
593
|
""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
ali/mopidy
|
refs/heads/develop
|
tests/internal/__init__.py
|
190
|
from __future__ import absolute_import, unicode_literals
|
ronekko/chainer
|
refs/heads/master
|
tests/chainer_tests/functions_tests/loss_tests/test_hinge.py
|
1
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'reduce': ['no', 'mean'],
'norm': ['L1', 'L2'],
'label_dtype': [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
}))
class TestHinge(unittest.TestCase):
def setUp(self):
shape = (10, 5)
self.x = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
# Avoid values around -1.0 for stability
self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
self.t = numpy.random.randint(
0, shape[1], shape[:1]).astype(self.label_dtype)
if self.reduce == 'no':
self.gy = numpy.random.uniform(
-1, 1, self.x.shape).astype(numpy.float32)
self.check_backward_options = {'atol': 1e-1, 'rtol': 1e-1}
def check_forward(self, x_data, t_data):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data)
loss = functions.hinge(x_val, t_val, self.norm, self.reduce)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, self.x.shape)
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
for i in six.moves.range(self.x.shape[0]):
self.x[i, self.t[i]] *= -1
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
self.x[i, j] = max(0, 1.0 + self.x[i, j])
if self.norm == 'L1':
loss_expect = self.x
elif self.norm == 'L2':
loss_expect = self.x ** 2
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.x.shape[0]
testing.assert_allclose(loss_expect, loss_value)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
def check_backward(self, x_data, t_data):
def f(x, t):
return functions.hinge(x, t, self.norm)
gradient_check.check_backward(
f, (x_data, t_data), None, dtype='d',
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
class TestHingeInvalidOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)
def check_invalid_norm_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(NotImplementedError):
functions.hinge(x, t, 'invalid_norm', 'mean')
def test_invalid_norm_option_cpu(self):
self.check_invalid_norm_option(numpy)
@attr.gpu
def test_invalid_norm_option_gpu(self):
self.check_invalid_norm_option(cuda.cupy)
def check_invalid_reduce_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.hinge(x, t, 'L1', 'invalid_option')
def test_invalid_reduce_option_cpu(self):
self.check_invalid_reduce_option(numpy)
@attr.gpu
def test_invalid_reduce_option_gpu(self):
self.check_invalid_reduce_option(cuda.cupy)
testing.run_module(__name__, __file__)
|
NEricN/RobotCSimulator
|
refs/heads/master
|
Python/App/Lib/test/test_stringprep.py
|
115
|
# To fully test this module, we would need a copy of the stringprep tables.
# Since we don't have them, this test checks only a few codepoints.
import unittest
from test import test_support
from stringprep import *
class StringprepTests(unittest.TestCase):
def test(self):
self.assertTrue(in_table_a1(u"\u0221"))
self.assertFalse(in_table_a1(u"\u0222"))
self.assertTrue(in_table_b1(u"\u00ad"))
self.assertFalse(in_table_b1(u"\u00ae"))
        self.assertEqual(map_table_b2(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b2(u"\u0061"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0061"), u"\u0061")
self.assertTrue(in_table_c11(u"\u0020"))
self.assertFalse(in_table_c11(u"\u0021"))
self.assertTrue(in_table_c12(u"\u00a0"))
self.assertFalse(in_table_c12(u"\u00a1"))
self.assertTrue(in_table_c12(u"\u00a0"))
self.assertFalse(in_table_c12(u"\u00a1"))
self.assertTrue(in_table_c11_c12(u"\u00a0"))
self.assertFalse(in_table_c11_c12(u"\u00a1"))
self.assertTrue(in_table_c21(u"\u001f"))
self.assertFalse(in_table_c21(u"\u0020"))
self.assertTrue(in_table_c22(u"\u009f"))
self.assertFalse(in_table_c22(u"\u00a0"))
self.assertTrue(in_table_c21_c22(u"\u009f"))
self.assertFalse(in_table_c21_c22(u"\u00a0"))
self.assertTrue(in_table_c3(u"\ue000"))
self.assertFalse(in_table_c3(u"\uf900"))
self.assertTrue(in_table_c4(u"\uffff"))
self.assertFalse(in_table_c4(u"\u0000"))
self.assertTrue(in_table_c5(u"\ud800"))
self.assertFalse(in_table_c5(u"\ud7ff"))
self.assertTrue(in_table_c6(u"\ufff9"))
self.assertFalse(in_table_c6(u"\ufffe"))
self.assertTrue(in_table_c7(u"\u2ff0"))
self.assertFalse(in_table_c7(u"\u2ffc"))
self.assertTrue(in_table_c8(u"\u0340"))
self.assertFalse(in_table_c8(u"\u0342"))
# C.9 is not in the bmp
# self.assertTrue(in_table_c9(u"\U000E0001"))
# self.assertFalse(in_table_c8(u"\U000E0002"))
self.assertTrue(in_table_d1(u"\u05be"))
self.assertFalse(in_table_d1(u"\u05bf"))
self.assertTrue(in_table_d2(u"\u0041"))
self.assertFalse(in_table_d2(u"\u0040"))
# This would generate a hash of all predicates. However, running
# it is quite expensive, and only serves to detect changes in the
# unicode database. Instead, stringprep.py asserts the version of
# the database.
# import hashlib
# predicates = [k for k in dir(stringprep) if k.startswith("in_table")]
# predicates.sort()
# for p in predicates:
# f = getattr(stringprep, p)
# # Collect all BMP code points
# data = ["0"] * 0x10000
# for i in range(0x10000):
# if f(unichr(i)):
# data[i] = "1"
# data = "".join(data)
# h = hashlib.sha1()
# h.update(data)
# print p, h.hexdigest()
def test_main():
test_support.run_unittest(StringprepTests)
if __name__ == '__main__':
test_main()
|
simongibbons/numpy
|
refs/heads/main
|
numpy/distutils/from_template.py
|
8
|
#!/usr/bin/env python3
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
  by the <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
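
  For example (the routine name below is made up purely for illustration),
  a block such as

      subroutine <prefix>axpy(n, a, x, y)
         <ftype> a, x(n), y(n)
      end subroutine <prefix>axpy

  is replicated four times using the predefined rules below, with s/d/c/z
  substituted for <prefix> and real/double precision/complex/double complex
  substituted for <ftype> in each copy.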
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
                if astr[i:i+7]!='\n     $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace(r'\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
def find_and_remove_repl_patterns(astr):
names = find_repl_patterns(astr)
astr = re.subn(named_re, '', astr)[0]
return astr, names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
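    # Normalise a comma-separated template list, replacing '\<index>'
    # back-references with the entry at that (zero-based) position.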
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace(r'\>', '@rightarrow@')
substr = substr.replace(r'\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = ''
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
writestr += cleanedstr
names.update(defs)
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
with open(source) as fid:
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
def main():
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
if __name__ == "__main__":
main()
|
msenin94/salt-mk-verificator
|
refs/heads/master
|
mk_verificator/tests/packet_checker/test_packet_checker.py
|
2
|
import pytest
import json
from mk_verificator import utils
@pytest.mark.parametrize(
"group",
utils.get_groups(utils.get_configuration(__file__))
)
def test_check_package_versions(local_salt_client, group):
config = utils.get_configuration(__file__)
output = local_salt_client.cmd(group, 'lowpkg.list_pkgs', expr_form='pcre')
if len(output.keys()) < 2:
pytest.skip("Nothing to compare - only 1 node")
nodes = []
pkts_data = []
my_set = set()
for node in output:
nodes.append(node)
my_set.update(output[node].keys())
for deb in my_set:
diff = []
row = []
for node in nodes:
if deb in output[node].keys():
diff.append(output[node][deb])
row.append("{}: {}".format(node, output[node][deb]))
else:
row.append("{}: No package".format(node))
if diff.count(diff[0]) < len(nodes):
row.sort()
row.insert(0, deb)
pkts_data.append(row)
assert len(pkts_data) <= config["skip_number"], \
"Several problems found for {0} group: {1}".format(
group, json.dumps(pkts_data, indent=4))
@pytest.mark.parametrize(
"group",
utils.get_groups(utils.get_configuration(__file__))
)
def test_check_module_versions(local_salt_client, group):
config = utils.get_configuration(__file__)
pre_check = local_salt_client.cmd(
group, 'cmd.run', ['dpkg -l | grep "python-pip "'], expr_form='pcre')
if pre_check.values().count('') > 0:
pytest.skip("pip is not installed on one or more nodes")
if len(pre_check.keys()) < 2:
pytest.skip("Nothing to compare - only 1 node")
output = local_salt_client.cmd(group, 'pip.freeze', expr_form='pcre')
nodes = []
pkts_data = []
my_set = set()
for node in output:
nodes.append(node)
my_set.update([x.split("=")[0] for x in output[node]])
output[node] = dict([x.split("==") for x in output[node]])
for deb in my_set:
diff = []
row = []
for node in nodes:
if deb in output[node].keys():
diff.append(output[node][deb])
row.append("{}: {}".format(node, output[node][deb]))
else:
row.append("{}: No module".format(node))
if diff.count(diff[0]) < len(nodes):
row.sort()
row.insert(0, deb)
pkts_data.append(row)
assert len(pkts_data) <= config["skip_number"], \
"Several problems found for {0} group: {1}".format(
group, json.dumps(pkts_data, indent=4))
|
ptisserand/ansible
|
refs/heads/devel
|
lib/ansible/vars/clean.py
|
23
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.six import string_types
from ansible.plugins.loader import connection_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def strip_internal_keys(dirty, exceptions=None):
'''
All keys starting with _ansible_ are internal, so create a copy of the 'dirty' dict
and remove them from the clean one before returning it
'''
if exceptions is None:
exceptions = ()
clean = dirty.copy()
for k in dirty.keys():
if isinstance(k, string_types) and k.startswith('_ansible_'):
if k not in exceptions:
del clean[k]
elif isinstance(dirty[k], dict):
clean[k] = strip_internal_keys(dirty[k])
return clean
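# A minimal usage sketch (hypothetical values, not taken from real Ansible output):
#
#     strip_internal_keys({'_ansible_no_log': False, 'rc': 0,
#                          'nested': {'_ansible_item_label': 'x', 'ok': True}})
#     # -> {'rc': 0, 'nested': {'ok': True}}
#
# Keys named in `exceptions` are kept even if they start with '_ansible_'.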
def remove_internal_keys(data):
'''
More nuanced version of strip_internal_keys
'''
for key in list(data.keys()):
if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
del data[key]
# remove bad/empty internal keys
for key in ['warnings', 'deprecations']:
if key in data and not data[key]:
del data[key]
def clean_facts(facts):
''' remove facts that can override internal keys or otherwise deemed unsafe '''
data = deepcopy(facts)
remove_keys = set()
fact_keys = set(data.keys())
# first we add all of our magic variable names to the set of
# keys we want to remove from facts
for magic_var in C.MAGIC_VARIABLE_MAPPING:
remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))
# next we remove any connection plugin specific vars
for conn_path in connection_loader.all(path_only=True):
try:
conn_name = os.path.splitext(os.path.basename(conn_path))[0]
re_key = re.compile('^ansible_%s_' % conn_name)
for fact_key in fact_keys:
# most lightweight VM or container tech creates devices with this pattern; this avoids filtering them out
if re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge')):
remove_keys.add(fact_key)
except AttributeError:
pass
# remove some KNOWN keys
for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
if hard in fact_keys:
remove_keys.add(hard)
# finally, we search for interpreter keys to remove
re_interp = re.compile('^ansible_.*_interpreter$')
for fact_key in fact_keys:
if re_interp.match(fact_key):
remove_keys.add(fact_key)
# then we remove them (except for ssh host keys)
for r_key in remove_keys:
if not r_key.startswith('ansible_ssh_host_key_'):
try:
r_val = to_text(data[r_key])
if len(r_val) > 24:
r_val = '%s ... %s' % (r_val[:13], r_val[-6:])
except Exception:
r_val = ' <failed to convert value to a string> '
display.warning("Removed restricted key from module data: %s = %s" % (r_key, r_val))
del data[r_key]
return strip_internal_keys(data)
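# A minimal sketch of the effect (hypothetical fact dict):
#
#     clean_facts({'ansible_python_interpreter': '/usr/bin/python3',
#                  'os_family': 'Debian'})
#     # -> {'os_family': 'Debian'}
#
# 'ansible_python_interpreter' matches the interpreter pattern above and is
# dropped; ordinary facts such as 'os_family' pass through unchanged.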
def namespace_facts(facts):
''' return all facts inside 'ansible_facts' w/o an ansible_ prefix '''
deprefixed = {}
for k in facts:
if k in ('ansible_local',):
# exceptions to 'deprefixing'
deprefixed[k] = deepcopy(facts[k])
else:
deprefixed[k.replace('ansible_', '', 1)] = deepcopy(facts[k])
return {'ansible_facts': deprefixed}
|
ehashman/oh-mainline
|
refs/heads/master
|
vendor/packages/mock/tests/support.py
|
18
|
import sys
info = sys.version_info
if info[:3] >= (3, 2, 0):
# for Python 3.2 ordinary unittest is fine
import unittest as unittest2
else:
import unittest2
try:
# need to turn it into a local variable or we can't
# import it from here under Python 2
apply = apply
except NameError:
# no apply in Python 3
def apply(f, *args, **kw):
return f(*args, **kw)
inPy3k = sys.version_info[0] == 3
with_available = sys.version_info[:2] >= (2, 5)
class SomeClass(object):
class_attribute = None
def wibble(self):
pass
|
jainanisha90/WeVoteServer
|
refs/heads/develop
|
apis_v1/documentation_source/organization_stop_following_doc.py
|
3
|
# apis_v1/documentation_source/organization_stop_following_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def organization_stop_following_doc_template_values(url_root):
"""
Show documentation about organizationStopFollowing
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'organization_id',
'value': 'integer', # boolean, integer, long, string
'description': 'Internal database unique identifier for organization',
},
{
'name': 'organization_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for this organization across all networks '
'(either organization_id OR organization_we_vote_id required -- not both.) '
'NOTE: In the future we '
'might support other identifiers used in the industry.',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'A valid voter_device_id parameter was not included. Cannot proceed.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'A valid voter_id was not found from voter_device_id. Cannot proceed.',
},
{
'code': 'VALID_ORGANIZATION_ID_MISSING',
'description': 'A valid organization_id was not found. Cannot proceed.',
},
{
'code': 'ORGANIZATION_NOT_FOUND_ON_CREATE STOP_FOLLOWING',
'description': 'An organization with that organization_id was not found. Cannot proceed.',
},
{
'code': 'STOPPED_FOLLOWING',
'description': 'Successfully stopped following this organization',
},
]
try_now_link_variables_dict = {
'organization_id': '1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "organization_id": integer,\n' \
' "organization_we_vote_id": string,\n' \
'}'
template_values = {
'api_name': 'organizationStopFollowing',
'api_slug': 'organizationStopFollowing',
'api_introduction':
"Call this to save that the voter has decided to stop following this organization. Logically equivalent"
"to never following in the first place, but leaves a record in the database.",
'try_now_link': 'apis_v1:organizationStopFollowingView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
jandom/rdkit
|
refs/heads/master
|
Code/Demos/boost/smartPtrsAndIters/test.py
|
12
|
# Copyright Rational Discovery LLC 2005
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import SPtrTestModule as TestModule
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1(self):
obj = TestModule.DemoKlass(3)
self.failUnless(obj.GetVal() == 3)
def test2(self):
obj = TestModule.buildPtr(3)
self.failUnless(obj.GetVal() == 3)
def test3(self):
obj = TestModule.buildSPtr(3)
self.failUnless(obj.GetVal() == 3)
def test4(self):
sz = 5
vect = TestModule.buildPtrVector(sz)
self.failUnless(len(vect) == sz)
for i in range(sz):
self.failUnless(vect[i].GetVal() == i)
def test5(self):
sz = 5
vect = TestModule.buildSPtrVector(sz)
self.failUnless(len(vect) == sz)
for i in range(sz):
self.failUnless(vect[i].GetVal() == i)
def test5b(self):
sz = 5
vect = TestModule.buildSPtrVector(sz)
self.failUnless(len(vect) == sz)
p = 0
for itm in vect:
self.failUnless(itm.GetVal() == p)
p += 1
def test6(self):
sz = 5
cont = TestModule.DemoContainer(sz)
p = 0
for itm in cont:
self.failUnless(itm.GetVal() == p)
p += 1
self.failUnless(p == sz)
if __name__ == '__main__':
unittest.main()
|
nagyv/python-api-library
|
refs/heads/master
|
kayako/tests/object/test_ticket_custom_field.py
|
4
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on May 10, 2011
@author: evan
'''
from kayako.tests import KayakoAPITest
class TestTicketNote(KayakoAPITest):
SUBJECT = 'DELETEME'
def tearDown(self):
from kayako.objects import Department, Ticket
dept = self.api.first(Department, module='tickets')
test_tickets = self.api.filter(Ticket, args=(dept.id,), subject=self.SUBJECT)
for ticket in test_tickets:
ticket.delete()
super(TestTicketNote, self).tearDown()
def test_get(self):
from kayako.core.lib import UnsetParameter
from kayako.objects import Department, Ticket, TicketCustomField
api = self.api
depts = api.get_all(Department)
for dept in depts:
if dept.module == 'tickets':
break
ticket = api.create(Ticket)
ticket.subject = self.SUBJECT
ticket.fullname = 'Unit Test'
ticket.email = 'test@example.com'
ticket.contents = 'test'
ticket.departmentid = dept.id
ticket.ticketstatusid = 1
ticket.ticketpriorityid = 1
ticket.tickettypeid = 1
ticket.userid = 1
ticket.ownerstaffid = 1
ticket.type = 'default'
ticket.add()
custom_field_groups = api.get_all(TicketCustomField, ticket.id)
ticket.delete()
assert len(custom_field_groups), len(custom_field_groups)
assert len(custom_field_groups[0].fields), len(custom_field_groups[0].fields)
custom_field_group = custom_field_groups[0]
custom_field = custom_field_group.fields[0]
assert custom_field_group.id, custom_field_group.id
assert custom_field_group.title, custom_field_group.title
assert str(custom_field_group), str(custom_field_group)
assert custom_field.id, custom_field.id
assert custom_field.type, custom_field.type
assert custom_field.title, custom_field.title
assert custom_field.value is not UnsetParameter, custom_field.value
assert str(custom_field), str(custom_field)
|
eino-makitalo/odoo
|
refs/heads/8.0
|
addons/account_payment/wizard/account_payment_pay.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
#TODO:REMOVE this wizard is not used
class account_payment_make_payment(osv.osv_memory):
_name = "account.payment.make.payment"
_description = "Account make payment"
def launch_wizard(self, cr, uid, ids, context=None):
"""
Search for a wizard to launch according to the type.
If type is manual. just confirm the order.
"""
obj_payment_order = self.pool.get('payment.order')
if context is None:
context = {}
# obj_model = self.pool.get('ir.model.data')
# obj_act = self.pool.get('ir.actions.act_window')
# order = obj_payment_order.browse(cr, uid, context['active_id'], context)
obj_payment_order.set_done(cr, uid, [context['active_id']], context)
return {'type': 'ir.actions.act_window_close'}
# t = order.mode and order.mode.type.code or 'manual'
# if t == 'manual':
# obj_payment_order.set_done(cr,uid,context['active_id'],context)
# return {}
#
# gw = obj_payment_order.get_wizard(t)
# if not gw:
# obj_payment_order.set_done(cr,uid,context['active_id'],context)
# return {}
#
# module, wizard= gw
# result = obj_model._get_id(cr, uid, module, wizard)
# id = obj_model.read(cr, uid, [result], ['res_id'])[0]['res_id']
# return obj_act.read(cr, uid, [id])[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
chyeh727/django
|
refs/heads/master
|
django/contrib/gis/db/backends/mysql/schema.py
|
448
|
import logging
from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.db.utils import OperationalError
logger = logging.getLogger('django.contrib.gis')
class MySQLGISSchemaEditor(DatabaseSchemaEditor):
sql_add_spatial_index = 'CREATE SPATIAL INDEX %(index)s ON %(table)s(%(column)s)'
sql_drop_spatial_index = 'DROP INDEX %(index)s ON %(table)s'
def __init__(self, *args, **kwargs):
super(MySQLGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def skip_default(self, field):
return (
super(MySQLGISSchemaEditor, self).skip_default(field) or
# Geometry fields are stored as BLOB/TEXT and can't have defaults.
isinstance(field, GeometryField)
)
def column_sql(self, model, field, include_default=False):
column_sql = super(MySQLGISSchemaEditor, self).column_sql(model, field, include_default)
# MySQL doesn't support spatial indexes on NULL columns
if isinstance(field, GeometryField) and field.spatial_index and not field.null:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(db_table),
'column': qn(field.column),
}
)
return column_sql
def create_model(self, model):
super(MySQLGISSchemaEditor, self).create_model(model)
self.create_spatial_indexes()
def add_field(self, model, field):
super(MySQLGISSchemaEditor, self).add_field(model, field)
self.create_spatial_indexes()
def remove_field(self, model, field):
if isinstance(field, GeometryField) and field.spatial_index:
qn = self.connection.ops.quote_name
sql = self.sql_drop_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(model._meta.db_table),
}
try:
self.execute(sql)
except OperationalError:
logger.error(
"Couldn't remove spatial index: %s (may be expected "
"if your storage engine doesn't support them)." % sql
)
super(MySQLGISSchemaEditor, self).remove_field(model, field)
def _create_spatial_index_name(self, model, field):
return '%s_%s_id' % (model._meta.db_table, field.column)
def create_spatial_indexes(self):
for sql in self.geometry_sql:
try:
self.execute(sql)
except OperationalError:
logger.error(
"Cannot create SPATIAL INDEX %s. Only MyISAM and (as of "
"MySQL 5.7.5) InnoDB support them." % sql
)
self.geometry_sql = []
|
darmaa/odoo
|
refs/heads/master
|
openerp/addons/test_impex/__init__.py
|
2148
|
import models
|
henryfjordan/django
|
refs/heads/master
|
tests/model_meta/test_legacy.py
|
199
|
import warnings
from django import test
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, related
from django.utils.deprecation import RemovedInDjango110Warning
from .models import BasePerson, Person
from .results import TEST_RESULTS
class OptionsBaseTests(test.SimpleTestCase):
def _map_related_query_names(self, res):
return tuple((o.field.related_query_name(), m) for o, m in res)
def _map_names(self, res):
return tuple((f.name, m) for f, m in res)
class M2MTests(OptionsBaseTests):
def test_many_to_many_with_model(self):
for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
models = [model for field, model in model._meta.get_m2m_with_model()]
self.assertEqual([RemovedInDjango110Warning], [w.message.__class__ for w in warning])
self.assertEqual(models, expected_result)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedObjectsTests(OptionsBaseTests):
key_name = lambda self, r: r[0]
def test_related_objects(self):
result_key = 'get_all_related_objects_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_local(self):
result_key = 'get_all_related_objects_with_model_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(local_only=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_include_hidden(self):
result_key = 'get_all_related_objects_with_model_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_include_hidden_local_only(self):
result_key = 'get_all_related_objects_with_model_hidden_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_hidden=True, local_only=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_proxy(self):
result_key = 'get_all_related_objects_with_model_proxy_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_proxy_hidden(self):
result_key = 'get_all_related_objects_with_model_proxy_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True, include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedM2MTests(OptionsBaseTests):
def test_related_m2m_with_model(self):
result_key = 'get_all_related_many_to_many_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_m2m_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_m2m_local_only(self):
result_key = 'get_all_related_many_to_many_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_many_to_many_objects(local_only=True)
self.assertEqual([o.field.related_query_name() for o in objects], expected)
def test_related_m2m_asymmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('following_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertTrue('followers_base' in [o.field.related_query_name() for o in related_m2m])
def test_related_m2m_symmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('friends_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m])
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetFieldByNameTests(OptionsBaseTests):
def test_get_data_field(self):
field_info = Person._meta.get_field_by_name('data_abstract')
self.assertEqual(field_info[1:], (BasePerson, True, False))
self.assertIsInstance(field_info[0], CharField)
def test_get_m2m_field(self):
field_info = Person._meta.get_field_by_name('m2m_base')
self.assertEqual(field_info[1:], (BasePerson, True, True))
self.assertIsInstance(field_info[0], related.ManyToManyField)
def test_get_related_object(self):
field_info = Person._meta.get_field_by_name('relating_baseperson')
self.assertEqual(field_info[1:], (BasePerson, False, False))
self.assertTrue(field_info[0].auto_created)
def test_get_related_m2m(self):
field_info = Person._meta.get_field_by_name('relating_people')
self.assertEqual(field_info[1:], (None, False, True))
self.assertTrue(field_info[0].auto_created)
def test_get_generic_relation(self):
field_info = Person._meta.get_field_by_name('generic_relation_base')
self.assertEqual(field_info[1:], (None, True, False))
self.assertIsInstance(field_info[0], GenericRelation)
def test_get_m2m_field_invalid(self):
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
self.assertRaises(
FieldDoesNotExist,
Person._meta.get_field,
**{'field_name': 'm2m_base', 'many_to_many': False}
)
self.assertEqual(Person._meta.get_field('m2m_base', many_to_many=True).name, 'm2m_base')
# 2 RemovedInDjango110Warning messages should be raised, one for each call of get_field()
# with the 'many_to_many' argument.
self.assertEqual(
[RemovedInDjango110Warning, RemovedInDjango110Warning],
[w.message.__class__ for w in warning]
)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetAllFieldNamesTestCase(OptionsBaseTests):
def test_get_all_field_names(self):
for model, expected_names in TEST_RESULTS['get_all_field_names'].items():
objects = model._meta.get_all_field_names()
self.assertEqual(sorted(map(str, objects)), sorted(expected_names))
|
aricchen/openHR
|
refs/heads/master
|
openerp/addons/survey/wizard/survey_print_statistics.py
|
54
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class survey_print_statistics(osv.osv_memory):
_name = 'survey.print.statistics'
_columns = {
'survey_ids': fields.many2many('survey', string="Survey", required="1"),
}
def action_next(self, cr, uid, ids, context=None):
"""
Print Survey Statistics in pdf format.
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['survey_ids'], context=context)
res = res and res[0] or {}
datas['form'] = res
datas['model'] = 'survey.print.statistics'
return {
'type': 'ir.actions.report.xml',
'report_name': 'survey.analysis',
'datas': datas,
}
survey_print_statistics()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mythos234/SimplKernel-LL-N910F
|
refs/heads/master
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
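# A couple of illustrative (made-up) comparisons for the rule above:
#   compare_data('0|1', '1')  ->  True   ('1' appears in both value lists)
#   compare_data('*', '4')    ->  True   ('*' matches any value)
#   compare_data('2', '3')    ->  False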
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
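# A hypothetical test description following the layout above (the values are
# invented for illustration, not taken from the perf test suite):
#
#   [config]
#   command = record
#   args    = -e cycles sleep 1
#   ret     = 0
#
#   [event:base-record]
#   sample_period = 4000
#   disabled      = 1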
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains the word 'event',
# optionally followed by ':' which allows a 'parent
# event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
# we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
owlzhou/ttornado
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/html5lib/trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
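# Usage sketch (hypothetical data, not part of html5lib):
#
#     t = Trie({u"abc": 1, u"abd": 2, u"xyz": 3})
#     t.keys(u"ab")                   # -> {u'abc', u'abd'}
#     t.has_keys_with_prefix(u"xy")   # -> True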
|
sunny94/temp
|
refs/heads/iss8501_parsing
|
sympy/ntheory/egyptian_fraction.py
|
16
|
from __future__ import print_function, division
import sympy.polys
from sympy import Integer
from fractions import gcd
def egyptian_fraction(r, algorithm="Greedy"):
"""
Return the list of denominators of an Egyptian fraction
expansion [1]_ of the said rational `r`.
Parameters
==========
r : Rational
a positive rational number.
algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional
Denotes the algorithm to be used (the default is "Greedy").
Examples
========
>>> from sympy import Rational
>>> from sympy.ntheory.egyptian_fraction import egyptian_fraction
>>> egyptian_fraction(Rational(3, 7))
[3, 11, 231]
>>> egyptian_fraction(Rational(3, 7), "Graham Jewett")
[7, 8, 9, 56, 57, 72, 3192]
>>> egyptian_fraction(Rational(3, 7), "Takenouchi")
[4, 7, 28]
>>> egyptian_fraction(Rational(3, 7), "Golomb")
[3, 15, 35]
>>> egyptian_fraction(Rational(11, 5), "Golomb")
[1, 2, 3, 4, 9, 234, 1118, 2580]
See Also
========
sympy.core.numbers.Rational
Notes
=====
Currently the following algorithms are supported:
1) Greedy Algorithm
Also called the Fibonacci-Sylvester algorithm [2]_.
At each step, extract the largest unit fraction less
than the target and replace the target with the remainder.
It has some distinct properties:
a) Given `p/q` in lowest terms, generates an expansion of maximum
length `p`. Even as the numerators get large, the number of
terms is seldom more than a handful.
b) Uses minimal memory.
c) The terms can blow up (standard examples of this are 5/121 and
31/311). The denominator is at most squared at each step
(doubly-exponential growth) and typically exhibits
singly-exponential growth.
2) Graham Jewett Algorithm
The algorithm suggested by the result of Graham and Jewett.
Note that this has a tendency to blow up: the length of the
resulting expansion is always ``2**(x/gcd(x, y)) - 1``. See [3]_.
3) Takenouchi Algorithm
The algorithm suggested by Takenouchi (1921).
Differs from the Graham-Jewett algorithm only in the handling
of duplicates. See [3]_.
4) Golomb's Algorithm
A method given by Golomb (1962), using modular arithmetic and
inverses. It yields the same results as a method using continued
fractions proposed by Bleicher (1972). See [4]_.
If the given rational is greater than or equal to 1, a greedy algorithm
of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking
all the unit fractions of this sequence until adding one more would be
greater than the given number. This list of denominators is prefixed
to the result from the requested algorithm used on the remainder. For
example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4,
5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5,
6, 7] is part of the harmonic sequence summing to 363/140, leaving a
remainder of 31/420, which yields [14, 420] by the Greedy algorithm.
The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3,
4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on.
References
==========
.. [1] http://en.wikipedia.org/wiki/Egyptian_fraction
.. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions
.. [3] http://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html
.. [4] http://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf
"""
if r <= 0:
raise ValueError("Value must be positive")
prefix, rem = egypt_harmonic(r)
if rem == 0:
return prefix
x, y = rem.as_numer_denom()
if algorithm == "Greedy":
return prefix + egypt_greedy(x, y)
elif algorithm == "Graham Jewett":
return prefix + egypt_graham_jewett(x, y)
elif algorithm == "Takenouchi":
return prefix + egypt_takenouchi(x, y)
elif algorithm == "Golomb":
return prefix + egypt_golomb(x, y)
else:
raise ValueError("Entered invalid algorithm")
def egypt_greedy(x, y):
if x == 1:
return [y]
else:
a = (-y) % (x)
b = y*(y//x + 1)
c = gcd(a, b)
if c > 1:
num, denom = a//c, b//c
else:
num, denom = a, b
return [y//x + 1] + egypt_greedy(num, denom)
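# Worked sketch of the greedy recursion for 3/7 (consistent with the docstring
# example above): the largest unit fraction not exceeding 3/7 is 1/3
# (y//x + 1 == 7//3 + 1 == 3), leaving 3/7 - 1/3 = 2/21; for 2/21 the next
# denominator is 21//2 + 1 == 11, leaving 2/21 - 1/11 = 1/231; hence the
# expansion [3, 11, 231].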
def egypt_graham_jewett(x, y):
l = [y] * x
# l is now a list of integers whose reciprocals sum to x/y.
# we shall now proceed to manipulate the elements of l without
# changing the reciprocated sum until all elements are unique.
while len(l) != len(set(l)):
l.sort() # the list has duplicates; find a smallest identical pair
for i in range(len(l) - 1):
if l[i] == l[i + 1]:
break
# we have now identified a pair of identical
# elements: l[i] and l[i + 1].
# now comes the application of the result of graham and jewett:
l[i + 1] = l[i] + 1
# and we just iterate that until the list has no duplicates.
l.append(l[i]*(l[i] + 1))
return sorted(l)
def egypt_takenouchi(x, y):
l = [y] * x
while len(l) != len(set(l)):
l.sort()
for i in range(len(l) - 1):
if l[i] == l[i + 1]:
break
k = l[i]
if k % 2 == 0:
l[i] = l[i] // 2
del l[i + 1]
else:
l[i], l[i + 1] = (k + 1)//2, k*(k + 1)//2
return sorted(l)
def egypt_golomb(x, y):
if x == 1:
return [y]
xp = sympy.polys.ZZ.invert(int(x), int(y))
rv = [Integer(xp*y)]
rv.extend(egypt_golomb((x*xp - 1)//y, xp))
return sorted(rv)
def egypt_harmonic(r):
rv = []
d = Integer(1)
acc = Integer(0)
while acc + 1/d <= r:
acc += 1/d
rv.append(d)
d += 1
return (rv, r - acc)
|
bond-anton/BDMesh
|
refs/heads/master
|
tests/test_Mesh1DUniform.py
|
1
|
import random
import math as m
import numpy as np
import unittest
from BDMesh import Mesh1DUniform, Mesh1D
class TestMesh1DUniform(unittest.TestCase):
def setUp(self):
self.mesh = Mesh1DUniform(m.pi, 2*m.pi)
def test_equality(self):
other_mesh = Mesh1DUniform(m.pi, 2 * m.pi)
self.assertEqual(self.mesh, other_mesh)
other_mesh = Mesh1DUniform(2 * m.pi, m.pi, boundary_condition_1=1, boundary_condition_2=3)
self.assertEqual(self.mesh, other_mesh)
other_mesh = Mesh1DUniform(m.pi, 3 * m.pi)
self.assertNotEqual(self.mesh, other_mesh)
other_mesh = Mesh1DUniform(3 * m.pi, m.pi)
self.assertNotEqual(self.mesh, other_mesh)
self.assertEqual(str(self.mesh),
'Mesh1DUniform: [%2.2g; %2.2g], %2.2g step, %d nodes' % (self.mesh.physical_boundary_1,
self.mesh.physical_boundary_2,
self.mesh.physical_step,
self.mesh.num))
self.mesh = Mesh1DUniform(-10, 10, physical_step=1.0)
other_mesh = Mesh1DUniform(-10, 10, num=21)
self.assertEqual(self.mesh, other_mesh)
def test_physical_step(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0, num=100)
self.assertEqual(self.mesh.physical_step, 1.0)
with self.assertRaises(TypeError):
self.mesh.physical_step = 'a'
self.mesh.physical_step = 1.1
self.assertNotEqual(self.mesh.physical_step, 1.1)
self.mesh.physical_step = self.mesh.jacobian
self.assertEqual(self.mesh.physical_step, self.mesh.jacobian)
self.mesh.physical_step = 1.1 * self.mesh.jacobian
self.assertEqual(self.mesh.physical_step, self.mesh.jacobian)
self.mesh.physical_step = -1.0
self.assertEqual(self.mesh.physical_step, 10.0)
def test_local_step(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertEqual(self.mesh.local_step, 0.1)
self.mesh.local_step = 0.05
self.assertEqual(self.mesh.local_step, 0.05)
self.mesh.local_step = 0.053
self.assertNotEqual(self.mesh.local_step, 0.053)
with self.assertRaises(TypeError):
self.mesh.local_step = 'a'
self.mesh.local_step = 1
self.assertEqual(self.mesh.local_step, 1)
self.mesh.local_step = 2
self.assertEqual(self.mesh.local_step, 1)
self.mesh.local_step = -2
self.assertEqual(self.mesh.local_step, 1)
self.mesh.local_step = -0.5
self.assertEqual(self.mesh.local_step, 1.0)
def test_num(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertEqual(self.mesh.num, 11)
self.mesh.num = 12
self.assertEqual(self.mesh.num, 12)
with self.assertRaises(TypeError):
self.mesh.num = 'a'
with self.assertRaises(TypeError):
self.mesh.num = None
self.assertEqual(self.mesh.num, 12)
self.mesh.num = 2 + 1e-11
self.assertEqual(self.mesh.num, 2)
self.mesh.num = 2.8
self.assertEqual(self.mesh.num, 2)
self.mesh.num = 2
self.assertEqual(self.mesh.num, 2)
def test_crop(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.mesh.crop = [3, 2]
np.testing.assert_equal(self.mesh.crop, np.array([3, 2]))
self.mesh.crop = [0, 0]
np.testing.assert_equal(self.mesh.crop, np.array([0, 0]))
self.mesh.crop = [3, 2]
np.testing.assert_equal(self.mesh.crop, np.array([3, 2]))
with self.assertRaises(TypeError):
self.mesh.crop = 3
self.mesh.crop = [-3, 2]
np.testing.assert_equal(self.mesh.crop, np.array([0, 2]))
self.mesh.crop = [3, -2]
np.testing.assert_equal(self.mesh.crop, np.array([3, 0]))
self.mesh.crop = [3, 2, 1]
np.testing.assert_equal(self.mesh.crop, np.array([3, 2]))
self.mesh.crop = [5, 5]
np.testing.assert_equal(self.mesh.crop, np.array([5, 4]))
def test_trim(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.mesh.crop = [3, 2]
self.mesh.trim()
trimmed = Mesh1DUniform(3, 8, physical_step=1.0)
self.assertEqual(self.mesh, trimmed)
def test_inner_mesh_indices(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
inner = Mesh1DUniform(3, 7, physical_step=1.0)
indices = self.mesh.inner_mesh_indices(inner)
self.assertEqual(indices, (3, 7))
inner = Mesh1D(3, 7)
indices = self.mesh.inner_mesh_indices(inner)
self.assertEqual(indices, (3, 7))
with self.assertRaises(TypeError):
self.mesh.inner_mesh_indices(1)
inner = Mesh1DUniform(3, 17, physical_step=1.0)
indices = self.mesh.inner_mesh_indices(inner)
self.assertEqual(indices, (-1, -1))
inner = Mesh1DUniform(-3, 17, physical_step=1.0)
indices = self.mesh.inner_mesh_indices(inner)
self.assertEqual(indices, (-1, -1))
inner = Mesh1DUniform(0.55, 9.55, physical_step=1.0)
indices = self.mesh.inner_mesh_indices(inner)
self.assertEqual(indices, (1, 10))
def test_aligned(self):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
# check if aligned with equal mesh
other = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertTrue(self.mesh.is_aligned_with(other))
# check if aligned with integer node mesh
other = Mesh1DUniform(100, 110, physical_step=1.0)
self.assertTrue(self.mesh.is_aligned_with(other))
# check if aligned with half-step node mesh
other = Mesh1DUniform(100, 110, physical_step=0.5)
self.assertTrue(self.mesh.is_aligned_with(other))
# check if aligned with floating point step mesh
num = 899
self.mesh = Mesh1DUniform(0, 10, num=num + 1)
start = 100 * self.mesh.physical_step
for i in range(1, 10):
other = Mesh1DUniform(start, start + 10, num=2 * num + 1)
self.assertTrue(self.mesh.is_aligned_with(other))
num = other.num - 1
start += other.physical_step * 7
with self.assertRaises(TypeError):
self.mesh.is_aligned_with(1)
# check if not aligned with mesh of same step but shifted by some offset value
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
offset = 0.33
other = Mesh1DUniform(100 + offset, 110 + offset, physical_step=1.0)
self.assertFalse(self.mesh.is_aligned_with(other))
# check if not aligned with mesh of non-integer step coefficient
coeff = 1.33
other = Mesh1DUniform(100, 110, physical_step=1.0 * coeff)
self.assertFalse(self.mesh.is_aligned_with(other))
def test_merge(self):
# check merging with equal mesh
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
other = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertTrue(self.mesh.merge_with(other))
self.assertEqual(self.mesh, other)
# check merging with floating point step mesh
for step_coeff in range(1, 5):
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
num = self.mesh.num - 1
start = -5
for i in range(1, 5):
other = Mesh1DUniform(start, start + 10, num=step_coeff * num + 1)
self.mesh.physical_step = other.physical_step
self.assertTrue(self.mesh.merge_with(other))
merged = Mesh1DUniform(min(self.mesh.physical_boundary_1, start),
max(self.mesh.physical_boundary_2, start + 10),
physical_step=other.physical_step)
self.assertEqual(self.mesh, merged)
num = other.num - 1
start += 1 + other.physical_step * random.choice([-1, 1])
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
# check merging with a non-overlapping mesh
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertFalse(self.mesh.merge_with(Mesh1DUniform(11, 21, physical_step=1.0)))
self.mesh = Mesh1DUniform(0, 10, physical_step=1.0)
self.assertFalse(self.mesh.merge_with(Mesh1DUniform(5, 15, physical_step=0.6)))
# test priority of meshes
self.mesh = Mesh1DUniform(0, 10, physical_step=0.1)
other = Mesh1DUniform(5, 15, physical_step=0.1)
self.assertTrue(self.mesh.merge_with(other, self_priority=True))
merged = Mesh1DUniform(0, 15, physical_step=0.1)
self.assertEqual(self.mesh, merged)
self.mesh = Mesh1DUniform(0, 10, physical_step=0.1)
other = Mesh1DUniform(5, 15, physical_step=0.1)
self.assertTrue(self.mesh.merge_with(other, self_priority=False))
merged = Mesh1DUniform(0, 15, physical_step=0.1)
self.assertEqual(self.mesh, merged)
self.mesh = Mesh1DUniform(0, 10, physical_step=0.1)
other = Mesh1DUniform(5, 15, physical_step=0.1)
with self.assertRaises(TypeError):
self.mesh.merge_with(other, priority='xxx')
|
mammadori/pyglet
|
refs/heads/master
|
pyglet/gl/glu.py
|
45
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /usr/include/GL/glu.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLU as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /usr/include/GL/glu.h
GLU_EXT_object_space_tess = 1 # /usr/include/GL/glu.h:71
GLU_EXT_nurbs_tessellator = 1 # /usr/include/GL/glu.h:72
GLU_FALSE = 0 # /usr/include/GL/glu.h:75
GLU_TRUE = 1 # /usr/include/GL/glu.h:76
GLU_VERSION_1_1 = 1 # /usr/include/GL/glu.h:79
GLU_VERSION_1_2 = 1 # /usr/include/GL/glu.h:80
GLU_VERSION_1_3 = 1 # /usr/include/GL/glu.h:81
GLU_VERSION = 100800 # /usr/include/GL/glu.h:84
GLU_EXTENSIONS = 100801 # /usr/include/GL/glu.h:85
GLU_INVALID_ENUM = 100900 # /usr/include/GL/glu.h:88
GLU_INVALID_VALUE = 100901 # /usr/include/GL/glu.h:89
GLU_OUT_OF_MEMORY = 100902 # /usr/include/GL/glu.h:90
GLU_INCOMPATIBLE_GL_VERSION = 100903 # /usr/include/GL/glu.h:91
GLU_INVALID_OPERATION = 100904 # /usr/include/GL/glu.h:92
GLU_OUTLINE_POLYGON = 100240 # /usr/include/GL/glu.h:96
GLU_OUTLINE_PATCH = 100241 # /usr/include/GL/glu.h:97
GLU_NURBS_ERROR = 100103 # /usr/include/GL/glu.h:100
GLU_ERROR = 100103 # /usr/include/GL/glu.h:101
GLU_NURBS_BEGIN = 100164 # /usr/include/GL/glu.h:102
GLU_NURBS_BEGIN_EXT = 100164 # /usr/include/GL/glu.h:103
GLU_NURBS_VERTEX = 100165 # /usr/include/GL/glu.h:104
GLU_NURBS_VERTEX_EXT = 100165 # /usr/include/GL/glu.h:105
GLU_NURBS_NORMAL = 100166 # /usr/include/GL/glu.h:106
GLU_NURBS_NORMAL_EXT = 100166 # /usr/include/GL/glu.h:107
GLU_NURBS_COLOR = 100167 # /usr/include/GL/glu.h:108
GLU_NURBS_COLOR_EXT = 100167 # /usr/include/GL/glu.h:109
GLU_NURBS_TEXTURE_COORD = 100168 # /usr/include/GL/glu.h:110
GLU_NURBS_TEX_COORD_EXT = 100168 # /usr/include/GL/glu.h:111
GLU_NURBS_END = 100169 # /usr/include/GL/glu.h:112
GLU_NURBS_END_EXT = 100169 # /usr/include/GL/glu.h:113
GLU_NURBS_BEGIN_DATA = 100170 # /usr/include/GL/glu.h:114
GLU_NURBS_BEGIN_DATA_EXT = 100170 # /usr/include/GL/glu.h:115
GLU_NURBS_VERTEX_DATA = 100171 # /usr/include/GL/glu.h:116
GLU_NURBS_VERTEX_DATA_EXT = 100171 # /usr/include/GL/glu.h:117
GLU_NURBS_NORMAL_DATA = 100172 # /usr/include/GL/glu.h:118
GLU_NURBS_NORMAL_DATA_EXT = 100172 # /usr/include/GL/glu.h:119
GLU_NURBS_COLOR_DATA = 100173 # /usr/include/GL/glu.h:120
GLU_NURBS_COLOR_DATA_EXT = 100173 # /usr/include/GL/glu.h:121
GLU_NURBS_TEXTURE_COORD_DATA = 100174 # /usr/include/GL/glu.h:122
GLU_NURBS_TEX_COORD_DATA_EXT = 100174 # /usr/include/GL/glu.h:123
GLU_NURBS_END_DATA = 100175 # /usr/include/GL/glu.h:124
GLU_NURBS_END_DATA_EXT = 100175 # /usr/include/GL/glu.h:125
GLU_NURBS_ERROR1 = 100251 # /usr/include/GL/glu.h:128
GLU_NURBS_ERROR2 = 100252 # /usr/include/GL/glu.h:129
GLU_NURBS_ERROR3 = 100253 # /usr/include/GL/glu.h:130
GLU_NURBS_ERROR4 = 100254 # /usr/include/GL/glu.h:131
GLU_NURBS_ERROR5 = 100255 # /usr/include/GL/glu.h:132
GLU_NURBS_ERROR6 = 100256 # /usr/include/GL/glu.h:133
GLU_NURBS_ERROR7 = 100257 # /usr/include/GL/glu.h:134
GLU_NURBS_ERROR8 = 100258 # /usr/include/GL/glu.h:135
GLU_NURBS_ERROR9 = 100259 # /usr/include/GL/glu.h:136
GLU_NURBS_ERROR10 = 100260 # /usr/include/GL/glu.h:137
GLU_NURBS_ERROR11 = 100261 # /usr/include/GL/glu.h:138
GLU_NURBS_ERROR12 = 100262 # /usr/include/GL/glu.h:139
GLU_NURBS_ERROR13 = 100263 # /usr/include/GL/glu.h:140
GLU_NURBS_ERROR14 = 100264 # /usr/include/GL/glu.h:141
GLU_NURBS_ERROR15 = 100265 # /usr/include/GL/glu.h:142
GLU_NURBS_ERROR16 = 100266 # /usr/include/GL/glu.h:143
GLU_NURBS_ERROR17 = 100267 # /usr/include/GL/glu.h:144
GLU_NURBS_ERROR18 = 100268 # /usr/include/GL/glu.h:145
GLU_NURBS_ERROR19 = 100269 # /usr/include/GL/glu.h:146
GLU_NURBS_ERROR20 = 100270 # /usr/include/GL/glu.h:147
GLU_NURBS_ERROR21 = 100271 # /usr/include/GL/glu.h:148
GLU_NURBS_ERROR22 = 100272 # /usr/include/GL/glu.h:149
GLU_NURBS_ERROR23 = 100273 # /usr/include/GL/glu.h:150
GLU_NURBS_ERROR24 = 100274 # /usr/include/GL/glu.h:151
GLU_NURBS_ERROR25 = 100275 # /usr/include/GL/glu.h:152
GLU_NURBS_ERROR26 = 100276 # /usr/include/GL/glu.h:153
GLU_NURBS_ERROR27 = 100277 # /usr/include/GL/glu.h:154
GLU_NURBS_ERROR28 = 100278 # /usr/include/GL/glu.h:155
GLU_NURBS_ERROR29 = 100279 # /usr/include/GL/glu.h:156
GLU_NURBS_ERROR30 = 100280 # /usr/include/GL/glu.h:157
GLU_NURBS_ERROR31 = 100281 # /usr/include/GL/glu.h:158
GLU_NURBS_ERROR32 = 100282 # /usr/include/GL/glu.h:159
GLU_NURBS_ERROR33 = 100283 # /usr/include/GL/glu.h:160
GLU_NURBS_ERROR34 = 100284 # /usr/include/GL/glu.h:161
GLU_NURBS_ERROR35 = 100285 # /usr/include/GL/glu.h:162
GLU_NURBS_ERROR36 = 100286 # /usr/include/GL/glu.h:163
GLU_NURBS_ERROR37 = 100287 # /usr/include/GL/glu.h:164
GLU_AUTO_LOAD_MATRIX = 100200 # /usr/include/GL/glu.h:167
GLU_CULLING = 100201 # /usr/include/GL/glu.h:168
GLU_SAMPLING_TOLERANCE = 100203 # /usr/include/GL/glu.h:169
GLU_DISPLAY_MODE = 100204 # /usr/include/GL/glu.h:170
GLU_PARAMETRIC_TOLERANCE = 100202 # /usr/include/GL/glu.h:171
GLU_SAMPLING_METHOD = 100205 # /usr/include/GL/glu.h:172
GLU_U_STEP = 100206 # /usr/include/GL/glu.h:173
GLU_V_STEP = 100207 # /usr/include/GL/glu.h:174
GLU_NURBS_MODE = 100160 # /usr/include/GL/glu.h:175
GLU_NURBS_MODE_EXT = 100160 # /usr/include/GL/glu.h:176
GLU_NURBS_TESSELLATOR = 100161 # /usr/include/GL/glu.h:177
GLU_NURBS_TESSELLATOR_EXT = 100161 # /usr/include/GL/glu.h:178
GLU_NURBS_RENDERER = 100162 # /usr/include/GL/glu.h:179
GLU_NURBS_RENDERER_EXT = 100162 # /usr/include/GL/glu.h:180
GLU_OBJECT_PARAMETRIC_ERROR = 100208 # /usr/include/GL/glu.h:183
GLU_OBJECT_PARAMETRIC_ERROR_EXT = 100208 # /usr/include/GL/glu.h:184
GLU_OBJECT_PATH_LENGTH = 100209 # /usr/include/GL/glu.h:185
GLU_OBJECT_PATH_LENGTH_EXT = 100209 # /usr/include/GL/glu.h:186
GLU_PATH_LENGTH = 100215 # /usr/include/GL/glu.h:187
GLU_PARAMETRIC_ERROR = 100216 # /usr/include/GL/glu.h:188
GLU_DOMAIN_DISTANCE = 100217 # /usr/include/GL/glu.h:189
GLU_MAP1_TRIM_2 = 100210 # /usr/include/GL/glu.h:192
GLU_MAP1_TRIM_3 = 100211 # /usr/include/GL/glu.h:193
GLU_POINT = 100010 # /usr/include/GL/glu.h:196
GLU_LINE = 100011 # /usr/include/GL/glu.h:197
GLU_FILL = 100012 # /usr/include/GL/glu.h:198
GLU_SILHOUETTE = 100013 # /usr/include/GL/glu.h:199
GLU_SMOOTH = 100000 # /usr/include/GL/glu.h:205
GLU_FLAT = 100001 # /usr/include/GL/glu.h:206
GLU_NONE = 100002 # /usr/include/GL/glu.h:207
GLU_OUTSIDE = 100020 # /usr/include/GL/glu.h:210
GLU_INSIDE = 100021 # /usr/include/GL/glu.h:211
GLU_TESS_BEGIN = 100100 # /usr/include/GL/glu.h:214
GLU_BEGIN = 100100 # /usr/include/GL/glu.h:215
GLU_TESS_VERTEX = 100101 # /usr/include/GL/glu.h:216
GLU_VERTEX = 100101 # /usr/include/GL/glu.h:217
GLU_TESS_END = 100102 # /usr/include/GL/glu.h:218
GLU_END = 100102 # /usr/include/GL/glu.h:219
GLU_TESS_ERROR = 100103 # /usr/include/GL/glu.h:220
GLU_TESS_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:221
GLU_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:222
GLU_TESS_COMBINE = 100105 # /usr/include/GL/glu.h:223
GLU_TESS_BEGIN_DATA = 100106 # /usr/include/GL/glu.h:224
GLU_TESS_VERTEX_DATA = 100107 # /usr/include/GL/glu.h:225
GLU_TESS_END_DATA = 100108 # /usr/include/GL/glu.h:226
GLU_TESS_ERROR_DATA = 100109 # /usr/include/GL/glu.h:227
GLU_TESS_EDGE_FLAG_DATA = 100110 # /usr/include/GL/glu.h:228
GLU_TESS_COMBINE_DATA = 100111 # /usr/include/GL/glu.h:229
GLU_CW = 100120 # /usr/include/GL/glu.h:232
GLU_CCW = 100121 # /usr/include/GL/glu.h:233
GLU_INTERIOR = 100122 # /usr/include/GL/glu.h:234
GLU_EXTERIOR = 100123 # /usr/include/GL/glu.h:235
GLU_UNKNOWN = 100124 # /usr/include/GL/glu.h:236
GLU_TESS_WINDING_RULE = 100140 # /usr/include/GL/glu.h:239
GLU_TESS_BOUNDARY_ONLY = 100141 # /usr/include/GL/glu.h:240
GLU_TESS_TOLERANCE = 100142 # /usr/include/GL/glu.h:241
GLU_TESS_ERROR1 = 100151 # /usr/include/GL/glu.h:244
GLU_TESS_ERROR2 = 100152 # /usr/include/GL/glu.h:245
GLU_TESS_ERROR3 = 100153 # /usr/include/GL/glu.h:246
GLU_TESS_ERROR4 = 100154 # /usr/include/GL/glu.h:247
GLU_TESS_ERROR5 = 100155 # /usr/include/GL/glu.h:248
GLU_TESS_ERROR6 = 100156 # /usr/include/GL/glu.h:249
GLU_TESS_ERROR7 = 100157 # /usr/include/GL/glu.h:250
GLU_TESS_ERROR8 = 100158 # /usr/include/GL/glu.h:251
GLU_TESS_MISSING_BEGIN_POLYGON = 100151 # /usr/include/GL/glu.h:252
GLU_TESS_MISSING_BEGIN_CONTOUR = 100152 # /usr/include/GL/glu.h:253
GLU_TESS_MISSING_END_POLYGON = 100153 # /usr/include/GL/glu.h:254
GLU_TESS_MISSING_END_CONTOUR = 100154 # /usr/include/GL/glu.h:255
GLU_TESS_COORD_TOO_LARGE = 100155 # /usr/include/GL/glu.h:256
GLU_TESS_NEED_COMBINE_CALLBACK = 100156 # /usr/include/GL/glu.h:257
GLU_TESS_WINDING_ODD = 100130 # /usr/include/GL/glu.h:260
GLU_TESS_WINDING_NONZERO = 100131 # /usr/include/GL/glu.h:261
GLU_TESS_WINDING_POSITIVE = 100132 # /usr/include/GL/glu.h:262
GLU_TESS_WINDING_NEGATIVE = 100133 # /usr/include/GL/glu.h:263
GLU_TESS_WINDING_ABS_GEQ_TWO = 100134 # /usr/include/GL/glu.h:264
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
GLUnurbs = struct_GLUnurbs # /usr/include/GL/glu.h:274
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
GLUquadric = struct_GLUquadric # /usr/include/GL/glu.h:275
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
GLUtesselator = struct_GLUtesselator # /usr/include/GL/glu.h:276
GLUnurbsObj = GLUnurbs # /usr/include/GL/glu.h:279
GLUquadricObj = GLUquadric # /usr/include/GL/glu.h:280
GLUtesselatorObj = GLUtesselator # /usr/include/GL/glu.h:281
GLUtriangulatorObj = GLUtesselator # /usr/include/GL/glu.h:282
GLU_TESS_MAX_COORD = 9.9999999999999998e+149 # /usr/include/GL/glu.h:284
_GLUfuncptr = CFUNCTYPE(None) # /usr/include/GL/glu.h:287
# /usr/include/GL/glu.h:289
gluBeginCurve = _link_function('gluBeginCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:290
gluBeginPolygon = _link_function('gluBeginPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:291
gluBeginSurface = _link_function('gluBeginSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:292
gluBeginTrim = _link_function('gluBeginTrim', None, [POINTER(GLUnurbs)], None)
GLint = c_int # /usr/include/GL/gl.h:159
GLenum = c_uint # /usr/include/GL/gl.h:153
GLsizei = c_int # /usr/include/GL/gl.h:163
# /usr/include/GL/glu.h:293
gluBuild1DMipmapLevels = _link_function('gluBuild1DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:294
gluBuild1DMipmaps = _link_function('gluBuild1DMipmaps', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:295
gluBuild2DMipmapLevels = _link_function('gluBuild2DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:296
gluBuild2DMipmaps = _link_function('gluBuild2DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:297
gluBuild3DMipmapLevels = _link_function('gluBuild3DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:298
gluBuild3DMipmaps = _link_function('gluBuild3DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
GLubyte = c_ubyte # /usr/include/GL/gl.h:160
# /usr/include/GL/glu.h:299
gluCheckExtension = _link_function('gluCheckExtension', GLboolean, [POINTER(GLubyte), POINTER(GLubyte)], None)
GLdouble = c_double # /usr/include/GL/gl.h:166
# /usr/include/GL/glu.h:300
gluCylinder = _link_function('gluCylinder', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:301
gluDeleteNurbsRenderer = _link_function('gluDeleteNurbsRenderer', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:302
gluDeleteQuadric = _link_function('gluDeleteQuadric', None, [POINTER(GLUquadric)], None)
# /usr/include/GL/glu.h:303
gluDeleteTess = _link_function('gluDeleteTess', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:304
gluDisk = _link_function('gluDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:305
gluEndCurve = _link_function('gluEndCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:306
gluEndPolygon = _link_function('gluEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:307
gluEndSurface = _link_function('gluEndSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:308
gluEndTrim = _link_function('gluEndTrim', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:309
gluErrorString = _link_function('gluErrorString', POINTER(GLubyte), [GLenum], None)
GLfloat = c_float # /usr/include/GL/gl.h:164
# /usr/include/GL/glu.h:310
gluGetNurbsProperty = _link_function('gluGetNurbsProperty', None, [POINTER(GLUnurbs), GLenum, POINTER(GLfloat)], None)
# /usr/include/GL/glu.h:311
gluGetString = _link_function('gluGetString', POINTER(GLubyte), [GLenum], None)
# /usr/include/GL/glu.h:312
gluGetTessProperty = _link_function('gluGetTessProperty', None, [POINTER(GLUtesselator), GLenum, POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:313
gluLoadSamplingMatrices = _link_function('gluLoadSamplingMatrices', None, [POINTER(GLUnurbs), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLint)], None)
# /usr/include/GL/glu.h:314
gluLookAt = _link_function('gluLookAt', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:315
gluNewNurbsRenderer = _link_function('gluNewNurbsRenderer', POINTER(GLUnurbs), [], None)
# /usr/include/GL/glu.h:316
gluNewQuadric = _link_function('gluNewQuadric', POINTER(GLUquadric), [], None)
# /usr/include/GL/glu.h:317
gluNewTess = _link_function('gluNewTess', POINTER(GLUtesselator), [], None)
# /usr/include/GL/glu.h:318
gluNextContour = _link_function('gluNextContour', None, [POINTER(GLUtesselator), GLenum], None)
# /usr/include/GL/glu.h:319
gluNurbsCallback = _link_function('gluNurbsCallback', None, [POINTER(GLUnurbs), GLenum, _GLUfuncptr], None)
GLvoid = None # /usr/include/GL/gl.h:156
# /usr/include/GL/glu.h:320
gluNurbsCallbackData = _link_function('gluNurbsCallbackData', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:321
gluNurbsCallbackDataEXT = _link_function('gluNurbsCallbackDataEXT', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:322
gluNurbsCurve = _link_function('gluNurbsCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:323
gluNurbsProperty = _link_function('gluNurbsProperty', None, [POINTER(GLUnurbs), GLenum, GLfloat], None)
# /usr/include/GL/glu.h:324
gluNurbsSurface = _link_function('gluNurbsSurface', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLint, POINTER(GLfloat), GLint, GLint, GLenum], None)
# /usr/include/GL/glu.h:325
gluOrtho2D = _link_function('gluOrtho2D', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:326
gluPartialDisk = _link_function('gluPartialDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:327
gluPerspective = _link_function('gluPerspective', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:328
gluPickMatrix = _link_function('gluPickMatrix', None, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLint)], None)
# /usr/include/GL/glu.h:329
gluProject = _link_function('gluProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:330
gluPwlCurve = _link_function('gluPwlCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:331
gluQuadricCallback = _link_function('gluQuadricCallback', None, [POINTER(GLUquadric), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:332
gluQuadricDrawStyle = _link_function('gluQuadricDrawStyle', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:333
gluQuadricNormals = _link_function('gluQuadricNormals', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:334
gluQuadricOrientation = _link_function('gluQuadricOrientation', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:335
gluQuadricTexture = _link_function('gluQuadricTexture', None, [POINTER(GLUquadric), GLboolean], None)
# /usr/include/GL/glu.h:336
gluScaleImage = _link_function('gluScaleImage', GLint, [GLenum, GLsizei, GLsizei, GLenum, POINTER(None), GLsizei, GLsizei, GLenum, POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:337
gluSphere = _link_function('gluSphere', None, [POINTER(GLUquadric), GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:338
gluTessBeginContour = _link_function('gluTessBeginContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:339
gluTessBeginPolygon = _link_function('gluTessBeginPolygon', None, [POINTER(GLUtesselator), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:340
gluTessCallback = _link_function('gluTessCallback', None, [POINTER(GLUtesselator), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:341
gluTessEndContour = _link_function('gluTessEndContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:342
gluTessEndPolygon = _link_function('gluTessEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:343
gluTessNormal = _link_function('gluTessNormal', None, [POINTER(GLUtesselator), GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:344
gluTessProperty = _link_function('gluTessProperty', None, [POINTER(GLUtesselator), GLenum, GLdouble], None)
# /usr/include/GL/glu.h:345
gluTessVertex = _link_function('gluTessVertex', None, [POINTER(GLUtesselator), POINTER(GLdouble), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:346
gluUnProject = _link_function('gluUnProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:347
gluUnProject4 = _link_function('gluUnProject4', GLint, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
__all__ = ['GLU_EXT_object_space_tess', 'GLU_EXT_nurbs_tessellator',
'GLU_FALSE', 'GLU_TRUE', 'GLU_VERSION_1_1', 'GLU_VERSION_1_2',
'GLU_VERSION_1_3', 'GLU_VERSION', 'GLU_EXTENSIONS', 'GLU_INVALID_ENUM',
'GLU_INVALID_VALUE', 'GLU_OUT_OF_MEMORY', 'GLU_INCOMPATIBLE_GL_VERSION',
'GLU_INVALID_OPERATION', 'GLU_OUTLINE_POLYGON', 'GLU_OUTLINE_PATCH',
'GLU_NURBS_ERROR', 'GLU_ERROR', 'GLU_NURBS_BEGIN', 'GLU_NURBS_BEGIN_EXT',
'GLU_NURBS_VERTEX', 'GLU_NURBS_VERTEX_EXT', 'GLU_NURBS_NORMAL',
'GLU_NURBS_NORMAL_EXT', 'GLU_NURBS_COLOR', 'GLU_NURBS_COLOR_EXT',
'GLU_NURBS_TEXTURE_COORD', 'GLU_NURBS_TEX_COORD_EXT', 'GLU_NURBS_END',
'GLU_NURBS_END_EXT', 'GLU_NURBS_BEGIN_DATA', 'GLU_NURBS_BEGIN_DATA_EXT',
'GLU_NURBS_VERTEX_DATA', 'GLU_NURBS_VERTEX_DATA_EXT', 'GLU_NURBS_NORMAL_DATA',
'GLU_NURBS_NORMAL_DATA_EXT', 'GLU_NURBS_COLOR_DATA',
'GLU_NURBS_COLOR_DATA_EXT', 'GLU_NURBS_TEXTURE_COORD_DATA',
'GLU_NURBS_TEX_COORD_DATA_EXT', 'GLU_NURBS_END_DATA',
'GLU_NURBS_END_DATA_EXT', 'GLU_NURBS_ERROR1', 'GLU_NURBS_ERROR2',
'GLU_NURBS_ERROR3', 'GLU_NURBS_ERROR4', 'GLU_NURBS_ERROR5',
'GLU_NURBS_ERROR6', 'GLU_NURBS_ERROR7', 'GLU_NURBS_ERROR8',
'GLU_NURBS_ERROR9', 'GLU_NURBS_ERROR10', 'GLU_NURBS_ERROR11',
'GLU_NURBS_ERROR12', 'GLU_NURBS_ERROR13', 'GLU_NURBS_ERROR14',
'GLU_NURBS_ERROR15', 'GLU_NURBS_ERROR16', 'GLU_NURBS_ERROR17',
'GLU_NURBS_ERROR18', 'GLU_NURBS_ERROR19', 'GLU_NURBS_ERROR20',
'GLU_NURBS_ERROR21', 'GLU_NURBS_ERROR22', 'GLU_NURBS_ERROR23',
'GLU_NURBS_ERROR24', 'GLU_NURBS_ERROR25', 'GLU_NURBS_ERROR26',
'GLU_NURBS_ERROR27', 'GLU_NURBS_ERROR28', 'GLU_NURBS_ERROR29',
'GLU_NURBS_ERROR30', 'GLU_NURBS_ERROR31', 'GLU_NURBS_ERROR32',
'GLU_NURBS_ERROR33', 'GLU_NURBS_ERROR34', 'GLU_NURBS_ERROR35',
'GLU_NURBS_ERROR36', 'GLU_NURBS_ERROR37', 'GLU_AUTO_LOAD_MATRIX',
'GLU_CULLING', 'GLU_SAMPLING_TOLERANCE', 'GLU_DISPLAY_MODE',
'GLU_PARAMETRIC_TOLERANCE', 'GLU_SAMPLING_METHOD', 'GLU_U_STEP', 'GLU_V_STEP',
'GLU_NURBS_MODE', 'GLU_NURBS_MODE_EXT', 'GLU_NURBS_TESSELLATOR',
'GLU_NURBS_TESSELLATOR_EXT', 'GLU_NURBS_RENDERER', 'GLU_NURBS_RENDERER_EXT',
'GLU_OBJECT_PARAMETRIC_ERROR', 'GLU_OBJECT_PARAMETRIC_ERROR_EXT',
'GLU_OBJECT_PATH_LENGTH', 'GLU_OBJECT_PATH_LENGTH_EXT', 'GLU_PATH_LENGTH',
'GLU_PARAMETRIC_ERROR', 'GLU_DOMAIN_DISTANCE', 'GLU_MAP1_TRIM_2',
'GLU_MAP1_TRIM_3', 'GLU_POINT', 'GLU_LINE', 'GLU_FILL', 'GLU_SILHOUETTE',
'GLU_SMOOTH', 'GLU_FLAT', 'GLU_NONE', 'GLU_OUTSIDE', 'GLU_INSIDE',
'GLU_TESS_BEGIN', 'GLU_BEGIN', 'GLU_TESS_VERTEX', 'GLU_VERTEX',
'GLU_TESS_END', 'GLU_END', 'GLU_TESS_ERROR', 'GLU_TESS_EDGE_FLAG',
'GLU_EDGE_FLAG', 'GLU_TESS_COMBINE', 'GLU_TESS_BEGIN_DATA',
'GLU_TESS_VERTEX_DATA', 'GLU_TESS_END_DATA', 'GLU_TESS_ERROR_DATA',
'GLU_TESS_EDGE_FLAG_DATA', 'GLU_TESS_COMBINE_DATA', 'GLU_CW', 'GLU_CCW',
'GLU_INTERIOR', 'GLU_EXTERIOR', 'GLU_UNKNOWN', 'GLU_TESS_WINDING_RULE',
'GLU_TESS_BOUNDARY_ONLY', 'GLU_TESS_TOLERANCE', 'GLU_TESS_ERROR1',
'GLU_TESS_ERROR2', 'GLU_TESS_ERROR3', 'GLU_TESS_ERROR4', 'GLU_TESS_ERROR5',
'GLU_TESS_ERROR6', 'GLU_TESS_ERROR7', 'GLU_TESS_ERROR8',
'GLU_TESS_MISSING_BEGIN_POLYGON', 'GLU_TESS_MISSING_BEGIN_CONTOUR',
'GLU_TESS_MISSING_END_POLYGON', 'GLU_TESS_MISSING_END_CONTOUR',
'GLU_TESS_COORD_TOO_LARGE', 'GLU_TESS_NEED_COMBINE_CALLBACK',
'GLU_TESS_WINDING_ODD', 'GLU_TESS_WINDING_NONZERO',
'GLU_TESS_WINDING_POSITIVE', 'GLU_TESS_WINDING_NEGATIVE',
'GLU_TESS_WINDING_ABS_GEQ_TWO', 'GLUnurbs', 'GLUquadric', 'GLUtesselator',
'GLUnurbsObj', 'GLUquadricObj', 'GLUtesselatorObj', 'GLUtriangulatorObj',
'GLU_TESS_MAX_COORD', '_GLUfuncptr', 'gluBeginCurve', 'gluBeginPolygon',
'gluBeginSurface', 'gluBeginTrim', 'gluBuild1DMipmapLevels',
'gluBuild1DMipmaps', 'gluBuild2DMipmapLevels', 'gluBuild2DMipmaps',
'gluBuild3DMipmapLevels', 'gluBuild3DMipmaps', 'gluCheckExtension',
'gluCylinder', 'gluDeleteNurbsRenderer', 'gluDeleteQuadric', 'gluDeleteTess',
'gluDisk', 'gluEndCurve', 'gluEndPolygon', 'gluEndSurface', 'gluEndTrim',
'gluErrorString', 'gluGetNurbsProperty', 'gluGetString', 'gluGetTessProperty',
'gluLoadSamplingMatrices', 'gluLookAt', 'gluNewNurbsRenderer',
'gluNewQuadric', 'gluNewTess', 'gluNextContour', 'gluNurbsCallback',
'gluNurbsCallbackData', 'gluNurbsCallbackDataEXT', 'gluNurbsCurve',
'gluNurbsProperty', 'gluNurbsSurface', 'gluOrtho2D', 'gluPartialDisk',
'gluPerspective', 'gluPickMatrix', 'gluProject', 'gluPwlCurve',
'gluQuadricCallback', 'gluQuadricDrawStyle', 'gluQuadricNormals',
'gluQuadricOrientation', 'gluQuadricTexture', 'gluScaleImage', 'gluSphere',
'gluTessBeginContour', 'gluTessBeginPolygon', 'gluTessCallback',
'gluTessEndContour', 'gluTessEndPolygon', 'gluTessNormal', 'gluTessProperty',
'gluTessVertex', 'gluUnProject', 'gluUnProject4']
# END GENERATED CONTENT (do not edit above this line)
|
xuxiao/zulip
|
refs/heads/master
|
analytics/management/commands/user_stats.py
|
114
|
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.models import UserProfile, Realm, Stream, Message
class Command(BaseCommand):
help = "Generate statistics on user activity."
def add_arguments(self, parser):
parser.add_argument('realms', metavar='<realm>', type=str, nargs='*',
help="realm to generate statistics for")
def messages_sent_by(self, user, week):
start = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=(week + 1)*7)
end = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=week*7)
return Message.objects.filter(sender=user, pub_date__gt=start, pub_date__lte=end).count()
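# Note (added for illustration): with the filter above, week=0 covers the most
# recent 7 days and week=1 the 7 days before that; each window is half-open on
# the start side (pub_date > start, pub_date <= end).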
def handle(self, *args, **options):
if options['realms']:
try:
realms = [Realm.objects.get(domain=domain) for domain in options['realms']]
except Realm.DoesNotExist, e:
print e
exit(1)
else:
realms = Realm.objects.all()
for realm in realms:
print realm.domain
user_profiles = UserProfile.objects.filter(realm=realm, is_active=True)
print "%d users" % (len(user_profiles),)
print "%d streams" % (len(Stream.objects.filter(realm=realm)),)
for user_profile in user_profiles:
print "%35s" % (user_profile.email,),
for week in range(10):
print "%5d" % (self.messages_sent_by(user_profile, week)),
print ""
|
v-iam/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-resource/azure/mgmt/resource/links/v2016_09_01/models/resource_link_filter.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceLinkFilter(Model):
"""Resource link filter.
:param target_id: The ID of the target resource.
:type target_id: str
"""
_validation = {
'target_id': {'required': True},
}
_attribute_map = {
'target_id': {'key': 'targetId', 'type': 'str'},
}
def __init__(self, target_id):
self.target_id = target_id
|
Dewl/apv
|
refs/heads/master
|
pdfview/scripts/pjpp.py
|
111
|
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
configurations = {'lite', 'pro'}
package_dirs = {
'lite': ('src/cx/hell/android/pdfview',),
'pro': ('src/cx/hell/android/pdfviewpro',)
}
file_replaces = {
'lite': (
'cx.hell.android.pdfview.',
'"cx.hell.android.pdfview"',
'package cx.hell.android.pdfview;',
'android:icon="@drawable/pdfviewer"',
),
'pro': (
'cx.hell.android.pdfviewpro.',
'"cx.hell.android.pdfviewpro"',
'package cx.hell.android.pdfviewpro;',
'android:icon="@drawable/apvpro_icon"',
),
}
def make_comment(file_type, line):
"""Add comment to line and return modified line, but try not to add comments to already commented out lines."""
if file_type in ('java', 'c'):
return '// ' + line if not line.startswith('//') else line
elif file_type in ('html', 'xml'):
return '<!-- ' + line.strip() + ' -->\n' if not line.strip().startswith('<!--') else line
else:
raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
"""Remove comment from line, but only if line is commented, otherwise return unchanged line."""
if file_type in ('java', 'c'):
if line.startswith('// '): return line[3:]
else: return line
elif file_type in ('html', 'xml'):
if line.strip().startswith('<!-- ') and line.strip().endswith(' -->'): return line.strip()[5:-4] + '\n'
else: return line
else:
raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
new_lines = []
re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
required_defs = []
for i, line in enumerate(lines):
m = re_cmd_starts.search(line)
if m:
required_def = m.group('def')
logging.debug("line %s:%d %s matches as start of %s" % (filename, i+1, line.strip(), required_def))
required_defs.append(required_def)
new_lines.append(line)
continue
m = re_cmd_ends.search(line)
if m:
logging.debug("line %s:%d %s matches as endif" % (filename, i+1, line.strip()))
required_defs.pop()
new_lines.append(line)
continue
if len(required_defs) == 0:
new_lines.append(line)
elif len(required_defs) == 1 and required_defs[0] == conf:
new_line = remove_comment(file_type, line)
new_lines.append(new_line)
else:
new_line = make_comment(file_type, line)
new_lines.append(new_line)
assert len(new_lines) == len(lines)
return new_lines
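# Minimal sketch (added for illustration; the helper name below is not part of
# the original script): handle_comments() uncomments lines guarded by
# "// #ifdef <conf>" when <conf> matches the active configuration and leaves
# (or re-adds) the comments otherwise.
def _example_handle_comments():
    lines = ['int a = 1;\n',
             '// #ifdef pro\n',
             '// pro_only();\n',
             '// #endif\n']
    assert handle_comments('pro', 'java', lines, '<example>') == [
        'int a = 1;\n', '// #ifdef pro\n', 'pro_only();\n', '// #endif\n']
    assert handle_comments('lite', 'java', lines, '<example>') == lines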
def find_files(dirname, name):
matches = []
for root, dirnames, filenames in os.walk(dirname):
for filename in fnmatch.filter(filenames, name):
matches.append(os.path.join(root, filename))
return matches
def fix_package_dirs(conf):
for i, dirname in enumerate(package_dirs[conf]):
logging.debug("trying to restore %s" % dirname)
if os.path.exists(dirname):
if os.path.isdir(dirname):
logging.debug(" already exists")
continue
else:
logging.error(" %s already exists, but is not dir" % dirname)
continue
# find other name
found_dirname = None
for other_conf, other_dirnames in package_dirs.items():
other_dirname = other_dirnames[i]
if other_conf == conf: continue # skip this conf when looking for other conf
if os.path.isdir(other_dirname):
if found_dirname is None:
found_dirname = other_dirname
else:
# source dir already found :/
raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
if found_dirname is None:
raise Exception("didn't find %s" % dirname)
# now rename found_dirname to dirname
os.rename(found_dirname, dirname)
logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
for filename in filenames:
lines = open(filename).readlines()
new_lines = handle_comments(conf, file_type, lines, filename)
if lines != new_lines:
logging.debug("file %s comments changed" % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
def replace_in_files(conf, filenames):
#logging.debug("about replace to %s in %s" % (conf, ', '.join(filenames)))
other_confs = [other_conf for other_conf in file_replaces.keys() if other_conf != conf]
#logging.debug("there are %d other confs to replace from: %s" % (len(other_confs), ', '.join(other_confs)))
for filename in filenames:
new_lines = []
lines = open(filename).readlines()
for line in lines:
new_line = line
for i, target_string in enumerate(file_replaces[conf]):
for other_conf in other_confs:
source_string = file_replaces[other_conf][i]
new_line = new_line.replace(source_string, target_string)
new_lines.append(new_line)
if new_lines != lines:
logging.debug("file %s changed, writing..." % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
else:
logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
filenames = find_files('src', name='*.java')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'java', filenames)
def fix_xml_files(conf):
filenames = find_files('.', name='*.xml')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'xml', filenames)
def fix_html_files(conf):
filenames = find_files('res', name='*.html')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'html', filenames)
def fix_c_files(conf):
filenames = find_files('jni/pdfview2', name='*.c')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
filenames = find_files('jni/pdfview2', name='*.h')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
def fix_resources(conf):
pass
def main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
parser = argparse.ArgumentParser(description='Switch project configurations')
parser.add_argument('--configuration', dest='configuration', default='lite')
args = parser.parse_args()
if not os.path.exists('AndroidManifest.xml'):
raise Exception('android manifest not found, please run this script from main project directory')
conf = args.configuration
if conf not in configurations:
raise Exception("invalid configuration: %s" % conf)
fix_package_dirs(conf)
fix_java_files(conf)
fix_xml_files(conf)
fix_html_files(conf)
fix_c_files(conf)
fix_resources(conf)
if __name__ == '__main__':
main()
|
dd00/commandergenius
|
refs/heads/dd00
|
project/jni/python/src/Lib/distutils/tests/test_build_scripts.py
|
47
|
"""Tests for distutils.command.build_scripts."""
import os
import unittest
from distutils.command.build_scripts import build_scripts
from distutils.core import Distribution
from distutils import sysconfig
from distutils.tests import support
class BuildScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
cmd = self.get_build_scripts_cmd("/foo/bar", [])
self.assert_(not cmd.force)
self.assert_(cmd.build_dir is None)
cmd.finalize_options()
self.assert_(cmd.force)
self.assertEqual(cmd.build_dir, "/foo/bar")
def test_build(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
cmd.run()
built = os.listdir(target)
for name in expected:
self.assert_(name in built)
def get_build_scripts_cmd(self, target, scripts):
import sys
dist = Distribution()
dist.scripts = scripts
dist.command_obj["build"] = support.DummyCommand(
build_scripts=target,
force=1,
executable=sys.executable
)
return build_scripts(dist)
def write_sample_scripts(self, dir):
expected = []
expected.append("script1.py")
self.write_script(dir, "script1.py",
("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("script2.py")
self.write_script(dir, "script2.py",
("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("shell.sh")
self.write_script(dir, "shell.sh",
("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
return expected
def write_script(self, dir, name, text):
f = open(os.path.join(dir, name), "w")
f.write(text)
f.close()
def test_version_int(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
# http://bugs.python.org/issue4524
#
# On linux-g++-32 with command line `./configure --enable-ipv6
# --with-suffix=3`, python is compiled okay but the build scripts
# failed when writing the name of the executable
old = sysconfig._config_vars.get('VERSION')
sysconfig._config_vars['VERSION'] = 4
try:
cmd.run()
finally:
if old is not None:
sysconfig._config_vars['VERSION'] = old
built = os.listdir(target)
for name in expected:
self.assert_(name in built)
def test_suite():
return unittest.makeSuite(BuildScriptsTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
kennedyshead/home-assistant
|
refs/heads/dev
|
tests/components/motioneye/__init__.py
|
2
|
"""Tests for the motionEye integration."""
from __future__ import annotations
from typing import Any
from unittest.mock import AsyncMock, Mock, patch
from motioneye_client.const import DEFAULT_PORT
from homeassistant.components.motioneye.const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
TEST_CONFIG_ENTRY_ID = "74565ad414754616000674c87bdc876c"
TEST_URL = f"http://test:{DEFAULT_PORT+1}"
TEST_CAMERA_ID = 100
TEST_CAMERA_NAME = "Test Camera"
TEST_CAMERA_ENTITY_ID = "camera.test_camera"
TEST_CAMERA_DEVICE_IDENTIFIER = (DOMAIN, f"{TEST_CONFIG_ENTRY_ID}_{TEST_CAMERA_ID}")
TEST_CAMERA = {
"show_frame_changes": False,
"framerate": 25,
"actions": [],
"preserve_movies": 0,
"auto_threshold_tuning": True,
"recording_mode": "motion-triggered",
"monday_to": "",
"streaming_resolution": 100,
"light_switch_detect": 0,
"command_end_notifications_enabled": False,
"smb_shares": False,
"upload_server": "",
"monday_from": "",
"movie_passthrough": False,
"auto_brightness": False,
"frame_change_threshold": 3.0,
"name": TEST_CAMERA_NAME,
"movie_format": "mp4:h264_omx",
"network_username": "",
"preserve_pictures": 0,
"event_gap": 30,
"enabled": True,
"upload_movie": True,
"video_streaming": True,
"upload_location": "",
"max_movie_length": 0,
"movie_file_name": "%Y-%m-%d/%H-%M-%S",
"upload_authorization_key": "",
"still_images": False,
"upload_method": "post",
"max_frame_change_threshold": 0,
"device_url": "rtsp://localhost/live",
"text_overlay": False,
"right_text": "timestamp",
"upload_picture": True,
"email_notifications_enabled": False,
"working_schedule_type": "during",
"movie_quality": 75,
"disk_total": 44527655808,
"upload_service": "ftp",
"upload_password": "",
"wednesday_to": "",
"mask_type": "smart",
"command_storage_enabled": False,
"disk_used": 11419704992,
"streaming_motion": 0,
"manual_snapshots": True,
"noise_level": 12,
"mask_lines": [],
"upload_enabled": False,
"root_directory": f"/var/lib/motioneye/{TEST_CAMERA_NAME}",
"clean_cloud_enabled": False,
"working_schedule": False,
"pre_capture": 1,
"command_notifications_enabled": False,
"streaming_framerate": 25,
"email_notifications_picture_time_span": 0,
"thursday_to": "",
"streaming_server_resize": False,
"upload_subfolders": True,
"sunday_to": "",
"left_text": "",
"image_file_name": "%Y-%m-%d/%H-%M-%S",
"rotation": 0,
"capture_mode": "manual",
"movies": False,
"motion_detection": True,
"text_scale": 1,
"upload_username": "",
"upload_port": "",
"available_disks": [],
"network_smb_ver": "1.0",
"streaming_auth_mode": "basic",
"despeckle_filter": "",
"snapshot_interval": 0,
"minimum_motion_frames": 20,
"auto_noise_detect": True,
"network_share_name": "",
"sunday_from": "",
"friday_from": "",
"web_hook_storage_enabled": False,
"custom_left_text": "",
"streaming_port": 8081,
"id": TEST_CAMERA_ID,
"post_capture": 1,
"streaming_quality": 75,
"wednesday_from": "",
"proto": "netcam",
"extra_options": [],
"image_quality": 85,
"create_debug_media": False,
"friday_to": "",
"custom_right_text": "",
"web_hook_notifications_enabled": False,
"saturday_from": "",
"available_resolutions": [
"1600x1200",
"1920x1080",
],
"tuesday_from": "",
"network_password": "",
"saturday_to": "",
"network_server": "",
"smart_mask_sluggishness": 5,
"mask": False,
"tuesday_to": "",
"thursday_from": "",
"storage_device": "custom-path",
"resolution": "1920x1080",
}
TEST_CAMERAS = {"cameras": [TEST_CAMERA]}
TEST_SURVEILLANCE_USERNAME = "surveillance_username"
def create_mock_motioneye_client() -> AsyncMock:
"""Create mock motionEye client."""
mock_client = AsyncMock()
mock_client.async_client_login = AsyncMock(return_value={})
mock_client.async_get_cameras = AsyncMock(return_value=TEST_CAMERAS)
mock_client.async_client_close = AsyncMock(return_value=True)
mock_client.get_camera_snapshot_url = Mock(return_value="")
mock_client.get_camera_stream_url = Mock(return_value="")
return mock_client
def create_mock_motioneye_config_entry(
hass: HomeAssistant,
data: dict[str, Any] | None = None,
options: dict[str, Any] | None = None,
) -> ConfigEntry:
"""Add a test config entry."""
config_entry: MockConfigEntry = MockConfigEntry( # type: ignore[no-untyped-call]
entry_id=TEST_CONFIG_ENTRY_ID,
domain=DOMAIN,
data=data or {CONF_URL: TEST_URL},
title=f"{TEST_URL}",
options=options or {},
)
config_entry.add_to_hass(hass) # type: ignore[no-untyped-call]
return config_entry
async def setup_mock_motioneye_config_entry(
hass: HomeAssistant,
config_entry: ConfigEntry | None = None,
client: Mock | None = None,
) -> ConfigEntry:
"""Add a mock MotionEye config entry to hass."""
config_entry = config_entry or create_mock_motioneye_config_entry(hass)
client = client or create_mock_motioneye_client()
with patch(
"homeassistant.components.motioneye.MotionEyeClient",
return_value=client,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
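# Usage sketch (added for illustration; not part of the original helpers):
# a typical test would build the mock client and set up the entry together,
#     client = create_mock_motioneye_client()
#     config_entry = await setup_mock_motioneye_config_entry(hass, client=client)
# and then assert against hass states or the mocked client's recorded calls.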
|
TeamHG-Memex/scrapy-cdr
|
refs/heads/master
|
scrapy_cdr/es_upload.py
|
1
|
import argparse
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from itertools import islice
import logging
import os
import shutil
import sys
from six.moves.urllib.parse import urlsplit
import time
import traceback
import json_lines
import elasticsearch
import elasticsearch.helpers as es_helpers
from .utils import format_timestamp
def main():
parser = argparse.ArgumentParser(description='Upload items to ES index')
arg = parser.add_argument
arg('inputs', nargs='+', help='inputs in .jl or .jl.gz format')
arg('index', help='ES index name')
arg('--type', default='document',
help='ES type to use ("document" by default)')
arg('--op-type', default='index',
choices={'index', 'create', 'delete', 'update'},
help='ES operation type to use ("index" by default)')
arg('--broken', action='store_true',
help='specify if input might be broken (incomplete)')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=50, help='upload chunk size')
arg('--threads', type=int, default=4, help='number of threads')
arg('--limit', type=int, help='Index first N items')
arg('--format', choices=['CDRv2', 'CDRv3'], default='CDRv3')
arg('--max-chunk-bytes', type=int, default=10 * 2**20,
help='Depends on how ES is configured. 10 MB on AWS (default).')
arg('--log-level', default='INFO')
arg('--log-file')
arg('--reverse-domain-storage', action='store_true',
help='Store objects in reverse domain folder structure. Objects '
'will be copied in the filesystem. --media-root must be set.')
arg('--media-root', help='path to the root of stored media objects')
args = parser.parse_args()
if args.reverse_domain_storage and not args.media_root:
parser.error('--media-root must be set with --reverse-domain-storage')
logging.basicConfig(
level=getattr(logging, args.log_level),
format='%(asctime)s [%(levelname)s] %(module)s: %(message)s',
filename=args.log_file)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
logging.info(client.info())
def _items():
for filename in args.inputs:
logging.info('Starting {}'.format(filename))
with json_lines.open(filename, broken=args.broken) as f:
for item in f:
yield item
is_cdrv3 = args.format == 'CDRv3'
def _actions():
items = _items()
if args.limit:
items = islice(items, args.limit)
for item in items:
if is_cdrv3:
assert 'timestamp_crawl' in item, \
'this is not CDRv3, check --format'
else:
assert 'timestamp' in item, 'this is not CDRv2, check --format'
if is_cdrv3:
item['timestamp_index'] = format_timestamp(datetime.utcnow())
elif isinstance(item['timestamp'], int):
item['timestamp'] = format_timestamp(
datetime.fromtimestamp(item['timestamp'] / 1000.))
if args.reverse_domain_storage:
_reverse_domain_storage(item, args.media_root)
action = {
'_op_type': args.op_type,
'_index': args.index,
'_type': args.type,
'_id': item.pop('_id'),
}
if is_cdrv3:
item.pop('metadata', None) # not in CDRv3 schema
else:
item.pop('extracted_metadata', None)
if args.op_type != 'delete':
action['_source'] = item
yield action
# This wrapper is needed due to use of raise_on_error=False
# below (which we need because ES can raise exceptions on timeouts, etc.),
# but we don't want to ignore errors when reading data.
failed = [False] # to set correct exit code
def actions():
try:
for x in _actions():
yield x
except Exception:
traceback.print_exc()
failed[0] = True
raise # will be caught anyway
t0 = t00 = time.time()
i = last_i = 0
result_counts = defaultdict(int)
try:
for i, (success, result) in enumerate(
parallel_bulk(
client,
actions=actions(),
thread_count=args.threads,
chunk_size=args.chunk_size,
raise_on_error=False,
raise_on_exception=False,
max_chunk_bytes=args.max_chunk_bytes,
), start=1):
op_result = result[args.op_type].get('result')
if op_result is None:
# ES 2.x
op_result = ('status_{}'
.format(result[args.op_type].get('status')))
result_counts[op_result] += 1
if not (success or (args.op_type == 'delete' and
op_result in {'not_found', 'status_404'})):
logging.info('ES error: {}'.format(str(result)[:2000]))
failed[0] = True
t1 = time.time()
if t1 - t0 > 10:
_report_stats(i, last_i, t1 - t0, result_counts)
t0 = t1
last_i = i
finally:
_report_stats(i, 0, time.time() - t00, result_counts)
if failed[0]:
sys.exit(1)
def _reverse_domain_storage(item, media_root):
for obj in item.get('objects', []):
stored_url = obj['obj_stored_url']
assert '/' not in stored_url
domain = urlsplit(obj['obj_original_url']).netloc
if ':' in domain:
domain, _ = domain.split(':', 1)
parents = [p for p in reversed(domain.split('.')) if p]
os.makedirs(os.path.join(media_root, *parents), exist_ok=True)
stored_url_noext, _ = os.path.splitext(stored_url)
new_stored_url = os.path.sep.join(parents + [stored_url_noext])
dest = os.path.join(media_root, new_stored_url)
if not os.path.exists(dest):
shutil.copy(os.path.join(media_root, stored_url), dest)
obj['obj_stored_url'] = new_stored_url
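# Illustration (added): for obj_original_url 'http://www.example.com:8080/x.png'
# and obj_stored_url 'abc123.png', the object is copied to
# <media_root>/com/example/www/abc123 and obj_stored_url is rewritten to
# 'com/example/www/abc123' (splitext drops the original extension).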
def _report_stats(items, prev_items, dt, result_counts):
logging.info(
'{items:,} items processed ({stats}) at {speed:.0f} items/s'
.format(items=items,
stats=', '.join(
'{}: {:,}'.format(k, v)
for k, v in sorted(result_counts.items()) if v != 0),
speed=(items - prev_items) / dt,
)
)
def parallel_bulk(client, actions, thread_count=4, chunk_size=500,
max_chunk_bytes=100 * 1024 * 1024,
expand_action_callback=es_helpers.expand_action,
**kwargs):
""" es_helpers.parallel_bulk rewritten with imap_fixed_output_buffer
instead of Pool.imap, which can consume unbounded memory if the generator
outruns the upload (which usually happens).
"""
actions = map(expand_action_callback, actions)
for result in imap_fixed_output_buffer(
lambda chunk: list(
es_helpers._process_bulk_chunk(client, chunk, **kwargs)),
es_helpers._chunk_actions(actions, chunk_size, max_chunk_bytes,
client.transport.serializer),
threads=thread_count,
):
for item in result:
yield item
def imap_fixed_output_buffer(fn, it, threads: int):
with ThreadPoolExecutor(max_workers=threads) as executor:
futures = []
max_futures = threads + 1
for i, x in enumerate(it):
while len(futures) >= max_futures:
future, futures = futures[0], futures[1:]
yield future.result()
futures.append(executor.submit(fn, x))
for future in futures:
yield future.result()
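# Minimal usage sketch (added for illustration; the helper name below is not
# part of the original module): results come back in input order while at most
# `threads + 1` submitted chunks are in flight, which bounds memory when the
# input generator outruns the workers.
def _example_imap_fixed_output_buffer():
    squares = imap_fixed_output_buffer(lambda x: x * x, range(10), threads=2)
    assert list(squares) == [x * x for x in range(10)]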
|
tuxfux-hlp-notes/python-batches
|
refs/heads/master
|
archieves/batch-62/modules/mysheets/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py
|
171
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = sys.stdout
orig_stderr = sys.stderr
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr
atexit_done = False
def reset_all():
AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def reinit():
sys.stdout = wrapped_stdout
sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream,
convert=convert, strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
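# Usage note (added for illustration): init() replaces sys.stdout/sys.stderr
# with the ANSI-aware wrappers built by wrap_stream(), deinit() restores the
# original streams, and reinit() re-installs the wrappers created by the most
# recent init() call.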
|
xbezdick/tempest
|
refs/heads/master
|
tempest/api/volume/test_volumes_snapshots.py
|
3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
class VolumesV2SnapshotTestJSON(base.BaseVolumeTest):
@classmethod
def skip_checks(cls):
super(VolumesV2SnapshotTestJSON, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
@classmethod
def resource_setup(cls):
super(VolumesV2SnapshotTestJSON, cls).resource_setup()
cls.volume_origin = cls.create_volume()
cls.name_field = cls.special_fields['name_field']
cls.descrip_field = cls.special_fields['descrip_field']
def _detach(self, volume_id):
"""Detach volume."""
self.volumes_client.detach_volume(volume_id)
self.volumes_client.wait_for_volume_status(volume_id, 'available')
def _list_by_param_values_and_assert(self, params, with_detail=False):
"""
Perform the list or list_details action with the given params
and validate the result.
"""
if with_detail:
fetched_snap_list = self.snapshots_client.list_snapshots(
detail=True, params=params)['snapshots']
else:
fetched_snap_list = self.snapshots_client.list_snapshots(
params=params)['snapshots']
# Validating params of fetched snapshots
for snap in fetched_snap_list:
for key in params:
msg = "Failed to list snapshots %s by %s" % \
('details' if with_detail else '', key)
self.assertEqual(params[key], snap[key], msg)
@test.idempotent_id('b467b54c-07a4-446d-a1cf-651dedcc3ff1')
@test.services('compute')
def test_snapshot_create_with_volume_in_use(self):
# Create a snapshot when volume status is in-use
# Create a test instance
server_name = data_utils.rand_name('instance')
server = self.create_server(
name=server_name,
wait_until='ACTIVE')
self.addCleanup(self.servers_client.delete_server, server['id'])
mountpoint = '/dev/%s' % CONF.compute.volume_device_name
self.servers_client.attach_volume(
server['id'], volumeId=self.volume_origin['id'],
device=mountpoint)
self.volumes_client.wait_for_volume_status(self.volume_origin['id'],
'in-use')
self.addCleanup(self.volumes_client.wait_for_volume_status,
self.volume_origin['id'], 'available')
self.addCleanup(self.servers_client.detach_volume, server['id'],
self.volume_origin['id'])
# Snapshot a volume even if it's attached to an instance
snapshot = self.create_snapshot(self.volume_origin['id'],
force=True)
# Delete the snapshot
self.cleanup_snapshot(snapshot)
@test.idempotent_id('2a8abbe4-d871-46db-b049-c41f5af8216e')
def test_snapshot_create_get_list_update_delete(self):
# Create a snapshot
s_name = data_utils.rand_name('snap')
params = {self.name_field: s_name}
snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Get the snap and check for some of its details
snap_get = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
self.assertEqual(self.volume_origin['id'],
snap_get['volume_id'],
"Referred volume origin mismatch")
# Compare also with the output from the list action
tracking_data = (snapshot['id'], snapshot[self.name_field])
snaps_list = self.snapshots_client.list_snapshots()['snapshots']
snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
self.assertIn(tracking_data, snaps_data)
# Updates snapshot with new values
new_s_name = data_utils.rand_name('new-snap')
new_desc = 'This is the new description of snapshot.'
params = {self.name_field: new_s_name,
self.descrip_field: new_desc}
update_snapshot = self.snapshots_client.update_snapshot(
snapshot['id'], **params)['snapshot']
# Assert response body for update_snapshot method
self.assertEqual(new_s_name, update_snapshot[self.name_field])
self.assertEqual(new_desc, update_snapshot[self.descrip_field])
# Assert response body for show_snapshot method
updated_snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
self.assertEqual(new_s_name, updated_snapshot[self.name_field])
self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
# Delete the snapshot
self.cleanup_snapshot(snapshot)
@test.idempotent_id('59f41f43-aebf-48a9-ab5d-d76340fab32b')
def test_snapshots_list_with_params(self):
"""list snapshots with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
params = {self.name_field: display_name}
snapshot = self.create_snapshot(self.volume_origin['id'], **params)
self.addCleanup(self.cleanup_snapshot, snapshot)
# Verify list snapshots by display_name filter
params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
# Verify list snapshots by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(params)
# Verify list snapshots by status and display name filter
params = {'status': 'available',
self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
@test.idempotent_id('220a1022-1fcd-4a74-a7bd-6b859156cda2')
def test_snapshots_list_details_with_params(self):
"""list snapshot details with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
params = {self.name_field: display_name}
snapshot = self.create_snapshot(self.volume_origin['id'], **params)
self.addCleanup(self.cleanup_snapshot, snapshot)
# Verify list snapshot details by display_name filter
params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
@test.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
def test_volume_from_snapshot(self):
# Create a temporary snapshot using the wrapper method from base, then
# create a snapshot-based volume and delete it
snapshot = self.create_snapshot(self.volume_origin['id'])
# NOTE(gfidente): size is required also when passing snapshot_id
volume = self.volumes_client.create_volume(
snapshot_id=snapshot['id'])['volume']
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
self.volumes_client.delete_volume(volume['id'])
self.volumes_client.wait_for_resource_deletion(volume['id'])
self.cleanup_snapshot(snapshot)
def cleanup_snapshot(self, snapshot):
# Delete the snapshot
self.snapshots_client.delete_snapshot(snapshot['id'])
self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
self.snapshots.remove(snapshot)
class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
_api_version = 1
|
openstack/swift
|
refs/heads/master
|
test/unit/cli/__init__.py
|
12133432
| |
carthagecollege/django-djtinue
|
refs/heads/master
|
djtinue/__init__.py
|
12133432
| |
louyihua/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/bookmarks/tests/__init__.py
|
12133432
| |
uetke/experimentor
|
refs/heads/master
|
examples/config/config.py
|
1
|
from experimentor.config import Config
# Settings specific to the GUI. These settings can be changed if, for example, the GUI is not responsive enough or if
# there is an overflow of information going to the screen (i.e. updates happening > 60 Hz).
Config.monitor_read_scan = 10 # How many times do we update the signal during 1 wavelength sweep
Config.laser_update = 3000 # How often (in milliseconds) the laser properties are updated.
|
EvgeneOskin/taiga-back
|
refs/heads/master
|
taiga/projects/custom_attributes/migrations/0001_initial.py
|
27
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('projects', '0015_auto_20141230_1212'),
]
operations = [
migrations.CreateModel(
name='IssueCustomAttribute',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='name', max_length=64)),
('description', models.TextField(blank=True, verbose_name='description')),
('order', models.IntegerField(verbose_name='order', default=10000)),
('created_date', models.DateTimeField(verbose_name='created date', default=django.utils.timezone.now)),
('modified_date', models.DateTimeField(verbose_name='modified date')),
('project', models.ForeignKey(to='projects.Project', verbose_name='project', related_name='issuecustomattributes')),
],
options={
'ordering': ['project', 'order', 'name'],
'verbose_name': 'issue custom attribute',
'verbose_name_plural': 'issue custom attributes',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TaskCustomAttribute',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='name', max_length=64)),
('description', models.TextField(blank=True, verbose_name='description')),
('order', models.IntegerField(verbose_name='order', default=10000)),
('created_date', models.DateTimeField(verbose_name='created date', default=django.utils.timezone.now)),
('modified_date', models.DateTimeField(verbose_name='modified date')),
('project', models.ForeignKey(to='projects.Project', verbose_name='project', related_name='taskcustomattributes')),
],
options={
'ordering': ['project', 'order', 'name'],
'verbose_name': 'task custom attribute',
'verbose_name_plural': 'task custom attributes',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserStoryCustomAttribute',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='name', max_length=64)),
('description', models.TextField(blank=True, verbose_name='description')),
('order', models.IntegerField(verbose_name='order', default=10000)),
('created_date', models.DateTimeField(verbose_name='created date', default=django.utils.timezone.now)),
('modified_date', models.DateTimeField(verbose_name='modified date')),
('project', models.ForeignKey(to='projects.Project', verbose_name='project', related_name='userstorycustomattributes')),
],
options={
'ordering': ['project', 'order', 'name'],
'verbose_name': 'user story custom attribute',
'verbose_name_plural': 'user story custom attributes',
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='userstorycustomattribute',
unique_together=set([('project', 'name')]),
),
migrations.AlterUniqueTogether(
name='taskcustomattribute',
unique_together=set([('project', 'name')]),
),
migrations.AlterUniqueTogether(
name='issuecustomattribute',
unique_together=set([('project', 'name')]),
),
]
|
arifsetiawan/edx-platform
|
refs/heads/master
|
common/djangoapps/cors_csrf/migrations/0001_initial.py
|
98
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'XDomainProxyConfiguration'
db.create_table('cors_csrf_xdomainproxyconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('whitelist', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('cors_csrf', ['XDomainProxyConfiguration'])
def backwards(self, orm):
# Deleting model 'XDomainProxyConfiguration'
db.delete_table('cors_csrf_xdomainproxyconfiguration')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cors_csrf.xdomainproxyconfiguration': {
'Meta': {'object_name': 'XDomainProxyConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['cors_csrf']
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/smartEnter/dict_after.py
|
83
|
class A:
def foo(self):
self.a = {"1": 1, "2":2}<caret>
|
syci/account-financial-tools
|
refs/heads/8.0
|
account_move_reconcile_helper/__init__.py
|
4
|
# -*- coding: utf-8 -*-
from . import models
from .post_install import set_reconcile_ref
|
surround-io/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/io.py
|
201
|
import os
import shutil
from .. import constants, logger
from . import _json
def copy_registered_textures(dest, registration):
"""Copy the registered textures to the destination (root) path
:param dest: destination directory
:param registration: registered textures
:type dest: str
:type registration: dict
"""
logger.debug("io.copy_registered_textures(%s, %s)", dest, registration)
os.makedirs(dest, exist_ok=True)
for value in registration.values():
copy(value['file_path'], dest)
def copy(src, dst):
"""Copy a file to a destination
:param src: source file
:param dst: destination file/path
"""
logger.debug("io.copy(%s, %s)" % (src, dst))
if os.path.isdir(dst):
file_name = os.path.basename(src)
dst = os.path.join(dst, file_name)
if src != dst:
shutil.copy(src, dst)
def dump(filepath, data, options=None):
"""Dump the output to disk (JSON, msgpack, etc)
:param filepath: output file path
:param data: serializable data to write to disk
:param options: (Default value = None)
:type options: dict
"""
options = options or {}
logger.debug("io.dump(%s, data, options=%s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
logger.info("Dumping to msgpack")
func = lambda x, y: msgpack.dump(x, y)
mode = 'wb'
else:
round_off = options.get(constants.ENABLE_PRECISION)
if round_off:
_json.ROUND = options[constants.PRECISION]
else:
_json.ROUND = None
indent = options.get(constants.INDENT, True)
indent = 4 if indent else None
logger.info("Dumping to JSON")
func = lambda x, y: _json.json.dump(x, y, indent=indent)
mode = 'w'
logger.info("Writing to %s", filepath)
with open(filepath, mode=mode) as stream:
func(data, stream)
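# Example (added for illustration, using the same constants referenced above):
#     dump('scene.json', data, options={constants.INDENT: True})
# writes indented JSON, while
#     dump('scene.pack', data, options={constants.COMPRESSION: constants.MSGPACK})
# writes a binary msgpack stream (and requires the msgpack module).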
def load(filepath, options):
"""Load the contents of the file path with the correct parser
:param filepath: input file path
:param options:
:type options: dict
"""
logger.debug("io.load(%s, %s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
module = msgpack
mode = 'rb'
else:
logger.info("Loading JSON")
module = _json.json
mode = 'r'
with open(filepath, mode=mode) as stream:
data = module.load(stream)
return data
|
RO-ny9/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/encodings/__init__.py
|
46
|
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must return a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
attributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from . import aliases
_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
pass
def normalize_encoding(encoding):
""" Normalize an encoding name.
Normalization works as follows: all non-alphanumeric
characters except the dot used for Python package names are
collapsed and replaced with a single underscore, e.g. ' -;#'
becomes '_'. Leading and trailing underscores are removed.
Note that encoding names should be ASCII only; if they do use
non-ASCII characters, these must be Latin-1 compatible.
"""
if isinstance(encoding, bytes):
encoding = str(encoding, "ascii")
chars = []
punct = False
for c in encoding:
if c.isalnum() or c == '.':
if punct and chars:
chars.append('_')
chars.append(c)
punct = False
else:
punct = True
return ''.join(chars)
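# A few worked examples of the normalization described above (comments only):
#   normalize_encoding('UTF-8')    -> 'UTF_8'   (punctuation collapsed to '_')
#   normalize_encoding('latin 1')  -> 'latin_1'
#   normalize_encoding('  ascii ') -> 'ascii'   (leading/trailing runs dropped)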
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding, _unknown)
if entry is not _unknown:
return entry
# Import the module:
#
# First try to find an alias for the normalized encoding
# name and lookup the module using the aliased name, then try to
# lookup the module using the standard import scheme, i.e. first
# try in the encodings package, then at top-level.
#
norm_encoding = normalize_encoding(encoding)
aliased_encoding = _aliases.get(norm_encoding) or \
_aliases.get(norm_encoding.replace('.', '_'))
if aliased_encoding is not None:
modnames = [aliased_encoding,
norm_encoding]
else:
modnames = [norm_encoding]
for modname in modnames:
if not modname or '.' in modname:
continue
try:
# Import is absolute to prevent the possibly malicious import of a
# module with side-effects that is not in the 'encodings' package.
mod = __import__('encodings.' + modname, fromlist=_import_tail,
level=0)
except ImportError:
pass
else:
break
else:
mod = None
try:
getregentry = mod.getregentry
except AttributeError:
# Not a codec module
mod = None
if mod is None:
# Cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
entry = getregentry()
if not isinstance(entry, codecs.CodecInfo):
if not 4 <= len(entry) <= 7:
raise CodecRegistryError('module "%s" (%s) failed to register'
% (mod.__name__, mod.__file__))
if not hasattr(entry[0], '__call__') or \
not hasattr(entry[1], '__call__') or \
(entry[2] is not None and not hasattr(entry[2], '__call__')) or \
(entry[3] is not None and not hasattr(entry[3], '__call__')) or \
(len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \
(len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')):
raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
% (mod.__name__, mod.__file__))
if len(entry)<7 or entry[6] is None:
entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
entry = codecs.CodecInfo(*entry)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if alias not in _aliases:
_aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
|
MaxGuevara/quark
|
refs/heads/master
|
qa/rpc-tests/txn_doublespend.py
|
152
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround: generated coins are assigned to the first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].move("", "foo", 1220)
self.nodes[0].move("", "bar", 30)
assert_equal(self.nodes[0].getbalance(""), 0)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1210 BTC to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], 1210)
change_address = self.nodes[0].getnewaddress("foo")
outputs = {}
outputs[change_address] = 40
outputs[node1_address] = 1210
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two transaction from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 1210, minus 20, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].setgenerate(True, 1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].setgenerate(True, 1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1210 for the double-spend:
expected = starting_balance + 100 - 1210
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# foo account should be debited, but bar account should not:
assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
assert_equal(self.nodes[0].getbalance("bar"), 30)
# Node1's "from" account balance should be just the mutated send:
assert_equal(self.nodes[1].getbalance("from0"), 1210)
if __name__ == '__main__':
TxnMallTest().main()
|
charbeljc/server-tools
|
refs/heads/8.0
|
auth_from_http_remote_user/utils.py
|
50
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Laurent Mignon
# Copyright 2014 'ACSONE SA/NV'
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
KEY_LENGTH = 16
|
HIPS/optofit
|
refs/heads/master
|
examples/gp_squid_test.py
|
1
|
import numpy as np
seed = np.random.randint(2**16)
# seed = 50431
seed = 58482
print "Seed: ", seed
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from optofit.cneuron.compartment import Compartment, SquidCompartment
from optofit.cneuron.channels import LeakChannel, NaChannel, KdrChannel
from optofit.cneuron.simulate import forward_euler
from optofit.cneuron.gpchannel import GPChannel, sigma, GPKdrChannel
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
# Set the random seed for reproducibility
np.random.seed(seed)
# Make a simple compartment
hypers = {
'C' : 1.0,
'V0' : -60.0,
'g_leak' : 0.03,
'E_leak' : -65.0}
gp1_hypers = {'D': 2,
'sig' : 1,
'g_gp' : 12.0,
'E_gp' : 50.0,
'alpha_0': 1.0,
'beta_0' : 2.0,
'sigma_kernel': 2.0}
gp2_hypers = {'D' : 1,
'sig' : 1,
'g_gp' : 3.60,
'E_gp' : -77.0,
'alpha_0': 1.0,
'beta_0' : 2.0,
'sigma_kernel': 2.0}
squid_hypers = {
'C' : 1.0,
'V0' : -60.0,
'g_leak' : 0.03,
'E_leak' : -65.0,
'g_na' : 12.0,
# 'g_na' : 0.0,
'E_na' : 50.0,
'g_kdr' : 3.60,
'E_kdr' : -77.0
}
def create_gp_model():
# Add a few channels
body = Compartment(name='body', hypers=hypers)
leak = LeakChannel(name='leak', hypers=hypers)
gp1 = GPChannel(name='gpna', hypers=gp1_hypers)
gp2 = GPKdrChannel(name='gpk', hypers=gp2_hypers)
body.add_child(leak)
body.add_child(gp1)
body.add_child(gp2)
# Initialize the model
D, I = body.initialize_offsets()
return body, gp1, gp2, D, I
def sample_squid_model():
squid_body = SquidCompartment(name='body', hypers=squid_hypers)
# squid_body = Compartment(name='body', hypers=squid_hypers)
# leak = LeakChannel(name='leak', hypers=squid_hypers)
# na = NaChannel(name='na', hypers=squid_hypers)
# kdr = KdrChannel(name='kdr', hypers=squid_hypers)
# squid_body.add_child(leak)
# body.add_child(na)
# squid_body.add_child(kdr)
# Initialize the model
D, I = squid_body.initialize_offsets()
# Set the recording duration
t_start = 0
t_stop = 100.
dt = 0.1
t_ds = 0.1
t = np.arange(t_start, t_stop, dt)
T = len(t)
# Make input with an injected current from 20-80ms
inpt = np.zeros((T, I))
inpt[20/dt:80/dt,:] = 7.
inpt += np.random.randn(T, I)
# Set the initial distribution to be Gaussian around the steady state
z0 = np.zeros(D)
squid_body.steady_state(z0)
init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
# TODO: Fix the hack which requires us to know the number of particles
N = 100
sigmas = 0.0001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[squid_body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N, D, squid_body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([squid_body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.zeros((T,N,D))
z[0,0,:] = init.sample()
# Initialize the output matrix
x = np.zeros((T,D))
# Sample the latent state sequence
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Sample observations
for i in np.arange(0,T):
lkhd.sample(z,x,i,0)
# Extract the first (and in this case only) particle
z = z[:,0,:].copy(order='C')
# Downsample
intvl = int(t_ds / dt)
td = t[::intvl].copy('C')
zd = z[::intvl, :].copy('C')
xd = x[::intvl, :].copy('C')
inptd = inpt[::intvl].copy('C')
# Plot the first particle trajectory
plt.ion()
st_axs, _ = squid_body.plot(td, zd, color='k')
# Plot the observed voltage
st_axs[0].plot(td, xd[:,0], 'r')
# plt.plot(t, x[:,0], 'r')
plt.show()
plt.pause(0.01)
return td, zd, xd, inptd, st_axs
def sample_from_model(T,D, init, prop):
z = np.zeros((T,1,D))
z[0,0,:] = init.sample()[:]
# Sample the latent state sequence with the given initial condition
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
return z[:,0,:]
# Now run the pMCMC inference
def sample_gp_given_true_z(t, x, inpt,
z_squid,
N_particles=100,
axs=None, gp1_ax=None, gp2_ax=None):
dt = np.diff(t)
T,O = x.shape
# Make a model
body, gp1, gp2, D, I = create_gp_model()
# Set the initial distribution to be Gaussian around the steady state
ss = np.zeros(D)
body.steady_state(ss)
init = GaussianInitialDistribution(ss, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
sigmas = 0.001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N_particles, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix with the equivalent of the squid latent state
z = np.zeros((T,1,D))
z[:,0,0] = z_squid[:,0] # V = V
m3h = z_squid[:,1]**3 * z_squid[:,2]
m3h = np.clip(m3h, 1e-4,1-1e-4)
z[:,0,1] = np.log(m3h/(1.0-m3h)) # Na open fraction
n4 = z_squid[:,3]**4
n4 = np.clip(n4, 1e-4,1-1e-4)
z[:,0,2] = np.log(n4/(1.0-n4)) # Kdr open fraction
# Prepare the particle Gibbs sampler with the first particle
pf = ParticleGibbsAncestorSampling(T, N_particles, D)
pf.initialize(init, prop, lkhd, x, z[:,0,:].copy('C'))
# Plot the initial state
# gp1_ax, im1, l_gp1 = gp1.plot(ax=gp1_ax, data=z[:,0,:])
gp2_ax, im2, l_gp2 = gp2.plot(ax=gp2_ax, data=z[:,0,:])
axs, lines = body.plot(t, z[:,0,:], color='b', axs=axs)
axs[0].plot(t, x[:,0], 'r')
# Plot a sample from the model
lpred = axs[0].plot(t, sample_from_model(T,D, init, prop)[:,0], 'g')
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
# Initialize sample outputs
S = 1000
z_smpls = np.zeros((S,T,D))
z_smpls[0,:,:] = z[:,0,:]
for s in range(1,S):
print "Iteration %d" % s
# Reinitialize with the previous particle
# pf.initialize(init, prop, lkhd, x, z_smpls[s-1,:,:])
# Sample a new trajectory given the updated kinetics and the previous sample
# z_smpls[s,:,:] = pf.sample()
z_smpls[s,:,:] = z_smpls[s-1,:,:]
# Resample the GP
gp1.resample(z_smpls[s,:,:], dt)
gp2.resample(z_smpls[s,:,:], dt)
# Resample the conductances
# resample_body(body, t, z_smpls[s,:,:], sigmas[0])
# Plot the sample
body.plot(t, z_smpls[s,:,:], lines=lines)
# gp1.plot(im=im1, l=l_gp1, data=z_smpls[s,:,:])
gp2.plot(im=im2, l=l_gp2, data=z_smpls[s,:,:])
# Sample from the model and plot
lpred[0].set_data(t, sample_from_model(T,D, init,prop)[:,0])
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
z_mean = z_smpls.mean(axis=0)
z_std = z_smpls.std(axis=0)
z_env = np.zeros((T*2,2))
z_env[:,0] = np.concatenate((t, t[::-1]))
z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))
plt.ioff()
plt.show()
return z_smpls
t, z, x, inpt, st_axs = sample_squid_model()
raw_input("Press enter to being sampling...\n")
sample_gp_given_true_z(t, x, inpt, z, axs=st_axs)
|
flavour/ifrc_qa
|
refs/heads/master
|
modules/s3db/delphi.py
|
1
|
# -*- coding: utf-8 -*-
""" Sahana Eden Delphi Decision Maker Model
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DelphiModel",
"S3DelphiUser",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DelphiModel(S3Model):
"""
Delphi Decision Maker
"""
names = ("delphi_group",
"delphi_membership",
"delphi_problem",
"delphi_solution",
"delphi_vote",
"delphi_comment",
"delphi_solution_represent",
"delphi_DelphiUser",
)
def model(self):
T = current.T
db = current.db
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Groups
# ---------------------------------------------------------------------
tablename = "delphi_group"
define_table(tablename,
Field("name", length=255, notnull=True, unique=True,
label = T("Group Title"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
Field("description", "text",
label = T("Description"),
),
Field("active", "boolean", default=True,
label = T("Active"),
represent = s3_yes_no_represent,
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Group"),
title_display = T("Group Details"),
title_list = T("Groups"),
title_update = T("Edit Group"),
label_list_button = T("List Groups"),
label_delete_button = T("Delete Group"),
msg_record_created = T("Group added"),
msg_record_modified = T("Group updated"),
msg_record_deleted = T("Group deleted"),
msg_list_empty = T("No Groups currently defined"))
configure(tablename,
deduplicate = S3Duplicate(),
list_fields = ["id",
"name",
"description",
],
)
# Components
add_components(tablename,
delphi_membership = "group_id",
delphi_problem = "group_id",
)
group_id = S3ReusableField("group_id", "reference %s" % tablename,
notnull = True,
label = T("Problem Group"),
represent = self.delphi_group_represent,
requires = IS_ONE_OF(db, "delphi_group.id",
self.delphi_group_represent
),
)
user_id = S3ReusableField("user_id", current.auth.settings.table_user,
notnull=True,
label = T("User"),
represent = s3_auth_user_represent,
requires = IS_ONE_OF(db, "auth_user.id",
s3_auth_user_represent),
)
# ---------------------------------------------------------------------
# Group Membership
# ---------------------------------------------------------------------
delphi_role_opts = {
1:T("Guest"),
2:T("Contributor"),
3:T("Participant"),
4:T("Moderator")
}
tablename = "delphi_membership"
define_table(tablename,
group_id(),
user_id(),
Field("description",
label = T("Description"),
),
# @ToDo: Change how Membership Requests work
Field("req", "boolean",
default = False,
label = T("Request"), # Membership Request
represent = s3_yes_no_represent,
),
Field("status", "integer",
default = 3,
label = T("Status"),
represent = lambda opt: \
delphi_role_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(delphi_role_opts,
zero=None),
comment = DIV(_class="tooltip",
_title="%s|%s|%s|%s|%s" % (T("Status"),
T("Guests can view all details"),
T("A Contributor can additionally Post comments to the proposed Solutions & add alternative Solutions"),
T("A Participant can additionally Vote"),
T("A Moderator can additionally create Problems & control Memberships")))
),
*s3_meta_fields()
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Group Members"),
title_update = T("Edit Membership"),
label_list_button = T("List Members"),
label_delete_button = T("Remove Person from Group"),
msg_record_created = T("Person added to Group"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Person removed from Group"),
msg_list_empty = T("This Group has no Members yet"))
configure(tablename,
list_fields = ["id",
"group_id",
"user_id",
"status",
"req",
],
)
# ---------------------------------------------------------------------
# Problems
# ---------------------------------------------------------------------
tablename = "delphi_problem"
define_table(tablename,
group_id(),
Field("code", length=8,
label = T("Problem Code"),
represent = lambda v: v or NONE,
),
Field("name", length=255, notnull=True, unique=True,
label = T("Problem Title"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
Field("description", "text",
label = T("Description"),
represent = s3_comments_represent,
),
Field("criteria", "text",
label = T("Criteria"),
),
Field("active", "boolean",
default = True,
label = T("Active"),
represent = s3_yes_no_represent,
),
*s3_meta_fields()
)
# @todo: make lazy_table
table = db[tablename]
table.modified_on.label = T("Last Modification")
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Problem"),
title_display = T("Problem Details"),
title_list = T("Problems"),
title_update = T("Edit Problem"),
label_list_button = T("List Problems"),
label_delete_button = T("Delete Problem"),
msg_record_created = T("Problem added"),
msg_record_modified = T("Problem updated"),
msg_record_deleted = T("Problem deleted"),
msg_list_empty = T("No Problems currently defined"))
configure(tablename,
deduplicate = S3Duplicate(),
list_fields = ["id",
"group_id",
"code",
"name",
"description",
"created_by",
"modified_on",
],
orderby = table.code,
)
# Components
add_components(tablename,
delphi_solution = "problem_id",
)
problem_id = S3ReusableField("problem_id", "reference %s" % tablename,
notnull=True,
label = T("Problem"),
represent = self.delphi_problem_represent,
requires = IS_ONE_OF(db, "delphi_problem.id",
self.delphi_problem_represent
),
)
# ---------------------------------------------------------------------
# Solutions
# ---------------------------------------------------------------------
tablename = "delphi_solution"
define_table(tablename,
problem_id(),
Field("name",
label = T("Title"),
requires = IS_NOT_EMPTY(),
),
Field("description", "text",
label = T("Description"),
represent = s3_comments_represent,
),
Field("changes", "integer",
default = 0,
label = T("Changes"),
writable = False,
),
Field.Method("comments",
delphi_solution_comments),
Field.Method("votes",
delphi_solution_votes),
*s3_meta_fields()
)
# @todo: make lazy_table
table = db[tablename]
table.created_by.label = T("Suggested By")
table.modified_on.label = T("Last Modification")
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Solution"),
title_display = T("Solution Details"),
title_list = T("Solutions"),
title_update = T("Edit Solution"),
label_list_button = T("List Solutions"),
label_delete_button = T("Delete Solution"),
msg_record_created = T("Solution added"),
msg_record_modified = T("Solution updated"),
msg_record_deleted = T("Solution deleted"),
msg_list_empty = T("No Solutions currently defined"))
configure(tablename,
extra_fields = ["problem_id"],
list_fields = ["id",
#"problem_id",
"name",
"description",
"created_by",
"modified_on",
(T("Voted on"), "votes"),
(T("Comments"), "comments"),
],
)
solution_represent = S3Represent(lookup=tablename)
solution_id = S3ReusableField("solution_id", "reference %s" % tablename,
label = T("Solution"),
represent = solution_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "delphi_solution.id",
solution_represent
)),
)
# ---------------------------------------------------------------------
# Votes
# ---------------------------------------------------------------------
tablename = "delphi_vote"
define_table(tablename,
problem_id(),
solution_id(empty = False),
Field("rank", "integer",
label = T("Rank"),
),
*s3_meta_fields()
)
# ---------------------------------------------------------------------
# Comments
# @ToDo: Attachments?
#
# Parent field allows us to:
# * easily filter for top-level threads
# * easily filter for next level of threading
# * hook a new reply into the correct location in the hierarchy
#
# ---------------------------------------------------------------------
tablename = "delphi_comment"
define_table(tablename,
Field("parent", "reference delphi_comment",
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "delphi_comment.id")),
readable=False,
),
problem_id(),
# @ToDo: Tag to 1+ Solutions
#solution_multi_id(),
solution_id(),
Field("body", "text", notnull=True,
label = T("Comment"),
requires = IS_NOT_EMPTY(),
),
*s3_meta_fields()
)
configure(tablename,
list_fields = ["id",
"problem_id",
"solution_id",
"created_by",
"modified_on",
],
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(delphi_solution_represent = solution_represent,
delphi_DelphiUser = S3DelphiUser,
)
# -------------------------------------------------------------------------
@staticmethod
def delphi_group_represent(id, row=None):
""" FK representation """
if not row:
db = current.db
table = db.delphi_group
row = db(table.id == id).select(table.id,
table.name,
limitby = (0, 1)).first()
elif not id:
return current.messages["NONE"]
try:
return A(row.name,
_href=URL(c="delphi",
f="group",
args=[row.id]))
except:
return current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def delphi_problem_represent(id, row=None, show_link=False,
solutions=True):
"""
FK representation
@ToDo: Migrate to S3Represent
"""
if not row:
db = current.db
table = db.delphi_problem
row = db(table.id == id).select(table.id,
table.name,
limitby = (0, 1)).first()
elif not id:
return current.messages["NONE"]
try:
if show_link:
if solutions:
url = URL(c="delphi", f="problem", args=[row.id, "solution"])
else:
url = URL(c="delphi", f="problem", args=[row.id])
return A(row.name, _href=url)
else:
return row.name
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
def delphi_solution_comments(row):
""" Clickable number of comments for a solution, virtual field """
if hasattr(row, "delphi_solution"):
row = row.delphi_solution
try:
solution_id = row.id
problem_id = row.problem_id
except AttributeError:
return None
ctable = current.s3db.delphi_comment
query = (ctable.solution_id == solution_id)
comments = current.db(query).count()
url = URL(c="delphi", f="problem",
args=[problem_id, "solution", solution_id, "discuss"])
return A(comments, _href=url)
def delphi_solution_votes(row):
""" Clickable number of solutions for a problem, virtual field """
if hasattr(row, "delphi_solution"):
row = row.delphi_solution
try:
solution_id = row.id
problem_id = row.problem_id
except AttributeError:
return None
vtable = current.s3db.delphi_vote
query = (vtable.solution_id == solution_id)
votes = current.db(query).count()
url = URL(c="delphi", f="problem",
args=[problem_id, "results"])
return A(votes, _href=url)
# =============================================================================
class S3DelphiUser:
""" Delphi User class """
def user(self):
""" Used by Discuss() (& summary()) """
return current.s3db.auth_user[self.user_id]
def __init__(self, group_id=None):
auth = current.auth
user_id = auth.user.id if auth.is_logged_in() else None
status = 1 # guest
membership = None
if auth.s3_has_role("DelphiAdmin"):
# DelphiAdmin is Moderator for every Group
status = 4
elif user_id != None and group_id != None:
table = current.s3db.delphi_membership
query = (table.group_id == group_id) & \
(table.user_id == user_id)
membership = current.db(query).select()
if membership:
membership = membership[0]
status = membership.status
self.authorised = (status == 4)
# Only Moderators & Participants can Vote
self.can_vote = status in (3, 4)
# All but Guests can add Solutions & Discuss
self.can_add_item = status != 1
self.can_post = status != 1
self.membership = membership
self.status = status
self.user_id = user_id
# END =========================================================================
|
kelseyoo14/Wander
|
refs/heads/master
|
venv_2_7/lib/python2.7/site-packages/pandas/tests/test_reshape.py
|
9
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
from datetime import datetime, timedelta
import operator
import os
import nose
from pandas import DataFrame, Series
from pandas.core.sparse import SparseDataFrame
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas.core.reshape import (melt, lreshape, get_dummies,
wide_to_long)
import pandas.util.testing as tm
from pandas.compat import StringIO, cPickle, range, u
_multiprocess_can_split_ = True
class TestMelt(tm.TestCase):
def setUp(self):
self.df = tm.makeTimeDataFrame()[:10]
self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
self.var_name = 'var'
self.value_name = 'val'
self.df1 = pd.DataFrame([[ 1.067683, -1.110463, 0.20867 ],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298 , -0.873361]])
self.df1.columns = [list('ABC'), list('abc')]
self.df1.columns.names = ['CAP', 'low']
def test_default_col_names(self):
result = melt(self.df)
self.assertEqual(result.columns.tolist(), ['variable', 'value'])
result1 = melt(self.df, id_vars=['id1'])
self.assertEqual(result1.columns.tolist(), ['id1', 'variable', 'value'])
result2 = melt(self.df, id_vars=['id1', 'id2'])
self.assertEqual(result2.columns.tolist(), ['id1', 'id2', 'variable', 'value'])
def test_value_vars(self):
result3 = melt(self.df, id_vars=['id1', 'id2'], value_vars='A')
self.assertEqual(len(result3), 10)
result4 = melt(self.df, id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A']*10 + ['B']*10,
'value': self.df['A'].tolist() + self.df['B'].tolist()},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_custom_var_name(self):
result5 = melt(self.df, var_name=self.var_name)
self.assertEqual(result5.columns.tolist(), ['var', 'value'])
result6 = melt(self.df, id_vars=['id1'], var_name=self.var_name)
self.assertEqual(result6.columns.tolist(), ['id1', 'var', 'value'])
result7 = melt(self.df, id_vars=['id1', 'id2'], var_name=self.var_name)
self.assertEqual(result7.columns.tolist(), ['id1', 'id2', 'var', 'value'])
result8 = melt(self.df, id_vars=['id1', 'id2'],
value_vars='A', var_name=self.var_name)
self.assertEqual(result8.columns.tolist(), ['id1', 'id2', 'var', 'value'])
result9 = melt(self.df, id_vars=['id1', 'id2'],
value_vars=['A', 'B'], var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A']*10 + ['B']*10,
'value': self.df['A'].tolist() + self.df['B'].tolist()},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = melt(self.df, value_name=self.value_name)
self.assertEqual(result10.columns.tolist(), ['variable', 'val'])
result11 = melt(self.df, id_vars=['id1'], value_name=self.value_name)
self.assertEqual(result11.columns.tolist(), ['id1', 'variable', 'val'])
result12 = melt(self.df, id_vars=['id1', 'id2'], value_name=self.value_name)
self.assertEqual(result12.columns.tolist(), ['id1', 'id2', 'variable', 'val'])
result13 = melt(self.df, id_vars=['id1', 'id2'],
value_vars='A', value_name=self.value_name)
self.assertEqual(result13.columns.tolist(), ['id1', 'id2', 'variable', 'val'])
result14 = melt(self.df, id_vars=['id1', 'id2'],
value_vars=['A', 'B'], value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A']*10 + ['B']*10,
self.value_name: self.df['A'].tolist() + self.df['B'].tolist()},
columns=['id1', 'id2', 'variable', self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = melt(self.df, var_name=self.var_name, value_name=self.value_name)
self.assertEqual(result15.columns.tolist(), ['var', 'val'])
result16 = melt(self.df, id_vars=['id1'], var_name=self.var_name, value_name=self.value_name)
self.assertEqual(result16.columns.tolist(), ['id1', 'var', 'val'])
result17 = melt(self.df, id_vars=['id1', 'id2'],
var_name=self.var_name, value_name=self.value_name)
self.assertEqual(result17.columns.tolist(), ['id1', 'id2', 'var', 'val'])
result18 = melt(self.df, id_vars=['id1', 'id2'],
value_vars='A', var_name=self.var_name, value_name=self.value_name)
self.assertEqual(result18.columns.tolist(), ['id1', 'id2', 'var', 'val'])
result19 = melt(self.df, id_vars=['id1', 'id2'],
value_vars=['A', 'B'], var_name=self.var_name, value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A']*10 + ['B']*10,
self.value_name: self.df['A'].tolist() + self.df['B'].tolist()},
columns=['id1', 'id2', self.var_name, self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = melt(df20)
self.assertEqual(result20.columns.tolist(), ['foo', 'value'])
def test_col_level(self):
res1 = melt(self.df1, col_level=0)
res2 = melt(self.df1, col_level='CAP')
self.assertEqual(res1.columns.tolist(), ['CAP', 'value'])
self.assertEqual(res2.columns.tolist(), ['CAP', 'value'])
def test_multiindex(self):
res = pd.melt(self.df1)
self.assertEqual(res.columns.tolist(), ['CAP', 'low', 'value'])
class TestGetDummies(tm.TestCase):
sparse = False
def setUp(self):
self.df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
def test_basic(self):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': {0: 1.0, 1: 0.0, 2: 0.0},
'b': {0: 0.0, 1: 1.0, 2: 0.0},
'c': {0: 0.0, 1: 0.0, 2: 1.0}})
assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected)
assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected)
expected.index = list('ABC')
assert_frame_equal(get_dummies(s_series_index, sparse=self.sparse), expected)
def test_basic_types(self):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
if not self.sparse:
exp_df_type = DataFrame
exp_blk_type = pd.core.internals.FloatBlock
else:
exp_df_type = SparseDataFrame
exp_blk_type = pd.core.internals.SparseBlock
self.assertEqual(type(get_dummies(s_list, sparse=self.sparse)), exp_df_type)
self.assertEqual(type(get_dummies(s_series, sparse=self.sparse)), exp_df_type)
r = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
self.assertEqual(type(r), exp_df_type)
r = get_dummies(s_df, sparse=self.sparse, columns=['a'])
self.assertEqual(type(r[['a_0']]._data.blocks[0]), exp_blk_type)
self.assertEqual(type(r[['a_1']]._data.blocks[0]), exp_blk_type)
self.assertEqual(type(r[['a_2']]._data.blocks[0]), exp_blk_type)
def test_just_na(self):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index = ['A'])
res_list = get_dummies(just_na_list, sparse=self.sparse)
res_series = get_dummies(just_na_series, sparse=self.sparse)
res_series_index = get_dummies(just_na_series_index, sparse=self.sparse)
self.assertEqual(res_list.empty, True)
self.assertEqual(res_series.empty, True)
self.assertEqual(res_series_index.empty, True)
self.assertEqual(res_list.index.tolist(), [0])
self.assertEqual(res_series.index.tolist(), [0])
self.assertEqual(res_series_index.index.tolist(), ['A'])
def test_include_na(self):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=self.sparse)
exp = DataFrame({'a': {0: 1.0, 1: 0.0, 2: 0.0},
'b': {0: 0.0, 1: 1.0, 2: 0.0}})
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=self.sparse)
exp_na = DataFrame({nan: {0: 0.0, 1: 0.0, 2: 1.0},
'a': {0: 1.0, 1: 0.0, 2: 0.0},
'b': {0: 0.0, 1: 1.0, 2: 0.0}}).reindex_axis(['a', 'b', nan], 1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
exp_just_na = DataFrame(Series(1.0,index=[0]),columns=[nan])
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self): # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=self.sparse)
exp = DataFrame({'letter_e': {0: 1.0, 1: 0.0, 2: 0.0},
u('letter_%s') % eacute: {0: 0.0, 1: 1.0, 2: 1.0}})
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'A_a': [1., 0, 1], 'A_b': [0., 1, 0],
'B_b': [1., 1, 0], 'B_c': [0., 0, 1]})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self):
df = self.df
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3], 'A_a': [1., 0, 1],
'A_b': [0., 1, 0], 'B_b': [1., 1, 0],
'B_c': [0., 0, 1]})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self):
prefixes = ['from_A', 'from_B']
df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3], 'from_A_a': [1., 0, 1],
'from_A_b': [0., 1, 0], 'from_B_b': [1., 1, 0],
'from_B_c': [0., 0, 1]})
expected = expected[['C', 'from_A_a', 'from_A_b', 'from_B_b',
'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self):
# not that you should do this...
df = self.df
result = get_dummies(df, prefix='bad', sparse=self.sparse)
expected = DataFrame([[1, 1., 0., 1., 0.],
[2, 0., 1., 1., 0.],
[3, 1., 0., 0., 1.]],
columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'])
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self):
df = self.df
result = get_dummies(df, prefix=['from_A'],
columns=['A'], sparse=self.sparse)
expected = DataFrame({'from_A_a': [1., 0, 1], 'from_A_b': [0., 1, 0],
'B': ['b', 'b', 'c'], 'C': [1, 2, 3]})
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self):
df = self.df
result = get_dummies(df, prefix_sep='..', sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3], 'A..a': [1., 0, 1],
'A..b': [0., 1, 0], 'B..b': [1., 1, 0],
'B..c': [0., 0, 1]})
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'}, sparse=self.sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self):
with tm.assertRaises(ValueError):
get_dummies(self.df, prefix=['too few'], sparse=self.sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self):
with tm.assertRaises(ValueError):
get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse)
def test_dataframe_dummies_prefix_dict(self):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'from_A_a': [1., 0, 1], 'from_A_b': [0., 1, 0],
'from_B_b': [1., 1, 0], 'from_B_c': [0., 0, 1],
'C': [1, 2, 3]})
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3, np.nan], 'A_a': [1., 0, 1, 0],
'A_b': [0., 1, 0, 0], 'A_nan': [0., 0, 0, 1], 'B_b': [1., 1, 0, 0],
'B_c': [0., 0, 1, 0], 'B_nan': [0., 0, 0, 1]})
expected = expected[['C', 'A_a', 'A_b', 'A_nan', 'B_b', 'B_c',
'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3], 'A_a': [1., 0, 1],
'A_b': [0., 1, 0], 'B_b': [1., 1, 0],
'B_c': [0., 0, 1], 'cat_x': [1., 0, 0],
'cat_y': [0., 1, 1]})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c',
'cat_x', 'cat_y']]
assert_frame_equal(result, expected)
class TestGetDummiesSparse(TestGetDummies):
sparse = True
class TestLreshape(tm.TestCase):
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009'],
'visitdt2': ['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt1': [1823, 3338, 1549, 3298, 4306],
'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
df = DataFrame(data)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
'wt': ['wt%d' % i for i in range(1, 4)]}
result = lreshape(df, spec)
exp_data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009', '08jan2009',
'30dec2008', '21dec2008', '11jan2009',
'08jan2009', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766,
1454, 3139, 4133, 1766, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101,
103, 104, 105, 101, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Male',
'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009', '29dec2008',
'20jan2009', '21jan2009', '22jan2009', '31dec2008',
'03feb2009', '05feb2009', '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009',
'08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009',
'08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133,
1766, 3301, 1454, 3139, 4133,
1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105,
101, 102, 103, 104, 105,
101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009',
'21jan2009', nan, '22jan2009',
'31dec2008', '03feb2009',
'05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
nan, 1892.0, 3338.0, 4575.0, 2293.0, nan, nan,
3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
'wt': ['wt%d' % i for i in range(1, 4)]}
self.assertRaises(ValueError, lreshape, df, spec)
class TestWideToLong(tm.TestCase):
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
"A1980" : {0 : "d", 1 : "e", 2 : "f"},
"B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
"B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
"X" : dict(zip(range(3), x))
})
df["id"] = df.index
exp_data = {"X" : x.tolist() + x.tolist(),
"A" : ['a', 'b', 'c', 'd', 'e', 'f'],
"B" : [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year" : [1970, 1970, 1970, 1980, 1980, 1980],
"id" : [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0,1,2,3,8],[4,5,6,7,9]])
df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
stubs = ['inc', 'edu']
df_long = pd.wide_to_long(df, stubs, i='id', j='age')
self.assertEqual(stubs,['inc', 'edu'])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
FHannes/intellij-community
|
refs/heads/master
|
python/lib/Lib/distutils/file_util.py
|
81
|
"""distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode and hasattr(os, 'chmod'):
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
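# Hedged usage sketch of copy_file() (paths are made up; kept as comments so
# nothing runs on import): copying into a directory keeps the basename, and
# link='hard' makes a hard link instead of copying the bytes:
#   copy_file('build/lib/mod.py', '/usr/lib/python/site-packages/')
#   copy_file('build/lib/mod.py', 'stage/mod.py', link='hard', dry_run=1)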
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
for line in contents:
f.write(line + "\n")
f.close()
|
zhangxujinsh/keras
|
refs/heads/master
|
keras/wrappers/scikit_learn.py
|
35
|
from __future__ import absolute_import
import copy
import numpy as np
from ..utils.np_utils import to_categorical
class KerasClassifier(object):
"""
Implementation of the scikit-learn classifier API for Keras.
Parameters
----------
model : object
An un-compiled Keras model object is required to use the scikit-learn wrapper.
optimizer : string, optional
Optimization method used by the model during compilation/training.
loss : string, optional
Loss function used by the model during compilation/training.
"""
def __init__(self, model, optimizer='adam', loss='categorical_crossentropy'):
self.model = model
self.optimizer = optimizer
self.loss = loss
self.compiled_model_ = None
self.classes_ = []
self.config_ = []
self.weights_ = []
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Dictionary of parameter names mapped to their values.
"""
return {'model': self.model, 'optimizer': self.optimizer, 'loss': self.loss}
def set_params(self, **params):
"""
Set the parameters of this estimator.
Parameters
----------
params: dict
Dictionary of parameter names mapped to their values.
Returns
-------
self
"""
for parameter, value in params.items():
setattr(self, parameter, value)
return self
def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=0, shuffle=True):
"""
Fit the model according to the given training data.
Makes a copy of the un-compiled model definition to use for
compilation and fitting, leaving the original definition
intact.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training samples where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
batch_size : int, optional
Number of training samples evaluated at a time.
nb_epoch : int, optional
Number of training epochs.
verbose : int, optional
Verbosity level.
shuffle : boolean, optional
Indicator to shuffle the training data.
Returns
-------
self : object
Returns self.
"""
if len(y.shape) == 1:
self.classes_ = list(np.unique(y))
if self.loss == 'categorical_crossentropy':
y = to_categorical(y)
else:
self.classes_ = np.arange(0, y.shape[1])
self.compiled_model_ = copy.deepcopy(self.model)
self.compiled_model_.compile(optimizer=self.optimizer, loss=self.loss)
self.compiled_model_.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, shuffle=shuffle)
self.config_ = self.model.get_config()
self.weights_ = self.model.get_weights()
return self
def score(self, X, y, batch_size=128, verbose=0):
"""
Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
batch_size : int, optional
Number of test samples evaluated at a time.
verbose : int, optional
Verbosity level.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
loss, accuracy = self.compiled_model_.evaluate(X, y, batch_size=batch_size,
show_accuracy=True, verbose=verbose)
return accuracy
def predict(self, X, batch_size=128, verbose=0):
"""
Returns the class predictions for the given test data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples where n_samples is the number of samples
and n_features is the number of features.
batch_size : int, optional
Number of test samples evaluated at a time.
verbose : int, optional
Verbosity level.
Returns
-------
preds : array-like, shape = (n_samples)
Class predictions.
"""
return self.compiled_model_.predict_classes(X, batch_size=batch_size, verbose=verbose)
def predict_proba(self, X, batch_size=128, verbose=0):
"""
Returns class probability estimates for the given test data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples where n_samples is the number of samples
and n_features is the number of features.
batch_size : int, optional
Number of test samples evaluated at a time.
verbose : int, optional
Verbosity level.
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
return self.compiled_model_.predict_proba(X, batch_size=batch_size, verbose=verbose)
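# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original wrapper). It assumes the
# early Keras Sequential API used above (exact layer signatures vary across
# early Keras versions); `WrapperClass` is only a placeholder for whatever
# name this wrapper class is given earlier in the file, and X_train/y_train/
# X_test/y_test are made-up example arrays.
#
#     from keras.models import Sequential
#     from keras.layers.core import Dense, Activation
#
#     model = Sequential()
#     model.add(Dense(64, input_dim=20))
#     model.add(Activation('relu'))
#     model.add(Dense(3))
#     model.add(Activation('softmax'))
#
#     clf = WrapperClass(model, optimizer='adam', loss='categorical_crossentropy')
#     clf.fit(X_train, y_train, nb_epoch=10, batch_size=32)
#     print(clf.score(X_test, y_test))
# ---------------------------------------------------------------------------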
|
seravok/LPTHW
|
refs/heads/master
|
ex9.py
|
1
|
# Here's some new strange stuff, remember type it exactly
# Defines days of the week
days = "Mon Tue Wed Thu Fri Sat Sun"
# Defines the months that will be printed after calling "months"
# A new line is created after every month
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
# Print the days and then the months
print "Here are the days: ", days
print "Here are the months:", months
# Able to write on multiple rows with three double-quotes
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
|
oracal/cineworld
|
refs/heads/master
|
cineworld/cineworld.py
|
1
|
#!/usr/bin/env python
'''
Created on 17 Jul 2011
@author: oracal
'''
from cineworld_api_key import API_KEY
from fuzzywuzzy.fuzz import WRatio
from operator import itemgetter
try:
    from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode
import datetime
try:
import json
except ImportError:
import simplejson as json
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
class CW(object):
def __init__(self, api_key=''):
"""setup api key and API website addresses"""
if not api_key:
self.api_key = API_KEY
else:
self.api_key = api_key
base_url = 'http://www.cineworld.com/api/quickbook/'
self.base_url = base_url
self.cinemas_url = base_url + 'cinemas'
self.films_url = base_url + 'films'
self.dates_url = base_url + 'dates'
        self.performances_url = base_url + 'performances'
def get_list(self, datatype, url, **kwargs):
"""base function for connecting to API"""
search_url = [url, '?']
kwargs.update({'key': self.api_key})
search_url.append(urlencode(kwargs))
data = json.loads(urlopen(''.join(search_url)).read())
return data[datatype]
    def get_cinemas(self, **kwargs):
        """Get a list of all Cineworld cinemas; extra keyword arguments are passed through to the API (see the API documentation)."""
return self.get_list('cinemas', self.cinemas_url, **kwargs)
    def get_films(self, **kwargs):
        """Get a list of all films currently showing at Cineworld cinemas; extra keyword arguments are passed through to the API."""
return self.get_list('films', self.films_url, **kwargs)
    def get_film_list(self):
        """Cache the film list so repeated searches on the same object do not hit the API again."""
self.film_list = self.get_films()
return self.film_list
    def get_dates(self, **kwargs):
        """Get a list of all dates on which films are showing at Cineworld cinemas; extra keyword arguments are passed through to the API."""
return self.get_list('dates', self.dates_url, **kwargs)
    def get_performances(self, **kwargs):
        """Not well documented upstream; presumably returns more specialised performances (i.e. not films)."""
return self.get_list('performances', self.performances_url, **kwargs)
    def get_box_office_films(self):
        """Approximate the current box-office list by querying a fixed cinema (the O2) on the next Wednesday, when only regular (non-specialist) films show."""
today = datetime.date.today()
next_wednesday = (today + datetime.timedelta((2 - today.weekday()) % 7)).strftime('%Y%m%d')
        films = self.get_films(cinema=79, date=next_wednesday)
        films = [film for film in films if '3D' not in film['title']]
        for film in films:
            if '2D -' in film['title']:
                film['title'] = film['title'][5:]
return films
def film_search(self, title):
"""film search using fuzzy matching"""
films = []
#check for cache or update
if not hasattr(self, 'film_list'):
self.get_film_list()
#iterate over films and check for fuzzy string match
for film in self.film_list:
strength = WRatio(title, film['title'])
if strength > 80:
film.update({u'strength':strength})
films.append(film)
#sort films by the strength of the fuzzy string match
films_sorted = sorted(films, key=itemgetter('strength'), reverse = True)
return films_sorted
def get_film_id(self, title, three_dimensional=False):
"""get the film id using the title in conjunction with the searching function"""
films = self.film_search(title)
for film in films:
            if ('3D' in film['title']) == three_dimensional:
return film['edi']
return -1
    def get_film_info(self, edi):
        """Get full film information for a film using its edi number."""
return self.get_films(film=edi, full='true')
def get_cinemas_by_film(self, edi, **kwargs):
"""get cinemas where the film is playing using the film edi number"""
return self.get_cinemas(film=edi, **kwargs)
def get_cinema_info(self, id):
"""get cinema information using the cinema id number"""
        return self.get_cinemas(cinema=id, full='true')
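# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a valid API key is importable from cineworld_api_key (or passed
# to CW()) and that the Cineworld API is reachable; the film title below is
# just an example.
if __name__ == '__main__':
    cw = CW()
    for match in cw.film_search('Skyfall'):
        print(match['title'])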
|
zenx/xhtml2pdf
|
refs/heads/master
|
test/story2canvas.py
|
155
|
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 194 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $"
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.platypus import Frame
import ho.pisa as pisa
def test(filename):
# Convert HTML to "Reportlab Story" structure
story = pisa.pisaStory("""
<h1>Sample</h1>
<p>Hello <b>World</b>!</p>
""" * 20).story
# Draw to Canvas
c = Canvas(filename)
f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
f.addFromList(story,c)
c.save()
# Show PDF
pisa.startViewer(filename)
if __name__=="__main__":
test('story2canvas.pdf')
|
Jonneitapuro/isoskaba2
|
refs/heads/master
|
skaba/util.py
|
1
|
from skaba.models import User, Event
import csv
from io import StringIO
from datetime import datetime
def check_admin(user):
    """Check if the user has admin status."""
    if user.is_authenticated() and (user.profile.role == "admin" or user.is_superuser):
        return True
    return False
def check_moderator(user):
    """Check if the user has moderator (or higher) status."""
    if user.is_authenticated() and (
            user.profile.role in ("moderator", "admin") or user.is_superuser):
        return True
    return False
def csv_user_import(csv_file, guild):
# assume columns to be First name, Last name, e-mail, is_KV, is_TF, Password
csvf = StringIO(csv_file.read().decode())
csvreader = csv.DictReader(csvf, delimiter=',',
fieldnames=['firstname', 'lastname', 'email', 'is_kv', 'is_tf', 'password'])
for row in csvreader:
if row['password']:
pw = row['password']
else:
pw = 'ISO2016'
user = User.objects.create_user(username = generate_username(row['firstname'], row['lastname']),
first_name = row['firstname'],
last_name = row['lastname'],
email = row['email'],
password = pw
)
user.save()
profile = user.profile
profile.is_kv = row['is_kv'] == '1'
profile.is_tf = row['is_tf'] == '1'
profile.guild_id = guild
profile.save()
return True
def csv_event_import(csv_file, guild):
# assume columns to be Event name, desc(fi), desc(en), desc(swe), Points, url, repeats, date
csvf = StringIO(csv_file.read().decode())
csvreader = csv.DictReader(csvf, delimiter=',',
fieldnames=['eventname', 'descfi', 'descen', 'descswe', 'points', 'url', 'repeats', 'date'])
for row in csvreader:
event = Event.objects.create(
name = row['eventname'],
description = str(row['descfi']) + "<br /><br />" + str(row['descen']) + "<br /><br />" + str(row['descswe']),
points = row['points'],
guild_id = guild,
slug = row['url'],
repeats = row['repeats'],
eventdate = datetime.strptime(row['date'],'%Y-%m-%d').date()
)
event.save()
return True
def generate_username(first, last):
name = first + '.' + last
name = name.lower()
if User.objects.filter(username=name).exists():
name = generate_username(first, last + '1')
return name
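# Illustrative sketch of the input csv_user_import() expects (not part of the
# original module). The uploaded file is read as bytes and decoded, so an
# in-memory io.BytesIO object with the six expected columns behaves the same
# way; the guild id 1 is made up, and a configured Django environment with the
# User model and its profile is still required.
#
#     import io
#     payload = io.BytesIO(b"Jane,Doe,jane.doe@example.com,1,0,secret\n")
#     csv_user_import(payload, guild=1)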
|
Juniper/nova
|
refs/heads/master
|
nova/api/openstack/compute/extended_availability_zone.py
|
4
|
# Copyright 2013 Netease, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Availability Zone Status API extension."""
from nova.api.openstack import wsgi
from nova import availability_zones as avail_zone
from nova.policies import extended_availability_zone as eaz_policies
PREFIX = "OS-EXT-AZ"
class ExtendedAZController(wsgi.Controller):
def _extend_server(self, context, server, instance):
# NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new
# attributes after v2.1. They are only in v2.1 for backward compat
# with v2.0.
key = "%s:availability_zone" % PREFIX
az = avail_zone.get_instance_availability_zone(context, instance)
server[key] = az or ''
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if context.can(eaz_policies.BASE_POLICY_NAME, fatal=False):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if context.can(eaz_policies.BASE_POLICY_NAME, fatal=False):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
|
AWPorter/aima-python
|
refs/heads/master
|
submissions/Kinley/myBayes.py
|
15
|
import traceback
from submissions.Kinley import drugs
#
#
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
drugData = DataFrame()
drugData.data = []
targetData = []
alcohol = drugs.get_surveys('Alcohol Dependence')
#tobacco = drugs.get_surveys('Tobacco Use')
i=0
for survey in alcohol[0]['data']:
try:
        youngUserFloat = float(survey['Young'])
        midUserFloat = float(survey['Medium'])
        oldUserFloat = float(survey['Old'])
        place = survey['State']
        total = youngUserFloat + midUserFloat + oldUserFloat
        targetData.append(total)
        youngCertainFloat = float(survey['Young CI'])
        midCertainFloat = float(survey['Medium CI'])
        oldCertainFloat = float(survey['Old CI'])
        drugData.data.append([youngCertainFloat, midCertainFloat, oldCertainFloat])
i = i + 1
    except Exception:
traceback.print_exc()
drugData.feature_names = [
'Young CI',
'Medium CI',
'Old CI',
]
drugData.target = []
def drugTarget(number):
if number > 100.0:
return 1
return 0
for pre in targetData:
# choose the target
tt = drugTarget(pre)
drugData.target.append(tt)
drugData.target_names = [
    'States < 100k alcoholics',   # class 0: drugTarget() returned 0
    'States > 100k alcoholics',   # class 1: drugTarget() returned 1
]
Examples = {
'Drugs': drugData,
}
# The name of the survey question. Must be one of 'Cocaine Year', 'Alcohol Month',
# 'Cigarette Use', 'Alcohol Risk', 'Illicit/Alcohol Dependence or Abuse', 'Marijuana New',
# 'Illicit Dependence', 'Alcohol Dependence', 'Tobacco Use', 'Alcohol Binge', 'Marijuana Risk',
# 'Alcohol Abuse', 'Marijuana Month', 'Illicit Dependence or Abuse', 'Smoking Risk', 'Illicit Month',
# 'Alcohol Treatment', 'Nonmarijuana Illicit', 'Pain Relievers', 'Marijuana Year',
# 'Illicit Treatment', 'Depression'.
#If you make a typo, it will attempt to suggest a corrected answer. However, this is not perfect, so try to be as accurate as possible.
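# Illustrative sketch only (not part of the original submission): because the
# container above mirrors the sklearn "bunch" layout (data / target /
# feature_names / target_names), a naive Bayes classifier can consume it
# directly. sklearn is not imported by this module; this is just one possible
# way the Examples dict might be used.
#
#     from sklearn.naive_bayes import GaussianNB
#     nb = GaussianNB()
#     nb.fit(Examples['Drugs'].data, Examples['Drugs'].target)
#     print(nb.score(Examples['Drugs'].data, Examples['Drugs'].target))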
|
dgjustice/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/sns_topic.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = """
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
description:
- The C(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
version_added: 2.0
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
options:
name:
description:
- The name or ARN of the SNS topic to converge
required: True
state:
description:
- Whether to create or destroy an SNS topic
required: False
default: present
choices: ["absent", "present"]
display_name:
description:
- Display name of the topic
required: False
default: None
policy:
description:
- Policy to apply to the SNS topic
required: False
default: None
delivery_policy:
description:
- Delivery policy to apply to the SNS topic
required: False
default: None
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
required: False
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
required: False
default: True
extends_documentation_fragment: aws
requirements: [ "boto" ]
"""
EXAMPLES = """
- name: Create alarm SNS topic
sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = '''
sns_arn:
description: The ARN of the topic you are modifying
type: string
sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
sns_topic:
description: Dict of sns topic details
type: dict
sample:
name: sns-topic-name
state: present
display_name: default
policy: {}
delivery_policy: {}
subscriptions_new: []
subscriptions_existing: []
subscriptions_deleted: []
subscriptions_added: []
      subscriptions_purge: false
check_mode: false
topic_created: false
topic_deleted: false
attributes_set: []
'''
import time
import json
import re
try:
import boto.sns
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
class SnsTopicManager(object):
""" Handles SNS Topic creation and destruction """
def __init__(self,
module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode,
region,
**aws_connect_params):
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_boto_connection()
self.changed = False
self.module = module
self.name = name
self.state = state
self.display_name = display_name
self.policy = policy
self.delivery_policy = delivery_policy
self.subscriptions = subscriptions
self.subscriptions_existing = []
self.subscriptions_deleted = []
self.subscriptions_added = []
self.purge_subscriptions = purge_subscriptions
self.check_mode = check_mode
self.topic_created = False
self.topic_deleted = False
self.arn_topic = None
self.attributes_set = []
def _get_boto_connection(self):
try:
return connect_to_aws(boto.sns, self.region,
**self.aws_connect_params)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
def _get_all_topics(self):
next_token = None
topics = []
while True:
try:
response = self.connection.get_all_topics(next_token)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
if not next_token:
break
return [t['TopicArn'] for t in topics]
def _arn_topic_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._get_all_topics()
lookup_topic = ':%s' % self.name
for topic in all_topics:
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
self.changed = True
self.topic_created = True
if not self.check_mode:
self.connection.create_topic(self.name)
self.arn_topic = self._arn_topic_lookup()
while not self.arn_topic:
time.sleep(3)
self.arn_topic = self._arn_topic_lookup()
def _set_topic_attrs(self):
topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
['Attributes']
if self.display_name and self.display_name != topic_attributes['DisplayName']:
self.changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
self.display_name)
if self.policy and self.policy != json.loads(topic_attributes['Policy']):
self.changed = True
self.attributes_set.append('policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
json.dumps(self.policy))
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
self.changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
json.dumps(self.delivery_policy))
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
def _get_topic_subs(self):
next_token = None
while True:
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['Subscriptions'])
next_token = response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['NextToken']
if not next_token:
break
def _set_topic_subs(self):
subscriptions_existing_list = []
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
if self.subscriptions_existing:
for sub in self.subscriptions_existing:
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.append(sub_key)
if self.purge_subscriptions and sub_key not in desired_subscriptions and \
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
for (protocol, endpoint) in desired_subscriptions:
if (protocol, endpoint) not in subscriptions_existing_list:
self.changed = True
                self.subscriptions_added.append((protocol, endpoint))
if not self.check_mode:
self.connection.subscribe(self.arn_topic, protocol, endpoint)
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
for sub in self.subscriptions_existing:
if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.subscriptions_deleted.append(sub['SubscriptionArn'])
self.changed = True
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
def _delete_topic(self):
self.topic_deleted = True
self.changed = True
if not self.check_mode:
self.connection.delete_topic(self.arn_topic)
def ensure_ok(self):
self.arn_topic = self._arn_topic_lookup()
if not self.arn_topic:
self._create_topic()
self._set_topic_attrs()
self._get_topic_subs()
self._set_topic_subs()
def ensure_gone(self):
self.arn_topic = self._arn_topic_lookup()
if self.arn_topic:
self._get_topic_subs()
if self.subscriptions_existing:
self._delete_subscriptions()
self._delete_topic()
def get_info(self):
info = {
'name': self.name,
'state': self.state,
'display_name': self.display_name,
'policy': self.policy,
'delivery_policy': self.delivery_policy,
'subscriptions_new': self.subscriptions,
'subscriptions_existing': self.subscriptions_existing,
'subscriptions_deleted': self.subscriptions_deleted,
'subscriptions_added': self.subscriptions_added,
'subscriptions_purge': self.purge_subscriptions,
'check_mode': self.check_mode,
'topic_created': self.topic_created,
'topic_deleted': self.topic_deleted,
'attributes_set': self.attributes_set
}
return info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present',
'absent']),
display_name=dict(type='str', required=False),
policy=dict(type='dict', required=False),
delivery_policy=dict(type='dict', required=False),
subscriptions=dict(default=[], type='list', required=False),
purge_subscriptions=dict(type='bool', default=True),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params.get('name')
state = module.params.get('state')
display_name = module.params.get('display_name')
policy = module.params.get('policy')
delivery_policy = module.params.get('delivery_policy')
subscriptions = module.params.get('subscriptions')
purge_subscriptions = module.params.get('purge_subscriptions')
check_mode = module.check_mode
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
sns_topic = SnsTopicManager(module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode,
region,
**aws_connect_params)
if state == 'present':
sns_topic.ensure_ok()
elif state == 'absent':
sns_topic.ensure_gone()
sns_facts = dict(changed=sns_topic.changed,
sns_arn=sns_topic.arn_topic,
sns_topic=sns_topic.get_info())
module.exit_json(**sns_facts)
if __name__ == '__main__':
main()
|
unaxfromsibiria/roolet
|
refs/heads/master
|
clients/python3/roolet/client.py
|
1
|
# @author: Michael Vorotyntsev
# @email: linkofwise@gmail.com
# @github: unaxfromsibiria
import base64
import pickle
import socket
import time
from uuid import uuid4
from random import SystemRandom
from .config import Configuration, LoggerWrapper
from .common import CommandBuilder, Command
from .exceptions import ExecuteError
from .protocol import CommandTargetType, auth_request, ServiceGroup
class Client(object):
"""
TODO:
"""
cls_command_builder = CommandBuilder
command_builder = None
encoding = None
_connection = None
_iter = None
_timeout = None
_buffer_size = None
_cid = None
_cid_part = None
def __init__(self, conf=None):
if not conf:
conf = Configuration()
self._conf = conf
self.encoding = conf.get('encoding') or 'utf-8'
self._iter = conf.get('iter') or 0.05
self._timeout = conf.get('timeout') or 60
if not(1 <= self._timeout <= 60):
self._timeout = 60
self._buffer_size = conf.get('buffer_size') or 512
self._logger = conf.get_logger(
wraper=LoggerWrapper('client'))
self.command_builder = self.cls_command_builder()
rand = SystemRandom()
self._cid_part = '{:0>4}'.format(hex(rand.randint(0, int('ffff', 16)))[2:])
def _new_cmd(self, **data):
return self.command_builder.cls_command(cid=self._cid, **data)
def _read(self):
start_time = time.time()
wait = True
while wait:
            # the socket read timeout shrinks as the overall deadline approaches
socket_timeout = self._timeout - round(time.time() - start_time, 1)
wait = socket_timeout > 0
if wait:
self._connection.settimeout(socket_timeout)
try:
new_data = self._connection.recv(self._buffer_size)
except Exception as err:
self._logger.error(err)
wait = False
else:
new_data = new_data.decode(self.encoding)
new_data = new_data.split('\n')
for line_data in new_data:
if not line_data:
continue
self.command_builder.append(line_data)
wait = not self.command_builder.is_done()
if not wait:
yield self.command_builder.get_command()
finally:
if self._connection:
self._connection.settimeout(None)
if wait:
time.sleep(self._iter)
def _send(self, command):
assert isinstance(command, Command)
data = bytes('{}\n'.format(command.as_json()), self.encoding)
self._connection.send(data)
def _auth(self):
cmd = self._new_cmd(target=CommandTargetType.auth_request)
self._send(cmd)
for cmd in self._read():
if not cmd:
continue
assert cmd.target == CommandTargetType.auth_request
auth_cmd = auth_request(
command=cmd, manager=None, options=self._conf, logger=self._logger)
self._send(auth_cmd)
for cmd in self._read():
if self._cid is None:
self._cid = cmd.cid
self._logger.debug('Received new client ID: {}'.format(cmd.cid))
return cmd.target == CommandTargetType.client_data
return False
def _get_task_id(self):
return '{}-{}'.format(self._cid_part, uuid4())
def open(self):
if self._connection is None:
self._cid = None
try:
self._connection = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((
self._conf.get('addr'),
int(self._conf.get('port')),
))
except Exception as err:
self._logger.error(err)
else:
if self._auth():
# send data
cmd = self._new_cmd(
data={
'group': ServiceGroup.service.value,
},
target=CommandTargetType.client_data)
self._send(cmd)
for cmd in self._read():
if not cmd:
continue
if not cmd.target == CommandTargetType.wait_command:
self._logger.error(
'Protocol changed? Unexpected command: {}'.format(cmd))
self._connection.close()
self._connection = None
else:
self._logger.error('Auth problem! Check key in configuration.')
self._connection.close()
self._connection = None
def is_active(self):
return bool(self._connection and self._cid)
def execute(self, method, params=None, progress=True):
"""
:param str method: remote method name
:param params: method kwargs
:param bool progress: use native progress bar support
        :returns: task id
        :rtype: str
"""
assert method and isinstance(method, str)
task_id = self._get_task_id()
data = {
'id': task_id,
'params': None,
'progress': progress,
}
if params:
data.update(
params=base64.encodebytes(pickle.dumps(params)))
cmd = self._new_cmd(
target=CommandTargetType.call_method, data=data, method=method)
self._send(cmd)
for cmd in self._read():
if cmd.target == CommandTargetType.problem:
raise ExecuteError(cmd.data)
elif cmd.target == CommandTargetType.ok:
return task_id
def get_result(self, task_id):
cmd = self._new_cmd(
target=CommandTargetType.get_result, data=task_id)
self._send(cmd)
for cmd in self._read():
if cmd.target == CommandTargetType.problem:
raise ExecuteError(cmd.data)
elif cmd.target == CommandTargetType.ok:
if cmd.data:
# TODO: try except wrapper
return pickle.loads(base64.decodebytes(cmd.data))
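# Minimal usage sketch (illustrative only, not part of the original client).
# It assumes a running roolet server reachable at the address/port held in the
# Configuration, and a remote method named 'ping' registered on the server
# side; both names are made-up placeholders.
#
#     client = Client()
#     client.open()
#     if client.is_active():
#         task_id = client.execute('ping', params={'value': 1})
#         result = client.get_result(task_id)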
|
wkschwartz/django
|
refs/heads/stable/3.2.x
|
tests/i18n/other/locale/fr/__init__.py
|
12133432
| |
DigitalSlideArchive/HistomicsTK
|
refs/heads/master
|
histomicstk/cli/ComputeNucleiFeatures/__init__.py
|
12133432
| |
blublud/networkx
|
refs/heads/master
|
networkx/algorithms/components/tests/test_biconnected.py
|
43
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx.algorithms.components import biconnected
from networkx import NetworkXNotImplemented
def assert_components_edges_equal(x, y):
sx = {frozenset([frozenset(e) for e in c]) for c in x}
sy = {frozenset([frozenset(e) for e in c]) for c in y}
assert_equal(sx, sy)
def assert_components_equal(x, y):
sx = {frozenset(c) for c in x}
sy = {frozenset(c) for c in y}
assert_equal(sx, sy)
def test_barbell():
G = nx.barbell_graph(8, 4)
G.add_path([7, 20, 21, 22])
G.add_cycle([22, 23, 24, 25])
pts = set(nx.articulation_points(G))
assert_equal(pts, {7, 8, 9, 10, 11, 12, 20, 21, 22})
answer = [
{12, 13, 14, 15, 16, 17, 18, 19},
{0, 1, 2, 3, 4, 5, 6, 7},
{22, 23, 24, 25},
{11, 12},
{10, 11},
{9, 10},
{8, 9},
{7, 8},
{21, 22},
{20, 21},
{7, 20},
]
assert_components_equal(list(nx.biconnected_components(G)), answer)
G.add_edge(2,17)
pts = set(nx.articulation_points(G))
assert_equal(pts, {7, 20, 21, 22})
def test_articulation_points_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1, 3, 4])
pts=set(nx.articulation_points(G))
assert_equal(pts, {1})
def test_is_biconnected():
G=nx.cycle_graph(3)
assert_true(nx.is_biconnected(G))
G.add_cycle([1, 3, 4])
assert_false(nx.is_biconnected(G))
def test_empty_is_biconnected():
G=nx.empty_graph(5)
assert_false(nx.is_biconnected(G))
G.add_edge(0, 1)
assert_false(nx.is_biconnected(G))
def test_biconnected_components_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1, 3, 4])
answer = [{0, 1, 2}, {1, 3, 4}]
assert_components_equal(list(nx.biconnected_components(G)), answer)
def test_biconnected_component_subgraphs_cycle():
G=nx.cycle_graph(3)
G.add_cycle([1, 3, 4, 5])
Gc = set(nx.biconnected_component_subgraphs(G))
assert_equal(len(Gc), 2)
g1, g2=Gc
if 0 in g1:
assert_true(nx.is_isomorphic(g1, nx.Graph([(0,1),(0,2),(1,2)])))
assert_true(nx.is_isomorphic(g2, nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
else:
assert_true(nx.is_isomorphic(g1, nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
assert_true(nx.is_isomorphic(g2, nx.Graph([(0,1),(0,2),(1,2)])))
def test_biconnected_components1():
# graph example from
# http://www.ibluemojo.com/school/articul_algorithm.html
edges=[
(0, 1), (0, 5), (0, 6), (0, 14), (1, 5), (1, 6), (1, 14), (2, 4),
(2, 10), (3, 4), (3, 15), (4, 6), (4, 7), (4, 10), (5, 14), (6, 14),
(7, 9), (8, 9), (8, 12), (8, 13), (10, 15), (11, 12), (11, 13), (12, 13)
]
G=nx.Graph(edges)
pts = set(nx.articulation_points(G))
assert_equal(pts, {4, 6, 7, 8, 9})
comps = list(nx.biconnected_component_edges(G))
answer = [
[(3, 4), (15, 3), (10, 15), (10, 4), (2, 10), (4, 2)],
[(13, 12), (13, 8), (11, 13), (12, 11), (8, 12)],
[(9, 8)],
[(7, 9)],
[(4, 7)],
[(6, 4)],
[(14, 0), (5, 1), (5, 0), (14, 5), (14, 1), (6, 14), (6, 0), (1, 6), (0, 1)],
]
assert_components_edges_equal(comps, answer)
def test_biconnected_components2():
G=nx.Graph()
G.add_cycle('ABC')
G.add_cycle('CDE')
G.add_cycle('FIJHG')
G.add_cycle('GIJ')
G.add_edge('E','G')
comps = list(nx.biconnected_component_edges(G))
answer = [
[tuple('GF'), tuple('FI'), tuple('IG'), tuple('IJ'),
tuple('JG'), tuple('JH'), tuple('HG')],
[tuple('EG')],
[tuple('CD'), tuple('DE'), tuple('CE')],
[tuple('AB'), tuple('BC'), tuple('AC')]
]
assert_components_edges_equal(comps, answer)
def test_biconnected_davis():
D = nx.davis_southern_women_graph()
bcc = list(nx.biconnected_components(D))[0]
assert_true(set(D) == bcc) # All nodes in a giant bicomponent
# So no articulation points
assert_equal(len(list(nx.articulation_points(D))), 0)
def test_biconnected_karate():
K = nx.karate_club_graph()
answer = [{0, 1, 2, 3, 7, 8, 9, 12, 13, 14, 15, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33},
{0, 4, 5, 6, 10, 16},
{0, 11}]
bcc = list(nx.biconnected_components(K))
assert_components_equal(bcc, answer)
assert_equal(set(nx.articulation_points(K)), {0})
def test_biconnected_eppstein():
# tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py
G1 = nx.Graph({
0: [1, 2, 5],
1: [0, 5],
2: [0, 3, 4],
3: [2, 4, 5, 6],
4: [2, 3, 5, 6],
5: [0, 1, 3, 4],
6: [3, 4],
})
G2 = nx.Graph({
0: [2, 5],
1: [3, 8],
2: [0, 3, 5],
3: [1, 2, 6, 8],
4: [7],
5: [0, 2],
6: [3, 8],
7: [4],
8: [1, 3, 6],
})
assert_true(nx.is_biconnected(G1))
assert_false(nx.is_biconnected(G2))
answer_G2 = [{1, 3, 6, 8}, {0, 2, 5}, {2, 3}, {4, 7}]
bcc = list(nx.biconnected_components(G2))
assert_components_equal(bcc, answer_G2)
def test_connected_raise():
DG = nx.DiGraph()
assert_raises(NetworkXNotImplemented, nx.biconnected_components, DG)
assert_raises(NetworkXNotImplemented, nx.biconnected_component_subgraphs, DG)
assert_raises(NetworkXNotImplemented, nx.biconnected_component_edges, DG)
assert_raises(NetworkXNotImplemented, nx.articulation_points, DG)
assert_raises(NetworkXNotImplemented, nx.is_biconnected, DG)
|
xyjin/Program_trade_system
|
refs/heads/master
|
strategies/trending.py
|
1
|
# A module for all built-in commands.
# vim: sw=4: et
LICENSE="""
Copyright (C) 2011 Michael Ihde
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import datetime
import os
import tables
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
from indicators.ema import EMA
from indicators.rsi import RSI
from indicators.simplevalue import SimpleValue
from strategy import Strategy
from utils.model import Order
from utils.date import ONE_DAY
class SymbolData(tables.IsDescription):
date = tables.TimeCol()
closing = tables.Float32Col()
ema_short = tables.Float32Col()
ema_long = tables.Float32Col()
class Trending(Strategy):
DEF_LONG_DAYS = 200
DEF_SHORT_DAYS = 15
DEF_RSI_PERIOD = 14
def __init__(self, start_date, end_date, initial_position, market, params, h5file=None):
Strategy.__init__(self, start_date, end_date, initial_position, market, params, h5file)
for symbol in initial_position.keys():
if symbol == "$":
continue
self.addIndicator(symbol, "value", SimpleValue())
try:
short = params['short']
except KeyError:
short = Trending.DEF_SHORT_DAYS
self.addIndicator(symbol, "short", EMA(short))
try:
long_ = params['long']
except KeyError:
long_ = Trending.DEF_LONG_DAYS
self.addIndicator(symbol, "long", EMA(long_))
try:
rsi = params['rsi']
except KeyError:
rsi = Trending.DEF_RSI_PERIOD
self.addIndicator(symbol, "rsi", RSI(rsi))
# Backfill the indicators
try:
backfill = params['backfill']
except KeyError:
backfill = long_
d = start_date - (backfill * ONE_DAY)
print 'trending'
print d
print start_date
self.updateIndicators(d, start_date)
def evaluate(self, date, position, market):
self.updateIndicators(date)
orders = []
# Based of indicators, create signals
buyTriggers = []
sellTriggers = []
for symbol, qty in position.items():
if symbol != '$':
ticker = market[symbol]
close_price = ticker[date].adjclose
#print date
#print close_price
#print self.indicators[symbol]["short"].value
#print self.indicators[symbol]["long"].value
if self.indicators[symbol]["short"].value < self.indicators[symbol]["long"].value:
sellTriggers.append(symbol)
elif self.indicators[symbol]["short"].value > self.indicators[symbol]["long"].value:
buyTriggers.append(symbol)
#print sellTriggers
#print buyTriggers
# Using the basic MoneyManagement strategy, split all available cash
# among all buy signals
# Evaluate sell orders
for sellTrigger in sellTriggers:
print position[sellTrigger].amount
if position[sellTrigger].amount > 0:
orders.append(Order(Order.SELL, sellTrigger, "ALL", Order.MARKET_PRICE))
# Evaluate all buy orders
if len(buyTriggers) > 0:
cash = position['$']
#print cash
cashamt = position['$'] / len(buyTriggers)
for buyTrigger in buyTriggers:
ticker = market[buyTrigger]
close_price = ticker[date].adjclose
                if close_price is not None:
estimated_shares = int(cashamt / close_price)
# Only issues orders that buy at least one share
if estimated_shares >= 1:
orders.append(Order(Order.BUY, buyTrigger, "$%f" % cashamt, Order.MARKET_PRICE))
return orders
CLAZZ = Trending
|
tadebayo/myedge
|
refs/heads/master
|
myvenv/Lib/site-packages/django/middleware/common.py
|
39
|
import logging
import re
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.urls import is_valid_path
from django.utils.cache import get_conditional_response, set_response_etag
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_text
from django.utils.http import unquote_etag
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')
class CommonMiddleware(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
response_redirect_class = http.HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
must_prepend = settings.PREPEND_WWW and host and not host.startswith('www.')
redirect_url = ('%s://www.%s' % (request.scheme, host)) if must_prepend else ''
# Check if a slash should be appended
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
# Return a redirect if necessary
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.get_full_path().endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS:
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=unquote_etag(response['ETag']),
response=response,
)
return response
class BrokenLinkEmailsMiddleware(MiddlewareMixin):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith('/') and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and '?' in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlparse(referer)
if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
|
Onager/l2tdevtools
|
refs/heads/master
|
tests/dependency_writers/travis.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Travis-CI script writers."""
from __future__ import unicode_literals
import unittest
from l2tdevtools import dependencies
from l2tdevtools.dependency_writers import travis
from l2tdevtools.helpers import project
from tests import test_lib
class TravisInstallScriptWriterTest(test_lib.BaseTestCase):
"""Tests the Travis-CI install.sh script writer."""
def testInitialize(self):
"""Tests that the writer can be initialized."""
l2tdevtools_path = '/fake/l2tdevtools/'
project_definition = project.ProjectHelper(l2tdevtools_path)
configuration_file = self._GetTestFilePath(['dependencies.ini'])
dependency_helper = dependencies.DependencyHelper(
configuration_file=configuration_file)
writer = travis.TravisInstallScriptWriter(
l2tdevtools_path, project_definition, dependency_helper, None)
self.assertIsNotNone(writer)
# TODO: Add test for the Write method.
class TravisRunTestsScriptWriterTest(test_lib.BaseTestCase):
"""Tests the Travis-CI runtests.sh script writer."""
def testInitialize(self):
"""Tests that the writer can be initialized."""
l2tdevtools_path = '/fake/l2tdevtools/'
project_definition = project.ProjectHelper(l2tdevtools_path)
configuration_file = self._GetTestFilePath(['dependencies.ini'])
dependency_helper = dependencies.DependencyHelper(
configuration_file=configuration_file)
writer = travis.TravisRunTestsScriptWriter(
l2tdevtools_path, project_definition, dependency_helper, None)
self.assertIsNotNone(writer)
# TODO: Add test for the Write method.
class TravisRunWithTimeoutScriptWriterTest(test_lib.BaseTestCase):
"""Tests the Travis-CI run_with_timeout.sh script writer."""
def testInitialize(self):
"""Tests that the writer can be initialized."""
l2tdevtools_path = '/fake/l2tdevtools/'
project_definition = project.ProjectHelper(l2tdevtools_path)
configuration_file = self._GetTestFilePath(['dependencies.ini'])
dependency_helper = dependencies.DependencyHelper(
configuration_file=configuration_file)
writer = travis.TravisRunWithTimeoutScriptWriter(
l2tdevtools_path, project_definition, dependency_helper, None)
self.assertIsNotNone(writer)
# TODO: Add test for the Write method.
if __name__ == '__main__':
unittest.main()
|
janewilson90/auchtermuchty
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
|
GinnyN/towerofdimensions-django
|
refs/heads/master
|
django/core/management/commands/runfcgi.py
|
674
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Runs this project as a FastCGI application. Requires flup."
args = '[various KEY=val options, use `runfcgi help` for help]'
def handle(self, *args, **options):
from django.conf import settings
from django.utils import translation
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
from django.core.servers.fastcgi import runfastcgi
runfastcgi(args)
def usage(self, subcommand):
from django.core.servers.fastcgi import FASTCGI_HELP
return FASTCGI_HELP
|
openstack/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/volume_types/qos_specs/tables.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from urllib import parse
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
class SpecCreateKeyValuePair(tables.LinkAction):
# this is to create a spec key-value pair for an existing QOS Spec
name = "create"
verbose_name = _("Create")
url = "horizon:admin:volume_types:qos_specs:create"
classes = ("ajax-modal",)
icon = "plus"
def get_link_url(self, qos_spec=None):
qos_spec_id = self.table.kwargs['qos_spec_id']
return reverse(self.url, args=[qos_spec_id])
class SpecDeleteKeyValuePair(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
"Delete Spec",
"Delete Specs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Deleted Spec",
"Deleted Specs",
count
)
def delete(self, request, obj_id):
qos_spec_id = self.table.kwargs['qos_spec_id']
# use "unset" api to remove this key-value pair from QOS Spec
key = parse.unquote(obj_id)
api.cinder.qos_spec_unset_keys(request,
qos_spec_id,
[key])
# redirect to non-modal page
def get_success_url(self, request=None):
return reverse('horizon:admin:volume_types:index')
class SpecEditKeyValuePair(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:admin:volume_types:qos_specs:edit"
classes = ("ajax-modal",)
icon = "pencil"
def get_link_url(self, qos_spec):
return reverse(self.url, args=[qos_spec.id, qos_spec.key])
class SpecsTable(tables.DataTable):
key = tables.Column('key', verbose_name=_('Key'))
value = tables.Column('value', verbose_name=_('Value'))
class Meta(object):
name = "specs"
verbose_name = _("Key-Value Pairs")
table_actions = (SpecCreateKeyValuePair, SpecDeleteKeyValuePair)
row_actions = (SpecEditKeyValuePair, SpecDeleteKeyValuePair)
def get_object_id(self, datum):
return parse.quote(datum.key)
def get_object_display(self, datum):
return datum.key
|
menify/sandbox
|
refs/heads/master
|
trunk/setup/setup_flexelint.py
|
1
|
import os.path
import aql.utils
from aql.setup import toolSetup
import aql.local_host
#//---------------------------------------------------------------------------//
@toolSetup('aql_tool_flexelint')
def setup_flexelint( options, os_env, env ):
if aql.local_host.os == 'cygwin':
_drive_d = '/cygdrive/d'
else:
_drive_d = 'd:'
FLEXELINTDIR = _drive_d + '/bin/development/flexelint'
FLEXLINT_USER_DIR = os.path.join( os.path.dirname( __file__ ), 'lnt' )
aql.utils.prependEnvPath( os_env, 'PATH', FLEXELINTDIR + '/bin' )
options.lint_flags += '-i' + FLEXELINTDIR + '/lnt'
options.lint_flags += '-i' + FLEXLINT_USER_DIR
options.lint_flags += 'common.lnt'
options.lint_flags += 'msg_format.lnt'
|
owers19856/django-cms
|
refs/heads/develop
|
cms/models/settingmodels.py
|
39
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class UserSettings(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, editable=False, related_name='djangocms_usersettings')
language = models.CharField(_("Language"), max_length=10, choices=settings.LANGUAGES,
help_text=_("The language for the admin interface and toolbar"))
clipboard = models.ForeignKey('cms.Placeholder', blank=True, null=True, editable=False)
class Meta:
verbose_name = _('user setting')
verbose_name_plural = _('user settings')
app_label = 'cms'
def __str__(self):
return force_text(self.user)
|
ibrahimkarahan/Flexget
|
refs/heads/develop
|
flexget/ui/plugins/movies/movies.py
|
5
|
from __future__ import unicode_literals, division, absolute_import
import time
import logging
import posixpath
from flask import render_template, Blueprint, request, redirect, flash, send_file
from flask.helpers import url_for
from flexget.plugin import DependencyError, get_plugin_by_name
from flexget.ui.webui import register_plugin, app, manager
from flexget.utils import qualities
try:
from flexget.plugins.filter.movie_queue import QueueError, queue_get, queue_add, queue_del, queue_edit
except ImportError:
raise DependencyError(issued_by='ui.movies', missing='movie_queue')
movies_module = Blueprint('movies', __name__)
log = logging.getLogger('ui.movies')
# TODO: refactor this filter to some globally usable place (webui.py?)
# also flexget/plugins/ui/utils.py needs to be removed
# ... mainly because we have flexget/utils for that :)
@app.template_filter('pretty_age')
def pretty_age_filter(value):
from flexget.ui.utils import pretty_date
return pretty_date(time.mktime(value.timetuple()))
@movies_module.route('/')
def index():
movie_queue = queue_get()
tmdb_lookup = get_plugin_by_name('api_tmdb').instance.lookup
for item in movie_queue:
try:
movie = tmdb_lookup(tmdb_id=item.tmdb_id, only_cached=True)
except LookupError:
            item.overview = ('TMDb lookup was not successful, no overview available. '
                             'Lookup is being retried in the background.')
log.debug('No themoviedb result for tmdb id %s' % item.tmdb_id)
# this is probably not needed since non cached movies are retried also
# in the cover function
#
# import thread
# thread.start_new_thread(tmdb_lookup, (), {'imdb_id': item.imdb_id})
continue
# set thumb, but only if already in cache because retrieving is too slow here
# movies without cached thumb use img tag reading /cover/<imdb_id> which will
        # retrieve the image and thus allow rendering the page immediately
for poster in movie.posters:
if poster.size == 'thumb':
thumb = poster.get_file(only_cached=True)
if thumb:
item.thumb = url_for('userstatic', filename=posixpath.join(*thumb))
break
item.title = movie.name
item.year = movie.released and movie.released.year
item.overview = movie.overview
context = {'movies': movie_queue}
return render_template('movies/movies.html', **context)
@movies_module.route('/add', methods=['GET', 'POST'])
def add_to_queue():
what = request.values.get('what')
imdb_id = request.values.get('imdb_id')
# TODO: This is a rather limited selection of quality considering the new quality system. Improve it.
quality = qualities.Requirements(request.values.get('quality', 'ANY'))
force = request.values.get('force') == 'on'
try:
title = queue_add(title=what, imdb_id=imdb_id, quality=quality, force=force)['title']
except QueueError as e:
flash(e.message, 'error')
else:
flash('%s successfully added to queue.' % title, 'success')
return redirect(url_for('.index'))
@movies_module.route('/del')
def del_from_queue():
imdb_id = request.values.get('imdb_id')
try:
title = queue_del(imdb_id=imdb_id)
except QueueError as e:
flash(e.message, 'error')
else:
flash('%s removed from queue.' % title, 'delete')
return redirect(url_for('.index'))
@movies_module.route('/edit')
def edit_movie_quality():
imdb_id = request.values.get('imdb_id')
quality = request.values.get('quality')
try:
queue_edit(quality, imdb_id=imdb_id)
except QueueError as e:
flash(e.message, 'error')
else:
# TODO: Display movie name instead of id
flash('%s quality changed to %s' % (imdb_id, quality), 'success')
return redirect(url_for('.index'))
@movies_module.route('/cover/<imdb_id>')
def cover(imdb_id):
import os
# TODO: return '' should be replaced with something sane, http error 404 ?
tmdb_lookup = get_plugin_by_name('api_tmdb').instance.lookup
try:
movie = tmdb_lookup(imdb_id=imdb_id)
except LookupError:
log.error('No cached data for %s' % imdb_id)
return ''
filepath = None
for poster in movie.posters:
if poster.size == 'thumb':
filepath = os.path.join(manager.config_base, 'userstatic', *poster.get_file())
break
if filepath is None:
log.error('No cover for %s' % imdb_id)
return ''
elif not os.path.exists(filepath):
log.error('File %s does not exist' % filepath)
return ''
log.debug('sending thumb file %s' % filepath)
return send_file(filepath, mimetype='image/png')
register_plugin(movies_module, menu='Movies')
|
hadmagic/Aidez-moi
|
refs/heads/master
|
ticket/tables.py
|
1
|
# coding=utf-8
__author__ = 'had'
# The MIT License (MIT)
# Copyright (c) [2015] [Houtmann Hadrien]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the Aidez-moi), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ticket.models import Tickets
from django.core.urlresolvers import reverse
import django_tables2 as tables
from django_tables2.utils import A
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
class PriorityColumn(tables.Column):
"""
Class qui sert à colorer les cellules en fonction de leurs
priorité
"""
def render(self, value):
        if value == dict(Tickets.PRIORITY_CHOICES).get('CRITICAL'):
self.attrs = {"td": {"bgcolor": "FF3333"}}
elif value == dict(Tickets.PRIORITY_CHOICES).get('HIGH'):
self.attrs = {"td": {"bgcolor": "FF8585"}}
elif value == dict(Tickets.PRIORITY_CHOICES).get('LOW'):
self.attrs = {"td": {"bgcolor": "FFC299"}}
elif value == dict(Tickets.PRIORITY_CHOICES).get('VERYLOW'):
self.attrs = {"td": {"bgcolor": "FFE2CE"}}
elif value == dict(Tickets.PRIORITY_CHOICES).get('NORMAL'):
self.attrs = {}
return value
class StatusColumn(tables.Column):
"""
Class met un badge en fonction du status
"""
def render(self, value):
if value == dict(Tickets.STATUS_CHOICES).get('OPEN'):
return mark_safe(
'<div class="ui center orange label">'+ _('Ouvert')+ '</div>')
elif value == dict(Tickets.STATUS_CHOICES).get('CLOSED'):
return mark_safe(
'<div class="ui center black label">' + _('Clos') + '</div>')
elif value == dict(Tickets.STATUS_CHOICES).get('RESOLVED'):
return mark_safe('<div class="ui green label">' +_('Résolus') + '</div>')
class TitleColumn(tables.LinkColumn):
"""
Classe qui surcharge la colonne titre afin de limiter le titre en caractère pour ne pas
déformer le tableau
L'astuce et de limiter la variable value: value[:126]
"""
def render(self, value, record, bound_column):
return self.render_link(reverse('view', args=[record.id]), value[:70])
class TicketsTables(tables.Table):
title = TitleColumn('view', args=[A('id')])
priority = PriorityColumn()
status = StatusColumn()
class Meta:
model = Tickets
attrs = {"class": "paleblue"}
exclude = ('content', 'depends_on',
'file', 'date_resolved',
'date_closed', 'date_assigned',
'ask_to_delete')
|
GarethNelson/distcc
|
refs/heads/master
|
test/onetest.py
|
26
|
#!/usr/bin/python2.4
#
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""Usage: onetest.py [--valgrind[=command]] [--lzo] [--pump] TESTNAME
This command runs a single test case.
TESTNAME should be the name of one of the test cases from testdistcc.py.
"""
__author__ = 'Fergus Henderson'
import testdistcc
import comfychair
import sys
if __name__ == '__main__':
while len(sys.argv) > 1 and sys.argv[1].startswith("--"):
if sys.argv[1] == "--valgrind":
testdistcc._valgrind_command = "valgrind --quiet "
del sys.argv[1]
elif sys.argv[1].startswith("--valgrind="):
testdistcc._valgrind_command = sys.argv[1][len("--valgrind="):] + " "
del sys.argv[1]
elif sys.argv[1] == "--lzo":
testdistcc._server_options = ",lzo"
del sys.argv[1]
elif sys.argv[1] == "--pump":
testdistcc._server_options = ",lzo,cpp"
del sys.argv[1]
if len(sys.argv) > 1:
testname = sys.argv[1]
del sys.argv[1]
comfychair.main([eval('testdistcc.' + testname)])
else:
sys.exit(__doc__)
|
farhi-naz/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/generator-output/gyptest-rules.py
|
151
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
test = TestGyp.TestGyp()
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
|
dims/neutron
|
refs/heads/master
|
neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py
|
23
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add geneve ml2 type driver
Revision ID: 11926bcfe72d
Revises: 2e5352a0ad4d
Create Date: 2015-08-27 19:56:16.356522
"""
# revision identifiers, used by Alembic.
revision = '11926bcfe72d'
down_revision = '2e5352a0ad4d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'ml2_geneve_allocations',
sa.Column('geneve_vni', sa.Integer(),
autoincrement=False, nullable=False),
sa.Column('allocated', sa.Boolean(),
server_default=sa.sql.false(), nullable=False),
sa.PrimaryKeyConstraint('geneve_vni'),
)
op.create_index(op.f('ix_ml2_geneve_allocations_allocated'),
'ml2_geneve_allocations', ['allocated'], unique=False)
op.create_table(
'ml2_geneve_endpoints',
sa.Column('ip_address', sa.String(length=64), nullable=False),
sa.Column('host', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('ip_address'),
sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'),
)
|
codester2/devide
|
refs/heads/master
|
modules/vtk_basic/vtkWin32VideoSource.py
|
7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkWin32VideoSource(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkWin32VideoSource(), 'Processing.',
(), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
sourcepole/qgis-wps-client
|
refs/heads/master
|
Ui_qgswpsgui.py
|
1
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qgswpsgui.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_QgsWps(object):
def setupUi(self, QgsWps):
QgsWps.setObjectName("QgsWps")
QgsWps.setWindowModality(QtCore.Qt.NonModal)
QgsWps.resize(780, 604)
QgsWps.setAcceptDrops(False)
self.gridLayout = QtWidgets.QGridLayout(QgsWps)
self.gridLayout.setObjectName("gridLayout")
self.GroupBox1 = QtWidgets.QGroupBox(QgsWps)
self.GroupBox1.setObjectName("GroupBox1")
self.gridlayout = QtWidgets.QGridLayout(self.GroupBox1)
self.gridlayout.setContentsMargins(9, 9, 9, 9)
self.gridlayout.setSpacing(6)
self.gridlayout.setObjectName("gridlayout")
self.btnNew = QtWidgets.QPushButton(self.GroupBox1)
self.btnNew.setObjectName("btnNew")
self.gridlayout.addWidget(self.btnNew, 1, 1, 1, 1)
self.btnEdit = QtWidgets.QPushButton(self.GroupBox1)
self.btnEdit.setEnabled(False)
self.btnEdit.setObjectName("btnEdit")
self.gridlayout.addWidget(self.btnEdit, 1, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(171, 30, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem, 1, 4, 1, 1)
self.btnConnect = QtWidgets.QPushButton(self.GroupBox1)
self.btnConnect.setEnabled(True)
self.btnConnect.setObjectName("btnConnect")
self.gridlayout.addWidget(self.btnConnect, 1, 0, 1, 1)
self.cmbConnections = QtWidgets.QComboBox(self.GroupBox1)
self.cmbConnections.setObjectName("cmbConnections")
self.gridlayout.addWidget(self.cmbConnections, 0, 0, 1, 7)
self.btnDelete = QtWidgets.QPushButton(self.GroupBox1)
self.btnDelete.setEnabled(False)
self.btnDelete.setObjectName("btnDelete")
self.gridlayout.addWidget(self.btnDelete, 1, 3, 1, 1)
self.pushDefaultServer = QtWidgets.QPushButton(self.GroupBox1)
self.pushDefaultServer.setObjectName("pushDefaultServer")
self.gridlayout.addWidget(self.pushDefaultServer, 1, 6, 1, 1)
self.btnBookmarks = QtWidgets.QPushButton(self.GroupBox1)
self.btnBookmarks.setObjectName("btnBookmarks")
self.gridlayout.addWidget(self.btnBookmarks, 1, 5, 1, 1)
self.gridLayout.addWidget(self.GroupBox1, 0, 0, 1, 1)
self.splitter = QtWidgets.QSplitter(QgsWps)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.label = QtWidgets.QLabel(self.splitter)
self.label.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMaximumSize(QtCore.QSize(50, 16777215))
self.label.setObjectName("label")
self.lneFilter = QtWidgets.QLineEdit(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lneFilter.sizePolicy().hasHeightForWidth())
self.lneFilter.setSizePolicy(sizePolicy)
self.lneFilter.setObjectName("lneFilter")
self.gridLayout.addWidget(self.splitter, 1, 0, 1, 1)
self.treeWidget = QtWidgets.QTreeWidget(QgsWps)
self.treeWidget.setColumnCount(3)
self.treeWidget.setObjectName("treeWidget")
self.gridLayout.addWidget(self.treeWidget, 2, 0, 1, 1)
self.hboxlayout = QtWidgets.QHBoxLayout()
self.hboxlayout.setContentsMargins(0, 0, 0, 0)
self.hboxlayout.setSpacing(6)
self.hboxlayout.setObjectName("hboxlayout")
self.btnAbout = QtWidgets.QPushButton(QgsWps)
self.btnAbout.setObjectName("btnAbout")
self.hboxlayout.addWidget(self.btnAbout)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem1)
self.buttonBox = QtWidgets.QDialogButtonBox(QgsWps)
self.buttonBox.setEnabled(True)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.hboxlayout.addWidget(self.buttonBox)
self.gridLayout.addLayout(self.hboxlayout, 3, 0, 1, 1)
self.retranslateUi(QgsWps)
QtCore.QMetaObject.connectSlotsByName(QgsWps)
def retranslateUi(self, QgsWps):
_translate = QtCore.QCoreApplication.translate
QgsWps.setWindowTitle(_translate("QgsWps", "Note: this plugin not considered stable yet. Use it on your own risk"))
self.GroupBox1.setTitle(_translate("QgsWps", "Server Connections"))
self.btnNew.setText(_translate("QgsWps", "&New"))
self.btnEdit.setText(_translate("QgsWps", "Edit"))
self.btnConnect.setText(_translate("QgsWps", "C&onnect"))
self.btnDelete.setText(_translate("QgsWps", "Delete"))
self.pushDefaultServer.setText(_translate("QgsWps", "Add default server"))
self.btnBookmarks.setText(_translate("QgsWps", "Bookmarks"))
self.label.setText(_translate("QgsWps", "Filter:"))
self.treeWidget.setSortingEnabled(True)
self.treeWidget.headerItem().setText(0, _translate("QgsWps", "Identifier"))
self.treeWidget.headerItem().setText(1, _translate("QgsWps", "Title"))
self.treeWidget.headerItem().setText(2, _translate("QgsWps", "Abstract"))
self.btnAbout.setText(_translate("QgsWps", "about"))
|
severin31/suplemon
|
refs/heads/master
|
suplemon/modules/linter.py
|
2
|
# -*- encoding: utf-8
import subprocess
from suplemon.suplemon_module import Module
class Linter(Module):
def init(self):
self.init_logging(__name__)
if not self.has_flake8_support():
self.logger.warning("Flake8 not available. Can't show linting.")
return False
# Error codes to ignore e.g. 'E501' (line too long)
self.ignore = []
# Max length of line
self.max_line_length = 120 # Default is 79
        # TODO: Run linting in a separate thread to avoid
# blocking the UI when the app is loading
# Lint all files after app is loaded
self.bind_event_after("app_loaded", self.lint_all_files)
# Show linting messages in status bar
self.bind_event_after("mainloop", self.mainloop)
# Re-lint current file when appropriate
self.bind_event_after("save_file", self.lint_current_file)
self.bind_event_after("save_file_as", self.lint_current_file)
self.bind_event_after("reload_file", self.lint_current_file)
self.bind_event_after("open_file", self.lint_current_file)
def run(self, app, editor, args):
"""Run the linting command."""
editor = self.app.get_file().get_editor()
count = self.get_msg_count(editor)
status = str(count) + " lines with linting errors in this file."
self.app.set_status(status)
def mainloop(self, event):
"""Run the linting command."""
file = self.app.get_file()
editor = file.get_editor()
cursor = editor.get_cursor()
if len(editor.cursors) > 1:
return False
line_no = cursor.y + 1
msg = self.get_msgs_on_line(editor, cursor.y)
if msg:
self.app.set_status("Line " + str(line_no) + ": " + msg)
def has_flake8_support(self):
output = self.get_output(["flake8", "--version"])
return output
def lint_current_file(self, event):
self.lint_file(self.app.get_file())
def lint_all_files(self, event):
"""Do linting check for all open files and store results."""
for file in self.app.files:
self.lint_file(file)
return False
def lint_file(self, file):
path = file.get_path()
if not path: # Unsaved file
return False
if file.get_extension().lower() != "py": # Only lint Python files
return False
linting = self.get_file_linting(path)
if linting is False: # Linting failed
return False
editor = file.get_editor()
line_no = 0
while line_no < len(editor.lines):
line = editor.lines[line_no]
if line_no+1 in linting.keys():
line.linting = linting[line_no+1]
line.set_number_color(1)
else:
line.linting = False
line.reset_number_color()
line_no += 1
def get_output(self, cmd):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except (OSError, EnvironmentError): # can't use FileNotFoundError in Python 2
return False
out, err = process.communicate()
return out
def get_msgs_on_line(self, editor, line_no):
line = editor.lines[line_no]
if not hasattr(line, "linting") or not line.linting:
return False
return line.linting[0][1]
def get_msg_count(self, editor):
count = 0
for line in editor.lines:
if hasattr(line, "linting"):
if line.linting:
count += 1
return count
def get_file_linting(self, path):
"""Do linting check for given file path."""
output = self.get_output(["flake8", "--max-line-length", str(self.max_line_length), path])
if output is False:
self.logger.warning("Failed to get linting for file '{0}'.".format(path))
return False
output = output.decode("utf-8")
# Remove file paths from output
output = output.replace(path+":", "")
lines = output.split("\n")
linting = {}
for line in lines:
if not line:
continue
parts = line.split(":")
line_no = int(parts[0])
char_no = int(parts[1])
data = ":".join(parts[2:]).strip()
err_code = data.split(" ")[0]
if err_code in self.ignore:
continue
if line_no not in linting.keys():
linting[line_no] = []
linting[line_no].append((char_no, data, err_code))
return linting
module = {
"class": Linter,
"name": "linter",
}
|
Quantipy/quantipy
|
refs/heads/master
|
quantipy/sandbox/pptx/__init__.py
|
12133432
| |
cgre-aachen/gempy
|
refs/heads/master
|
examples/examples/geometries/foo/__init__.py
|
12133432
| |
uwdata/termite-visualizations
|
refs/heads/master
|
web2py/scripts/extract_mysql_models.py
|
33
|
'''
Create the web2py code needed to access your mysql legacy db.
To make this work all the legacy tables you want to access need to have an "id" field.
This plugin needs:
mysql
mysqldump
installed and globally available.
Under Windows you will probably need to add the mysql executable directory to the PATH variable;
you will also need to change mysql to mysql.exe and mysqldump to mysqldump.exe below.
Just guessing here :)
Access your tables with:
legacy_db(legacy_db.mytable.id>0).select()
If the script crashes, this might be due to the fact that the data_type_map dictionary below is incomplete.
Please complete it, improve it and continue.
Created by Falko Krause, minor modifications by Massimo Di Pierro and Ron McOuat
'''
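# For orientation, the code below emits web2py model definitions of roughly this
# shape (database, table and field names here are purely illustrative):
#
#   legacy_db = DAL('mysql://user:password@localhost/mydb')
#
#   legacy_db.define_table('person',
#       Field('name','string'),
#       Field('age','integer'),
#       migrate=False)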
import subprocess
import re
import sys
data_type_map = dict(
varchar='string',
int='integer',
integer='integer',
tinyint='integer',
smallint='integer',
mediumint='integer',
bigint='integer',
float='double',
double='double',
char='string',
decimal='integer',
date='date',
#year = 'date',
time='time',
timestamp='datetime',
datetime='datetime',
binary='blob',
blob='blob',
tinyblob='blob',
mediumblob='blob',
longblob='blob',
text='text',
tinytext='text',
mediumtext='text',
longtext='text',
)
def mysql(database_name, username, password):
p = subprocess.Popen(['mysql',
'--user=%s' % username,
'--password=%s' % password,
'--execute=show tables;',
database_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sql_showtables, stderr = p.communicate()
    tables = [re.sub(
        r'\|\s+([^\|*])\s+.*', r'\1', x) for x in sql_showtables.split()[1:]]
connection_string = "legacy_db = DAL('mysql://%s:%s@localhost/%s')" % (
username, password, database_name)
legacy_db_table_web2py_code = []
for table_name in tables:
#get the sql create statement
p = subprocess.Popen(['mysqldump',
'--user=%s' % username,
'--password=%s' % password,
'--skip-add-drop-table',
'--no-data', database_name,
table_name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sql_create_stmnt, stderr = p.communicate()
if 'CREATE' in sql_create_stmnt: # check if the table exists
#remove garbage lines from sql statement
sql_lines = sql_create_stmnt.split('\n')
sql_lines = filter(
lambda x: not(x in ('','\r') or x[:2] in ('--','/*')),
sql_lines)
#generate the web2py code from the create statement
web2py_table_code = ''
table_name = re.search(
'CREATE TABLE .(\S+). \(', sql_lines[0]).group(1)
fields = []
for line in sql_lines[1:-1]:
if re.search('KEY', line) or re.search('PRIMARY', line) or re.search(' ID', line) or line.startswith(')'):
continue
hit = re.search('(\S+)\s+(\S+)(,| )( .*)?', line)
if hit is not None:
name, d_type = hit.group(1), hit.group(2)
d_type = re.sub(r'(\w+)\(.*', r'\1', d_type)
name = re.sub('`', '', name)
web2py_table_code += "\n Field('%s','%s')," % (
name, data_type_map[d_type])
web2py_table_code = "legacy_db.define_table('%s',%s\n migrate=False)" % (table_name, web2py_table_code)
legacy_db_table_web2py_code.append(web2py_table_code)
#----------------------------------------
#write the legacy db to file
legacy_db_web2py_code = connection_string + "\n\n"
legacy_db_web2py_code += "\n\n#--------\n".join(
legacy_db_table_web2py_code)
return legacy_db_web2py_code
regex = re.compile('(.*?):(.*?)@(.*)')
if len(sys.argv) < 2 or not regex.match(sys.argv[1]):
print 'USAGE:\n\n extract_mysql_models.py username:password@data_basename\n\n'
else:
m = regex.match(sys.argv[1])
print mysql(m.group(3), m.group(1), m.group(2))
|
BambooL/jeeves
|
refs/heads/master
|
demo/tests/simpleRule/jelf/settings.py
|
12
|
"""
Django settings for jelf project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!$e(y9&5ol=#s7wex!xhv=f&5f2@ufjez3ee9kdifw=41p_+%*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, '..', 'templates/'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
ALLOWED_HOSTS = ['*']
TEMPLATE_LOADERS = (
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
)
DEFAULT_JINJA2_TEMPLATE_EXTENSION = '.html'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jinja',
'timelog',
'jelf',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'jjelf.db'),
}
}
MEDIA_ROOT = os.path.join(BASE_DIR, '..', "media")
MEDIA_URL = "/media/"
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '..', "static"),
)
# possible phases are submit, review, final
CONF_PHASE = 'submit'
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/celery/backends/amqp.py
|
17
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import socket
import threading
import time
from itertools import count
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from .. import states
from ..exceptions import TimeoutError
from .base import BaseDictBackend
class BacklogLimitExceeded(Exception):
"""Too much state history to fast-forward."""
def repair_uuid(s):
    # Historically the dashes in UUIDs are removed from AMQ entity names,
    # but there is no known reason to do so.  Hopefully we'll be able to fix
# this in v3.0.
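    # e.g. "39d3a25b12cd4f3dbb02fa4f16f3e9c1" -> "39d3a25b-12cd-4f3d-bb02-fa4f16f3e9c1"
    # (illustrative value only).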
return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
class AMQPBackend(BaseDictBackend):
"""Publishes results by sending messages."""
Exchange = Exchange
Queue = Queue
Consumer = Consumer
Producer = Producer
BacklogLimitExceeded = BacklogLimitExceeded
def __init__(self, connection=None, exchange=None, exchange_type=None,
persistent=None, serializer=None, auto_delete=True,
**kwargs):
super(AMQPBackend, self).__init__(**kwargs)
conf = self.app.conf
self._connection = connection
self.queue_arguments = {}
self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
else persistent)
delivery_mode = persistent and "persistent" or "transient"
exchange = exchange or conf.CELERY_RESULT_EXCHANGE
exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
self.exchange = self.Exchange(name=exchange,
type=exchange_type,
delivery_mode=delivery_mode,
durable=self.persistent,
auto_delete=auto_delete)
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
self.auto_delete = auto_delete
# AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
# removed in version 3.0.
dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
self.expires = None
if "expires" in kwargs:
if kwargs["expires"] is not None:
self.expires = self.prepare_expires(kwargs["expires"])
else:
self.expires = self.prepare_expires(dexpires)
if self.expires:
self.queue_arguments["x-expires"] = int(self.expires * 1000)
self.mutex = threading.Lock()
def _create_binding(self, task_id):
name = task_id.replace("-", "")
return self.Queue(name=name,
exchange=self.exchange,
routing_key=name,
durable=self.persistent,
auto_delete=self.auto_delete,
queue_arguments=self.queue_arguments)
def _create_producer(self, task_id, channel):
self._create_binding(task_id)(channel).declare()
return self.Producer(channel, exchange=self.exchange,
routing_key=task_id.replace("-", ""),
serializer=self.serializer)
def _create_consumer(self, bindings, channel):
return self.Consumer(channel, bindings, no_ack=True)
def _publish_result(self, connection, task_id, meta):
# cache single channel
if connection._default_channel is not None and \
connection._default_channel.connection is None:
connection.maybe_close_channel(connection._default_channel)
channel = connection.default_channel
self._create_producer(task_id, channel).publish(meta)
def revive(self, channel):
pass
def _store_result(self, task_id, result, status, traceback=None,
max_retries=20, interval_start=0, interval_step=1,
interval_max=1):
"""Send task return value and status."""
with self.mutex:
with self.app.pool.acquire(block=True) as conn:
def errback(error, delay):
print("Couldn't send result for %r: %r. Retry in %rs." % (
task_id, error, delay))
send = conn.ensure(self, self._publish_result,
max_retries=max_retries,
errback=errback,
interval_start=interval_start,
interval_step=interval_step,
interval_max=interval_max)
send(conn, task_id, {"task_id": task_id, "status": status,
"result": self.encode_result(result, status),
"traceback": traceback})
return result
def get_task_meta(self, task_id, cache=True):
return self.poll(task_id)
def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
**kwargs):
cached_meta = self._cache.get(task_id)
if cache and cached_meta and \
cached_meta["status"] in states.READY_STATES:
meta = cached_meta
else:
try:
meta = self.consume(task_id, timeout=timeout)
except socket.timeout:
raise TimeoutError("The operation timed out.")
state = meta["status"]
if state == states.SUCCESS:
return meta["result"]
elif state in states.PROPAGATE_STATES:
if propagate:
raise self.exception_to_python(meta["result"])
return meta["result"]
else:
return self.wait_for(task_id, timeout, cache)
def poll(self, task_id, backlog_limit=100):
with self.app.pool.acquire_channel(block=True) as (_, channel):
binding = self._create_binding(task_id)(channel)
binding.declare()
latest, acc = None, None
for i in count(): # fast-forward
latest, acc = acc, binding.get(no_ack=True)
if not acc:
break
if i > backlog_limit:
raise self.BacklogLimitExceeded(task_id)
if latest:
payload = self._cache[task_id] = latest.payload
return payload
elif task_id in self._cache: # use previously received state.
return self._cache[task_id]
return {"status": states.PENDING, "result": None}
def drain_events(self, connection, consumer, timeout=None, now=time.time):
wait = connection.drain_events
results = {}
def callback(meta, message):
if meta["status"] in states.READY_STATES:
uuid = repair_uuid(message.delivery_info["routing_key"])
results[uuid] = meta
consumer.callbacks[:] = [callback]
time_start = now()
while 1:
# Total time spent may exceed a single call to wait()
if timeout and now() - time_start >= timeout:
raise socket.timeout()
wait(timeout=timeout)
if results: # got event on the wanted channel.
break
self._cache.update(results)
return results
def consume(self, task_id, timeout=None):
with self.app.pool.acquire_channel(block=True) as (conn, channel):
binding = self._create_binding(task_id)
with self._create_consumer(binding, channel) as consumer:
return self.drain_events(conn, consumer, timeout).values()[0]
def get_many(self, task_ids, timeout=None, **kwargs):
with self.app.pool.acquire_channel(block=True) as (conn, channel):
ids = set(task_ids)
cached_ids = set()
for task_id in ids:
try:
cached = self._cache[task_id]
except KeyError:
pass
else:
if cached["status"] in states.READY_STATES:
yield task_id, cached
cached_ids.add(task_id)
ids ^= cached_ids
bindings = [self._create_binding(task_id) for task_id in task_ids]
with self._create_consumer(bindings, channel) as consumer:
while ids:
r = self.drain_events(conn, consumer, timeout)
ids ^= set(r.keys())
for ready_id, ready_meta in r.iteritems():
yield ready_id, ready_meta
def reload_task_result(self, task_id):
raise NotImplementedError(
"reload_task_result is not supported by this backend.")
def reload_taskset_result(self, task_id):
"""Reload taskset result, even if it has been previously fetched."""
raise NotImplementedError(
"reload_taskset_result is not supported by this backend.")
def save_taskset(self, taskset_id, result):
raise NotImplementedError(
"save_taskset is not supported by this backend.")
def restore_taskset(self, taskset_id, cache=True):
raise NotImplementedError(
"restore_taskset is not supported by this backend.")
def delete_taskset(self, taskset_id):
raise NotImplementedError(
"delete_taskset is not supported by this backend.")
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(connection=self._connection,
exchange=self.exchange.name,
exchange_type=self.exchange.type,
persistent=self.persistent,
serializer=self.serializer,
auto_delete=self.auto_delete,
expires=self.expires))
return super(AMQPBackend, self).__reduce__(args, kwargs)
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/mover/oneLineCompoundOutside.py
|
80
|
if condition:
pass
elif other_condition:
if another_one:
if T<caret>rue: a = 1 # <- move statement up here
else: b = 2
|
canance/signpi-server
|
refs/heads/master
|
manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "signpi.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/completion/hasattrCompletion/hasattrIfPyramidAndOr.py
|
12
|
def foo(x):
if hasattr(x, 'foo'):
if hasattr(x, 'bar') or hasattr(x, 'baz'):
if hasattr(x, 'qux') and (hasattr(x, 'quux') or hasattr(x, 'quuz')) and hasattr(x, 'corge'):
print(x.<caret>)
|
open-power-host-os/builds
|
refs/heads/master
|
lib/subcommands/update_metapackage.py
|
3
|
# Copyright (C) IBM Corp. 2017.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from lib import distro_utils
from lib import exception
from lib.packages_manager import discover_packages
from lib.metapackage import update_metapackage
from lib.versions_repository import setup_versions_repository
LOG = logging.getLogger(__name__)
def run(CONF):
versions_repo = setup_versions_repository(CONF)
package_names = discover_packages()
architecture = CONF.get('architecture')
distro = distro_utils.get_distro(CONF.get('distro_name'),
CONF.get('distro_version'),
architecture)
commit_updates = CONF.get('commit_updates')
push_updates = CONF.get('push_updates')
push_repo_url = CONF.get('push_repo_url')
push_repo_branch = CONF.get('push_repo_branch')
updater_name = CONF.get('updater_name')
updater_email = CONF.get('updater_email')
REQUIRED_PARAMETERS = ["updater_name", "updater_email"]
if push_updates:
REQUIRED_PARAMETERS += ["push_repo_url", "push_repo_branch" ]
for parameter in REQUIRED_PARAMETERS:
if not CONF.get(parameter):
raise exception.RequiredParameterMissing(parameter=parameter)
METAPACKAGE_NAME = "open-power-host-os"
package_names.remove(METAPACKAGE_NAME)
update_metapackage(
versions_repo, distro, METAPACKAGE_NAME, package_names,
updater_name, updater_email)
if commit_updates:
commit_message = (CONF.get('commit_message')
or "Update {} dependencies".format(METAPACKAGE_NAME))
versions_repo.commit_changes(
commit_message, updater_name, updater_email)
if push_updates:
LOG.info("Pushing updated {} files".format(METAPACKAGE_NAME))
versions_repo.push_head_commits(push_repo_url, push_repo_branch)
LOG.info("Metapackage updated succesfully")
|
scrollback/kuma
|
refs/heads/master
|
vendor/packages/ipython/docs/examples/core/example-embed-short.py
|
7
|
"""Quick code snippets for embedding IPython into other programs.
See example-embed.py for full details; this file has the bare minimum code for
cut-and-paste use once you understand how to use the system."""
#---------------------------------------------------------------------------
# This code loads IPython but modifies a few things if it detects it's running
# embedded in another IPython session (helps avoid confusion)
try:
__IPYTHON__
except NameError:
argv = ['']
banner = exit_msg = ''
else:
# Command-line options for IPython (a list like sys.argv)
argv = ['-pi1','In <\\#>:','-pi2',' .\\D.:','-po','Out<\\#>:']
banner = '*** Nested interpreter ***'
exit_msg = '*** Back in main IPython ***'
# First import the embeddable shell class
from IPython.Shell import IPShellEmbed
# Now create the IPython shell instance. Put ipshell() anywhere in your code
# where you want it to open.
ipshell = IPShellEmbed(argv,banner=banner,exit_msg=exit_msg)
#---------------------------------------------------------------------------
# This code will load an embeddable IPython shell always with no changes for
# nested embeddings.
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
# Now ipshell() will open IPython anywhere in the code.
#---------------------------------------------------------------------------
# This code loads an embeddable shell only if NOT running inside
# IPython. Inside IPython, the embeddable shell variable ipshell is just a
# dummy function.
try:
__IPYTHON__
except NameError:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
# Now ipshell() will open IPython anywhere in the code
else:
# Define a dummy ipshell() so the same code doesn't crash inside an
# interactive IPython
def ipshell(): pass
#******************* End of file <example-embed-short.py> ********************
|
adelina-t/neutron
|
refs/heads/master
|
neutron/tests/functional/agent/linux/helpers.py
|
26
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
class RecursivePermDirFixture(fixtures.Fixture):
"""Ensure at least perms permissions on directory and ancestors."""
def __init__(self, directory, perms):
super(RecursivePermDirFixture, self).__init__()
self.directory = directory
self.least_perms = perms
def _setUp(self):
previous_directory = None
current_directory = self.directory
while previous_directory != current_directory:
perms = os.stat(current_directory).st_mode
if perms & self.least_perms != self.least_perms:
os.chmod(current_directory, perms | self.least_perms)
previous_directory = current_directory
current_directory = os.path.dirname(current_directory)
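# Illustrative usage from a functional test (the path and mode below are made up):
#
#   self.useFixture(helpers.RecursivePermDirFixture('/tmp/some/state/dir', 0o555))
#
# After setUp, the directory and each of its ancestors have at least the given
# permission bits set.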
|
uiri/pxqz
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/dispatch/saferef.py
|
86
|
"""
"Safe weakrefs", originally from pyDispatcher.
Provides a way to safely weakref any function, including bound methods (which
aren't handled by the core weakref module).
"""
import traceback
import weakref
def safeRef(target, onDelete = None):
"""Return a *safe* weak reference to a callable target
target -- the object to be weakly referenced, if it's a
bound method reference, will create a BoundMethodWeakref,
otherwise creates a simple weakref.
onDelete -- if provided, will have a hard reference stored
to the callable to be called after the safe reference
goes out of scope with the reference object, (either a
weakref or a BoundMethodWeakref) as argument.
"""
if hasattr(target, 'im_self'):
if target.im_self is not None:
# Turn a bound method into a BoundMethodWeakref instance.
# Keep track of these instances for lookup by disconnect().
assert hasattr(target, 'im_func'), """safeRef target %r has im_self, but no im_func, don't know how to create reference"""%( target,)
reference = get_bound_method_weakref(
target=target,
onDelete=onDelete
)
return reference
if callable(onDelete):
return weakref.ref(target, onDelete)
else:
return weakref.ref( target )
class BoundMethodWeakref(object):
"""'Safe' and reusable weak references to instance methods
BoundMethodWeakref objects provide a mechanism for
referencing a bound method without requiring that the
method object itself (which is normally a transient
object) is kept alive. Instead, the BoundMethodWeakref
object keeps weak references to both the object and the
function which together define the instance method.
Attributes:
key -- the identity key for the reference, calculated
by the class's calculateKey method applied to the
target instance method
deletionMethods -- sequence of callable objects taking
single argument, a reference to this object which
will be called when *either* the target object or
target function is garbage collected (i.e. when
this object becomes invalid). These are specified
as the onDelete parameters of safeRef calls.
weakSelf -- weak reference to the target object
weakFunc -- weak reference to the target function
Class Attributes:
_allInstances -- class attribute pointing to all live
BoundMethodWeakref objects indexed by the class's
calculateKey(target) method applied to the target
objects. This weak value dictionary is used to
short-circuit creation so that multiple references
to the same (object, function) pair produce the
same BoundMethodWeakref instance.
"""
_allInstances = weakref.WeakValueDictionary()
def __new__( cls, target, onDelete=None, *arguments,**named ):
"""Create new instance or return current instance
Basically this method of construction allows us to
short-circuit creation of references to already-
referenced instance methods. The key corresponding
to the target is calculated, and if there is already
an existing reference, that is returned, with its
deletionMethods attribute updated. Otherwise the
new instance is created and registered in the table
of already-referenced methods.
"""
key = cls.calculateKey(target)
        current = cls._allInstances.get(key)
if current is not None:
current.deletionMethods.append( onDelete)
return current
else:
base = super( BoundMethodWeakref, cls).__new__( cls )
cls._allInstances[key] = base
base.__init__( target, onDelete, *arguments,**named)
return base
def __init__(self, target, onDelete=None):
"""Return a weak-reference-like instance for a bound method
target -- the instance-method target for the weak
reference, must have im_self and im_func attributes
and be reconstructable via:
target.im_func.__get__( target.im_self )
which is true of built-in instance methods.
onDelete -- optional callback which will be called
when this weak reference ceases to be valid
(i.e. either the object or the function is garbage
collected). Should take a single argument,
which will be passed a pointer to this object.
"""
def remove(weak, self=self):
"""Set self.isDead to true when method or instance is destroyed"""
methods = self.deletionMethods[:]
del self.deletionMethods[:]
try:
del self.__class__._allInstances[ self.key ]
except KeyError:
pass
for function in methods:
try:
if callable( function ):
function( self )
except Exception, e:
try:
traceback.print_exc()
except AttributeError, err:
print '''Exception during saferef %s cleanup function %s: %s'''%(
self, function, e
)
self.deletionMethods = [onDelete]
self.key = self.calculateKey( target )
self.weakSelf = weakref.ref(target.im_self, remove)
self.weakFunc = weakref.ref(target.im_func, remove)
self.selfName = str(target.im_self)
self.funcName = str(target.im_func.__name__)
def calculateKey( cls, target ):
"""Calculate the reference key for this reference
Currently this is a two-tuple of the id()'s of the
target object and the target function respectively.
"""
return (id(target.im_self),id(target.im_func))
calculateKey = classmethod( calculateKey )
def __str__(self):
"""Give a friendly representation of the object"""
return """%s( %s.%s )"""%(
self.__class__.__name__,
self.selfName,
self.funcName,
)
__repr__ = __str__
def __nonzero__( self ):
"""Whether we are still a valid reference"""
return self() is not None
def __cmp__( self, other ):
"""Compare with another reference"""
if not isinstance (other,self.__class__):
return cmp( self.__class__, type(other) )
return cmp( self.key, other.key)
def __call__(self):
"""Return a strong reference to the bound method
If the target cannot be retrieved, then will
return None, otherwise returns a bound instance
method for our object and function.
Note:
You may call this method any number of times,
as it does not invalidate the reference.
"""
target = self.weakSelf()
if target is not None:
function = self.weakFunc()
if function is not None:
return function.__get__(target)
return None
class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):
"""A specialized BoundMethodWeakref, for platforms where instance methods
are not descriptors.
It assumes that the function name and the target attribute name are the
same, instead of assuming that the function is a descriptor. This approach
is equally fast, but not 100% reliable because functions can be stored on an
    attribute named differently than the function's name, such as in:
class A: pass
def foo(self): return "foo"
A.bar = foo
But this shouldn't be a common use case. So, on platforms where methods
    aren't descriptors (such as Jython), this implementation has the advantage
    of working in most cases.
"""
def __init__(self, target, onDelete=None):
"""Return a weak-reference-like instance for a bound method
target -- the instance-method target for the weak
reference, must have im_self and im_func attributes
and be reconstructable via:
target.im_func.__get__( target.im_self )
which is true of built-in instance methods.
onDelete -- optional callback which will be called
when this weak reference ceases to be valid
(i.e. either the object or the function is garbage
collected). Should take a single argument,
which will be passed a pointer to this object.
"""
assert getattr(target.im_self, target.__name__) == target, \
("method %s isn't available as the attribute %s of %s" %
(target, target.__name__, target.im_self))
super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete)
def __call__(self):
"""Return a strong reference to the bound method
If the target cannot be retrieved, then will
return None, otherwise returns a bound instance
method for our object and function.
Note:
You may call this method any number of times,
as it does not invalidate the reference.
"""
target = self.weakSelf()
if target is not None:
function = self.weakFunc()
if function is not None:
# Using partial() would be another option, but it erases the
# "signature" of the function. That is, after a function is
# curried, the inspect module can't be used to determine how
# many arguments the function expects, nor what keyword
# arguments it supports, and pydispatcher needs this
# information.
return getattr(target, function.__name__)
return None
def get_bound_method_weakref(target, onDelete):
"""Instantiates the appropiate BoundMethodWeakRef, depending on the details of
the underlying class method implementation"""
if hasattr(target, '__get__'):
# target method is a descriptor, so the default implementation works:
return BoundMethodWeakref(target=target, onDelete=onDelete)
else:
# no luck, use the alternative implementation:
return BoundNonDescriptorMethodWeakref(target=target, onDelete=onDelete)
|
mikemow/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/pyvideo.py
|
158
|
from __future__ import unicode_literals
import re
import os
from .common import InfoExtractor
class PyvideoIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pyvideo\.org/video/(?P<id>\d+)/(.*)'
_TESTS = [
{
'url': 'http://pyvideo.org/video/1737/become-a-logging-expert-in-30-minutes',
'md5': 'de317418c8bc76b1fd8633e4f32acbc6',
'info_dict': {
'id': '24_4WWkSmNo',
'ext': 'mp4',
'title': 'Become a logging expert in 30 minutes',
'description': 'md5:9665350d466c67fb5b1598de379021f7',
'upload_date': '20130320',
'uploader': 'NextDayVideo',
'uploader_id': 'NextDayVideo',
},
'add_ie': ['Youtube'],
},
{
'url': 'http://pyvideo.org/video/2542/gloriajw-spotifywitherikbernhardsson182m4v',
'md5': '5fe1c7e0a8aa5570330784c847ff6d12',
'info_dict': {
'id': '2542',
'ext': 'm4v',
'title': 'Gloriajw-SpotifyWithErikBernhardsson182',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', webpage)
if m_youtube is not None:
return self.url_result(m_youtube.group(1), 'Youtube')
title = self._html_search_regex(
r'<div class="section">\s*<h3(?:\s+class="[^"]*"[^>]*)?>([^>]+?)</h3>',
webpage, 'title', flags=re.DOTALL)
video_url = self._search_regex(
[r'<source src="(.*?)"', r'<dt>Download</dt>.*?<a href="(.+?)"'],
webpage, 'video url', flags=re.DOTALL)
return {
'id': video_id,
'title': os.path.splitext(title)[0],
'url': video_url,
}
|
person142/scipy
|
refs/heads/master
|
scipy/interpolate/tests/test_fitpack.py
|
3
|
import itertools
import os
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_,
assert_almost_equal, assert_array_almost_equal)
from pytest import raises as assert_raises
from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
from scipy import interpolate
from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
from scipy.interpolate.dfitpack import regrid_smth
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return sqrt(dot(x.T,x))
def f1(x,d=0):
if d is None:
return "sin"
if x is None:
return "sin(x)"
if d % 4 == 0:
return sin(x)
if d % 4 == 1:
return cos(x)
if d % 4 == 2:
return -sin(x)
if d % 4 == 3:
return -cos(x)
def f2(x,y=0,dx=0,dy=0):
if x is None:
return "sin(x+y)"
d = dx+dy
if d % 4 == 0:
return sin(x+y)
if d % 4 == 1:
return cos(x+y)
if d % 4 == 2:
return -sin(x+y)
if d % 4 == 3:
return -cos(x+y)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
xy = array(list(itertools.product(asarray(x), asarray(y))))
return xy.T
def put(*a):
"""Produce some output if file run directly"""
import sys
if hasattr(sys.modules['__main__'], '__put_prints'):
sys.stderr.write("".join(map(str, a)) + "\n")
class TestSmokeTests(object):
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v = f(x)
nk = []
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
if at:
t = tck[0][k:-k]
else:
t = x1
nd = []
for d in range(k+1):
tol = err_est(k, d)
err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d))
assert_(err < tol, (k, d, err, tol))
nd.append((err, tol))
nk.append(nd)
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
if at:
str = "at knots"
else:
str = "at the middle of nodes"
put(" per=%d s=%s Evaluation %s" % (per,repr(s),str))
put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
k = 1
for l in nk:
put(' %d : ' % k)
for r in l:
put(' %.1e %.1e' % r)
put('\n')
k = k+1
def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
nk = []
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
nk.append([splint(ia,ib,tck),spalde(dx,tck)])
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3))))
put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
k = 1
for r in nk:
if r[0] < 0:
sr = '-'
else:
sr = ' '
put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]),
abs(r[0]-(f(ib,-1)-f(ia,-1)))))
d = 0
for dr in r[1]:
err = abs(1-dr/f(dx,d))
tol = err_est(k, d)
assert_(err < tol, (k, d))
put(" %.1e %.1e" % (err, tol))
d = d+1
put("\n")
k = k+1
def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
(f(None),repr(round(a,3)),repr(round(b,3))))
for k in range(1,6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if k == 3:
roots = sproot(tck)
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3)
put(' %d : %s' % (k, repr(roots.tolist())))
else:
assert_raises(ValueError, sproot, tck)
def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v, _ = f(x),f(x1)
put(" u = %s N = %d" % (repr(round(dx,3)),N))
put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None)))
for k in range(1,6):
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
tck = splrep(x,v,s=s,per=per,k=k)
uv = splev(dx,tckp)
err1 = abs(uv[1]-f(uv[0]))
err2 = abs(splev(uv[0],tck)-f(uv[0]))
assert_(err1 < 1e-2)
assert_(err2 < 1e-2)
put(" %d : %s %.1e %.1e" %
(k,repr([round(z,3) for z in uv]),
err1,
err2))
put("Derivatives of parametric cubic spline at u (first function):")
k = 3
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
for d in range(1,k+1):
uv = splev(dx,tckp,d)
put(" %s " % (repr(uv[0])))
def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0):
x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx)
y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny)
xy = makepairs(x,y)
tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky)
tt = [tck[0][kx:-kx],tck[1][ky:-ky]]
t2 = makepairs(tt[0],tt[1])
v1 = bisplev(tt[0],tt[1],tck)
v2 = f2(t2[0],t2[1])
v2.shape = len(tt[0]),len(tt[1])
err = norm2(ravel(v1-v2))
assert_(err < 1e-2, err)
put(err)
def test_smoke_splrep_splev(self):
put("***************** splrep/splev")
self.check_1(s=1e-6)
self.check_1()
self.check_1(at=1)
self.check_1(per=1)
self.check_1(per=1,at=1)
self.check_1(b=1.5*pi)
self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1)
def test_smoke_splint_spalde(self):
put("***************** splint/spalde")
self.check_2()
self.check_2(per=1)
self.check_2(ia=0.2*pi,ib=pi)
self.check_2(ia=0.2*pi,ib=pi,N=50)
def test_smoke_sproot(self):
put("***************** sproot")
self.check_3(a=0.1,b=15)
def test_smoke_splprep_splrep_splev(self):
put("***************** splprep/splrep/splev")
self.check_4()
self.check_4(N=50)
def test_smoke_bisplrep_bisplev(self):
put("***************** bisplev")
self.check_5()
class TestSplev(object):
def test_1d_shape(self):
x = [1,2,3,4,5]
y = [4,5,6,7,8]
tck = splrep(x, y)
z = splev([1], tck)
assert_equal(z.shape, (1,))
z = splev(1, tck)
assert_equal(z.shape, ())
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
assert_equal(z, np.row_stack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1,2,3]
y = [0,2,4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
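        # The data lie on the line y = 2*x - 2, so extrapolation at x=0 and x=4
        # gives [-2, 6] for ext=0; ext=1 returns zeros, ext=2 raises (hence the
        # None placeholder), and ext=3 clamps to the boundary values [0, 4].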
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
class TestSplder(object):
def setup_method(self):
        # non-uniform grid, just to make sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert_(np.diff(self.spl[0]).ptp() > 0)
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
assert_allclose(self.spl[0], spl3[0])
assert_allclose(self.spl[1], spl3[1])
assert_equal(self.spl[2], spl3[2])
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
assert_allclose(dy, dy2, rtol=2e-6)
else:
assert_allclose(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
assert_allclose(y1, y2)
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
def test_multidim(self):
# c can have trailing dims
for n in range(3):
t, c, k = self.spl
c2 = np.c_[c, c, c]
c2 = np.dstack((c2, c2))
spl2 = splantider((t, c2, k), n)
spl3 = splder(spl2, n)
assert_allclose(t, spl3[0])
assert_allclose(c2, spl3[1])
assert_equal(k, spl3[2])
class TestBisplrep(object):
def test_overflow(self):
a = np.linspace(0, 1, 620)
b = np.linspace(0, 1, 620)
x, y = np.meshgrid(a, b)
z = np.random.rand(*x.shape)
assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0)
def test_regression_1310(self):
# Regression test for gh-1310
data = np.load(data_file('bug-1310.npz'))['data']
        # Shouldn't crash -- the input data triggers work array sizes
        # that previously caused some data not to be aligned on
        # sizeof(double) boundaries in memory, which made the Fortran
        # code crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
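    # For f(x, y) = 4*x*y the exact integral over [x0, x1] x [y0, y1] is
    # (x1**2 - x0**2) * (y1**2 - y0**2), which gives the expected values below.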
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
def test_bisplev_integer_overflow():
np.random.seed(1)
x = np.linspace(0, 1, 11)
y = x
z = np.random.randn(11, 11).ravel()
kx = 1
ky = 1
nx, tx, ny, ty, c, fp, ier = regrid_smth(
x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
xp = np.zeros([2621440])
yp = np.zeros([2621440])
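    # The 2621440 x 2621440 evaluation grid presumably makes the internal
    # work-array size exceed a 32-bit integer, so bisplev should fail with a
    # clean error rather than overflow.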
assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
|
dkubiak789/OpenUpgrade
|
refs/heads/8.0
|
openerp/addons/base/module/wizard/base_export_language.py
|
178
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2004-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'
class base_language_export(osv.osv_memory):
_name = "base.language.export"
def _get_languages(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
langs = lang_obj.browse(cr, uid, ids)
return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs]
_columns = {
'name': fields.char('File Name', readonly=True),
'lang': fields.selection(_get_languages, 'Language', required=True),
'format': fields.selection([('csv','CSV File'),
('po','PO File'),
('tgz', 'TGZ Archive')], 'File Format', required=True),
'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Modules To Export', domain=[('state','=','installed')]),
'data': fields.binary('File', readonly=True),
'state': fields.selection([('choose', 'choose'), # choose language
('get', 'get')]) # get the file
}
_defaults = {
'state': 'choose',
'name': 'lang.tar.gz',
'lang': NEW_LANG_KEY,
'format': 'csv',
}
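    # The wizard runs in two steps: in state 'choose' the user selects a language
    # and format; act_getfile then generates the export, stores it base64-encoded
    # in 'data', switches the state to 'get' and re-opens the same form so the
    # generated file can be downloaded.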
def act_getfile(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
lang = this.lang if this.lang != NEW_LANG_KEY else False
mods = map(lambda m: m.name, this.modules) or ['all']
mods.sort()
buf = cStringIO.StringIO()
tools.trans_export(lang, mods, buf, this.format, cr)
filename = 'new'
if lang:
filename = get_iso_codes(lang)
elif len(mods) == 1:
filename = mods[0]
this.name = "%s.%s" % (filename, this.format)
out = base64.encodestring(buf.getvalue())
buf.close()
self.write(cr, uid, ids, {'state': 'get',
'data': out,
'name':this.name}, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'base.language.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
instantchow/home-assistant
|
refs/heads/master
|
tests/components/test_api.py
|
4
|
"""The tests for the Home Assistant HTTP component."""
# pylint: disable=protected-access,too-many-public-methods
from contextlib import closing
import json
import tempfile
import unittest
from unittest.mock import patch
import requests
from homeassistant import bootstrap, const
import homeassistant.core as ha
import homeassistant.components.http as http
from tests.common import get_test_instance_port, get_test_home_assistant
API_PASSWORD = "test1234"
SERVER_PORT = get_test_instance_port()
HTTP_BASE_URL = "http://127.0.0.1:{}".format(SERVER_PORT)
HA_HEADERS = {const.HTTP_HEADER_HA_AUTH: API_PASSWORD}
hass = None
def _url(path=""):
"""Helper method to generate URLs."""
return HTTP_BASE_URL + path
def setUpModule(): # pylint: disable=invalid-name
"""Initialize a Home Assistant server."""
global hass
hass = get_test_home_assistant()
hass.bus.listen('test_event', lambda _: _)
hass.states.set('test.test', 'a_state')
bootstrap.setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
http.CONF_SERVER_PORT: SERVER_PORT}})
bootstrap.setup_component(hass, 'api')
hass.start()
def tearDownModule(): # pylint: disable=invalid-name
"""Stop the Home Assistant server."""
hass.stop()
class TestAPI(unittest.TestCase):
"""Test the API."""
def tearDown(self):
"""Stop everything that was started."""
hass.pool.block_till_done()
# TODO move back to http component and test with use_auth.
def test_access_denied_without_password(self):
"""Test access without password."""
req = requests.get(_url(const.URL_API))
self.assertEqual(401, req.status_code)
def test_access_denied_with_wrong_password(self):
"""Test ascces with wrong password."""
req = requests.get(
_url(const.URL_API),
headers={const.HTTP_HEADER_HA_AUTH: 'wrongpassword'})
self.assertEqual(401, req.status_code)
def test_access_with_password_in_url(self):
"""Test access with password in URL."""
req = requests.get(
"{}?api_password={}".format(_url(const.URL_API), API_PASSWORD))
self.assertEqual(200, req.status_code)
def test_access_via_session(self):
"""Test access wia session."""
session = requests.Session()
req = session.get(_url(const.URL_API), headers=HA_HEADERS)
self.assertEqual(200, req.status_code)
req = session.get(_url(const.URL_API))
self.assertEqual(200, req.status_code)
def test_api_list_state_entities(self):
"""Test if the debug interface allows us to list state entities."""
req = requests.get(_url(const.URL_API_STATES),
headers=HA_HEADERS)
remote_data = [ha.State.from_dict(item) for item in req.json()]
self.assertEqual(hass.states.all(), remote_data)
def test_api_get_state(self):
"""Test if the debug interface allows us to get a state."""
req = requests.get(
_url(const.URL_API_STATES_ENTITY.format("test.test")),
headers=HA_HEADERS)
data = ha.State.from_dict(req.json())
state = hass.states.get("test.test")
self.assertEqual(state.state, data.state)
self.assertEqual(state.last_changed, data.last_changed)
self.assertEqual(state.attributes, data.attributes)
def test_api_get_non_existing_state(self):
"""Test if the debug interface allows us to get a state."""
req = requests.get(
_url(const.URL_API_STATES_ENTITY.format("does_not_exist")),
headers=HA_HEADERS)
self.assertEqual(404, req.status_code)
def test_api_state_change(self):
"""Test if we can change the state of an entity that exists."""
hass.states.set("test.test", "not_to_be_set")
requests.post(_url(const.URL_API_STATES_ENTITY.format("test.test")),
data=json.dumps({"state": "debug_state_change2"}),
headers=HA_HEADERS)
self.assertEqual("debug_state_change2",
hass.states.get("test.test").state)
# pylint: disable=invalid-name
def test_api_state_change_of_non_existing_entity(self):
"""Test if changing a state of a non existing entity is possible."""
new_state = "debug_state_change"
req = requests.post(
_url(const.URL_API_STATES_ENTITY.format(
"test_entity.that_does_not_exist")),
data=json.dumps({'state': new_state}),
headers=HA_HEADERS)
cur_state = (hass.states.
get("test_entity.that_does_not_exist").state)
self.assertEqual(201, req.status_code)
self.assertEqual(cur_state, new_state)
# pylint: disable=invalid-name
def test_api_state_change_with_bad_data(self):
"""Test if API sends appropriate error if we omit state."""
req = requests.post(
_url(const.URL_API_STATES_ENTITY.format(
"test_entity.that_does_not_exist")),
data=json.dumps({}),
headers=HA_HEADERS)
self.assertEqual(400, req.status_code)
# pylint: disable=invalid-name
def test_api_fire_event_with_no_data(self):
"""Test if the API allows us to fire an event."""
test_value = []
def listener(event):
"""Helper method that will verify our event got called."""
test_value.append(1)
hass.bus.listen_once("test.event_no_data", listener)
requests.post(
_url(const.URL_API_EVENTS_EVENT.format("test.event_no_data")),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(1, len(test_value))
# pylint: disable=invalid-name
def test_api_fire_event_with_data(self):
"""Test if the API allows us to fire an event."""
test_value = []
def listener(event):
"""Helper method that will verify that our event got called.
Also test if our data came through.
"""
if "test" in event.data:
test_value.append(1)
hass.bus.listen_once("test_event_with_data", listener)
requests.post(
_url(const.URL_API_EVENTS_EVENT.format("test_event_with_data")),
data=json.dumps({"test": 1}),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(1, len(test_value))
# pylint: disable=invalid-name
def test_api_fire_event_with_invalid_json(self):
"""Test if the API allows us to fire an event."""
test_value = []
def listener(event):
"""Helper method that will verify our event got called."""
test_value.append(1)
hass.bus.listen_once("test_event_bad_data", listener)
req = requests.post(
_url(const.URL_API_EVENTS_EVENT.format("test_event_bad_data")),
data=json.dumps('not an object'),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(422, req.status_code)
self.assertEqual(0, len(test_value))
# Try now with valid but unusable JSON
req = requests.post(
_url(const.URL_API_EVENTS_EVENT.format("test_event_bad_data")),
data=json.dumps([1, 2, 3]),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(422, req.status_code)
self.assertEqual(0, len(test_value))
def test_api_get_config(self):
"""Test the return of the configuration."""
req = requests.get(_url(const.URL_API_CONFIG),
headers=HA_HEADERS)
self.assertEqual(hass.config.as_dict(), req.json())
def test_api_get_components(self):
"""Test the return of the components."""
req = requests.get(_url(const.URL_API_COMPONENTS),
headers=HA_HEADERS)
self.assertEqual(hass.config.components, req.json())
def test_api_get_error_log(self):
"""Test the return of the error log."""
test_content = 'Test String'
with tempfile.NamedTemporaryFile() as log:
log.write(test_content.encode('utf-8'))
log.flush()
with patch.object(hass.config, 'path', return_value=log.name):
req = requests.get(_url(const.URL_API_ERROR_LOG),
headers=HA_HEADERS)
self.assertEqual(test_content, req.text)
self.assertIsNone(req.headers.get('expires'))
def test_api_get_event_listeners(self):
"""Test if we can get the list of events being listened for."""
req = requests.get(_url(const.URL_API_EVENTS),
headers=HA_HEADERS)
local = hass.bus.listeners
for event in req.json():
self.assertEqual(event["listener_count"],
local.pop(event["event"]))
self.assertEqual(0, len(local))
def test_api_get_services(self):
"""Test if we can get a dict describing current services."""
req = requests.get(_url(const.URL_API_SERVICES),
headers=HA_HEADERS)
local_services = hass.services.services
for serv_domain in req.json():
local = local_services.pop(serv_domain["domain"])
self.assertEqual(local, serv_domain["services"])
def test_api_call_service_no_data(self):
"""Test if the API allows us to call a service."""
test_value = []
def listener(service_call):
"""Helper method that will verify that our service got called."""
test_value.append(1)
hass.services.register("test_domain", "test_service", listener)
requests.post(
_url(const.URL_API_SERVICES_SERVICE.format(
"test_domain", "test_service")),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(1, len(test_value))
def test_api_call_service_with_data(self):
"""Test if the API allows us to call a service."""
test_value = []
def listener(service_call):
"""Helper method that will verify that our service got called.
Also test if our data came through.
"""
if "test" in service_call.data:
test_value.append(1)
hass.services.register("test_domain", "test_service", listener)
requests.post(
_url(const.URL_API_SERVICES_SERVICE.format(
"test_domain", "test_service")),
data=json.dumps({"test": 1}),
headers=HA_HEADERS)
hass.pool.block_till_done()
self.assertEqual(1, len(test_value))
def test_api_template(self):
"""Test the template API."""
hass.states.set('sensor.temperature', 10)
req = requests.post(
_url(const.URL_API_TEMPLATE),
data=json.dumps({"template":
'{{ states.sensor.temperature.state }}'}),
headers=HA_HEADERS)
self.assertEqual('10', req.text)
def test_api_template_error(self):
"""Test the template API."""
hass.states.set('sensor.temperature', 10)
req = requests.post(
_url(const.URL_API_TEMPLATE),
data=json.dumps({"template":
'{{ states.sensor.temperature.state'}),
headers=HA_HEADERS)
self.assertEqual(422, req.status_code)
def test_api_event_forward(self):
"""Test setting up event forwarding."""
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
headers=HA_HEADERS)
self.assertEqual(400, req.status_code)
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({'host': '127.0.0.1'}),
headers=HA_HEADERS)
self.assertEqual(400, req.status_code)
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({'api_password': 'bla-di-bla'}),
headers=HA_HEADERS)
self.assertEqual(400, req.status_code)
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({
'api_password': 'bla-di-bla',
'host': '127.0.0.1',
'port': 'abcd'
}),
headers=HA_HEADERS)
self.assertEqual(422, req.status_code)
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({
'api_password': 'bla-di-bla',
'host': '127.0.0.1',
'port': get_test_instance_port()
}),
headers=HA_HEADERS)
self.assertEqual(422, req.status_code)
# Setup a real one
req = requests.post(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({
'api_password': API_PASSWORD,
'host': '127.0.0.1',
'port': SERVER_PORT
}),
headers=HA_HEADERS)
self.assertEqual(200, req.status_code)
# Delete it again..
req = requests.delete(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({}),
headers=HA_HEADERS)
self.assertEqual(400, req.status_code)
req = requests.delete(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({
'host': '127.0.0.1',
'port': 'abcd'
}),
headers=HA_HEADERS)
self.assertEqual(422, req.status_code)
req = requests.delete(
_url(const.URL_API_EVENT_FORWARD),
data=json.dumps({
'host': '127.0.0.1',
'port': SERVER_PORT
}),
headers=HA_HEADERS)
self.assertEqual(200, req.status_code)
def test_stream(self):
"""Test the stream."""
listen_count = self._listen_count()
with closing(requests.get(_url(const.URL_API_STREAM),
stream=True, headers=HA_HEADERS)) as req:
data = self._stream_next_event(req)
self.assertEqual('ping', data)
self.assertEqual(listen_count + 1, self._listen_count())
hass.bus.fire('test_event')
hass.pool.block_till_done()
data = self._stream_next_event(req)
self.assertEqual('test_event', data['event_type'])
def test_stream_with_restricted(self):
"""Test the stream with restrictions."""
listen_count = self._listen_count()
with closing(requests.get(_url(const.URL_API_STREAM),
data=json.dumps({
'restrict': 'test_event1,test_event3'}),
stream=True, headers=HA_HEADERS)) as req:
data = self._stream_next_event(req)
self.assertEqual('ping', data)
self.assertEqual(listen_count + 2, self._listen_count())
hass.bus.fire('test_event1')
hass.pool.block_till_done()
hass.bus.fire('test_event2')
hass.pool.block_till_done()
hass.bus.fire('test_event3')
hass.pool.block_till_done()
data = self._stream_next_event(req)
self.assertEqual('test_event1', data['event_type'])
data = self._stream_next_event(req)
self.assertEqual('test_event3', data['event_type'])
def _stream_next_event(self, stream):
"""Test the stream for next event."""
data = b''
last_new_line = False
for dat in stream.iter_content(1):
if dat == b'\n' and last_new_line:
break
data += dat
last_new_line = dat == b'\n'
        conv = data.decode('utf-8').strip()[6:]  # strip the 6-char "data: " SSE prefix
return conv if conv == 'ping' else json.loads(conv)
def _listen_count(self):
"""Return number of event listeners."""
return sum(hass.bus.listeners.values())
|
rchav/vinerack
|
refs/heads/master
|
saleor/dashboard/discount/__init__.py
|
12133432
| |
memtoko/django
|
refs/heads/master
|
tests/signed_cookies_tests/__init__.py
|
12133432
| |
liorvh/golismero
|
refs/heads/master
|
plugins/report/__init__.py
|
12133432
| |
collingreen/yaib_ludumdare
|
refs/heads/master
|
modules/__init__.py
|
12133432
| |
simsong/grr-insider
|
refs/heads/master
|
lib/aff4_objects/client_stats.py
|
3
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""AFF4 object representing client stats."""
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib.aff4_objects import standard
class ClientStats(standard.VFSDirectory):
"""A container for all client statistics."""
class SchemaCls(standard.VFSDirectory.SchemaCls):
STATS = aff4.Attribute("aff4:stats", rdfvalue.ClientStats,
"Client Stats.", "Client stats")
|
oliverlee/sympy
|
refs/heads/master
|
sympy/matrices/expressions/tests/test_dotproduct.py
|
12
|
from sympy.matrices import Matrix
from sympy.matrices.expressions.dotproduct import DotProduct
from sympy.utilities.pytest import raises
A = Matrix(3, 1, [1, 2, 3])
B = Matrix(3, 1, [1, 3, 5])
C = Matrix(4, 1, [1, 2, 4, 5])
def test_dotproduct():
assert DotProduct(A, B).doit() == 22
raises(TypeError, lambda: DotProduct(A.T, B).doit())
raises(TypeError, lambda: DotProduct(A, B.T).doit())
raises(TypeError, lambda: DotProduct(B, C).doit())
assert DotProduct(A.T, B.T).doit() == 22
|