| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
slashk/goldstone-server
|
refs/heads/master
|
goldstone/core/pagination.py
|
2
|
"""Default pagination."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Solinea Software License Agreement (goldstone),
# Version 1.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# http://www.solinea.com/goldstone/LICENSE.pdf
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.pagination import PageNumberPagination


class Pagination(PageNumberPagination):
    """Perform the standard pagination, and allow the caller to specify the
    page size."""

    page_size_query_param = "page_size"
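
# --- Hedged usage sketch (not part of the original file) ---
# Because page_size_query_param is set, clients can override the page size
# per request once this class is installed as DRF's default paginator.
# Assuming this module is importable as goldstone.core.pagination, the
# settings.py wiring would be:
#
#   REST_FRAMEWORK = {
#       "DEFAULT_PAGINATION_CLASS": "goldstone.core.pagination.Pagination",
#       "PAGE_SIZE": 10,
#   }
#
# A client could then request, e.g., /nodes/?page=2&page_size=50
# (the endpoint path is illustrative).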
|
liangjiaxing/sympy
|
refs/heads/master
|
sympy/strategies/core.py
|
94
|
""" Generic SymPy-Independent Strategies """
from __future__ import print_function, division
from sympy.core.compatibility import get_function_name
identity = lambda x: x
def exhaust(rule):
""" Apply a rule repeatedly until it has no effect """
def exhaustive_rl(expr):
new, old = rule(expr), expr
while(new != old):
new, old = rule(new), new
return new
return exhaustive_rl
def memoize(rule):
""" Memoized version of a rule """
cache = {}
def memoized_rl(expr):
if expr in cache:
return cache[expr]
else:
result = rule(expr)
cache[expr] = result
return result
return memoized_rl
def condition(cond, rule):
""" Only apply rule if condition is true """
def conditioned_rl(expr):
if cond(expr):
return rule(expr)
else:
return expr
return conditioned_rl
def chain(*rules):
"""
Compose a sequence of rules so that they apply to the expr sequentially
"""
def chain_rl(expr):
for rule in rules:
expr = rule(expr)
return expr
return chain_rl
def debug(rule, file=None):
""" Print out before and after expressions each time rule is used """
if file is None:
from sys import stdout
file = stdout
def debug_rl(*args, **kwargs):
expr = args[0]
result = rule(*args, **kwargs)
if result != expr:
file.write("Rule: %s\n" % get_function_name(rule))
file.write("In: %s\nOut: %s\n\n"%(expr, result))
return result
return debug_rl
def null_safe(rule):
""" Return original expr if rule returns None """
def null_safe_rl(expr):
result = rule(expr)
if result is None:
return expr
else:
return result
return null_safe_rl
def tryit(rule):
""" Return original expr if rule raises exception """
def try_rl(expr):
try:
return rule(expr)
except Exception:
return expr
return try_rl
def do_one(*rules):
""" Try each of the rules until one works. Then stop. """
def do_one_rl(expr):
for rl in rules:
result = rl(expr)
if result != expr:
return result
return expr
return do_one_rl
def switch(key, ruledict):
""" Select a rule based on the result of key called on the function """
def switch_rl(expr):
rl = ruledict.get(key(expr), identity)
return rl(expr)
return switch_rl
identity = lambda x: x
def minimize(*rules, **kwargs):
""" Select result of rules that minimizes objective
>>> from sympy.strategies import minimize
>>> inc = lambda x: x + 1
>>> dec = lambda x: x - 1
>>> rl = minimize(inc, dec)
>>> rl(4)
3
>>> rl = minimize(inc, dec, objective=lambda x: -x) # maximize
>>> rl(4)
5
"""
objective = kwargs.get('objective', identity)
def minrule(expr):
return min([rule(expr) for rule in rules], key=objective)
return minrule
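
# --- Hedged composition sketch (not in the original module) ---
# These combinators are ordinary higher-order functions, so they stack.
# A small demo, using only the definitions above:
if __name__ == '__main__':
    # Halve a number while it stays even, via exhaust + condition.
    halve_while_even = exhaust(condition(lambda x: x % 2 == 0,
                                         lambda x: x // 2))
    print(halve_while_even(40))  # 5
    print(halve_while_even(7))   # 7 (condition is false, expr unchanged)

    # chain applies every rule in order; do_one stops at the first rule
    # that actually changes the expression.
    inc = lambda x: x + 1
    dbl = lambda x: x * 2
    print(chain(inc, dbl)(3))    # (3 + 1) * 2 == 8
    print(do_one(dbl, inc)(3))   # dbl changes 3 first -> 6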
|
Aristocles/CouchPotatoServer
|
refs/heads/master
|
libs/requests/packages/urllib3/connectionpool.py
|
316
|
# urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectionError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
LocationParseError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if host is None:
raise LocationParseError(host)
# httplib doesn't like it when we include brackets in ipv6 addresses
host = host.strip('[]')
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. Once the constructor has
run, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None, **conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
if sys.version_info < (2, 7): # Python 2.6 and older
conn_kw.pop('source_address', None)
self.conn_kw = conn_kw
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
try:
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if hasattr(conn, 'sock'):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except SocketTimeout:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
except BaseSSLError as e:
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.")
raise
except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
If `False`, then retries are disabled and any exception is raised
immediately.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0 and retries is not False:
raise MaxRetryError(self, url)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Release connection unconditionally because there is no way to
# close it externally in case of exception.
release_conn = True
raise SSLError(e)
except (TimeoutError, HTTPException, SocketError) as e:
if conn:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
conn.close()
conn = None
if not retries:
if isinstance(e, TimeoutError):
# TimeoutError is exempt from MaxRetryError-wrapping.
# FIXME: ... Not sure why. Add a reason here.
raise
# Wrap unexpected exceptions with the most appropriate
# module-level exception and re-raise.
if isinstance(e, SocketError) and self.proxy:
raise ProxyError('Cannot connect to proxy.', e)
if retries is False:
raise ConnectionError('Connection failed.', e)
raise MaxRetryError(self, url, e)
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location and retries is not False:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
if sys.version_info < (2, 7): # Python 2.6 or older
conn_kw.pop('source_address', None)
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, _proxy, _proxy_headers, **conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.conn_kw = conn_kw
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
conn.conn_kw = self.conn_kw
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
extra_params.update(self.conn_kw)
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
**extra_params)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return self._prepare_conn(conn)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
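
# --- Hedged usage sketch (not part of the original module) ---
# connection_from_url picks the pool class from the URL scheme; the pool
# inherits request() from RequestMethods, which funnels into urlopen().
if __name__ == '__main__':
    pool = connection_from_url('http://example.com/', maxsize=2, timeout=3.0)
    r = pool.request('GET', '/')  # convenience wrapper around urlopen()
    print(r.status, r.headers.get('content-type'))
    r2 = pool.urlopen('GET', '/', retries=2, redirect=True)
    print(r2.status)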
|
pymedusa/Medusa
|
refs/heads/master
|
ext/boto/sqs/batchresults.py
|
191
|
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
A set of results returned by SendMessageBatch.
"""
class ResultEntry(dict):
    """
    The result (successful or unsuccessful) of a single
    message within a send_message_batch request.

    In the case of a successful result, this dict-like
    object will contain the following items:

    :ivar id: A string containing the user-supplied ID of the message.
    :ivar message_id: A string containing the SQS ID of the new message.
    :ivar message_md5: A string containing the MD5 hash of the message body.

    In the case of an error, this object will contain the following
    items:

    :ivar id: A string containing the user-supplied ID of the message.
    :ivar sender_fault: A boolean value.
    :ivar error_code: A string containing a short description of the error.
    :ivar error_message: A string containing a description of the error.
    """

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Id':
            self['id'] = value
        elif name == 'MessageId':
            self['message_id'] = value
        elif name == 'MD5OfMessageBody':
            self['message_md5'] = value
        elif name == 'SenderFault':
            self['sender_fault'] = value
        elif name == 'Code':
            self['error_code'] = value
        elif name == 'Message':
            self['error_message'] = value


class BatchResults(object):
    """
    A container for the results of a send_message_batch request.

    :ivar results: A list of successful results. Each item in the
        list will be an instance of :class:`ResultEntry`.
    :ivar errors: A list of unsuccessful results. Each item in the
        list will be an instance of :class:`ResultEntry`.
    """

    def __init__(self, parent):
        self.parent = parent
        self.results = []
        self.errors = []

    def startElement(self, name, attrs, connection):
        if name.endswith('MessageBatchResultEntry'):
            entry = ResultEntry()
            self.results.append(entry)
            return entry
        if name == 'BatchResultErrorEntry':
            entry = ResultEntry()
            self.errors.append(entry)
            return entry
        return None

    def endElement(self, name, value, connection):
        setattr(self, name, value)
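
# --- Hedged usage sketch (not in the original file) ---
# BatchResults is what boto hands back from Queue.write_batch() (the
# send_message_batch call). Region, queue name and messages are illustrative.
if __name__ == '__main__':
    import boto.sqs

    conn = boto.sqs.connect_to_region('us-east-1')
    queue = conn.get_queue('my-queue')  # hypothetical queue name
    batch = queue.write_batch([('msg1', 'hello', 0), ('msg2', 'world', 0)])
    for entry in batch.results:
        print(entry['id'], entry['message_id'])
    for entry in batch.errors:
        print(entry['id'], entry['error_code'], entry['error_message'])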
|
foxichu/etherkeeper
|
refs/heads/master
|
etherkeeper/etherpad/migrations/0002_auto__chg_field_padauthor_folder__chg_field_padauthor_author.py
|
2
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Changing field 'PadAuthor.folder'
        db.alter_column(u'etherpad_padauthor', 'folder_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organize.Folder'], null=True))

        # Changing field 'PadAuthor.author'
        db.alter_column(u'etherpad_padauthor', 'author_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Author'], null=True))

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'PadAuthor.folder'
        raise RuntimeError("Cannot reverse this migration. 'PadAuthor.folder' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Changing field 'PadAuthor.folder'
        db.alter_column(u'etherpad_padauthor', 'folder_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organize.Folder']))

        # User chose to not deal with backwards NULL issues for 'PadAuthor.author'
        raise RuntimeError("Cannot reverse this migration. 'PadAuthor.author' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Changing field 'PadAuthor.author'
        db.alter_column(u'etherpad_padauthor', 'author_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Author']))

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.author': {
            'Meta': {'object_name': 'Author'},
            'etherpad_id': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'etherpad.pad': {
            'Meta': {'object_name': 'Pad'},
            'groupid': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'padid': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'etherpad.padauthor': {
            'Meta': {'object_name': 'PadAuthor'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Author']", 'null': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organize.Folder']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pad': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['etherpad.Pad']", 'unique': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organize.Tag']", 'symmetrical': 'False'})
        },
        u'organize.folder': {
            'Meta': {'object_name': 'Folder'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Author']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organize.Folder']"})
        },
        u'organize.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Author']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['etherpad']
|
alilotfi/django
|
refs/heads/master
|
django/test/__init__.py
|
341
|
"""
Django Unit Test and Doctest framework.
"""
from django.test.client import Client, RequestFactory
from django.test.testcases import (
LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase,
skipIfDBFeature, skipUnlessAnyDBFeature, skipUnlessDBFeature,
)
from django.test.utils import (
ignore_warnings, modify_settings, override_settings,
override_system_checks,
)
__all__ = [
'Client', 'RequestFactory', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'LiveServerTestCase', 'skipIfDBFeature',
'skipUnlessAnyDBFeature', 'skipUnlessDBFeature', 'ignore_warnings',
'modify_settings', 'override_settings', 'override_system_checks'
]
# To simplify Django's test suite; not meant as a public API
try:
from unittest import mock # NOQA
except ImportError:
try:
import mock # NOQA
except ImportError:
pass
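
# --- Hedged usage sketch (not part of the module) ---
# Everything listed in __all__ is public API; the trailing mock import is
# internal, as the comment above says. A minimal use of the re-exports,
# written as a comment since test code does not belong in this module:
#
#   from django.test import TestCase, override_settings
#
#   class DebugFlagTests(TestCase):
#       @override_settings(DEBUG=True)
#       def test_debug_enabled(self):
#           from django.conf import settings
#           self.assertTrue(settings.DEBUG)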
|
googleads/google-ads-python
|
refs/heads/master
|
google/ads/googleads/v6/enums/types/call_placeholder_field.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

__protobuf__ = proto.module(
    package="google.ads.googleads.v6.enums",
    marshal="google.ads.googleads.v6",
    manifest={"CallPlaceholderFieldEnum",},
)


class CallPlaceholderFieldEnum(proto.Message):
    r"""Values for Call placeholder fields."""

    class CallPlaceholderField(proto.Enum):
        r"""Possible values for Call placeholder fields."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        PHONE_NUMBER = 2
        COUNTRY_CODE = 3
        TRACKED = 4
        CONVERSION_TYPE_ID = 5
        CONVERSION_REPORTING_STATE = 6


__all__ = tuple(sorted(__protobuf__.manifest))
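
# --- Hedged usage sketch (not in the generated file) ---
# proto-plus enums are based on enum.IntEnum, so members can be looked up
# by name or by value:
if __name__ == '__main__':
    field = CallPlaceholderFieldEnum.CallPlaceholderField.PHONE_NUMBER
    print(field.name, field.value)                                # PHONE_NUMBER 2
    print(CallPlaceholderFieldEnum.CallPlaceholderField(3).name)  # COUNTRY_CODE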
|
cleydson/scrapy
|
refs/heads/master
|
scrapy/utils/reqser.py
|
110
|
"""
Helper functions for serializing (and deserializing) requests.
"""
import six
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
def request_to_dict(request, spider=None):
"""Convert Request object to a dict.
If a spider is given, it will try to find out the name of the spider method
used in the callback and store that as the callback.
"""
cb = request.callback
if callable(cb):
cb = _find_method(spider, cb)
eb = request.errback
if callable(eb):
eb = _find_method(spider, eb)
d = {
'url': to_unicode(request.url), # urls should be safe (safe_string_url)
'callback': cb,
'errback': eb,
'method': request.method,
'headers': dict(request.headers),
'body': request.body,
'cookies': request.cookies,
'meta': request.meta,
'_encoding': request._encoding,
'priority': request.priority,
'dont_filter': request.dont_filter,
}
return d
def request_from_dict(d, spider=None):
"""Create Request object from a dict.
If a spider is given, it will try to resolve the callbacks looking at the
spider for methods with the same name.
"""
cb = d['callback']
if cb and spider:
cb = _get_method(spider, cb)
eb = d['errback']
if eb and spider:
eb = _get_method(spider, eb)
return Request(
url=to_native_str(d['url']),
callback=cb,
errback=eb,
method=d['method'],
headers=d['headers'],
body=d['body'],
cookies=d['cookies'],
meta=d['meta'],
encoding=d['_encoding'],
priority=d['priority'],
dont_filter=d['dont_filter'])
def _find_method(obj, func):
if obj:
try:
func_self = six.get_method_self(func)
except AttributeError: # func has no __self__
pass
else:
if func_self is obj:
return six.get_method_function(func).__name__
raise ValueError("Function %s is not a method of: %s" % (func, obj))
def _get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
raise ValueError("Method %r not found in: %s" % (name, obj))
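
# --- Hedged round-trip sketch (not in the original module) ---
# With a spider supplied, the bound callback is stored by method name (so
# the dict pickles cleanly) and resolved again on the way back. The spider
# class is illustrative.
if __name__ == '__main__':
    from scrapy import Spider

    class DemoSpider(Spider):
        name = 'demo'

        def parse_item(self, response):
            pass

    spider = DemoSpider()
    req = Request('http://example.com', callback=spider.parse_item, priority=5)
    d = request_to_dict(req, spider=spider)
    print(d['callback'])  # 'parse_item'
    req2 = request_from_dict(d, spider=spider)
    assert req2.callback == spider.parse_item and req2.priority == 5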
|
bilalliberty/android_kernel_htc_liberty-villec2
|
refs/heads/cm-11.0
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2

from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib


class build_ext(_build_ext):
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp


class install_lib(_install_lib):
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib


cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
Christewart/bitcoin
|
refs/heads/master
|
test/functional/rpc_zmq.py
|
13
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test for the ZMQ RPC methods."""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class RPCZMQTest(BitcoinTestFramework):

    address = "tcp://127.0.0.1:28332"

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_py3_zmq()
        self.skip_if_no_bitcoind_zmq()

    def run_test(self):
        self._test_getzmqnotifications()

    def _test_getzmqnotifications(self):
        self.restart_node(0, extra_args=[])
        assert_equal(self.nodes[0].getzmqnotifications(), [])

        self.restart_node(0, extra_args=["-zmqpubhashtx=%s" % self.address])
        assert_equal(self.nodes[0].getzmqnotifications(), [
            {"type": "pubhashtx", "address": self.address},
        ])


if __name__ == '__main__':
    RPCZMQTest().main()
|
aredo/httpie
|
refs/heads/master
|
tests/test_stream.py
|
49
|
import pytest

from httpie.compat import is_windows
from httpie.output.streams import BINARY_SUPPRESSED_NOTICE
from utils import http, TestEnvironment
from fixtures import BIN_FILE_CONTENT, BIN_FILE_PATH


class TestStream:
    # GET because httpbin 500s with binary POST body.

    @pytest.mark.skipif(is_windows,
                        reason='Pretty redirect not supported under Windows')
    def test_pretty_redirected_stream(self, httpbin):
        """Test that --stream works with prettified redirected output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            env = TestEnvironment(colors=256, stdin=f,
                                  stdin_isatty=False,
                                  stdout_isatty=False)
            r = http('--verbose', '--pretty=all', '--stream', 'GET',
                     httpbin.url + '/get', env=env)
        assert BINARY_SUPPRESSED_NOTICE.decode() in r

    def test_encoded_stream(self, httpbin):
        """Test that --stream works with non-prettified
        redirected terminal output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            env = TestEnvironment(stdin=f, stdin_isatty=False)
            r = http('--pretty=none', '--stream', '--verbose', 'GET',
                     httpbin.url + '/get', env=env)
        assert BINARY_SUPPRESSED_NOTICE.decode() in r

    def test_redirected_stream(self, httpbin):
        """Test that --stream works with non-prettified
        redirected terminal output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            env = TestEnvironment(stdout_isatty=False,
                                  stdin_isatty=False,
                                  stdin=f)
            r = http('--pretty=none', '--stream', '--verbose', 'GET',
                     httpbin.url + '/get', env=env)
        assert BIN_FILE_CONTENT in r
|
marmyshev/transitions
|
refs/heads/master
|
openlp/plugins/alerts/lib/alertstab.py
|
1
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtCore, QtGui

from openlp.core.lib import SettingsTab, Receiver, Settings, UiStrings, translate
from openlp.core.lib.ui import create_valign_selection_widgets


class AlertsTab(SettingsTab):
    """
    AlertsTab is the alerts settings tab in the settings dialog.
    """
    def __init__(self, parent, name, visible_title, icon_path):
        SettingsTab.__init__(self, parent, name, visible_title, icon_path)

    def setupUi(self):
        self.setObjectName(u'AlertsTab')
        SettingsTab.setupUi(self)
        self.fontGroupBox = QtGui.QGroupBox(self.leftColumn)
        self.fontGroupBox.setObjectName(u'fontGroupBox')
        self.fontLayout = QtGui.QFormLayout(self.fontGroupBox)
        self.fontLayout.setObjectName(u'fontLayout')
        self.fontLabel = QtGui.QLabel(self.fontGroupBox)
        self.fontLabel.setObjectName(u'fontLabel')
        self.fontComboBox = QtGui.QFontComboBox(self.fontGroupBox)
        self.fontComboBox.setObjectName(u'fontComboBox')
        self.fontLayout.addRow(self.fontLabel, self.fontComboBox)
        self.fontColorLabel = QtGui.QLabel(self.fontGroupBox)
        self.fontColorLabel.setObjectName(u'fontColorLabel')
        self.colorLayout = QtGui.QHBoxLayout()
        self.colorLayout.setObjectName(u'colorLayout')
        self.fontColorButton = QtGui.QPushButton(self.fontGroupBox)
        self.fontColorButton.setObjectName(u'fontColorButton')
        self.colorLayout.addWidget(self.fontColorButton)
        self.colorLayout.addSpacing(20)
        self.backgroundColorLabel = QtGui.QLabel(self.fontGroupBox)
        self.backgroundColorLabel.setObjectName(u'backgroundColorLabel')
        self.colorLayout.addWidget(self.backgroundColorLabel)
        self.backgroundColorButton = QtGui.QPushButton(self.fontGroupBox)
        self.backgroundColorButton.setObjectName(u'backgroundColorButton')
        self.colorLayout.addWidget(self.backgroundColorButton)
        self.fontLayout.addRow(self.fontColorLabel, self.colorLayout)
        self.fontSizeLabel = QtGui.QLabel(self.fontGroupBox)
        self.fontSizeLabel.setObjectName(u'fontSizeLabel')
        self.fontSizeSpinBox = QtGui.QSpinBox(self.fontGroupBox)
        self.fontSizeSpinBox.setObjectName(u'fontSizeSpinBox')
        self.fontLayout.addRow(self.fontSizeLabel, self.fontSizeSpinBox)
        self.timeoutLabel = QtGui.QLabel(self.fontGroupBox)
        self.timeoutLabel.setObjectName(u'timeoutLabel')
        self.timeoutSpinBox = QtGui.QSpinBox(self.fontGroupBox)
        self.timeoutSpinBox.setMaximum(180)
        self.timeoutSpinBox.setObjectName(u'timeoutSpinBox')
        self.fontLayout.addRow(self.timeoutLabel, self.timeoutSpinBox)
        self.verticalLabel, self.verticalComboBox = create_valign_selection_widgets(self.fontGroupBox)
        self.verticalLabel.setObjectName(u'verticalLabel')
        self.verticalComboBox.setObjectName(u'verticalComboBox')
        self.fontLayout.addRow(self.verticalLabel, self.verticalComboBox)
        self.leftLayout.addWidget(self.fontGroupBox)
        self.leftLayout.addStretch()
        self.previewGroupBox = QtGui.QGroupBox(self.rightColumn)
        self.previewGroupBox.setObjectName(u'previewGroupBox')
        self.previewLayout = QtGui.QVBoxLayout(self.previewGroupBox)
        self.previewLayout.setObjectName(u'previewLayout')
        self.fontPreview = QtGui.QLineEdit(self.previewGroupBox)
        self.fontPreview.setObjectName(u'fontPreview')
        self.previewLayout.addWidget(self.fontPreview)
        self.rightLayout.addWidget(self.previewGroupBox)
        self.rightLayout.addStretch()
        # Signals and slots
        QtCore.QObject.connect(self.backgroundColorButton, QtCore.SIGNAL(u'clicked()'),
            self.onBackgroundColorButtonClicked)
        QtCore.QObject.connect(self.fontColorButton, QtCore.SIGNAL(u'clicked()'), self.onFontColorButtonClicked)
        QtCore.QObject.connect(self.fontComboBox, QtCore.SIGNAL(u'activated(int)'), self.onFontComboBoxClicked)
        QtCore.QObject.connect(self.timeoutSpinBox, QtCore.SIGNAL(u'valueChanged(int)'), self.onTimeoutSpinBoxChanged)
        QtCore.QObject.connect(self.fontSizeSpinBox, QtCore.SIGNAL(u'valueChanged(int)'), self.onFontSizeSpinBoxChanged)

    def retranslateUi(self):
        self.fontGroupBox.setTitle(translate('AlertsPlugin.AlertsTab', 'Font'))
        self.fontLabel.setText(translate('AlertsPlugin.AlertsTab', 'Font name:'))
        self.fontColorLabel.setText(translate('AlertsPlugin.AlertsTab', 'Font color:'))
        self.backgroundColorLabel.setText(translate('AlertsPlugin.AlertsTab', 'Background color:'))
        self.fontSizeLabel.setText(translate('AlertsPlugin.AlertsTab', 'Font size:'))
        self.fontSizeSpinBox.setSuffix(UiStrings().FontSizePtUnit)
        self.timeoutLabel.setText(translate('AlertsPlugin.AlertsTab', 'Alert timeout:'))
        self.timeoutSpinBox.setSuffix(UiStrings().Seconds)
        self.previewGroupBox.setTitle(UiStrings().Preview)
        self.fontPreview.setText(UiStrings().OLPV2x)

    def onBackgroundColorButtonClicked(self):
        new_color = QtGui.QColorDialog.getColor(QtGui.QColor(self.bg_color), self)
        if new_color.isValid():
            self.bg_color = new_color.name()
            self.backgroundColorButton.setStyleSheet(u'background-color: %s' % self.bg_color)
            self.updateDisplay()

    def onFontComboBoxClicked(self):
        self.updateDisplay()

    def onFontColorButtonClicked(self):
        new_color = QtGui.QColorDialog.getColor(QtGui.QColor(self.font_color), self)
        if new_color.isValid():
            self.font_color = new_color.name()
            self.fontColorButton.setStyleSheet(u'background-color: %s' % self.font_color)
            self.updateDisplay()

    def onTimeoutSpinBoxChanged(self):
        self.timeout = self.timeoutSpinBox.value()
        self.changed = True

    def onFontSizeSpinBoxChanged(self):
        self.font_size = self.fontSizeSpinBox.value()
        self.updateDisplay()

    def load(self):
        settings = Settings()
        settings.beginGroup(self.settingsSection)
        self.timeout = settings.value(u'timeout')
        self.font_color = settings.value(u'font color')
        self.font_size = settings.value(u'font size')
        self.bg_color = settings.value(u'background color')
        self.font_face = settings.value(u'font face')
        self.location = settings.value(u'location')
        settings.endGroup()
        self.fontSizeSpinBox.setValue(self.font_size)
        self.timeoutSpinBox.setValue(self.timeout)
        self.fontColorButton.setStyleSheet(u'background-color: %s' % self.font_color)
        self.backgroundColorButton.setStyleSheet(u'background-color: %s' % self.bg_color)
        self.verticalComboBox.setCurrentIndex(self.location)
        font = QtGui.QFont()
        font.setFamily(self.font_face)
        self.fontComboBox.setCurrentFont(font)
        self.updateDisplay()
        self.changed = False

    def save(self):
        settings = Settings()
        settings.beginGroup(self.settingsSection)
        # Check value has changed as no event handles this field
        if settings.value(u'location') != self.verticalComboBox.currentIndex():
            self.changed = True
        settings.setValue(u'background color', self.bg_color)
        settings.setValue(u'font color', self.font_color)
        settings.setValue(u'font size', self.font_size)
        self.font_face = self.fontComboBox.currentFont().family()
        settings.setValue(u'font face', self.font_face)
        settings.setValue(u'timeout', self.timeout)
        self.location = self.verticalComboBox.currentIndex()
        settings.setValue(u'location', self.location)
        settings.endGroup()
        if self.changed:
            Receiver.send_message(u'update_display_css')
        self.changed = False

    def updateDisplay(self):
        font = QtGui.QFont()
        font.setFamily(self.fontComboBox.currentFont().family())
        font.setBold(True)
        font.setPointSize(self.font_size)
        self.fontPreview.setFont(font)
        self.fontPreview.setStyleSheet(u'background-color: %s; color: %s' % (self.bg_color, self.font_color))
        self.changed = True
|
billputer/howl
|
refs/heads/master
|
tests/simple_http_test.py
|
1
|
"""
simple_http_test.py
simple test for debugging
"""
import unittest, simplejson
from test_utils import WebCase, print_timing
class TestSimple(WebCase):
"""Simple test for debugging purposes"""
def testSimple(self):
"""Sends an HTTP request to 'player/status/'"""
self.getPage("player/status/", method = "GET")
self.assertStatus(status = 200)
print 'http headers:'
for header in self.headers.items():
print header
print 'body:'
print simplejson.dumps(self.body,indent=1)\
if __name__ == '__main__':
unittest.main()
|
enochd/RMG-Py
|
refs/heads/master
|
external/cclib/progress/qt4progress.py
|
24
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 238 $"
from PyQt4 import QtGui,QtCore
class Qt4Progress(QtGui.QProgressDialog):
def __init__(self, title, parent=None):
QtGui.QProgressDialog.__init__(self, parent)
self.nstep = 0
self.text = None
self.oldprogress = 0
self.progress = 0
self.calls = 0
self.loop=QtCore.QEventLoop(self)
self.setWindowTitle(title)
def initialize(self, nstep, text=None):
self.nstep = nstep
self.text = text
self.setRange(0,nstep)
if text:
self.setLabelText(text)
self.setValue(1)
#sys.stdout.write("\n")
def update(self, step, text=None):
if text:
self.setLabelText(text)
self.setValue(step)
self.loop.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
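
# --- Hedged usage sketch (not in the original file) ---
# The dialog is driven by initialize()/update() calls from a parser loop;
# a QApplication must exist before any widget is created.
if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    progress = Qt4Progress('Parsing logfile')
    progress.initialize(100, 'starting')
    for step in range(1, 101):
        progress.update(step, 'step %d of 100' % step)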
|
dcroc16/skunk_works
|
refs/heads/master
|
google_appengine/google/appengine/api/logservice/log_service_pb.py
|
4
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
class LogServiceError(ProtocolBuffer.ProtocolMessage):
OK = 0
INVALID_REQUEST = 1
STORAGE_ERROR = 2
_ErrorCode_NAMES = {
0: "OK",
1: "INVALID_REQUEST",
2: "STORAGE_ERROR",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogServiceError'
class UserAppLogLine(ProtocolBuffer.ProtocolMessage):
has_timestamp_usec_ = 0
timestamp_usec_ = 0
has_level_ = 0
level_ = 0
has_message_ = 0
message_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def timestamp_usec(self): return self.timestamp_usec_
def set_timestamp_usec(self, x):
self.has_timestamp_usec_ = 1
self.timestamp_usec_ = x
def clear_timestamp_usec(self):
if self.has_timestamp_usec_:
self.has_timestamp_usec_ = 0
self.timestamp_usec_ = 0
def has_timestamp_usec(self): return self.has_timestamp_usec_
def level(self): return self.level_
def set_level(self, x):
self.has_level_ = 1
self.level_ = x
def clear_level(self):
if self.has_level_:
self.has_level_ = 0
self.level_ = 0
def has_level(self): return self.has_level_
def message(self): return self.message_
def set_message(self, x):
self.has_message_ = 1
self.message_ = x
def clear_message(self):
if self.has_message_:
self.has_message_ = 0
self.message_ = ""
def has_message(self): return self.has_message_
def MergeFrom(self, x):
assert x is not self
if (x.has_timestamp_usec()): self.set_timestamp_usec(x.timestamp_usec())
if (x.has_level()): self.set_level(x.level())
if (x.has_message()): self.set_message(x.message())
def Equals(self, x):
if x is self: return 1
if self.has_timestamp_usec_ != x.has_timestamp_usec_: return 0
if self.has_timestamp_usec_ and self.timestamp_usec_ != x.timestamp_usec_: return 0
if self.has_level_ != x.has_level_: return 0
if self.has_level_ and self.level_ != x.level_: return 0
if self.has_message_ != x.has_message_: return 0
if self.has_message_ and self.message_ != x.message_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_timestamp_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: timestamp_usec not set.')
if (not self.has_level_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: level not set.')
if (not self.has_message_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: message not set.')
return initialized
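  # The trailing "+ 3" accounts for one tag byte per required field.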
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.timestamp_usec_)
n += self.lengthVarInt64(self.level_)
n += self.lengthString(len(self.message_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_timestamp_usec_):
n += 1
n += self.lengthVarInt64(self.timestamp_usec_)
if (self.has_level_):
n += 1
n += self.lengthVarInt64(self.level_)
if (self.has_message_):
n += 1
n += self.lengthString(len(self.message_))
return n
def Clear(self):
self.clear_timestamp_usec()
self.clear_level()
self.clear_message()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.timestamp_usec_)
out.putVarInt32(16)
out.putVarInt64(self.level_)
out.putVarInt32(26)
out.putPrefixedString(self.message_)
def OutputPartial(self, out):
if (self.has_timestamp_usec_):
out.putVarInt32(8)
out.putVarInt64(self.timestamp_usec_)
if (self.has_level_):
out.putVarInt32(16)
out.putVarInt64(self.level_)
if (self.has_message_):
out.putVarInt32(26)
out.putPrefixedString(self.message_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_timestamp_usec(d.getVarInt64())
continue
if tt == 16:
self.set_level(d.getVarInt64())
continue
if tt == 26:
self.set_message(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_timestamp_usec_: res+=prefix+("timestamp_usec: %s\n" % self.DebugFormatInt64(self.timestamp_usec_))
if self.has_level_: res+=prefix+("level: %s\n" % self.DebugFormatInt64(self.level_))
if self.has_message_: res+=prefix+("message: %s\n" % self.DebugFormatString(self.message_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktimestamp_usec = 1
klevel = 2
kmessage = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "timestamp_usec",
2: "level",
3: "message",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.UserAppLogLine'
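# A minimal round-trip sketch for the message above (hypothetical values;
# Encode() and the decoding constructor come from the ProtocolMessage base):
#
#   line = UserAppLogLine()
#   line.set_timestamp_usec(1369000000000000)
#   line.set_level(3)
#   line.set_message('request handled')
#   data = line.Encode()            # serialize, inherited from the base
#   copy = UserAppLogLine(data)     # decodes via MergeFromString
#   assert copy.Equals(line)
#
# UserAppLogGroup is simply a repeated collection of UserAppLogLine messages
# (field 2, hence the length-delimited tag 18 below).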
class UserAppLogGroup(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.log_line_ = []
if contents is not None: self.MergeFromString(contents)
def log_line_size(self): return len(self.log_line_)
def log_line_list(self): return self.log_line_
def log_line(self, i):
return self.log_line_[i]
def mutable_log_line(self, i):
return self.log_line_[i]
def add_log_line(self):
x = UserAppLogLine()
self.log_line_.append(x)
return x
def clear_log_line(self):
self.log_line_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.log_line_size()): self.add_log_line().CopyFrom(x.log_line(i))
def Equals(self, x):
if x is self: return 1
if len(self.log_line_) != len(x.log_line_): return 0
for e1, e2 in zip(self.log_line_, x.log_line_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.log_line_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.log_line_)
for i in xrange(len(self.log_line_)): n += self.lengthString(self.log_line_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.log_line_)
for i in xrange(len(self.log_line_)): n += self.lengthString(self.log_line_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_log_line()
def OutputUnchecked(self, out):
for i in xrange(len(self.log_line_)):
out.putVarInt32(18)
out.putVarInt32(self.log_line_[i].ByteSize())
self.log_line_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.log_line_)):
out.putVarInt32(18)
out.putVarInt32(self.log_line_[i].ByteSizePartial())
self.log_line_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_log_line().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.log_line_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("log_line%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
klog_line = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "log_line",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.UserAppLogGroup'
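# FlushRequest carries a single optional string payload, logs (field 1).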
class FlushRequest(ProtocolBuffer.ProtocolMessage):
has_logs_ = 0
logs_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def logs(self): return self.logs_
def set_logs(self, x):
self.has_logs_ = 1
self.logs_ = x
def clear_logs(self):
if self.has_logs_:
self.has_logs_ = 0
self.logs_ = ""
def has_logs(self): return self.has_logs_
def MergeFrom(self, x):
assert x is not self
if (x.has_logs()): self.set_logs(x.logs())
def Equals(self, x):
if x is self: return 1
if self.has_logs_ != x.has_logs_: return 0
if self.has_logs_ and self.logs_ != x.logs_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_logs_): n += 1 + self.lengthString(len(self.logs_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_logs_): n += 1 + self.lengthString(len(self.logs_))
return n
def Clear(self):
self.clear_logs()
def OutputUnchecked(self, out):
if (self.has_logs_):
out.putVarInt32(10)
out.putPrefixedString(self.logs_)
def OutputPartial(self, out):
if (self.has_logs_):
out.putVarInt32(10)
out.putPrefixedString(self.logs_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_logs(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_logs_: res+=prefix+("logs: %s\n" % self.DebugFormatString(self.logs_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
klogs = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "logs",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FlushRequest'
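# SetStatusRequest carries one required string field, status (field 1); note
# that OutputUnchecked writes it unconditionally while OutputPartial guards
# on has_status_.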
class SetStatusRequest(ProtocolBuffer.ProtocolMessage):
has_status_ = 0
status_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def status(self): return self.status_
def set_status(self, x):
self.has_status_ = 1
self.status_ = x
def clear_status(self):
if self.has_status_:
self.has_status_ = 0
self.status_ = ""
def has_status(self): return self.has_status_
def MergeFrom(self, x):
assert x is not self
if (x.has_status()): self.set_status(x.status())
def Equals(self, x):
if x is self: return 1
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.status_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_status_):
n += 1
n += self.lengthString(len(self.status_))
return n
def Clear(self):
self.clear_status()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.status_)
def OutputPartial(self, out):
if (self.has_status_):
out.putVarInt32(10)
out.putPrefixedString(self.status_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_status(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatString(self.status_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SetStatusRequest'
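# LogOffset holds an optional opaque request_id (field 1); RequestLog below
# embeds it as its offset submessage.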
class LogOffset(ProtocolBuffer.ProtocolMessage):
has_request_id_ = 0
request_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def request_id(self): return self.request_id_
def set_request_id(self, x):
self.has_request_id_ = 1
self.request_id_ = x
def clear_request_id(self):
if self.has_request_id_:
self.has_request_id_ = 0
self.request_id_ = ""
def has_request_id(self): return self.has_request_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_request_id()): self.set_request_id(x.request_id())
def Equals(self, x):
if x is self: return 1
if self.has_request_id_ != x.has_request_id_: return 0
if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n
def Clear(self):
self.clear_request_id()
def OutputUnchecked(self, out):
if (self.has_request_id_):
out.putVarInt32(10)
out.putPrefixedString(self.request_id_)
def OutputPartial(self, out):
if (self.has_request_id_):
out.putVarInt32(10)
out.putPrefixedString(self.request_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_request_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
krequest_id = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "request_id",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogOffset'
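# LogLine mirrors UserAppLogLine under different field names: required time
# (field 1), level (field 2) and log_message (field 3). Here level is
# read/written as a 32-bit varint rather than the 64-bit varint used above.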
class LogLine(ProtocolBuffer.ProtocolMessage):
has_time_ = 0
time_ = 0
has_level_ = 0
level_ = 0
has_log_message_ = 0
log_message_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def time(self): return self.time_
def set_time(self, x):
self.has_time_ = 1
self.time_ = x
def clear_time(self):
if self.has_time_:
self.has_time_ = 0
self.time_ = 0
def has_time(self): return self.has_time_
def level(self): return self.level_
def set_level(self, x):
self.has_level_ = 1
self.level_ = x
def clear_level(self):
if self.has_level_:
self.has_level_ = 0
self.level_ = 0
def has_level(self): return self.has_level_
def log_message(self): return self.log_message_
def set_log_message(self, x):
self.has_log_message_ = 1
self.log_message_ = x
def clear_log_message(self):
if self.has_log_message_:
self.has_log_message_ = 0
self.log_message_ = ""
def has_log_message(self): return self.has_log_message_
def MergeFrom(self, x):
assert x is not self
if (x.has_time()): self.set_time(x.time())
if (x.has_level()): self.set_level(x.level())
if (x.has_log_message()): self.set_log_message(x.log_message())
def Equals(self, x):
if x is self: return 1
if self.has_time_ != x.has_time_: return 0
if self.has_time_ and self.time_ != x.time_: return 0
if self.has_level_ != x.has_level_: return 0
if self.has_level_ and self.level_ != x.level_: return 0
if self.has_log_message_ != x.has_log_message_: return 0
if self.has_log_message_ and self.log_message_ != x.log_message_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_time_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: time not set.')
if (not self.has_level_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: level not set.')
if (not self.has_log_message_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: log_message not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.time_)
n += self.lengthVarInt64(self.level_)
n += self.lengthString(len(self.log_message_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_time_):
n += 1
n += self.lengthVarInt64(self.time_)
if (self.has_level_):
n += 1
n += self.lengthVarInt64(self.level_)
if (self.has_log_message_):
n += 1
n += self.lengthString(len(self.log_message_))
return n
def Clear(self):
self.clear_time()
self.clear_level()
self.clear_log_message()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.time_)
out.putVarInt32(16)
out.putVarInt32(self.level_)
out.putVarInt32(26)
out.putPrefixedString(self.log_message_)
def OutputPartial(self, out):
if (self.has_time_):
out.putVarInt32(8)
out.putVarInt64(self.time_)
if (self.has_level_):
out.putVarInt32(16)
out.putVarInt32(self.level_)
if (self.has_log_message_):
out.putVarInt32(26)
out.putPrefixedString(self.log_message_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_time(d.getVarInt64())
continue
if tt == 16:
self.set_level(d.getVarInt32())
continue
if tt == 26:
self.set_log_message(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_time_: res+=prefix+("time: %s\n" % self.DebugFormatInt64(self.time_))
if self.has_level_: res+=prefix+("level: %s\n" % self.DebugFormatInt32(self.level_))
if self.has_log_message_: res+=prefix+("log_message: %s\n" % self.DebugFormatString(self.log_message_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktime = 1
klevel = 2
klog_message = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "time",
2: "level",
3: "log_message",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogLine'
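# RequestLog records one handled HTTP request: identity fields (app_id,
# version_id, request_id), client and timing data, request/response
# metadata, an optional lazily-constructed LogOffset submessage, and a
# repeated list of LogLine entries. The required fields are enumerated in
# IsInitialized below.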
class RequestLog(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_module_id_ = 0
module_id_ = "default"
has_version_id_ = 0
version_id_ = ""
has_request_id_ = 0
request_id_ = ""
has_offset_ = 0
offset_ = None
has_ip_ = 0
ip_ = ""
has_nickname_ = 0
nickname_ = ""
has_start_time_ = 0
start_time_ = 0
has_end_time_ = 0
end_time_ = 0
has_latency_ = 0
latency_ = 0
has_mcycles_ = 0
mcycles_ = 0
has_method_ = 0
method_ = ""
has_resource_ = 0
resource_ = ""
has_http_version_ = 0
http_version_ = ""
has_status_ = 0
status_ = 0
has_response_size_ = 0
response_size_ = 0
has_referrer_ = 0
referrer_ = ""
has_user_agent_ = 0
user_agent_ = ""
has_url_map_entry_ = 0
url_map_entry_ = ""
has_combined_ = 0
combined_ = ""
has_api_mcycles_ = 0
api_mcycles_ = 0
has_host_ = 0
host_ = ""
has_cost_ = 0
cost_ = 0.0
has_task_queue_name_ = 0
task_queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
has_was_loading_request_ = 0
was_loading_request_ = 0
has_pending_time_ = 0
pending_time_ = 0
has_replica_index_ = 0
replica_index_ = -1
has_finished_ = 0
finished_ = 1
has_clone_key_ = 0
clone_key_ = ""
has_lines_incomplete_ = 0
lines_incomplete_ = 0
has_app_engine_release_ = 0
app_engine_release_ = ""
has_trace_id_ = 0
trace_id_ = ""
has_exit_reason_ = 0
exit_reason_ = 0
has_was_throttled_for_time_ = 0
was_throttled_for_time_ = 0
has_was_throttled_for_requests_ = 0
was_throttled_for_requests_ = 0
has_throttled_time_ = 0
throttled_time_ = 0
has_server_name_ = 0
server_name_ = ""
def __init__(self, contents=None):
self.line_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def module_id(self): return self.module_id_
def set_module_id(self, x):
self.has_module_id_ = 1
self.module_id_ = x
def clear_module_id(self):
if self.has_module_id_:
self.has_module_id_ = 0
self.module_id_ = "default"
def has_module_id(self): return self.has_module_id_
def version_id(self): return self.version_id_
def set_version_id(self, x):
self.has_version_id_ = 1
self.version_id_ = x
def clear_version_id(self):
if self.has_version_id_:
self.has_version_id_ = 0
self.version_id_ = ""
def has_version_id(self): return self.has_version_id_
def request_id(self): return self.request_id_
def set_request_id(self, x):
self.has_request_id_ = 1
self.request_id_ = x
def clear_request_id(self):
if self.has_request_id_:
self.has_request_id_ = 0
self.request_id_ = ""
def has_request_id(self): return self.has_request_id_
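  # offset_ is built lazily; the double-checked lock keeps concurrent first
  # accesses from constructing two LogOffset instances.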
def offset(self):
if self.offset_ is None:
self.lazy_init_lock_.acquire()
try:
if self.offset_ is None: self.offset_ = LogOffset()
finally:
self.lazy_init_lock_.release()
return self.offset_
def mutable_offset(self): self.has_offset_ = 1; return self.offset()
def clear_offset(self):
if self.has_offset_:
      self.has_offset_ = 0
if self.offset_ is not None: self.offset_.Clear()
def has_offset(self): return self.has_offset_
def ip(self): return self.ip_
def set_ip(self, x):
self.has_ip_ = 1
self.ip_ = x
def clear_ip(self):
if self.has_ip_:
self.has_ip_ = 0
self.ip_ = ""
def has_ip(self): return self.has_ip_
def nickname(self): return self.nickname_
def set_nickname(self, x):
self.has_nickname_ = 1
self.nickname_ = x
def clear_nickname(self):
if self.has_nickname_:
self.has_nickname_ = 0
self.nickname_ = ""
def has_nickname(self): return self.has_nickname_
def start_time(self): return self.start_time_
def set_start_time(self, x):
self.has_start_time_ = 1
self.start_time_ = x
def clear_start_time(self):
if self.has_start_time_:
self.has_start_time_ = 0
self.start_time_ = 0
def has_start_time(self): return self.has_start_time_
def end_time(self): return self.end_time_
def set_end_time(self, x):
self.has_end_time_ = 1
self.end_time_ = x
def clear_end_time(self):
if self.has_end_time_:
self.has_end_time_ = 0
self.end_time_ = 0
def has_end_time(self): return self.has_end_time_
def latency(self): return self.latency_
def set_latency(self, x):
self.has_latency_ = 1
self.latency_ = x
def clear_latency(self):
if self.has_latency_:
self.has_latency_ = 0
self.latency_ = 0
def has_latency(self): return self.has_latency_
def mcycles(self): return self.mcycles_
def set_mcycles(self, x):
self.has_mcycles_ = 1
self.mcycles_ = x
def clear_mcycles(self):
if self.has_mcycles_:
self.has_mcycles_ = 0
self.mcycles_ = 0
def has_mcycles(self): return self.has_mcycles_
def method(self): return self.method_
def set_method(self, x):
self.has_method_ = 1
self.method_ = x
def clear_method(self):
if self.has_method_:
self.has_method_ = 0
self.method_ = ""
def has_method(self): return self.has_method_
def resource(self): return self.resource_
def set_resource(self, x):
self.has_resource_ = 1
self.resource_ = x
def clear_resource(self):
if self.has_resource_:
self.has_resource_ = 0
self.resource_ = ""
def has_resource(self): return self.has_resource_
def http_version(self): return self.http_version_
def set_http_version(self, x):
self.has_http_version_ = 1
self.http_version_ = x
def clear_http_version(self):
if self.has_http_version_:
self.has_http_version_ = 0
self.http_version_ = ""
def has_http_version(self): return self.has_http_version_
def status(self): return self.status_
def set_status(self, x):
self.has_status_ = 1
self.status_ = x
def clear_status(self):
if self.has_status_:
self.has_status_ = 0
self.status_ = 0
def has_status(self): return self.has_status_
def response_size(self): return self.response_size_
def set_response_size(self, x):
self.has_response_size_ = 1
self.response_size_ = x
def clear_response_size(self):
if self.has_response_size_:
self.has_response_size_ = 0
self.response_size_ = 0
def has_response_size(self): return self.has_response_size_
def referrer(self): return self.referrer_
def set_referrer(self, x):
self.has_referrer_ = 1
self.referrer_ = x
def clear_referrer(self):
if self.has_referrer_:
self.has_referrer_ = 0
self.referrer_ = ""
def has_referrer(self): return self.has_referrer_
def user_agent(self): return self.user_agent_
def set_user_agent(self, x):
self.has_user_agent_ = 1
self.user_agent_ = x
def clear_user_agent(self):
if self.has_user_agent_:
self.has_user_agent_ = 0
self.user_agent_ = ""
def has_user_agent(self): return self.has_user_agent_
def url_map_entry(self): return self.url_map_entry_
def set_url_map_entry(self, x):
self.has_url_map_entry_ = 1
self.url_map_entry_ = x
def clear_url_map_entry(self):
if self.has_url_map_entry_:
self.has_url_map_entry_ = 0
self.url_map_entry_ = ""
def has_url_map_entry(self): return self.has_url_map_entry_
def combined(self): return self.combined_
def set_combined(self, x):
self.has_combined_ = 1
self.combined_ = x
def clear_combined(self):
if self.has_combined_:
self.has_combined_ = 0
self.combined_ = ""
def has_combined(self): return self.has_combined_
def api_mcycles(self): return self.api_mcycles_
def set_api_mcycles(self, x):
self.has_api_mcycles_ = 1
self.api_mcycles_ = x
def clear_api_mcycles(self):
if self.has_api_mcycles_:
self.has_api_mcycles_ = 0
self.api_mcycles_ = 0
def has_api_mcycles(self): return self.has_api_mcycles_
def host(self): return self.host_
def set_host(self, x):
self.has_host_ = 1
self.host_ = x
def clear_host(self):
if self.has_host_:
self.has_host_ = 0
self.host_ = ""
def has_host(self): return self.has_host_
def cost(self): return self.cost_
def set_cost(self, x):
self.has_cost_ = 1
self.cost_ = x
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0
self.cost_ = 0.0
def has_cost(self): return self.has_cost_
def task_queue_name(self): return self.task_queue_name_
def set_task_queue_name(self, x):
self.has_task_queue_name_ = 1
self.task_queue_name_ = x
def clear_task_queue_name(self):
if self.has_task_queue_name_:
self.has_task_queue_name_ = 0
self.task_queue_name_ = ""
def has_task_queue_name(self): return self.has_task_queue_name_
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def was_loading_request(self): return self.was_loading_request_
def set_was_loading_request(self, x):
self.has_was_loading_request_ = 1
self.was_loading_request_ = x
def clear_was_loading_request(self):
if self.has_was_loading_request_:
self.has_was_loading_request_ = 0
self.was_loading_request_ = 0
def has_was_loading_request(self): return self.has_was_loading_request_
def pending_time(self): return self.pending_time_
def set_pending_time(self, x):
self.has_pending_time_ = 1
self.pending_time_ = x
def clear_pending_time(self):
if self.has_pending_time_:
self.has_pending_time_ = 0
self.pending_time_ = 0
def has_pending_time(self): return self.has_pending_time_
def replica_index(self): return self.replica_index_
def set_replica_index(self, x):
self.has_replica_index_ = 1
self.replica_index_ = x
def clear_replica_index(self):
if self.has_replica_index_:
self.has_replica_index_ = 0
self.replica_index_ = -1
def has_replica_index(self): return self.has_replica_index_
def finished(self): return self.finished_
def set_finished(self, x):
self.has_finished_ = 1
self.finished_ = x
def clear_finished(self):
if self.has_finished_:
self.has_finished_ = 0
self.finished_ = 1
def has_finished(self): return self.has_finished_
def clone_key(self): return self.clone_key_
def set_clone_key(self, x):
self.has_clone_key_ = 1
self.clone_key_ = x
def clear_clone_key(self):
if self.has_clone_key_:
self.has_clone_key_ = 0
self.clone_key_ = ""
def has_clone_key(self): return self.has_clone_key_
def line_size(self): return len(self.line_)
def line_list(self): return self.line_
def line(self, i):
return self.line_[i]
def mutable_line(self, i):
return self.line_[i]
def add_line(self):
x = LogLine()
self.line_.append(x)
return x
def clear_line(self):
self.line_ = []
def lines_incomplete(self): return self.lines_incomplete_
def set_lines_incomplete(self, x):
self.has_lines_incomplete_ = 1
self.lines_incomplete_ = x
def clear_lines_incomplete(self):
if self.has_lines_incomplete_:
self.has_lines_incomplete_ = 0
self.lines_incomplete_ = 0
def has_lines_incomplete(self): return self.has_lines_incomplete_
def app_engine_release(self): return self.app_engine_release_
def set_app_engine_release(self, x):
self.has_app_engine_release_ = 1
self.app_engine_release_ = x
def clear_app_engine_release(self):
if self.has_app_engine_release_:
self.has_app_engine_release_ = 0
self.app_engine_release_ = ""
def has_app_engine_release(self): return self.has_app_engine_release_
def trace_id(self): return self.trace_id_
def set_trace_id(self, x):
self.has_trace_id_ = 1
self.trace_id_ = x
def clear_trace_id(self):
if self.has_trace_id_:
self.has_trace_id_ = 0
self.trace_id_ = ""
def has_trace_id(self): return self.has_trace_id_
def exit_reason(self): return self.exit_reason_
def set_exit_reason(self, x):
self.has_exit_reason_ = 1
self.exit_reason_ = x
def clear_exit_reason(self):
if self.has_exit_reason_:
self.has_exit_reason_ = 0
self.exit_reason_ = 0
def has_exit_reason(self): return self.has_exit_reason_
def was_throttled_for_time(self): return self.was_throttled_for_time_
def set_was_throttled_for_time(self, x):
self.has_was_throttled_for_time_ = 1
self.was_throttled_for_time_ = x
def clear_was_throttled_for_time(self):
if self.has_was_throttled_for_time_:
self.has_was_throttled_for_time_ = 0
self.was_throttled_for_time_ = 0
def has_was_throttled_for_time(self): return self.has_was_throttled_for_time_
def was_throttled_for_requests(self): return self.was_throttled_for_requests_
def set_was_throttled_for_requests(self, x):
self.has_was_throttled_for_requests_ = 1
self.was_throttled_for_requests_ = x
def clear_was_throttled_for_requests(self):
if self.has_was_throttled_for_requests_:
self.has_was_throttled_for_requests_ = 0
self.was_throttled_for_requests_ = 0
def has_was_throttled_for_requests(self): return self.has_was_throttled_for_requests_
def throttled_time(self): return self.throttled_time_
def set_throttled_time(self, x):
self.has_throttled_time_ = 1
self.throttled_time_ = x
def clear_throttled_time(self):
if self.has_throttled_time_:
self.has_throttled_time_ = 0
self.throttled_time_ = 0
def has_throttled_time(self): return self.has_throttled_time_
def server_name(self): return self.server_name_
def set_server_name(self, x):
self.has_server_name_ = 1
self.server_name_ = x
def clear_server_name(self):
if self.has_server_name_:
self.has_server_name_ = 0
self.server_name_ = ""
def has_server_name(self): return self.has_server_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_module_id()): self.set_module_id(x.module_id())
if (x.has_version_id()): self.set_version_id(x.version_id())
if (x.has_request_id()): self.set_request_id(x.request_id())
if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
if (x.has_ip()): self.set_ip(x.ip())
if (x.has_nickname()): self.set_nickname(x.nickname())
if (x.has_start_time()): self.set_start_time(x.start_time())
if (x.has_end_time()): self.set_end_time(x.end_time())
if (x.has_latency()): self.set_latency(x.latency())
if (x.has_mcycles()): self.set_mcycles(x.mcycles())
if (x.has_method()): self.set_method(x.method())
if (x.has_resource()): self.set_resource(x.resource())
if (x.has_http_version()): self.set_http_version(x.http_version())
if (x.has_status()): self.set_status(x.status())
if (x.has_response_size()): self.set_response_size(x.response_size())
if (x.has_referrer()): self.set_referrer(x.referrer())
if (x.has_user_agent()): self.set_user_agent(x.user_agent())
if (x.has_url_map_entry()): self.set_url_map_entry(x.url_map_entry())
if (x.has_combined()): self.set_combined(x.combined())
if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
if (x.has_host()): self.set_host(x.host())
if (x.has_cost()): self.set_cost(x.cost())
if (x.has_task_queue_name()): self.set_task_queue_name(x.task_queue_name())
if (x.has_task_name()): self.set_task_name(x.task_name())
if (x.has_was_loading_request()): self.set_was_loading_request(x.was_loading_request())
if (x.has_pending_time()): self.set_pending_time(x.pending_time())
if (x.has_replica_index()): self.set_replica_index(x.replica_index())
if (x.has_finished()): self.set_finished(x.finished())
if (x.has_clone_key()): self.set_clone_key(x.clone_key())
for i in xrange(x.line_size()): self.add_line().CopyFrom(x.line(i))
if (x.has_lines_incomplete()): self.set_lines_incomplete(x.lines_incomplete())
if (x.has_app_engine_release()): self.set_app_engine_release(x.app_engine_release())
if (x.has_trace_id()): self.set_trace_id(x.trace_id())
if (x.has_exit_reason()): self.set_exit_reason(x.exit_reason())
if (x.has_was_throttled_for_time()): self.set_was_throttled_for_time(x.was_throttled_for_time())
if (x.has_was_throttled_for_requests()): self.set_was_throttled_for_requests(x.was_throttled_for_requests())
if (x.has_throttled_time()): self.set_throttled_time(x.throttled_time())
if (x.has_server_name()): self.set_server_name(x.server_name())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_module_id_ != x.has_module_id_: return 0
if self.has_module_id_ and self.module_id_ != x.module_id_: return 0
if self.has_version_id_ != x.has_version_id_: return 0
if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
if self.has_request_id_ != x.has_request_id_: return 0
if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_ip_ != x.has_ip_: return 0
if self.has_ip_ and self.ip_ != x.ip_: return 0
if self.has_nickname_ != x.has_nickname_: return 0
if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
if self.has_start_time_ != x.has_start_time_: return 0
if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
if self.has_end_time_ != x.has_end_time_: return 0
if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
if self.has_latency_ != x.has_latency_: return 0
if self.has_latency_ and self.latency_ != x.latency_: return 0
if self.has_mcycles_ != x.has_mcycles_: return 0
if self.has_mcycles_ and self.mcycles_ != x.mcycles_: return 0
if self.has_method_ != x.has_method_: return 0
if self.has_method_ and self.method_ != x.method_: return 0
if self.has_resource_ != x.has_resource_: return 0
if self.has_resource_ and self.resource_ != x.resource_: return 0
if self.has_http_version_ != x.has_http_version_: return 0
if self.has_http_version_ and self.http_version_ != x.http_version_: return 0
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
if self.has_response_size_ != x.has_response_size_: return 0
if self.has_response_size_ and self.response_size_ != x.response_size_: return 0
if self.has_referrer_ != x.has_referrer_: return 0
if self.has_referrer_ and self.referrer_ != x.referrer_: return 0
if self.has_user_agent_ != x.has_user_agent_: return 0
if self.has_user_agent_ and self.user_agent_ != x.user_agent_: return 0
if self.has_url_map_entry_ != x.has_url_map_entry_: return 0
if self.has_url_map_entry_ and self.url_map_entry_ != x.url_map_entry_: return 0
if self.has_combined_ != x.has_combined_: return 0
if self.has_combined_ and self.combined_ != x.combined_: return 0
if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
if self.has_host_ != x.has_host_: return 0
if self.has_host_ and self.host_ != x.host_: return 0
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
if self.has_task_queue_name_ != x.has_task_queue_name_: return 0
if self.has_task_queue_name_ and self.task_queue_name_ != x.task_queue_name_: return 0
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
if self.has_was_loading_request_ != x.has_was_loading_request_: return 0
if self.has_was_loading_request_ and self.was_loading_request_ != x.was_loading_request_: return 0
if self.has_pending_time_ != x.has_pending_time_: return 0
if self.has_pending_time_ and self.pending_time_ != x.pending_time_: return 0
if self.has_replica_index_ != x.has_replica_index_: return 0
if self.has_replica_index_ and self.replica_index_ != x.replica_index_: return 0
if self.has_finished_ != x.has_finished_: return 0
if self.has_finished_ and self.finished_ != x.finished_: return 0
if self.has_clone_key_ != x.has_clone_key_: return 0
if self.has_clone_key_ and self.clone_key_ != x.clone_key_: return 0
if len(self.line_) != len(x.line_): return 0
for e1, e2 in zip(self.line_, x.line_):
if e1 != e2: return 0
if self.has_lines_incomplete_ != x.has_lines_incomplete_: return 0
if self.has_lines_incomplete_ and self.lines_incomplete_ != x.lines_incomplete_: return 0
if self.has_app_engine_release_ != x.has_app_engine_release_: return 0
if self.has_app_engine_release_ and self.app_engine_release_ != x.app_engine_release_: return 0
if self.has_trace_id_ != x.has_trace_id_: return 0
if self.has_trace_id_ and self.trace_id_ != x.trace_id_: return 0
if self.has_exit_reason_ != x.has_exit_reason_: return 0
if self.has_exit_reason_ and self.exit_reason_ != x.exit_reason_: return 0
if self.has_was_throttled_for_time_ != x.has_was_throttled_for_time_: return 0
if self.has_was_throttled_for_time_ and self.was_throttled_for_time_ != x.was_throttled_for_time_: return 0
if self.has_was_throttled_for_requests_ != x.has_was_throttled_for_requests_: return 0
if self.has_was_throttled_for_requests_ and self.was_throttled_for_requests_ != x.was_throttled_for_requests_: return 0
if self.has_throttled_time_ != x.has_throttled_time_: return 0
if self.has_throttled_time_ and self.throttled_time_ != x.throttled_time_: return 0
if self.has_server_name_ != x.has_server_name_: return 0
if self.has_server_name_ and self.server_name_ != x.server_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_version_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: version_id not set.')
if (not self.has_request_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: request_id not set.')
if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_ip_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: ip not set.')
if (not self.has_start_time_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_time not set.')
if (not self.has_end_time_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: end_time not set.')
if (not self.has_latency_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: latency not set.')
if (not self.has_mcycles_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: mcycles not set.')
if (not self.has_method_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: method not set.')
if (not self.has_resource_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: resource not set.')
if (not self.has_http_version_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: http_version not set.')
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
if (not self.has_response_size_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: response_size not set.')
if (not self.has_url_map_entry_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: url_map_entry not set.')
if (not self.has_combined_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: combined not set.')
for p in self.line_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
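  # The trailing "+ 17" is the fixed tag overhead of the required fields:
  # thirteen one-byte tags plus the two-byte tags for url_map_entry (138)
  # and combined (146).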
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
if (self.has_module_id_): n += 2 + self.lengthString(len(self.module_id_))
n += self.lengthString(len(self.version_id_))
n += self.lengthString(len(self.request_id_))
if (self.has_offset_): n += 2 + self.lengthString(self.offset_.ByteSize())
n += self.lengthString(len(self.ip_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
n += self.lengthVarInt64(self.start_time_)
n += self.lengthVarInt64(self.end_time_)
n += self.lengthVarInt64(self.latency_)
n += self.lengthVarInt64(self.mcycles_)
n += self.lengthString(len(self.method_))
n += self.lengthString(len(self.resource_))
n += self.lengthString(len(self.http_version_))
n += self.lengthVarInt64(self.status_)
n += self.lengthVarInt64(self.response_size_)
if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
n += self.lengthString(len(self.url_map_entry_))
n += self.lengthString(len(self.combined_))
if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
if (self.has_cost_): n += 10
if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
if (self.has_was_loading_request_): n += 3
if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
if (self.has_finished_): n += 3
if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
n += 2 * len(self.line_)
for i in xrange(len(self.line_)): n += self.lengthString(self.line_[i].ByteSize())
if (self.has_lines_incomplete_): n += 3
if (self.has_app_engine_release_): n += 2 + self.lengthString(len(self.app_engine_release_))
if (self.has_trace_id_): n += 2 + self.lengthString(len(self.trace_id_))
if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
if (self.has_was_throttled_for_time_): n += 3
if (self.has_was_throttled_for_requests_): n += 3
if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
return n + 17
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_module_id_): n += 2 + self.lengthString(len(self.module_id_))
if (self.has_version_id_):
n += 1
n += self.lengthString(len(self.version_id_))
if (self.has_request_id_):
n += 1
n += self.lengthString(len(self.request_id_))
if (self.has_offset_): n += 2 + self.lengthString(self.offset_.ByteSizePartial())
if (self.has_ip_):
n += 1
n += self.lengthString(len(self.ip_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
if (self.has_start_time_):
n += 1
n += self.lengthVarInt64(self.start_time_)
if (self.has_end_time_):
n += 1
n += self.lengthVarInt64(self.end_time_)
if (self.has_latency_):
n += 1
n += self.lengthVarInt64(self.latency_)
if (self.has_mcycles_):
n += 1
n += self.lengthVarInt64(self.mcycles_)
if (self.has_method_):
n += 1
n += self.lengthString(len(self.method_))
if (self.has_resource_):
n += 1
n += self.lengthString(len(self.resource_))
if (self.has_http_version_):
n += 1
n += self.lengthString(len(self.http_version_))
if (self.has_status_):
n += 1
n += self.lengthVarInt64(self.status_)
if (self.has_response_size_):
n += 1
n += self.lengthVarInt64(self.response_size_)
if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
if (self.has_url_map_entry_):
n += 2
n += self.lengthString(len(self.url_map_entry_))
if (self.has_combined_):
n += 2
n += self.lengthString(len(self.combined_))
if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
if (self.has_cost_): n += 10
if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
if (self.has_was_loading_request_): n += 3
if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
if (self.has_finished_): n += 3
if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
n += 2 * len(self.line_)
for i in xrange(len(self.line_)): n += self.lengthString(self.line_[i].ByteSizePartial())
if (self.has_lines_incomplete_): n += 3
if (self.has_app_engine_release_): n += 2 + self.lengthString(len(self.app_engine_release_))
if (self.has_trace_id_): n += 2 + self.lengthString(len(self.trace_id_))
if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
if (self.has_was_throttled_for_time_): n += 3
if (self.has_was_throttled_for_requests_): n += 3
if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
return n
def Clear(self):
self.clear_app_id()
self.clear_module_id()
self.clear_version_id()
self.clear_request_id()
self.clear_offset()
self.clear_ip()
self.clear_nickname()
self.clear_start_time()
self.clear_end_time()
self.clear_latency()
self.clear_mcycles()
self.clear_method()
self.clear_resource()
self.clear_http_version()
self.clear_status()
self.clear_response_size()
self.clear_referrer()
self.clear_user_agent()
self.clear_url_map_entry()
self.clear_combined()
self.clear_api_mcycles()
self.clear_host()
self.clear_cost()
self.clear_task_queue_name()
self.clear_task_name()
self.clear_was_loading_request()
self.clear_pending_time()
self.clear_replica_index()
self.clear_finished()
self.clear_clone_key()
self.clear_line()
self.clear_lines_incomplete()
self.clear_app_engine_release()
self.clear_trace_id()
self.clear_exit_reason()
self.clear_was_throttled_for_time()
self.clear_was_throttled_for_requests()
self.clear_throttled_time()
self.clear_server_name()
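  # OutputUnchecked assumes IsInitialized() already passed and writes the
  # required fields unconditionally; OutputPartial (below) guards every
  # field on its has_ flag.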
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.version_id_)
out.putVarInt32(26)
out.putPrefixedString(self.request_id_)
out.putVarInt32(34)
out.putPrefixedString(self.ip_)
if (self.has_nickname_):
out.putVarInt32(42)
out.putPrefixedString(self.nickname_)
out.putVarInt32(48)
out.putVarInt64(self.start_time_)
out.putVarInt32(56)
out.putVarInt64(self.end_time_)
out.putVarInt32(64)
out.putVarInt64(self.latency_)
out.putVarInt32(72)
out.putVarInt64(self.mcycles_)
out.putVarInt32(82)
out.putPrefixedString(self.method_)
out.putVarInt32(90)
out.putPrefixedString(self.resource_)
out.putVarInt32(98)
out.putPrefixedString(self.http_version_)
out.putVarInt32(104)
out.putVarInt32(self.status_)
out.putVarInt32(112)
out.putVarInt64(self.response_size_)
if (self.has_referrer_):
out.putVarInt32(122)
out.putPrefixedString(self.referrer_)
if (self.has_user_agent_):
out.putVarInt32(130)
out.putPrefixedString(self.user_agent_)
out.putVarInt32(138)
out.putPrefixedString(self.url_map_entry_)
out.putVarInt32(146)
out.putPrefixedString(self.combined_)
if (self.has_api_mcycles_):
out.putVarInt32(152)
out.putVarInt64(self.api_mcycles_)
if (self.has_host_):
out.putVarInt32(162)
out.putPrefixedString(self.host_)
if (self.has_cost_):
out.putVarInt32(169)
out.putDouble(self.cost_)
if (self.has_task_queue_name_):
out.putVarInt32(178)
out.putPrefixedString(self.task_queue_name_)
if (self.has_task_name_):
out.putVarInt32(186)
out.putPrefixedString(self.task_name_)
if (self.has_was_loading_request_):
out.putVarInt32(192)
out.putBoolean(self.was_loading_request_)
if (self.has_pending_time_):
out.putVarInt32(200)
out.putVarInt64(self.pending_time_)
if (self.has_replica_index_):
out.putVarInt32(208)
out.putVarInt32(self.replica_index_)
if (self.has_finished_):
out.putVarInt32(216)
out.putBoolean(self.finished_)
if (self.has_clone_key_):
out.putVarInt32(226)
out.putPrefixedString(self.clone_key_)
for i in xrange(len(self.line_)):
out.putVarInt32(234)
out.putVarInt32(self.line_[i].ByteSize())
self.line_[i].OutputUnchecked(out)
if (self.has_exit_reason_):
out.putVarInt32(240)
out.putVarInt32(self.exit_reason_)
if (self.has_was_throttled_for_time_):
out.putVarInt32(248)
out.putBoolean(self.was_throttled_for_time_)
if (self.has_was_throttled_for_requests_):
out.putVarInt32(256)
out.putBoolean(self.was_throttled_for_requests_)
if (self.has_throttled_time_):
out.putVarInt32(264)
out.putVarInt64(self.throttled_time_)
if (self.has_server_name_):
out.putVarInt32(274)
out.putPrefixedString(self.server_name_)
if (self.has_offset_):
out.putVarInt32(282)
out.putVarInt32(self.offset_.ByteSize())
self.offset_.OutputUnchecked(out)
if (self.has_lines_incomplete_):
out.putVarInt32(288)
out.putBoolean(self.lines_incomplete_)
if (self.has_module_id_):
out.putVarInt32(298)
out.putPrefixedString(self.module_id_)
if (self.has_app_engine_release_):
out.putVarInt32(306)
out.putPrefixedString(self.app_engine_release_)
if (self.has_trace_id_):
out.putVarInt32(314)
out.putPrefixedString(self.trace_id_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_version_id_):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_)
if (self.has_request_id_):
out.putVarInt32(26)
out.putPrefixedString(self.request_id_)
if (self.has_ip_):
out.putVarInt32(34)
out.putPrefixedString(self.ip_)
if (self.has_nickname_):
out.putVarInt32(42)
out.putPrefixedString(self.nickname_)
if (self.has_start_time_):
out.putVarInt32(48)
out.putVarInt64(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(56)
out.putVarInt64(self.end_time_)
if (self.has_latency_):
out.putVarInt32(64)
out.putVarInt64(self.latency_)
if (self.has_mcycles_):
out.putVarInt32(72)
out.putVarInt64(self.mcycles_)
if (self.has_method_):
out.putVarInt32(82)
out.putPrefixedString(self.method_)
if (self.has_resource_):
out.putVarInt32(90)
out.putPrefixedString(self.resource_)
if (self.has_http_version_):
out.putVarInt32(98)
out.putPrefixedString(self.http_version_)
if (self.has_status_):
out.putVarInt32(104)
out.putVarInt32(self.status_)
if (self.has_response_size_):
out.putVarInt32(112)
out.putVarInt64(self.response_size_)
if (self.has_referrer_):
out.putVarInt32(122)
out.putPrefixedString(self.referrer_)
if (self.has_user_agent_):
out.putVarInt32(130)
out.putPrefixedString(self.user_agent_)
if (self.has_url_map_entry_):
out.putVarInt32(138)
out.putPrefixedString(self.url_map_entry_)
if (self.has_combined_):
out.putVarInt32(146)
out.putPrefixedString(self.combined_)
if (self.has_api_mcycles_):
out.putVarInt32(152)
out.putVarInt64(self.api_mcycles_)
if (self.has_host_):
out.putVarInt32(162)
out.putPrefixedString(self.host_)
if (self.has_cost_):
out.putVarInt32(169)
out.putDouble(self.cost_)
if (self.has_task_queue_name_):
out.putVarInt32(178)
out.putPrefixedString(self.task_queue_name_)
if (self.has_task_name_):
out.putVarInt32(186)
out.putPrefixedString(self.task_name_)
if (self.has_was_loading_request_):
out.putVarInt32(192)
out.putBoolean(self.was_loading_request_)
if (self.has_pending_time_):
out.putVarInt32(200)
out.putVarInt64(self.pending_time_)
if (self.has_replica_index_):
out.putVarInt32(208)
out.putVarInt32(self.replica_index_)
if (self.has_finished_):
out.putVarInt32(216)
out.putBoolean(self.finished_)
if (self.has_clone_key_):
out.putVarInt32(226)
out.putPrefixedString(self.clone_key_)
for i in xrange(len(self.line_)):
out.putVarInt32(234)
out.putVarInt32(self.line_[i].ByteSizePartial())
self.line_[i].OutputPartial(out)
if (self.has_exit_reason_):
out.putVarInt32(240)
out.putVarInt32(self.exit_reason_)
if (self.has_was_throttled_for_time_):
out.putVarInt32(248)
out.putBoolean(self.was_throttled_for_time_)
if (self.has_was_throttled_for_requests_):
out.putVarInt32(256)
out.putBoolean(self.was_throttled_for_requests_)
if (self.has_throttled_time_):
out.putVarInt32(264)
out.putVarInt64(self.throttled_time_)
if (self.has_server_name_):
out.putVarInt32(274)
out.putPrefixedString(self.server_name_)
if (self.has_offset_):
out.putVarInt32(282)
out.putVarInt32(self.offset_.ByteSizePartial())
self.offset_.OutputPartial(out)
if (self.has_lines_incomplete_):
out.putVarInt32(288)
out.putBoolean(self.lines_incomplete_)
if (self.has_module_id_):
out.putVarInt32(298)
out.putPrefixedString(self.module_id_)
if (self.has_app_engine_release_):
out.putVarInt32(306)
out.putPrefixedString(self.app_engine_release_)
if (self.has_trace_id_):
out.putVarInt32(314)
out.putPrefixedString(self.trace_id_)
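  # Decode loop: read a tag varint, dispatch on its value, and skip unknown
  # tags so newer writers stay compatible; a zero tag is malformed input.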
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_version_id(d.getPrefixedString())
continue
if tt == 26:
self.set_request_id(d.getPrefixedString())
continue
if tt == 34:
self.set_ip(d.getPrefixedString())
continue
if tt == 42:
self.set_nickname(d.getPrefixedString())
continue
if tt == 48:
self.set_start_time(d.getVarInt64())
continue
if tt == 56:
self.set_end_time(d.getVarInt64())
continue
if tt == 64:
self.set_latency(d.getVarInt64())
continue
if tt == 72:
self.set_mcycles(d.getVarInt64())
continue
if tt == 82:
self.set_method(d.getPrefixedString())
continue
if tt == 90:
self.set_resource(d.getPrefixedString())
continue
if tt == 98:
self.set_http_version(d.getPrefixedString())
continue
if tt == 104:
self.set_status(d.getVarInt32())
continue
if tt == 112:
self.set_response_size(d.getVarInt64())
continue
if tt == 122:
self.set_referrer(d.getPrefixedString())
continue
if tt == 130:
self.set_user_agent(d.getPrefixedString())
continue
if tt == 138:
self.set_url_map_entry(d.getPrefixedString())
continue
if tt == 146:
self.set_combined(d.getPrefixedString())
continue
if tt == 152:
self.set_api_mcycles(d.getVarInt64())
continue
if tt == 162:
self.set_host(d.getPrefixedString())
continue
if tt == 169:
self.set_cost(d.getDouble())
continue
if tt == 178:
self.set_task_queue_name(d.getPrefixedString())
continue
if tt == 186:
self.set_task_name(d.getPrefixedString())
continue
if tt == 192:
self.set_was_loading_request(d.getBoolean())
continue
if tt == 200:
self.set_pending_time(d.getVarInt64())
continue
if tt == 208:
self.set_replica_index(d.getVarInt32())
continue
if tt == 216:
self.set_finished(d.getBoolean())
continue
if tt == 226:
self.set_clone_key(d.getPrefixedString())
continue
if tt == 234:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_line().TryMerge(tmp)
continue
if tt == 240:
self.set_exit_reason(d.getVarInt32())
continue
if tt == 248:
self.set_was_throttled_for_time(d.getBoolean())
continue
if tt == 256:
self.set_was_throttled_for_requests(d.getBoolean())
continue
if tt == 264:
self.set_throttled_time(d.getVarInt64())
continue
if tt == 274:
self.set_server_name(d.getPrefixedString())
continue
if tt == 282:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_offset().TryMerge(tmp)
continue
if tt == 288:
self.set_lines_incomplete(d.getBoolean())
continue
if tt == 298:
self.set_module_id(d.getPrefixedString())
continue
if tt == 306:
self.set_app_engine_release(d.getPrefixedString())
continue
if tt == 314:
self.set_trace_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_module_id_: res+=prefix+("module_id: %s\n" % self.DebugFormatString(self.module_id_))
if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
if self.has_offset_:
res+=prefix+"offset <\n"
res+=self.offset_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_ip_: res+=prefix+("ip: %s\n" % self.DebugFormatString(self.ip_))
if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
if self.has_latency_: res+=prefix+("latency: %s\n" % self.DebugFormatInt64(self.latency_))
if self.has_mcycles_: res+=prefix+("mcycles: %s\n" % self.DebugFormatInt64(self.mcycles_))
if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
if self.has_resource_: res+=prefix+("resource: %s\n" % self.DebugFormatString(self.resource_))
if self.has_http_version_: res+=prefix+("http_version: %s\n" % self.DebugFormatString(self.http_version_))
if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatInt32(self.status_))
if self.has_response_size_: res+=prefix+("response_size: %s\n" % self.DebugFormatInt64(self.response_size_))
if self.has_referrer_: res+=prefix+("referrer: %s\n" % self.DebugFormatString(self.referrer_))
if self.has_user_agent_: res+=prefix+("user_agent: %s\n" % self.DebugFormatString(self.user_agent_))
if self.has_url_map_entry_: res+=prefix+("url_map_entry: %s\n" % self.DebugFormatString(self.url_map_entry_))
if self.has_combined_: res+=prefix+("combined: %s\n" % self.DebugFormatString(self.combined_))
if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
if self.has_host_: res+=prefix+("host: %s\n" % self.DebugFormatString(self.host_))
if self.has_cost_: res+=prefix+("cost: %s\n" % self.DebugFormat(self.cost_))
if self.has_task_queue_name_: res+=prefix+("task_queue_name: %s\n" % self.DebugFormatString(self.task_queue_name_))
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
if self.has_was_loading_request_: res+=prefix+("was_loading_request: %s\n" % self.DebugFormatBool(self.was_loading_request_))
if self.has_pending_time_: res+=prefix+("pending_time: %s\n" % self.DebugFormatInt64(self.pending_time_))
if self.has_replica_index_: res+=prefix+("replica_index: %s\n" % self.DebugFormatInt32(self.replica_index_))
if self.has_finished_: res+=prefix+("finished: %s\n" % self.DebugFormatBool(self.finished_))
if self.has_clone_key_: res+=prefix+("clone_key: %s\n" % self.DebugFormatString(self.clone_key_))
cnt=0
for e in self.line_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("line%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_lines_incomplete_: res+=prefix+("lines_incomplete: %s\n" % self.DebugFormatBool(self.lines_incomplete_))
if self.has_app_engine_release_: res+=prefix+("app_engine_release: %s\n" % self.DebugFormatString(self.app_engine_release_))
if self.has_trace_id_: res+=prefix+("trace_id: %s\n" % self.DebugFormatString(self.trace_id_))
if self.has_exit_reason_: res+=prefix+("exit_reason: %s\n" % self.DebugFormatInt32(self.exit_reason_))
if self.has_was_throttled_for_time_: res+=prefix+("was_throttled_for_time: %s\n" % self.DebugFormatBool(self.was_throttled_for_time_))
if self.has_was_throttled_for_requests_: res+=prefix+("was_throttled_for_requests: %s\n" % self.DebugFormatBool(self.was_throttled_for_requests_))
if self.has_throttled_time_: res+=prefix+("throttled_time: %s\n" % self.DebugFormatInt64(self.throttled_time_))
if self.has_server_name_: res+=prefix+("server_name: %s\n" % self.DebugFormatString(self.server_name_))
return res
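  # Expands a sparse {tag_number: text} mapping into a dense tuple indexed
  # 0..maxtag, so tag lookups during encode/decode are plain list indexing.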
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kmodule_id = 37
kversion_id = 2
krequest_id = 3
koffset = 35
kip = 4
knickname = 5
kstart_time = 6
kend_time = 7
klatency = 8
kmcycles = 9
kmethod = 10
kresource = 11
khttp_version = 12
kstatus = 13
kresponse_size = 14
kreferrer = 15
kuser_agent = 16
kurl_map_entry = 17
kcombined = 18
kapi_mcycles = 19
khost = 20
kcost = 21
ktask_queue_name = 22
ktask_name = 23
kwas_loading_request = 24
kpending_time = 25
kreplica_index = 26
kfinished = 27
kclone_key = 28
kline = 29
klines_incomplete = 36
kapp_engine_release = 38
ktrace_id = 39
kexit_reason = 30
kwas_throttled_for_time = 31
kwas_throttled_for_requests = 32
kthrottled_time = 33
kserver_name = 34
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "version_id",
3: "request_id",
4: "ip",
5: "nickname",
6: "start_time",
7: "end_time",
8: "latency",
9: "mcycles",
10: "method",
11: "resource",
12: "http_version",
13: "status",
14: "response_size",
15: "referrer",
16: "user_agent",
17: "url_map_entry",
18: "combined",
19: "api_mcycles",
20: "host",
21: "cost",
22: "task_queue_name",
23: "task_name",
24: "was_loading_request",
25: "pending_time",
26: "replica_index",
27: "finished",
28: "clone_key",
29: "line",
30: "exit_reason",
31: "was_throttled_for_time",
32: "was_throttled_for_requests",
33: "throttled_time",
34: "server_name",
35: "offset",
36: "lines_incomplete",
37: "module_id",
38: "app_engine_release",
39: "trace_id",
}, 39)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.NUMERIC,
14: ProtocolBuffer.Encoder.NUMERIC,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.STRING,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.STRING,
19: ProtocolBuffer.Encoder.NUMERIC,
20: ProtocolBuffer.Encoder.STRING,
21: ProtocolBuffer.Encoder.DOUBLE,
22: ProtocolBuffer.Encoder.STRING,
23: ProtocolBuffer.Encoder.STRING,
24: ProtocolBuffer.Encoder.NUMERIC,
25: ProtocolBuffer.Encoder.NUMERIC,
26: ProtocolBuffer.Encoder.NUMERIC,
27: ProtocolBuffer.Encoder.NUMERIC,
28: ProtocolBuffer.Encoder.STRING,
29: ProtocolBuffer.Encoder.STRING,
30: ProtocolBuffer.Encoder.NUMERIC,
31: ProtocolBuffer.Encoder.NUMERIC,
32: ProtocolBuffer.Encoder.NUMERIC,
33: ProtocolBuffer.Encoder.NUMERIC,
34: ProtocolBuffer.Encoder.STRING,
35: ProtocolBuffer.Encoder.STRING,
36: ProtocolBuffer.Encoder.NUMERIC,
37: ProtocolBuffer.Encoder.STRING,
38: ProtocolBuffer.Encoder.STRING,
39: ProtocolBuffer.Encoder.STRING,
}, 39, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.RequestLog'
class LogModuleVersion(ProtocolBuffer.ProtocolMessage):
has_module_id_ = 0
module_id_ = "default"
has_version_id_ = 0
version_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def module_id(self): return self.module_id_
def set_module_id(self, x):
self.has_module_id_ = 1
self.module_id_ = x
def clear_module_id(self):
if self.has_module_id_:
self.has_module_id_ = 0
self.module_id_ = "default"
def has_module_id(self): return self.has_module_id_
def version_id(self): return self.version_id_
def set_version_id(self, x):
self.has_version_id_ = 1
self.version_id_ = x
def clear_version_id(self):
if self.has_version_id_:
self.has_version_id_ = 0
self.version_id_ = ""
def has_version_id(self): return self.has_version_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_module_id()): self.set_module_id(x.module_id())
if (x.has_version_id()): self.set_version_id(x.version_id())
def Equals(self, x):
if x is self: return 1
if self.has_module_id_ != x.has_module_id_: return 0
if self.has_module_id_ and self.module_id_ != x.module_id_: return 0
if self.has_version_id_ != x.has_version_id_: return 0
if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_module_id_): n += 1 + self.lengthString(len(self.module_id_))
if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_module_id_): n += 1 + self.lengthString(len(self.module_id_))
if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
return n
def Clear(self):
self.clear_module_id()
self.clear_version_id()
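  # Wire tags below are precomputed as (field_number << 3) | wire_type, e.g.
  # module_id (field 1, length-delimited type 2) encodes as tag byte 10 and
  # version_id (field 2) as 18.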
def OutputUnchecked(self, out):
if (self.has_module_id_):
out.putVarInt32(10)
out.putPrefixedString(self.module_id_)
if (self.has_version_id_):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_)
def OutputPartial(self, out):
if (self.has_module_id_):
out.putVarInt32(10)
out.putPrefixedString(self.module_id_)
if (self.has_version_id_):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_module_id(d.getPrefixedString())
continue
if tt == 18:
self.set_version_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_module_id_: res+=prefix+("module_id: %s\n" % self.DebugFormatString(self.module_id_))
if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kmodule_id = 1
kversion_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "module_id",
2: "version_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogModuleVersion'
class LogReadRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_start_time_ = 0
start_time_ = 0
has_end_time_ = 0
end_time_ = 0
has_offset_ = 0
offset_ = None
has_minimum_log_level_ = 0
minimum_log_level_ = 0
has_include_incomplete_ = 0
include_incomplete_ = 0
has_count_ = 0
count_ = 0
has_combined_log_regex_ = 0
combined_log_regex_ = ""
has_host_regex_ = 0
host_regex_ = ""
has_replica_index_ = 0
replica_index_ = 0
has_include_app_logs_ = 0
include_app_logs_ = 0
has_app_logs_per_request_ = 0
app_logs_per_request_ = 0
has_include_host_ = 0
include_host_ = 0
has_include_all_ = 0
include_all_ = 0
has_cache_iterator_ = 0
cache_iterator_ = 0
has_num_shards_ = 0
num_shards_ = 0
def __init__(self, contents=None):
self.version_id_ = []
self.module_version_ = []
self.request_id_ = []
self.lazy_init_lock_ = thread.allocate_lock()
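    # Guards lazy creation of the optional offset_ submessage in offset():
    # double-checked locking so concurrent callers share a single LogOffset.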
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def version_id_size(self): return len(self.version_id_)
def version_id_list(self): return self.version_id_
def version_id(self, i):
return self.version_id_[i]
def set_version_id(self, i, x):
self.version_id_[i] = x
def add_version_id(self, x):
self.version_id_.append(x)
def clear_version_id(self):
self.version_id_ = []
def module_version_size(self): return len(self.module_version_)
def module_version_list(self): return self.module_version_
def module_version(self, i):
return self.module_version_[i]
def mutable_module_version(self, i):
return self.module_version_[i]
def add_module_version(self):
x = LogModuleVersion()
self.module_version_.append(x)
return x
def clear_module_version(self):
self.module_version_ = []
def start_time(self): return self.start_time_
def set_start_time(self, x):
self.has_start_time_ = 1
self.start_time_ = x
def clear_start_time(self):
if self.has_start_time_:
self.has_start_time_ = 0
self.start_time_ = 0
def has_start_time(self): return self.has_start_time_
def end_time(self): return self.end_time_
def set_end_time(self, x):
self.has_end_time_ = 1
self.end_time_ = x
def clear_end_time(self):
if self.has_end_time_:
self.has_end_time_ = 0
self.end_time_ = 0
def has_end_time(self): return self.has_end_time_
def offset(self):
if self.offset_ is None:
self.lazy_init_lock_.acquire()
try:
if self.offset_ is None: self.offset_ = LogOffset()
finally:
self.lazy_init_lock_.release()
return self.offset_
def mutable_offset(self): self.has_offset_ = 1; return self.offset()
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0;
if self.offset_ is not None: self.offset_.Clear()
def has_offset(self): return self.has_offset_
def request_id_size(self): return len(self.request_id_)
def request_id_list(self): return self.request_id_
def request_id(self, i):
return self.request_id_[i]
def set_request_id(self, i, x):
self.request_id_[i] = x
def add_request_id(self, x):
self.request_id_.append(x)
def clear_request_id(self):
self.request_id_ = []
def minimum_log_level(self): return self.minimum_log_level_
def set_minimum_log_level(self, x):
self.has_minimum_log_level_ = 1
self.minimum_log_level_ = x
def clear_minimum_log_level(self):
if self.has_minimum_log_level_:
self.has_minimum_log_level_ = 0
self.minimum_log_level_ = 0
def has_minimum_log_level(self): return self.has_minimum_log_level_
def include_incomplete(self): return self.include_incomplete_
def set_include_incomplete(self, x):
self.has_include_incomplete_ = 1
self.include_incomplete_ = x
def clear_include_incomplete(self):
if self.has_include_incomplete_:
self.has_include_incomplete_ = 0
self.include_incomplete_ = 0
def has_include_incomplete(self): return self.has_include_incomplete_
def count(self): return self.count_
def set_count(self, x):
self.has_count_ = 1
self.count_ = x
def clear_count(self):
if self.has_count_:
self.has_count_ = 0
self.count_ = 0
def has_count(self): return self.has_count_
def combined_log_regex(self): return self.combined_log_regex_
def set_combined_log_regex(self, x):
self.has_combined_log_regex_ = 1
self.combined_log_regex_ = x
def clear_combined_log_regex(self):
if self.has_combined_log_regex_:
self.has_combined_log_regex_ = 0
self.combined_log_regex_ = ""
def has_combined_log_regex(self): return self.has_combined_log_regex_
def host_regex(self): return self.host_regex_
def set_host_regex(self, x):
self.has_host_regex_ = 1
self.host_regex_ = x
def clear_host_regex(self):
if self.has_host_regex_:
self.has_host_regex_ = 0
self.host_regex_ = ""
def has_host_regex(self): return self.has_host_regex_
def replica_index(self): return self.replica_index_
def set_replica_index(self, x):
self.has_replica_index_ = 1
self.replica_index_ = x
def clear_replica_index(self):
if self.has_replica_index_:
self.has_replica_index_ = 0
self.replica_index_ = 0
def has_replica_index(self): return self.has_replica_index_
def include_app_logs(self): return self.include_app_logs_
def set_include_app_logs(self, x):
self.has_include_app_logs_ = 1
self.include_app_logs_ = x
def clear_include_app_logs(self):
if self.has_include_app_logs_:
self.has_include_app_logs_ = 0
self.include_app_logs_ = 0
def has_include_app_logs(self): return self.has_include_app_logs_
def app_logs_per_request(self): return self.app_logs_per_request_
def set_app_logs_per_request(self, x):
self.has_app_logs_per_request_ = 1
self.app_logs_per_request_ = x
def clear_app_logs_per_request(self):
if self.has_app_logs_per_request_:
self.has_app_logs_per_request_ = 0
self.app_logs_per_request_ = 0
def has_app_logs_per_request(self): return self.has_app_logs_per_request_
def include_host(self): return self.include_host_
def set_include_host(self, x):
self.has_include_host_ = 1
self.include_host_ = x
def clear_include_host(self):
if self.has_include_host_:
self.has_include_host_ = 0
self.include_host_ = 0
def has_include_host(self): return self.has_include_host_
def include_all(self): return self.include_all_
def set_include_all(self, x):
self.has_include_all_ = 1
self.include_all_ = x
def clear_include_all(self):
if self.has_include_all_:
self.has_include_all_ = 0
self.include_all_ = 0
def has_include_all(self): return self.has_include_all_
def cache_iterator(self): return self.cache_iterator_
def set_cache_iterator(self, x):
self.has_cache_iterator_ = 1
self.cache_iterator_ = x
def clear_cache_iterator(self):
if self.has_cache_iterator_:
self.has_cache_iterator_ = 0
self.cache_iterator_ = 0
def has_cache_iterator(self): return self.has_cache_iterator_
def num_shards(self): return self.num_shards_
def set_num_shards(self, x):
self.has_num_shards_ = 1
self.num_shards_ = x
def clear_num_shards(self):
if self.has_num_shards_:
self.has_num_shards_ = 0
self.num_shards_ = 0
def has_num_shards(self): return self.has_num_shards_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
for i in xrange(x.version_id_size()): self.add_version_id(x.version_id(i))
for i in xrange(x.module_version_size()): self.add_module_version().CopyFrom(x.module_version(i))
if (x.has_start_time()): self.set_start_time(x.start_time())
if (x.has_end_time()): self.set_end_time(x.end_time())
if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
for i in xrange(x.request_id_size()): self.add_request_id(x.request_id(i))
if (x.has_minimum_log_level()): self.set_minimum_log_level(x.minimum_log_level())
if (x.has_include_incomplete()): self.set_include_incomplete(x.include_incomplete())
if (x.has_count()): self.set_count(x.count())
if (x.has_combined_log_regex()): self.set_combined_log_regex(x.combined_log_regex())
if (x.has_host_regex()): self.set_host_regex(x.host_regex())
if (x.has_replica_index()): self.set_replica_index(x.replica_index())
if (x.has_include_app_logs()): self.set_include_app_logs(x.include_app_logs())
if (x.has_app_logs_per_request()): self.set_app_logs_per_request(x.app_logs_per_request())
if (x.has_include_host()): self.set_include_host(x.include_host())
if (x.has_include_all()): self.set_include_all(x.include_all())
if (x.has_cache_iterator()): self.set_cache_iterator(x.cache_iterator())
if (x.has_num_shards()): self.set_num_shards(x.num_shards())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if len(self.version_id_) != len(x.version_id_): return 0
for e1, e2 in zip(self.version_id_, x.version_id_):
if e1 != e2: return 0
if len(self.module_version_) != len(x.module_version_): return 0
for e1, e2 in zip(self.module_version_, x.module_version_):
if e1 != e2: return 0
if self.has_start_time_ != x.has_start_time_: return 0
if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
if self.has_end_time_ != x.has_end_time_: return 0
if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if len(self.request_id_) != len(x.request_id_): return 0
for e1, e2 in zip(self.request_id_, x.request_id_):
if e1 != e2: return 0
if self.has_minimum_log_level_ != x.has_minimum_log_level_: return 0
if self.has_minimum_log_level_ and self.minimum_log_level_ != x.minimum_log_level_: return 0
if self.has_include_incomplete_ != x.has_include_incomplete_: return 0
if self.has_include_incomplete_ and self.include_incomplete_ != x.include_incomplete_: return 0
if self.has_count_ != x.has_count_: return 0
if self.has_count_ and self.count_ != x.count_: return 0
if self.has_combined_log_regex_ != x.has_combined_log_regex_: return 0
if self.has_combined_log_regex_ and self.combined_log_regex_ != x.combined_log_regex_: return 0
if self.has_host_regex_ != x.has_host_regex_: return 0
if self.has_host_regex_ and self.host_regex_ != x.host_regex_: return 0
if self.has_replica_index_ != x.has_replica_index_: return 0
if self.has_replica_index_ and self.replica_index_ != x.replica_index_: return 0
if self.has_include_app_logs_ != x.has_include_app_logs_: return 0
if self.has_include_app_logs_ and self.include_app_logs_ != x.include_app_logs_: return 0
if self.has_app_logs_per_request_ != x.has_app_logs_per_request_: return 0
if self.has_app_logs_per_request_ and self.app_logs_per_request_ != x.app_logs_per_request_: return 0
if self.has_include_host_ != x.has_include_host_: return 0
if self.has_include_host_ and self.include_host_ != x.include_host_: return 0
if self.has_include_all_ != x.has_include_all_: return 0
if self.has_include_all_ and self.include_all_ != x.include_all_: return 0
if self.has_cache_iterator_ != x.has_cache_iterator_: return 0
if self.has_cache_iterator_ and self.cache_iterator_ != x.cache_iterator_: return 0
if self.has_num_shards_ != x.has_num_shards_: return 0
if self.has_num_shards_ and self.num_shards_ != x.num_shards_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
for p in self.module_version_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += 1 * len(self.version_id_)
for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
n += 2 * len(self.module_version_)
for i in xrange(len(self.module_version_)): n += self.lengthString(self.module_version_[i].ByteSize())
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
n += 1 * len(self.request_id_)
for i in xrange(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
if (self.has_include_incomplete_): n += 2
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_combined_log_regex_): n += 1 + self.lengthString(len(self.combined_log_regex_))
if (self.has_host_regex_): n += 1 + self.lengthString(len(self.host_regex_))
if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
if (self.has_include_app_logs_): n += 2
if (self.has_app_logs_per_request_): n += 2 + self.lengthVarInt64(self.app_logs_per_request_)
if (self.has_include_host_): n += 2
if (self.has_include_all_): n += 2
if (self.has_cache_iterator_): n += 2
if (self.has_num_shards_): n += 2 + self.lengthVarInt64(self.num_shards_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
n += 1 * len(self.version_id_)
for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
n += 2 * len(self.module_version_)
for i in xrange(len(self.module_version_)): n += self.lengthString(self.module_version_[i].ByteSizePartial())
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
n += 1 * len(self.request_id_)
for i in xrange(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
if (self.has_include_incomplete_): n += 2
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_combined_log_regex_): n += 1 + self.lengthString(len(self.combined_log_regex_))
if (self.has_host_regex_): n += 1 + self.lengthString(len(self.host_regex_))
if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
if (self.has_include_app_logs_): n += 2
if (self.has_app_logs_per_request_): n += 2 + self.lengthVarInt64(self.app_logs_per_request_)
if (self.has_include_host_): n += 2
if (self.has_include_all_): n += 2
if (self.has_cache_iterator_): n += 2
if (self.has_num_shards_): n += 2 + self.lengthVarInt64(self.num_shards_)
return n
def Clear(self):
self.clear_app_id()
self.clear_version_id()
self.clear_module_version()
self.clear_start_time()
self.clear_end_time()
self.clear_offset()
self.clear_request_id()
self.clear_minimum_log_level()
self.clear_include_incomplete()
self.clear_count()
self.clear_combined_log_regex()
self.clear_host_regex()
self.clear_replica_index()
self.clear_include_app_logs()
self.clear_app_logs_per_request()
self.clear_include_host()
self.clear_include_all()
self.clear_cache_iterator()
self.clear_num_shards()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.version_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_[i])
if (self.has_start_time_):
out.putVarInt32(24)
out.putVarInt64(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(32)
out.putVarInt64(self.end_time_)
if (self.has_offset_):
out.putVarInt32(42)
out.putVarInt32(self.offset_.ByteSize())
self.offset_.OutputUnchecked(out)
for i in xrange(len(self.request_id_)):
out.putVarInt32(50)
out.putPrefixedString(self.request_id_[i])
if (self.has_minimum_log_level_):
out.putVarInt32(56)
out.putVarInt32(self.minimum_log_level_)
if (self.has_include_incomplete_):
out.putVarInt32(64)
out.putBoolean(self.include_incomplete_)
if (self.has_count_):
out.putVarInt32(72)
out.putVarInt64(self.count_)
if (self.has_include_app_logs_):
out.putVarInt32(80)
out.putBoolean(self.include_app_logs_)
if (self.has_include_host_):
out.putVarInt32(88)
out.putBoolean(self.include_host_)
if (self.has_include_all_):
out.putVarInt32(96)
out.putBoolean(self.include_all_)
if (self.has_cache_iterator_):
out.putVarInt32(104)
out.putBoolean(self.cache_iterator_)
if (self.has_combined_log_regex_):
out.putVarInt32(114)
out.putPrefixedString(self.combined_log_regex_)
if (self.has_host_regex_):
out.putVarInt32(122)
out.putPrefixedString(self.host_regex_)
if (self.has_replica_index_):
out.putVarInt32(128)
out.putVarInt32(self.replica_index_)
if (self.has_app_logs_per_request_):
out.putVarInt32(136)
out.putVarInt32(self.app_logs_per_request_)
if (self.has_num_shards_):
out.putVarInt32(144)
out.putVarInt32(self.num_shards_)
for i in xrange(len(self.module_version_)):
out.putVarInt32(154)
out.putVarInt32(self.module_version_[i].ByteSize())
self.module_version_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.version_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_[i])
if (self.has_start_time_):
out.putVarInt32(24)
out.putVarInt64(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(32)
out.putVarInt64(self.end_time_)
if (self.has_offset_):
out.putVarInt32(42)
out.putVarInt32(self.offset_.ByteSizePartial())
self.offset_.OutputPartial(out)
for i in xrange(len(self.request_id_)):
out.putVarInt32(50)
out.putPrefixedString(self.request_id_[i])
if (self.has_minimum_log_level_):
out.putVarInt32(56)
out.putVarInt32(self.minimum_log_level_)
if (self.has_include_incomplete_):
out.putVarInt32(64)
out.putBoolean(self.include_incomplete_)
if (self.has_count_):
out.putVarInt32(72)
out.putVarInt64(self.count_)
if (self.has_include_app_logs_):
out.putVarInt32(80)
out.putBoolean(self.include_app_logs_)
if (self.has_include_host_):
out.putVarInt32(88)
out.putBoolean(self.include_host_)
if (self.has_include_all_):
out.putVarInt32(96)
out.putBoolean(self.include_all_)
if (self.has_cache_iterator_):
out.putVarInt32(104)
out.putBoolean(self.cache_iterator_)
if (self.has_combined_log_regex_):
out.putVarInt32(114)
out.putPrefixedString(self.combined_log_regex_)
if (self.has_host_regex_):
out.putVarInt32(122)
out.putPrefixedString(self.host_regex_)
if (self.has_replica_index_):
out.putVarInt32(128)
out.putVarInt32(self.replica_index_)
if (self.has_app_logs_per_request_):
out.putVarInt32(136)
out.putVarInt32(self.app_logs_per_request_)
if (self.has_num_shards_):
out.putVarInt32(144)
out.putVarInt32(self.num_shards_)
for i in xrange(len(self.module_version_)):
out.putVarInt32(154)
out.putVarInt32(self.module_version_[i].ByteSizePartial())
self.module_version_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.add_version_id(d.getPrefixedString())
continue
if tt == 24:
self.set_start_time(d.getVarInt64())
continue
if tt == 32:
self.set_end_time(d.getVarInt64())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_offset().TryMerge(tmp)
continue
if tt == 50:
self.add_request_id(d.getPrefixedString())
continue
if tt == 56:
self.set_minimum_log_level(d.getVarInt32())
continue
if tt == 64:
self.set_include_incomplete(d.getBoolean())
continue
if tt == 72:
self.set_count(d.getVarInt64())
continue
if tt == 80:
self.set_include_app_logs(d.getBoolean())
continue
if tt == 88:
self.set_include_host(d.getBoolean())
continue
if tt == 96:
self.set_include_all(d.getBoolean())
continue
if tt == 104:
self.set_cache_iterator(d.getBoolean())
continue
if tt == 114:
self.set_combined_log_regex(d.getPrefixedString())
continue
if tt == 122:
self.set_host_regex(d.getPrefixedString())
continue
if tt == 128:
self.set_replica_index(d.getVarInt32())
continue
if tt == 136:
self.set_app_logs_per_request(d.getVarInt32())
continue
if tt == 144:
self.set_num_shards(d.getVarInt32())
continue
if tt == 154:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_module_version().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
cnt=0
for e in self.version_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("version_id%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
cnt=0
for e in self.module_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("module_version%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
if self.has_offset_:
res+=prefix+"offset <\n"
res+=self.offset_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.request_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("request_id%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_minimum_log_level_: res+=prefix+("minimum_log_level: %s\n" % self.DebugFormatInt32(self.minimum_log_level_))
if self.has_include_incomplete_: res+=prefix+("include_incomplete: %s\n" % self.DebugFormatBool(self.include_incomplete_))
if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt64(self.count_))
if self.has_combined_log_regex_: res+=prefix+("combined_log_regex: %s\n" % self.DebugFormatString(self.combined_log_regex_))
if self.has_host_regex_: res+=prefix+("host_regex: %s\n" % self.DebugFormatString(self.host_regex_))
if self.has_replica_index_: res+=prefix+("replica_index: %s\n" % self.DebugFormatInt32(self.replica_index_))
if self.has_include_app_logs_: res+=prefix+("include_app_logs: %s\n" % self.DebugFormatBool(self.include_app_logs_))
if self.has_app_logs_per_request_: res+=prefix+("app_logs_per_request: %s\n" % self.DebugFormatInt32(self.app_logs_per_request_))
if self.has_include_host_: res+=prefix+("include_host: %s\n" % self.DebugFormatBool(self.include_host_))
if self.has_include_all_: res+=prefix+("include_all: %s\n" % self.DebugFormatBool(self.include_all_))
if self.has_cache_iterator_: res+=prefix+("cache_iterator: %s\n" % self.DebugFormatBool(self.cache_iterator_))
if self.has_num_shards_: res+=prefix+("num_shards: %s\n" % self.DebugFormatInt32(self.num_shards_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kversion_id = 2
kmodule_version = 19
kstart_time = 3
kend_time = 4
koffset = 5
krequest_id = 6
kminimum_log_level = 7
kinclude_incomplete = 8
kcount = 9
kcombined_log_regex = 14
khost_regex = 15
kreplica_index = 16
kinclude_app_logs = 10
kapp_logs_per_request = 17
kinclude_host = 11
kinclude_all = 12
kcache_iterator = 13
knum_shards = 18
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "version_id",
3: "start_time",
4: "end_time",
5: "offset",
6: "request_id",
7: "minimum_log_level",
8: "include_incomplete",
9: "count",
10: "include_app_logs",
11: "include_host",
12: "include_all",
13: "cache_iterator",
14: "combined_log_regex",
15: "host_regex",
16: "replica_index",
17: "app_logs_per_request",
18: "num_shards",
19: "module_version",
}, 19)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.NUMERIC,
14: ProtocolBuffer.Encoder.STRING,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.NUMERIC,
17: ProtocolBuffer.Encoder.NUMERIC,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.STRING,
}, 19, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogReadRequest'
class LogReadResponse(ProtocolBuffer.ProtocolMessage):
has_offset_ = 0
offset_ = None
has_last_end_time_ = 0
last_end_time_ = 0
def __init__(self, contents=None):
self.log_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def log_size(self): return len(self.log_)
def log_list(self): return self.log_
def log(self, i):
return self.log_[i]
def mutable_log(self, i):
return self.log_[i]
def add_log(self):
x = RequestLog()
self.log_.append(x)
return x
def clear_log(self):
self.log_ = []
def offset(self):
if self.offset_ is None:
self.lazy_init_lock_.acquire()
try:
if self.offset_ is None: self.offset_ = LogOffset()
finally:
self.lazy_init_lock_.release()
return self.offset_
def mutable_offset(self): self.has_offset_ = 1; return self.offset()
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0;
if self.offset_ is not None: self.offset_.Clear()
def has_offset(self): return self.has_offset_
def last_end_time(self): return self.last_end_time_
def set_last_end_time(self, x):
self.has_last_end_time_ = 1
self.last_end_time_ = x
def clear_last_end_time(self):
if self.has_last_end_time_:
self.has_last_end_time_ = 0
self.last_end_time_ = 0
def has_last_end_time(self): return self.has_last_end_time_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.log_size()): self.add_log().CopyFrom(x.log(i))
if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
if (x.has_last_end_time()): self.set_last_end_time(x.last_end_time())
def Equals(self, x):
if x is self: return 1
if len(self.log_) != len(x.log_): return 0
for e1, e2 in zip(self.log_, x.log_):
if e1 != e2: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_last_end_time_ != x.has_last_end_time_: return 0
if self.has_last_end_time_ and self.last_end_time_ != x.last_end_time_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.log_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.log_)
for i in xrange(len(self.log_)): n += self.lengthString(self.log_[i].ByteSize())
if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
if (self.has_last_end_time_): n += 1 + self.lengthVarInt64(self.last_end_time_)
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.log_)
for i in xrange(len(self.log_)): n += self.lengthString(self.log_[i].ByteSizePartial())
if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
if (self.has_last_end_time_): n += 1 + self.lengthVarInt64(self.last_end_time_)
return n
def Clear(self):
self.clear_log()
self.clear_offset()
self.clear_last_end_time()
def OutputUnchecked(self, out):
for i in xrange(len(self.log_)):
out.putVarInt32(10)
out.putVarInt32(self.log_[i].ByteSize())
self.log_[i].OutputUnchecked(out)
if (self.has_offset_):
out.putVarInt32(18)
out.putVarInt32(self.offset_.ByteSize())
self.offset_.OutputUnchecked(out)
if (self.has_last_end_time_):
out.putVarInt32(24)
out.putVarInt64(self.last_end_time_)
def OutputPartial(self, out):
for i in xrange(len(self.log_)):
out.putVarInt32(10)
out.putVarInt32(self.log_[i].ByteSizePartial())
self.log_[i].OutputPartial(out)
if (self.has_offset_):
out.putVarInt32(18)
out.putVarInt32(self.offset_.ByteSizePartial())
self.offset_.OutputPartial(out)
if (self.has_last_end_time_):
out.putVarInt32(24)
out.putVarInt64(self.last_end_time_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_log().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_offset().TryMerge(tmp)
continue
if tt == 24:
self.set_last_end_time(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.log_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("log%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_offset_:
res+=prefix+"offset <\n"
res+=self.offset_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_last_end_time_: res+=prefix+("last_end_time: %s\n" % self.DebugFormatInt64(self.last_end_time_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
klog = 1
koffset = 2
klast_end_time = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "log",
2: "offset",
3: "last_end_time",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogReadResponse'
class LogUsageRecord(ProtocolBuffer.ProtocolMessage):
has_version_id_ = 0
version_id_ = ""
has_start_time_ = 0
start_time_ = 0
has_end_time_ = 0
end_time_ = 0
has_count_ = 0
count_ = 0
has_total_size_ = 0
total_size_ = 0
has_records_ = 0
records_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def version_id(self): return self.version_id_
def set_version_id(self, x):
self.has_version_id_ = 1
self.version_id_ = x
def clear_version_id(self):
if self.has_version_id_:
self.has_version_id_ = 0
self.version_id_ = ""
def has_version_id(self): return self.has_version_id_
def start_time(self): return self.start_time_
def set_start_time(self, x):
self.has_start_time_ = 1
self.start_time_ = x
def clear_start_time(self):
if self.has_start_time_:
self.has_start_time_ = 0
self.start_time_ = 0
def has_start_time(self): return self.has_start_time_
def end_time(self): return self.end_time_
def set_end_time(self, x):
self.has_end_time_ = 1
self.end_time_ = x
def clear_end_time(self):
if self.has_end_time_:
self.has_end_time_ = 0
self.end_time_ = 0
def has_end_time(self): return self.has_end_time_
def count(self): return self.count_
def set_count(self, x):
self.has_count_ = 1
self.count_ = x
def clear_count(self):
if self.has_count_:
self.has_count_ = 0
self.count_ = 0
def has_count(self): return self.has_count_
def total_size(self): return self.total_size_
def set_total_size(self, x):
self.has_total_size_ = 1
self.total_size_ = x
def clear_total_size(self):
if self.has_total_size_:
self.has_total_size_ = 0
self.total_size_ = 0
def has_total_size(self): return self.has_total_size_
def records(self): return self.records_
def set_records(self, x):
self.has_records_ = 1
self.records_ = x
def clear_records(self):
if self.has_records_:
self.has_records_ = 0
self.records_ = 0
def has_records(self): return self.has_records_
def MergeFrom(self, x):
assert x is not self
if (x.has_version_id()): self.set_version_id(x.version_id())
if (x.has_start_time()): self.set_start_time(x.start_time())
if (x.has_end_time()): self.set_end_time(x.end_time())
if (x.has_count()): self.set_count(x.count())
if (x.has_total_size()): self.set_total_size(x.total_size())
if (x.has_records()): self.set_records(x.records())
def Equals(self, x):
if x is self: return 1
if self.has_version_id_ != x.has_version_id_: return 0
if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
if self.has_start_time_ != x.has_start_time_: return 0
if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
if self.has_end_time_ != x.has_end_time_: return 0
if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
if self.has_count_ != x.has_count_: return 0
if self.has_count_ and self.count_ != x.count_: return 0
if self.has_total_size_ != x.has_total_size_: return 0
if self.has_total_size_ and self.total_size_ != x.total_size_: return 0
if self.has_records_ != x.has_records_: return 0
if self.has_records_ and self.records_ != x.records_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_total_size_): n += 1 + self.lengthVarInt64(self.total_size_)
if (self.has_records_): n += 1 + self.lengthVarInt64(self.records_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_total_size_): n += 1 + self.lengthVarInt64(self.total_size_)
if (self.has_records_): n += 1 + self.lengthVarInt64(self.records_)
return n
def Clear(self):
self.clear_version_id()
self.clear_start_time()
self.clear_end_time()
self.clear_count()
self.clear_total_size()
self.clear_records()
def OutputUnchecked(self, out):
if (self.has_version_id_):
out.putVarInt32(10)
out.putPrefixedString(self.version_id_)
if (self.has_start_time_):
out.putVarInt32(16)
out.putVarInt32(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(24)
out.putVarInt32(self.end_time_)
if (self.has_count_):
out.putVarInt32(32)
out.putVarInt64(self.count_)
if (self.has_total_size_):
out.putVarInt32(40)
out.putVarInt64(self.total_size_)
if (self.has_records_):
out.putVarInt32(48)
out.putVarInt32(self.records_)
def OutputPartial(self, out):
if (self.has_version_id_):
out.putVarInt32(10)
out.putPrefixedString(self.version_id_)
if (self.has_start_time_):
out.putVarInt32(16)
out.putVarInt32(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(24)
out.putVarInt32(self.end_time_)
if (self.has_count_):
out.putVarInt32(32)
out.putVarInt64(self.count_)
if (self.has_total_size_):
out.putVarInt32(40)
out.putVarInt64(self.total_size_)
if (self.has_records_):
out.putVarInt32(48)
out.putVarInt32(self.records_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_version_id(d.getPrefixedString())
continue
if tt == 16:
self.set_start_time(d.getVarInt32())
continue
if tt == 24:
self.set_end_time(d.getVarInt32())
continue
if tt == 32:
self.set_count(d.getVarInt64())
continue
if tt == 40:
self.set_total_size(d.getVarInt64())
continue
if tt == 48:
self.set_records(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt32(self.start_time_))
if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt32(self.end_time_))
if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt64(self.count_))
if self.has_total_size_: res+=prefix+("total_size: %s\n" % self.DebugFormatInt64(self.total_size_))
if self.has_records_: res+=prefix+("records: %s\n" % self.DebugFormatInt32(self.records_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kversion_id = 1
kstart_time = 2
kend_time = 3
kcount = 4
ktotal_size = 5
krecords = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "version_id",
2: "start_time",
3: "end_time",
4: "count",
5: "total_size",
6: "records",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageRecord'
class LogUsageRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_start_time_ = 0
start_time_ = 0
has_end_time_ = 0
end_time_ = 0
has_resolution_hours_ = 0
resolution_hours_ = 1
has_combine_versions_ = 0
combine_versions_ = 0
has_usage_version_ = 0
usage_version_ = 0
has_versions_only_ = 0
versions_only_ = 0
def __init__(self, contents=None):
self.version_id_ = []
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def version_id_size(self): return len(self.version_id_)
def version_id_list(self): return self.version_id_
def version_id(self, i):
return self.version_id_[i]
def set_version_id(self, i, x):
self.version_id_[i] = x
def add_version_id(self, x):
self.version_id_.append(x)
def clear_version_id(self):
self.version_id_ = []
def start_time(self): return self.start_time_
def set_start_time(self, x):
self.has_start_time_ = 1
self.start_time_ = x
def clear_start_time(self):
if self.has_start_time_:
self.has_start_time_ = 0
self.start_time_ = 0
def has_start_time(self): return self.has_start_time_
def end_time(self): return self.end_time_
def set_end_time(self, x):
self.has_end_time_ = 1
self.end_time_ = x
def clear_end_time(self):
if self.has_end_time_:
self.has_end_time_ = 0
self.end_time_ = 0
def has_end_time(self): return self.has_end_time_
def resolution_hours(self): return self.resolution_hours_
def set_resolution_hours(self, x):
self.has_resolution_hours_ = 1
self.resolution_hours_ = x
def clear_resolution_hours(self):
if self.has_resolution_hours_:
self.has_resolution_hours_ = 0
self.resolution_hours_ = 1
def has_resolution_hours(self): return self.has_resolution_hours_
def combine_versions(self): return self.combine_versions_
def set_combine_versions(self, x):
self.has_combine_versions_ = 1
self.combine_versions_ = x
def clear_combine_versions(self):
if self.has_combine_versions_:
self.has_combine_versions_ = 0
self.combine_versions_ = 0
def has_combine_versions(self): return self.has_combine_versions_
def usage_version(self): return self.usage_version_
def set_usage_version(self, x):
self.has_usage_version_ = 1
self.usage_version_ = x
def clear_usage_version(self):
if self.has_usage_version_:
self.has_usage_version_ = 0
self.usage_version_ = 0
def has_usage_version(self): return self.has_usage_version_
def versions_only(self): return self.versions_only_
def set_versions_only(self, x):
self.has_versions_only_ = 1
self.versions_only_ = x
def clear_versions_only(self):
if self.has_versions_only_:
self.has_versions_only_ = 0
self.versions_only_ = 0
def has_versions_only(self): return self.has_versions_only_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
for i in xrange(x.version_id_size()): self.add_version_id(x.version_id(i))
if (x.has_start_time()): self.set_start_time(x.start_time())
if (x.has_end_time()): self.set_end_time(x.end_time())
if (x.has_resolution_hours()): self.set_resolution_hours(x.resolution_hours())
if (x.has_combine_versions()): self.set_combine_versions(x.combine_versions())
if (x.has_usage_version()): self.set_usage_version(x.usage_version())
if (x.has_versions_only()): self.set_versions_only(x.versions_only())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if len(self.version_id_) != len(x.version_id_): return 0
for e1, e2 in zip(self.version_id_, x.version_id_):
if e1 != e2: return 0
if self.has_start_time_ != x.has_start_time_: return 0
if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
if self.has_end_time_ != x.has_end_time_: return 0
if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
if self.has_resolution_hours_ != x.has_resolution_hours_: return 0
if self.has_resolution_hours_ and self.resolution_hours_ != x.resolution_hours_: return 0
if self.has_combine_versions_ != x.has_combine_versions_: return 0
if self.has_combine_versions_ and self.combine_versions_ != x.combine_versions_: return 0
if self.has_usage_version_ != x.has_usage_version_: return 0
if self.has_usage_version_ and self.usage_version_ != x.usage_version_: return 0
if self.has_versions_only_ != x.has_versions_only_: return 0
if self.has_versions_only_ and self.versions_only_ != x.versions_only_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += 1 * len(self.version_id_)
for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_resolution_hours_): n += 1 + self.lengthVarInt64(self.resolution_hours_)
if (self.has_combine_versions_): n += 2
if (self.has_usage_version_): n += 1 + self.lengthVarInt64(self.usage_version_)
if (self.has_versions_only_): n += 2
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
n += 1 * len(self.version_id_)
for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
if (self.has_resolution_hours_): n += 1 + self.lengthVarInt64(self.resolution_hours_)
if (self.has_combine_versions_): n += 2
if (self.has_usage_version_): n += 1 + self.lengthVarInt64(self.usage_version_)
if (self.has_versions_only_): n += 2
return n
def Clear(self):
self.clear_app_id()
self.clear_version_id()
self.clear_start_time()
self.clear_end_time()
self.clear_resolution_hours()
self.clear_combine_versions()
self.clear_usage_version()
self.clear_versions_only()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.version_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_[i])
if (self.has_start_time_):
out.putVarInt32(24)
out.putVarInt32(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(32)
out.putVarInt32(self.end_time_)
if (self.has_resolution_hours_):
out.putVarInt32(40)
out.putVarUint64(self.resolution_hours_)
if (self.has_combine_versions_):
out.putVarInt32(48)
out.putBoolean(self.combine_versions_)
if (self.has_usage_version_):
out.putVarInt32(56)
out.putVarInt32(self.usage_version_)
if (self.has_versions_only_):
out.putVarInt32(64)
out.putBoolean(self.versions_only_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.version_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.version_id_[i])
if (self.has_start_time_):
out.putVarInt32(24)
out.putVarInt32(self.start_time_)
if (self.has_end_time_):
out.putVarInt32(32)
out.putVarInt32(self.end_time_)
if (self.has_resolution_hours_):
out.putVarInt32(40)
out.putVarUint64(self.resolution_hours_)
if (self.has_combine_versions_):
out.putVarInt32(48)
out.putBoolean(self.combine_versions_)
if (self.has_usage_version_):
out.putVarInt32(56)
out.putVarInt32(self.usage_version_)
if (self.has_versions_only_):
out.putVarInt32(64)
out.putBoolean(self.versions_only_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.add_version_id(d.getPrefixedString())
continue
if tt == 24:
self.set_start_time(d.getVarInt32())
continue
if tt == 32:
self.set_end_time(d.getVarInt32())
continue
if tt == 40:
self.set_resolution_hours(d.getVarUint64())
continue
if tt == 48:
self.set_combine_versions(d.getBoolean())
continue
if tt == 56:
self.set_usage_version(d.getVarInt32())
continue
if tt == 64:
self.set_versions_only(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
cnt=0
for e in self.version_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("version_id%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt32(self.start_time_))
if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt32(self.end_time_))
if self.has_resolution_hours_: res+=prefix+("resolution_hours: %s\n" % self.DebugFormatInt64(self.resolution_hours_))
if self.has_combine_versions_: res+=prefix+("combine_versions: %s\n" % self.DebugFormatBool(self.combine_versions_))
if self.has_usage_version_: res+=prefix+("usage_version: %s\n" % self.DebugFormatInt32(self.usage_version_))
if self.has_versions_only_: res+=prefix+("versions_only: %s\n" % self.DebugFormatBool(self.versions_only_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kversion_id = 2
kstart_time = 3
kend_time = 4
kresolution_hours = 5
kcombine_versions = 6
kusage_version = 7
kversions_only = 8
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "version_id",
3: "start_time",
4: "end_time",
5: "resolution_hours",
6: "combine_versions",
7: "usage_version",
8: "versions_only",
}, 8)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
}, 8, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageRequest'
class LogUsageResponse(ProtocolBuffer.ProtocolMessage):
has_summary_ = 0
summary_ = None
def __init__(self, contents=None):
self.usage_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def usage_size(self): return len(self.usage_)
def usage_list(self): return self.usage_
def usage(self, i):
return self.usage_[i]
def mutable_usage(self, i):
return self.usage_[i]
def add_usage(self):
x = LogUsageRecord()
self.usage_.append(x)
return x
def clear_usage(self):
self.usage_ = []
def summary(self):
if self.summary_ is None:
self.lazy_init_lock_.acquire()
try:
if self.summary_ is None: self.summary_ = LogUsageRecord()
finally:
self.lazy_init_lock_.release()
return self.summary_
def mutable_summary(self): self.has_summary_ = 1; return self.summary()
def clear_summary(self):
if self.has_summary_:
self.has_summary_ = 0;
if self.summary_ is not None: self.summary_.Clear()
def has_summary(self): return self.has_summary_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.usage_size()): self.add_usage().CopyFrom(x.usage(i))
if (x.has_summary()): self.mutable_summary().MergeFrom(x.summary())
def Equals(self, x):
if x is self: return 1
if len(self.usage_) != len(x.usage_): return 0
for e1, e2 in zip(self.usage_, x.usage_):
if e1 != e2: return 0
if self.has_summary_ != x.has_summary_: return 0
if self.has_summary_ and self.summary_ != x.summary_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.usage_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_summary_ and not self.summary_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.usage_)
for i in xrange(len(self.usage_)): n += self.lengthString(self.usage_[i].ByteSize())
if (self.has_summary_): n += 1 + self.lengthString(self.summary_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.usage_)
for i in xrange(len(self.usage_)): n += self.lengthString(self.usage_[i].ByteSizePartial())
if (self.has_summary_): n += 1 + self.lengthString(self.summary_.ByteSizePartial())
return n
def Clear(self):
self.clear_usage()
self.clear_summary()
def OutputUnchecked(self, out):
for i in xrange(len(self.usage_)):
out.putVarInt32(10)
out.putVarInt32(self.usage_[i].ByteSize())
self.usage_[i].OutputUnchecked(out)
if (self.has_summary_):
out.putVarInt32(18)
out.putVarInt32(self.summary_.ByteSize())
self.summary_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.usage_)):
out.putVarInt32(10)
out.putVarInt32(self.usage_[i].ByteSizePartial())
self.usage_[i].OutputPartial(out)
if (self.has_summary_):
out.putVarInt32(18)
out.putVarInt32(self.summary_.ByteSizePartial())
self.summary_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_usage().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_summary().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.usage_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("usage%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_summary_:
res+=prefix+"summary <\n"
res+=self.summary_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kusage = 1
ksummary = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "usage",
2: "summary",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageResponse'
if _extension_runtime:
pass
__all__ = ['LogServiceError','UserAppLogLine','UserAppLogGroup','FlushRequest','SetStatusRequest','LogOffset','LogLine','RequestLog','LogModuleVersion','LogReadRequest','LogReadResponse','LogUsageRecord','LogUsageRequest','LogUsageResponse']
|
kittiu/odoo
|
refs/heads/8.0
|
addons/crm/crm.py
|
267
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.http import request
AVAILABLE_PRIORITIES = [
('0', 'Very Low'),
('1', 'Low'),
('2', 'Normal'),
('3', 'High'),
('4', 'Very High'),
]
class crm_tracking_medium(osv.Model):
# OLD crm.case.channel
_name = "crm.tracking.medium"
_description = "Channels"
_order = 'name'
_columns = {
'name': fields.char('Channel Name', required=True),
'active': fields.boolean('Active'),
}
_defaults = {
'active': lambda *a: 1,
}
class crm_tracking_campaign(osv.Model):
# OLD crm.case.resource.type
_name = "crm.tracking.campaign"
_description = "Campaign"
_rec_name = "name"
_columns = {
'name': fields.char('Campaign Name', required=True, translate=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
class crm_tracking_source(osv.Model):
_name = "crm.tracking.source"
_description = "Source"
_rec_name = "name"
_columns = {
'name': fields.char('Source Name', required=True, translate=True),
}
class crm_tracking_mixin(osv.AbstractModel):
"""Mixin class for objects which can be tracked by marketing. """
_name = 'crm.tracking.mixin'
_columns = {
'campaign_id': fields.many2one('crm.tracking.campaign', 'Campaign', # old domain ="['|',('section_id','=',section_id),('section_id','=',False)]"
help="This is a name that helps you keep track of your different campaign efforts Ex: Fall_Drive, Christmas_Special"),
'source_id': fields.many2one('crm.tracking.source', 'Source', help="This is the source of the link Ex: Search Engine, another domain, or name of email list"),
'medium_id': fields.many2one('crm.tracking.medium', 'Channel', help="This is the method of delivery. Ex: Postcard, Email, or Banner Ad", oldname='channel_id'),
}
def tracking_fields(self):
return [('utm_campaign', 'campaign_id'), ('utm_source', 'source_id'), ('utm_medium', 'medium_id')]
def tracking_get_values(self, cr, uid, vals, context=None):
for key, fname in self.tracking_fields():
field = self._fields[fname]
            value = vals.get(fname) or (request and request.httprequest.cookies.get(key))  # the params should always be in the cookies, set by the dispatch from ir_http
if field.type == 'many2one' and isinstance(value, basestring):
# if we receive a string for a many2one, we search/create the id
if value:
Model = self.pool[field.comodel_name]
rel_id = Model.name_search(cr, uid, value, context=context)
if rel_id:
rel_id = rel_id[0][0]
else:
rel_id = Model.create(cr, uid, {'name': value}, context=context)
vals[fname] = rel_id
else:
                # handle field types other than many2one
vals[fname] = value
return vals
def _get_default_track(self, cr, uid, field, context=None):
return self.tracking_get_values(cr, uid, {}, context=context).get(field)
_defaults = {
'source_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'source_id', ctx),
'campaign_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'campaign_id', ctx),
'medium_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'medium_id', ctx),
}
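# A minimal standalone sketch (not part of the odoo module; the helper name
# below is hypothetical) of the resolution order tracking_get_values()
# applies: an explicit value in vals wins, otherwise the visitor's UTM
# cookie is used before the many2one name_search/create lookup.
def _demo_resolve_tracking(vals, cookies, tracking_fields):
    """Illustrative only: mirror the value-or-cookie fallback."""
    for cookie_key, fname in tracking_fields:
        if not vals.get(fname):
            vals[fname] = cookies.get(cookie_key)
    return vals
# e.g. _demo_resolve_tracking({}, {'utm_source': 'newsletter'},
#                             [('utm_source', 'source_id')])
# -> {'source_id': 'newsletter'}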
class crm_case_stage(osv.osv):
""" Model for case stages. This models the main stages of a document
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.case.stage"
_description = "Stage of case"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
'requirements': fields.text('Requirements'),
'section_ids': fields.many2many('crm.case.section', 'section_stage_rel', 'stage_id', 'section_id', string='Sections',
help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
'case_default': fields.boolean('Default to New Sales Team',
help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
'fold': fields.boolean('Folded in Kanban View',
                              help='This stage is folded in the kanban view when '
                                   'there are no records in that stage to display.'),
'type': fields.selection([('lead', 'Lead'), ('opportunity', 'Opportunity'), ('both', 'Both')],
string='Type', required=True,
help="This field is used to distinguish stages related to Leads from stages related to Opportunities, or to specify stages available for both types."),
}
_defaults = {
'sequence': 1,
'probability': 0.0,
'on_change': True,
'fold': False,
'type': 'both',
'case_default': True,
}
class crm_case_categ(osv.osv):
""" Category of Case """
_name = "crm.case.categ"
_description = "Category of Case"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'object_id': fields.many2one('ir.model', 'Object Name'),
}
def _find_object_id(self, cr, uid, context=None):
"""Finds id for case object"""
context = context or {}
object_id = context.get('object_id', False)
ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
return ids and ids[0] or False
_defaults = {
'object_id': _find_object_id
}
class crm_payment_mode(osv.osv):
""" Payment Mode for Fund """
_name = "crm.payment.mode"
_description = "CRM Payment Mode"
_columns = {
'name': fields.char('Name', required=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
msiedlarek/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/alpha/__init__.py
|
40
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
warnings.simplefilter('always', DeprecationWarning)
warnings.warn('the alpha API (includes this package) is deprecated, '
'unmaintained, and no longer tested. Please migrate to the beta '
'API.', DeprecationWarning, stacklevel=2)
|
slisson/intellij-community
|
refs/heads/master
|
python/testData/debug/test4.py
|
68
|
xval = 0
xvalue1 = 1
xvalue2 = 2
print(xvalue1 + xvalue2)
|
drammock/mne-python
|
refs/heads/main
|
examples/visualization/meg_sensors.py
|
10
|
"""
.. _ex-plot-meg-sensors:
======================================
Plotting sensor layouts of MEG systems
======================================
Show sensor layouts of different MEG systems.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import mne
from mne.io import read_raw_fif, read_raw_ctf, read_raw_bti, read_raw_kit
from mne.io import read_raw_artemis123
from mne.datasets import sample, spm_face, testing
from mne.viz import plot_alignment, set_3d_title
print(__doc__)
###############################################################################
# Neuromag
# --------
kwargs = dict(eeg=False, coord_frame='meg', show_axes=True, verbose=True)
raw = read_raw_fif(sample.data_path() + '/MEG/sample/sample_audvis_raw.fif')
fig = plot_alignment(raw.info, meg=('helmet', 'sensors'), **kwargs)
set_3d_title(figure=fig, title='Neuromag')
###############################################################################
# CTF
# ---
raw = read_raw_ctf(spm_face.data_path() +
'/MEG/spm/SPM_CTF_MEG_example_faces1_3D.ds')
fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs)
set_3d_title(figure=fig, title='CTF 275')
###############################################################################
# BTi
# ---
bti_path = op.abspath(op.dirname(mne.__file__)) + '/io/bti/tests/data/'
raw = read_raw_bti(op.join(bti_path, 'test_pdf_linux'),
op.join(bti_path, 'test_config_linux'),
op.join(bti_path, 'test_hs_linux'))
fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs)
set_3d_title(figure=fig, title='Magnes 3600wh')
###############################################################################
# KIT
# ---
kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/'
raw = read_raw_kit(op.join(kit_path, 'test.sqd'))
fig = plot_alignment(raw.info, meg=('helmet', 'sensors'), **kwargs)
set_3d_title(figure=fig, title='KIT')
###############################################################################
# Artemis123
# ----------
raw = read_raw_artemis123(op.join(
testing.data_path(), 'ARTEMIS123',
'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin'))
fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs)
set_3d_title(figure=fig, title='Artemis123')
|
joopert/home-assistant
|
refs/heads/dev
|
tests/helpers/test_storage.py
|
4
|
"""Tests for the storage helper."""
import asyncio
from datetime import timedelta
import json
from unittest.mock import patch, Mock
import pytest
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.common import async_fire_time_changed, mock_coro
MOCK_VERSION = 1
MOCK_KEY = "storage-test"
MOCK_DATA = {"hello": "world"}
MOCK_DATA2 = {"goodbye": "cruel world"}
@pytest.fixture
def store(hass):
"""Fixture of a store that prevents writing on HASS stop."""
yield storage.Store(hass, MOCK_VERSION, MOCK_KEY)
async def test_loading(hass, store):
"""Test we can save and load data."""
await store.async_save(MOCK_DATA)
data = await store.async_load()
assert data == MOCK_DATA
async def test_custom_encoder(hass):
"""Test we can save and load data."""
class JSONEncoder(json.JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY, encoder=JSONEncoder)
await store.async_save(Mock())
data = await store.async_load()
assert data == "9"
async def test_loading_non_existing(hass, store):
"""Test we can save and load data."""
with patch("homeassistant.util.json.open", side_effect=FileNotFoundError):
data = await store.async_load()
assert data is None
async def test_loading_parallel(hass, store, hass_storage, caplog):
"""Test we can save and load data."""
hass_storage[store.key] = {"version": MOCK_VERSION, "data": MOCK_DATA}
results = await asyncio.gather(store.async_load(), store.async_load())
assert results[0] is MOCK_DATA
assert results[1] is MOCK_DATA
assert caplog.text.count("Loading data for {}".format(store.key))
async def test_saving_with_delay(hass, store, hass_storage):
"""Test saving data after a delay."""
store.async_delay_save(lambda: MOCK_DATA, 1)
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_saving_on_stop(hass, hass_storage):
"""Test delayed saves trigger when we quit Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 1)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_loading_while_delay(hass, store, hass_storage):
"""Test we load new data even if not written yet."""
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "yes"}
async def test_writing_while_writing_delay(hass, store, hass_storage):
"""Test a write while a write with delay is active."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_migrator_no_existing_config(hass, store, hass_storage):
"""Test migrator with no existing config."""
with patch("os.path.isfile", return_value=False), patch.object(
store, "async_load", return_value=mock_coro({"cur": "config"})
):
data = await storage.async_migrator(hass, "old-path", store)
assert data == {"cur": "config"}
assert store.key not in hass_storage
async def test_migrator_existing_config(hass, store, hass_storage):
"""Test migrating existing config."""
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"}
)
assert len(mock_remove.mock_calls) == 1
assert data == {"old": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"data": data,
}
async def test_migrator_transforming_config(hass, store, hass_storage):
"""Test migrating config to new format."""
async def old_conf_migrate_func(old_config):
"""Migrate old config to new format."""
return {"new": old_config["old"]}
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass,
"old-path",
store,
old_conf_migrate_func=old_conf_migrate_func,
old_conf_load_func=lambda _: {"old": "config"},
)
assert len(mock_remove.mock_calls) == 1
assert data == {"new": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"data": data,
}
|
nhicher/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/routing/net_static_route.py
|
65
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on network appliances (routers, switches et al.)
description:
- This module provides declarative management of static
  IP routes on network appliances (routers, switches et al.).
options:
prefix:
description:
- Network prefix of the static route.
required: true
mask:
description:
- Network prefix mask of the static route.
required: true
next_hop:
description:
- Next hop IP of the static route.
required: true
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
purge:
description:
- Purge static routes not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
net_static_route:
aggregate:
      - { prefix: 192.168.2.0, mask: 255.255.255.0, next_hop: 10.0.0.1 }
      - { prefix: 192.168.3.0, mask: 255.255.255.0, next_hop: 10.0.2.1 }
- name: Remove static route collections
net_static_route:
aggregate:
- { prefix: 172.24.1.0/24, next_hop: 192.168.42.64 }
- { prefix: 172.24.3.0/24, next_hop: 192.168.42.64 }
state: absent
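# illustrative only: exercise the documented purge option to remove routes
# that are absent from the aggregate
- name: purge undeclared static routes
  net_static_route:
    aggregate:
      - { prefix: 192.168.2.0, mask: 255.255.255.0, next_hop: 10.0.0.1 }
    purge: yes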
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0/24 10.0.0.1
"""
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/ureports/text_writer.py
|
2
|
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Text formatting drivers for ureports"""
from __future__ import print_function
from pylint.reporters.ureports import BaseWriter
TITLE_UNDERLINES = [u'', u'=', u'-', u'`', u'.', u'~', u'^']
BULLETS = [u'*', u'-']
class TextWriter(BaseWriter):
"""format layouts as text
(ReStructured inspiration but not totally handled yet)
"""
def begin_format(self):
super(TextWriter, self).begin_format()
self.list_level = 0
def visit_section(self, layout):
"""display a section as text
"""
self.section += 1
self.writeln()
self.format_children(layout)
self.section -= 1
self.writeln()
def visit_title(self, layout):
title = u''.join(list(self.compute_content(layout)))
self.writeln(title)
try:
self.writeln(TITLE_UNDERLINES[self.section] * len(title))
except IndexError:
print("FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT")
def visit_paragraph(self, layout):
"""enter a paragraph"""
self.format_children(layout)
self.writeln()
def visit_table(self, layout):
"""display a table as text"""
table_content = self.get_table_content(layout)
# get columns width
cols_width = [0]*len(table_content[0])
for row in table_content:
for index, col in enumerate(row):
cols_width[index] = max(cols_width[index], len(col))
self.default_table(layout, table_content, cols_width)
self.writeln()
def default_table(self, layout, table_content, cols_width):
"""format a table"""
cols_width = [size+1 for size in cols_width]
format_strings = u' '.join([u'%%-%ss'] * len(cols_width))
format_strings = format_strings % tuple(cols_width)
format_strings = format_strings.split(u' ')
table_linesep = u'\n+' + u'+'.join([u'-'*w for w in cols_width]) + u'+\n'
headsep = u'\n+' + u'+'.join([u'='*w for w in cols_width]) + u'+\n'
# FIXME: layout.cheaders
self.write(table_linesep)
for index, line in enumerate(table_content):
self.write(u'|')
for line_index, at_index in enumerate(line):
self.write(format_strings[line_index] % at_index)
self.write(u'|')
if index == 0 and layout.rheaders:
self.write(headsep)
else:
self.write(table_linesep)
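    # Hedged illustration of what default_table() emits for a 2x2 table with
    # rheaders set (each column is the widest cell plus one space):
    #
    #   +-----+------+
    #   |name |value |
    #   +=====+======+
    #   |foo  |1     |
    #   +-----+------+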
def visit_verbatimtext(self, layout):
"""display a verbatim layout as text (so difficult ;)
"""
self.writeln(u'::\n')
for line in layout.data.splitlines():
self.writeln(u' ' + line)
self.writeln()
def visit_text(self, layout):
"""add some text"""
self.write(u'%s' % layout.data)
|
Behemyth/LifeSim
|
refs/heads/master
|
LifeSim/Libraries/bullet/Demos/NativeClient/bin_html/httpd.py
|
39
|
#!/usr/bin/python
#
# Copyright (c) 2011, The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""A tiny web server.
This is intended to be used for testing, and only run from within the examples
directory.
"""
import BaseHTTPServer
import logging
import os
import SimpleHTTPServer
import SocketServer
import sys
import urlparse
logging.getLogger().setLevel(logging.INFO)
# Using 'localhost' means that we only accept connections
# via the loop back interface.
SERVER_PORT = 5103
SERVER_HOST = ''
# We only run from the examples directory (the one that contains scons-out), so
# that not too much is exposed via this HTTP server. Everything in the
# directory is served, so there should never be anything potentially sensitive
# in the serving directory, especially if the machine might be a
# multi-user machine and not all users are trusted. We only serve via
# the loopback interface.
SAFE_DIR_COMPONENTS = ['bin_html']
SAFE_DIR_SUFFIX = os.path.join(*SAFE_DIR_COMPONENTS)
def SanityCheckDirectory():
if os.getcwd().endswith(SAFE_DIR_SUFFIX):
return
logging.error('httpd.py should only be run from the %s', SAFE_DIR_SUFFIX)
logging.error('directory for testing purposes.')
logging.error('We are currently in %s', os.getcwd())
sys.exit(1)
# An HTTP server that will quit when |is_running| is set to False. We also use
# SocketServer.ThreadingMixIn in order to handle requests asynchronously for
# faster responses.
class QuittableHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
def serve_forever(self, timeout=0.5):
self.is_running = True
self.timeout = timeout
while self.is_running:
self.handle_request()
def shutdown(self):
self.is_running = False
return 1
# "Safely" split a string at |sep| into a [key, value] pair. If |sep| does not
# exist in |str|, then the entire |str| is the key and the value is set to an
# empty string.
def KeyValuePair(str, sep='='):
if sep in str:
    return str.split(sep, 1)
else:
return [str, '']
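# For example, KeyValuePair('quit=1') returns ['quit', '1'],
# KeyValuePair('quit') returns ['quit', ''], and KeyValuePair('a=b=c')
# keeps everything after the first '=' in the value: ['a', 'b=c'].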
# A small handler that looks for '?quit=1' query in the path and shuts itself
# down if it finds that parameter.
class QuittableHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
(_, _, _, query, _) = urlparse.urlsplit(self.path)
url_params = dict([KeyValuePair(key_value)
for key_value in query.split('&')])
if 'quit' in url_params and '1' in url_params['quit']:
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', '0')
self.end_headers()
self.server.shutdown()
return
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
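# A request such as http://localhost:5103/?quit=1 therefore returns an empty
# 200 response and stops the serve_forever() loop via shutdown().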
def Run(server_address,
server_class=QuittableHTTPServer,
handler_class=QuittableHTTPHandler):
httpd = server_class(server_address, handler_class)
logging.info("Starting local server on port %d", server_address[1])
logging.info("To shut down send http://localhost:%d?quit=1",
server_address[1])
try:
httpd.serve_forever()
except KeyboardInterrupt:
logging.info("Received keyboard interrupt.")
httpd.server_close()
logging.info("Shutting down local server on port %d", server_address[1])
if __name__ == '__main__':
SanityCheckDirectory()
if len(sys.argv) > 1:
Run((SERVER_HOST, int(sys.argv[1])))
else:
Run((SERVER_HOST, SERVER_PORT))
sys.exit(0)
|
wkentaro/chainer
|
refs/heads/master
|
tests/chainer_tests/links_tests/loss_tests/test_black_out.py
|
5
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestBlackOut(unittest.TestCase):
batch_size = 5
in_size = 4
count = [3, 2, 1]
n_samples = 7
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
x_shape = (self.batch_size, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.t = numpy.random.randint(
len(self.count), size=self.batch_size).astype(numpy.int32)
self.link = links.BlackOut(self.in_size, self.count, self.n_samples)
self.w = numpy.random.uniform(-1, 1, self.link.W.data.shape)
self.link.W.data[:] = self.w
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3}
else:
self.check_forward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
self.link.sample_data = self.link.sampler.sample(
(self.batch_size, self.n_samples))
y = self.link(x, t)
expect_y = numpy.empty((self.batch_size), dtype=self.dtype)
samples = cuda.to_cpu(self.link.sample_data)
for b in range(self.batch_size):
z = 0
for i in range(self.n_samples):
w = samples[b, i]
z += numpy.exp(self.w[w].dot(self.x[b]))
y0 = self.w[self.t[b]].dot(self.x[b])
z += numpy.exp(y0)
l = y0 - numpy.log(z)
for i in range(self.n_samples):
w = samples[b, i]
l += numpy.log(1 - numpy.exp(self.w[w].dot(self.x[b])) / z)
expect_y[b] = l
loss = -numpy.sum(expect_y) / self.batch_size
testing.assert_allclose(y.data, loss, **self.check_forward_options)
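    # The loop above mirrors (a hedged reading of) the BlackOut objective:
    #   p(k) = exp(w_k . x) / Z,  Z = exp(w_t . x) + sum_i exp(w_{s_i} . x)
    #   L    = log p(t) + sum_i log(1 - p(s_i))
    # and the link is expected to return -mean(L) over the batch.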
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_forward_chainerx_native(self):
device = chainer.get_device('native:0')
self.link.to_device(device)
self.check_forward(device.send(self.x), device.send(self.t))
@attr.chainerx
@attr.gpu
def test_forward_chainerx_cuda(self):
device = chainer.get_device('cuda:0')
self.link.to_device(device)
self.check_forward(device.send(self.x), device.send(self.t))
testing.run_module(__name__, __file__)
|
lttng/lttng-ci
|
refs/heads/master
|
automation/update_standalone.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 - Michael Jeanson <mjeanson@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" This script is used to upgrade the base snapshot of standalone ci slaves """
USERNAME = ''
APIKEY = ''
JENKINS_URL = 'https://ci.lttng.org'
DISTRO_LIST = ['el', 'sles', 'ubuntu']
DEFAULT_DISTRO = 'ubuntu'
DISTRO_COMMAND = {
'el': 'yum update -y && package-cleanup -y --oldkernels --count=2 && yum clean all',
'sles': 'zypper --non-interactive refresh && zypper --non-interactive patch --auto-agree-with-licenses --with-interactive',
'ubuntu': 'apt-get update && apt-get dist-upgrade -V -y && apt-get clean && apt-get --purge autoremove -y',
}
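# Example invocation (hostnames below are hypothetical):
#   ./update_standalone.py --distro el ci-node-01 vmhost-03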
BASESNAP = 'base-configuration'
SNAPSHOTXML = """
<domainsnapshot>
<name>%s</name>
<description>Snapshot of OS install and updates</description>
<memory snapshot='no'/>
</domainsnapshot>
""" % BASESNAP
import argparse
import sys
import libvirt
from jenkinsapi.jenkins import Jenkins
from time import sleep
import paramiko
import select
def main():
""" Main """
parser = argparse.ArgumentParser(description='Update base snapshot.')
parser.add_argument('instance_name', metavar='INSTANCE', type=str,
help='the shortname of the instance to update')
parser.add_argument('vmhost_name', metavar='VMHOST', type=str,
help='the hostname of the VM host')
parser.add_argument('--distro', choices=DISTRO_LIST,
default=DEFAULT_DISTRO, type=str,
help='the distro of the target instance')
args = parser.parse_args()
instance_name = args.instance_name
vmhost_name = args.vmhost_name
distro = args.distro
    # Get a Jenkins connection
jenkins = Jenkins(JENKINS_URL, username=USERNAME, password=APIKEY)
# Get jenkins node
print("Getting node %s from Jenkins..." % instance_name)
node = jenkins.get_node(instance_name)
if not node:
print("Could not get node %s on %s" % (instance_name, JENKINS_URL))
sys.exit(1)
# Check if node is idle
if not node.is_idle:
print("Node %s is not idle" % instance_name)
sys.exit(1)
# Set node temporarily offline
if not node.is_temporarily_offline():
node.toggle_temporarily_offline('Down for upgrade to base snapshot')
    # Get a libvirt connection
    print("Opening libvirt connection to %s..." % vmhost_name)
vmhost = libvirt.open("qemu+ssh://root@%s/system" % vmhost_name)
if not vmhost:
print("Could not connect to libvirt on %s" % vmhost_name)
sys.exit(1)
# Get instance
print("Getting instance %s from libvirt..." % instance_name)
vminstance = vmhost.lookupByName(instance_name)
if not vminstance:
print("Could not get instance %s on %s" % (instance_name, vmhost_name))
sys.exit(1)
# If instance is running, shutdown
print("Checking if instance %s is running..." % instance_name)
if vminstance.isActive():
try:
print("Shutting down instance %s" % instance_name)
vminstance.destroy()
except:
print("Failed to shutdown %s", instance_name)
sys.exit(1)
# Revert to base snapshot
print("Getting base snapshot...")
basesnap = vminstance.snapshotLookupByName(BASESNAP)
if not basesnap:
print("Could not find base snapshot %s" % BASESNAP)
sys.exit(1)
#if not basesnap.isCurrent():
# print("Not current snapshot")
print("Reverting to base snapshot...")
try:
vminstance.revertToSnapshot(basesnap)
except:
print("Failed to revert to base snapshot %s" % basesnap.getName())
sys.exit(1)
# Launch instance
try:
print("Starting instance %s.." % instance_name)
vminstance.create()
except:
print("Failed to start instance %s" % instance_name)
sys.exit(1)
# Wait for instance to boot
print("Waiting for instance to boot...")
sleep(10)
# Run dist-upgrade
print("Running upgrade command...")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
client.connect(instance_name, username="root")
stdin, stdout, stderr = client.exec_command(DISTRO_COMMAND[distro])
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print(stdout.channel.recv(1024)),
if stdout.channel.recv_exit_status() != 0:
print("Update command failed!")
sys.exit(1)
    # Close the SSH connection
client.close()
# Shutdown VM
print("Shutting down instance...")
try:
vminstance.shutdown()
except:
print("Failed to shutdown instance %s" % instance_name)
sys.exit(1)
while vminstance.isActive():
sleep(1)
print("Waiting for instance to shutdown...")
# Delete original base snapshot
print("Deleting current base snapshot...")
try:
basesnap.delete()
except:
print("Failed to delete base snapshot %s" % basesnap.getName())
sys.exit(1)
# Create new base snapshot
print("Creating new base snapshot...")
try:
vminstance.snapshotCreateXML(SNAPSHOTXML)
except:
print("Failed to create new snapshot.")
sys.exit(1)
# Set node online in jenkins
if node.is_temporarily_offline():
node.toggle_temporarily_offline()
# And we're done!
print("All done!")
if __name__ == "__main__":
main()
# EOF
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pygments-2.0.2/pygments/lexers/_mapping.py
|
43
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something in a builtin lexer definition, run this script from
    the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
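# Each value below is a 5-tuple describing one lexer (a reading of the
# generated layout): (module path, display name, aliases, filename
# patterns, mimetypes).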
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp', 'elisp', 'emacs', 'emacs-lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.pascal', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
    'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+myghty')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml',), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs',), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
}
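# Each value above is a tuple of (module, name, aliases, filenames,
# mimetypes).  Running this module directly executes the block below, which
# rescans the lexer packages and rewrites the LEXERS table in place.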
if __name__ == '__main__': # pragma: no cover
import sys
import os
# lookup lexers
found_lexers = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers%s.%s' % (
root[1:].replace('/', '.'), filename[:-3])
print(module_name)
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
found_lexers.append(
'%r: %r' % (lexer_name,
(module_name,
lexer.name,
tuple(lexer.aliases),
tuple(lexer.filenames),
tuple(lexer.mimetypes))))
# sort them to make the diff minimal
found_lexers.sort()
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
header = content[:content.find('LEXERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
with open(__file__, 'w') as fp:
fp.write(header)
fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
|
marissazhou/django
|
refs/heads/master
|
tests/save_delete_hooks/models.py
|
409
|
"""
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.data = []
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
def save(self, *args, **kwargs):
self.data.append("Before save")
# Call the "real" save() method
super(Person, self).save(*args, **kwargs)
self.data.append("After save")
def delete(self):
self.data.append("Before deletion")
# Call the "real" delete() method
super(Person, self).delete()
self.data.append("After deletion")
|
LockScreen/Backend
|
refs/heads/master
|
venv/lib/python2.7/site-packages/requests/packages/urllib3/packages/six.py
|
2374
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2011 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
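# Usage sketch (hypothetical alias name): extra moves can be registered and
# unregistered at runtime; resolution stays lazy until first attribute access.
#
#     add_move(MovedModule("pickle_alias", "cPickle", "pickle"))
#     remove_move("pickle_alias")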
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
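# Usage sketch: these helpers pick the right dict method for the running
# interpreter, so callers can iterate without building intermediate lists.
#
#     for key, value in iteritems({'a': 1, 'b': 2}):
#         ...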
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
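# Usage sketch: version-appropriate literals without conditional code.
#
#     data = b("GIF89a")   # binary_type on both Python 2 and 3
#     text = u("hello")    # text_type on both Python 2 and 3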
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
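# Usage sketch (hypothetical Meta and Base classes): the temporary "NewBase"
# class carries the metaclass, so the same definition works on Python 2 and 3.
#
#     class MyClass(with_metaclass(Meta, Base)):
#         pass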
|
un33k/robotframework
|
refs/heads/master
|
src/robot/utils/error.py
|
11
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON
from .unic import unic
EXCLUDE_ROBOT_TRACES = True # Exclude internal traceback by default or not.
RERAISED_EXCEPTIONS = (KeyboardInterrupt, SystemExit, MemoryError)
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
RERAISED_EXCEPTIONS += (OutOfMemoryError,)
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
This method handles also exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
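# Usage sketch (illustrative): ErrorDetails() must be called while an
# exception is being handled, because it reads sys.exc_info().
#
#     try:
#         1 / 0
#     except Exception:
#         details = ErrorDetails()
#         print(details.message)    # e.g. 'ZeroDivisionError: ...'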
class _ErrorDetails(object):
_generic_exception_names = ('AssertionError', 'AssertionFailedError',
'Exception', 'Error', 'RuntimeError',
'RuntimeException')
def __init__(self, exc_type, exc_value, exc_traceback,
exclude_robot_traces=True):
self.error = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._exclude_robot_traces = exclude_robot_traces
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
def _get_message(self):
raise NotImplementedError
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_details(self):
raise NotImplementedError
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if self._is_generic_exception(name):
return message
return '%s: %s' % (name, message)
def _is_generic_exception(self, name):
return (name in self._generic_exception_names or
isinstance(self.error, RobotError) or
getattr(self.error, 'ROBOT_SUPPRESS_NAME', False))
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
        # If the exception is a "string exception" without a message, exc_value is None
if self.error is None:
return unic(self._exc_type)
name = self._get_name(self._exc_type)
try:
msg = unicode(self.error)
except UnicodeError: # Happens if message is Unicode and version < 2.6
msg = ' '.join(unic(a) for a in self.error.args)
return self._format_message(name, msg)
def _get_details(self):
if isinstance(self.error, RobotError):
return self.error.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
if not self._exclude_robot_traces:
return False
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
    _java_trace_re = re.compile(r'^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self.error.getMessage()
else:
exc_msg = str(self.error)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self.error.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self.error.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
|
timhughes/gnome15
|
refs/heads/master
|
src/gnome15/util/g15cairo.py
|
8
|
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <tanktarta@blueyonder.co.uk>
# Copyright (C) 2013 Nuno Araujo <nuno.araujo@russo79.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Cairo utilities.

Has functions to transform, load and convert cairo surfaces.
'''
import gtk.gdk
import os, os.path
import cairo
import math
import rsvg
import urllib
import base64
import xdg.Mime as mime
import g15convert
import g15os
import gnome15.g15globals as g15globals
# Logging
import logging
logger = logging.getLogger(__name__)
from cStringIO import StringIO
def rotate(context, degrees):
    context.rotate(g15convert.degrees_to_radians(degrees))
def rotate_around_center(context, width, height, degrees):
    # Translate to (height / 2, width / 2) -- the centre of a surface with
    # swapped dimensions -- rotate, then translate back by the original
    # half-size, i.e. this is geared towards 90/270 degree rotations.
    context.translate(height * 0.5, width * 0.5)
    context.rotate(degrees * (math.pi / 180))
    context.translate(-width * 0.5, -height * 0.5)
def flip_horizontal(context, width, height):
flip_hv_centered_on(context, -1, 1, width / 2, height / 2)
def flip_vertical(context, width, height):
    flip_hv_centered_on(context, 1, -1, width / 2, height / 2)
def flip_hv_centered_on(context, fx, fy, cx, cy):
    # Affine flip about (cx, cy): x' = fx*x + cx*(1 - fx), y' = fy*y + cy*(1 - fy)
    mtrx = cairo.Matrix(fx, 0, 0, fy, cx * (1 - fx), cy * (1 - fy))
    context.transform(mtrx)
def get_cache_filename(filename, size = None):
cache_file = base64.urlsafe_b64encode("%s-%s" % ( filename, str(size if size is not None else "0,0") ) )
g15os.mkdir_p(g15globals.user_cache_dir)
return os.path.join(g15globals.user_cache_dir, "%s.img" % cache_file)
def get_image_cache_file(filename, size = None):
full_cache_path = get_cache_filename(filename, size)
if os.path.exists(full_cache_path):
return full_cache_path
def is_url(path):
# TODO try harder
return "://" in path
def load_surface_from_file(filename, size = None):
type = None
    if filename is None:
logger.warning("Empty filename requested")
return None
if filename.startswith("http:") or filename.startswith("https:"):
full_cache_path = get_image_cache_file(filename, size)
if full_cache_path:
meta_fileobj = open(full_cache_path + "m", "r")
type = meta_fileobj.readline()
meta_fileobj.close()
if type == "image/svg+xml" or filename.lower().endswith(".svg"):
return load_svg_as_surface(filename, size)
else:
return pixbuf_to_surface(gtk.gdk.pixbuf_new_from_file(full_cache_path), size)
if is_url(filename):
type = None
try:
file = urllib.urlopen(filename)
data = file.read()
type = file.info().gettype()
if filename.startswith("file://"):
type = str(mime.get_type(filename))
if filename.startswith("http:") or filename.startswith("https:"):
full_cache_path = get_cache_filename(filename, size)
cache_fileobj = open(full_cache_path, "w")
cache_fileobj.write(data)
cache_fileobj.close()
meta_fileobj = open(full_cache_path + "m", "w")
meta_fileobj.write(type + "\n")
meta_fileobj.close()
if type == "image/svg+xml" or filename.lower().endswith(".svg"):
svg = rsvg.Handle()
try:
if not svg.write(data):
raise Exception("Failed to load SVG")
svg_size = svg.get_dimension_data()[2:4]
                    if size is None:
size = svg_size
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(size[0]) if not isinstance(size, int) else size, int(size[1]) if not isinstance(size, int) else size)
context = cairo.Context(surface)
if size != svg_size:
scale = get_scale(size, svg_size)
context.scale(scale, scale)
svg.render_cairo(context)
surface.flush()
return surface
finally:
svg.close()
else:
if type == "text/plain":
if filename.startswith("file://"):
pixbuf = gtk.gdk.pixbuf_new_from_file(filename[7:])
return pixbuf_to_surface(pixbuf, size)
raise Exception("Could not determine type")
else:
pbl = gtk.gdk.pixbuf_loader_new_with_mime_type(type)
pbl.write(data)
pixbuf = pbl.get_pixbuf()
pbl.close()
return pixbuf_to_surface(pixbuf, size)
return None
except Exception as e:
logger.warning("Failed to get image %s (%s).", filename, type, exc_info = e)
return None
else:
if os.path.exists(filename):
try:
if filename.lower().endswith(".svg"):
if os.path.islink(filename):
filename = os.path.realpath(filename)
return load_svg_as_surface(filename, size)
else:
return pixbuf_to_surface(gtk.gdk.pixbuf_new_from_file(filename), size)
except Exception as e:
logger.warning("Failed to get image %s (%s).", filename, type, exc_info = e)
return None
def load_svg_as_surface(filename, size):
svg = rsvg.Handle(filename)
try:
svg_size = svg.get_dimension_data()[2:4]
        if size is None:
size = svg_size
sx = int(size) if isinstance(size, int) or isinstance(size, float) else int(size[0])
sy = int(size) if isinstance(size, int) or isinstance(size, float) else int(size[1])
surface = cairo.ImageSurface(0, sx, sy)
context = cairo.Context(surface)
if size != svg_size:
scale = get_scale(size, svg_size)
context.scale(scale, scale)
svg.render_cairo(context)
return surface
finally:
svg.close()
def image_to_surface(image, type = "ppm"):
# TODO make better
return pixbuf_to_surface(image_to_pixbuf(image, type))
def pixbuf_to_surface(pixbuf, size = None):
x = pixbuf.get_width()
y = pixbuf.get_height()
scale = get_scale(size, (x, y))
surface = cairo.ImageSurface(0, int(x * scale), int(y * scale))
context = cairo.Context(surface)
gdk_context = gtk.gdk.CairoContext(context)
    if size is not None:
gdk_context.scale(scale, scale)
gdk_context.set_source_pixbuf(pixbuf,0,0)
gdk_context.paint()
gdk_context.scale(1 / scale, 1 / scale)
return surface
def image_to_pixbuf(im, type = "ppm"):
    '''Convert a PIL image to a GDK pixbuf'''
p_type = type
if type == "ppm":
p_type = "pnm"
file1 = StringIO()
try:
im.save(file1, type)
contents = file1.getvalue()
finally:
file1.close()
loader = gtk.gdk.PixbufLoader(p_type)
loader.write(contents, len(contents))
pixbuf = loader.get_pixbuf()
loader.close()
return pixbuf
def surface_to_pixbuf(surface):
    file1 = StringIO()
    try:
        surface.write_to_png(file1)
contents = file1.getvalue()
finally:
file1.close()
loader = gtk.gdk.PixbufLoader("png")
loader.write(contents, len(contents))
pixbuf = loader.get_pixbuf()
loader.close()
return pixbuf
def paint_thumbnail_image(allocated_size, image, canvas):
s = float(allocated_size) / image.get_height()
canvas.save()
canvas.scale(s, s)
canvas.set_source_surface(image)
canvas.paint()
canvas.scale(1 / s, 1 / s)
canvas.restore()
return image.get_width() * s
def get_scale(target, actual):
scale = 1.0
    if target is not None:
if isinstance(target, int) or isinstance(target, float):
sx = float(target) / actual[0]
sy = float(target) / actual[1]
else:
sx = float(target[0]) / actual[0]
sy = float(target[1]) / actual[1]
scale = max(sx, sy)
return scale
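# Usage sketch: an integer/float target scales by the larger axis ratio, so
# get_scale(24, (48, 96)) == max(24 / 48.0, 24 / 96.0) == 0.5.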
pt_to_px = {
6.0: 8.0,
7.0: 9,
7.5: 10,
8.0: 11,
9.0: 12,
10.0: 13,
10.5: 14,
11.0: 15,
12.0: 16,
13.0: 17,
13.5: 18,
14.0: 19,
14.5: 20,
15.0: 21,
16.0: 22,
17.0: 23,
18.0: 24,
20.0: 26,
22.0: 29,
24.0: 32,
26.0: 35,
27.0: 36,
28.0: 37,
29.0: 38,
30.0: 40,
32.0: 42,
34.0: 45,
36.0: 48
}
px_to_pt = {}
for pt in pt_to_px:
px_to_pt[pt_to_px[pt]] = pt
def approx_px_to_pt(px):
px = round(px)
if px in px_to_pt:
return px_to_pt[px]
else:
return int(px * 72.0 / 96)
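# Usage sketch: exact table hits win, otherwise fall back to the standard
# 72 pt-per-inch / 96 px-per-inch ratio.
#
#     approx_px_to_pt(16)    # -> 12.0 (table lookup)
#     approx_px_to_pt(100)   # -> 75   (int(100 * 72.0 / 96))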
|
blacklin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/functools.py
|
68
|
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>,
# Raymond Hettinger <python at rcn.com>,
# and Łukasz Langa <lukasz at langa.pl>.
# Copyright (C) 2006-2013 Python Software Foundation.
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
'partialmethod', 'singledispatch']
try:
from _functools import reduce
except ImportError:
pass
from abc import get_cache_token
from collections import namedtuple
from types import MappingProxyType
from weakref import WeakKeyDictionary
try:
from _thread import RLock
except ImportError:
class RLock:
'Dummy reentrant lock for builds without threads'
def __enter__(self): pass
def __exit__(self, exctype, excinst, exctb): pass
################################################################################
### update_wrapper() and wraps() decorator
################################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
################################################################################
### total_ordering class decorator
################################################################################
# The correct way to indicate that a comparison operation doesn't
# recognise the other type is to return NotImplemented and let the
# interpreter handle raising TypeError if both operands return
# NotImplemented from their respective comparison methods
#
# This makes the implementation of total_ordering more complicated, since
# we need to be careful not to trigger infinite recursion when two
# different types that both use this decorator encounter each other.
#
# For example, if a type implements __lt__, it's natural to define
# __gt__ as something like:
#
# lambda self, other: not self < other and not self == other
#
# However, using the operator syntax like that ends up invoking the full
# type checking machinery again and means we can end up bouncing back and
# forth between the two operands until we run out of stack space.
#
# The solution is to define helper functions that invoke the appropriate
# magic methods directly, ensuring we only try each operand once, and
# return NotImplemented immediately if it is returned from the
# underlying user provided method. Using this scheme, the __gt__ derived
# from a user provided __lt__ becomes:
#
# lambda self, other: _not_op_and_not_eq(self.__lt__, self, other))
def _not_op(op, other):
# "not a < b" handles "a >= b"
# "not a <= b" handles "a > b"
# "not a >= b" handles "a < b"
# "not a > b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result
def _op_or_eq(op, self, other):
# "a < b or a == b" handles "a <= b"
# "a > b or a == b" handles "a >= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result or self == other
def _not_op_and_not_eq(op, self, other):
# "not (a < b or a == b)" handles "a > b"
# "not a < b and a != b" is equivalent
# "not (a > b or a == b)" handles "a < b"
# "not a > b and a != b" is equivalent
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result and self != other
def _not_op_or_eq(op, self, other):
# "not a <= b or a == b" handles "a >= b"
# "not a >= b or a == b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result or self == other
def _op_and_not_eq(op, self, other):
# "a <= b and not a == b" handles "a < b"
# "a >= b and not a == b" handles "a > b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result and self != other
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: _not_op_and_not_eq(self.__lt__, self, other)),
('__le__', lambda self, other: _op_or_eq(self.__lt__, self, other)),
('__ge__', lambda self, other: _not_op(self.__lt__, other))],
'__le__': [('__ge__', lambda self, other: _not_op_or_eq(self.__le__, self, other)),
('__lt__', lambda self, other: _op_and_not_eq(self.__le__, self, other)),
('__gt__', lambda self, other: _not_op(self.__le__, other))],
'__gt__': [('__lt__', lambda self, other: _not_op_and_not_eq(self.__gt__, self, other)),
('__ge__', lambda self, other: _op_or_eq(self.__gt__, self, other)),
('__le__', lambda self, other: _not_op(self.__gt__, other))],
'__ge__': [('__le__', lambda self, other: _not_op_or_eq(self.__ge__, self, other)),
('__gt__', lambda self, other: _op_and_not_eq(self.__ge__, self, other)),
('__lt__', lambda self, other: _not_op(self.__ge__, other))]
}
# Find user-defined comparisons (not those inherited from object).
roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
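# Usage sketch (hypothetical class): defining __eq__ plus a single ordering
# method is enough; the decorator derives the remaining three.
#
#     @total_ordering
#     class Version:
#         def __init__(self, n): self.n = n
#         def __eq__(self, other): return self.n == other.n
#         def __lt__(self, other): return self.n < other.n
#
#     Version(1) >= Version(2)    # False, via the derived __ge__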
################################################################################
### cmp_to_key() function converter
################################################################################
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
__hash__ = None
return K
try:
from _functools import cmp_to_key
except ImportError:
pass
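# Usage sketch: adapt an old-style three-way comparison function for use as
# a key= argument to sorted(), min(), max(), etc.
#
#     def reverse_numeric(x, y):
#         return y - x
#
#     sorted([5, 2, 4], key=cmp_to_key(reverse_numeric))    # [5, 4, 2]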
################################################################################
### partial() argument application
################################################################################
# Purely functional, no descriptor behaviour
def partial(func, *args, **keywords):
"""New function with partial application of the given arguments
and keywords.
"""
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
from _functools import partial
except ImportError:
pass
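# Usage sketch: freeze arguments of an existing callable.
#
#     basetwo = partial(int, base=2)
#     basetwo('10010')    # 18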
# Descriptor version
class partialmethod(object):
"""Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = func.keywords.copy()
self.keywords.update(keywords)
else:
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__name__,
func=self.func,
args=args,
keywords=keywords)
def _make_unbound_method(self):
def _method(*args, **keywords):
call_keywords = self.keywords.copy()
call_keywords.update(keywords)
cls_or_self, *rest = args
call_args = (cls_or_self,) + self.args + tuple(rest)
return self.func(*call_args, **call_keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
def __get__(self, obj, cls):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
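# Usage sketch (hypothetical Cell class): pre-bind method arguments so the
# instance is still passed first when the bound copies are called.
#
#     class Cell:
#         def set_state(self, state):
#             self.alive = state
#         set_alive = partialmethod(set_state, True)
#         set_dead = partialmethod(set_state, False)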
################################################################################
### LRU Cache function decorator
################################################################################
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
""" This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
"""Make a cache key from optionally typed positional and keyword arguments
    The key is constructed in a way that is as flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
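# Hedged sketch (editorial addition): how _make_key flattens arguments. A
# single argument of a "fast" type is returned bare; otherwise the flat
# tuple is wrapped in _HashedSeq so hash() runs only once.
if __name__ == "__main__":
    assert _make_key((3,), {}, False) == 3
    key = _make_key((1, 2), {"a": 3}, False)
    assert isinstance(key, _HashedSeq)
    assert hash(key) == key.hashvalue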
def lru_cache(maxsize=128, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = misses = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
root = oldroot[NEXT]
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
full = (len(cache) >= maxsize)
misses += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = 0
full = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
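# Hedged usage sketch (editorial addition): memoizing a recursive function
# and reading the statistics exposed through the public API.
if __name__ == "__main__":
    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    assert fib(16) == 987
    info = fib.cache_info()
    assert info.hits > 0 and info.currsize <= 32
    fib.cache_clear()
    assert fib.cache_info().currsize == 0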
################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
raise RuntimeError("Inconsistent hierarchy")
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
def _c3_mro(cls, abcs=None):
"""Computes the method resolution order using extended C3 linearization.
If no *abcs* are given, the algorithm works exactly like the built-in C3
linearization used for method resolution.
If given, *abcs* is a list of abstract base classes that should be inserted
into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
result. The algorithm inserts ABCs where their functionality is introduced,
i.e. issubclass(cls, abc) returns True for the class itself but returns
False for all its direct base classes. Implicit ABCs for a given class
(either registered or inferred from the presence of a special method like
__len__) are inserted directly after the last ABC explicitly listed in the
MRO of said class. If two implicit ABCs end up next to each other in the
resulting MRO, their ordering depends on the order of types in *abcs*.
"""
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if abcs else []
explicit_bases = list(cls.__bases__[:boundary])
abstract_bases = []
other_bases = list(cls.__bases__[boundary:])
for base in abcs:
if issubclass(cls, base) and not any(
issubclass(b, base) for b in cls.__bases__
):
# If *cls* is the class that introduces behaviour described by
# an ABC *base*, insert said ABC to its MRO.
abstract_bases.append(base)
for base in abstract_bases:
abcs.remove(base)
explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
return _c3_merge(
[[cls]] +
explicit_c3_mros + abstract_c3_mros + other_c3_mros +
[explicit_bases] + [abstract_bases] + [other_bases]
)
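# Hedged sketch (editorial addition): with no *abcs*, _c3_mro reproduces the
# built-in C3 linearization, shown here on a classic diamond hierarchy.
if __name__ == "__main__":
    class A(object): pass
    class B(A): pass
    class C(A): pass
    class D(B, C): pass
    assert _c3_mro(D) == list(D.__mro__) == [D, B, C, A, object]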
def _compose_mro(cls, types):
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(typ):
return (typ not in bases and hasattr(typ, '__mro__')
and issubclass(cls, typ))
types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
def is_strict_base(typ):
for other in types:
if typ != other and typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if (t in registry and t not in cls.__mro__
and match not in cls.__mro__
and not issubclass(match, t)):
raise RuntimeError("Ambiguous dispatch: {} or {}".format(
match, t))
break
if t in registry:
match = t
return registry.get(match)
def singledispatch(func):
"""Single-dispatch generic function decorator.
Transforms a function into a generic function, which can have different
behaviours depending upon the type of its first argument. The decorated
function acts as the default implementation, and additional
implementations can be registered using the register() attribute of the
generic function.
"""
registry = {}
dispatch_cache = WeakKeyDictionary()
cache_token = None
def dispatch(cls):
"""generic_func.dispatch(cls) -> <function implementation>
Runs the dispatch algorithm to return the best available implementation
for the given *cls* registered on *generic_func*.
"""
nonlocal cache_token
if cache_token is not None:
current_token = get_cache_token()
if cache_token != current_token:
dispatch_cache.clear()
cache_token = current_token
try:
impl = dispatch_cache[cls]
except KeyError:
try:
impl = registry[cls]
except KeyError:
impl = _find_impl(cls, registry)
dispatch_cache[cls] = impl
return impl
def register(cls, func=None):
"""generic_func.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_func*.
"""
nonlocal cache_token
if func is None:
return lambda f: register(cls, f)
registry[cls] = func
if cache_token is None and hasattr(cls, '__abstractmethods__'):
cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
return dispatch(args[0].__class__)(*args, **kw)
registry[object] = func
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, func)
return wrapper
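# Hedged usage sketch (editorial addition): registering per-type
# implementations and falling back to the default for unregistered types.
if __name__ == "__main__":
    @singledispatch
    def describe(obj):
        return "object"
    @describe.register(int)
    def _(obj):
        return "int"
    @describe.register(list)
    def _(obj):
        return "list"
    assert describe(3) == "int"
    assert describe([1, 2]) == "list"
    assert describe(3.5) == "object"
    assert describe.dispatch(bool) == describe.registry[int]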
|
zouyapeng/horizon_change
|
refs/heads/juno
|
openstack_dashboard/dashboards/project/routers/extensions/routerrules/tabs.py
|
11
|
# Copyright 2013
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from django import template
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import tables as rrtbl
class RouterRulesTab(tabs.TableTab):
table_classes = (rrtbl.RouterRulesTable,)
name = _("Router Rules")
slug = "routerrules"
template_name = "horizon/common/_detail_table.html"
def allowed(self, request):
try:
getattr(self.tab_group.router, 'router_rules')
return True
except Exception:
return False
def get_routerrules_data(self):
try:
routerrules = getattr(self.tab_group.router, 'router_rules')
except Exception:
routerrules = []
return [rulemanager.RuleObject(r) for r in routerrules]
def post(self, request, *args, **kwargs):
if request.POST['action'] == 'routerrules__resetrules':
kwargs['reset_rules'] = True
rulemanager.remove_rules(request, [], **kwargs)
self.tab_group.router = api.neutron.router_get(request,
kwargs['router_id'])
class RulesGridTab(tabs.Tab):
name = _("Router Rules Grid")
slug = "rulesgrid"
    template_name = "project/routers/extensions/routerrules/grid.html"
def allowed(self, request):
try:
getattr(self.tab_group.router, 'router_rules')
return True
except Exception:
return False
def render(self):
context = template.RequestContext(self.request)
return render_to_string(self.get_template_name(self.request),
self.data, context_instance=context)
def get_context_data(self, request, **kwargs):
data = {'router': {'id':
self.tab_group.kwargs['router_id']}}
self.request = request
rules, supported = self.get_routerrules_data(checksupport=True)
if supported:
data["rulesmatrix"] = self.get_routerrulesgrid_data(rules)
return data
def get_routerrulesgrid_data(self, rules):
ports = self.tab_group.ports
networks = api.neutron.network_list_for_tenant(self.request,
self.request.user.tenant_id)
for n in networks:
n.set_id_as_name_if_empty()
netnamemap = {}
subnetmap = {}
for n in networks:
netnamemap[n['id']] = n['name']
for s in n.subnets:
subnetmap[s.id] = {'name': s.name,
'cidr': s.cidr}
matrix = []
subnets = []
for port in ports:
for ip in port['fixed_ips']:
if ip['subnet_id'] not in subnetmap:
continue
sub = {'ip': ip['ip_address'],
'subnetid': ip['subnet_id'],
'subnetname': subnetmap[ip['subnet_id']]['name'],
'networkid': port['network_id'],
'networkname': netnamemap[port['network_id']],
'cidr': subnetmap[ip['subnet_id']]['cidr']}
subnets.append(sub)
subnets.append({'ip': '0.0.0.0',
'subnetid': 'external',
'subnetname': '',
'networkname': 'external',
'networkid': 'external',
'cidr': '0.0.0.0/0'})
subnets.append({'ip': '0.0.0.0',
'subnetid': 'any',
'subnetname': '',
'networkname': 'any',
'networkid': 'any',
'cidr': '0.0.0.0/0'})
for source in subnets:
row = {'source': dict(source),
'targets': []}
for target in subnets:
target.update(self._get_subnet_connectivity(
source, target, rules))
row['targets'].append(dict(target))
matrix.append(row)
return matrix
def _get_subnet_connectivity(self, src_sub, dst_sub, rules):
v4_any_words = ['external', 'any']
connectivity = {'reachable': '',
'inverse_rule': {},
'rule_to_delete': False}
src = src_sub['cidr']
dst = dst_sub['cidr']
# differentiate between external and any
src_rulename = src_sub['subnetid'] if src == '0.0.0.0/0' else src
dst_rulename = dst_sub['subnetid'] if dst == '0.0.0.0/0' else dst
if str(src) == str(dst):
connectivity['reachable'] = 'full'
return connectivity
matchingrules = []
for rule in rules:
rd = rule['destination']
if rule['destination'] in v4_any_words:
rd = '0.0.0.0/0'
rs = rule['source']
if rule['source'] in v4_any_words:
rs = '0.0.0.0/0'
rs = netaddr.IPNetwork(rs)
src = netaddr.IPNetwork(src)
rd = netaddr.IPNetwork(rd)
dst = netaddr.IPNetwork(dst)
# check if cidrs are affected by rule first
if (int(dst.network) >= int(rd.broadcast) or
int(dst.broadcast) <= int(rd.network) or
int(src.network) >= int(rs.broadcast) or
int(src.broadcast) <= int(rs.network)):
continue
# skip matching rules for 'any' and 'external' networks
if (str(dst) == '0.0.0.0/0' and str(rd) != '0.0.0.0/0'):
continue
if (str(src) == '0.0.0.0/0' and str(rs) != '0.0.0.0/0'):
continue
# external network rules only affect external traffic
if (rule['source'] == 'external' and
src_rulename not in v4_any_words):
continue
if (rule['destination'] == 'external' and
dst_rulename not in v4_any_words):
continue
match = {'bitsinsrc': rs.prefixlen,
'bitsindst': rd.prefixlen,
'rule': rule}
matchingrules.append(match)
if not matchingrules:
connectivity['reachable'] = 'none'
connectivity['inverse_rule'] = {'source': src_rulename,
'destination': dst_rulename,
'action': 'permit'}
return connectivity
sortedrules = sorted(matchingrules,
key=lambda k: (k['bitsinsrc'], k['bitsindst']),
reverse=True)
match = sortedrules[0]
if (match['bitsinsrc'] > src.prefixlen or
match['bitsindst'] > dst.prefixlen):
connectivity['reachable'] = 'partial'
connectivity['conflicting_rule'] = match['rule']
return connectivity
if (match['rule']['source'] == src_rulename and
match['rule']['destination'] == dst_rulename):
connectivity['rule_to_delete'] = match['rule']
if match['rule']['action'] == 'permit':
connectivity['reachable'] = 'full'
inverseaction = 'deny'
else:
connectivity['reachable'] = 'none'
inverseaction = 'permit'
connectivity['inverse_rule'] = {'source': src_rulename,
'destination': dst_rulename,
'action': inverseaction}
return connectivity
def get_routerrules_data(self, checksupport=False):
try:
routerrules = getattr(self.tab_group.router, 'router_rules')
supported = True
except Exception:
routerrules = []
supported = False
if checksupport:
return routerrules, supported
return routerrules
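# Hedged sketch (editorial addition): the CIDR overlap test used by
# _get_subnet_connectivity above, isolated on plain netaddr networks. The
# addresses are placeholders.
if __name__ == "__main__":
    rule_src = netaddr.IPNetwork('10.0.0.0/8')
    src = netaddr.IPNetwork('10.1.0.0/16')
    disjoint = (int(src.network) >= int(rule_src.broadcast) or
                int(src.broadcast) <= int(rule_src.network))
    assert not disjoint  # 10.1.0.0/16 lies inside 10.0.0.0/8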
|
annatisch/autorest
|
refs/heads/master
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/auto_rest_parameter_grouping_test_service.py
|
3
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.parameter_grouping_operations import ParameterGroupingOperations
from . import models
class AutoRestParameterGroupingTestServiceConfiguration(AzureConfiguration):
"""Configuration for AutoRestParameterGroupingTestService
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'https://localhost'
super(AutoRestParameterGroupingTestServiceConfiguration, self).__init__(base_url)
self.add_user_agent('autorestparametergroupingtestservice/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
class AutoRestParameterGroupingTestService(object):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestParameterGroupingTestServiceConfiguration
:ivar parameter_grouping: ParameterGrouping operations
:vartype parameter_grouping: .operations.ParameterGroupingOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = AutoRestParameterGroupingTestServiceConfiguration(credentials, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.parameter_grouping = ParameterGroupingOperations(
self._client, self.config, self._serialize, self._deserialize)
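# Hedged usage sketch (editorial addition, not generated code): constructing
# the client. BasicTokenAuthentication with a dummy token stands in for a
# real msrestazure credentials object; no request is sent here.
if __name__ == "__main__":
    from msrest.authentication import BasicTokenAuthentication
    credentials = BasicTokenAuthentication({"access_token": "dummy"})
    client = AutoRestParameterGroupingTestService(
        credentials, base_url="http://localhost:3000")
    print(client.config.base_url)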
|
NexusIS/libcloud
|
refs/heads/trunk
|
docs/examples/compute/vsphere/connect_host.py
|
56
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.VSPHERE)
driver = cls(host='192.168.1.100',
username='admin', password='admin')
print(driver.list_nodes())
# ...
|
snakeleon/YouCompleteMe-x86
|
refs/heads/master
|
third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/test_api/test_unicode.py
|
27
|
# -*- coding: utf-8 -*-
"""
All character set and unicode related tests.
"""
from jedi import Script
from jedi._compatibility import u, unicode
def test_unicode_script():
""" normally no unicode objects are being used. (<=2.7) """
s = unicode("import datetime; datetime.timedelta")
completions = Script(s).completions()
assert len(completions)
assert type(completions[0].description) is unicode
s = u("author='öä'; author")
completions = Script(s).completions()
x = completions[0].description
assert type(x) is unicode
s = u("#-*- coding: iso-8859-1 -*-\nauthor='öä'; author")
s = s.encode('latin-1')
completions = Script(s).completions()
assert type(completions[0].description) is unicode
def test_unicode_attribute():
""" github jedi-vim issue #94 """
s1 = u('#-*- coding: utf-8 -*-\nclass Person():\n'
' name = "e"\n\nPerson().name.')
completions1 = Script(s1).completions()
assert 'strip' in [c.name for c in completions1]
s2 = u('#-*- coding: utf-8 -*-\nclass Person():\n'
' name = "é"\n\nPerson().name.')
completions2 = Script(s2).completions()
assert 'strip' in [c.name for c in completions2]
def test_multibyte_script():
""" `jedi.Script` must accept multi-byte string source. """
try:
code = u("import datetime; datetime.d")
comment = u("# multi-byte comment あいうえおä")
s = (u('%s\n%s') % (code, comment)).encode('utf-8')
except NameError:
pass # python 3 has no unicode method
else:
assert len(Script(s, 1, len(code)).completions())
def test_goto_definition_at_zero():
"""At zero usually sometimes raises unicode issues."""
assert Script("a", 1, 1).goto_definitions() == []
s = Script("str", 1, 1).goto_definitions()
assert len(s) == 1
assert list(s)[0].description == 'class str'
assert Script("", 1, 0).goto_definitions() == []
def test_complete_at_zero():
s = Script("str", 1, 3).completions()
assert len(s) == 1
assert list(s)[0].name == 'str'
s = Script("", 1, 0).completions()
assert len(s) > 0
|
toshimasa-nasu/hbase
|
refs/heads/master
|
hbase-examples/src/main/python/thrift1/gen-py/hbase/Hbase.py
|
47
|
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except ImportError:
fastbinary = None
class Iface:
def enableTable(self, tableName):
"""
Brings a table on-line (enables it)
Parameters:
- tableName: name of the table
"""
pass
def disableTable(self, tableName):
"""
Disables a table (takes it off-line) If it is being served, the master
will tell the servers to stop serving it.
Parameters:
- tableName: name of the table
"""
pass
def isTableEnabled(self, tableName):
"""
@return true if table is on-line
Parameters:
- tableName: name of the table to check
"""
pass
def compact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
pass
def majorCompact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
pass
def getTableNames(self, ):
"""
List all the userspace tables.
    @return a list of names
"""
pass
def getColumnDescriptors(self, tableName):
"""
    List all the column families associated with a table.
@return list of column family descriptors
Parameters:
- tableName: table name
"""
pass
def getTableRegions(self, tableName):
"""
List the regions associated with a table.
@return list of region descriptors
Parameters:
- tableName: table name
"""
pass
def createTable(self, tableName, columnFamilies):
"""
Create a table with the specified column families. The name
field for each ColumnDescriptor must be set and must end in a
colon (:). All other fields are optional and will get default
values if not explicitly specified.
@throws IllegalArgument if an input parameter is invalid
@throws AlreadyExists if the table name already exists
Parameters:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
pass
def deleteTable(self, tableName):
"""
Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete
"""
pass
def get(self, tableName, row, column, attributes):
"""
Get a single TCell for the specified table, row, and column at the
latest timestamp. Returns an empty list if no such value exists.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
pass
def getVer(self, tableName, row, column, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
pass
def getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column. Only versions less than or equal to the specified
timestamp will be returned.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
pass
def getRow(self, tableName, row, attributes):
"""
Get all the data for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
pass
def getRowWithColumns(self, tableName, row, columns, attributes):
"""
Get the specified columns for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
pass
def getRowTs(self, tableName, row, timestamp, attributes):
"""
Get all the data for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
pass
def getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- timestamp
- attributes: Get attributes
"""
pass
def getRows(self, tableName, rows, attributes):
"""
Get all the data for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
pass
def getRowsWithColumns(self, tableName, rows, columns, attributes):
"""
Get the specified columns for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
pass
def getRowsTs(self, tableName, rows, timestamp, attributes):
"""
Get all the data for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
pass
def getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- timestamp
- attributes: Get attributes
"""
pass
def mutateRow(self, tableName, row, mutations, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. Default current timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
pass
def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
pass
def mutateRows(self, tableName, rowBatches, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. Default current timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
pass
def mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
pass
def atomicIncrement(self, tableName, row, column, value):
"""
Atomically increment the column value specified. Returns the next value post increment.
Parameters:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
pass
def deleteAll(self, tableName, row, column, attributes):
"""
Delete all cells that match the passed row and column.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
pass
def deleteAllTs(self, tableName, row, column, timestamp, attributes):
"""
Delete all cells that match the passed row and column and whose
timestamp is equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- timestamp: timestamp
- attributes: Delete attributes
"""
pass
def deleteAllRow(self, tableName, row, attributes):
"""
Completely delete the row's cells.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
pass
def increment(self, increment):
"""
    Increment a cell by the given amount.
    Increments are applied asynchronously if hbase.regionserver.thrift.coalesceIncrement
    is set to true; false is the default. Turn it on if you need the extra performance
    and can accept some data loss if a thrift server dies with increments still in the
    queue.
Parameters:
- increment: The single increment to apply
"""
pass
def incrementRows(self, increments):
"""
Parameters:
- increments: The list of increments
"""
pass
def deleteAllRowTs(self, tableName, row, timestamp, attributes):
"""
Completely delete the row's cells marked with a timestamp
equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: timestamp
- attributes: Delete attributes
"""
pass
def scannerOpenWithScan(self, tableName, scan, attributes):
"""
Get a scanner on the current table, using the Scan instance
for the scan parameters.
Parameters:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
pass
def scannerOpen(self, tableName, startRow, columns, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
pass
def scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
"""
    Get a scanner on the current table starting and stopping at the
    specified rows. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
pass
def scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
"""
    Open a scanner for a given prefix. That is, all rows will have the specified
    prefix; no other rows will be returned.
@return scanner id to use with other scanner calls
Parameters:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
pass
def scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
Only values with the specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
pass
def scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
"""
    Get a scanner on the current table starting and stopping at the
    specified rows. Return the specified columns. Only values with the
    specified timestamp are
returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
pass
def scannerGet(self, id):
"""
Returns the scanner's current row value and advances to the next
row in the table. When there are no more rows in the table, or a key
greater-than-or-equal-to the scanner's specified stopRow is reached,
an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
"""
pass
def scannerGetList(self, id, nbRows):
"""
Returns, starting at the scanner's current row value nbRows worth of
rows and advances to the next row in the table. When there are no more
rows in the table, or a key greater-than-or-equal-to the scanner's
specified stopRow is reached, an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
pass
def scannerClose(self, id):
"""
Closes the server-state associated with an open scanner.
@throws IllegalArgument if ScannerID is invalid
Parameters:
- id: id of a scanner returned by scannerOpen
"""
pass
def getRowOrBefore(self, tableName, row, family):
"""
Get the row just before the specified one.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- family: column name
"""
pass
def getRegionInfo(self, row):
"""
Get the regininfo for the specified row. It scans
the metatable to find region's start and end keys.
@return value for specified row/column
Parameters:
- row: row key
"""
pass
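def _example_open_client(host='localhost', port=9090):
  """Hedged usage sketch (editorial addition, not Thrift-generated): build a
  Client over a buffered binary-protocol transport. The host and port are
  placeholders for a running HBase Thrift server; Client is looked up at call
  time, so defining this helper above the class is safe.
  """
  from thrift.transport import TSocket
  socket = TSocket.TSocket(host, port)
  transport = TTransport.TBufferedTransport(socket)
  protocol = TBinaryProtocol.TBinaryProtocol(transport)
  transport.open()
  return Client(protocol), transport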
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def enableTable(self, tableName):
"""
Brings a table on-line (enables it)
Parameters:
- tableName: name of the table
"""
self.send_enableTable(tableName)
self.recv_enableTable()
def send_enableTable(self, tableName):
self._oprot.writeMessageBegin('enableTable', TMessageType.CALL, self._seqid)
args = enableTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_enableTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = enableTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def disableTable(self, tableName):
"""
Disables a table (takes it off-line) If it is being served, the master
will tell the servers to stop serving it.
Parameters:
- tableName: name of the table
"""
self.send_disableTable(tableName)
self.recv_disableTable()
def send_disableTable(self, tableName):
self._oprot.writeMessageBegin('disableTable', TMessageType.CALL, self._seqid)
args = disableTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_disableTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = disableTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def isTableEnabled(self, tableName):
"""
@return true if table is on-line
Parameters:
- tableName: name of the table to check
"""
self.send_isTableEnabled(tableName)
return self.recv_isTableEnabled()
def send_isTableEnabled(self, tableName):
self._oprot.writeMessageBegin('isTableEnabled', TMessageType.CALL, self._seqid)
args = isTableEnabled_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_isTableEnabled(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = isTableEnabled_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableEnabled failed: unknown result");
def compact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
self.send_compact(tableNameOrRegionName)
self.recv_compact()
def send_compact(self, tableNameOrRegionName):
self._oprot.writeMessageBegin('compact', TMessageType.CALL, self._seqid)
args = compact_args()
args.tableNameOrRegionName = tableNameOrRegionName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_compact(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = compact_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def majorCompact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
self.send_majorCompact(tableNameOrRegionName)
self.recv_majorCompact()
def send_majorCompact(self, tableNameOrRegionName):
self._oprot.writeMessageBegin('majorCompact', TMessageType.CALL, self._seqid)
args = majorCompact_args()
args.tableNameOrRegionName = tableNameOrRegionName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_majorCompact(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = majorCompact_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def getTableNames(self, ):
"""
List all the userspace tables.
    @return a list of names
"""
self.send_getTableNames()
return self.recv_getTableNames()
def send_getTableNames(self, ):
self._oprot.writeMessageBegin('getTableNames', TMessageType.CALL, self._seqid)
args = getTableNames_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTableNames(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTableNames_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableNames failed: unknown result");
def getColumnDescriptors(self, tableName):
"""
    List all the column families associated with a table.
@return list of column family descriptors
Parameters:
- tableName: table name
"""
self.send_getColumnDescriptors(tableName)
return self.recv_getColumnDescriptors()
def send_getColumnDescriptors(self, tableName):
self._oprot.writeMessageBegin('getColumnDescriptors', TMessageType.CALL, self._seqid)
args = getColumnDescriptors_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getColumnDescriptors(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getColumnDescriptors_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getColumnDescriptors failed: unknown result");
def getTableRegions(self, tableName):
"""
List the regions associated with a table.
@return list of region descriptors
Parameters:
- tableName: table name
"""
self.send_getTableRegions(tableName)
return self.recv_getTableRegions()
def send_getTableRegions(self, tableName):
self._oprot.writeMessageBegin('getTableRegions', TMessageType.CALL, self._seqid)
args = getTableRegions_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTableRegions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTableRegions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableRegions failed: unknown result");
def createTable(self, tableName, columnFamilies):
"""
Create a table with the specified column families. The name
field for each ColumnDescriptor must be set and must end in a
colon (:). All other fields are optional and will get default
values if not explicitly specified.
@throws IllegalArgument if an input parameter is invalid
@throws AlreadyExists if the table name already exists
Parameters:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
self.send_createTable(tableName, columnFamilies)
self.recv_createTable()
def send_createTable(self, tableName, columnFamilies):
self._oprot.writeMessageBegin('createTable', TMessageType.CALL, self._seqid)
args = createTable_args()
args.tableName = tableName
args.columnFamilies = columnFamilies
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = createTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
if result.exist is not None:
raise result.exist
return
def deleteTable(self, tableName):
"""
Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete
"""
self.send_deleteTable(tableName)
self.recv_deleteTable()
def send_deleteTable(self, tableName):
self._oprot.writeMessageBegin('deleteTable', TMessageType.CALL, self._seqid)
args = deleteTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def get(self, tableName, row, column, attributes):
"""
Get a single TCell for the specified table, row, and column at the
latest timestamp. Returns an empty list if no such value exists.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
self.send_get(tableName, row, column, attributes)
return self.recv_get()
def send_get(self, tableName, row, column, attributes):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.tableName = tableName
args.row = row
args.column = column
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result");
def getVer(self, tableName, row, column, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
self.send_getVer(tableName, row, column, numVersions, attributes)
return self.recv_getVer()
def send_getVer(self, tableName, row, column, numVersions, attributes):
self._oprot.writeMessageBegin('getVer', TMessageType.CALL, self._seqid)
args = getVer_args()
args.tableName = tableName
args.row = row
args.column = column
args.numVersions = numVersions
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVer(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVer_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVer failed: unknown result");
def getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column. Only versions less than or equal to the specified
timestamp will be returned.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
self.send_getVerTs(tableName, row, column, timestamp, numVersions, attributes)
return self.recv_getVerTs()
def send_getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
self._oprot.writeMessageBegin('getVerTs', TMessageType.CALL, self._seqid)
args = getVerTs_args()
args.tableName = tableName
args.row = row
args.column = column
args.timestamp = timestamp
args.numVersions = numVersions
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVerTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVerTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVerTs failed: unknown result");
def getRow(self, tableName, row, attributes):
"""
Get all the data for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
self.send_getRow(tableName, row, attributes)
return self.recv_getRow()
def send_getRow(self, tableName, row, attributes):
self._oprot.writeMessageBegin('getRow', TMessageType.CALL, self._seqid)
args = getRow_args()
args.tableName = tableName
args.row = row
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRow(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRow failed: unknown result");
def getRowWithColumns(self, tableName, row, columns, attributes):
"""
Get the specified columns for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
self.send_getRowWithColumns(tableName, row, columns, attributes)
return self.recv_getRowWithColumns()
def send_getRowWithColumns(self, tableName, row, columns, attributes):
self._oprot.writeMessageBegin('getRowWithColumns', TMessageType.CALL, self._seqid)
args = getRowWithColumns_args()
args.tableName = tableName
args.row = row
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowWithColumns(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowWithColumns_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowWithColumns failed: unknown result");
def getRowTs(self, tableName, row, timestamp, attributes):
"""
Get all the data for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowTs(tableName, row, timestamp, attributes)
return self.recv_getRowTs()
def send_getRowTs(self, tableName, row, timestamp, attributes):
self._oprot.writeMessageBegin('getRowTs', TMessageType.CALL, self._seqid)
args = getRowTs_args()
args.tableName = tableName
args.row = row
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowTs failed: unknown result");
def getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- timestamp
- attributes: Get attributes
"""
self.send_getRowWithColumnsTs(tableName, row, columns, timestamp, attributes)
return self.recv_getRowWithColumnsTs()
def send_getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
self._oprot.writeMessageBegin('getRowWithColumnsTs', TMessageType.CALL, self._seqid)
args = getRowWithColumnsTs_args()
args.tableName = tableName
args.row = row
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowWithColumnsTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowWithColumnsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowWithColumnsTs failed: unknown result")
def getRows(self, tableName, rows, attributes):
"""
Get all the data for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
self.send_getRows(tableName, rows, attributes)
return self.recv_getRows()
def send_getRows(self, tableName, rows, attributes):
self._oprot.writeMessageBegin('getRows', TMessageType.CALL, self._seqid)
args = getRows_args()
args.tableName = tableName
args.rows = rows
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRows(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRows failed: unknown result")
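# Usage sketch (hypothetical): batching several row keys into one round trip.
# An empty list comes back if none of the requested rows exist; the 'client'
# handle is an assumption.
#
#   rows = client.getRows('mytable', ['row-1', 'row-2', 'row-3'], None)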
def getRowsWithColumns(self, tableName, rows, columns, attributes):
"""
Get the specified columns for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
self.send_getRowsWithColumns(tableName, rows, columns, attributes)
return self.recv_getRowsWithColumns()
def send_getRowsWithColumns(self, tableName, rows, columns, attributes):
self._oprot.writeMessageBegin('getRowsWithColumns', TMessageType.CALL, self._seqid)
args = getRowsWithColumns_args()
args.tableName = tableName
args.rows = rows
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsWithColumns(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsWithColumns_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsWithColumns failed: unknown result")
def getRowsTs(self, tableName, rows, timestamp, attributes):
"""
Get all the data for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowsTs(tableName, rows, timestamp, attributes)
return self.recv_getRowsTs()
def send_getRowsTs(self, tableName, rows, timestamp, attributes):
self._oprot.writeMessageBegin('getRowsTs', TMessageType.CALL, self._seqid)
args = getRowsTs_args()
args.tableName = tableName
args.rows = rows
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsTs failed: unknown result")
def getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowsWithColumnsTs(tableName, rows, columns, timestamp, attributes)
return self.recv_getRowsWithColumnsTs()
def send_getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
self._oprot.writeMessageBegin('getRowsWithColumnsTs', TMessageType.CALL, self._seqid)
args = getRowsWithColumnsTs_args()
args.tableName = tableName
args.rows = rows
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsWithColumnsTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsWithColumnsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsWithColumnsTs failed: unknown result")
def mutateRow(self, tableName, row, mutations, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The current timestamp is used by default, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
self.send_mutateRow(tableName, row, mutations, attributes)
self.recv_mutateRow()
def send_mutateRow(self, tableName, row, mutations, attributes):
self._oprot.writeMessageBegin('mutateRow', TMessageType.CALL, self._seqid)
args = mutateRow_args()
args.tableName = tableName
args.row = row
args.mutations = mutations
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRow(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
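# Usage sketch (hypothetical): a put and a delete applied to one row in a
# single mutateRow call. Mutation comes from the generated ttypes module; the
# field names (column, value, isDelete) follow the HBase Thrift IDL and should
# be checked against your own generated code.
#
#   from hbase.ttypes import Mutation
#   muts = [Mutation(column='cf:a', value='1'),
#           Mutation(column='cf:b', isDelete=True)]
#   client.mutateRow('mytable', 'row-1', muts, None)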
def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
self.send_mutateRowTs(tableName, row, mutations, timestamp, attributes)
self.recv_mutateRowTs()
def send_mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
self._oprot.writeMessageBegin('mutateRowTs', TMessageType.CALL, self._seqid)
args = mutateRowTs_args()
args.tableName = tableName
args.row = row
args.mutations = mutations
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRowTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
def mutateRows(self, tableName, rowBatches, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The current timestamp is used by default, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
self.send_mutateRows(tableName, rowBatches, attributes)
self.recv_mutateRows()
def send_mutateRows(self, tableName, rowBatches, attributes):
self._oprot.writeMessageBegin('mutateRows', TMessageType.CALL, self._seqid)
args = mutateRows_args()
args.tableName = tableName
args.rowBatches = rowBatches
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRows(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
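# Usage sketch (hypothetical): grouping per-row mutation lists into
# BatchMutation structs (a row key plus its Mutations, per the HBase Thrift
# IDL) and sending them in one call. The import path is an assumption.
#
#   from hbase.ttypes import BatchMutation, Mutation
#   batches = [BatchMutation(row='row-1', mutations=[Mutation(column='cf:a', value='x')]),
#              BatchMutation(row='row-2', mutations=[Mutation(column='cf:a', value='y')])]
#   client.mutateRows('mytable', batches, None)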
def mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
self.send_mutateRowsTs(tableName, rowBatches, timestamp, attributes)
self.recv_mutateRowsTs()
def send_mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
self._oprot.writeMessageBegin('mutateRowsTs', TMessageType.CALL, self._seqid)
args = mutateRowsTs_args()
args.tableName = tableName
args.rowBatches = rowBatches
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRowsTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRowsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
def atomicIncrement(self, tableName, row, column, value):
"""
Atomically increment the specified column value. Returns the column value after the increment.
Parameters:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
self.send_atomicIncrement(tableName, row, column, value)
return self.recv_atomicIncrement()
def send_atomicIncrement(self, tableName, row, column, value):
self._oprot.writeMessageBegin('atomicIncrement', TMessageType.CALL, self._seqid)
args = atomicIncrement_args()
args.tableName = tableName
args.row = row
args.column = column
args.value = value
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_atomicIncrement(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = atomicIncrement_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "atomicIncrement failed: unknown result")
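# Usage sketch (hypothetical): maintaining a counter cell. The return value is
# the column value after the increment has been applied.
#
#   hits = client.atomicIncrement('mytable', 'row-1', 'cf:hits', 1)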
def deleteAll(self, tableName, row, column, attributes):
"""
Delete all cells that match the passed row and column.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
self.send_deleteAll(tableName, row, column, attributes)
self.recv_deleteAll()
def send_deleteAll(self, tableName, row, column, attributes):
self._oprot.writeMessageBegin('deleteAll', TMessageType.CALL, self._seqid)
args = deleteAll_args()
args.tableName = tableName
args.row = row
args.column = column
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAll(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAll_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteAllTs(self, tableName, row, column, timestamp, attributes):
"""
Delete all cells that match the passed row and column and whose
timestamp is equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- timestamp: timestamp
- attributes: Delete attributes
"""
self.send_deleteAllTs(tableName, row, column, timestamp, attributes)
self.recv_deleteAllTs()
def send_deleteAllTs(self, tableName, row, column, timestamp, attributes):
self._oprot.writeMessageBegin('deleteAllTs', TMessageType.CALL, self._seqid)
args = deleteAllTs_args()
args.tableName = tableName
args.row = row
args.column = column
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteAllRow(self, tableName, row, attributes):
"""
Completely delete the row's cells.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
self.send_deleteAllRow(tableName, row, attributes)
self.recv_deleteAllRow()
def send_deleteAllRow(self, tableName, row, attributes):
self._oprot.writeMessageBegin('deleteAllRow', TMessageType.CALL, self._seqid)
args = deleteAllRow_args()
args.tableName = tableName
args.row = row
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllRow(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def increment(self, increment):
"""
Increment a cell by the given amount.
Increments can be applied asynchronously if hbase.regionserver.thrift.coalesceIncrement is set to true.
False is the default. Set it to true if you need the extra performance and can accept some
data loss if a Thrift server dies with increments still in the queue.
Parameters:
- increment: The single increment to apply
"""
self.send_increment(increment)
self.recv_increment()
def send_increment(self, increment):
self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid)
args = increment_args()
args.increment = increment
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_increment(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = increment_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def incrementRows(self, increments):
"""
Parameters:
- increments: The list of increments
"""
self.send_incrementRows(increments)
self.recv_incrementRows()
def send_incrementRows(self, increments):
self._oprot.writeMessageBegin('incrementRows', TMessageType.CALL, self._seqid)
args = incrementRows_args()
args.increments = increments
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_incrementRows(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = incrementRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteAllRowTs(self, tableName, row, timestamp, attributes):
"""
Completely delete the row's cells marked with a timestamp
equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: timestamp
- attributes: Delete attributes
"""
self.send_deleteAllRowTs(tableName, row, timestamp, attributes)
self.recv_deleteAllRowTs()
def send_deleteAllRowTs(self, tableName, row, timestamp, attributes):
self._oprot.writeMessageBegin('deleteAllRowTs', TMessageType.CALL, self._seqid)
args = deleteAllRowTs_args()
args.tableName = tableName
args.row = row
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllRowTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def scannerOpenWithScan(self, tableName, scan, attributes):
"""
Get a scanner on the current table, using the Scan instance
for the scan parameters.
Parameters:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
self.send_scannerOpenWithScan(tableName, scan, attributes)
return self.recv_scannerOpenWithScan()
def send_scannerOpenWithScan(self, tableName, scan, attributes):
self._oprot.writeMessageBegin('scannerOpenWithScan', TMessageType.CALL, self._seqid)
args = scannerOpenWithScan_args()
args.tableName = tableName
args.scan = scan
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithScan(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithScan_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithScan failed: unknown result")
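# Usage sketch (hypothetical): driving a scan from a TScan struct instead of
# the positional scannerOpen* variants below. TScan's fields (startRow,
# stopRow, columns, caching, filterString, ...) follow the HBase Thrift IDL
# and should be verified against your generated ttypes.
#
#   from hbase.ttypes import TScan
#   scan = TScan(startRow='a', stopRow='b', columns=['cf'])
#   scanner_id = client.scannerOpenWithScan('mytable', scan, None)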
def scannerOpen(self, tableName, startRow, columns, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
self.send_scannerOpen(tableName, startRow, columns, attributes)
return self.recv_scannerOpen()
def send_scannerOpen(self, tableName, startRow, columns, attributes):
self._oprot.writeMessageBegin('scannerOpen', TMessageType.CALL, self._seqid)
args = scannerOpen_args()
args.tableName = tableName
args.startRow = startRow
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpen(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpen_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpen failed: unknown result")
def scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
"""
Get a scanner on the current table starting and stopping at the
specified rows. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
self.send_scannerOpenWithStop(tableName, startRow, stopRow, columns, attributes)
return self.recv_scannerOpenWithStop()
def send_scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
self._oprot.writeMessageBegin('scannerOpenWithStop', TMessageType.CALL, self._seqid)
args = scannerOpenWithStop_args()
args.tableName = tableName
args.startRow = startRow
args.stopRow = stopRow
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithStop(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithStop_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithStop failed: unknown result")
def scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
"""
Open a scanner for a given prefix. That is, all rows returned will have
the specified prefix; no other rows will be returned.
@return scanner id to use with other scanner calls
Parameters:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
self.send_scannerOpenWithPrefix(tableName, startAndPrefix, columns, attributes)
return self.recv_scannerOpenWithPrefix()
def send_scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
self._oprot.writeMessageBegin('scannerOpenWithPrefix', TMessageType.CALL, self._seqid)
args = scannerOpenWithPrefix_args()
args.tableName = tableName
args.startAndPrefix = startAndPrefix
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithPrefix(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithPrefix_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithPrefix failed: unknown result")
def scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
Only values with the specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
self.send_scannerOpenTs(tableName, startRow, columns, timestamp, attributes)
return self.recv_scannerOpenTs()
def send_scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
self._oprot.writeMessageBegin('scannerOpenTs', TMessageType.CALL, self._seqid)
args = scannerOpenTs_args()
args.tableName = tableName
args.startRow = startRow
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenTs failed: unknown result")
def scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting and stopping at the
specified rows. Return the specified columns. Only values with
the specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
self.send_scannerOpenWithStopTs(tableName, startRow, stopRow, columns, timestamp, attributes)
return self.recv_scannerOpenWithStopTs()
def send_scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
self._oprot.writeMessageBegin('scannerOpenWithStopTs', TMessageType.CALL, self._seqid)
args = scannerOpenWithStopTs_args()
args.tableName = tableName
args.startRow = startRow
args.stopRow = stopRow
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithStopTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithStopTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithStopTs failed: unknown result")
def scannerGet(self, id):
"""
Returns the scanner's current row value and advances to the next
row in the table. When there are no more rows in the table, or a key
greater-than-or-equal-to the scanner's specified stopRow is reached,
an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
"""
self.send_scannerGet(id)
return self.recv_scannerGet()
def send_scannerGet(self, id):
self._oprot.writeMessageBegin('scannerGet', TMessageType.CALL, self._seqid)
args = scannerGet_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerGet(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerGet_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerGet failed: unknown result")
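# Usage sketch (hypothetical): pulling rows one at a time until the scanner is
# exhausted; an empty list signals the end.
#
#   while True:
#       batch = client.scannerGet(scanner_id)
#       if not batch:
#           break
#       handle(batch[0])  # handle() is a placeholder for your row processing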
def scannerGetList(self, id, nbRows):
"""
Returns, starting at the scanner's current row, up to nbRows worth of
rows, and advances past them in the table. When there are no more
rows in the table, or a key greater-than-or-equal-to the scanner's
specified stopRow is reached, an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
self.send_scannerGetList(id, nbRows)
return self.recv_scannerGetList()
def send_scannerGetList(self, id, nbRows):
self._oprot.writeMessageBegin('scannerGetList', TMessageType.CALL, self._seqid)
args = scannerGetList_args()
args.id = id
args.nbRows = nbRows
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerGetList(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerGetList_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerGetList failed: unknown result")
def scannerClose(self, id):
"""
Closes the server-side state associated with an open scanner.
@throws IllegalArgument if ScannerID is invalid
Parameters:
- id: id of a scanner returned by scannerOpen
"""
self.send_scannerClose(id)
self.recv_scannerClose()
def send_scannerClose(self, id):
self._oprot.writeMessageBegin('scannerClose', TMessageType.CALL, self._seqid)
args = scannerClose_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerClose(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerClose_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
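# Putting the scanner calls together (hypothetical sketch): open, read in
# chunks via scannerGetList, and close in a finally block so the server-side
# state is released even if processing fails.
#
#   scanner_id = client.scannerOpen('mytable', '', ['cf'], None)
#   try:
#       while True:
#           rows = client.scannerGetList(scanner_id, 100)
#           if not rows:
#               break
#           for r in rows:
#               handle(r)
#   finally:
#       client.scannerClose(scanner_id)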
def getRowOrBefore(self, tableName, row, family):
"""
Get the row just before the specified one.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- family: column name
"""
self.send_getRowOrBefore(tableName, row, family)
return self.recv_getRowOrBefore()
def send_getRowOrBefore(self, tableName, row, family):
self._oprot.writeMessageBegin('getRowOrBefore', TMessageType.CALL, self._seqid)
args = getRowOrBefore_args()
args.tableName = tableName
args.row = row
args.family = family
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowOrBefore(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowOrBefore_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowOrBefore failed: unknown result")
def getRegionInfo(self, row):
"""
Get the region info for the specified row. It scans
the meta table to find the region's start and end keys.
@return TRegionInfo for the region containing the row
Parameters:
- row: row key
"""
self.send_getRegionInfo(row)
return self.recv_getRegionInfo()
def send_getRegionInfo(self, row):
self._oprot.writeMessageBegin('getRegionInfo', TMessageType.CALL, self._seqid)
args = getRegionInfo_args()
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRegionInfo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRegionInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionInfo failed: unknown result")
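# Note on the pattern above: every RPC is split into send_<name>(), which
# serializes an args struct and flushes the transport, and recv_<name>(),
# which reads the matching result struct and turns its optional fields back
# into a return value or a raised exception. The Processor below is the
# server-side mirror of the same contract.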
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["enableTable"] = Processor.process_enableTable
self._processMap["disableTable"] = Processor.process_disableTable
self._processMap["isTableEnabled"] = Processor.process_isTableEnabled
self._processMap["compact"] = Processor.process_compact
self._processMap["majorCompact"] = Processor.process_majorCompact
self._processMap["getTableNames"] = Processor.process_getTableNames
self._processMap["getColumnDescriptors"] = Processor.process_getColumnDescriptors
self._processMap["getTableRegions"] = Processor.process_getTableRegions
self._processMap["createTable"] = Processor.process_createTable
self._processMap["deleteTable"] = Processor.process_deleteTable
self._processMap["get"] = Processor.process_get
self._processMap["getVer"] = Processor.process_getVer
self._processMap["getVerTs"] = Processor.process_getVerTs
self._processMap["getRow"] = Processor.process_getRow
self._processMap["getRowWithColumns"] = Processor.process_getRowWithColumns
self._processMap["getRowTs"] = Processor.process_getRowTs
self._processMap["getRowWithColumnsTs"] = Processor.process_getRowWithColumnsTs
self._processMap["getRows"] = Processor.process_getRows
self._processMap["getRowsWithColumns"] = Processor.process_getRowsWithColumns
self._processMap["getRowsTs"] = Processor.process_getRowsTs
self._processMap["getRowsWithColumnsTs"] = Processor.process_getRowsWithColumnsTs
self._processMap["mutateRow"] = Processor.process_mutateRow
self._processMap["mutateRowTs"] = Processor.process_mutateRowTs
self._processMap["mutateRows"] = Processor.process_mutateRows
self._processMap["mutateRowsTs"] = Processor.process_mutateRowsTs
self._processMap["atomicIncrement"] = Processor.process_atomicIncrement
self._processMap["deleteAll"] = Processor.process_deleteAll
self._processMap["deleteAllTs"] = Processor.process_deleteAllTs
self._processMap["deleteAllRow"] = Processor.process_deleteAllRow
self._processMap["increment"] = Processor.process_increment
self._processMap["incrementRows"] = Processor.process_incrementRows
self._processMap["deleteAllRowTs"] = Processor.process_deleteAllRowTs
self._processMap["scannerOpenWithScan"] = Processor.process_scannerOpenWithScan
self._processMap["scannerOpen"] = Processor.process_scannerOpen
self._processMap["scannerOpenWithStop"] = Processor.process_scannerOpenWithStop
self._processMap["scannerOpenWithPrefix"] = Processor.process_scannerOpenWithPrefix
self._processMap["scannerOpenTs"] = Processor.process_scannerOpenTs
self._processMap["scannerOpenWithStopTs"] = Processor.process_scannerOpenWithStopTs
self._processMap["scannerGet"] = Processor.process_scannerGet
self._processMap["scannerGetList"] = Processor.process_scannerGetList
self._processMap["scannerClose"] = Processor.process_scannerClose
self._processMap["getRowOrBefore"] = Processor.process_getRowOrBefore
self._processMap["getRegionInfo"] = Processor.process_getRegionInfo
def process(self, iprot, oprot):
(name, mtype, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
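# Dispatch sketch: process() looks the incoming message name up in
# _processMap and invokes the matching process_<name> function, e.g.
# self._processMap["getRow"](self, seqid, iprot, oprot); unknown names are
# answered with a TApplicationException rather than dropping the connection.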
def process_enableTable(self, seqid, iprot, oprot):
args = enableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = enableTable_result()
try:
self._handler.enableTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("enableTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_disableTable(self, seqid, iprot, oprot):
args = disableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = disableTable_result()
try:
self._handler.disableTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("disableTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableEnabled(self, seqid, iprot, oprot):
args = isTableEnabled_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableEnabled_result()
try:
result.success = self._handler.isTableEnabled(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("isTableEnabled", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_compact(self, seqid, iprot, oprot):
args = compact_args()
args.read(iprot)
iprot.readMessageEnd()
result = compact_result()
try:
self._handler.compact(args.tableNameOrRegionName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("compact", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_majorCompact(self, seqid, iprot, oprot):
args = majorCompact_args()
args.read(iprot)
iprot.readMessageEnd()
result = majorCompact_result()
try:
self._handler.majorCompact(args.tableNameOrRegionName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("majorCompact", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableNames(self, seqid, iprot, oprot):
args = getTableNames_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableNames_result()
try:
result.success = self._handler.getTableNames()
except IOError as io:
result.io = io
oprot.writeMessageBegin("getTableNames", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getColumnDescriptors(self, seqid, iprot, oprot):
args = getColumnDescriptors_args()
args.read(iprot)
iprot.readMessageEnd()
result = getColumnDescriptors_result()
try:
result.success = self._handler.getColumnDescriptors(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getColumnDescriptors", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableRegions(self, seqid, iprot, oprot):
args = getTableRegions_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableRegions_result()
try:
result.success = self._handler.getTableRegions(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getTableRegions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createTable(self, seqid, iprot, oprot):
args = createTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = createTable_result()
try:
self._handler.createTable(args.tableName, args.columnFamilies)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
except AlreadyExists as exist:
result.exist = exist
oprot.writeMessageBegin("createTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteTable(self, seqid, iprot, oprot):
args = deleteTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteTable_result()
try:
self._handler.deleteTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
args = get_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_result()
try:
result.success = self._handler.get(args.tableName, args.row, args.column, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVer(self, seqid, iprot, oprot):
args = getVer_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVer_result()
try:
result.success = self._handler.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getVer", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVerTs(self, seqid, iprot, oprot):
args = getVerTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVerTs_result()
try:
result.success = self._handler.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getVerTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRow(self, seqid, iprot, oprot):
args = getRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRow_result()
try:
result.success = self._handler.getRow(args.tableName, args.row, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowWithColumns(self, seqid, iprot, oprot):
args = getRowWithColumns_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowWithColumns_result()
try:
result.success = self._handler.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowWithColumns", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowTs(self, seqid, iprot, oprot):
args = getRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowTs_result()
try:
result.success = self._handler.getRowTs(args.tableName, args.row, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowWithColumnsTs(self, seqid, iprot, oprot):
args = getRowWithColumnsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowWithColumnsTs_result()
try:
result.success = self._handler.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowWithColumnsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRows(self, seqid, iprot, oprot):
args = getRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRows_result()
try:
result.success = self._handler.getRows(args.tableName, args.rows, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsWithColumns(self, seqid, iprot, oprot):
args = getRowsWithColumns_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsWithColumns_result()
try:
result.success = self._handler.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsWithColumns", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsTs(self, seqid, iprot, oprot):
args = getRowsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsTs_result()
try:
result.success = self._handler.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsWithColumnsTs(self, seqid, iprot, oprot):
args = getRowsWithColumnsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsWithColumnsTs_result()
try:
result.success = self._handler.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsWithColumnsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRow(self, seqid, iprot, oprot):
args = mutateRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRow_result()
try:
self._handler.mutateRow(args.tableName, args.row, args.mutations, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRowTs(self, seqid, iprot, oprot):
args = mutateRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRowTs_result()
try:
self._handler.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRows(self, seqid, iprot, oprot):
args = mutateRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRows_result()
try:
self._handler.mutateRows(args.tableName, args.rowBatches, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRowsTs(self, seqid, iprot, oprot):
args = mutateRowsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRowsTs_result()
try:
self._handler.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRowsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_atomicIncrement(self, seqid, iprot, oprot):
args = atomicIncrement_args()
args.read(iprot)
iprot.readMessageEnd()
result = atomicIncrement_result()
try:
result.success = self._handler.atomicIncrement(args.tableName, args.row, args.column, args.value)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("atomicIncrement", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAll(self, seqid, iprot, oprot):
args = deleteAll_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAll_result()
try:
self._handler.deleteAll(args.tableName, args.row, args.column, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAll", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllTs(self, seqid, iprot, oprot):
args = deleteAllTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllTs_result()
try:
self._handler.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllRow(self, seqid, iprot, oprot):
args = deleteAllRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllRow_result()
try:
self._handler.deleteAllRow(args.tableName, args.row, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_increment(self, seqid, iprot, oprot):
args = increment_args()
args.read(iprot)
iprot.readMessageEnd()
result = increment_result()
try:
self._handler.increment(args.increment)
except IOError as io:
result.io = io
oprot.writeMessageBegin("increment", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_incrementRows(self, seqid, iprot, oprot):
args = incrementRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = incrementRows_result()
try:
self._handler.incrementRows(args.increments)
except IOError as io:
result.io = io
oprot.writeMessageBegin("incrementRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllRowTs(self, seqid, iprot, oprot):
args = deleteAllRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllRowTs_result()
try:
self._handler.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithScan(self, seqid, iprot, oprot):
args = scannerOpenWithScan_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithScan_result()
try:
result.success = self._handler.scannerOpenWithScan(args.tableName, args.scan, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithScan", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpen(self, seqid, iprot, oprot):
args = scannerOpen_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpen_result()
try:
result.success = self._handler.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpen", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithStop(self, seqid, iprot, oprot):
args = scannerOpenWithStop_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithStop_result()
try:
result.success = self._handler.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithStop", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithPrefix(self, seqid, iprot, oprot):
args = scannerOpenWithPrefix_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithPrefix_result()
try:
result.success = self._handler.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithPrefix", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenTs(self, seqid, iprot, oprot):
args = scannerOpenTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenTs_result()
try:
result.success = self._handler.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithStopTs(self, seqid, iprot, oprot):
args = scannerOpenWithStopTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithStopTs_result()
try:
result.success = self._handler.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithStopTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerGet(self, seqid, iprot, oprot):
args = scannerGet_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerGet_result()
try:
result.success = self._handler.scannerGet(args.id)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerGet", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerGetList(self, seqid, iprot, oprot):
args = scannerGetList_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerGetList_result()
try:
result.success = self._handler.scannerGetList(args.id, args.nbRows)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerGetList", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerClose(self, seqid, iprot, oprot):
args = scannerClose_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerClose_result()
try:
self._handler.scannerClose(args.id)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerClose", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowOrBefore(self, seqid, iprot, oprot):
args = getRowOrBefore_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowOrBefore_result()
try:
result.success = self._handler.getRowOrBefore(args.tableName, args.row, args.family)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowOrBefore", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRegionInfo(self, seqid, iprot, oprot):
args = getRegionInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRegionInfo_result()
try:
result.success = self._handler.getRegionInfo(args.row)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRegionInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class enableTable_args:
"""
Attributes:
- tableName: name of the table
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enableTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
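# A minimal serialization round trip for the struct above -- a sketch
# only, assuming the plain TBinaryProtocol and TMemoryBuffer from the
# already-imported thrift packages:
#
#   buf = TTransport.TMemoryBuffer()
#   enableTable_args(tableName='t1').write(TBinaryProtocol.TBinaryProtocol(buf))
#   decoded = enableTable_args()
#   decoded.read(TBinaryProtocol.TBinaryProtocol(
#       TTransport.TMemoryBuffer(buf.getvalue())))
#   assert decoded.tableName == 't1'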
class enableTable_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enableTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class disableTable_args:
"""
Attributes:
- tableName: name of the table
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disableTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class disableTable_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disableTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isTableEnabled_args:
"""
Attributes:
- tableName: name of the table to check
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isTableEnabled_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isTableEnabled_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isTableEnabled_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
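# Result structs for non-void methods reserve field id 0 for the return
# value ('success'). The server sets exactly one of 'success' or an
# exception field, and write() only emits fields that are not None.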
class compact_args:
"""
Attributes:
   - tableNameOrRegionName: name of the table or region to compact
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableNameOrRegionName', None, None, ), # 1
)
def __init__(self, tableNameOrRegionName=None,):
self.tableNameOrRegionName = tableNameOrRegionName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableNameOrRegionName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('compact_args')
if self.tableNameOrRegionName is not None:
oprot.writeFieldBegin('tableNameOrRegionName', TType.STRING, 1)
oprot.writeString(self.tableNameOrRegionName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class compact_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('compact_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class majorCompact_args:
"""
Attributes:
   - tableNameOrRegionName: name of the table or region to run a major compaction on
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableNameOrRegionName', None, None, ), # 1
)
def __init__(self, tableNameOrRegionName=None,):
self.tableNameOrRegionName = tableNameOrRegionName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableNameOrRegionName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('majorCompact_args')
if self.tableNameOrRegionName is not None:
oprot.writeFieldBegin('tableNameOrRegionName', TType.STRING, 1)
oprot.writeString(self.tableNameOrRegionName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class majorCompact_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('majorCompact_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableNames_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableNames_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableNames_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype26, _size23) = iprot.readListBegin()
for _i27 in xrange(_size23):
_elem28 = iprot.readString();
self.success.append(_elem28)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableNames_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter29 in self.success:
oprot.writeString(iter29)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
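# List-valued fields follow a fixed pattern: readListBegin() yields the
# element type and count, the elements are read in a counted loop, and
# readListEnd() closes the container. The numeric suffixes on the
# temporaries (_etype26, _size23, ...) come from one generator-wide
# counter, which is why they simply keep increasing through the module.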
class getColumnDescriptors_args:
"""
Attributes:
- tableName: table name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getColumnDescriptors_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getColumnDescriptors_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.STRUCT,(ColumnDescriptor, ColumnDescriptor.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype31, _vtype32, _size30 ) = iprot.readMapBegin()
for _i34 in xrange(_size30):
_key35 = iprot.readString();
_val36 = ColumnDescriptor()
_val36.read(iprot)
self.success[_key35] = _val36
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getColumnDescriptors_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
for kiter37,viter38 in self.success.items():
oprot.writeString(kiter37)
viter38.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
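# Map-valued fields are symmetric: readMapBegin() yields the key type,
# value type and entry count, and each entry is read as a key/value pair
# (here a string key and a ColumnDescriptor struct value).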
class getTableRegions_args:
"""
Attributes:
- tableName: table name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableRegions_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableRegions_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRegionInfo, TRegionInfo.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype42, _size39) = iprot.readListBegin()
for _i43 in xrange(_size39):
_elem44 = TRegionInfo()
_elem44.read(iprot)
self.success.append(_elem44)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableRegions_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter45 in self.success:
iter45.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createTable_args:
"""
Attributes:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'columnFamilies', (TType.STRUCT,(ColumnDescriptor, ColumnDescriptor.thrift_spec)), None, ), # 2
)
def __init__(self, tableName=None, columnFamilies=None,):
self.tableName = tableName
self.columnFamilies = columnFamilies
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.columnFamilies = []
(_etype49, _size46) = iprot.readListBegin()
for _i50 in xrange(_size46):
_elem51 = ColumnDescriptor()
_elem51.read(iprot)
self.columnFamilies.append(_elem51)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.columnFamilies is not None:
oprot.writeFieldBegin('columnFamilies', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.columnFamilies))
for iter52 in self.columnFamilies:
iter52.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createTable_result:
"""
Attributes:
- io
- ia
- exist
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'exist', (AlreadyExists, AlreadyExists.thrift_spec), None, ), # 3
)
def __init__(self, io=None, ia=None, exist=None,):
self.io = io
self.ia = ia
self.exist = exist
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.exist = AlreadyExists()
self.exist.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
if self.exist is not None:
oprot.writeFieldBegin('exist', TType.STRUCT, 3)
self.exist.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
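# createTable declares three exceptions (IOError, IllegalArgument,
# AlreadyExists), so its result struct carries one field per exception
# and, because the method returns void, no 'success' slot at id 0.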
class deleteTable_args:
"""
Attributes:
- tableName: name of table to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteTable_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_args:
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype54, _vtype55, _size53 ) = iprot.readMapBegin()
for _i57 in xrange(_size53):
_key58 = iprot.readString();
_val59 = iprot.readString();
self.attributes[_key58] = _val59
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter60,viter61 in self.attributes.items():
oprot.writeString(kiter60)
oprot.writeString(viter61)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
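# From get_args onward, each read-path args struct also carries an
# optional 'attributes' map (string -> string) of per-operation
# attributes that is serialized verbatim for the server.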
class get_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype65, _size62) = iprot.readListBegin()
for _i66 in xrange(_size62):
_elem67 = TCell()
_elem67.read(iprot)
self.success.append(_elem67)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter68 in self.success:
iter68.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
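# Client code never builds these wrapper structs by hand: the generated
# Client proxy (assumed earlier in this module, as in standard Thrift
# output) packs positional arguments into get_args and unpacks
# get_result, roughly:
#   client.get('t1', 'row1', 'cf:qual', None)  # -> list of TCell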
class getVer_args:
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I32, 'numVersions', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, column=None, numVersions=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.numVersions = numVersions
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.numVersions = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype70, _vtype71, _size69 ) = iprot.readMapBegin()
for _i73 in xrange(_size69):
_key74 = iprot.readString();
_val75 = iprot.readString();
self.attributes[_key74] = _val75
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVer_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.numVersions is not None:
oprot.writeFieldBegin('numVersions', TType.I32, 4)
oprot.writeI32(self.numVersions)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter76,viter77 in self.attributes.items():
oprot.writeString(kiter76)
oprot.writeString(viter77)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVer_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype81, _size78) = iprot.readListBegin()
for _i82 in xrange(_size78):
_elem83 = TCell()
_elem83.read(iprot)
self.success.append(_elem83)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVer_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter84 in self.success:
iter84.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVerTs_args:
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.I32, 'numVersions', None, None, ), # 5
(6, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 6
)
def __init__(self, tableName=None, row=None, column=None, timestamp=None, numVersions=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.timestamp = timestamp
self.numVersions = numVersions
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.numVersions = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.attributes = {}
(_ktype86, _vtype87, _size85 ) = iprot.readMapBegin()
for _i89 in xrange(_size85):
_key90 = iprot.readString();
_val91 = iprot.readString();
self.attributes[_key90] = _val91
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVerTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.numVersions is not None:
oprot.writeFieldBegin('numVersions', TType.I32, 5)
oprot.writeI32(self.numVersions)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter92,viter93 in self.attributes.items():
oprot.writeString(kiter92)
oprot.writeString(viter93)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
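# getVerTs adds a timestamp bound on top of getVer: per the Hbase.thrift
# method documentation, only cell versions at or below 'timestamp' are
# returned, up to 'numVersions' of them.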
class getVerTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype97, _size94) = iprot.readListBegin()
for _i98 in xrange(_size94):
_elem99 = TCell()
_elem99.read(iprot)
self.success.append(_elem99)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVerTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter100 in self.success:
iter100.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRow_args:
"""
Attributes:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, row=None, attributes=None,):
self.tableName = tableName
self.row = row
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype102, _vtype103, _size101 ) = iprot.readMapBegin()
for _i105 in xrange(_size101):
_key106 = iprot.readString();
_val107 = iprot.readString();
self.attributes[_key106] = _val107
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter108,viter109 in self.attributes.items():
oprot.writeString(kiter108)
oprot.writeString(viter109)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRow_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype113, _size110) = iprot.readListBegin()
for _i114 in xrange(_size110):
_elem115 = TRowResult()
_elem115.read(iprot)
self.success.append(_elem115)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRow_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter116 in self.success:
iter116.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumns_args:
"""
Attributes:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, columns=None, attributes=None,):
self.tableName = tableName
self.row = row
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype120, _size117) = iprot.readListBegin()
for _i121 in xrange(_size117):
_elem122 = iprot.readString();
self.columns.append(_elem122)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype124, _vtype125, _size123 ) = iprot.readMapBegin()
for _i127 in xrange(_size123):
_key128 = iprot.readString();
_val129 = iprot.readString();
self.attributes[_key128] = _val129
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumns_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter130 in self.columns:
oprot.writeString(iter130)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter131,viter132 in self.attributes.items():
oprot.writeString(kiter131)
oprot.writeString(viter132)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumns_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype136, _size133) = iprot.readListBegin()
for _i137 in xrange(_size133):
_elem138 = TRowResult()
_elem138.read(iprot)
self.success.append(_elem138)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumns_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter139 in self.success:
iter139.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowTs_args:
"""
Attributes:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype141, _vtype142, _size140 ) = iprot.readMapBegin()
for _i144 in xrange(_size140):
_key145 = iprot.readString();
_val146 = iprot.readString();
self.attributes[_key145] = _val146
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter147,viter148 in self.attributes.items():
oprot.writeString(kiter147)
oprot.writeString(viter148)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype152, _size149) = iprot.readListBegin()
for _i153 in xrange(_size149):
_elem154 = TRowResult()
_elem154.read(iprot)
self.success.append(_elem154)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter155 in self.success:
iter155.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
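# The *Ts getters mirror the plain getters but bound the read by the supplied
# timestamp, giving a historical view of the row. A minimal sketch, assuming
# 'client' is the generated Hbase.Client bound to an open transport ('t1' and
# 'r1' are sample values):
#
#   rows = client.getRowTs('t1', 'r1', 1400000000000, {})
#   # rows is a list of TRowResult; the exact inclusive/exclusive bound on
#   # the timestamp is enforced server-side by the HBase Thrift gateway.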
class getRowWithColumnsTs_args:
"""
Attributes:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
   - timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype159, _size156) = iprot.readListBegin()
for _i160 in xrange(_size156):
_elem161 = iprot.readString();
self.columns.append(_elem161)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype163, _vtype164, _size162 ) = iprot.readMapBegin()
for _i166 in xrange(_size162):
_key167 = iprot.readString();
_val168 = iprot.readString();
self.attributes[_key167] = _val168
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumnsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter169 in self.columns:
oprot.writeString(iter169)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter170,viter171 in self.attributes.items():
oprot.writeString(kiter170)
oprot.writeString(viter171)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumnsTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype175, _size172) = iprot.readListBegin()
for _i176 in xrange(_size172):
_elem177 = TRowResult()
_elem177.read(iprot)
self.success.append(_elem177)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumnsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter178 in self.success:
iter178.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRows_args:
"""
Attributes:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, rows=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype182, _size179) = iprot.readListBegin()
for _i183 in xrange(_size179):
_elem184 = iprot.readString();
self.rows.append(_elem184)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype186, _vtype187, _size185 ) = iprot.readMapBegin()
for _i189 in xrange(_size185):
_key190 = iprot.readString();
_val191 = iprot.readString();
self.attributes[_key190] = _val191
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRows_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter192 in self.rows:
oprot.writeString(iter192)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter193,viter194 in self.attributes.items():
oprot.writeString(kiter193)
oprot.writeString(viter194)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRows_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype198, _size195) = iprot.readListBegin()
for _i199 in xrange(_size195):
_elem200 = TRowResult()
_elem200.read(iprot)
self.success.append(_elem200)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRows_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter201 in self.success:
iter201.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
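# getRows and its variants batch several row keys into one RPC, avoiding a
# network round trip per row. A minimal sketch under the same client
# assumption as above:
#
#   results = client.getRows('t1', ['r1', 'r2', 'r3'], {})
#   for row in results:                # one TRowResult per row that exists
#       process(row.row, row.columns)  # 'process' is a placeholder, not an API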
class getRowsWithColumns_args:
"""
Attributes:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rows=None, columns=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype205, _size202) = iprot.readListBegin()
for _i206 in xrange(_size202):
_elem207 = iprot.readString();
self.rows.append(_elem207)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype211, _size208) = iprot.readListBegin()
for _i212 in xrange(_size208):
_elem213 = iprot.readString();
self.columns.append(_elem213)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype215, _vtype216, _size214 ) = iprot.readMapBegin()
for _i218 in xrange(_size214):
_key219 = iprot.readString();
_val220 = iprot.readString();
self.attributes[_key219] = _val220
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumns_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter221 in self.rows:
oprot.writeString(iter221)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter222 in self.columns:
oprot.writeString(iter222)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter223,viter224 in self.attributes.items():
oprot.writeString(kiter223)
oprot.writeString(viter224)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumns_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype228, _size225) = iprot.readListBegin()
for _i229 in xrange(_size225):
_elem230 = TRowResult()
_elem230.read(iprot)
self.success.append(_elem230)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumns_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter231 in self.success:
iter231.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsTs_args:
"""
Attributes:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rows=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype235, _size232) = iprot.readListBegin()
for _i236 in xrange(_size232):
_elem237 = iprot.readString();
self.rows.append(_elem237)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype239, _vtype240, _size238 ) = iprot.readMapBegin()
for _i242 in xrange(_size238):
_key243 = iprot.readString();
_val244 = iprot.readString();
self.attributes[_key243] = _val244
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter245 in self.rows:
oprot.writeString(iter245)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter246,viter247 in self.attributes.items():
oprot.writeString(kiter246)
oprot.writeString(viter247)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype251, _size248) = iprot.readListBegin()
for _i252 in xrange(_size248):
_elem253 = TRowResult()
_elem253.read(iprot)
self.success.append(_elem253)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter254 in self.success:
iter254.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumnsTs_args:
"""
Attributes:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
   - timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, rows=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype258, _size255) = iprot.readListBegin()
for _i259 in xrange(_size255):
_elem260 = iprot.readString();
self.rows.append(_elem260)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype264, _size261) = iprot.readListBegin()
for _i265 in xrange(_size261):
_elem266 = iprot.readString();
self.columns.append(_elem266)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype268, _vtype269, _size267 ) = iprot.readMapBegin()
for _i271 in xrange(_size267):
_key272 = iprot.readString();
_val273 = iprot.readString();
self.attributes[_key272] = _val273
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumnsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter274 in self.rows:
oprot.writeString(iter274)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter275 in self.columns:
oprot.writeString(iter275)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter276,viter277 in self.attributes.items():
oprot.writeString(kiter276)
oprot.writeString(viter277)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumnsTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype281, _size278) = iprot.readListBegin()
for _i282 in xrange(_size278):
_elem283 = TRowResult()
_elem283.read(iprot)
self.success.append(_elem283)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumnsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter284 in self.success:
iter284.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRow_args:
"""
Attributes:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, mutations=None, attributes=None,):
self.tableName = tableName
self.row = row
self.mutations = mutations
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.mutations = []
(_etype288, _size285) = iprot.readListBegin()
for _i289 in xrange(_size285):
_elem290 = Mutation()
_elem290.read(iprot)
self.mutations.append(_elem290)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype292, _vtype293, _size291 ) = iprot.readMapBegin()
for _i295 in xrange(_size291):
_key296 = iprot.readString();
_val297 = iprot.readString();
self.attributes[_key296] = _val297
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.mutations is not None:
oprot.writeFieldBegin('mutations', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.mutations))
for iter298 in self.mutations:
iter298.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter299,viter300 in self.attributes.items():
oprot.writeString(kiter299)
oprot.writeString(viter300)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRow_result:
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
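# mutateRow applies a list of Mutation structs to one row; as with native
# HBase puts and deletes, the batch is atomic within that single row. A
# hedged sketch: the Mutation keyword arguments are assumed from this
# module's generated Mutation struct (see its thrift_spec references above)
# and should be checked against your build:
#
#   muts = [
#       Mutation(column='cf:qual', value='v1'),    # write one cell
#       Mutation(column='cf:old', isDelete=True),  # delete another
#   ]
#   client.mutateRow('t1', 'r1', muts, {})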
class mutateRowTs_args:
"""
Attributes:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, mutations=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.mutations = mutations
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.mutations = []
(_etype304, _size301) = iprot.readListBegin()
for _i305 in xrange(_size301):
_elem306 = Mutation()
_elem306.read(iprot)
self.mutations.append(_elem306)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype308, _vtype309, _size307 ) = iprot.readMapBegin()
for _i311 in xrange(_size307):
_key312 = iprot.readString();
_val313 = iprot.readString();
self.attributes[_key312] = _val313
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.mutations is not None:
oprot.writeFieldBegin('mutations', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.mutations))
for iter314 in self.mutations:
iter314.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter315,viter316 in self.attributes.items():
oprot.writeString(kiter315)
oprot.writeString(viter316)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRowTs_result:
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
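# The mutateRowTs variant stamps the written cells with the caller-supplied
# timestamp instead of the region server's clock, e.g. with 'muts' built as
# in the sketch above:
#
#   client.mutateRowTs('t1', 'r1', muts, 1400000000000, {})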
class mutateRows_args:
"""
Attributes:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rowBatches', (TType.STRUCT,(BatchMutation, BatchMutation.thrift_spec)), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, rowBatches=None, attributes=None,):
self.tableName = tableName
self.rowBatches = rowBatches
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rowBatches = []
(_etype320, _size317) = iprot.readListBegin()
for _i321 in xrange(_size317):
_elem322 = BatchMutation()
_elem322.read(iprot)
self.rowBatches.append(_elem322)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype324, _vtype325, _size323 ) = iprot.readMapBegin()
for _i327 in xrange(_size323):
_key328 = iprot.readString();
_val329 = iprot.readString();
self.attributes[_key328] = _val329
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRows_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rowBatches is not None:
oprot.writeFieldBegin('rowBatches', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.rowBatches))
for iter330 in self.rowBatches:
iter330.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter331,viter332 in self.attributes.items():
oprot.writeString(kiter331)
oprot.writeString(viter332)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRows_result:
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRows_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
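# mutateRows submits several per-row batches in one call; each BatchMutation
# pairs a row key with its mutation list. Field names here are assumed from
# the generated BatchMutation struct referenced in the thrift_spec above and
# should be verified against your build:
#
#   batches = [
#       BatchMutation(row='r1', mutations=[Mutation(column='cf:a', value='1')]),
#       BatchMutation(row='r2', mutations=[Mutation(column='cf:a', value='2')]),
#   ]
#   client.mutateRows('t1', batches, {})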
class mutateRowsTs_args:
"""
Attributes:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rowBatches', (TType.STRUCT,(BatchMutation, BatchMutation.thrift_spec)), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rowBatches=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rowBatches = rowBatches
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rowBatches = []
(_etype336, _size333) = iprot.readListBegin()
for _i337 in xrange(_size333):
_elem338 = BatchMutation()
_elem338.read(iprot)
self.rowBatches.append(_elem338)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype340, _vtype341, _size339 ) = iprot.readMapBegin()
for _i343 in xrange(_size339):
_key344 = iprot.readString();
_val345 = iprot.readString();
self.attributes[_key344] = _val345
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rowBatches is not None:
oprot.writeFieldBegin('rowBatches', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.rowBatches))
for iter346 in self.rowBatches:
iter346.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter347,viter348 in self.attributes.items():
oprot.writeString(kiter347)
oprot.writeString(viter348)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRowsTs_result:
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowsTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class atomicIncrement_args:
"""
Attributes:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'value', None, None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, value=None,):
self.tableName = tableName
self.row = row
self.column = column
self.value = value
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.value = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomicIncrement_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.I64, 4)
oprot.writeI64(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class atomicIncrement_result:
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomicIncrement_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
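# A minimal round-trip sketch (editorial addition): it reuses the TTransport
# and TBinaryProtocol names already imported at the top of this generated
# module and shows that the write()/read() pairs above are symmetric. The
# table, row and column values are illustrative placeholders only.
def _roundtrip_atomic_increment():
    args = atomicIncrement_args(tableName='t1', row='r1',
                                column='cf:counter', value=1)
    buf = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(buf))  # pure-Python encoder
    wire = buf.getvalue()  # the serialized bytes as sent on the wire
    clone = atomicIncrement_args()
    clone.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(wire)))
    assert clone == args  # __eq__ above compares the instances' __dict__
    return wire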
class deleteAll_args:
"""
Attributes:
- tableName: name of table
- row: key of the row in which the cell is to be deleted
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype350, _vtype351, _size349 ) = iprot.readMapBegin()
for _i353 in xrange(_size349):
_key354 = iprot.readString();
_val355 = iprot.readString();
self.attributes[_key354] = _val355
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAll_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter356,viter357 in self.attributes.items():
oprot.writeString(kiter356)
oprot.writeString(viter357)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAll_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAll_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllTs_args:
"""
Attributes:
- tableName: name of table
- row: key of the row in which the cell is to be deleted
- column: name of column whose value is to be deleted
- timestamp: delete cells with a timestamp equal to or older than this value
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, column=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype359, _vtype360, _size358 ) = iprot.readMapBegin()
for _i362 in xrange(_size358):
_key363 = iprot.readString();
_val364 = iprot.readString();
self.attributes[_key363] = _val364
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter365,viter366 in self.attributes.items():
oprot.writeString(kiter365)
oprot.writeString(viter366)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllTs_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRow_args:
"""
Attributes:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, row=None, attributes=None,):
self.tableName = tableName
self.row = row
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype368, _vtype369, _size367 ) = iprot.readMapBegin()
for _i371 in xrange(_size367):
_key372 = iprot.readString();
_val373 = iprot.readString();
self.attributes[_key372] = _val373
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter374,viter375 in self.attributes.items():
oprot.writeString(kiter374)
oprot.writeString(viter375)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRow_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRow_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_args:
"""
Attributes:
- increment: The single increment to apply
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'increment', (TIncrement, TIncrement.thrift_spec), None, ), # 1
)
def __init__(self, increment=None,):
self.increment = increment
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.increment = TIncrement()
self.increment.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_args')
if self.increment is not None:
oprot.writeFieldBegin('increment', TType.STRUCT, 1)
self.increment.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
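# Hedged construction sketch (editorial addition): increment_args wraps a
# single TIncrement from the companion generated ttypes module. The field
# names set below (table, row, column, ammount -- the last spelling follows
# the upstream IDL) are an assumption about that module and should be checked
# against your own ttypes.py.
def _example_increment_args():
    inc = TIncrement()
    inc.table = 't1'
    inc.row = 'r1'
    inc.column = 'cf:counter'
    inc.ammount = 10  # i64 delta applied to the counter cell
    return increment_args(increment=inc)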
class incrementRows_args:
"""
Attributes:
- increments: The list of increments
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'increments', (TType.STRUCT,(TIncrement, TIncrement.thrift_spec)), None, ), # 1
)
def __init__(self, increments=None,):
self.increments = increments
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.increments = []
(_etype379, _size376) = iprot.readListBegin()
for _i380 in xrange(_size376):
_elem381 = TIncrement()
_elem381.read(iprot)
self.increments.append(_elem381)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('incrementRows_args')
if self.increments is not None:
oprot.writeFieldBegin('increments', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.increments))
for iter382 in self.increments:
iter382.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class incrementRows_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('incrementRows_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRowTs_args:
"""
Attributes:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: delete all cells with a timestamp equal to or older than this value
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype384, _vtype385, _size383 ) = iprot.readMapBegin()
for _i387 in xrange(_size383):
_key388 = iprot.readString();
_val389 = iprot.readString();
self.attributes[_key388] = _val389
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter390,viter391 in self.attributes.items():
oprot.writeString(kiter390)
oprot.writeString(viter391)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRowTs_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRowTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
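# Hedged convenience sketch (editorial addition): besides the hand-rolled
# buffer dance, a stock Apache Thrift install ships thrift.TSerialization,
# which wraps the same write()/read() machinery used by every struct in this
# module. The helper name below is ours, not part of the generated API.
def _to_wire(struct):
    from thrift.TSerialization import serialize
    return serialize(struct, TBinaryProtocol.TBinaryProtocolFactory())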
class scannerOpenWithScan_args:
"""
Attributes:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRUCT, 'scan', (TScan, TScan.thrift_spec), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, scan=None, attributes=None,):
self.tableName = tableName
self.scan = scan
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.scan = TScan()
self.scan.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype393, _vtype394, _size392 ) = iprot.readMapBegin()
for _i396 in xrange(_size392):
_key397 = iprot.readString();
_val398 = iprot.readString();
self.attributes[_key397] = _val398
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithScan_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.scan is not None:
oprot.writeFieldBegin('scan', TType.STRUCT, 2)
self.scan.write(oprot)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter399,viter400 in self.attributes.items():
oprot.writeString(kiter399)
oprot.writeString(viter400)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithScan_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithScan_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
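# Hedged note (editorial addition): each scannerOpen*_result in this module
# returns an i32 scanner handle in 'success'; callers feed it to the
# scannerGet/scannerClose messages defined elsewhere in this file. A sketch of
# decoding such a result from raw reply bytes:
def _decode_scanner_id(wire_bytes):
    res = scannerOpenWithScan_result()
    res.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(wire_bytes)))
    if res.io is not None:
        raise res.io  # surface the server-side IOError to the caller
    return res.success  # the opened scanner's id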
class scannerOpen_args:
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, startRow=None, columns=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype404, _size401) = iprot.readListBegin()
for _i405 in xrange(_size401):
_elem406 = iprot.readString();
self.columns.append(_elem406)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype408, _vtype409, _size407 ) = iprot.readMapBegin()
for _i411 in xrange(_size407):
_key412 = iprot.readString();
_val413 = iprot.readString();
self.attributes[_key412] = _val413
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpen_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter414 in self.columns:
oprot.writeString(iter414)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter415,viter416 in self.attributes.items():
oprot.writeString(kiter415)
oprot.writeString(viter416)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpen_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpen_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStop_args:
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.STRING, 'stopRow', None, None, ), # 3
(4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, startRow=None, stopRow=None, columns=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.stopRow = stopRow
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.stopRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.columns = []
(_etype420, _size417) = iprot.readListBegin()
for _i421 in xrange(_size417):
_elem422 = iprot.readString();
self.columns.append(_elem422)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype424, _vtype425, _size423 ) = iprot.readMapBegin()
for _i427 in xrange(_size423):
_key428 = iprot.readString();
_val429 = iprot.readString();
self.attributes[_key428] = _val429
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStop_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.stopRow is not None:
oprot.writeFieldBegin('stopRow', TType.STRING, 3)
oprot.writeString(self.stopRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter430 in self.columns:
oprot.writeString(iter430)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter431,viter432 in self.attributes.items():
oprot.writeString(kiter431)
oprot.writeString(viter432)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStop_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStop_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
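# Hedged usage sketch (editorial addition): per the docstring above, stopRow
# is exclusive, so the args below describe the half-open row range
# ['r100', 'r200'). Table, rows and column names are illustrative placeholders.
def _example_scan_range():
    return scannerOpenWithStop_args(
        tableName='t1',
        startRow='r100',      # first row the scanner may return
        stopRow='r200',       # scanning stops *before* this row
        columns=['cf:qual'],  # a family or family:qualifier name
        attributes={})        # optional per-scan attributes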
class scannerOpenWithPrefix_args:
"""
Attributes:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startAndPrefix', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, startAndPrefix=None, columns=None, attributes=None,):
self.tableName = tableName
self.startAndPrefix = startAndPrefix
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startAndPrefix = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype436, _size433) = iprot.readListBegin()
for _i437 in xrange(_size433):
_elem438 = iprot.readString();
self.columns.append(_elem438)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype440, _vtype441, _size439 ) = iprot.readMapBegin()
for _i443 in xrange(_size439):
_key444 = iprot.readString();
_val445 = iprot.readString();
self.attributes[_key444] = _val445
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithPrefix_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startAndPrefix is not None:
oprot.writeFieldBegin('startAndPrefix', TType.STRING, 2)
oprot.writeString(self.startAndPrefix)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter446 in self.columns:
oprot.writeString(iter446)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter447,viter448 in self.attributes.items():
oprot.writeString(kiter447)
oprot.writeString(viter448)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithPrefix_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithPrefix_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenTs_args:
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: upper bound on the timestamps of the cell versions returned
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, startRow=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype452, _size449) = iprot.readListBegin()
for _i453 in xrange(_size449):
_elem454 = iprot.readString();
self.columns.append(_elem454)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype456, _vtype457, _size455 ) = iprot.readMapBegin()
for _i459 in xrange(_size455):
_key460 = iprot.readString();
_val461 = iprot.readString();
self.attributes[_key460] = _val461
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter462 in self.columns:
oprot.writeString(iter462)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter463,viter464 in self.attributes.items():
oprot.writeString(kiter463)
oprot.writeString(viter464)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStopTs_args:
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: upper bound on the timestamps of the cell versions returned
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.STRING, 'stopRow', None, None, ), # 3
(4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
(5, TType.I64, 'timestamp', None, None, ), # 5
(6, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 6
)
def __init__(self, tableName=None, startRow=None, stopRow=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.stopRow = stopRow
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.stopRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.columns = []
(_etype468, _size465) = iprot.readListBegin()
for _i469 in xrange(_size465):
_elem470 = iprot.readString();
self.columns.append(_elem470)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.attributes = {}
(_ktype472, _vtype473, _size471 ) = iprot.readMapBegin()
for _i475 in xrange(_size471):
_key476 = iprot.readString();
_val477 = iprot.readString();
self.attributes[_key476] = _val477
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStopTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.stopRow is not None:
oprot.writeFieldBegin('stopRow', TType.STRING, 3)
oprot.writeString(self.stopRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter478 in self.columns:
oprot.writeString(iter478)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 5)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter479,viter480 in self.attributes.items():
oprot.writeString(kiter479)
oprot.writeString(viter480)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStopTs_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStopTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGet_args:
"""
Attributes:
- id: id of a scanner returned by scannerOpen
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGet_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGet_result:
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype484, _size481) = iprot.readListBegin()
for _i485 in xrange(_size481):
_elem486 = TRowResult()
_elem486.read(iprot)
self.success.append(_elem486)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGet_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter487 in self.success:
iter487.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGetList_args:
"""
Attributes:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
(2, TType.I32, 'nbRows', None, None, ), # 2
)
def __init__(self, id=None, nbRows=None,):
self.id = id
self.nbRows = nbRows
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.nbRows = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGetList_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
if self.nbRows is not None:
oprot.writeFieldBegin('nbRows', TType.I32, 2)
oprot.writeI32(self.nbRows)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGetList_result:
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype491, _size488) = iprot.readListBegin()
for _i492 in xrange(_size488):
_elem493 = TRowResult()
_elem493.read(iprot)
self.success.append(_elem493)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGetList_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter494 in self.success:
iter494.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerClose_args:
"""
Attributes:
- id: id of a scanner returned by scannerOpen
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerClose_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerClose_result:
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerClose_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowOrBefore_args:
"""
Attributes:
- tableName: name of table
- row: row key
- family: column name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
)
def __init__(self, tableName=None, row=None, family=None,):
self.tableName = tableName
self.row = row
self.family = family
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowOrBefore_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowOrBefore_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype498, _size495) = iprot.readListBegin()
for _i499 in xrange(_size495):
_elem500 = TCell()
_elem500.read(iprot)
self.success.append(_elem500)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowOrBefore_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter501 in self.success:
iter501.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionInfo_args:
"""
Attributes:
- row: row key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'row', None, None, ), # 1
)
def __init__(self, row=None,):
self.row = row
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionInfo_args')
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 1)
oprot.writeString(self.row)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionInfo_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TRegionInfo, TRegionInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TRegionInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
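# Editorial sketch (not part of the generated file): a minimal, hedged example
# of how the generated argument structs above round-trip through the Thrift
# binary protocol, assuming the standard `thrift` Python package is available.
# The function name and the sample row key are illustrative only.
def _example_struct_roundtrip():
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    out_buf = TTransport.TMemoryBuffer()
    getRegionInfo_args(row='row-key').write(TBinaryProtocol.TBinaryProtocol(out_buf))
    # Feed the serialized bytes back through a fresh transport and decode.
    in_buf = TTransport.TMemoryBuffer(out_buf.getvalue())
    decoded = getRegionInfo_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(in_buf))
    assert decoded.row == 'row-key'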
|
Adel-Magebinary/odoo
|
refs/heads/8.0
|
addons/website_google_map/__init__.py
|
1350
|
import controllers
|
bigzz/linaro-aarch64
|
refs/heads/arm64-v3.10
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
timeyyy/PyUpdater
|
refs/heads/master
|
pyupdater/vendor/PyInstaller/hooks/hook-tables.py
|
10
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
hiddenimports = ["tables._comp_lzo", "tables._comp_bzip2"]
|
ArianeFire/HaniCam
|
refs/heads/master
|
Hanicam/OpenCV/opencv-3.1.0/samples/python/hist.py
|
5
|
#!/usr/bin/env python
''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution
Benefit : Learn how to draw histogram of images
Get familiar with cv2.calcHist, cv2.equalizeHist, cv2.normalize and some drawing functions
Level : Beginner or Intermediate
Functions : 1) hist_curve : returns histogram of an image drawn as curves
2) hist_lines : returns histogram of an image drawn as bins ( only for grayscale images )
Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
h = np.zeros((300,256,3))
if len(im.shape) == 2:
color = [(255,255,255)]
elif im.shape[2] == 3:
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
hist_item = cv2.calcHist([im],[ch],None,[256],[0,256])
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.int32(np.column_stack((bins,hist)))
cv2.polylines(h,[pts],False,col)
y=np.flipud(h)
return y
def hist_lines(im):
h = np.zeros((300,256,3))
if len(im.shape)!=2:
print("hist_lines applicable only for grayscale images")
#print("so converting image to grayscale for representation"
im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
for x,y in enumerate(hist):
cv2.line(h,(x,0),(x,y),(255,255,255))
y = np.flipud(h)
return y
if __name__ == '__main__':
import sys
if len(sys.argv)>1:
fname = sys.argv[1]
    else:
fname = '../data/lena.jpg'
print("usage : python hist.py <image_file>")
im = cv2.imread(fname)
if im is None:
print('Failed to load image file:', fname)
sys.exit(1)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
print(''' Histogram plotting \n
Keymap :\n
a - show histogram for color image in curve mode \n
b - show histogram in bin mode \n
c - show equalized histogram (always in bin mode) \n
    d - show histogram for grayscale image in curve mode \n
e - show histogram for a normalized image in curve mode \n
Esc - exit \n
''')
cv2.imshow('image',im)
while True:
k = cv2.waitKey(0)&0xFF
if k == ord('a'):
curve = hist_curve(im)
cv2.imshow('histogram',curve)
cv2.imshow('image',im)
print('a')
elif k == ord('b'):
print('b')
lines = hist_lines(im)
cv2.imshow('histogram',lines)
cv2.imshow('image',gray)
elif k == ord('c'):
print('c')
equ = cv2.equalizeHist(gray)
lines = hist_lines(equ)
cv2.imshow('histogram',lines)
cv2.imshow('image',equ)
elif k == ord('d'):
print('d')
curve = hist_curve(gray)
cv2.imshow('histogram',curve)
cv2.imshow('image',gray)
elif k == ord('e'):
print('e')
norm = cv2.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX)
lines = hist_lines(norm)
cv2.imshow('histogram',lines)
cv2.imshow('image',norm)
elif k == 27:
print('ESC')
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
vmrob/needy
|
refs/heads/master
|
needy/sources/download.py
|
1
|
from __future__ import print_function
import io
import os
import binascii
import hashlib
import socket
import shutil
import sys
import tarfile
import tempfile
import time
import zipfile
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from ..source import Source
class Download(Source):
def __init__(self, url, checksum, destination, cache_directory):
Source.__init__(self)
self.url = url
self.checksum = checksum
self.destination = destination
self.cache_directory = cache_directory
self.local_download_path = os.path.join(cache_directory, checksum)
@classmethod
def identifier(cls):
return 'download'
def clean(self):
if not self.checksum:
raise ValueError('checksums are required for downloads')
self.__fetch()
logging.info('Unpacking to %s' % self.destination)
self.__clean_destination_dir()
self.__unpack()
self.__trim_lone_dirs()
def __fetch(self):
if not os.path.exists(self.cache_directory):
os.makedirs(self.cache_directory)
if not os.path.isfile(self.local_download_path):
self.get(self.url, self.checksum, self.local_download_path)
@classmethod
def get(cls, url, checksum, destination):
logging.info('Downloading from %s' % url)
download = None
attempts = 0
download_successful = False
while not download_successful and attempts < 5:
try:
download = urllib2.urlopen(url, timeout=5)
except urllib2.URLError as e:
logging.warning(e)
except socket.timeout as e:
logging.warning(e)
attempts = attempts + 1
download_successful = download and download.code == 200 and 'content-length' in download.info()
if not download_successful:
logging.warning('Download failed. Retrying...')
time.sleep(attempts)
if not download_successful:
raise IOError('unable to download library')
size = int(download.info()['content-length'])
progress = 0
if sys.stdout.isatty():
print('{:.1%}'.format(float(progress) / size), end='')
sys.stdout.flush()
local_file = tempfile.NamedTemporaryFile('wb', delete=False)
try:
chunk_size = 1024
while True:
chunk = download.read(chunk_size)
progress = progress + chunk_size
if sys.stdout.isatty():
print('\r{:.1%}'.format(float(progress) / size), end='')
sys.stdout.flush()
if not chunk:
break
local_file.write(chunk)
local_file.close()
if sys.stdout.isatty():
print('\r \r', end='')
sys.stdout.flush()
logging.debug('Verifying checksum...')
if not cls.verify_checksum(local_file.name, checksum):
raise ValueError('incorrect checksum')
logging.debug('Checksum verified.')
shutil.move(local_file.name, destination)
except:
os.unlink(local_file.name)
raise
del download
@classmethod
def verify_checksum(cls, path, expected):
expected = binascii.unhexlify(expected)
with open(path, 'rb') as file:
file_contents = file.read()
hash = None
if len(expected) == hashlib.md5().digest_size:
hash = hashlib.md5()
elif len(expected) == hashlib.sha1().digest_size:
hash = hashlib.sha1()
else:
raise ValueError('unknown checksum type')
hash.update(file_contents)
return expected == hash.digest()
def __clean_destination_dir(self):
if os.path.exists(self.destination):
shutil.rmtree(self.destination)
os.makedirs(self.destination)
def __unpack(self):
if tarfile.is_tarfile(self.local_download_path):
self.__tarfile_unpack()
return
if zipfile.is_zipfile(self.local_download_path):
self.__zipfile_unpack()
return
def __tarfile_unpack(self):
with open(self.local_download_path, 'rb') as file:
tar = tarfile.open(fileobj=file, mode='r|*')
tar.extractall(self.destination if isinstance(self.destination, str) else self.destination.encode(sys.getfilesystemencoding()))
del tar
def __zipfile_unpack(self):
with zipfile.ZipFile(self.local_download_path, 'r') as file:
file.extractall(self.destination)
def __trim_lone_dirs(self):
temporary_directory = os.path.join(self.cache_directory, 'temp_')
while True:
destination_contents = os.listdir(self.destination)
if len(destination_contents) != 1:
break
lone_directory = os.path.join(self.destination, destination_contents[0])
if not os.path.isdir(lone_directory):
break
shutil.move(lone_directory, temporary_directory)
shutil.rmtree(self.destination)
shutil.move(temporary_directory, self.destination)
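# Editorial sketch (not part of needy): exercising the checksum helper above
# against a throwaway file. The payload and digest are fabricated for the
# example; verify_checksum picks MD5 or SHA-1 by the digest's byte length.
def _example_verify_checksum():
    import hashlib
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'payload')
        path = f.name
    digest = hashlib.md5(b'payload').hexdigest()  # 32 hex chars -> MD5 branch
    assert Download.verify_checksum(path, digest)
    os.unlink(path)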
|
e-democracy/edem.group.member.invite.csv
|
refs/heads/master
|
edem/group/__init__.py
|
85
|
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
ilmanzo/scratch_extensions
|
refs/heads/master
|
venv/lib/python3.4/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
|
1835
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
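# Editorial sketch: one hedged way to drive to_sax, assuming the html5lib
# package this vendored module ships with. ContentHandler's default no-op
# methods satisfy every callback to_sax makes, so this just walks a parsed
# fragment end to end.
def _example_to_sax():
    import html5lib
    from xml.sax.handler import ContentHandler
    tree = html5lib.parse('<p>hello</p>')
    walker = html5lib.getTreeWalker('etree')
    to_sax(walker(tree), ContentHandler())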
|
Forritarar-FS/Kastali
|
refs/heads/master
|
pythonHus/room44.py
|
1
|
from . import room
from tkinter import *
def do():
leid = room.grunnur(44)
print("Velkominn í herbergi 44.")
print("")
svar = input("Viltu fara í annað herbergi? [j/n] ").lower()
if(svar[0]=='j'):
print("")
herbVal = input("Í hvaða herbergi viltu fara: 43,34 eða 54? ")
if(herbVal=="43"):
leid.go("w")
elif(herbVal=="34"):
leid.go("n")
elif(herbVal=="54"):
print("Hurðinn er læst")
elif(svar[0]=='n'):
print("")
print("Flott, þú horfir í kringum þig og sérð ekkert nema myndir á veggjunum.")
print("Það er eitthvað skrítið við eina myndina svo þú labbar upp að henni og skoðar hana nánar")
print("")
cont = input("Ýttu á k til að halda áfram. ")
if(cont[0]=='k'):
master = Tk()
w = Canvas(master, width=200, height=100)
w.pack()
w.create_line(0, 0, 200, 100)
w.create_line(0, 100, 200, 0, fill="red", dash=(4, 4))
w.create_rectangle(50, 25, 150, 75, fill="blue")
mainloop()
print("")
cont2 = input("Ýttu á k til að halda áfram. ")
if(cont2[0]=='k'):
print("")
print("Farðu inn í annað herbergi!")
print("")
herbVal2 = input("Í hvaða herbergi viltu fara: 43,34 eða 54? ")
if(herbVal2=="43"):
leid.go("w")
elif(herbVal2=="34"):
leid.go("n")
elif(herbVal2=="54"):
print("Hurðinn er læst")
|
AlexAsh/pygame15
|
refs/heads/master
|
controller/mouse.py
|
1
|
"""Handling mouse application actions"""
class MouseController:
"""Handling mouse application actions: press, release and move"""
MOUSE_BUTTON_LEFT = 1
def __init__(self, models):
self.status = ""
self.models = models
def release(self, event):
"""Handle mouse button release"""
self.status = ("mouse button release " +
str(event.pos) + " " +
str(event.button))
if event.button == self.MOUSE_BUTTON_LEFT:
self.models["Field"].release()
return self.status
def press(self, event):
"""Handle mouse button press"""
self.status = ("mouse button press " +
str(event.pos) + " " +
str(event.button))
if event.button == self.MOUSE_BUTTON_LEFT:
self.models["Field"].freeze(map(float, event.pos))
return self.status
def move(self, event):
"""Handle mouse move"""
self.status = ("mouse move " +
str(event.pos) + " " +
str(event.rel) + " " +
str(event.buttons))
if event.buttons[0]:
self.models["Field"].manual_move(map(float, event.pos),
map(float, event.rel))
return self.status
|
thaim/ansible
|
refs/heads/fix-broken-link
|
lib/ansible/modules/network/aci/aci_bd_to_l3out.py
|
27
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_bd_to_l3out
short_description: Bind Bridge Domain to L3 Out (fv:RsBDToOut)
description:
- Bind Bridge Domain to L3 Out on Cisco ACI fabrics.
version_added: '2.4'
options:
bd:
description:
- The name of the Bridge Domain.
type: str
aliases: [ bd_name, bridge_domain ]
l3out:
description:
    - The name of the l3out to associate with the Bridge Domain.
type: str
tenant:
description:
- The name of the Tenant.
type: str
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(bd) and C(l3out) parameters should exist before using this module.
  The M(aci_bd) and M(aci_l3out) modules can be used for these.
seealso:
- module: aci_bd
- module: aci_l3out
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:RsBDToOut).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r''' # '''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
SUBNET_CONTROL_MAPPING = dict(
nd_ra='nd',
no_gw='no-default-gateway',
querier_ip='querier',
unspecified='',
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
bd=dict(type='str', aliases=['bd_name', 'bridge_domain']), # Not required for querying all objects
l3out=dict(type='str'), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'present', ['bd', 'l3out', 'tenant']],
['state', 'absent', ['bd', 'l3out', 'tenant']],
],
)
bd = module.params['bd']
l3out = module.params['l3out']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvBD',
aci_rn='BD-{0}'.format(bd),
module_object=bd,
target_filter={'name': bd},
),
subclass_2=dict(
aci_class='fvRsBDToOut',
aci_rn='rsBDToOut-{0}'.format(l3out),
module_object=l3out,
target_filter={'tnL3extOutName': l3out},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvRsBDToOut',
class_config=dict(tnL3extOutName=l3out),
)
aci.get_diff(aci_class='fvRsBDToOut')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
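# Editorial note: the EXAMPLES block above is empty; a hedged task for this
# module might look like the following (all values are placeholders, and the
# connection arguments come from the shared aci documentation fragment):
#
#   - name: Bind a bridge domain to an L3 Out
#     aci_bd_to_l3out:
#       host: apic.example.com
#       username: admin
#       password: SomeSecret
#       tenant: production
#       bd: database
#       l3out: corporate_l3out
#       state: present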
|
cogeorg/econlib
|
refs/heads/master
|
networkx/algorithms/tests/test_distance_measures.py
|
66
|
#!/usr/bin/env python
from nose.tools import *
import networkx
class TestDistance:
def setUp(self):
G=networkx.Graph()
from networkx import convert_node_labels_to_integers as cnlti
G=cnlti(networkx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
self.G=G
def test_eccentricity(self):
assert_equal(networkx.eccentricity(self.G,1),6)
e=networkx.eccentricity(self.G)
assert_equal(e[1],6)
sp=networkx.shortest_path_length(self.G)
e=networkx.eccentricity(self.G,sp=sp)
assert_equal(e[1],6)
e=networkx.eccentricity(self.G,v=1)
assert_equal(e,6)
e=networkx.eccentricity(self.G,v=[1,1]) #This behavior changed in version 1.8 (ticket #739)
assert_equal(e[1],6)
e=networkx.eccentricity(self.G,v=[1,2])
assert_equal(e[1],6)
# test against graph with one node
G=networkx.path_graph(1)
e=networkx.eccentricity(G)
assert_equal(e[0],0)
e=networkx.eccentricity(G,v=0)
assert_equal(e,0)
assert_raises(networkx.NetworkXError, networkx.eccentricity, G, 1)
# test against empty graph
G=networkx.empty_graph()
e=networkx.eccentricity(G)
assert_equal(e,{})
def test_diameter(self):
assert_equal(networkx.diameter(self.G),6)
def test_radius(self):
assert_equal(networkx.radius(self.G),4)
def test_periphery(self):
assert_equal(set(networkx.periphery(self.G)),set([1, 4, 13, 16]))
def test_center(self):
assert_equal(set(networkx.center(self.G)),set([6, 7, 10, 11]))
def test_radius_exception(self):
G=networkx.Graph()
G.add_edge(1,2)
G.add_edge(3,4)
assert_raises(networkx.NetworkXError, networkx.diameter, G)
@raises(networkx.NetworkXError)
def test_eccentricity_infinite(self):
G=networkx.Graph([(1,2),(3,4)])
e = networkx.eccentricity(G)
@raises(networkx.NetworkXError)
def test_eccentricity_invalid(self):
G=networkx.Graph([(1,2),(3,4)])
e = networkx.eccentricity(G,sp=1)
|
tsl143/addons-server
|
refs/heads/master
|
src/olympia/migrations/626-override-outdated-jetpack-compat.py
|
9
|
from olympia import amo
from addons.models import Addon, CompatOverride, CompatOverrideRange
def run():
addons = (
Addon.objects
.filter(type=amo.ADDON_EXTENSION, appsupport__app=amo.FIREFOX.id,
_current_version__files__jetpack_version__isnull=False)
.exclude(_current_version__files__jetpack_version='1.14'))
# Fix invalid compat ranges from last migration
(CompatOverrideRange.objects.filter(
compat__addon__in=addons, type=1, app_id=amo.FIREFOX.id,
min_app_version='0', max_app_version='21.*', min_version='0')
.delete())
count = 0
for addon in addons:
co, created = CompatOverride.objects.get_or_create(addon=addon,
guid=addon.guid,
name=addon.name)
CompatOverrideRange.objects.create(
compat=co, type=1, app_id=amo.FIREFOX.id,
min_app_version='21.*', max_app_version='*',
min_version='0', max_version=addon.current_version.version)
count += 1
print('Overrode compatibility for %d SDK add-ons.' % count)
|
supriyantomaftuh/pip
|
refs/heads/develop
|
pip/_vendor/distlib/util.py
|
224
|
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
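# Editorial sketch: how the grammar above behaves on a made-up requirement
# string (checked against the regexes above, but illustrative only):
#
#   r = parse_requirement('foo (>= 1.2, < 2.0)')
#   r.name        -> 'foo'
#   r.constraints -> [('>=', '1.2'), ('<', '2.0')]
#   r.requirement -> 'foo (>= 1.2, < 2.0)'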
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
return os.path.normcase(sys.executable)
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
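# Editorial sketch of the descriptor above in use (example class only): the
# first attribute access runs the wrapped function, then object.__setattr__
# stores the result on the instance so later accesses bypass the descriptor.
class _CachedPropertyExample(object):
    @cached_property
    def answer(self):
        return 41 + 1  # computed on first access, cached thereafter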
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raises DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
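# --- Illustrative sketch (not part of the original module). The helper name
# _demo_file_operator and the paths are hypothetical, for illustration only.
def _demo_file_operator(tmpdir):
    fileop = FileOperator(dry_run=False)
    fileop.record = True   # start recording files/dirs touched
    fileop.write_text_file(os.path.join(tmpdir, 'hello.txt'), 'hi', 'utf-8')
    files, dirs = fileop.commit()   # returns recorded changes, stops recording
    return files, dirs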
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
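# --- Illustrative sketch (not part of the original module), assuming 'os'
# has already been imported so that 'os.path' is present in sys.modules.
def _demo_resolve():
    # Both spellings locate the same callable via module + attribute path.
    assert resolve('os.path', 'join') is os.path.join
    assert resolve('os', 'path.join') is os.path.join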
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
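# --- Illustrative sketch (not part of the original module); the
# specification string below is made up to show the accepted syntax.
def _demo_get_export_entry():
    entry = get_export_entry('foo = bar.baz:qux [flag1, flag2=v]')
    # entry.name == 'foo', entry.prefix == 'bar.baz', entry.suffix == 'qux',
    # entry.flags == ['flag1', 'flag2=v']; entry.value would resolve the
    # attribute qux of module bar.baz on first access (cached thereafter).
    return entry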
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory of
    the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
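# --- Illustrative sketch (not part of the original module).
def _demo_path_to_cache_dir():
    # On POSIX: '/home/user/project' -> '--home--user--project.cache'
    return path_to_cache_dir('/home/user/project')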
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
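# --- Illustrative sketch (not part of the original module); the host and
# credentials below are made up.
def _demo_parse_credentials():
    assert parse_credentials('user:secret@pypi.example.com') == \
        ('user', 'secret', 'pypi.example.com')
    assert parse_credentials('pypi.example.com') == \
        (None, None, 'pypi.example.com')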
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
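# --- Illustrative sketch (not part of the original module); the filename is
# made up to show the name/version/python-tag split.
def _demo_split_filename():
    assert split_filename('flask-0.12.2-py2.7') == ('flask', '0.12.2', '2.7')
    assert split_filename('!!!') is None   # unmatched names yield None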
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
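# --- Illustrative sketch (not part of the original module).
def _demo_parse_name_and_version():
    # e.g. a Provides-Dist value; note the name is stripped and lower-cased.
    assert parse_name_and_version('Twisted Core (12.3.0)') == \
        ('twisted core', '12.3.0')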
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
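# --- Illustrative sketch (not part of the original module); the extras
# names are made up.
def _demo_get_extras():
    # '*' requests every available extra; a leading '-' removes one again.
    assert get_extras(['*', '-tests'], ['docs', 'tests']) == {'docs'}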
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
    A class implementing a cache for resources that need to live in the file
    system, e.g. shared libraries. This class was moved from resources to here
    because it could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
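# --- Illustrative sketch (not part of the original module); the subclass
# and event names are hypothetical.
def _demo_event_mixin():
    class Notifier(EventMixin):
        pass
    n = Notifier()
    n.add('built', lambda event, name: 'built %s' % name)
    return n.publish('built', 'wheel')   # ['built wheel']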
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
        # See http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
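# --- Illustrative sketch (not part of the original module); the step names
# are hypothetical. get_steps() yields prerequisites before dependents.
def _demo_sequencer():
    seq = Sequencer()
    seq.add('fetch', 'build')   # 'fetch' must precede 'build'
    seq.add('build', 'test')
    return list(seq.get_steps('test'))   # ['fetch', 'build', 'test']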
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
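# --- Illustrative sketch (not part of the original module); the directory
# and output filename are placeholders.
def _demo_zip_dir(directory):
    data = zip_dir(directory).getvalue()   # the archive lives in memory
    with open('archive.zip', 'wb') as f:
        f.write(data)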
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
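# --- Illustrative sketch (not part of the original module).
def _demo_progress():
    p = Progress(maxval=100).start()
    p.increment(50)
    return p.percentage, p.ETA   # (' 50 %', 'ETA : <hh:mm:ss>')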
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
                # '**' may be followed by either separator; strip both kinds
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
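# --- Illustrative sketch (not part of the original module); the pattern is
# made up. '{py,txt}' expands to alternatives and '**' recurses.
def _demo_iglob():
    for path in iglob('src/**/*.{py,txt}'):
        print(path)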
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To guard against mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
|
Huskerboy/startbootstrap-freelancer
|
refs/heads/master
|
freelancer_env/Lib/site-packages/pip/_vendor/packaging/version.py
|
1151
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
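# --- Illustrative sketch (not part of this file); parse() falls back to
# LegacyVersion for strings PEP 440 cannot describe.
def _demo_parse():
    assert isinstance(parse("1.0.4"), Version)
    assert isinstance(parse("french toast"), LegacyVersion)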
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
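# --- Illustrative sketch (not part of this file): the comparison key sorts
# dev releases before pre-releases, which sort before the final release,
# which sorts before post releases.
def _demo_version_ordering():
    versions = [Version(v) for v in ("1.0", "1.0.dev0", "1.0a1", "1.0.post1")]
    assert [str(v) for v in sorted(versions)] == \
        ["1.0.dev0", "1.0a1", "1.0", "1.0.post1"]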
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
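# --- Illustrative sketch (not part of this file).
def _demo_parse_letter_version():
    # Alternate spellings normalize ('alpha' -> 'a'); a missing number
    # implies 0, and a bare number (as in '1.0-1') implies a post release.
    assert _parse_letter_version("alpha", None) == ("a", 0)
    assert _parse_letter_version(None, "1") == ("post", 1)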
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then re-reverse the rest back
    # into the correct order, make it a tuple and use that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
|
edx/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/site_configuration/migrations/0004_add_site_values_field.py
|
4
|
# Generated by Django 1.11.28 on 2020-02-19 16:50
import collections
import jsonfield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('site_configuration', '0003_auto_20200217_1058'),
]
operations = [
migrations.AddField(
model_name='siteconfiguration',
name='site_values',
field=jsonfield.fields.JSONField(blank=True, default=dict, dump_kwargs={'cls': jsonfield.encoder.JSONEncoder, 'separators': (',', ':')}, load_kwargs={'object_pairs_hook': collections.OrderedDict}),
),
migrations.AddField(
model_name='siteconfigurationhistory',
name='site_values',
field=jsonfield.fields.JSONField(blank=True, dump_kwargs={'cls': jsonfield.encoder.JSONEncoder, 'separators': (',', ':')}, load_kwargs={'object_pairs_hook': collections.OrderedDict}),
),
]
|
cs-au-dk/Artemis
|
refs/heads/master
|
WebKit/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
|
2
|
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import make_option
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.config.ports import DeprecatedPort
from webkitpy.common.system.deprecated_logging import error, log
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.earlywarningsystemtask import EarlyWarningSystemTask, EarlyWarningSystemTaskDelegate
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
from webkitpy.tool.bot.patchanalysistask import UnableToApplyPatch
from webkitpy.tool.bot.queueengine import QueueEngine
from webkitpy.tool.commands.queues import AbstractReviewQueue
class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDelegate):
_build_style = "release"
# FIXME: Switch _default_run_tests from opt-in to opt-out once more bots are ready to run tests.
_default_run_tests = False
def __init__(self):
options = [make_option("--run-tests", action="store_true", dest="run_tests", default=self._default_run_tests, help="Run the Layout tests for each patch")]
AbstractReviewQueue.__init__(self, options=options)
self.port = DeprecatedPort.port(self.port_name)
def begin_work_queue(self):
# FIXME: This violates abstraction
self._tool._deprecated_port = self.port
AbstractReviewQueue.begin_work_queue(self)
self._expected_failures = ExpectedFailures()
self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._log_directory())
def _failing_tests_message(self, task, patch):
results = task.results_from_patch_test_run(patch)
unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
if not unexpected_failures:
return None
return "New failing tests:\n%s" % "\n".join(unexpected_failures)
def _post_reject_message_on_bug(self, tool, patch, status_id, extra_message_text=None):
results_link = tool.status_server.results_url_for_status(status_id)
message = "Attachment %s did not pass %s (%s):\nOutput: %s" % (patch.id(), self.name, self.port_name, results_link)
# FIXME: We might want to add some text about rejecting from the commit-queue in
# the case where patch.commit_queue() isn't already set to '-'.
if self.watchers:
tool.bugs.add_cc_to_bug(patch.bug_id(), self.watchers)
tool.bugs.set_flag_on_attachment(patch.id(), "commit-queue", "-", message, extra_message_text)
def review_patch(self, patch):
task = EarlyWarningSystemTask(self, patch, self._options.run_tests)
if not task.validate():
self._did_error(patch, "%s did not process patch." % self.name)
return False
try:
return task.run()
except UnableToApplyPatch, e:
self._did_error(patch, "%s unable to apply patch." % self.name)
return False
except ScriptError, e:
self._post_reject_message_on_bug(self._tool, patch, task.failure_status_id, self._failing_tests_message(task, patch))
results_archive = task.results_archive_from_patch_test_run(patch)
if results_archive:
self._upload_results_archive_for_patch(patch, results_archive)
self._did_fail(patch)
# FIXME: We're supposed to be able to raise e again here and have
# one of our base classes mark the patch as fail, but there seems
# to be an issue with the exit_code.
return False
# EarlyWarningSystemDelegate methods
def parent_command(self):
return self.name
def run_command(self, command):
self.run_webkit_patch(command + [self.port.flag()])
def command_passed(self, message, patch):
pass
def command_failed(self, message, script_error, patch):
failure_log = self._log_from_script_error_for_upload(script_error)
return self._update_status(message, patch=patch, results_file=failure_log)
def expected_failures(self):
return self._expected_failures
def test_results(self):
return self._layout_test_results_reader.results()
def archive_last_test_results(self, patch):
return self._layout_test_results_reader.archive(patch)
def build_style(self):
return self._build_style
def refetch_patch(self, patch):
return self._tool.bugs.fetch_attachment(patch.id())
def report_flaky_tests(self, patch, flaky_test_results, results_archive):
pass
# StepSequenceErrorHandler methods
@classmethod
def handle_script_error(cls, tool, state, script_error):
# FIXME: Why does this not exit(1) like the superclass does?
log(script_error.message_with_output())
class GtkEWS(AbstractEarlyWarningSystem):
name = "gtk-ews"
port_name = "gtk"
watchers = AbstractEarlyWarningSystem.watchers + [
"gns@gnome.org",
"xan.lopez@gmail.com",
]
class EflEWS(AbstractEarlyWarningSystem):
name = "efl-ews"
port_name = "efl"
watchers = AbstractEarlyWarningSystem.watchers + [
"leandro@profusion.mobi",
"antognolli@profusion.mobi",
"lucas.demarchi@profusion.mobi",
"gyuyoung.kim@samsung.com",
]
class QtEWS(AbstractEarlyWarningSystem):
name = "qt-ews"
port_name = "qt"
class QtWK2EWS(AbstractEarlyWarningSystem):
name = "qt-wk2-ews"
port_name = "qt"
class WinEWS(AbstractEarlyWarningSystem):
name = "win-ews"
port_name = "win"
# Use debug, the Apple Win port fails to link Release on 32-bit Windows.
# https://bugs.webkit.org/show_bug.cgi?id=39197
_build_style = "debug"
class AbstractChromiumEWS(AbstractEarlyWarningSystem):
port_name = "chromium"
watchers = AbstractEarlyWarningSystem.watchers + [
"dglazkov@chromium.org",
]
class ChromiumLinuxEWS(AbstractChromiumEWS):
# FIXME: We should rename this command to cr-linux-ews, but that requires
# a database migration. :(
name = "chromium-ews"
port_name = "chromium-xvfb"
_default_run_tests = True
class ChromiumWindowsEWS(AbstractChromiumEWS):
name = "cr-win-ews"
class MacEWS(AbstractEarlyWarningSystem):
name = "mac-ews"
port_name = "mac"
|
Rocamadour7/ml_tutorial
|
refs/heads/master
|
04. SVM/breast-cancer-example.py
|
1
|
import numpy as np
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('../03. KNN/breast-cancer-wisconsin.data.csv')
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)
X = np.array(df.drop(['class'], axis=1))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1], [6, 8, 2, 3, 4, 6, 5, 2, 1]])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
|
fhueske/flink
|
refs/heads/master
|
flink-python/pyflink/common/tests/test_configuration.py
|
26
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from copy import deepcopy
from pyflink.common import Configuration
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ConfigurationTests(PyFlinkTestCase):
def test_init(self):
conf = Configuration()
self.assertEqual(conf.to_dict(), dict())
conf.set_string("k1", "v1")
conf2 = Configuration(conf)
self.assertEqual(conf2.to_dict(), {"k1": "v1"})
def test_getters_and_setters(self):
conf = Configuration()
conf.set_string("str", "v1")
conf.set_integer("int", 2)
conf.set_boolean("bool", True)
conf.set_float("float", 0.5)
conf.set_bytearray("bytearray", bytearray([1, 2, 3]))
str_value = conf.get_string("str", "")
int_value = conf.get_integer("int", 0)
bool_value = conf.get_boolean("bool", False)
float_value = conf.get_float("float", 0)
bytearray_value = conf.get_bytearray("bytearray", bytearray())
self.assertEqual(str_value, "v1")
self.assertEqual(int_value, 2)
self.assertEqual(bool_value, True)
self.assertEqual(float_value, 0.5)
self.assertEqual(bytearray_value, bytearray([1, 2, 3]))
def test_key_set(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_string("k2", "v2")
conf.set_string("k3", "v3")
key_set = conf.key_set()
self.assertEqual(key_set, {"k1", "k2", "k3"})
def test_add_all_to_dict(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf.set_float("k3", 1.2)
conf.set_boolean("k4", True)
conf.set_bytearray("k5", bytearray([1, 2, 3]))
target_dict = dict()
conf.add_all_to_dict(target_dict)
self.assertEqual(target_dict, {"k1": "v1",
"k2": 1,
"k3": 1.2,
"k4": True,
"k5": bytearray([1, 2, 3])})
def test_add_all(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf2 = Configuration()
conf2.add_all(conf)
value1 = conf2.get_string("k1", "")
self.assertEqual(value1, "v1")
conf2.add_all(conf, "conf_")
value2 = conf2.get_string("conf_k1", "")
self.assertEqual(value2, "v1")
def test_deepcopy(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf2 = deepcopy(conf)
self.assertEqual(conf2, conf)
conf2.set_string("k1", "v2")
self.assertNotEqual(conf2, conf)
def test_contains_key(self):
conf = Configuration()
conf.set_string("k1", "v1")
contains_k1 = conf.contains_key("k1")
contains_k2 = conf.contains_key("k2")
self.assertTrue(contains_k1)
self.assertFalse(contains_k2)
def test_to_dict(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf.set_float("k3", 1.2)
conf.set_boolean("k4", True)
target_dict = conf.to_dict()
self.assertEqual(target_dict, {"k1": "v1", "k2": "1", "k3": "1.2", "k4": "true"})
def test_remove_config(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
self.assertTrue(conf.contains_key("k1"))
self.assertTrue(conf.contains_key("k2"))
self.assertTrue(conf.remove_config("k1"))
self.assertFalse(conf.remove_config("k1"))
self.assertFalse(conf.contains_key("k1"))
conf.remove_config("k2")
self.assertFalse(conf.contains_key("k2"))
def test_hash_equal_str(self):
conf = Configuration()
conf2 = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf2.set_string("k1", "v1")
self.assertNotEqual(hash(conf), hash(conf2))
self.assertNotEqual(conf, conf2)
conf2.set_integer("k2", 1)
self.assertEqual(hash(conf), hash(conf2))
self.assertEqual(conf, conf2)
self.assertEqual(str(conf), "{k1=v1, k2=1}")
|
openhatch/new-mini-tasks
|
refs/heads/master
|
vendor/packages/Django/scripts/manage_translations.py
|
41
|
#!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
#   output how many strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command supports the --languages and --resources options to limit its
# operation to the specified languages or resources. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django-core.core"
else:
return "django-core.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep -v '^@@|^[-+]#|^..POT-Creation' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
num_changes = int(output.strip()) - 4
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
contrib_dirs = _get_locale_dirs(include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating main en catalog")
call_command('makemessages', locale='en')
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
# Contrib catalogs
for name, dir_ in contrib_dirs:
if resources and not name in resources:
continue
os.chdir(os.path.join(dir_, '..'))
print("Updating en catalog in %s" % dir_)
if name.endswith('-js'):
call_command('makemessages', locale='en', domain='djangojs')
else:
call_command('makemessages', locale='en')
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
    If resources is provided, it should be a list of translation resources to
    limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs()
for name, dir_ in locale_dirs:
if resources and not name in resources:
continue
print("\nShowing translations stats for '%s':" % name)
langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
for lang in langs:
if languages and not lang in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
# msgfmt output stats on stderr
print("%s: %s" % (lang, errors.strip()))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs()
for name, dir_ in locale_dirs:
if resources and not name in resources:
continue
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f' % {'res': _tx_resource_for_name(name)}, shell=True)
languages = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in languages:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
call('msgcat -o %s %s' % (po_path, po_path), shell=True)
mo_path = '%s.mo' % po_path[:-3]
call('msgfmt -o %s %s' % (mo_path, po_path), shell=True)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = OptionParser(usage="usage: %prog [options] cmd")
parser.add_option("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_option("-l", "--languages", action='append',
help="limit operation to the specified languages")
options, args = parser.parse_args()
if not args:
parser.print_usage()
exit(1)
if args[0] in RUNABLE_SCRIPTS:
eval(args[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
|
OpenAgInitiative/openag-brain
|
refs/heads/master
|
nodes/sensor_persistence.py
|
4
|
#!/usr/bin/python
"""
The `sensor_persistence.py` module listens for measurements of the ambient
conditions of an environment and writes those measurements to the CouchDB
instance. There should be exactly one instance of this module per environment
in the system.
"""
import sys
import time
import rospy
import rostopic
from couchdb import Server
from std_msgs.msg import Float64
from openag_lib.db_bootstrap.db_names import ENVIRONMENTAL_DATA_POINT
from openag_lib.config import config as cli_config
from openag_brain.models import EnvironmentalDataPoint
from openag_brain.utils import gen_doc_id, read_environment_from_ns
from openag_brain.load_env_var_types import create_variables
# Filter a list of environmental variables that are specific to environment
# sensors and actuators
ENVIRONMENT_VARIABLES = create_variables(rospy.get_param('/var_types/environment_variables'))
class TopicPersistence:
def __init__(
self, db, topic, topic_type, environment, variable, is_desired,
max_update_interval, min_update_interval
):
self.db = db
self.environment = environment
self.variable = variable
self.is_desired = is_desired
self.last_value = None
self.last_time = 0
self.sub = rospy.Subscriber(topic, topic_type, self.on_data)
self.max_update_interval = max_update_interval
self.min_update_interval = min_update_interval
def on_data(self, item):
curr_time = time.time()
value = item.data
# This is kind of a hack to correctly interpret UInt8MultiArray
# messages. There should be a better way to do this
if item._slot_types[item.__slots__.index('data')] == "uint8[]":
value = [ord(x) for x in value]
# Throttle updates
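        # Points arriving faster than min_update_interval are always dropped;
        # within max_update_interval, points whose value changed by 1% or
        # less relative to the last persisted value are dropped as well.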
delta_time = curr_time - self.last_time
if delta_time < self.min_update_interval:
return
        if delta_time < self.max_update_interval and self.last_value:
            delta_val = value - self.last_value
            # float() guards against Python 2 integer division silently
            # truncating small relative changes to zero
            if abs(float(delta_val) / self.last_value) <= 0.01:
                return
# Save the data point
point = EnvironmentalDataPoint({
"environment": self.environment,
"variable": self.variable,
"is_desired": self.is_desired,
"value": value,
"timestamp": curr_time
})
point_id = gen_doc_id(curr_time)
self.db[point_id] = point
self.last_value = value
self.last_time = curr_time
def create_persistence_objects(
server, environment_id, max_update_interval, min_update_interval
):
env_var_db = server[ENVIRONMENTAL_DATA_POINT]
for variable in ENVIRONMENT_VARIABLES.itervalues():
variable = str(variable)
topic = "{}/measured".format(variable)
TopicPersistence(
topic=topic, topic_type=Float64,
environment=environment_id,
variable=variable, is_desired=False,
db=env_var_db, max_update_interval=max_update_interval,
min_update_interval=min_update_interval
)
if __name__ == '__main__':
db_server = cli_config["local_server"]["url"]
if not db_server:
raise RuntimeError("No local database specified")
server = Server(db_server)
rospy.init_node('sensor_persistence')
try:
max_update_interval = rospy.get_param("~max_update_interval")
except KeyError:
rospy.logwarn(
"No maximum update interval specified for sensor persistence "
"module"
)
max_update_interval = 600
try:
min_update_interval = rospy.get_param("~min_update_interval")
except KeyError:
rospy.logwarn(
"No minimum update interval specified for sensor persistence "
"module"
)
min_update_interval = 5
environment_id = read_environment_from_ns(rospy.get_namespace())
create_persistence_objects(
server, environment_id,
max_update_interval=max_update_interval,
min_update_interval=min_update_interval
)
rospy.spin()
|
sanjeevtripurari/hue
|
refs/heads/master
|
desktop/libs/libzookeeper/src/libzookeeper/conf.py
|
24
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urlparse import urlparse
from desktop.lib.conf import Config, coerce_string
LOG = logging.getLogger(__name__)
def zkensemble():
"""
Try to guess the value if no values are specified.
"""
from django.conf import settings
if 'zookeeper' in settings.INSTALLED_APPS:
try:
# Backward compatibility until Hue 4
from zookeeper.conf import CLUSTERS
clusters = CLUSTERS.get()
if clusters['default'].HOST_PORTS.get() != 'localhost:2181':
return '%s' % clusters['default'].HOST_PORTS.get()
    except Exception:
      LOG.warn('Could not get zookeeper ensemble from the zookeeper app')
if 'search' in settings.INSTALLED_APPS:
try:
from search.conf import SOLR_URL
parsed = urlparse(SOLR_URL.get())
return "%s:2181" % (parsed.hostname or 'localhost')
    except Exception:
      LOG.warn('Could not get zookeeper ensemble from the search app')
return "localhost:2181"
ENSEMBLE = Config(
"ensemble",
help="ZooKeeper ensemble. Comma separated list of Host/Port, e.g. localhost:2181,localhost:2182,localhost:2183",
dynamic_default=zkensemble,
type=coerce_string,
)
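# In hue.ini this maps to, for example (hosts below are illustrative):
#
#   [libzookeeper]
#   ensemble=localhost:2181,localhost:2182,localhost:2183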
PRINCIPAL_NAME = Config(
"principal_name",
help="Name of Kerberos principal when using security",
default="zookeeper",
type=str,
)
|
gEt-rIgHt-jR/voc
|
refs/heads/master
|
tests/builtins/test_slice.py
|
4
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class SliceTests(TranspileTestCase):
pass
class BuiltinSliceFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["slice"]
not_implemented = [
'test_bool',
'test_bytearray',
'test_bytes',
'test_class',
'test_complex',
'test_dict',
'test_float',
'test_frozenset',
'test_int',
'test_list',
'test_None',
'test_NotImplemented',
'test_range',
'test_set',
'test_slice',
'test_str',
'test_tuple',
]
|
kogotko/carburetor
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/flavors/urls.py
|
2
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from horizon.browsers.views import AngularIndexView
from openstack_dashboard.dashboards.admin.flavors import views
if settings.ANGULAR_FEATURES['flavors_panel']:
title = _("Flavors")
# New angular panel
urlpatterns = [
url(r'^$', AngularIndexView.as_view(title=title), name='index'),
url(r'^create/$', AngularIndexView.as_view(title=title),
name='create'),
url(r'^(?P<id>[^/]+)/update/$', AngularIndexView.as_view(title=title),
name='index'),
]
else:
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
]
|
FranMachio/plugin.video.Machio.fran
|
refs/heads/master
|
servers/bayfiles.py
|
35
|
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for bayfiles
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[bayfiles.py] test_video_exists(page_url='%s')" % page_url)
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[bayfiles.py] get_video_url("+page_url+")")
from servers import servertools
video_urls = []
data = scrapertools.cache_page(page_url)
    try:
        vfid = re.compile('var vfid = ([^;]+);').findall(data)[0]
    except Exception:
        logger.info("[bayfiles.py] Error: vfid not found")
        return []
try:
delay = re.compile('var delay = ([^;]+);').findall(data)[0]
delay = int(delay)
    except Exception:
delay = 300
logger.info("[bayfiles.py] vfid="+vfid)
logger.info("[bayfiles.py] delay="+str(delay))
from platformcode.xbmc import xbmctools
t = millis()
#http://bayfiles.com/ajax_download?_=1336330599281&action=startTimer&vfid=2174049
url_token = "http://bayfiles.com/ajax_download?_=%s&action=startTimer&vfid=%s"%(t,vfid)
data = scrapertools.cache_page(url_token)
logger.info("data="+data)
datajson = load_json(data)
    if not datajson['set']:
        logger.info("[bayfiles.py] no download token received")
        return video_urls
    token = datajson['token']
resultado = xbmctools.handle_wait(delay,"Progreso","Conectando con servidor BayFiles (Free)")
#if resultado == False:
url_ajax = 'http://bayfiles.com/ajax_download'
post = "action=getLink&vfid=%s&token=%s" %(vfid,token)
data = scrapertools.cache_page( url_ajax , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    # Extract the video URL
patron = 'onclick="javascript:window.location.href = \'(.+?)\''
matches = re.compile(patron,re.DOTALL).findall(data)
#scrapertools.printMatches(matches)
if len(matches)>0:
mediaurl = matches[0]
try:
location = scrapertools.getLocationHeaderFromResponse(mediaurl)
if location:
mediaurl = location
        except Exception:
            logger.info("Error following redirect")
mediaurl = mediaurl + "|Referer="+urllib.quote(page_url)
video_urls.append( ["."+mediaurl.rsplit('.',1)[1]+" [bayfiles]",mediaurl,60])
for video_url in video_urls:
logger.info("[bayfiles.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find videos from this server in the given text
def find_videos(text):
encontrados = set()
devuelve = []
# http://bayfiles.com/file/3R2P/8QqLEo/A.Gifted.Man.S01E15.HDTV.XviD-2HD.mp4
# http://www.bayfiles.com/file/3yUL/NQ6Kl0/hu60.mp4
# linkto?url=http://bayfiles.com/file/4pMd/Mhu9Ht/Megamente.720p-Latino.mp4?cid=3154&ctipo=pelicula&cdef=720
patronvideos = '(bayfiles.com/file/[a-zA-Z0-9]+/[a-zA-Z0-9]+/[^&^"^\'^<\?]+)'
logger.info("[bayfiles.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(text)
for match in matches:
titulo = "[bayfiles]"
url = "http://"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'bayfiles' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
def load_json(data):
# callback to transform json string values to utf8
def to_utf8(dct):
rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct
    try:
from lib import simplejson
json_data = simplejson.loads(data, object_hook=to_utf8)
return json_data
    except Exception:
try:
import json
json_data = json.loads(data, object_hook=to_utf8)
return json_data
        except Exception:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return None
def millis():
import time as time_ #make sure we don't override time
return int(round(time_.time() * 1000))
|
kurgm/Bitmap2OTF
|
refs/heads/master
|
bitmap2otf/dotshape.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import re
def _intorfloat(v):
vi = int(v)
if v == vi:
return vi
return v
def _vec2string(x, y, suf=""):
if x == 0:
return "{} v{}".format(_intorfloat(y), suf)
if y == 0:
return "{} h{}".format(_intorfloat(x), suf)
return "{} {} r{}".format(_intorfloat(x), _intorfloat(y), suf)
def _evalxy(xystr, x=1.0, y=1.0):
if xystr[-1] == "x":
return float(xystr[:-1]) * x
if xystr[-1] == "y":
return float(xystr[:-1]) * y
return float(xystr)
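# For illustration: _evalxy("0.5x", x=200.0, y=100.0) -> 100.0,
# _evalxy("-3") -> -3.0 (plain numbers pass through as floats).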
class DotShape(object):
def getGlyphBBX(self, bitmap, dw=100.0, dh=100.0):
bBBX = bitmap.getBoundingBox()
dBBX = self.getDotBBX(dw, dh)
return [
bBBX[0] * dw + dBBX[0],
bBBX[1] * dh + dBBX[1],
bBBX[2] * dw + dBBX[2],
bBBX[3] * dh + dBBX[3]
]
class DotShapePixelOutline(DotShape):
def bitmap2charstring(self, bitmap, dw=100.0, dh=100.0, subrs=[]):
polygons = bitmap.toPolygons()
if not polygons:
return ""
buf = ""
x = y = 0.0
for polygon in polygons:
x1, y1 = polygon[0]
buf += _vec2string((x1 - x) * dw, (y1 - y) * dh, "moveto ")
x, y = x1, y1
if polygon[0][0] == polygon[1][0]:
currentDirection = 0
elif polygon[0][1] == polygon[1][1]:
currentDirection = 1
else:
assert False
ops = ["hlineto ", "vlineto "]
op = ops[1 - currentDirection]
args = 0
for (x1, y1), (x, y) in zip(polygon[:-1], polygon[1:]):
distance = ((x - x1) * dw, (y - y1) * dh)
assert distance[currentDirection] == 0.0
currentDirection = 1 - currentDirection
buf += "{} ".format(_intorfloat(distance[currentDirection]))
args += 1
# Type 2 charstring interpreter's argument stack has limit of 48
if args == 48:
buf += op
op = ops[1 - currentDirection]
args = 0
        if args:
            buf += op
return buf.strip()
def getSubroutines(self, dw=100.0, dh=100.0):
return []
def getDotBBX(self, dw=100.0, dh=100.0):
return [0.0, 0.0, dw, dh]
_FACTOR_XORY_RE = re.compile(r"^[+-]?[\d.]+(?:e[+-]?\d+)?[xy]$")
class DotShapeExternal(DotShape):
def __init__(self, obj, scale=(1.0, 1.0)):
if isinstance(obj, dict):
shape = obj
else:
with open(obj) as f:
shape = json.load(f)
startX = shape.get("startX", "0x")
startY = shape.get("startY", "0y")
self.startX = startX
self.startY = startY
self.endX = shape.get("endX", startX)
self.endY = shape.get("endY", startY)
self.charstring = shape["data"]
if isinstance(scale, (tuple, list)):
self.sx, self.sy = scale
else:
self.sx = self.sy = scale
self.bbx = [
shape.get("minX", "0x"),
shape.get("minY", "0y"),
shape.get("maxX", "1x"),
shape.get("maxY", "1y")
]
def bitmap2charstring(self, bitmap, dw=100.0, dh=100.0, subrs=[]):
dots = list(bitmap.dotiter())
if not dots:
return ""
sx = self.sx
sy = self.sy
sw = sx * dw
sh = sy * dh
startX = _evalxy(self.startX, x=sw, y=sh)
startY = _evalxy(self.startY, x=sw, y=sh)
endX = _evalxy(self.endX, x=sw, y=sh)
endY = _evalxy(self.endY, x=sw, y=sh)
subrno = subrs[0]
e2sX = startX - endX
e2sY = startY - endY
buf = ""
buf += _vec2string(dots[0][0] * dw + startX,
dots[0][1] * dh + startY, "moveto ")
buf += "{} callsubr ".format(subrno)
for (x0, y0), (x1, y1) in zip(dots[:-1], dots[1:]):
buf += _vec2string((x1 - x0) * dw + e2sX,
(y1 - y0) * dh + e2sY, "moveto ")
buf += "{} callsubr ".format(subrno)
return buf.strip()
def getSubroutines(self, dw=100.0, dh=100.0):
sw = self.sx * dw
sh = self.sy * dh
buf = []
for token in self.charstring.split():
if _FACTOR_XORY_RE.match(token):
buf.append(str(_evalxy(token, x=sw, y=sh)))
else:
buf.append(token)
return [" ".join(buf)]
def getDotBBX(self, dw=100.0, dh=100.0):
sw = self.sx * dw
sh = self.sy * dh
return [_evalxy(v, x=sw, y=sh) for v in self.bbx]
|
si618/pi-time
|
refs/heads/master
|
node_modules/grunt-pylint/tasks/lib/astroid/tests/testdata/python2/data/noendingnewline.py
|
42
|
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testIt(self):
self.a = 10
self.xxx()
def xxx(self):
if False:
pass
print 'a'
if False:
pass
pass
if False:
pass
print 'rara'
if __name__ == '__main__':
print 'test2'
unittest.main()
|
truetone/AutobahnPython
|
refs/heads/master
|
examples/websocket/echo_wsgi/server.py
|
26
|
###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import uuid, sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from flask import Flask, render_template
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
##
## Our WebSocket Server protocol
##
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, msg, binary):
self.sendMessage(msg, binary)
##
## Our WSGI application .. in this case Flask based
##
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
@app.route('/')
def page_home():
return render_template('index.html')
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
app.debug = debug
##
## create a Twisted Web resource for our WebSocket server
##
wsFactory = WebSocketServerFactory("ws://localhost:8080",
debug = debug,
debugCodePaths = debug)
wsFactory.protocol = EchoServerProtocol
wsFactory.setProtocolOptions(allowHixie76 = True) # needed if Hixie76 is to be supported
wsResource = WebSocketResource(wsFactory)
##
## create a Twisted Web WSGI resource for our Flask server
##
wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
##
## create a root resource serving everything via WSGI/Flask, but
## the path "/ws" served by our WebSocket stuff
##
rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
##
## create a Twisted Web Site and run everything
##
site = Site(rootResource)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported
reactor.listenTCP(8080, site)
reactor.run()
|
kifcaliph/odoo
|
refs/heads/8.0
|
addons/base_geolocalize/models/res_partner.py
|
239
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import simplejson as json
except ImportError:
import json # noqa
import urllib
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
def geo_find(addr):
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib.quote(addr.encode('utf8'))
try:
result = json.load(urllib.urlopen(url))
except Exception, e:
raise osv.except_osv(_('Network error'),
_('Cannot contact geolocation servers. Please make sure that your internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(filter(None, [street,
("%s %s" % (zip or '', city or '')).strip(),
state,
country])))
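# For illustration (inputs are examples only):
#   geo_query_address(street='1 Main St', zip='1000', city='Brussels',
#                     country='Belgium')
#       -> u'1 Main St, 1000 Brussels, Belgium'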
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
'date_localization': fields.date('Geo Localization Date'),
}
def geo_localize(self, cr, uid, ids, context=None):
# Don't pass context to browse()! We need country names in english below
for partner in self.browse(cr, uid, ids):
if not partner:
continue
result = geo_find(geo_query_address(street=partner.street,
zip=partner.zip,
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name))
if result:
self.write(cr, uid, [partner.id], {
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.date.context_today(self, cr, uid, context=context)
}, context=context)
return True
|
rahushen/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/credstash.py
|
96
|
# (c) 2015, Ensighten <infra@ensighten.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: credstash
version_added: "2.0"
short_description: retrieve secrets from Credstash on AWS
requirements:
- credstash (python library)
description:
- "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
options:
_terms:
    description: term or list of terms to look up in the credential store
type: list
required: True
table:
description: name of the credstash table to query
default: 'credential-store'
version:
description: Credstash version
region:
description: AWS region
profile_name:
description: AWS profile to use for authentication
env:
- name: AWS_PROFILE
aws_access_key_id:
description: AWS access key ID
env:
- name: AWS_ACCESS_KEY_ID
aws_secret_access_key:
description: AWS access key
env:
- name: AWS_SECRET_ACCESS_KEY
aws_session_token:
description: AWS session token
env:
- name: AWS_SESSION_TOKEN
"""
EXAMPLES = """
- name: first use credstash to store your secrets
shell: credstash put my-github-password secure123
- name: "Test credstash lookup plugin -- get my github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}"
- name: "Test credstash lookup plugin -- get my other password from us-west-1"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}"
- name: "Test credstash lookup plugin -- get the company's github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'company-github-password', table='company-passwords') }}"
- name: Example play using the 'context' feature
hosts: localhost
vars:
context:
app: my_app
environment: production
tasks:
- name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
debug: msg="{{ lookup('credstash', 'some-password', context=context) }}"
- name: "Test credstash lookup plugin -- get the password with a context defined here"
debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
RETURN = """
_raw:
description:
- value(s) stored in Credstash
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
CREDSTASH_INSTALLED = False
try:
import credstash
CREDSTASH_INSTALLED = True
except ImportError:
CREDSTASH_INSTALLED = False
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not CREDSTASH_INSTALLED:
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
                raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
ret.append(val)
return ret
|
drexly/tonginBlobStore
|
refs/heads/master
|
lib/django/contrib/sessions/backends/cache.py
|
227
|
from django.conf import settings
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.cache import caches
from django.utils.six.moves import range
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self._session_key = None
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in range(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if self.session_key is None:
return self.create()
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return session_key and (self.cache_key_prefix + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
@classmethod
def clear_expired(cls):
pass
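# A minimal usage sketch (assumes Django settings and a configured cache
# backend; the key and value below are illustrative):
#
#     store = SessionStore()
#     store['user_id'] = 42
#     store.save()
#     restored = SessionStore(session_key=store.session_key)
#     assert restored['user_id'] == 42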
|
psgganesh/sparkplug
|
refs/heads/master
|
packages/Sparkplug/Admin/src/node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/setup.py
|
2462
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
  package_dir={'': 'pylib'},
packages=['gyp', 'gyp.generator'],
  entry_points={'console_scripts': ['gyp=gyp:script_main']}
)
|
Limags/MissionPlanner
|
refs/heads/master
|
Lib/email/utils.py
|
53
|
# Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
__all__ = [
'collapse_rfc2231_value',
'decode_params',
'decode_rfc2231',
'encode_rfc2231',
'formataddr',
'formatdate',
'getaddresses',
'make_msgid',
'mktime_tz',
'parseaddr',
'parsedate',
'parsedate_tz',
'unquote',
]
import os
import re
import time
import base64
import random
import socket
import urllib
import warnings
from email._parseaddr import quote
from email._parseaddr import AddressList as _AddressList
from email._parseaddr import mktime_tz
# We need workarounds for bugs in these methods in older Pythons (see below)
from email._parseaddr import parsedate as _parsedate
from email._parseaddr import parsedate_tz as _parsedate_tz
from quopri import decodestring as _qdecode
# Intrapackage imports
from email.encoders import _bencode, _qencode
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = u''
CRLF = '\r\n'
TICK = "'"
specialsre = re.compile(r'[][\\()<>@,:;".]')
escapesre = re.compile(r'[][\\()"]')
# Helpers
def _identity(s):
return s
def _bdecode(s):
"""Decodes a base64 string.
This function is equivalent to base64.decodestring and it's retained only
for backward compatibility. It used to remove the last \n of the decoded
string, if it had any (see issue 7143).
"""
if not s:
return s
return base64.decodestring(s)
def fix_eols(s):
"""Replace all line-ending characters with \r\n."""
# Fix newlines with no preceding carriage return
s = re.sub(r'(?<!\r)\n', CRLF, s)
# Fix carriage returns with no following newline
s = re.sub(r'\r(?!\n)', CRLF, s)
return s
def formataddr(pair):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
"""
name, address = pair
if name:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address
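# For illustration (addresses are examples only):
#   formataddr(('Jane Doe', 'jane@example.com'))
#       -> 'Jane Doe <jane@example.com>'
#   formataddr(('Doe, Jane', 'jane@example.com'))
#       -> '"Doe, Jane" <jane@example.com>'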
def getaddresses(fieldvalues):
"""Return a list of (REALNAME, EMAIL) for each fieldvalue."""
all = COMMASPACE.join(fieldvalues)
a = _AddressList(all)
return a.addresslist
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
def formatdate(timeval=None, localtime=False, usegmt=False):
"""Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
"""
# Note: we cannot use strftime() because that honors the locale and RFC
# 2822 requires that day and month names be the English abbreviations.
if timeval is None:
timeval = time.time()
if localtime:
now = time.localtime(timeval)
# Calculate timezone offset, based on whether the local zone has
# daylight savings time, and whether DST is in effect.
if time.daylight and now[-1]:
offset = time.altzone
else:
offset = time.timezone
hours, minutes = divmod(abs(offset), 3600)
# Remember offset is in seconds west of UTC, but the timezone is in
# minutes east of UTC, so the signs differ.
if offset > 0:
sign = '-'
else:
sign = '+'
zone = '%s%02d%02d' % (sign, hours, minutes // 60)
else:
now = time.gmtime(timeval)
# Timezone offset is always -0000
if usegmt:
zone = 'GMT'
else:
zone = '-0000'
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
now[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
now[0], now[3], now[4], now[5],
zone)
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<20020201195627.33539.96671@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
pid = os.getpid()
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = socket.getfqdn()
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
# These functions are in the standalone mimelib version only because they've
# subsequently been fixed in the latest Python versions.  We use this to work
# around broken older Pythons.
def parsedate(data):
if not data:
return None
return _parsedate(data)
def parsedate_tz(data):
if not data:
return None
return _parsedate_tz(data)
def parseaddr(addr):
addrs = _AddressList(addr).addresslist
if not addrs:
return '', ''
return addrs[0]
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str
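# For illustration: unquote('"hello"') -> 'hello' and
# unquote('<foo@bar>') -> 'foo@bar'; anything else is returned unchanged.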
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
"""Decode string according to RFC 2231"""
parts = s.split(TICK, 2)
if len(parts) <= 2:
return None, None, s
return parts
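# For illustration (value is an example only):
#   decode_rfc2231("us-ascii'en'Some%20value")
#       -> ['us-ascii', 'en', 'Some%20value']  (the value stays %-encoded)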
def encode_rfc2231(s, charset=None, language=None):
"""Encode string according to RFC 2231.
If neither charset nor language is given, then s is returned as-is. If
charset is given but not language, the string is encoded using the empty
string for language.
"""
    s = urllib.quote(s, safe='')
if charset is None and language is None:
return s
if language is None:
language = ''
return "%s'%s'%s" % (charset, language, s)
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
def decode_params(params):
"""Decode parameters list according to RFC 2231.
params is a sequence of 2-tuples containing (param name, string value).
"""
# Copy params so we don't mess with the original
params = params[:]
new_params = []
# Map parameter's name to a list of continuations. The values are a
# 3-tuple of the continuation number, the string value, and a flag
# specifying whether a particular segment is %-encoded.
rfc2231_params = {}
name, value = params.pop(0)
new_params.append((name, value))
while params:
name, value = params.pop(0)
if name.endswith('*'):
encoded = True
else:
encoded = False
value = unquote(value)
mo = rfc2231_continuation.match(name)
if mo:
name, num = mo.group('name', 'num')
if num is not None:
num = int(num)
rfc2231_params.setdefault(name, []).append((num, value, encoded))
else:
new_params.append((name, '"%s"' % quote(value)))
if rfc2231_params:
for name, continuations in rfc2231_params.items():
value = []
extended = False
# Sort by number
continuations.sort()
# And now append all values in numerical order, converting
# %-encodings for the encoded segments. If any of the
# continuation names ends in a *, then the entire string, after
# decoding segments and concatenating, must have the charset and
# language specifiers at the beginning of the string.
for num, s, encoded in continuations:
if encoded:
s = urllib.unquote(s)
extended = True
value.append(s)
value = quote(EMPTYSTRING.join(value))
if extended:
charset, language, value = decode_rfc2231(value)
new_params.append((name, (charset, language, '"%s"' % value)))
else:
new_params.append((name, '"%s"' % value))
return new_params
def collapse_rfc2231_value(value, errors='replace',
fallback_charset='us-ascii'):
if isinstance(value, tuple):
rawval = unquote(value[2])
charset = value[0] or 'us-ascii'
try:
return unicode(rawval, charset, errors)
except LookupError:
# XXX charset is unknown to Python.
return unicode(rawval, fallback_charset, errors)
else:
return unquote(value)
|
apache/airflow
|
refs/heads/main
|
airflow/providers/google/cloud/transfers/gcs_to_bigquery.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage to BigQuery operator."""
import json
from typing import Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
class GCSToBigQueryOperator(BaseOperator):
"""
Loads files from Google Cloud Storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToBigQueryOperator`
:param bucket: The bucket to load from. (templated)
:type bucket: str
:param source_objects: String or List of Google Cloud Storage URIs to load from. (templated)
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:type source_objects: str, list[str]
:param destination_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
If ``<project>`` is not included, project will be the project defined in
the connection json. (templated)
:type destination_project_dataset_table: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
Parameter must be defined if 'schema_object' is null and autodetect is False.
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
Parameter must be defined if 'schema_fields' is null and autodetect is False.
:type schema_object: str
    :param source_format: File format of the data to be loaded.
:type source_format: str
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:type max_id_key: str
:param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the BigQuery service.
:type bigquery_conn_id: str
:param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
and interact with the Google Cloud Storage service.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param external_table: Flag to specify if the destination table should be
a BigQuery external table. Default Value is False.
:type external_table: bool
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' is not available in concurrency with
dataset.table$partition.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
Not applicable for external tables.
:type cluster_fields: list[str]
:param autodetect: [Optional] Indicates if we should automatically infer the
options and schema for CSV and JSON sources. (Default: ``True``).
Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
        It is suggested to set this to True if the table is created outside of Airflow.
:type autodetect: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param location: [Optional] The geographic location of the job. Required except for US and EU.
See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
    :param labels: [Optional] Labels for the BigQuery table.
:type labels: dict
:param description: [Optional] Description for the BigQuery table.
:type description: str
"""
template_fields = (
'bucket',
'source_objects',
'schema_object',
'destination_project_dataset_table',
'impersonation_chain',
)
template_ext = ('.sql',)
ui_color = '#f0eee4'
def __init__(
self,
*,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
source_format='CSV',
compression='NONE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
encoding="UTF-8",
max_id_key=None,
bigquery_conn_id='google_cloud_default',
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
schema_update_options=(),
src_fmt_configs=None,
external_table=False,
time_partitioning=None,
cluster_fields=None,
autodetect=True,
encryption_configuration=None,
location=None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
labels=None,
description=None,
**kwargs,
):
super().__init__(**kwargs)
# GCS config
if src_fmt_configs is None:
src_fmt_configs = {}
if time_partitioning is None:
time_partitioning = {}
self.bucket = bucket
self.source_objects = source_objects if isinstance(source_objects, list) else [source_objects]
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.compression = compression
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.ignore_unknown_values = ignore_unknown_values
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.external_table = external_table
self.encoding = encoding
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.schema_update_options = schema_update_options
self.src_fmt_configs = src_fmt_configs
self.time_partitioning = time_partitioning
self.cluster_fields = cluster_fields
self.autodetect = autodetect
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
self.description = description
def execute(self, context):
bq_hook = BigQueryHook(
bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if not self.schema_fields:
if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
gcs_hook = GCSHook(
gcp_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
blob = gcs_hook.download(
bucket_name=self.bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
elif self.schema_object is None and self.autodetect is False:
raise AirflowException(
'At least one of `schema_fields`, `schema_object`, or `autodetect` must be passed.'
)
else:
schema_fields = None
else:
schema_fields = self.schema_fields
source_uris = [f'gs://{self.bucket}/{source_object}' for source_object in self.source_objects]
conn = bq_hook.get_conn()
cursor = conn.cursor()
if self.external_table:
cursor.create_external_table(
external_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
compression=self.compression,
skip_leading_rows=self.skip_leading_rows,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
src_fmt_configs=self.src_fmt_configs,
encryption_configuration=self.encryption_configuration,
labels=self.labels,
description=self.description,
)
else:
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
autodetect=self.autodetect,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
schema_update_options=self.schema_update_options,
src_fmt_configs=self.src_fmt_configs,
time_partitioning=self.time_partitioning,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
labels=self.labels,
description=self.description,
)
if cursor.use_legacy_sql:
escaped_table_name = f'[{self.destination_project_dataset_table}]'
else:
escaped_table_name = f'`{self.destination_project_dataset_table}`'
if self.max_id_key:
cursor.execute(f'SELECT MAX({self.max_id_key}) FROM {escaped_table_name}')
row = cursor.fetchone()
max_id = row[0] if row[0] else 0
self.log.info(
'Loaded BQ data with max %s.%s=%s',
self.destination_project_dataset_table,
self.max_id_key,
max_id,
)
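# A minimal usage sketch (hypothetical DAG task; bucket, object, and table
# names are placeholders, not part of this module):
#
#     load_csv = GCSToBigQueryOperator(
#         task_id='gcs_to_bq_example',
#         bucket='my-bucket',
#         source_objects=['data/sales_*.csv'],
#         destination_project_dataset_table='my_project.my_dataset.sales',
#         write_disposition='WRITE_TRUNCATE',
#     )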
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnboundLocalVariableInspection/DefaultArgument.py
|
83
|
def f():
z = 2
def g(z=z): #pass
return z
return g
|
carsonmcdonald/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/common/window_switching_tests.py
|
60
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
class WindowSwitchingTests(unittest.TestCase):
def testShouldSwitchFocusToANewWindowWhenItIsOpenedAndNotStopFutureOperations(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
self.driver.find_element_by_link_text("Open new window").click()
self.assertEqual(self.driver.title, "XHTML Test Page")
self.driver.switch_to.window("result")
self.assertEqual(self.driver.title, "We Arrive Here")
self._loadPage("iframes")
handle = self.driver.current_window_handle
self.driver.find_element_by_id("iframe_page_heading")
self.driver.switch_to.frame("iframe1")
self.assertEqual(self.driver.current_window_handle, handle)
self.driver.close()
self.driver.switch_to.window(current)
def testShouldThrowNoSuchWindowException(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
try:
self.driver.switch_to.window("invalid name")
self.fail("NoSuchWindowException expected")
except NoSuchWindowException:
pass # Expected
self.driver.switch_to.window(current)
@pytest.mark.ignore_chrome
def testShouldThrowNoSuchWindowExceptionOnAnAttemptToGetItsHandle(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
self.driver.find_element(By.LINK_TEXT,"Open new window").click()
self.driver.switch_to.window("result")
self.driver.close()
        try:
self.driver.current_window_handle
self.fail("NoSuchWindowException expected")
except NoSuchWindowException:
pass # Expected.
finally:
self.driver.switch_to.window(current)
@pytest.mark.ignore_chrome
@pytest.mark.ignore_ie
def testShouldThrowNoSuchWindowExceptionOnAnyOperationIfAWindowIsClosed(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
self.driver.find_element(By.LINK_TEXT,"Open new window").click()
self.driver.switch_to.window("result")
self.driver.close()
try:
            try:
self.driver.title
self.fail("NoSuchWindowException expected")
except NoSuchWindowException:
pass # Expected.
            try:
self.driver.find_element_by_tag_name("body")
self.fail("NoSuchWindowException expected")
except NoSuchWindowException:
pass # Expected.
finally:
self.driver.switch_to.window(current)
@pytest.mark.ignore_chrome
@pytest.mark.ignore_ie
def testShouldThrowNoSuchWindowExceptionOnAnyElementOperationIfAWindowIsClosed(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
self.driver.find_element(By.LINK_TEXT,"Open new window").click()
self.driver.switch_to.window("result")
element = self.driver.find_element_by_tag_name("body")
self.driver.close()
        try:
element.text
self.fail("NoSuchWindowException expected")
except NoSuchWindowException:
pass # Expected.
finally:
self.driver.switch_to.window(current)
def testClickingOnAButtonThatClosesAnOpenWindowDoesNotCauseTheBrowserToHang(self):
self._loadPage("xhtmlTest")
currentHandle = self.driver.current_window_handle
self.driver.find_element_by_name("windowThree").click()
self.driver.switch_to.window("result")
try:
self.driver.find_element_by_id("close").click()
finally:
self.driver.switch_to.window(currentHandle)
self.driver.find_element_by_id("linkId")
def testCanCallGetWindowHandlesAfterClosingAWindow(self):
self._loadPage("xhtmlTest")
currentHandle = self.driver.current_window_handle
self.driver.find_element_by_name("windowThree").click()
self.driver.switch_to.window("result")
try:
self.driver.find_element_by_id("close").click()
all_handles = self.driver.window_handles
self.assertEqual(1, len(all_handles))
finally:
self.driver.switch_to.window(currentHandle)
def testCanObtainAWindowHandle(self):
self._loadPage("xhtmlTest")
currentHandle = self.driver.current_window_handle
self.assertTrue(currentHandle is not None)
def testFailingToSwitchToAWindowLeavesTheCurrentWindowAsIs(self):
self._loadPage("xhtmlTest")
current = self.driver.current_window_handle
try:
self.driver.switch_to.window("I will never exist")
self.fail("expected exception")
except NoSuchWindowException:
pass
new_handle = self.driver.current_window_handle
self.assertEqual(current, new_handle)
def testThatAccessingFindingAnElementAfterWindowIsClosedAndHaventswitchedDoesntCrash(self):
self._loadPage("xhtmlTest")
currentHandle = self.driver.current_window_handle
self.driver.find_element_by_name("windowThree").click()
self.driver.switch_to.window("result")
try:
self.driver.find_element_by_id("close").click()
all_handles = self.driver.window_handles
self.assertEqual(1, len(all_handles))
self.driver.find_element_by_id("close")
self.fail("Should complain that driver not available but MUST NOT HANG!")
except WebDriverException:
pass  # this is expected
finally:
self.driver.switch_to.window(currentHandle)
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
Ballz0fSteel/Umeko
|
refs/heads/master
|
lib/youtube_dl/extractor/toypics.py
|
50
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
import re
class ToypicsIE(InfoExtractor):
IE_DESC = 'Toypics video'
_VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
'md5': '16e806ad6d6f58079d210fe30985e08b',
'info_dict': {
'id': '514',
'ext': 'mp4',
'title': "Chance-Bulge'd, 2",
'age_limit': 18,
'uploader': 'kidsune',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = self._parse_html5_media_entries(
url, webpage, video_id)[0]['formats']
title = self._html_search_regex([
r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h',
r'<title>([^<]+) - Toypics</title>',
], webpage, 'title')
uploader = self._html_search_regex(
r'More videos from <strong>([^<]+)</strong>', webpage, 'uploader',
fatal=False)
return {
'id': video_id,
'formats': formats,
'title': title,
'uploader': uploader,
'age_limit': 18,
}
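# Note (editorial aside): the negative lookahead in ToypicsUserIE._VALID_URL
# below is what keeps the user-profile extractor from matching /view/<id>
# video URLs, which belong to ToypicsIE above; a profile URL such as
# http://videos.toypics.net/Mikey (from the test) only matches here.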
class ToypicsUserIE(InfoExtractor):
IE_DESC = 'Toypics user profile'
_VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://videos.toypics.net/Mikey',
'info_dict': {
'id': 'Mikey',
},
'playlist_mincount': 19,
}
def _real_extract(self, url):
username = self._match_id(url)
profile_page = self._download_webpage(
url, username, note='Retrieving profile page')
video_count = int(self._search_regex(
r'public/">Public Videos \(([0-9]+)\)</a></li>', profile_page,
'video count'))
PAGE_SIZE = 8
urls = []
page_count = (video_count + PAGE_SIZE - 1) // PAGE_SIZE  # ceil(video_count / PAGE_SIZE)
for n in range(1, page_count + 1):
lpage_url = url + '/public/%d' % n
lpage = self._download_webpage(
lpage_url, username,
note='Downloading page %d/%d' % (n, page_count))
urls.extend(
re.findall(
r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"',
lpage))
return {
'_type': 'playlist',
'id': username,
'entries': [{
'_type': 'url',
'url': eurl,
'ie_key': 'Toypics',
} for eurl in urls]
}
|
mrg666/android_kernel_icon
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the functions' names and the call
time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no such parent exists,
create the function as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
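# Illustrative example (hypothetical trace line, matching the regex above):
#   parseLine(' <idle>-0 [001] 1234.5678: do_IRQ <-ret_from_intr')
# returns ('1234.5678', 'do_IRQ', 'ret_from_intr'),
# i.e. (call time, callee, caller).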
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
wong2/sentry
|
refs/heads/master
|
tests/sentry/metrics/test_statsd.py
|
27
|
from __future__ import absolute_import
from mock import patch
from sentry.metrics.statsd import StatsdMetricsBackend
from sentry.testutils import TestCase
class StatsdMetricsBackendTest(TestCase):
def setUp(self):
self.backend = StatsdMetricsBackend(prefix='sentrytest.')
@patch('statsd.StatsClient.incr')
def test_incr(self, mock_incr):
self.backend.incr('foo')
mock_incr.assert_called_once_with('sentrytest.foo', 1, 1)
@patch('statsd.StatsClient.timing')
def test_timing(self, mock_timing):
self.backend.timing('foo', 30)
mock_timing.assert_called_once_with('sentrytest.foo', 30, 1)
|
Aloomaio/googleads-python-lib
|
refs/heads/master
|
examples/adwords/v201806/misc/upload_media_bundle.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example uploads an HTML5 zip file.
"""
import urllib2
from googleads import adwords
def main(client):
# Initialize appropriate service.
media_service = client.GetService('MediaService', version='v201806')
# Create HTML5 media.
html5_zip = GetHTML5ZipFromUrl('https://goo.gl/9Y7qI2')
# Create a media bundle containing the zip file with all the HTML5 components.
media = [{
'xsi_type': 'MediaBundle',
'data': html5_zip,
'type': 'MEDIA_BUNDLE'
}]
# Upload HTML5 zip.
response = media_service.upload(media)
if response:
for media in response:
print(
'HTML5 media with ID %d, dimensions %dx%d, and MIME type "%s" '
'uploaded successfully.' %
(media['mediaId'], media['dimensions'][0]['value']['width'],
media['dimensions'][0]['value']['height'], media['mimeType']))
def GetHTML5ZipFromUrl(url):
"""Retrieve zip file from the given URL."""
response = urllib2.urlopen(url)
# Note: The utf-8 decode is for 2to3 Python 3 compatibility.
return response.read().decode('utf-8')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
|
vmindru/ansible
|
refs/heads/devel
|
test/units/modules/network/iosxr/test_iosxr_system.py
|
59
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
from ansible.modules.network.iosxr import iosxr_system
class TestIosxrSystemModule(TestIosxrModule):
module = iosxr_system
def setUp(self):
super(TestIosxrSystemModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_system.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_system.load_config')
self.load_config = self.mock_load_config.start()
self.mock_is_cliconf = patch('ansible.modules.network.iosxr.iosxr_system.is_cliconf')
self.is_cliconf = self.mock_is_cliconf.start()
def tearDown(self):
super(TestIosxrSystemModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('iosxr_system_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
self.is_cliconf.return_value = True
def test_iosxr_system_hostname_changed(self):
set_module_args(dict(hostname='foo'))
commands = ['hostname foo', 'no domain lookup disable']
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_domain_name(self):
set_module_args(dict(domain_name='test.com'))
commands = ['domain name test.com', 'no domain lookup disable']
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_domain_search(self):
set_module_args(dict(domain_search=['ansible.com', 'redhat.com']))
commands = ['domain list ansible.com', 'no domain list cisco.com', 'no domain lookup disable']
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_lookup_source(self):
set_module_args(dict(lookup_source='Ethernet1'))
commands = ['domain lookup source-interface Ethernet1', 'no domain lookup disable']
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_lookup_enabled(self):
set_module_args(dict(lookup_enabled=True))
commands = ['no domain lookup disable']
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_name_servers(self):
name_servers = ['8.8.8.8', '8.8.4.4', '1.1.1.1']
set_module_args(dict(name_servers=name_servers))
commands = ['domain name-server 1.1.1.1', 'no domain name-server 8.8.4.4', 'no domain lookup disable']
self.execute_module(changed=True)
def test_iosxr_system_state_absent(self):
set_module_args(dict(state='absent'))
commands = [
'no hostname',
'no domain name',
'no domain lookup disable',
'no domain lookup source-interface MgmtEth0/0/CPU0/0',
'no domain list redhat.com',
'no domain list cisco.com',
'no domain name-server 8.8.8.8',
'no domain name-server 8.8.4.4'
]
self.execute_module(changed=True, commands=commands)
def test_iosxr_system_no_change(self):
set_module_args(dict(hostname='iosxr01', domain_name='eng.ansible.com', lookup_enabled=False))
self.execute_module()
|
neerajvashistha/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/numpy/compat/_inspect.py
|
114
|
"""Subset of inspect module from upstream python
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
from __future__ import division, absolute_import, print_function
import types
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None
"""
return isinstance(object, types.MethodType)
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)
"""
return isinstance(object, types.FunctionType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables
"""
return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None.
"""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
# The following acrobatics are for anonymous (tuple) arguments.
# Which we do not need to support, so remove to avoid importing
# the dis module.
for i in range(nargs):
if args[i][:1] in ['', '.']:
raise TypeError("tuple function arguments are not supported")
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.__code__)
return args, varargs, varkw, func.__defaults__
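# Illustrative example:
#   def f(a, b=1, *args, **kw): pass
#   getargspec(f) -> (['a', 'b'], 'args', 'kw', (1,))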
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame.
"""
args, varargs, varkw = getargs(frame.f_code)
return args, varargs, varkw, frame.f_locals
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element.
"""
if type(object) in [list, tuple]:
return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + ', '.join(specs) + ')'
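# Illustrative example, continuing the getargspec one above:
#   formatargspec(['a', 'b'], 'args', 'kw', (1,)) -> '(a, b=1, *args, **kw)'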
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
|
etos/django
|
refs/heads/master
|
django/contrib/sessions/exceptions.py
|
931
|
from django.core.exceptions import SuspiciousOperation
class InvalidSessionKey(SuspiciousOperation):
"""Invalid characters in session key"""
pass
class SuspiciousSession(SuspiciousOperation):
"""The session may be tampered with"""
pass
|
deeplook/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/source_examples/plotting_line_missing_points.py
|
63
|
from bokeh.plotting import figure, output_file, show
output_file("line.html")
p = figure(plot_width=400, plot_height=400)
# add a line renderer with a NaN
nan = float('nan')
p.line([1, 2, 3, nan, 4, 5], [6, 7, 2, 4, 4, 5], line_width=2)
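# The NaN in the x-coordinates above makes bokeh break the line into two
# segments, leaving a visible gap at the missing point.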
show(p)
|
fabiobatalha/crossrefapi
|
refs/heads/master
|
tests/test_restful.py
|
1
|
# coding: utf-8
import unittest
from crossref import restful
from crossref import VERSION
class RestfulTest(unittest.TestCase):
"""
These tests exercise the live API integration; their main purpose is to
validate the JSON structure of the API results.
These tests may fail with connectivity errors if the Crossref API is
temporarily out of service.
"""
def setUp(self):
self.etiquette = restful.Etiquette(
application_name='UnitTest CrossrefAPI',
application_version=VERSION,
application_url='https://github.com/fabiobatalha/crossrefapi',
contact_email='undefined'
)
def test_work_agency_message(self):
"""
Testing the base structure for the /works/{DOI}/agency endpoint.
If the whole base structure is present, this test will not raise dict
KeyError exceptions.
"""
works = restful.Works(etiquette=self.etiquette)
result = works.agency('10.1590/S0102-09352010000200002')
self.assertEqual(result['agency']['id'], 'crossref')
def test_work_agency_header(self):
"""
Testing the base structure for the /works/{DOI}/agency endpoint.
If the whole base structure is present, this test will not raise dict
KeyError exceptions.
"""
works = restful.Works(etiquette=self.etiquette)
result = works.agency('10.1590/S0102-09352010000200002', only_message=False)
self.assertEqual(result['message-type'], 'work-agency')
def test_work_select_fields(self):
result = restful.Works(etiquette=self.etiquette).select('DOI').url
self.assertEqual(result, 'https://api.crossref.org/works?select=DOI')
def test_work_select_fields_multiple_parameter_and_array(self):
result = restful.Works(etiquette=self.etiquette).select('DOI', 'title').select('subject').select(['relation', 'editor']).select('relation, editor').url
self.assertEqual(result, 'https://api.crossref.org/works?select=DOI%2Ceditor%2Crelation%2Csubject%2Ctitle')
def test_work_with_sample(self):
result = restful.Works(etiquette=self.etiquette).sample(5).url
self.assertEqual(result, 'https://api.crossref.org/works?sample=5')
def test_work_with_sample_and_filters(self):
result = restful.Works(etiquette=self.etiquette).filter(type='journal-article').sample(5).url
self.assertEqual(result, 'https://api.crossref.org/works?filter=type%3Ajournal-article&sample=5')
def test_members_filters(self):
result = restful.Members(etiquette=self.etiquette).filter(has_public_references="true").url
self.assertEqual(result, 'https://api.crossref.org/members?filter=has-public-references%3Atrue')
def test_funders_filters(self):
result = restful.Funders(etiquette=self.etiquette).filter(location="Japan").url
self.assertEqual(result, 'https://api.crossref.org/funders?filter=location%3AJapan')
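# The tests below exercise HTTPRequest's rate-limit header handling; as the
# expected values suggest, 's'/'m'/'h' suffixed X-Rate-Limit-Interval values
# are normalised to whole seconds.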
class HTTPRequestTest(unittest.TestCase):
def setUp(self):
self.httprequest = restful.HTTPRequest()
def test_default_rate_limits(self):
expected = {'X-Rate-Limit-Interval': 1, 'X-Rate-Limit-Limit': 50}
self.assertEqual(self.httprequest.rate_limits, expected)
def test_update_rate_limits_seconds(self):
headers = {'X-Rate-Limit-Interval': '2s', 'X-Rate-Limit-Limit': 50}
self.httprequest._update_rate_limits(headers)
expected = {'X-Rate-Limit-Interval': 2, 'X-Rate-Limit-Limit': 50}
self.assertEqual(self.httprequest.rate_limits, expected)
def test_update_rate_limits_minutes(self):
headers = {'X-Rate-Limit-Interval': '2m', 'X-Rate-Limit-Limit': 50}
self.httprequest._update_rate_limits(headers)
expected = {'X-Rate-Limit-Interval': 120, 'X-Rate-Limit-Limit': 50}
self.assertEqual(self.httprequest.rate_limits, expected)
def test_update_rate_limits_hours(self):
headers = {'X-Rate-Limit-Interval': '2h', 'X-Rate-Limit-Limit': 50}
self.httprequest._update_rate_limits(headers)
expected = {'X-Rate-Limit-Interval': 7200, 'X-Rate-Limit-Limit': 50}
self.assertEqual(self.httprequest.rate_limits, expected)
|
dbarbier/ot-svn
|
refs/heads/master
|
python/doc/sphinxext/numpydoc/docscrape.py
|
3
|
"""Extract reference documentation from the NumPy source tree.
"""
from __future__ import division, absolute_import, print_function
import inspect
import textwrap
import re
import pydoc
from warnings import warn
import collections
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
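# Illustrative Reader usage:
#   r = Reader("a\n\nb")
#   r.read_to_next_empty_line() -> ['a']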
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
return  # truncated section; stop iterating
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Returns', 'Raises', 'Warns',
'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', '\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
if param_type:
out += ['%s : %s' % (param, param_type)]
else:
out += [param]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' ' * indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
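# Illustrative: dedent_lines(['    a', '      b']) -> ['a', '  b']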
def header(text, style='-'):
return text + '\n' + style * len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', '\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [('Methods', self.methods),
('Attributes', self.properties)]:
if not self[field]:
doc_list = []
for name in sorted(items):
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append((name, '', splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and isinstance(func, collections.Callable))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and
(func is None or isinstance(func, property) or
inspect.isgetsetdescriptor(func))]
|
ecosoft-odoo/odoo
|
refs/heads/8.0
|
addons/web_diagram/controllers/__init__.py
|
1214
|
from . import main
|
EarToEarOak/Wild-Find
|
refs/heads/master
|
wildfind/harrier/database.py
|
1
|
#!/usr/bin/env python
#
#
# Wild Find
#
#
# Copyright 2014 - 2017 Al Brown
#
# Wildlife tracking and mapping
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import Queue
import ctypes
import os
import platform
import sqlite3
import threading
import time
from wildfind.common.database import create_database, name_factory
from wildfind.harrier import events
GET_SCANS, \
ADD_SIGNAL, GET_SIGNALS, \
ADD_LOG, GET_LOG, \
CLOSE = range(6)
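# A single Database thread owns the sqlite connection; other threads interact
# with it only by posting the event types above onto its queue (see
# append_signal / get_scans below), which keeps all sqlite3 access on the
# thread that created the connection.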
class Database(threading.Thread):
def __init__(self, path, notify):
threading.Thread.__init__(self)
self.name = 'Database'
self._path = path
self._notify = notify
self._conn = None
self._queue = Queue.Queue()
if os.path.exists(path):
print 'Appending:\t{}'.format(path)
else:
print 'Creating:\t{}'.format(path)
self.start()
def __connect(self):
self._conn = sqlite3.connect(self._path)
self._conn.row_factory = name_factory
error = create_database(self._conn)
if error is not None:
events.Post(self._notify).error(error)
def __add_signal(self, **kwargs):
with self._conn:
timeStamp = int(kwargs['timeStamp'])
signal = kwargs['signal']
frequency = kwargs['frequency']
survey = kwargs['survey']
cmd = 'insert into Scans values(?, ?, ?)'
try:
self._conn.execute(cmd, (timeStamp, frequency, survey))
except sqlite3.IntegrityError:
pass
cmd = 'insert into Signals values (null, ?, ?, ?, ?, ?, ?, ?)'
self._conn.execute(cmd, (timeStamp,
signal.freq,
signal.mod,
signal.rate,
signal.level,
signal.lon,
signal.lat))
def __add_log(self, **kwargs):
with self._conn:
timeStamp = int(kwargs['timeStamp'])
message = kwargs['message']
cmd = 'insert into Log values (null, ?, ?)'
self._conn.execute(cmd, (timeStamp, message))
def __get_scans(self, callback):
cursor = self._conn.cursor()
cmd = 'select * from Scans'
cursor.execute(cmd)
scans = cursor.fetchall()
callback(scans)
def __get_signals(self, callback):
cursor = self._conn.cursor()
cmd = 'select * from Signals'
cursor.execute(cmd)
signals = cursor.fetchall()
for signal in signals:
del signal['Id']
callback(signals)
def __get_log(self, callback):
cursor = self._conn.cursor()
cmd = 'select * from Log'
cursor.execute(cmd)
signals = cursor.fetchall()
for signal in signals:
del signal['Id']
callback(signals)
def run(self):
self.__connect()
while True:
if not self._queue.empty():
event = self._queue.get()
eventType = event.get_type()
if eventType == GET_SCANS:
callback = event.get_arg('callback')
self.__get_scans(callback)
elif eventType == ADD_SIGNAL:
self.__add_signal(**event.get_args())
elif eventType == GET_SIGNALS:
callback = event.get_arg('callback')
self.__get_signals(callback)
elif eventType == ADD_LOG:
self.__add_log(**event.get_args())
elif eventType == GET_LOG:
callback = event.get_arg('callback')
self.__get_log(callback)
elif eventType == CLOSE:
break
else:
try:
time.sleep(0.1)
except IOError:
pass
self._conn.close()
def append_signal(self, timeStamp, signal, frequency, survey):
event = events.Event(ADD_SIGNAL,
survey=survey,
signal=signal,
frequency=frequency,
timeStamp=timeStamp)
self._queue.put(event)
def append_log(self, message):
timeStamp = time.time()
event = events.Event(ADD_LOG, message=message, timeStamp=timeStamp)
self._queue.put(event)
return timeStamp
def get_size(self):
path = os.path.realpath(self._path)
folder, _tail = os.path.split(path)
size = os.path.getsize(path)
space = 0
if platform.system() == 'Windows':
puSpace = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), # @UndefinedVariable
None,
None,
ctypes.pointer(puSpace))
space = puSpace.value
else:
statvfs = os.statvfs(folder) # @UndefinedVariable
space = statvfs.f_frsize * statvfs.f_bfree
return size, space
def get_scans(self, callback):
event = events.Event(GET_SCANS, callback=callback)
self._queue.put(event)
def get_signals(self, callback):
event = events.Event(GET_SIGNALS, callback=callback)
self._queue.put(event)
def get_log(self, callback):
event = events.Event(GET_LOG, callback=callback)
self._queue.put(event)
def stop(self):
event = events.Event(CLOSE)
self._queue.put(event)
if __name__ == '__main__':
print 'Please run harrier.py'
exit(1)
|
kevinxucs/p2pool
|
refs/heads/master
|
p2pool/p2p.py
|
21
|
from __future__ import division
import math
import random
import sys
import time
from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log
import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class PeerMisbehavingError(Exception):
pass
def fragment(f, **kwargs):
try:
f(**kwargs)
except p2protocol.TooLong:
fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
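# Illustrative: if a send_* call raises p2protocol.TooLong, fragment() splits
# every list-valued keyword argument in half and retries each half
# recursively, so an oversized packet ends up sent as several smaller ones.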
class Protocol(p2protocol.Protocol):
max_remembered_txs_size = 2500000
def __init__(self, node, incoming):
p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
self.node = node
self.incoming = incoming
self.other_version = None
self.connected2 = False
def connectionMade(self):
self.factory.proto_made_connection(self)
self.connection_lost_event = variable.Event()
self.addr = self.transport.getPeer().host, self.transport.getPeer().port
self.send_version(
version=1100,
services=0,
addr_to=dict(
services=0,
address=self.transport.getPeer().host,
port=self.transport.getPeer().port,
),
addr_from=dict(
services=0,
address=self.transport.getHost().host,
port=self.transport.getHost().port,
),
nonce=self.node.nonce,
sub_version=p2pool.__version__,
mode=1,
best_share_hash=self.node.best_share_hash_func(),
)
self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
self.get_shares = deferral.GenericDeferrer(
max_id=2**256,
func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
timeout=15,
on_timeout=self.transport.loseConnection,
)
self.remote_tx_hashes = set()  # view of peer's known_txs; not actually initially empty, but sending txs instead of tx hashes won't hurt
self.remote_remembered_txs_size = 0
self.remembered_txs = {} # view of peer's mining_txs
self.remembered_txs_size = 0
self.known_txs_cache = {}
def _connect_timeout(self):
self.timeout_delayed = None
print 'Handshake timed out, disconnecting from %s:%i' % self.addr
if hasattr(self.transport, 'abortConnection'):
# Available since Twisted 11.1
self.transport.abortConnection()
else:
# This doesn't always close timed out connections!
self.transport.loseConnection()
def packetReceived(self, command, payload2):
try:
if command != 'version' and not self.connected2:
raise PeerMisbehavingError('first message was not version message')
p2protocol.Protocol.packetReceived(self, command, payload2)
except PeerMisbehavingError, e:
print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
self.badPeerHappened()
def badPeerHappened(self):
if p2pool.DEBUG:
print "Bad peer banned:", self.addr
self.transport.loseConnection()
if self.transport.getPeer().host != '127.0.0.1': # never ban localhost
self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
def _timeout(self):
self.timeout_delayed = None
print 'Connection timed out, disconnecting from %s:%i' % self.addr
if hasattr(self.transport, 'abortConnection'):
# Available since Twisted 11.1
self.transport.abortConnection()
else:
# This doesn't always close timed out connections!
self.transport.loseConnection()
message_version = pack.ComposedType([
('version', pack.IntType(32)),
('services', pack.IntType(64)),
('addr_to', bitcoin_data.address_type),
('addr_from', bitcoin_data.address_type),
('nonce', pack.IntType(64)),
('sub_version', pack.VarStrType()),
('mode', pack.IntType(32)), # always 1 for legacy compatibility
('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
])
def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
if self.other_version is not None:
raise PeerMisbehavingError('more than one version message')
if version < 8:
raise PeerMisbehavingError('peer too old')
self.other_version = version
self.other_sub_version = sub_version[:512]
self.other_services = services
if nonce == self.node.nonce:
raise PeerMisbehavingError('was connected to self')
if nonce in self.node.peers:
if p2pool.DEBUG:
print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
self.transport.loseConnection()
return
self.nonce = nonce
self.connected2 = True
self.timeout_delayed.cancel()
self.timeout_delayed = reactor.callLater(100, self._timeout)
old_dataReceived = self.dataReceived
def new_dataReceived(data):
if self.timeout_delayed is not None:
self.timeout_delayed.reset(100)
old_dataReceived(data)
self.dataReceived = new_dataReceived
self.factory.proto_connected(self)
self._stop_thread = deferral.run_repeatedly(lambda: [
self.send_ping(),
random.expovariate(1/100)][-1])
self._stop_thread2 = deferral.run_repeatedly(lambda: [
self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,
random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
if best_share_hash is not None:
self.node.handle_share_hashes([best_share_hash], self)
if self.other_version < 8:
return
def update_remote_view_of_my_known_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.send_have_tx(tx_hashes=list(added))
if removed:
self.send_losing_tx(tx_hashes=list(removed))
# cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
reactor.callLater(20, self.known_txs_cache.pop, key)
watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
def update_remote_view_of_my_mining_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
if removed:
self.send_forget_tx(tx_hashes=list(removed))
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)
watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
message_ping = pack.ComposedType([])
def handle_ping(self):
pass
message_addrme = pack.ComposedType([
('port', pack.IntType(16)),
])
def handle_addrme(self, port):
host = self.transport.getPeer().host
#print 'addrme from', host, port
if host == '127.0.0.1':
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrme(port=port) # services...
else:
self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[
dict(
address=dict(
services=self.other_services,
address=host,
port=port,
),
timestamp=int(time.time()),
),
])
message_addrs = pack.ComposedType([
('addrs', pack.ListType(pack.ComposedType([
('timestamp', pack.IntType(64)),
('address', bitcoin_data.address_type),
]))),
])
def handle_addrs(self, addrs):
for addr_record in addrs:
self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
message_getaddrs = pack.ComposedType([
('count', pack.IntType(32)),
])
def handle_getaddrs(self, count):
if count > 100:
count = 100
self.send_addrs(addrs=[
dict(
timestamp=int(self.node.addr_store[host, port][2]),
address=dict(
services=self.node.addr_store[host, port][0],
address=host,
port=port,
),
) for host, port in
self.node.get_good_peers(count)
])
message_shares = pack.ComposedType([
('shares', pack.ListType(p2pool_data.share_type)),
])
def handle_shares(self, shares):
self.node.handle_shares([p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= 9], self)
def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
if self.other_version >= 8:
tx_hashes = set()
for share in shares:
if share.hash in include_txs_with:
x = share.get_other_tx_hashes(tracker)
if x is not None:
tx_hashes.update(x)
hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
if new_remote_remembered_txs_size > self.max_remembered_txs_size:
raise ValueError('shares have too many txs')
self.remote_remembered_txs_size = new_remote_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
fragment(self.send_shares, shares=[share.as_share() for share in shares])
if self.other_version >= 8:
self.send_forget_tx(tx_hashes=hashes_to_send)
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
message_sharereq = pack.ComposedType([
('id', pack.IntType(256)),
('hashes', pack.ListType(pack.IntType(256))),
('parents', pack.VarIntType()),
('stops', pack.ListType(pack.IntType(256))),
])
def handle_sharereq(self, id, hashes, parents, stops):
shares = self.node.handle_get_shares(hashes, parents, stops, self)
try:
self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
except p2protocol.TooLong:
self.send_sharereply(id=id, result='too long', shares=[])
message_sharereply = pack.ComposedType([
('id', pack.IntType(256)),
('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
('shares', pack.ListType(p2pool_data.share_type)),
])
def handle_sharereply(self, id, result, shares):
if result == 'good':
res = [p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= 9]
else:
res = failure.Failure("sharereply result: " + result)
self.get_shares.got_response(id, res)
message_bestblock = pack.ComposedType([
('header', bitcoin_data.block_header_type),
])
def handle_bestblock(self, header):
self.node.handle_bestblock(header, self)
message_have_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_have_tx(self, tx_hashes):
#assert self.remote_tx_hashes.isdisjoint(tx_hashes)
self.remote_tx_hashes.update(tx_hashes)
while len(self.remote_tx_hashes) > 10000:
self.remote_tx_hashes.pop()
message_losing_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_losing_tx(self, tx_hashes):
#assert self.remote_tx_hashes.issuperset(tx_hashes)
self.remote_tx_hashes.difference_update(tx_hashes)
message_remember_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
('txs', pack.ListType(bitcoin_data.tx_type)),
])
def handle_remember_tx(self, tx_hashes, txs):
for tx_hash in tx_hashes:
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.transport.loseConnection()
return
if tx_hash in self.node.known_txs_var.value:
tx = self.node.known_txs_var.value[tx_hash]
else:
for cache in self.known_txs_cache.itervalues():
if tx_hash in cache:
tx = cache[tx_hash]
print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
break
else:
print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
self.transport.loseConnection()
return
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs = dict(self.node.known_txs_var.value)
warned = False
for tx in txs:
tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.transport.loseConnection()
return
if tx_hash in self.node.known_txs_var.value and not warned:
print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)
warned = True
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs[tx_hash] = tx
self.node.known_txs_var.set(new_known_txs)
if self.remembered_txs_size >= self.max_remembered_txs_size:
raise PeerMisbehavingError('too much transaction data stored')
message_forget_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_forget_tx(self, tx_hashes):
for tx_hash in tx_hashes:
self.remembered_txs_size -= 100 + bitcoin_data.tx_type.packed_size(self.remembered_txs[tx_hash])
assert self.remembered_txs_size >= 0
del self.remembered_txs[tx_hash]
def connectionLost(self, reason):
self.connection_lost_event.happened()
if self.timeout_delayed is not None:
self.timeout_delayed.cancel()
if self.connected2:
self.factory.proto_disconnected(self, reason)
self._stop_thread()
self._stop_thread2()
self.connected2 = False
self.factory.proto_lost_connection(self, reason)
if p2pool.DEBUG:
print "Peer connection lost:", self.addr, reason
self.get_shares.respond_all(reason)
@defer.inlineCallbacks
def do_ping(self):
start = reactor.seconds()
yield self.get_shares(hashes=[0], parents=0, stops=[])
end = reactor.seconds()
defer.returnValue(end - start)
class ServerFactory(protocol.ServerFactory):
def __init__(self, node, max_conns):
self.node = node
self.max_conns = max_conns
self.conns = {}
self.running = False
self.listen_port = None
def buildProtocol(self, addr):
if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
return None
if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
return None
p = Protocol(self.node, True)
p.factory = self
if p2pool.DEBUG:
print "Got peer connection from:", addr
return p
def _host_to_ident(self, host):
a, b, c, d = host.split('.')
return a, b
def proto_made_connection(self, proto):
ident = self._host_to_ident(proto.transport.getPeer().host)
self.conns[ident] = self.conns.get(ident, 0) + 1
def proto_lost_connection(self, proto, reason):
ident = self._host_to_ident(proto.transport.getPeer().host)
self.conns[ident] -= 1
if not self.conns[ident]:
del self.conns[ident]
def proto_connected(self, proto):
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.node.lost_conn(proto, reason)
def start(self):
assert not self.running
self.running = True
def attempt_listen():
if self.running:
self.listen_port = reactor.listenTCP(self.node.port, self)
deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
def stop(self):
assert self.running
self.running = False
return self.listen_port.stopListening()
class ClientFactory(protocol.ClientFactory):
def __init__(self, node, desired_conns, max_attempts):
self.node = node
self.desired_conns = desired_conns
self.max_attempts = max_attempts
self.attempts = set()
self.conns = set()
self.running = False
def _host_to_ident(self, host):
a, b, c, d = host.split('.')
return a, b
def buildProtocol(self, addr):
p = Protocol(self.node, False)
p.factory = self
return p
def startedConnecting(self, connector):
ident = self._host_to_ident(connector.getDestination().host)
if ident in self.attempts:
raise AssertionError('already have attempt')
self.attempts.add(ident)
def clientConnectionFailed(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def clientConnectionLost(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def proto_made_connection(self, proto):
pass
def proto_lost_connection(self, proto, reason):
pass
def proto_connected(self, proto):
self.conns.add(proto)
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.conns.remove(proto)
self.node.lost_conn(proto, reason)
def start(self):
assert not self.running
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def stop(self):
assert self.running
self.running = False
self._stop_thinking()
def _think(self):
try:
if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
(host, port), = self.node.get_good_peers(1)
if self._host_to_ident(host) in self.attempts:
pass
elif host in self.node.bans and self.node.bans[host] > time.time():
pass
else:
#print 'Trying to connect to', host, port
reactor.connectTCP(host, port, self, timeout=5)
except:
log.err()
return random.expovariate(1/1)
class SingleClientFactory(protocol.ReconnectingClientFactory):
def __init__(self, node):
self.node = node
def buildProtocol(self, addr):
p = Protocol(self.node, incoming=False)
p.factory = self
return p
def proto_made_connection(self, proto):
pass
def proto_lost_connection(self, proto, reason):
pass
def proto_connected(self, proto):
self.resetDelay()
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.node.lost_conn(proto, reason)
class Node(object):
def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({})):
self.best_share_hash_func = best_share_hash_func
self.port = port
self.net = net
self.addr_store = dict(addr_store)
self.connect_addrs = connect_addrs
self.preferred_storage = preferred_storage
self.known_txs_var = known_txs_var
self.mining_txs_var = mining_txs_var
self.traffic_happened = variable.Event()
self.nonce = random.randrange(2**64)
self.peers = {}
self.bans = {} # address -> end_time
self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
self.serverfactory = ServerFactory(self, max_incoming_conns)
self.running = False
def start(self):
if self.running:
raise ValueError('already running')
self.clientfactory.start()
self.serverfactory.start()
self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def _think(self):
try:
if len(self.addr_store) < self.preferred_storage and self.peers:
random.choice(self.peers.values()).send_getaddrs(count=8)
except:
log.err()
return random.expovariate(1/20)
@defer.inlineCallbacks
def stop(self):
if not self.running:
raise ValueError('already stopped')
self.running = False
self._stop_thinking()
yield self.clientfactory.stop()
yield self.serverfactory.stop()
for singleclientconnector in self.singleclientconnectors:
yield singleclientconnector.factory.stopTrying()
yield singleclientconnector.disconnect()
del self.singleclientconnectors
def got_conn(self, conn):
if conn.nonce in self.peers:
raise ValueError('already have peer')
self.peers[conn.nonce] = conn
print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
def lost_conn(self, conn, reason):
if conn.nonce not in self.peers:
raise ValueError('''don't have peer''')
if conn is not self.peers[conn.nonce]:
raise ValueError('wrong conn')
del self.peers[conn.nonce]
print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
def got_addr(self, (host, port), services, timestamp):
if (host, port) in self.addr_store:
old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
else:
if len(self.addr_store) < 10000:
self.addr_store[host, port] = services, timestamp, timestamp
def handle_shares(self, shares, peer):
print 'handle_shares', (shares, peer)
def handle_share_hashes(self, hashes, peer):
print 'handle_share_hashes', (hashes, peer)
def handle_get_shares(self, hashes, parents, stops, peer):
print 'handle_get_shares', (hashes, parents, stops, peer)
def handle_bestblock(self, header, peer):
print 'handle_bestblock', header
def get_good_peers(self, max_count):
t = time.time()
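        # Rank peers by longevity (time between first_seen and last_seen)
        # relative to staleness (time since last_seen), jittered with an
        # exponential draw so repeated calls don't always pick the same
        # peers; the most promising peers sort first.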
return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
-math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
)][:max_count]
|
bcherry/bcherry
|
refs/heads/master
|
oldstuff/python/space/shot.py
|
1
|
class Shot:
def __init__(self,
|
YuriGural/erpnext
|
refs/heads/master
|
erpnext/docs/temp.py
|
33
|
import os, re
for basepath, folders, files in os.walk("."):
for f in files:
if f.endswith(".html") or f.endswith(".md"):
with open(os.path.join(basepath, f), "r") as c:
content = c.read()
for path in re.findall("""{{.?docs_base_url.?}}([^'"\)]*)""", content):
print path
|
eigentor/tommiblog
|
refs/heads/master
|
vendor/psy/psysh/test/tools/vis.py
|
710
|
"""
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
`VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
"""
Encodes the string `src` into libbsd's vis encoding.
`flags` must be one of the VIS_* constants
C definition:
int strvis(char *dst, char *src, int flags);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src) * 4))
src_p = c_char_p(src)
flags = c_int(flags)
bytes_written = _strvis(dst_p, src_p, flags)
if -1 == bytes_written:
raise RuntimeError('vis failed to encode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
def unvis(src):
"""
Decodes a string encoded by vis.
C definition:
int strunvis(char *dst, char *src);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src)))
src_p = c_char_p(src)
bytes_written = _strunvis(dst_p, src_p)
if -1 == bytes_written:
raise RuntimeError('unvis failed to decode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
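if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): round-trip a
    # string through vis()/unvis(). Assumes libbsd is installed so that
    # find_library('bsd') succeeded at import time, as the module requires.
    original = 'tab:\tnewline:\n'
    encoded = vis(original, VIS_WHITE)  # whitespace escaped; exact form depends on flags
    assert unvis(encoded) == original   # decoding recovers the input
    print(encoded)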
|
MIPS/external-chromium_org
|
refs/heads/dev-mips-jb-kitkat
|
build/android/pylib/utils/apk_helper.py
|
62
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing utilities for apk packages."""
import re
from pylib import cmd_helper
def GetPackageName(apk_path):
"""Returns the package name of the apk."""
aapt_output = cmd_helper.GetCmdOutput(
['aapt', 'dump', 'badging', apk_path]).split('\n')
package_name_re = re.compile(r'package: .*name=\'(\S*)\'')
for line in aapt_output:
m = package_name_re.match(line)
if m:
return m.group(1)
raise Exception('Failed to determine package name of %s' % apk_path)
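if __name__ == '__main__':
  # Hedged sketch (not part of the original file): shows the badging regex
  # above extracting a package name. The sample line is illustrative, not
  # captured aapt output.
  sample = "package: name='com.example.app'"
  match = re.compile(r'package: .*name=\'(\S*)\'').match(sample)
  assert match and match.group(1) == 'com.example.app'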
|
gautamMalu/rootfs_xen_arndale
|
refs/heads/master
|
usr/lib/python2.7/email/encoders.py
|
263
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
import base64
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(' ', '=20')
def _bencode(s):
# We can't quite use base64.encodestring() since it tacks on a "courtesy
# newline". Blech!
if not s:
return s
hasnewline = (s[-1] == '\n')
value = base64.encodestring(s)
if not hasnewline and value[-1] == '\n':
return value[:-1]
return value
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload()
encdata = _bencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload()
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload()
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg['Content-Transfer-Encoding'] = '7bit'
return
# We play a trick to make this go fast. If encoding to ASCII succeeds, we
# know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.encode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
|
dhalleine/tensorflow
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/evaluation_test.py
|
2
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
def GenerateTestData(num_classes, batch_size):
inputs = np.random.rand(batch_size, num_classes)
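  # Note: the seed is set after `inputs` are drawn, so inputs differ across
  # runs while the labels below are deterministic.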
np.random.seed(0)
labels = np.random.randint(low=0, high=num_classes, size=batch_size)
labels = labels.reshape((batch_size,))
return inputs, labels
def TestModel(inputs):
scale = tf.Variable(1.0, trainable=False)
  # Scaling the outputs won't change the result...
outputs = tf.mul(inputs, scale)
return tf.argmax(outputs, 1), scale
def GroundTruthAccuracy(inputs, labels, batch_size):
predictions = np.argmax(inputs, 1)
num_correct = np.sum(predictions == labels)
return float(num_correct) / batch_size
class EvaluationTest(tf.test.TestCase):
def setUp(self):
super(EvaluationTest, self).setUp()
num_classes = 8
batch_size = 16
inputs, labels = GenerateTestData(num_classes, batch_size)
self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)
self._global_step = slim.get_or_create_global_step()
self._inputs = tf.constant(inputs, dtype=tf.float32)
self._labels = tf.constant(labels, dtype=tf.int64)
self._predictions, self._scale = TestModel(self._inputs)
def testUpdateOpsAreEvaluated(self):
accuracy, update_op = slim.metrics.streaming_accuracy(
self._predictions, self._labels)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
with self.test_session() as sess:
slim.evaluation.evaluation(
sess, init_op=init_op, eval_op=update_op)
self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)
def testSummariesAreFlushedToDisk(self):
output_dir = os.path.join(self.get_temp_dir(), 'flush_test')
if tf.gfile.Exists(output_dir): # For running on jenkins.
tf.gfile.DeleteRecursively(output_dir)
accuracy0, update_op0 = tf.contrib.metrics.streaming_accuracy(
self._predictions, self._labels)
accuracy1, update_op1 = tf.contrib.metrics.streaming_accuracy(
self._predictions+1, self._labels)
names_to_metrics = {
'Accuracy': accuracy0,
'Another accuracy': accuracy1,
}
for k in names_to_metrics:
v = names_to_metrics[k]
tf.scalar_summary(k, v)
summary_writer = tf.train.SummaryWriter(output_dir)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
eval_op = tf.group(update_op0, update_op1)
with self.test_session() as sess:
slim.evaluation.evaluation(
sess,
init_op=init_op,
eval_op=eval_op,
summary_op=tf.merge_all_summaries(),
summary_writer=summary_writer,
global_step=self._global_step)
# Check that the results were saved. The events file may have additional
    # entries, e.g. the event version stamp, so we have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = tf.train.summary_iterator(output_filepath[0])
summaries = [e.summary for e in events if e.summary.value]
values = []
for summary in summaries:
for value in summary.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_metrics:
self.assertAlmostEqual(names_to_metrics[name].eval(),
saved_results[name])
def testWithFeedDict(self):
accuracy, update_op = slim.metrics.streaming_accuracy(
self._predictions, self._labels)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
with self.test_session() as sess:
slim.evaluation.evaluation(
sess,
init_op=init_op,
eval_op=update_op,
eval_op_feed_dict={self._scale: np.ones([], dtype=np.float32)})
self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)
def testWithQueueRunning(self):
strings = ['the', 'cat', 'in', 'the', 'hat']
_ = tf.train.string_input_producer(strings, capacity=5)
accuracy, update_op = slim.metrics.streaming_accuracy(
self._predictions, self._labels)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
with self.test_session() as sess:
slim.evaluation.evaluation(
sess, init_op=init_op, eval_op=update_op)
self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)
if __name__ == '__main__':
tf.test.main()
|
satvikdhandhania/vit-11
|
refs/heads/master
|
build/lib.linux-x86_64-2.7/moca/urls.py
|
3
|
from django.conf.urls.defaults import patterns, url, include
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^log/', include('requestlog.urls')),
(r'^admin/', include(admin.site.urls)),
# Pass anything that doesn't match on to the mrs app
url(r'^',
include('moca.mrs.urls')),
)
from django.conf import settings
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
DeepThoughtTeam/tensorflow
|
refs/heads/master
|
tensorflow/python/client/session.py
|
1
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import threading
import tensorflow.python.platform
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None):
"""Runs operations in the session. See `Session.run()` for details."""
raise NotImplementedError('Run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1], fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip(
[feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape], feed_val))
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
RuntimeError: If an error occurs while creating the TensorFlow
session.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
self._target = target
self._session = None
opts = tf_session.TF_NewSessionOptions(target=target, config=config)
try:
status = tf_session.TF_NewStatus()
try:
self._session = tf_session.TF_NewSession(opts, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
finally:
tf_session.TF_DeleteStatus(status)
finally:
tf_session.TF_DeleteSessionOptions(opts)
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
RuntimeError: If an error occurs while closing the session.
"""
with self._extend_lock:
if self._opened and not self._closed:
self._closed = True
try:
status = tf_session.TF_NewStatus()
tf_session.TF_CloseSession(self._session, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
finally:
tf_session.TF_DeleteStatus(status)
def __del__(self):
self.close()
try:
status = tf_session.TF_NewStatus()
if self._session is not None:
tf_session.TF_DeleteSession(self._session, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
self._session = None
finally:
tf_session.TF_DeleteStatus(status)
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def()
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
[`Tensor.run()`](../../api_docs/python/framework.md#Tensor.run) should be
executed in this session.
```python
    c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use
[`tf.get_default_session()`](#get_default_session).
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default graph is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
# Eventually, this registration could be opened up to support custom
# Tensor expansions. Expects tuples of (Type, fetch_fn, feed_fn),
# where the signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn : Type, TypeVal -> list of (Tensor, value)
# Conceptually, fetch_fn describes how to expand fetch into its
  # component Tensors and how to contract the fetched results back into
# a single return value. feed_fn describes how to unpack a single fed
# value and map it to feeds of a Tensor and its corresponding value.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(ops.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val))),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices),
# The default catches all types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)])]
# pylint: enable=g-long-lambda
def run(self, fetches, feed_dict=None):
"""Runs the operations and evaluates the tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a list of graph elements or a single
graph element, and these determine the return value of this
method. A graph element can be one of the following types:
* If the *i*th element of `fetches` is an
[`Operation`](../../api_docs/python/framework.md#Operation), the *i*th
return value will be `None`.
* If the *i*th element of `fetches` is a
[`Tensor`](../../api_docs/python/framework.md#Tensor), the *i*th return
value will be a numpy ndarray containing the value of that tensor.
* If the *i*th element of `fetches` is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the *i*th return value will be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue)
containing the value of that sparse tensor.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
[placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
Args:
fetches: A single graph element, or a list of graph elements
(described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list (described above).
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
def _fetch_fn(fetch):
for tensor_type, fetch_fn, _ in BaseSession._REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
return fetch_fn(fetch)
raise TypeError('Fetch argument %r has invalid type %r'
% (fetch, type(fetch)))
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn in BaseSession._REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Validate and process fetches.
is_list_fetch = isinstance(fetches, (list, tuple))
if not is_list_fetch:
fetches = [fetches]
unique_fetch_targets = set()
target_list = []
fetch_info = []
for fetch in fetches:
subfetches, fetch_contraction_fn = _fetch_fn(fetch)
subfetch_names = []
for subfetch in subfetches:
try:
fetch_t = self.graph.as_graph_element(subfetch, allow_tensor=True,
allow_operation=True)
if isinstance(fetch_t, ops.Operation):
target_list.append(compat.as_bytes(fetch_t.name))
else:
subfetch_names.append(compat.as_bytes(fetch_t.name))
except TypeError as e:
raise TypeError('Fetch argument %r of %r has invalid type %r, '
'must be a string or Tensor. (%s)'
% (subfetch, fetch, type(subfetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r of %r cannot be interpreted as a '
'Tensor. (%s)' % (subfetch, fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r of %r cannot be interpreted as a '
'Tensor. (%s)' % (subfetch, fetch, str(e)))
unique_fetch_targets.update(subfetch_names)
fetch_info.append((subfetch_names, fetch_contraction_fn))
unique_fetch_targets = list(unique_fetch_targets)
# Create request.
feed_dict_string = {}
# Validate and process feed_dict.
if feed_dict:
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
except Exception as e:
            raise type(e)('Cannot interpret feed_dict key as Tensor: ' + e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype)
if subfeed_t.op.type == 'Placeholder':
if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r'
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
feed_dict_string[compat.as_bytes(subfeed_t.name)] = np_val
# Run request and get response.
results = self._do_run(target_list, unique_fetch_targets, feed_dict_string)
# User may have fetched the same tensor multiple times, but we
# only fetch them from the runtime once. Furthermore, they may
# be wrapped as a tuple of tensors. Here we map the results back
# to what the client asked for.
fetched_results = dict(zip(unique_fetch_targets, results))
ret = []
for fetch_names, fetch_contraction_fn in fetch_info:
if fetch_names:
fetched_vals = [fetched_results[name] for name in fetch_names]
ret.append(fetch_contraction_fn(fetched_vals))
else:
ret.append(None)
if is_list_fetch:
return ret
else:
return ret[0]
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, target_list, fetch_list, feed_dict):
"""Runs a step based on the given fetches and feeds.
Args:
target_list: A list of byte arrays corresponding to names of tensors
or operations to be run to, but not fetched.
fetch_list: A list of byte arrays corresponding to names of tensors to
be fetched and operations to be run.
feed_dict: A dictionary that maps tensor names (as byte arrays) to
numpy ndarrays.
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
"""
try:
# Ensure any changes to the graph are reflected in the runtime.
with self._extend_lock:
if self._graph.version > self._current_version:
graph_def = self._graph.as_graph_def(
from_version=self._current_version)
try:
status = tf_session.TF_NewStatus()
tf_session.TF_ExtendGraph(
self._session, graph_def.SerializeToString(), status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
self._opened = True
finally:
tf_session.TF_DeleteStatus(status)
self._current_version = self._graph.version
return tf_session.TF_Run(self._session, feed_dict, fetch_list,
target_list)
except tf_session.StatusNotOK as e:
e_type, e_value, e_traceback = sys.exc_info()
error_message = compat.as_text(e.error_message)
m = BaseSession._NODEDEF_NAME_RE.search(error_message)
if m is not None:
node_name = m.group(1)
node_def = None
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
op = None
# pylint: disable=protected-access
raise errors._make_specific_exception(node_def, op, error_message,
e.code)
# pylint: enable=protected-access
six.reraise(e_type, e_value, e_traceback)
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
[variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase),
and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release
these resources when they are no longer required. To do this, either
invoke the [`close()`](#Session.close) method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The [`ConfigProto`]
(https://www.tensorflow.org/code/tensorflow/core/framework/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
@@__init__
@@run
@@close
@@graph
@@as_default
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. At present, no value
other than the empty string is supported.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/framework/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
self._context_managers = [self.graph.as_default(), self.as_default()]
def __enter__(self):
for context_manager in self._context_managers:
context_manager.__enter__()
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
for context_manager in reversed(self._context_managers):
context_manager.__exit__(exec_type, exec_value, exec_tb)
self.close()
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
@@__init__
@@close
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. At present, no value
other than the empty string is supported.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_session.__exit__(None, None, None)
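if __name__ == '__main__':
  # Hedged usage sketch (not part of the original file): mirrors the Session
  # docstring above. Assumes the public `tensorflow` package of the same
  # vintage is importable when this module is run directly.
  import tensorflow as tf
  a = tf.constant(5.0)
  b = tf.constant(6.0)
  with tf.Session() as sess:
    # A list fetch returns a list of numpy values, per run()'s docstring.
    print(sess.run([a * b, a + b]))  # -> [30.0, 11.0]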
|
rameshbabu79/fabric
|
refs/heads/master
|
bddtests/events_pb2.py
|
46
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: events.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import fabric_pb2 as fabric__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='events.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x0c\x65vents.proto\x12\x06protos\x1a\x0c\x66\x61\x62ric.proto\"\x88\x01\n\x08Interest\x12\x11\n\teventType\x18\x01 \x01(\t\x12\x33\n\x0cresponseType\x18\x02 \x01(\x0e\x32\x1d.protos.Interest.ResponseType\"4\n\x0cResponseType\x12\x0c\n\x08\x44ONTSEND\x10\x00\x12\x0c\n\x08PROTOBUF\x10\x01\x12\x08\n\x04JSON\x10\x02\",\n\x08Register\x12 \n\x06\x65vents\x18\x01 \x03(\x0b\x32\x10.protos.Interest\"-\n\x07Generic\x12\x11\n\teventType\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"z\n\x05\x45vent\x12$\n\x08register\x18\x01 \x01(\x0b\x32\x10.protos.RegisterH\x00\x12\x1e\n\x05\x62lock\x18\x02 \x01(\x0b\x32\r.protos.BlockH\x00\x12\"\n\x07generic\x18\x03 \x01(\x0b\x32\x0f.protos.GenericH\x00\x42\x07\n\x05\x45vent24\n\x06\x45vents\x12*\n\x04\x43hat\x12\r.protos.Event\x1a\r.protos.Event\"\x00(\x01\x30\x01\x62\x06proto3')
,
dependencies=[fabric__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INTEREST_RESPONSETYPE = _descriptor.EnumDescriptor(
name='ResponseType',
full_name='protos.Interest.ResponseType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DONTSEND', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROTOBUF', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JSON', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=123,
serialized_end=175,
)
_sym_db.RegisterEnumDescriptor(_INTEREST_RESPONSETYPE)
_INTEREST = _descriptor.Descriptor(
name='Interest',
full_name='protos.Interest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eventType', full_name='protos.Interest.eventType', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='responseType', full_name='protos.Interest.responseType', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_INTEREST_RESPONSETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=175,
)
_REGISTER = _descriptor.Descriptor(
name='Register',
full_name='protos.Register',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='events', full_name='protos.Register.events', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=177,
serialized_end=221,
)
_GENERIC = _descriptor.Descriptor(
name='Generic',
full_name='protos.Generic',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eventType', full_name='protos.Generic.eventType', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload', full_name='protos.Generic.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=223,
serialized_end=268,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='protos.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='register', full_name='protos.Event.register', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='block', full_name='protos.Event.block', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='generic', full_name='protos.Event.generic', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='Event', full_name='protos.Event.Event',
index=0, containing_type=None, fields=[]),
],
serialized_start=270,
serialized_end=392,
)
_INTEREST.fields_by_name['responseType'].enum_type = _INTEREST_RESPONSETYPE
_INTEREST_RESPONSETYPE.containing_type = _INTEREST
_REGISTER.fields_by_name['events'].message_type = _INTEREST
_EVENT.fields_by_name['register'].message_type = _REGISTER
_EVENT.fields_by_name['block'].message_type = fabric__pb2._BLOCK
_EVENT.fields_by_name['generic'].message_type = _GENERIC
_EVENT.oneofs_by_name['Event'].fields.append(
_EVENT.fields_by_name['register'])
_EVENT.fields_by_name['register'].containing_oneof = _EVENT.oneofs_by_name['Event']
_EVENT.oneofs_by_name['Event'].fields.append(
_EVENT.fields_by_name['block'])
_EVENT.fields_by_name['block'].containing_oneof = _EVENT.oneofs_by_name['Event']
_EVENT.oneofs_by_name['Event'].fields.append(
_EVENT.fields_by_name['generic'])
_EVENT.fields_by_name['generic'].containing_oneof = _EVENT.oneofs_by_name['Event']
DESCRIPTOR.message_types_by_name['Interest'] = _INTEREST
DESCRIPTOR.message_types_by_name['Register'] = _REGISTER
DESCRIPTOR.message_types_by_name['Generic'] = _GENERIC
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
Interest = _reflection.GeneratedProtocolMessageType('Interest', (_message.Message,), dict(
DESCRIPTOR = _INTEREST,
__module__ = 'events_pb2'
# @@protoc_insertion_point(class_scope:protos.Interest)
))
_sym_db.RegisterMessage(Interest)
Register = _reflection.GeneratedProtocolMessageType('Register', (_message.Message,), dict(
DESCRIPTOR = _REGISTER,
__module__ = 'events_pb2'
# @@protoc_insertion_point(class_scope:protos.Register)
))
_sym_db.RegisterMessage(Register)
Generic = _reflection.GeneratedProtocolMessageType('Generic', (_message.Message,), dict(
DESCRIPTOR = _GENERIC,
__module__ = 'events_pb2'
# @@protoc_insertion_point(class_scope:protos.Generic)
))
_sym_db.RegisterMessage(Generic)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'events_pb2'
# @@protoc_insertion_point(class_scope:protos.Event)
))
_sym_db.RegisterMessage(Event)
import abc
import six
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaEventsServicer(object):
"""Interface exported by the events server
"""
def Chat(self, request_iterator, context):
"""event chatting using Event
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaEventsStub(object):
"""Interface exported by the events server
"""
def Chat(self, request_iterator, timeout):
"""event chatting using Event
"""
raise NotImplementedError()
def beta_create_Events_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import events_pb2
import events_pb2
request_deserializers = {
('protos.Events', 'Chat'): events_pb2.Event.FromString,
}
response_serializers = {
('protos.Events', 'Chat'): events_pb2.Event.SerializeToString,
}
method_implementations = {
('protos.Events', 'Chat'): face_utilities.stream_stream_inline(servicer.Chat),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Events_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import events_pb2
import events_pb2
request_serializers = {
('protos.Events', 'Chat'): events_pb2.Event.SerializeToString,
}
response_deserializers = {
('protos.Events', 'Chat'): events_pb2.Event.FromString,
}
cardinalities = {
'Chat': cardinality.Cardinality.STREAM_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'protos.Events', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
rayNymous/nupic
|
refs/heads/master
|
src/nupic/regions/KNNAnomalyClassifierRegion.py
|
12
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the k Nearest Neighbor classifier region.
"""
import copy
import numpy
from PyRegion import PyRegion
from KNNClassifierRegion import KNNClassifierRegion
from nupic.algorithms.anomaly import computeRawAnomalyScore
from nupic.bindings.math import Random
from nupic.frameworks.opf.exceptions import (CLAModelInvalidRangeError,
CLAModelInvalidArgument)
class KNNAnomalyClassifierRegion(PyRegion):
"""
KNNAnomalyClassifierRegion wraps the KNNClassifierRegion to classify clamodel
state. It allows for individual records to be classified as anomalies and
supports anomaly detection even after the model has learned the anomalous
sequence.
Methods:
compute() - called by clamodel during record processing
getLabels() - return points with classification records
addLabel() - add a set label to a given set of points
removeLabels() - remove labels from a given set of points
Parameters:
trainRecords - number of records to skip before classification
anomalyThreshold - threshold on anomaly score to automatically classify
record as an anomaly
cacheSize - number of records to keep in cache. Can only recalculate
records kept in cache when setting the trainRecords.
"""
@classmethod
def getSpec(cls):
ns = dict(
description=KNNAnomalyClassifierRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
spBottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpTopDownOut=dict(
          description="""The top-down input signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpLrnActiveStateT=dict(
description="""Active cells in the learn state at time T from TP.
This is used to classify on.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False)
),
outputs=dict(
),
parameters=dict(
trainRecords=dict(
description='Number of records to wait for training',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
anomalyThreshold=dict(
description='Threshold used to classify anomalies.',
dataType='Real32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
cacheSize=dict(
description='Number of records to store in cache.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
classificationVectorType=dict(
description="""Vector type to use when classifying.
1 - Vector Column with Difference (TP and SP)
""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=1,
accessMode='ReadWrite'),
activeColumnCount=dict(
description="""Number of active columns in a given step. Typically
equivalent to SP.numActiveColumnsPerInhArea""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=40,
accessMode='ReadWrite'),
classificationMaxDist=dict(
description="""Maximum distance a sample can be from an anomaly
in the classifier to be labeled as an anomaly.
Ex: With rawOverlap distance, a value of 0.65 means that the points
must be at most a distance 0.65 apart from each other. This
translates to they must be at least 35% similar.""",
dataType='Real32',
count=1,
constraints='',
defaultValue=0.65,
accessMode='Create'
)
),
commands=dict(
getLabels=dict(description=
"Returns a list of label dicts with properties ROWID and labels."
"ROWID corresponds to the records id and labels is a list of "
"strings representing the records labels. Takes additional "
"integer properties start and end representing the range that "
"will be returned."),
addLabel=dict(description=
"Takes parameters start, end and labelName. Adds the label "
"labelName to the records from start to end. This will recalculate "
"labels from end to the most recent record."),
removeLabels=dict(description=
"Takes additional parameters start, end, labelFilter. Start and "
"end correspond to range to remove the label. Remove labels from "
"each record with record ROWID in range from start to end, "
"noninclusive of end. Removes all records if labelFilter is None, "
"otherwise only removes the labels eqaul to labelFilter.")
)
)
ns['parameters'].update(KNNClassifierRegion.getSpec()['parameters'])
return ns
__VERSION__ = 1
AUTO_THRESHOLD_CLASSIFIED_LABEL = "Auto Threshold Classification"
AUTO_TAG = " (auto)"
def __init__(self,
trainRecords,
anomalyThreshold,
cacheSize,
classificationVectorType=1,
activeColumnCount=40,
classificationMaxDist=0.30,
**classifierArgs):
# Internal Region Values
self._maxLabelOutputs = 16
self._activeColumnCount = activeColumnCount
self._prevPredictedColumns = numpy.array([])
self._anomalyVectorLength = None
self._classificationMaxDist = classificationMaxDist
self._iteration = 0
# Set to create deterministic classifier
classifierArgs['SVDDimCount'] = None
# Parameters
self.trainRecords = trainRecords
self.anomalyThreshold = anomalyThreshold
self.cacheSize = cacheSize
self.classificationVectorType = classificationVectorType
self._knnclassifierArgs = classifierArgs
self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs)
self.labelResults = []
self.saved_categories = []
self._recordsCache = []
self._version = KNNAnomalyClassifierRegion.__VERSION__
def initialize(self, dims, splitterMaps):
assert tuple(dims) == (1,) * len(dims)
def getParameter(self, name, index=-1):
"""
Get the value of the parameter.
@param name -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
if name == "trainRecords":
return self.trainRecords
elif name == "anomalyThreshold":
return self.anomalyThreshold
elif name == "activeColumnCount":
return self._activeColumnCount
elif name == "classificationMaxDist":
return self._classificationMaxDist
else:
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index)
def setParameter(self, name, index, value):
"""
Set the value of the parameter.
@param name -- the name of the parameter to update, as defined
by the Node Spec.
@param value -- the value to which the parameter is to be set.
"""
if name == "trainRecords":
# Ensure that the trainRecords can only be set to minimum of the ROWID in
# the saved states
if not (isinstance(value, float) or isinstance(value, int)):
        raise CLAModelInvalidArgument("Invalid argument type \'%s\'. trainRecords "
            "must be a number." % (type(value)))
if len(self._recordsCache) > 0 and value < self._recordsCache[0].ROWID:
raise CLAModelInvalidArgument("Invalid value. autoDetectWaitRecord "
"value must be valid record within output stream. Current minimum "
" ROWID in output stream is %d." % (self._recordsCache[0].ROWID))
self.trainRecords = value
      # Remove any labels before the first cached record (won't be used anymore)
self._deleteRangeFromKNN(0, self._recordsCache[0].ROWID)
# Reclassify all states
self.classifyStates()
elif name == "anomalyThreshold":
if not (isinstance(value, float) or isinstance(value, int)):
raise CLAModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(value)))
self.anomalyThreshold = value
self.classifyStates()
elif name == "classificationMaxDist":
if not (isinstance(value, float) or isinstance(value, int)):
raise CLAModelInvalidArgument("Invalid argument type \'%s\'. "
"classificationMaxDist must be a number." % (type(value)))
self._classificationMaxDist = value
self.classifyStates()
elif name == "activeColumnCount":
self._activeColumnCount = value
else:
return PyRegion.setParameter(self, name, index, value)
def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
"""
record = self.constructClassificationRecord(inputs)
#Classify this point after waiting the classification delay
if record.ROWID >= self.getParameter('trainRecords'):
self.classifyState(record)
#Save new classification record and keep history as moving window
self._recordsCache.append(record)
while len(self._recordsCache) > self.cacheSize:
self._recordsCache.pop(0)
self.labelResults = record.anomalyLabel
self._iteration += 1
def getLabelResults(self):
"""
Get the labels of the previously computed record.
----------------
retval - array of strings representing the classification labels
"""
return self.labelResults
def classifyStates(self):
"""
Reclassifies all internal state
"""
for state in self._recordsCache:
self.classifyState(state)
def classifyState(self, state):
"""
Reclassifies given state.
"""
    # Record is before the wait period; do not classify
if state.ROWID < self.getParameter('trainRecords'):
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
# Add the threshold classification label if the score is above threshold;
# otherwise, if the KNN assigned the threshold label, swap it for the auto tag.
if state.anomalyScore >= self.getParameter('anomalyThreshold'):
labelList.append(label)
elif label in labelList:
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both the threshold label and the auto label are present, drop the auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
self._addRecordToKNN(state)
def constructClassificationRecord(self, inputs):
"""
Construct a _CLAClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TP active cells in learn state
2 - SP columns concatenated with error from TP column predictions and SP
"""
# Count the number of unpredicted columns
allSPColumns = inputs["spBottomUpOut"]
activeSPColumns = allSPColumns.nonzero()[0]
score = computeRawAnomalyScore(activeSPColumns, self._prevPredictedColumns)
spSize = len(allSPColumns)
allTPCells = inputs['tpTopDownOut']
tpSize = len(inputs['tpLrnActiveStateT'])
classificationVector = numpy.array([])
if self.classificationVectorType == 1:
# Classification Vector: [---TP Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self.classificationVectorType == 2:
# Classification Vector: [---SP---|---(TP-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeSPColumns.shape[0] > 0:
classificationVector[activeSPColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
activeSPColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 1 ('tpc') or"
" 2 ('sp_tpe'), current value is %s" % (self.classificationVectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = allTPCells.nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=self._iteration, # __numRunCalls, called at
# the beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result
def _addRecordToKNN(self, record):
"""
Adds the record to the KNN classifier.
"""
knn = self._knnclassifier._knn
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
# If record is already in the classifier, overwrite its labeling
if record.ROWID in prototype_idx:
knn.prototypeSetCategory(record.ROWID, category)
return
# Learn this pattern in the knn
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID)
def _deleteRecordsFromKNN(self, recordsToDelete):
"""
Removes the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classifier
"""
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
idsToDelete = ([r.ROWID for r in recordsToDelete if
not r.setByUser and r.ROWID in prototype_idx])
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete)
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
def _deleteRangeFromKNN(self, start=0, end=None):
"""
Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range;
if None, it defaults to one past the greatest stored ROWID.
"""
prototype_idx = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
def _recomputeRecordFromKNN(self, record):
"""
Returns the KNN-classified category of the record, or None if no valid
prior points exist or none are within classificationMaxDist.
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Only use points before record to classify and after the wait period.
classifier_indexes = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self.getParameter('trainRecords')) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
self._knnclassifier.setParameter('inferenceMode', None, True)
self._knnclassifier.setParameter('learningMode', None, False)
self._knnclassifier.compute(inputs, outputs)
self._knnclassifier.setParameter('learningMode', None, True)
classifier_distances = self._knnclassifier.getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = self._knnclassifier.getCategoryList()[indexID]
return category
return None
def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label))
def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber
def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList
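# Worked example (illustrative): with saved_categories == ['A', 'B', 'C'],
# _labelToCategoryNumber maps 'A' -> 1, 'B' -> 2, 'C' -> 4, so
# _labelListToCategoryNumber(['A', 'C']) == 5 and
# _categoryToLabelList(5) == ['A', 'C'].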
def _getStateAnomalyVector(self, state):
"""
Returns a state's anomaly vector, converting it from sparse to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector
def getLabels(self, start=None, end=None):
"""
Get the labels on classified points within range start to end. Not inclusive
of end.
retval - dict of format:
{
'isProcessing': boolean,
'recordLabels': list of results
}
isProcessing - currently always False since recalculation blocks; would
indicate whether reprocessing of records is still being performed
Each item in recordLabels is of format:
{
'ROWID': id of the row,
'labels': list of strings
}
"""
if len(self._recordsCache) == 0:
return {
'isProcessing': False,
'recordLabels': []
}
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
if end <= start:
raise CLAModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'numRecordsStored': len(self._recordsCache)
})
results = {
'isProcessing': False,
'recordLabels': []
}
ROWIDX = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
categories = self._knnclassifier.getCategoryList()
for idx in validIdx:
row = dict(
ROWID=int(ROWIDX[idx]),
labels=self._categoryToLabelList(categories[idx]))
results['recordLabels'].append(row)
return results
def addLabel(self, start, end, labelName):
"""
Add the label labelName to each record with record ROWID in range from
start to end, noninclusive of end.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
"""
if len(self._recordsCache) == 0:
raise CLAModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
"Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = int(self._recordsCache[-1].ROWID)
startID = self._recordsCache[0].ROWID
clippedStart = max(0, start - startID)
clippedEnd = max(0, min( len( self._recordsCache) , end - startID))
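# e.g. with startID == 100 and at least 5 cached records, addLabel(90, 105)
# clips to the cache slice [0, 5)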
if clippedEnd <= clippedStart:
raise CLAModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Add label to range [clippedStart, clippedEnd)
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelName not in state.anomalyLabel:
state.anomalyLabel.append(labelName)
state.setByUser = True
self._addRecordToKNN(state)
assert len(self.saved_categories) > 0
# Recompute [end, ...)
for state in self._recordsCache[clippedEnd:]:
self.classifyState(state)
def removeLabels(self, start=None, end=None, labelFilter=None):
"""
Remove labels from each record with record ROWID in range from
start to end, noninclusive of end. Removes all labels if labelFilter is
None; otherwise removes only the labels equal to labelFilter.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
"""
if len(self._recordsCache) == 0:
raise CLAModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'. Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
startID = self._recordsCache[0].ROWID
clippedStart = 0 if start is None else max(0, start - startID)
clippedEnd = len(self._recordsCache) if end is None else \
max(0, min( len( self._recordsCache) , end - startID))
if clippedEnd <= clippedStart:
raise CLAModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'.", debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Remove records within the cache
recordsToDelete = []
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelFilter is not None:
if labelFilter in state.anomalyLabel:
state.anomalyLabel.remove(labelFilter)
else:
state.anomalyLabel = []
state.setByUser = False
recordsToDelete.append(state)
self._deleteRecordsFromKNN(recordsToDelete)
# Remove records not in cache
self._deleteRangeFromKNN(start, end)
# Recompute [clippedEnd, ...)
for state in self._recordsCache[clippedEnd:]:
self.classifyState(state)
#############################################################################
#
# Methods to support serialization
#
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# Save knnclassifier properties
state['_knnclassifierProps'] = state['_knnclassifier'].__getstate__()
state.pop('_knnclassifier')
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
if '_version' not in state or state['_version'] == 1:
knnclassifierProps = state.pop('_knnclassifierProps')
self.__dict__.update(state)
self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs)
self._knnclassifier.__setstate__(knnclassifierProps)
self._version = KNNAnomalyClassifierRegion.__VERSION__
else:
raise Exception("Invalid KNNAnomalyClassifierRegion version. Current "
"version: %s" % (KNNAnomalyClassifierRegion.__VERSION__))
def diff(self, knnRegion2):
diff = []
toCheck = [((), self.__getstate__(), knnRegion2.__getstate__())]
while toCheck:
keys, a, b = toCheck.pop()
if type(a) != type(b):
diff.append((keys, a, b))
elif 'saved_categories' in keys:
cats1 = set(a)
cats2 = set(b)
if cats1 != cats2:
for k in cats1 - cats2:
diff.append((keys + (k,), a[k], None))
for k in cats2 - cats1:
diff.append((keys + (k,), None, b[k]))
elif '_recordsCache' in keys:
if len(a) != len(b):
diff.append((keys + ('len', ), len(a), len(b)))
for i, v in enumerate(a):
if not (a[i] == b[i]):
diff.append((keys + ('_' + str(i), ), a[i].__getstate__(),
b[i].__getstate__()))
elif isinstance(a, dict):
keys1 = set(a.keys())
keys2 = set(b.keys())
# If there are missing keys, add them to the diff.
if keys1 != keys2:
for k in keys1 - keys2:
diff.append((keys + (k,), a[k], None))
for k in keys2 - keys1:
diff.append((keys + (k,), None, b[k]))
# For matching keys, add the values to the list of things to check.
for k in keys1.union(keys2):
toCheck.append((keys + (k,), a[k], b[k]))
elif (isinstance(a, numpy.ndarray) or isinstance(a, list) or
isinstance(a, tuple)):
if len(a) != len(b):
diff.append((keys + ('len', ), len(a), len(b)))
elif not numpy.array_equal(a, b):
diff.append((keys, a, b))
#for i in xrange(len(a)):
# toCheck.append((keys + (k, i), a[i], b[i]))
elif isinstance(a, Random):
for i, v in enumerate(a.get_state()):
toCheck.append((keys + (i,), v, b.get_state()[i]))
else:
try:
_ = a != b
except ValueError:
raise ValueError(type(a))
if a != b:
diff.append((keys, a, b))
return diff
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
if name == 'labels':
return self._maxLabelOutputs
else:
raise Exception("Invalid output name specified")
class _CLAClassificationRecord(object):
"""
A single record to store data associated with a single prediction for the
anomaly classifier.
ROWID - prediction stream ROWID record number
setByUser - if true, a delete must be called explicitly on this point to
remove its label
"""
__slots__ = ["ROWID", "anomalyScore", "anomalyVector", "anomalyLabel",
"setByUser"]
def __init__(self, ROWID, anomalyScore, anomalyVector, anomalyLabel,
setByUser=False):
self.ROWID = ROWID
self.anomalyScore = anomalyScore
self.anomalyVector = anomalyVector
self.anomalyLabel = anomalyLabel
self.setByUser = setByUser
def __getstate__(self):
obj_slot_values = dict((k, getattr(self, k)) for k in self.__slots__)
return obj_slot_values
def __setstate__(self, data_dict):
for (name, value) in data_dict.iteritems():
setattr(self, name, value)
def __eq__(self, other):
return (self.ROWID == other.ROWID and
self.anomalyScore == other.anomalyScore and
self.anomalyLabel == other.anomalyLabel and
self.setByUser == other.setByUser and
numpy.array_equal(self.anomalyVector, other.anomalyVector))
|
funkring/fdoo
|
refs/heads/8.0-fdoo
|
addons/mass_mailing/wizard/mail_compose_message.py
|
308
|
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class MailComposeMessage(osv.TransientModel):
"""Add concept of mass mailing campaign to the mail.compose.message wizard
"""
_inherit = 'mail.compose.message'
_columns = {
'mass_mailing_campaign_id': fields.many2one(
'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
),
'mass_mailing_id': fields.many2one(
'mail.mass_mailing', 'Mass Mailing'
),
'mass_mailing_name': fields.char('Mass Mailing'),
'mailing_list_ids': fields.many2many(
'mail.mass_mailing.list', string='Mailing List'
),
}
def get_mail_values(self, cr, uid, wizard, res_ids, context=None):
""" Override method that generated the mail content by creating the
mail.mail.statistics values in the o2m of mail_mail, when doing pure
email mass mailing. """
res = super(MailComposeMessage, self).get_mail_values(cr, uid, wizard, res_ids, context=context)
# use only for allowed models in mass mailing
if wizard.composition_mode == 'mass_mail' and \
(wizard.mass_mailing_name or wizard.mass_mailing_id) and \
wizard.model in [item[0] for item in self.pool['mail.mass_mailing']._get_mailing_model(cr, uid, context=context)]:
mass_mailing = wizard.mass_mailing_id
if not mass_mailing:
reply_to_mode = wizard.no_auto_thread and 'email' or 'thread'
reply_to = wizard.no_auto_thread and wizard.reply_to or False
mass_mailing_id = self.pool['mail.mass_mailing'].create(
cr, uid, {
'mass_mailing_campaign_id': wizard.mass_mailing_campaign_id and wizard.mass_mailing_campaign_id.id or False,
'name': wizard.mass_mailing_name,
'template_id': wizard.template_id and wizard.template_id.id or False,
'state': 'done',
'reply_to_mode': reply_to_mode,
'reply_to': reply_to,
'sent_date': fields.datetime.now(),
'body_html': wizard.body,
'mailing_model': wizard.model,
'mailing_domain': wizard.active_domain,
}, context=context)
mass_mailing = self.pool['mail.mass_mailing'].browse(cr, uid, mass_mailing_id, context=context)
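# (0, 0, vals) below is the standard one2many "create" command tuple, so one
# mail.mail.statistics record is created per outgoing email.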
for res_id in res_ids:
res[res_id].update({
'mailing_id': mass_mailing.id,
'statistics_ids': [(0, 0, {
'model': wizard.model,
'res_id': res_id,
'mass_mailing_id': mass_mailing.id,
})],
# email-mode: keep original message for routing
'notification': mass_mailing.reply_to_mode == 'thread',
'auto_delete': True,
})
return res
|
bmanojlovic/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/gluster_volume.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Taneli Leppä <taneli@crasman.fi>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
version_added: "1.9"
options:
name:
required: true
description:
- The volume name
state:
required: true
choices: [ 'present', 'absent', 'started', 'stopped' ]
description:
- Use present/absent to ensure whether a volume exists;
use started/stopped to control its availability.
cluster:
required: false
default: null
description:
- List of hosts to use for probing and brick setup
host:
required: false
default: null
description:
- Override local hostname (for peer probing purposes)
replicas:
required: false
default: null
description:
- Replica count for volume
arbiters:
required: false
default: null
description:
- Arbiter count for volume
version_added: "2.3"
stripes:
required: false
default: null
description:
- Stripe count for volume
disperses:
required: false
default: null
description:
- Disperse count for volume
version_added: "2.2"
redundancies:
required: false
default: null
description:
- Redundancy count for volume
version_added: "2.2"
transport:
required: false
choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
default: 'tcp'
description:
- Transport type for volume
bricks:
required: false
default: null
description:
- Brick paths on servers. Multiple brick paths can be separated by commas
aliases: ['brick']
start_on_create:
choices: [ 'yes', 'no']
required: false
default: 'yes'
description:
- Controls whether the volume is started after creation or not, defaults to yes
rebalance:
choices: [ 'yes', 'no']
required: false
default: 'no'
description:
- Controls whether the cluster is rebalanced after changes
directory:
required: false
default: null
description:
- Directory for limit-usage
options:
required: false
default: null
description:
- A dictionary/hash with options/settings for the volume
quota:
required: false
default: null
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
force:
required: false
default: null
description:
- If a brick is being created in the root partition, the module will fail.
Set force to true to override this behaviour.
notes:
- "Requires cli tools for GlusterFS on servers"
- "Will add new bricks, but not remove them"
author: "Taneli Leppä (@rosmo)"
"""
EXAMPLES = """
- name: create gluster volume
gluster_volume:
state: present
name: test1
bricks: /bricks/brick1/g1
rebalance: yes
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: tune
gluster_volume:
state: present
name: test1
options:
performance.cache-size: 256MB
- name: start gluster volume
gluster_volume:
state: started
name: test1
- name: limit usage
gluster_volume:
state: present
name: test1
directory: /foo
quota: 20.0MB
- name: stop gluster volume
gluster_volume:
state: stopped
name: test1
- name: remove gluster volume
gluster_volume:
state: absent
name: test1
- name: create gluster volume with multiple bricks
gluster_volume:
state: present
name: test2
bricks: /bricks/brick1/g2,/bricks/brick2/g2
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
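# Illustrative sketch (not part of the original examples): a dispersed volume;
# the disperse/redundancy counts must match your brick layout.
- name: create gluster dispersed volume
gluster_volume:
state: present
name: test3
disperses: 3
redundancies: 1
bricks: /bricks/brick1/g3
cluster:
- 192.0.2.10
- 192.0.2.11
- 192.0.2.12
run_once: true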
"""
import re
import shutil
import time
import socket
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception:
e = get_exception()
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)))
return out
def run_gluster_nofail(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
return None
return out
def run_gluster_yes(gargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, data='y\n')
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
return out
def get_peers():
out = run_gluster([ 'peer', 'status'])
i = 0
peers = {}
hostname = None
uuid = None
state = None
shortNames = False
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
shortNames = False
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [ uuid, state ]
elif row.lower() == 'other names:':
shortNames = True
elif row != '' and shortNames == True:
peers[row] = [ uuid, state ]
elif row == '':
shortNames = False
return peers
def get_volumes():
out = run_gluster([ 'volume', 'info' ])
volumes = {}
volume = {}
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'volume name':
volume['name'] = value
volume['options'] = {}
volume['quota'] = False
if key.lower() == 'volume id':
volume['id'] = value
if key.lower() == 'status':
volume['status'] = value
if key.lower() == 'transport-type':
volume['transport'] = value
if value.lower().endswith(' (arbiter)'):
if not 'arbiters' in volume:
volume['arbiters'] = []
value = value[:-10]
volume['arbiters'].append(value)
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
if not 'bricks' in volume:
volume['bricks'] = []
volume['bricks'].append(value)
# Volume options
if '.' in key:
if not 'options' in volume:
volume['options'] = {}
volume['options'][key] = value
if key == 'features.quota' and value == 'on':
volume['quota'] = True
else:
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
if len(volume) > 0:
volumes[volume['name']] = volume
volume = {}
return volumes
def get_quotas(name, nofail):
quotas = {}
if nofail:
out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
if not out:
return quotas
else:
out = run_gluster([ 'volume', 'quota', name, 'list' ])
for row in out.split('\n'):
if row[:1] == '/':
q = re.split(r'\s+', row)
quotas[q[0]] = q[1]
return quotas
def wait_for_peer(host):
for x in range(0, 4):
peers = get_peers()
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
return True
time.sleep(1)
return False
def probe(host, myhostname):
global module
out = run_gluster([ 'peer', 'probe', host ])
if out.find('localhost') == -1 and not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
changed = True
def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
probe(host, myhostname)
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
args = [ 'volume', 'create' ]
args.append(name)
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
if arbiter:
args.append('arbiter')
args.append(str(arbiter))
if disperse:
args.append('disperse')
args.append(str(disperse))
if redundancy:
args.append('redundancy')
args.append(str(redundancy))
args.append('transport')
args.append(transport)
for brick in bricks:
for host in hosts:
args.append(('%s:%s' % (host, brick)))
if force:
args.append('force')
run_gluster(args)
def start_volume(name):
run_gluster([ 'volume', 'start', name ])
def stop_volume(name):
run_gluster_yes([ 'volume', 'stop', name ])
def set_volume_option(name, option, parameter):
run_gluster([ 'volume', 'set', name, option, parameter ])
def add_bricks(name, new_bricks, stripe, replica, force):
args = [ 'volume', 'add-brick', name ]
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
args.extend(new_bricks)
if force:
args.append('force')
run_gluster(args)
def do_rebalance(name):
run_gluster([ 'volume', 'rebalance', name, 'start' ])
def enable_quota(name):
run_gluster([ 'volume', 'quota', name, 'enable' ])
def set_quota(name, directory, value):
run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
def main():
### MAIN ###
global module
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['volume']),
state=dict(required=True, choices=[ 'present', 'absent', 'started', 'stopped', 'rebalanced' ]),
cluster=dict(required=False, default=None, type='list'),
host=dict(required=False, default=None),
stripes=dict(required=False, default=None, type='int'),
replicas=dict(required=False, default=None, type='int'),
arbiters=dict(required=False, default=None, type='int'),
disperses=dict(required=False, default=None, type='int'),
redundancies=dict(required=False, default=None, type='int'),
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
bricks=dict(required=False, default=None, aliases=['brick']),
start_on_create=dict(required=False, default=True, type='bool'),
rebalance=dict(required=False, default=False, type='bool'),
options=dict(required=False, default={}, type='dict'),
quota=dict(required=False),
directory=dict(required=False, default=None),
force=dict(required=False, default=False, type='bool'),
)
)
global glusterbin
glusterbin = module.get_bin_path('gluster', True)
changed = False
action = module.params['state']
volume_name = module.params['name']
cluster= module.params['cluster']
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
arbiters = module.params['arbiters']
disperses = module.params['disperses']
redundancies = module.params['redundancies']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
rebalance = module.boolean(module.params['rebalance'])
force = module.boolean(module.params['force'])
if not myhostname:
myhostname = socket.gethostname()
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
cluster = cluster[0:-1]
if cluster is None or cluster[0] == '':
cluster = [myhostname]
if brick_paths is not None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
brick_paths = [brick_paths]
options = module.params['options']
quota = module.params['quota']
directory = module.params['directory']
# get current state info
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
quotas = get_quotas(volume_name, True)
# do the work!
if action == 'absent':
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
run_gluster_yes([ 'volume', 'delete', volume_name ])
changed = True
if action == 'present':
probe_all_peers(cluster, peers, myhostname)
# create if it doesn't exist
if volume_name not in volumes:
create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
start_volume(volume_name)
changed = True
# switch bricks
new_bricks = []
removed_bricks = []
all_bricks = []
for node in cluster:
for brick_path in brick_paths:
brick = '%s:%s' % (node, brick_path)
all_bricks.append(brick)
if brick not in volumes[volume_name]['bricks']:
new_bricks.append(brick)
# this module does not yet remove bricks, but we check those anyways
for brick in volumes[volume_name]['bricks']:
if brick not in all_bricks:
removed_bricks.append(brick)
if new_bricks:
add_bricks(volume_name, new_bricks, stripes, replicas, force)
changed = True
# handle quotas
if quota:
if not volumes[volume_name]['quota']:
enable_quota(volume_name)
quotas = get_quotas(volume_name, False)
if directory not in quotas or quotas[directory] != quota:
set_quota(volume_name, directory, quota)
changed = True
# set options
for option in options.keys():
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
set_volume_option(volume_name, option, options[option])
changed = True
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
if action != 'absent' and volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
if volumes[volume_name]['status'].lower() != 'started':
start_volume(volume_name)
changed = True
if action == 'stopped':
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
changed = True
if changed:
volumes = get_volumes()
if rebalance:
do_rebalance(volume_name)
facts = {}
facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
module.exit_json(changed=changed, ansible_facts=facts)
if __name__ == '__main__':
main()
|
aosagie/spark
|
refs/heads/master
|
python/pyspark/ml/linalg/__init__.py
|
4
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
MLlib utilities for linear algebra. For dense vectors, MLlib
uses the NumPy C{array} type, so you can simply pass NumPy arrays
around. For sparse vectors, users can construct a L{SparseVector}
object from MLlib or pass SciPy C{scipy.sparse} column vectors if
SciPy is available in their environment.
"""
import sys
import array
import struct
if sys.version >= '3':
basestring = str
xrange = range
import copyreg as copy_reg
long = int
else:
from itertools import izip as zip
import copy_reg
import numpy as np
from pyspark import since
from pyspark.sql.types import UserDefinedType, StructField, StructType, ArrayType, DoubleType, \
IntegerType, ByteType, BooleanType
__all__ = ['Vector', 'DenseVector', 'SparseVector', 'Vectors',
'Matrix', 'DenseMatrix', 'SparseMatrix', 'Matrices']
if sys.version_info[:2] == (2, 7):
# speed up pickling array in Python 2.7
def fast_pickle_array(ar):
return array.array, (ar.typecode, ar.tostring())
copy_reg.pickle(array.array, fast_pickle_array)
# Check whether we have SciPy. MLlib works without it too, but if we have it, some methods,
# such as _dot and _serialize_double_vector, start to support scipy.sparse matrices.
try:
import scipy.sparse
_have_scipy = True
except ImportError:
# No SciPy in environment, but that's okay
_have_scipy = False
def _convert_to_vector(l):
if isinstance(l, Vector):
return l
elif type(l) in (array.array, np.array, np.ndarray, list, tuple, xrange):
return DenseVector(l)
elif _have_scipy and scipy.sparse.issparse(l):
assert l.shape[1] == 1, "Expected column vector"
# Make sure the converted csc_matrix has sorted indices.
csc = l.tocsc()
if not csc.has_sorted_indices:
csc.sort_indices()
return SparseVector(l.shape[0], csc.indices, csc.data)
else:
raise TypeError("Cannot convert type %s into Vector" % type(l))
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v))
def _format_float(f, digits=4):
s = str(round(f, digits))
if '.' in s:
s = s[:s.index('.') + 1 + digits]
return s
def _format_float_list(l):
return [_format_float(x) for x in l]
def _double_to_long_bits(value):
if np.isnan(value):
value = float('nan')
# pack double into 64 bits, then unpack as long int
return struct.unpack('Q', struct.pack('d', value))[0]
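# Example: _double_to_long_bits(1.0) == 0x3FF0000000000000 == 4607182418800017408,
# i.e. the IEEE-754 bit pattern of 1.0 reinterpreted as an unsigned 64-bit int.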
class VectorUDT(UserDefinedType):
"""
SQL user-defined type (UDT) for Vector.
"""
@classmethod
def sqlType(cls):
return StructType([
StructField("type", ByteType(), False),
StructField("size", IntegerType(), True),
StructField("indices", ArrayType(IntegerType(), False), True),
StructField("values", ArrayType(DoubleType(), False), True)])
@classmethod
def module(cls):
return "pyspark.ml.linalg"
@classmethod
def scalaUDT(cls):
return "org.apache.spark.ml.linalg.VectorUDT"
def serialize(self, obj):
if isinstance(obj, SparseVector):
indices = [int(i) for i in obj.indices]
values = [float(v) for v in obj.values]
return (0, obj.size, indices, values)
elif isinstance(obj, DenseVector):
values = [float(v) for v in obj]
return (1, None, None, values)
else:
raise TypeError("cannot serialize %r of type %r" % (obj, type(obj)))
def deserialize(self, datum):
assert len(datum) == 4, \
"VectorUDT.deserialize given row with length %d but requires 4" % len(datum)
tpe = datum[0]
if tpe == 0:
return SparseVector(datum[1], datum[2], datum[3])
elif tpe == 1:
return DenseVector(datum[3])
else:
raise ValueError("do not recognize type %r" % tpe)
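# Round-trip sketch: serialize(SparseVector(4, [1], [2.0])) returns the tuple
# (0, 4, [1], [2.0]); feeding that tuple back through deserialize() rebuilds
# an equal SparseVector.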
def simpleString(self):
return "vector"
class MatrixUDT(UserDefinedType):
"""
SQL user-defined type (UDT) for Matrix.
"""
@classmethod
def sqlType(cls):
return StructType([
StructField("type", ByteType(), False),
StructField("numRows", IntegerType(), False),
StructField("numCols", IntegerType(), False),
StructField("colPtrs", ArrayType(IntegerType(), False), True),
StructField("rowIndices", ArrayType(IntegerType(), False), True),
StructField("values", ArrayType(DoubleType(), False), True),
StructField("isTransposed", BooleanType(), False)])
@classmethod
def module(cls):
return "pyspark.ml.linalg"
@classmethod
def scalaUDT(cls):
return "org.apache.spark.ml.linalg.MatrixUDT"
def serialize(self, obj):
if isinstance(obj, SparseMatrix):
colPtrs = [int(i) for i in obj.colPtrs]
rowIndices = [int(i) for i in obj.rowIndices]
values = [float(v) for v in obj.values]
return (0, obj.numRows, obj.numCols, colPtrs,
rowIndices, values, bool(obj.isTransposed))
elif isinstance(obj, DenseMatrix):
values = [float(v) for v in obj.values]
return (1, obj.numRows, obj.numCols, None, None, values,
bool(obj.isTransposed))
else:
raise TypeError("cannot serialize type %r" % (type(obj)))
def deserialize(self, datum):
assert len(datum) == 7, \
"MatrixUDT.deserialize given row with length %d but requires 7" % len(datum)
tpe = datum[0]
if tpe == 0:
return SparseMatrix(*datum[1:])
elif tpe == 1:
return DenseMatrix(datum[1], datum[2], datum[5], datum[6])
else:
raise ValueError("do not recognize type %r" % tpe)
def simpleString(self):
return "matrix"
class Vector(object):
__UDT__ = VectorUDT()
"""
Abstract class for DenseVector and SparseVector
"""
def toArray(self):
"""
Convert the vector into a numpy.ndarray
:return: numpy.ndarray
"""
raise NotImplementedError
class DenseVector(Vector):
"""
A dense vector represented by a value array. We use a numpy array for
storage, and arithmetic is delegated to the underlying numpy
array.
>>> v = Vectors.dense([1.0, 2.0])
>>> u = Vectors.dense([3.0, 4.0])
>>> v + u
DenseVector([4.0, 6.0])
>>> 2 - v
DenseVector([1.0, 0.0])
>>> v / 2
DenseVector([0.5, 1.0])
>>> v * u
DenseVector([3.0, 8.0])
>>> u / v
DenseVector([3.0, 2.0])
>>> u % 2
DenseVector([1.0, 0.0])
>>> -v
DenseVector([-1.0, -2.0])
"""
def __init__(self, ar):
if isinstance(ar, bytes):
ar = np.frombuffer(ar, dtype=np.float64)
elif not isinstance(ar, np.ndarray):
ar = np.array(ar, dtype=np.float64)
if ar.dtype != np.float64:
ar = ar.astype(np.float64)
self.array = ar
def __reduce__(self):
return DenseVector, (self.array.tostring(),)
def numNonzeros(self):
"""
Number of nonzero elements. This scans all active values and counts nonzeros
"""
return np.count_nonzero(self.array)
def norm(self, p):
"""
Calculates the norm of a DenseVector.
>>> a = DenseVector([0, -1, 2, -3])
>>> a.norm(2)
3.7...
>>> a.norm(1)
6.0
"""
return np.linalg.norm(self.array, p)
def dot(self, other):
"""
Compute the dot product of two Vectors. We support
(Numpy array, list, SparseVector, or SciPy sparse)
and a target NumPy array that is either 1- or 2-dimensional.
Equivalent to calling numpy.dot of the two vectors.
>>> dense = DenseVector(array.array('d', [1., 2.]))
>>> dense.dot(dense)
5.0
>>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))
4.0
>>> dense.dot(range(1, 3))
5.0
>>> dense.dot(np.array(range(1, 3)))
5.0
>>> dense.dot([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
array([ 5., 11.])
>>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if type(other) == np.ndarray:
if other.ndim > 1:
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.array, other)
elif _have_scipy and scipy.sparse.issparse(other):
assert len(self) == other.shape[0], "dimension mismatch"
return other.transpose().dot(self.toArray())
else:
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.dot(self)
elif isinstance(other, Vector):
return np.dot(self.toArray(), other.toArray())
else:
return np.dot(self.toArray(), other)
def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff)
def toArray(self):
"""
Returns a numpy.ndarray
"""
return self.array
@property
def values(self):
"""
Returns the underlying numpy.ndarray of values
"""
return self.array
def __getitem__(self, item):
return self.array[item]
def __len__(self):
return len(self.array)
def __str__(self):
return "[" + ",".join([str(v) for v in self.array]) + "]"
def __repr__(self):
return "DenseVector([%s])" % (', '.join(_format_float(i) for i in self.array))
def __eq__(self, other):
if isinstance(other, DenseVector):
return np.array_equal(self.array, other.array)
elif isinstance(other, SparseVector):
if len(self) != other.size:
return False
return Vectors._equals(list(xrange(len(self))), self.array, other.indices, other.values)
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
size = len(self)
result = 31 + size
nnz = 0
i = 0
while i < size and nnz < 128:
if self.array[i] != 0:
result = 31 * result + i
bits = _double_to_long_bits(self.array[i])
result = 31 * result + (bits ^ (bits >> 32))
nnz += 1
i += 1
return result
def __getattr__(self, item):
return getattr(self.array, item)
def __neg__(self):
return DenseVector(-self.array)
def _delegate(op):
def func(self, other):
if isinstance(other, DenseVector):
other = other.array
return DenseVector(getattr(self.array, op)(other))
return func
__add__ = _delegate("__add__")
__sub__ = _delegate("__sub__")
__mul__ = _delegate("__mul__")
__div__ = _delegate("__div__")
__truediv__ = _delegate("__truediv__")
__mod__ = _delegate("__mod__")
__radd__ = _delegate("__radd__")
__rsub__ = _delegate("__rsub__")
__rmul__ = _delegate("__rmul__")
__rdiv__ = _delegate("__rdiv__")
__rtruediv__ = _delegate("__rtruediv__")
__rmod__ = _delegate("__rmod__")
class SparseVector(Vector):
"""
A simple sparse vector class for passing data to MLlib. Users may
alternatively pass SciPy's C{scipy.sparse} data types.
"""
def __init__(self, size, *args):
"""
Create a sparse vector, using either a dictionary, a list of
(index, value) pairs, or two separate arrays of indices and
values (sorted by index).
:param size: Size of the vector.
:param args: Active entries, as a dictionary {index: value, ...},
a list of tuples [(index, value), ...], or a list of strictly
increasing indices and a list of corresponding values [index, ...],
[value, ...]. Inactive entries are treated as zeros.
>>> SparseVector(4, {1: 1.0, 3: 5.5})
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, [(1, 1.0), (3, 5.5)])
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, [1, 3], [1.0, 5.5])
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, {1:1.0, 6:2.0})
Traceback (most recent call last):
...
AssertionError: Index 6 is out of the size of vector with size=4
>>> SparseVector(4, {-1:1.0})
Traceback (most recent call last):
...
AssertionError: Contains negative index -1
"""
self.size = int(size)
""" Size of the vector. """
assert 1 <= len(args) <= 2, "must pass either 2 or 3 arguments"
if len(args) == 1:
pairs = args[0]
if type(pairs) == dict:
pairs = pairs.items()
pairs = sorted(pairs)
self.indices = np.array([p[0] for p in pairs], dtype=np.int32)
""" A list of indices corresponding to active entries. """
self.values = np.array([p[1] for p in pairs], dtype=np.float64)
""" A list of values corresponding to active entries. """
else:
if isinstance(args[0], bytes):
assert isinstance(args[1], bytes), "values should be string too"
if args[0]:
self.indices = np.frombuffer(args[0], np.int32)
self.values = np.frombuffer(args[1], np.float64)
else:
# np.frombuffer() doesn't work well with empty string in older version
self.indices = np.array([], dtype=np.int32)
self.values = np.array([], dtype=np.float64)
else:
self.indices = np.array(args[0], dtype=np.int32)
self.values = np.array(args[1], dtype=np.float64)
assert len(self.indices) == len(self.values), "index and value arrays not same length"
for i in xrange(len(self.indices) - 1):
if self.indices[i] >= self.indices[i + 1]:
raise TypeError(
"Indices %s and %s are not strictly increasing"
% (self.indices[i], self.indices[i + 1]))
if self.indices.size > 0:
assert np.max(self.indices) < self.size, \
"Index %d is out of the size of vector with size=%d" \
% (np.max(self.indices), self.size)
assert np.min(self.indices) >= 0, \
"Contains negative index %d" % (np.min(self.indices))
def numNonzeros(self):
"""
Number of nonzero elements. This scans all active values and counts nonzeros.
"""
return np.count_nonzero(self.values)
def norm(self, p):
"""
Calculates the norm of a SparseVector.
>>> a = SparseVector(4, [0, 1], [3., -4.])
>>> a.norm(1)
7.0
>>> a.norm(2)
5.0
"""
return np.linalg.norm(self.values, p)
def __reduce__(self):
return (
SparseVector,
(self.size, self.indices.tostring(), self.values.tostring()))
def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other))
def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other))
def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr
def __len__(self):
return self.size
def __str__(self):
inds = "[" + ",".join([str(i) for i in self.indices]) + "]"
vals = "[" + ",".join([str(v) for v in self.values]) + "]"
return "(" + ",".join((str(self.size), inds, vals)) + ")"
def __repr__(self):
inds = self.indices
vals = self.values
entries = ", ".join(["{0}: {1}".format(inds[i], _format_float(vals[i]))
for i in xrange(len(inds))])
return "SparseVector({0}, {{{1}}})".format(self.size, entries)
def __eq__(self, other):
if isinstance(other, SparseVector):
return other.size == self.size and np.array_equal(other.indices, self.indices) \
and np.array_equal(other.values, self.values)
elif isinstance(other, DenseVector):
if self.size != len(other):
return False
return Vectors._equals(self.indices, self.values, list(xrange(len(other))), other.array)
return False
def __getitem__(self, index):
inds = self.indices
vals = self.values
if not isinstance(index, int):
raise TypeError(
"Indices must be of type integer, got type %s" % type(index))
if index >= self.size or index < -self.size:
raise IndexError("Index %d out of bounds." % index)
if index < 0:
index += self.size
if (inds.size == 0) or (index > inds.item(-1)):
return 0.
insert_index = np.searchsorted(inds, index)
row_ind = inds[insert_index]
if row_ind == index:
return vals[insert_index]
return 0.
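# Illustration: SparseVector(4, [1, 3], [3.0, 4.0])[3] -> 4.0; indices 0 and 2
# are not stored, so [0] and [2] -> 0.0; negative indices wrap, so [-1] -> 4.0.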
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
result = 31 + self.size
nnz = 0
i = 0
while i < len(self.values) and nnz < 128:
if self.values[i] != 0:
result = 31 * result + int(self.indices[i])
bits = _double_to_long_bits(self.values[i])
result = 31 * result + (bits ^ (bits >> 32))
nnz += 1
i += 1
return result
class Vectors(object):
"""
Factory methods for working with vectors.
.. note:: Dense vectors are simply represented as NumPy array objects,
so there is no need to convert them for use in MLlib. For sparse vectors,
the factory methods in this class create an MLlib-compatible type, or users
can pass in SciPy's C{scipy.sparse} column vectors.
"""
@staticmethod
def sparse(size, *args):
"""
Create a sparse vector, using either a dictionary, a list of
(index, value) pairs, or two separate arrays of indices and
values (sorted by index).
:param size: Size of the vector.
:param args: Non-zero entries, as a dictionary, list of tuples,
or two sorted lists containing indices and values.
>>> Vectors.sparse(4, {1: 1.0, 3: 5.5})
SparseVector(4, {1: 1.0, 3: 5.5})
>>> Vectors.sparse(4, [(1, 1.0), (3, 5.5)])
SparseVector(4, {1: 1.0, 3: 5.5})
>>> Vectors.sparse(4, [1, 3], [1.0, 5.5])
SparseVector(4, {1: 1.0, 3: 5.5})
"""
return SparseVector(size, *args)
@staticmethod
def dense(*elements):
"""
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
"""
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
# it's list, numpy.array or other iterable object.
elements = elements[0]
return DenseVector(elements)
@staticmethod
def squared_distance(v1, v2):
"""
Squared distance between two vectors.
a and b can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
"""
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2)
@staticmethod
def norm(vector, p):
"""
Find norm of the given vector.
"""
return _convert_to_vector(vector).norm(p)
@staticmethod
def zeros(size):
return DenseVector(np.zeros(size))
@staticmethod
def _equals(v1_indices, v1_values, v2_indices, v2_values):
"""
Check equality between sparse/dense vectors;
v1_indices and v2_indices are assumed to be strictly increasing.
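For example (a dense vector is passed as its full index/value arrays):
>>> Vectors._equals([0, 1], [1., 2.], [0, 1], [1., 2.])
True
>>> Vectors._equals([0], [1.], [1], [1.])
False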
"""
v1_size = len(v1_values)
v2_size = len(v2_values)
k1 = 0
k2 = 0
all_equal = True
while all_equal:
while k1 < v1_size and v1_values[k1] == 0:
k1 += 1
while k2 < v2_size and v2_values[k2] == 0:
k2 += 1
if k1 >= v1_size or k2 >= v2_size:
return k1 >= v1_size and k2 >= v2_size
all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
k1 += 1
k2 += 1
return all_equal
class Matrix(object):
__UDT__ = MatrixUDT()
"""
Represents a local matrix.
"""
def __init__(self, numRows, numCols, isTransposed=False):
self.numRows = numRows
self.numCols = numCols
self.isTransposed = isTransposed
def toArray(self):
"""
Returns its elements in a NumPy ndarray.
"""
raise NotImplementedError
@staticmethod
def _convert_to_array(array_like, dtype):
"""
Convert Matrix attributes which are array-like or buffer to array.
"""
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype)
class DenseMatrix(Matrix):
"""
Column-major dense matrix.
"""
def __init__(self, numRows, numCols, values, isTransposed=False):
Matrix.__init__(self, numRows, numCols, isTransposed)
values = self._convert_to_array(values, np.float64)
assert len(values) == numRows * numCols
self.values = values
def __reduce__(self):
return DenseMatrix, (
self.numRows, self.numCols, self.values.tostring(),
int(self.isTransposed))
def __str__(self):
"""
Pretty printing of a DenseMatrix
>>> dm = DenseMatrix(2, 2, range(4))
>>> print(dm)
DenseMatrix([[ 0., 2.],
[ 1., 3.]])
>>> dm = DenseMatrix(2, 2, range(4), isTransposed=True)
>>> print(dm)
DenseMatrix([[ 0., 1.],
[ 2., 3.]])
"""
# Inspired by __repr__ in scipy matrices.
array_lines = repr(self.toArray()).splitlines()
# We need to adjust six spaces which is the difference in number
# of letters between "DenseMatrix" and "array"
x = '\n'.join([(" " * 6 + line) for line in array_lines[1:]])
return array_lines[0].replace("array", "DenseMatrix") + "\n" + x
def __repr__(self):
"""
Representation of a DenseMatrix
>>> dm = DenseMatrix(2, 2, range(4))
>>> dm
DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False)
"""
# If the number of values is less than seventeen, return them as is.
# Else return the first eight and last eight values.
if len(self.values) < 17:
entries = _format_float_list(self.values)
else:
entries = (
_format_float_list(self.values[:8]) +
["..."] +
_format_float_list(self.values[-8:])
)
entries = ", ".join(entries)
return "DenseMatrix({0}, {1}, [{2}], {3})".format(
self.numRows, self.numCols, entries, self.isTransposed)
def toArray(self):
"""
        Return a numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]])
"""
if self.isTransposed:
return np.asfortranarray(
self.values.reshape((self.numRows, self.numCols)))
else:
return self.values.reshape((self.numRows, self.numCols), order='F')
def toSparse(self):
"""Convert to SparseMatrix"""
if self.isTransposed:
values = np.ravel(self.toArray(), order='F')
else:
values = self.values
indices = np.nonzero(values)[0]
colCounts = np.bincount(indices // self.numRows)
colPtrs = np.cumsum(np.hstack(
(0, colCounts, np.zeros(self.numCols - colCounts.size))))
values = values[indices]
rowIndices = indices % self.numRows
return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)
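    # Comment-only walk-through of the CSC arrays built above for the
    # column-major 2 x 2 matrix with values [1., 0., 0., 2.] (i.e.
    # [[1., 0.], [0., 2.]]): the nonzero positions are [0, 3], giving
    # rowIndices = [0, 1], colPtrs = [0, 1, 2] and values = [1., 2.].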
def __getitem__(self, indices):
i, j = indices
if i < 0 or i >= self.numRows:
raise IndexError("Row index %d is out of range [0, %d)"
% (i, self.numRows))
if j >= self.numCols or j < 0:
raise IndexError("Column index %d is out of range [0, %d)"
% (j, self.numCols))
if self.isTransposed:
return self.values[i * self.numCols + j]
else:
return self.values[i + j * self.numRows]
def __eq__(self, other):
if (self.numRows != other.numRows or self.numCols != other.numCols):
return False
if isinstance(other, SparseMatrix):
return np.all(self.toArray() == other.toArray())
self_values = np.ravel(self.toArray(), order='F')
other_values = np.ravel(other.toArray(), order='F')
return np.all(self_values == other_values)
class SparseMatrix(Matrix):
"""Sparse Matrix stored in CSC format."""
def __init__(self, numRows, numCols, colPtrs, rowIndices, values,
isTransposed=False):
Matrix.__init__(self, numRows, numCols, isTransposed)
self.colPtrs = self._convert_to_array(colPtrs, np.int32)
self.rowIndices = self._convert_to_array(rowIndices, np.int32)
self.values = self._convert_to_array(values, np.float64)
if self.isTransposed:
if self.colPtrs.size != numRows + 1:
raise ValueError("Expected colPtrs of size %d, got %d."
% (numRows + 1, self.colPtrs.size))
else:
if self.colPtrs.size != numCols + 1:
raise ValueError("Expected colPtrs of size %d, got %d."
% (numCols + 1, self.colPtrs.size))
        if self.rowIndices.size != self.values.size:
            raise ValueError("Expected rowIndices of length %d, got %d."
                             % (self.values.size, self.rowIndices.size))
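    # Orientation sketch for the CSC layout validated above: the dense matrix
    #   [[2., 0.],
    #    [3., 4.]]
    # is stored as colPtrs = [0, 2, 3], rowIndices = [0, 1, 1] and
    # values = [2., 3., 4.]; column j owns values[colPtrs[j]:colPtrs[j + 1]].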
def __str__(self):
"""
Pretty printing of a SparseMatrix
>>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])
>>> print(sm1)
2 X 2 CSCMatrix
(0,0) 2.0
(1,0) 3.0
(1,1) 4.0
>>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True)
>>> print(sm1)
2 X 2 CSRMatrix
(0,0) 2.0
(0,1) 3.0
(1,1) 4.0
"""
spstr = "{0} X {1} ".format(self.numRows, self.numCols)
if self.isTransposed:
spstr += "CSRMatrix\n"
else:
spstr += "CSCMatrix\n"
cur_col = 0
smlist = []
# Display first 16 values.
if len(self.values) <= 16:
zipindval = zip(self.rowIndices, self.values)
else:
zipindval = zip(self.rowIndices[:16], self.values[:16])
for i, (rowInd, value) in enumerate(zipindval):
if self.colPtrs[cur_col + 1] <= i:
cur_col += 1
if self.isTransposed:
smlist.append('({0},{1}) {2}'.format(
cur_col, rowInd, _format_float(value)))
else:
smlist.append('({0},{1}) {2}'.format(
rowInd, cur_col, _format_float(value)))
spstr += "\n".join(smlist)
if len(self.values) > 16:
spstr += "\n.." * 2
return spstr
def __repr__(self):
"""
Representation of a SparseMatrix
>>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])
>>> sm1
SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False)
"""
rowIndices = list(self.rowIndices)
colPtrs = list(self.colPtrs)
if len(self.values) <= 16:
values = _format_float_list(self.values)
else:
values = (
_format_float_list(self.values[:8]) +
["..."] +
_format_float_list(self.values[-8:])
)
rowIndices = rowIndices[:8] + ["..."] + rowIndices[-8:]
if len(self.colPtrs) > 16:
colPtrs = colPtrs[:8] + ["..."] + colPtrs[-8:]
values = ", ".join(values)
rowIndices = ", ".join([str(ind) for ind in rowIndices])
colPtrs = ", ".join([str(ptr) for ptr in colPtrs])
return "SparseMatrix({0}, {1}, [{2}], [{3}], [{4}], {5})".format(
self.numRows, self.numCols, colPtrs, rowIndices,
values, self.isTransposed)
def __reduce__(self):
return SparseMatrix, (
self.numRows, self.numCols, self.colPtrs.tostring(),
self.rowIndices.tostring(), self.values.tostring(),
int(self.isTransposed))
def __getitem__(self, indices):
i, j = indices
if i < 0 or i >= self.numRows:
raise IndexError("Row index %d is out of range [0, %d)"
% (i, self.numRows))
if j < 0 or j >= self.numCols:
raise IndexError("Column index %d is out of range [0, %d)"
% (j, self.numCols))
# If a CSR matrix is given, then the row index should be searched
# for in ColPtrs, and the column index should be searched for in the
# corresponding slice obtained from rowIndices.
if self.isTransposed:
j, i = i, j
colStart = self.colPtrs[j]
colEnd = self.colPtrs[j + 1]
nz = self.rowIndices[colStart: colEnd]
ind = np.searchsorted(nz, i) + colStart
if ind < colEnd and self.rowIndices[ind] == i:
return self.values[ind]
else:
return 0.0
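    # Comment-only trace of the lookup above on the matrix from the __str__
    # doctest (colPtrs [0, 2, 3], rowIndices [0, 1, 1], values [2., 3., 4.]):
    # sm1[1, 0] slices rowIndices[0:2] == [0, 1], searchsorted locates row 1
    # at offset 1, so values[1] == 3.0 comes back; sm1[0, 1] finds no matching
    # row index in the column's slice and falls through to 0.0.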
def toArray(self):
"""
        Return a numpy.ndarray
"""
A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
for k in xrange(self.colPtrs.size - 1):
startptr = self.colPtrs[k]
endptr = self.colPtrs[k + 1]
if self.isTransposed:
A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
else:
A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
return A
def toDense(self):
densevals = np.ravel(self.toArray(), order='F')
return DenseMatrix(self.numRows, self.numCols, densevals)
# TODO: More efficient implementation:
def __eq__(self, other):
return np.all(self.toArray() == other.toArray())
class Matrices(object):
@staticmethod
def dense(numRows, numCols, values):
"""
Create a DenseMatrix
"""
return DenseMatrix(numRows, numCols, values)
@staticmethod
def sparse(numRows, numCols, colPtrs, rowIndices, values):
"""
Create a SparseMatrix
"""
return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
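# Usage sketch for the factory helpers above, mirroring the class-level
# doctests (hypothetical session):
#
#   >>> Matrices.dense(2, 2, range(4))
#   DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False)
#   >>> Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])
#   SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False)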
def _test():
import doctest
try:
        # NumPy 1.14+ changed its string format.
np.set_printoptions(legacy='1.13')
except TypeError:
pass
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
mspark93/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/web/test/test_resource.py
|
43
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.resource}.
"""
from twisted.trial.unittest import TestCase
from twisted.web.error import UnsupportedMethod
from twisted.web.resource import (
NOT_FOUND, FORBIDDEN, Resource, ErrorPage, NoResource, ForbiddenResource,
getChildForRequest)
from twisted.web.test.requesthelper import DummyRequest
class ErrorPageTests(TestCase):
"""
Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}.
"""
errorPage = ErrorPage
noResource = NoResource
forbiddenResource = ForbiddenResource
def test_getChild(self):
"""
The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is
called on.
"""
page = self.errorPage(321, "foo", "bar")
self.assertIdentical(page.getChild(b"name", object()), page)
def _pageRenderingTest(self, page, code, brief, detail):
request = DummyRequest([b''])
template = (
u"\n"
u"<html>\n"
u" <head><title>%s - %s</title></head>\n"
u" <body>\n"
u" <h1>%s</h1>\n"
u" <p>%s</p>\n"
u" </body>\n"
u"</html>\n")
expected = template % (code, brief, brief, detail)
self.assertEqual(
page.render(request), expected.encode('utf-8'))
self.assertEqual(request.responseCode, code)
self.assertEqual(
request.outgoingHeaders,
{b'content-type': b'text/html; charset=utf-8'})
def test_errorPageRendering(self):
"""
L{ErrorPage.render} returns a C{bytes} describing the error defined by
the response code and message passed to L{ErrorPage.__init__}. It also
uses that response code to set the response code on the L{Request}
passed in.
"""
code = 321
brief = "brief description text"
detail = "much longer text might go here"
page = self.errorPage(code, brief, detail)
self._pageRenderingTest(page, code, brief, detail)
def test_noResourceRendering(self):
"""
L{NoResource} sets the HTTP I{NOT FOUND} code.
"""
detail = "long message"
page = self.noResource(detail)
self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail)
def test_forbiddenResourceRendering(self):
"""
L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code.
"""
detail = "longer message"
page = self.forbiddenResource(detail)
self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail)
class DynamicChild(Resource):
"""
A L{Resource} to be created on the fly by L{DynamicChildren}.
"""
def __init__(self, path, request):
Resource.__init__(self)
self.path = path
self.request = request
class DynamicChildren(Resource):
"""
A L{Resource} with dynamic children.
"""
def getChild(self, path, request):
return DynamicChild(path, request)
class BytesReturnedRenderable(Resource):
"""
A L{Resource} with minimal capabilities to render a response.
"""
def __init__(self, response):
"""
@param response: A C{bytes} object giving the value to return from
C{render_GET}.
"""
Resource.__init__(self)
self._response = response
def render_GET(self, request):
"""
Render a response to a I{GET} request by returning a short byte string
to be written by the server.
"""
return self._response
class ImplicitAllowedMethods(Resource):
"""
A L{Resource} which implicitly defines its allowed methods by defining
renderers to handle them.
"""
def render_GET(self, request):
pass
def render_PUT(self, request):
pass
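# Note on what the tests below rely on: when a render_METHOD attribute is
# missing, Resource.render raises UnsupportedMethod, and (absent an explicit
# allowedMethods attribute) the allowed set is apparently derived by scanning
# for render_-prefixed methods -- so ImplicitAllowedMethods advertises GET,
# HEAD and PUT, with HEAD implied by the presence of render_GET.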
class ResourceTests(TestCase):
"""
Tests for L{Resource}.
"""
def test_staticChildren(self):
"""
L{Resource.putChild} adds a I{static} child to the resource. That child
is returned from any call to L{Resource.getChildWithDefault} for the
child's path.
"""
resource = Resource()
child = Resource()
sibling = Resource()
resource.putChild(b"foo", child)
resource.putChild(b"bar", sibling)
self.assertIdentical(
child, resource.getChildWithDefault(b"foo", DummyRequest([])))
def test_dynamicChildren(self):
"""
L{Resource.getChildWithDefault} delegates to L{Resource.getChild} when
the requested path is not associated with any static child.
"""
path = b"foo"
request = DummyRequest([])
resource = DynamicChildren()
child = resource.getChildWithDefault(path, request)
self.assertIsInstance(child, DynamicChild)
self.assertEqual(child.path, path)
self.assertIdentical(child.request, request)
def test_defaultHEAD(self):
"""
When not otherwise overridden, L{Resource.render} treats a I{HEAD}
request as if it were a I{GET} request.
"""
expected = b"insert response here"
request = DummyRequest([])
request.method = b'HEAD'
resource = BytesReturnedRenderable(expected)
self.assertEqual(expected, resource.render(request))
def test_explicitAllowedMethods(self):
"""
The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
request method has a C{allowedMethods} attribute set to the value of the
C{allowedMethods} attribute of the L{Resource}, if it has one.
"""
expected = [b'GET', b'HEAD', b'PUT']
resource = Resource()
resource.allowedMethods = expected
request = DummyRequest([])
request.method = b'FICTIONAL'
exc = self.assertRaises(UnsupportedMethod, resource.render, request)
self.assertEqual(set(expected), set(exc.allowedMethods))
def test_implicitAllowedMethods(self):
"""
The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
request method has a C{allowedMethods} attribute set to a list of the
methods supported by the L{Resource}, as determined by the
I{render_}-prefixed methods which it defines, if C{allowedMethods} is
not explicitly defined by the L{Resource}.
"""
expected = set([b'GET', b'HEAD', b'PUT'])
resource = ImplicitAllowedMethods()
request = DummyRequest([])
request.method = b'FICTIONAL'
exc = self.assertRaises(UnsupportedMethod, resource.render, request)
self.assertEqual(expected, set(exc.allowedMethods))
class GetChildForRequestTests(TestCase):
"""
Tests for L{getChildForRequest}.
"""
def test_exhaustedPostPath(self):
"""
L{getChildForRequest} returns whatever resource has been reached by the
time the request's C{postpath} is empty.
"""
request = DummyRequest([])
resource = Resource()
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_leafResource(self):
"""
L{getChildForRequest} returns the first resource it encounters with a
C{isLeaf} attribute set to C{True}.
"""
request = DummyRequest([b"foo", b"bar"])
resource = Resource()
resource.isLeaf = True
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_postPathToPrePath(self):
"""
As path segments from the request are traversed, they are taken from
C{postpath} and put into C{prepath}.
"""
request = DummyRequest([b"foo", b"bar"])
root = Resource()
child = Resource()
child.isLeaf = True
root.putChild(b"foo", child)
self.assertIdentical(child, getChildForRequest(root, request))
self.assertEqual(request.prepath, [b"foo"])
self.assertEqual(request.postpath, [b"bar"])
|
Thhhza/XlsxWriter
|
refs/heads/master
|
examples/unicode_polish_utf8.py
|
9
|
##############################################################################
#
# A simple example of converting some Unicode text to an Excel file using
# the XlsxWriter Python module.
#
# This example generates a spreadsheet with some Polish text from a file
# with UTF8 encoded text.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import codecs
import xlsxwriter
# Open the input file with the correct encoding.
textfile = codecs.open('unicode_polish_utf8.txt', 'r', 'utf-8')
# Create a new Excel file and convert the text data.
workbook = xlsxwriter.Workbook('unicode_polish_utf8.xlsx')
worksheet = workbook.add_worksheet()
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 50)
# Start from the first cell.
row = 0
col = 0
# Read the text file and write it to the worksheet.
for line in textfile:
# Ignore the comments in the text file.
if line.startswith('#'):
continue
# Write any other lines to the worksheet.
worksheet.write(row, col, line.rstrip("\n"))
row += 1
workbook.close()
|
jeremiahyan/odoo
|
refs/heads/master
|
addons/website_form/controllers/main.py
|
1
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import json
from psycopg2 import IntegrityError
from werkzeug.exceptions import BadRequest
from odoo import http, SUPERUSER_ID, _
from odoo.http import request
from odoo.tools import plaintext2html
from odoo.exceptions import ValidationError, UserError
from odoo.addons.base.models.ir_qweb_fields import nl2br
class WebsiteForm(http.Controller):
@http.route('/website_form/', type='http', auth="public", methods=['POST'], multilang=False)
def website_form_empty(self, **kwargs):
        # This is a workaround to avoid adding a language prefix to <form action="/website_form/" ...>
return ""
# Check and insert values from the form on the model <model>
@http.route('/website_form/<string:model_name>', type='http', auth="public", methods=['POST'], website=True, csrf=False)
def website_form(self, model_name, **kwargs):
# Partial CSRF check, only performed when session is authenticated, as there
# is no real risk for unauthenticated sessions here. It's a common case for
# embedded forms now: SameSite policy rejects the cookies, so the session
# is lost, and the CSRF check fails, breaking the post for no good reason.
csrf_token = request.params.pop('csrf_token', None)
if request.session.uid and not request.validate_csrf(csrf_token):
raise BadRequest('Session expired (invalid CSRF token)')
try:
# The except clause below should not let what has been done inside
# here be committed. It should not either roll back everything in
# this controller method. Instead, we use a savepoint to roll back
# what has been done inside the try clause.
with request.env.cr.savepoint():
if request.env['ir.http']._verify_request_recaptcha_token('website_form'):
return self._handle_website_form(model_name, **kwargs)
error = _("Suspicious activity detected by Google reCaptcha.")
except (ValidationError, UserError) as e:
error = e.args[0]
return json.dumps({
'error': error,
})
def _handle_website_form(self, model_name, **kwargs):
model_record = request.env['ir.model'].sudo().search([('model', '=', model_name), ('website_form_access', '=', True)])
if not model_record:
return json.dumps({
'error': _("The form's specified model does not exist")
})
try:
data = self.extract_data(model_record, request.params)
# If we encounter an issue while extracting data
except ValidationError as e:
# I couldn't find a cleaner way to pass data to an exception
return json.dumps({'error_fields' : e.args[0]})
try:
id_record = self.insert_record(request, model_record, data['record'], data['custom'], data.get('meta'))
if id_record:
self.insert_attachment(model_record, id_record, data['attachments'])
# in case of an email, we want to send it immediately instead of waiting
# for the email queue to process
if model_name == 'mail.mail':
request.env[model_name].sudo().browse(id_record).send()
# Some fields have additional SQL constraints that we can't check generically
# Ex: crm.lead.probability which is a float between 0 and 1
# TODO: How to get the name of the erroneous field ?
except IntegrityError:
return json.dumps(False)
request.session['form_builder_model_model'] = model_record.model
request.session['form_builder_model'] = model_record.name
request.session['form_builder_id'] = id_record
return json.dumps({'id': id_record})
    # Constant string to make metadata readable in a text field
_meta_label = "%s\n________\n\n" % _("Metadata") # Title for meta data
    # Dict of dynamically called filters, selected by field type, to keep the conversion fault tolerant
def identity(self, field_label, field_input):
return field_input
def integer(self, field_label, field_input):
return int(field_input)
def floating(self, field_label, field_input):
return float(field_input)
def html(self, field_label, field_input):
return plaintext2html(field_input)
def boolean(self, field_label, field_input):
return bool(field_input)
def binary(self, field_label, field_input):
return base64.b64encode(field_input.read())
def one2many(self, field_label, field_input):
return [int(i) for i in field_input.split(',')]
def many2many(self, field_label, field_input, *args):
return [(args[0] if args else (6,0)) + (self.one2many(field_label, field_input),)]
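    # Comment-only sketch of the x2many conversions above: one2many turns a
    # posted string '1,2,3' into [1, 2, 3], and many2many wraps that in an
    # Odoo command triple -- e.g. many2many(self, 'tag_ids', '1,2,3') should
    # yield [(6, 0, [1, 2, 3])] (a replace-all command) when no explicit
    # command prefix is supplied via args. 'tag_ids' is a hypothetical field.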
_input_filters = {
'char': identity,
'text': identity,
'html': html,
'date': identity,
'datetime': identity,
'many2one': integer,
'one2many': one2many,
'many2many':many2many,
'selection': identity,
'boolean': boolean,
'integer': integer,
'float': floating,
'binary': binary,
'monetary': floating,
}
    # Extract all data sent by the form and sort it into several buckets
def extract_data(self, model, values):
dest_model = request.env[model.sudo().model]
data = {
'record': {}, # Values to create record
'attachments': [], # Attached files
'custom': '', # Custom fields values
'meta': '', # Add metadata if enabled
}
authorized_fields = model.sudo()._get_form_writable_fields()
error_fields = []
custom_fields = []
for field_name, field_value in values.items():
            # If the value of the field is a file
if hasattr(field_value, 'filename'):
# Undo file upload field name indexing
field_name = field_name.split('[', 1)[0]
# If it's an actual binary field, convert the input file
# If it's not, we'll use attachments instead
if field_name in authorized_fields and authorized_fields[field_name]['type'] == 'binary':
data['record'][field_name] = base64.b64encode(field_value.read())
field_value.stream.seek(0) # do not consume value forever
if authorized_fields[field_name]['manual'] and field_name + "_filename" in dest_model:
data['record'][field_name + "_filename"] = field_value.filename
else:
field_value.field_name = field_name
data['attachments'].append(field_value)
# If it's a known field
elif field_name in authorized_fields:
try:
input_filter = self._input_filters[authorized_fields[field_name]['type']]
data['record'][field_name] = input_filter(self, field_name, field_value)
except ValueError:
error_fields.append(field_name)
# If it's a custom field
elif field_name != 'context':
custom_fields.append((field_name, field_value))
data['custom'] = "\n".join([u"%s : %s" % v for v in custom_fields])
        # Add metadata if enabled  # ICP name kept for backward compatibility
if request.env['ir.config_parameter'].sudo().get_param('website_form_enable_metadata'):
environ = request.httprequest.headers.environ
data['meta'] += "%s : %s\n%s : %s\n%s : %s\n%s : %s\n" % (
"IP" , environ.get("REMOTE_ADDR"),
"USER_AGENT" , environ.get("HTTP_USER_AGENT"),
"ACCEPT_LANGUAGE" , environ.get("HTTP_ACCEPT_LANGUAGE"),
"REFERER" , environ.get("HTTP_REFERER")
)
# This function can be defined on any model to provide
# a model-specific filtering of the record values
# Example:
# def website_form_input_filter(self, values):
# values['name'] = '%s\'s Application' % values['partner_name']
# return values
if hasattr(dest_model, "website_form_input_filter"):
data['record'] = dest_model.website_form_input_filter(request, data['record'])
        missing_required_fields = [label for label, field in authorized_fields.items() if field['required'] and label not in data['record']]
if any(error_fields):
raise ValidationError(error_fields + missing_required_fields)
return data
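    # Shape sketch of the dict returned above, for a hypothetical POST of
    # {'name': 'Jane', 'hobby': 'chess'} against a model whose only writable
    # field is a char field 'name' (and with the metadata ICP disabled):
    #   {'record': {'name': 'Jane'}, 'attachments': [],
    #    'custom': u'hobby : chess', 'meta': ''}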
def insert_record(self, request, model, values, custom, meta=None):
model_name = model.sudo().model
if model_name == 'mail.mail':
values.update({'reply_to': values.get('email_from')})
record = request.env[model_name].with_user(SUPERUSER_ID).with_context(mail_create_nosubscribe=True).create(values)
if custom or meta:
_custom_label = "%s\n___________\n\n" % _("Other Information:") # Title for custom fields
if model_name == 'mail.mail':
_custom_label = "%s\n___________\n\n" % _("This message has been posted on your website!")
default_field = model.website_form_default_field_id
default_field_data = values.get(default_field.name, '')
custom_content = (default_field_data + "\n\n" if default_field_data else '') \
+ (_custom_label + custom + "\n\n" if custom else '') \
+ (self._meta_label + meta if meta else '')
# If there is a default field configured for this model, use it.
# If there isn't, put the custom data in a message instead
if default_field.name:
if default_field.ttype == 'html' or model_name == 'mail.mail':
custom_content = nl2br(custom_content)
record.update({default_field.name: custom_content})
else:
values = {
'body': nl2br(custom_content),
'model': model_name,
'message_type': 'comment',
'res_id': record.id,
}
mail_id = request.env['mail.message'].with_user(SUPERUSER_ID).create(values)
return record.id
# Link all files attached on the form
def insert_attachment(self, model, id_record, files):
orphan_attachment_ids = []
model_name = model.sudo().model
record = model.env[model_name].browse(id_record)
authorized_fields = model.sudo()._get_form_writable_fields()
for file in files:
custom_field = file.field_name not in authorized_fields
attachment_value = {
'name': file.filename,
'datas': base64.encodebytes(file.read()),
'res_model': model_name,
'res_id': record.id,
}
attachment_id = request.env['ir.attachment'].sudo().create(attachment_value)
if attachment_id and not custom_field:
record.sudo()[file.field_name] = [(4, attachment_id.id)]
else:
orphan_attachment_ids.append(attachment_id.id)
if model_name != 'mail.mail':
# If some attachments didn't match a field on the model,
# we create a mail.message to link them to the record
if orphan_attachment_ids:
values = {
'body': _('<p>Attached files : </p>'),
'model': model_name,
'message_type': 'comment',
'res_id': id_record,
'attachment_ids': [(6, 0, orphan_attachment_ids)],
'subtype_id': request.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'),
}
mail_id = request.env['mail.message'].with_user(SUPERUSER_ID).create(values)
else:
# If the model is mail.mail then we have no other choice but to
# attach the custom binary field files on the attachment_ids field.
for attachment_id_id in orphan_attachment_ids:
record.attachment_ids = [(4, attachment_id_id)]
|