repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
TheTypoMaster/my-vim-set-mac
|
refs/heads/master
|
.vim/bundle/YouCompleteMe/third_party/ycmd/third_party/waitress/waitress/server.py
|
31
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import asyncore
import os
import os.path
import socket
import time
from waitress import trigger
from waitress.adjustments import Adjustments
from waitress.channel import HTTPChannel
from waitress.task import ThreadedTaskDispatcher
from waitress.utilities import cleanup_unix_socket, logging_dispatcher
def create_server(application,
                  map=None,
                  _start=True,      # test shim
                  _sock=None,       # test shim
                  _dispatcher=None, # test shim
                  **kw              # adjustments
                  ):
    """Build and (by default) start a WSGI server for *application*.

    Keyword arguments are turned into an Adjustments instance.  Usage::

        if __name__ == '__main__':
            server = create_server(app)
            server.run()
    """
    adj = Adjustments(**kw)
    # A unix-domain server is only possible when a unix socket path was
    # configured AND the platform exposes AF_UNIX.
    wants_unix = adj.unix_socket and hasattr(socket, 'AF_UNIX')
    server_class = UnixWSGIServer if wants_unix else TcpWSGIServer
    return server_class(application, map, _start, _sock, _dispatcher, adj)
class BaseWSGIServer(logging_dispatcher, object):
    """Common machinery shared by the TCP and unix-socket WSGI servers.

    Owns the listening socket, the threaded task dispatcher and the set
    of active HTTP channels.  Subclasses supply the address family plus
    the binding/naming logic (``bind_server_socket``, ``getsockname``).
    """

    channel_class = HTTPChannel  # class instantiated per accepted connection
    next_channel_cleanup = 0     # epoch time of the next maintenance() pass
    socketmod = socket           # test shim
    asyncore = asyncore          # test shim
    family = None                # socket address family; set by subclasses

    def __init__(self,
                 application,
                 map=None,
                 _start=True,      # test shim
                 _sock=None,       # test shim
                 _dispatcher=None, # test shim
                 adj=None,         # adjustments
                 **kw
                 ):
        if adj is None:
            adj = Adjustments(**kw)
        self.application = application
        self.adj = adj
        self.trigger = trigger.trigger(map)
        if _dispatcher is None:
            _dispatcher = ThreadedTaskDispatcher()
            _dispatcher.set_thread_count(self.adj.threads)
        self.task_dispatcher = _dispatcher
        self.asyncore.dispatcher.__init__(self, _sock, map=map)
        if _sock is None:
            # No pre-made socket supplied (normal, non-test path):
            # create and configure our own listening socket.
            self.create_socket(self.family, socket.SOCK_STREAM)
            self.set_reuse_addr()
        self.bind_server_socket()
        self.effective_host, self.effective_port = self.getsockname()
        self.server_name = self.get_server_name(self.adj.host)
        self.active_channels = {}
        if _start:
            self.accept_connections()

    def bind_server_socket(self):
        """Bind the listening socket; implemented by subclasses."""
        raise NotImplementedError # pragma: no cover

    def get_server_name(self, ip):
        """Given an IP or hostname, try to determine the server name."""
        if ip:
            server_name = str(ip)
        else:
            server_name = str(self.socketmod.gethostname())
        # Convert to a host name if necessary.
        for c in server_name:
            if c != '.' and not c.isdigit():
                # Contains a letter or other non-numeric character, so it
                # is already a host name rather than a dotted-quad IP.
                return server_name
        try:
            if server_name == '0.0.0.0':
                return 'localhost'
            server_name = self.socketmod.gethostbyaddr(server_name)[0]
        except socket.error: # pragma: no cover
            pass
        return server_name

    def getsockname(self):
        """Return (host, port)-shaped address info; subclass-specific."""
        raise NotImplementedError # pragma: no cover

    def accept_connections(self):
        self.accepting = True
        self.socket.listen(self.adj.backlog) # Get around asyncore NT limit

    def add_task(self, task):
        # Hand a task to the worker thread pool.
        self.task_dispatcher.add_task(task)

    def readable(self):
        # Called by asyncore on every loop pass; also used as a hook to
        # run periodic channel maintenance.
        now = time.time()
        if now >= self.next_channel_cleanup:
            self.next_channel_cleanup = now + self.adj.cleanup_interval
            self.maintenance(now)
        # Only accept new connections while under the connection limit.
        return (self.accepting and len(self._map) < self.adj.connection_limit)

    def writable(self):
        return False

    def handle_read(self):
        pass

    def handle_connect(self):
        pass

    def handle_accept(self):
        try:
            v = self.accept()
            if v is None:
                return
            conn, addr = v
        except socket.error:
            # Linux: On rare occasions we get a bogus socket back from
            # accept. socketmodule.c:makesockaddr complains that the
            # address family is unknown. We don't want the whole server
            # to shut down because of this.
            if self.adj.log_socket_errors:
                self.logger.warning('server accept() threw an exception',
                                    exc_info=True)
            return
        self.set_socket_options(conn)
        addr = self.fix_addr(addr)
        # The channel registers itself in self._map / active_channels.
        self.channel_class(self, conn, addr, self.adj, map=self._map)

    def run(self):
        """Run the asyncore event loop until interrupted, then shut down
        the task dispatcher's worker threads."""
        try:
            self.asyncore.loop(
                timeout=self.adj.asyncore_loop_timeout,
                map=self._map,
                use_poll=self.adj.asyncore_use_poll,
            )
        except (SystemExit, KeyboardInterrupt):
            self.task_dispatcher.shutdown()

    def pull_trigger(self):
        # Wake up the event loop from another thread.
        self.trigger.pull_trigger()

    def set_socket_options(self, conn):
        pass

    def fix_addr(self, addr):
        return addr

    def maintenance(self, now):
        """
        Closes channels that have not had any activity in a while.

        The timeout is configured through adj.channel_timeout (seconds).
        """
        cutoff = now - self.adj.channel_timeout
        for channel in self.active_channels.values():
            # Only idle channels (no pending requests) are closed.
            if (not channel.requests) and channel.last_activity < cutoff:
                channel.will_close = True
class TcpWSGIServer(BaseWSGIServer):
    """WSGI server listening on a TCP (AF_INET) socket."""

    family = socket.AF_INET

    def bind_server_socket(self):
        # Bind to the configured host/port pair.
        self.bind((self.adj.host, self.adj.port))

    def getsockname(self):
        return self.socket.getsockname()

    def set_socket_options(self, conn):
        # Apply each configured (level, optname, value) triple to the
        # newly accepted connection.
        for (level, optname, value) in self.adj.socket_options:
            conn.setsockopt(level, optname, value)
if hasattr(socket, 'AF_UNIX'):

    class UnixWSGIServer(BaseWSGIServer):
        """WSGI server listening on a unix-domain socket (POSIX only)."""

        family = socket.AF_UNIX

        def bind_server_socket(self):
            # Remove any stale socket file from a previous run, then bind
            # and apply the configured permission bits.
            cleanup_unix_socket(self.adj.unix_socket)
            self.bind(self.adj.unix_socket)
            if os.path.exists(self.adj.unix_socket):
                os.chmod(self.adj.unix_socket, self.adj.unix_socket_perms)

        def getsockname(self):
            return ('unix', self.socket.getsockname())

        def fix_addr(self, addr):
            # accept() on AF_UNIX yields an empty peer address; present a
            # (host, port)-shaped tuple to the rest of the machinery.
            return ('localhost', None)

# Compatibility alias.
WSGIServer = TcpWSGIServer
|
westerhofffl/appengine-mapreduce
|
refs/heads/master
|
python/src/mapreduce/datastore_range_iterators.py
|
24
|
#!/usr/bin/env python
"""Helpers iterators for input_readers.DatastoreInputReader."""
# pylint: disable=g-bad-name
import itertools
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext import key_range
from mapreduce import json_util
from mapreduce import key_ranges
from mapreduce import model
from mapreduce import namespace_range
from mapreduce import property_range
from mapreduce import util
# Public API of this module; leading-underscore classes are internal.
__all__ = [
    "RangeIteratorFactory",
    "RangeIterator",
    "AbstractKeyRangeIterator",
    "KeyRangeModelIterator",
    "KeyRangeEntityIterator",
    "KeyRangeKeyIterator",
    "KeyRangeEntityProtoIterator"]
class RangeIteratorFactory(object):
  """Factory to create RangeIterator."""

  @classmethod
  def create_property_range_iterator(cls,
                                     p_range,
                                     ns_range,
                                     query_spec):
    """Create a _PropertyRangeModelIterator.

    Args:
      p_range: a property_range.PropertyRange object that defines the
        conditions entities should satisfy.
      ns_range: a namespace_range.NamespaceRange object that defines the
        namespaces to examine.
      query_spec: a model.QuerySpec object that defines how to retrieve
        entities from datastore.

    Returns:
      a RangeIterator.
    """
    return _PropertyRangeModelIterator(p_range,
                                       ns_range,
                                       query_spec)

  @classmethod
  def create_multi_property_range_iterator(cls,
                                           p_range_iters):
    """Create a RangeIterator.

    Args:
      p_range_iters: a list of RangeIterator objects to chain together.

    Returns:
      a RangeIterator.
    """
    return _MultiPropertyRangeModelIterator(p_range_iters)

  @classmethod
  def create_key_ranges_iterator(cls,
                                 k_ranges,
                                 query_spec,
                                 key_range_iter_cls):
    """Create a _KeyRangesIterator.

    Args:
      k_ranges: a key_ranges._KeyRanges object.
      query_spec: a model.query_spec object that defines how to retrieve
        entities from datastore.
      key_range_iter_cls: the class that iterates over a single key range.
        The value yielded by this class is yielded.

    Returns:
      a RangeIterator.
    """
    return _KeyRangesIterator(k_ranges, query_spec, key_range_iter_cls)

  @classmethod
  def from_json(cls, json):
    """Deserialize a RangeIterator by dispatching on its stored class name."""
    return _RANGE_ITERATORS[json["name"]].from_json(json)
class RangeIterator(json_util.JsonMixin):
  """Interface for DatastoreInputReader helpers.

  Technically, RangeIterator is a container. It contains all datastore
  entities that fall under a certain range (key range or property range).
  It implements __iter__, which returns a generator that can iterate
  through entities. It also implements marshalling logic. Marshalling
  saves the state of the container so that any new generator created
  can resume where the old generator left off.

  Caveats:
    1. Calling next() on the generators may also modify the container.
    2. Marshalling after StopIteration is raised has undefined behavior.
  """

  def __iter__(self):
    """Iter.

    Yields:
      Iterates over datastore entities and yields some kind of value
      for each entity.
    """
    raise NotImplementedError()

  def __repr__(self):
    raise NotImplementedError()

  def to_json(self):
    """Serializes all states into json form.

    Returns:
      all states in json-compatible map.
    """
    raise NotImplementedError()

  @classmethod
  def from_json(cls, json):
    """Reverse of to_json."""
    raise NotImplementedError()
class _PropertyRangeModelIterator(RangeIterator):
  """Yields db/ndb model entities within a property range."""

  def __init__(self, p_range, ns_range, query_spec):
    """Init.

    Args:
      p_range: a property_range.PropertyRange object that defines the
        conditions entities should satisfy.
      ns_range: a namespace_range.NamespaceRange object that defines the
        namespaces to examine.
      query_spec: a model.QuerySpec object that defines how to retrieve
        entities from datastore.
    """
    self._property_range = p_range
    self._ns_range = ns_range
    self._query_spec = query_spec
    # Resumption state: cursor of the in-flight query (if any).
    self._cursor = None
    self._query = None

  def __repr__(self):
    return "PropertyRangeIterator for %s" % str(self._property_range)

  def __iter__(self):
    """Iterate over entities.

    Yields:
      db model entities or ndb model entities if the model is defined with ndb.
    """
    for ns in self._ns_range:
      self._query = self._property_range.make_query(ns)
      if isinstance(self._query, db.Query):
        # db path: resume via with_cursor.
        if self._cursor:
          self._query.with_cursor(self._cursor)
        for model_instance in self._query.run(
            batch_size=self._query_spec.batch_size,
            keys_only=self._query_spec.keys_only):
          yield model_instance
      else:
        # ndb path: resume via start_cursor on the query iterator.
        self._query = self._query.iter(batch_size=self._query_spec.batch_size,
                                       keys_only=self._query_spec.keys_only,
                                       start_cursor=self._cursor,
                                       produce_cursors=True)
        for model_instance in self._query:
          yield model_instance
      # Namespace finished: clear per-namespace state and advance the
      # namespace range so a resumed iterator does not repeat this ns.
      self._query = None
      self._cursor = None
      if ns != self._ns_range.namespace_end:
        self._ns_range = self._ns_range.with_start_after(ns)

  def to_json(self):
    """Inherit doc."""
    cursor = self._cursor
    if self._query is not None:
      # A query is in flight; capture its live cursor instead.
      if isinstance(self._query, db.Query):
        cursor = self._query.cursor()
      else:
        cursor = self._query.cursor_after()

    if cursor is None or isinstance(cursor, basestring):
      cursor_object = False
    else:
      # Cursor object must be flattened to a websafe string for json.
      cursor_object = True
      cursor = cursor.to_websafe_string()

    return {"property_range": self._property_range.to_json(),
            "query_spec": self._query_spec.to_json(),
            "cursor": cursor,
            "ns_range": self._ns_range.to_json_object(),
            "name": self.__class__.__name__,
            "cursor_object": cursor_object}

  # TODO(user): it sucks we need to handle cursor_to_str in many places.
  # In the long run, datastore adaptor refactor will take care of this as
  # we will only need to deal with low level datastore API after that.
  # Thus we will not add Cursor as a json primitive MR should understand.
  @classmethod
  def from_json(cls, json):
    """Inherit doc."""
    obj = cls(property_range.PropertyRange.from_json(json["property_range"]),
              namespace_range.NamespaceRange.from_json_object(json["ns_range"]),
              model.QuerySpec.from_json(json["query_spec"]))
    cursor = json["cursor"]
    # lint bug. Class method can access protected fields.
    # pylint: disable=protected-access
    if cursor and json["cursor_object"]:
      obj._cursor = datastore_query.Cursor.from_websafe_string(cursor)
    else:
      obj._cursor = cursor
    return obj
class _MultiPropertyRangeModelIterator(RangeIterator):
  """Yields db/ndb model entities within a list of disjoint property ranges."""

  def __init__(self, p_range_iters):
    """Init.

    Args:
      p_range_iters: a list of _PropertyRangeModelIterator objects to chain
        together.
    """
    self._iters = p_range_iters

  def __repr__(self):
    return "MultiPropertyRangeIterator combining %s" % str(
        [str(it) for it in self._iters])

  def __iter__(self):
    """Iterate over entities.

    Yields:
      db model entities or ndb model entities if the model is defined with ndb.
    """
    for model_instance in itertools.chain.from_iterable(self._iters):
      yield model_instance

  def to_json(self):
    """Inherit doc."""
    json = {"name": self.__class__.__name__,
            "num_ranges": len(self._iters)}
    for i in xrange(len(self._iters)):
      json_item = self._iters[i].to_json()
      query_spec = json_item["query_spec"]
      item_name = json_item["name"]
      # Delete and move one level up
      del json_item["query_spec"]
      del json_item["name"]
      json[str(i)] = json_item
    # Store once to save space
    json["query_spec"] = query_spec
    json["item_name"] = item_name
    return json

  @classmethod
  def from_json(cls, json):
    """Inherit doc."""
    num_ranges = int(json["num_ranges"])
    query_spec = json["query_spec"]
    item_name = json["item_name"]
    p_range_iters = []
    for i in xrange(num_ranges):
      json_item = json[str(i)]
      # Place query_spec, name back into each iterator
      json_item["query_spec"] = query_spec
      json_item["name"] = item_name
      p_range_iters.append(_PropertyRangeModelIterator.from_json(json_item))
    obj = cls(p_range_iters)
    return obj
class _KeyRangesIterator(RangeIterator):
  """Create an iterator over a key_ranges.KeyRanges object."""

  def __init__(self,
               k_ranges,
               query_spec,
               key_range_iter_cls):
    """Init.

    Args:
      k_ranges: a key_ranges._KeyRanges object.
      query_spec: a model.query_spec object that defines how to retrieve
        entities from datastore.
      key_range_iter_cls: the class that iterates over a single key range.
        The value yielded by this class is yielded.
    """
    self._key_ranges = k_ranges
    self._query_spec = query_spec
    self._key_range_iter_cls = key_range_iter_cls
    # Iterator over the key range currently being consumed (resumable).
    self._current_iter = None
    self._current_key_range = None

  def __repr__(self):
    return "KeyRangesIterator for %s" % str(self._key_ranges)

  def __iter__(self):
    while True:
      # Drain the current key range first (resumes mid-range after
      # deserialization), then advance to the next range.
      if self._current_iter:
        for o in self._current_iter:
          yield o
      try:
        k_range = self._key_ranges.next()
        self._current_iter = self._key_range_iter_cls(k_range,
                                                      self._query_spec)
      except StopIteration:
        self._current_iter = None
        break

  def to_json(self):
    """Inherit doc."""
    current_iter = None
    if self._current_iter:
      current_iter = self._current_iter.to_json()

    return {"key_ranges": self._key_ranges.to_json(),
            "query_spec": self._query_spec.to_json(),
            "current_iter": current_iter,
            "key_range_iter_cls": self._key_range_iter_cls.__name__,
            "name": self.__class__.__name__}

  @classmethod
  def from_json(cls, json):
    """Inherit doc."""
    key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]]
    obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]),
              model.QuerySpec.from_json(json["query_spec"]),
              key_range_iter_cls)

    current_iter = None
    if json["current_iter"]:
      current_iter = key_range_iter_cls.from_json(json["current_iter"])
    # pylint: disable=protected-access
    obj._current_iter = current_iter
    return obj
# A map from class name to class of all RangeIterators.
# Used by RangeIteratorFactory.from_json to resolve a serialized "name".
_RANGE_ITERATORS = {
    _PropertyRangeModelIterator.__name__: _PropertyRangeModelIterator,
    _MultiPropertyRangeModelIterator.__name__: _MultiPropertyRangeModelIterator,
    _KeyRangesIterator.__name__: _KeyRangesIterator
}
class AbstractKeyRangeIterator(json_util.JsonMixin):
  """Iterates over a single key_range.KeyRange and yields value for each key.

  All subclasses do the same thing: iterate over a single KeyRange.
  They do so using different APIs (db, ndb, datastore) to return entities
  of different types (db model, ndb model, datastore entity, raw proto).
  """

  def __init__(self, k_range, query_spec):
    """Init.

    Args:
      k_range: a key_range.KeyRange object that defines the entity keys to
        operate on. KeyRange object already contains a namespace.
      query_spec: a model.query_spec object that defines how to retrieve
        entities from datastore.
    """
    self._key_range = k_range
    self._query_spec = query_spec
    # Resumption state: cursor of the in-flight query (if any).
    self._cursor = None
    self._query = None

  def __iter__(self):
    """Iter."""
    raise NotImplementedError()

  def _get_cursor(self):
    """Get cursor on current query iterator for serialization."""
    raise NotImplementedError()

  def to_json(self):
    """Serializes all states into json form.

    Returns:
      all states in json-compatible map.
    """
    cursor = self._get_cursor()
    cursor_object = False
    if cursor and isinstance(cursor, datastore_query.Cursor):
      # Cursor object must be flattened to a websafe string for json.
      cursor = cursor.to_websafe_string()
      cursor_object = True
    return {"key_range": self._key_range.to_json(),
            "query_spec": self._query_spec.to_json(),
            "cursor": cursor,
            "cursor_object": cursor_object}

  @classmethod
  def from_json(cls, json):
    """Reverse of to_json."""
    obj = cls(key_range.KeyRange.from_json(json["key_range"]),
              model.QuerySpec.from_json(json["query_spec"]))
    cursor = json["cursor"]
    # lint bug. Class method can access protected fields.
    # pylint: disable=protected-access
    if cursor and json["cursor_object"]:
      obj._cursor = datastore_query.Cursor.from_websafe_string(cursor)
    else:
      obj._cursor = cursor
    return obj
class KeyRangeModelIterator(AbstractKeyRangeIterator):
  """Yields db/ndb model entities with a key range."""

  def __iter__(self):
    self._query = self._key_range.make_ascending_query(
        util.for_name(self._query_spec.model_class_path),
        filters=self._query_spec.filters)

    if isinstance(self._query, db.Query):
      # db path: resume via with_cursor.
      if self._cursor:
        self._query.with_cursor(self._cursor)
      for model_instance in self._query.run(
          batch_size=self._query_spec.batch_size,
          keys_only=self._query_spec.keys_only):
        yield model_instance
    else:
      # ndb path: resume via start_cursor on the query iterator.
      self._query = self._query.iter(batch_size=self._query_spec.batch_size,
                                     keys_only=self._query_spec.keys_only,
                                     start_cursor=self._cursor,
                                     produce_cursors=True)
      for model_instance in self._query:
        yield model_instance

  def _get_cursor(self):
    if self._query is None:
      # No query in flight; report the last saved cursor.
      return self._cursor
    if isinstance(self._query, db.Query):
      return self._query.cursor()
    else:
      return self._query.cursor_after()
class KeyRangeEntityIterator(AbstractKeyRangeIterator):
  """Yields datastore.Entity type within a key range."""

  # Subclasses may force keys-only queries regardless of query_spec.
  _KEYS_ONLY = False

  def __iter__(self):
    self._query = self._key_range.make_ascending_datastore_query(
        self._query_spec.entity_kind, filters=self._query_spec.filters)
    for entity in self._query.Run(config=datastore_query.QueryOptions(
        batch_size=self._query_spec.batch_size,
        keys_only=self._query_spec.keys_only or self._KEYS_ONLY,
        start_cursor=self._cursor)):
      yield entity

  def _get_cursor(self):
    if self._query is None:
      return self._cursor
    return self._query.GetCursor()
class KeyRangeKeyIterator(KeyRangeEntityIterator):
  """Yields datastore.Key type within a key range."""

  # Forces the inherited query to run keys-only.
  _KEYS_ONLY = True
class KeyRangeEntityProtoIterator(AbstractKeyRangeIterator):
  """Yields datastore.Entity's raw proto within a key range."""

  def __iter__(self):
    query = self._key_range.make_ascending_datastore_query(
        self._query_spec.entity_kind, filters=self._query_spec.filters)
    # get a connection without adapter.
    connection = datastore_rpc.Connection()
    query_options = datastore_query.QueryOptions(
        batch_size=self._query_spec.batch_size,
        start_cursor=self._cursor,
        produce_cursors=True)
    # Transform datastore.Query:
    # datastore.Query -> datastore_query.Query -> datastore_query.Batcher ->
    # datastore_query.ResultsIterator
    self._query = datastore_query.ResultsIterator(
        query.GetQuery().run(connection, query_options))
    for entity_proto in self._query:
      yield entity_proto

  def _get_cursor(self):
    if self._query is None:
      return self._cursor
    return self._query.cursor()
# TODO(user): update this map automatically using metaclass if needed.
# Ideally, we want a parameter in datastore input reader to control
# the return type.
# Maps class name -> class, used by _KeyRangesIterator.from_json.
_KEY_RANGE_ITERATORS = {
    KeyRangeModelIterator.__name__: KeyRangeModelIterator,
    KeyRangeEntityIterator.__name__: KeyRangeEntityIterator,
    KeyRangeKeyIterator.__name__: KeyRangeKeyIterator,
    KeyRangeEntityProtoIterator.__name__: KeyRangeEntityProtoIterator
}
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pygments-2.0.2/pygments/cmdline.py
|
43
|
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
get_lexer_for_filename, find_lexer_class, TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
# Command-line usage text; all six %s slots are filled with argv[0] by main().
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-s] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time. This will only work properly with
lexers that have no constructs spanning multiple lines!
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file. This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
    """Parse ``-F``-style filter specs into ``(name, options)`` tuples.

    A spec may carry options after the first colon, e.g.
    ``name:key=value,key2=value2``; a bare name gets an empty dict.
    """
    filters = []
    if not f_strs:
        return filters
    for f_str in f_strs:
        name, sep, raw_opts = f_str.partition(':')
        if sep:
            filters.append((name, _parse_options([raw_opts])))
        else:
            filters.append((f_str, {}))
    return filters
def _print_help(what, name):
    """Print detailed help for the named lexer, formatter or filter."""
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print("Help on the %s lexer:" % cls.name)
            print(dedent(cls.__doc__))
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print("Help on the %s formatter:" % cls.name)
            print(dedent(cls.__doc__))
        elif what == 'filter':
            cls = find_filter_class(name)
            print("Help on the %s filter:" % name)
            print(dedent(cls.__doc__))
    except AttributeError:
        # The find_* helpers return None on failure; cls.name / cls.__doc__
        # then raise AttributeError, which we report as "not found".
        print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
    """Print a sorted listing of all lexers, formatters, filters or styles.

    *what* is the singular category name: 'lexer', 'formatter', 'filter'
    or 'style'.
    """
    if what == 'lexer':
        print()
        print("Lexers:")
        print("~~~~~~~")
        info = []
        for fullname, names, exts, _ in get_all_lexers():
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print(('* %s\n    %s %s') % i)
    elif what == 'formatter':
        print()
        print("Formatters:")
        print("~~~~~~~~~~~")
        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print(('* %s\n    %s %s') % i)
    elif what == 'filter':
        print()
        print("Filters:")
        print("~~~~~~~~")
        for name in get_all_filters():
            cls = find_filter_class(name)
            print("* " + name + ':')
            print("    %s" % docstring_headline(cls))
    elif what == 'style':
        print()
        print("Styles:")
        print("~~~~~~~")
        for name in get_all_styles():
            cls = get_style_by_name(name)
            print("* " + name + ':')
            print("    %s" % docstring_headline(cls))
def main_inner(popts, args, usage):
    """Execute pygmentize given getopt-parsed options and positional args.

    Returns a process exit status: 0 on success, 1 on runtime error,
    2 on usage error.
    """
    # Collect options; -O/-P/-F are repeatable, everything else keeps
    # only its last occurrence in `opts`.
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if opts.pop('-h', None) is not None:
        print(usage)
        return 0

    if opts.pop('-V', None) is not None:
        print('Pygments version %s, (c) 2006-2014 by Georg Brandl.' % __version__)
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print(usage, file=sys.stderr)
            return 2
        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            # Accept plural forms ("styles" -> "style").
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print(usage, file=sys.stderr)
            return 2
        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print(usage, file=sys.stderr)
            return 2
        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # encodings
    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound as err:
            # No specific lexer known for this filename: report "text".
            lexer = TextLexer()
        except OptionError as err:
            print('Error:', err, file=sys.stderr)
            return 1
        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print(usage, file=sys.stderr)
            return 2
        if opts or args:
            print(usage, file=sys.stderr)
            return 2
        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound as err:
            print(err, file=sys.stderr)
            return 1
        arg = a_opt or ''
        try:
            print(fmter.get_style_defs(arg))
        except Exception as err:
            print('Error:', err, file=sys.stderr)
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print(usage, file=sys.stderr)
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select lexer
    lexer = None

    # given by name?
    lexername = opts.pop('-l', None)
    if lexername:
        try:
            lexer = get_lexer_by_name(lexername, **parsed_opts)
        except (OptionError, ClassNotFound) as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # read input code
    code = None

    if args:
        if len(args) > 1:
            print(usage, file=sys.stderr)
            return 2
        if '-s' in opts:
            print('Error: -s option not usable when input file specified',
                  file=sys.stderr)
            return 1
        infn = args[0]
        try:
            with open(infn, 'rb') as infp:
                code = infp.read()
        except Exception as err:
            print('Error: cannot read infile:', err, file=sys.stderr)
            return 1
        if not inencoding:
            code, inencoding = guess_decode(code)

        # do we have to guess the lexer?
        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound as err:
                if '-g' in opts:
                    # -g: fall back to content-based guessing, then to
                    # plain-text passthrough.
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print('Error:', err, file=sys.stderr)
                    return 1
            except OptionError as err:
                print('Error:', err, file=sys.stderr)
                return 1

    elif '-s' not in opts: # treat stdin as full file (-s support is later)
        # read code from terminal, always in binary mode since we want to
        # decode ourselves and be tolerant with it
        if sys.version_info > (3,):
            # Python 3: we have to use .buffer to get a binary stream
            code = sys.stdin.buffer.read()
        else:
            code = sys.stdin.read()
        if not inencoding:
            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
            # else the lexer will do the decoding
        if not lexer:
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound) as err:
            print('Error:', err, file=sys.stderr)
            return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception as err:
            print('Error: cannot open outfile:', err, file=sys.stderr)
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        if sys.version_info > (3,):
            # Python 3: we have to use .buffer to get a binary stream
            outfile = sys.stdout.buffer
        else:
            outfile = sys.stdout

    # determine output encoding if not explicitly selected
    if not outencoding:
        if outfn:
            # output file? use lexer encoding for now (can still be None)
            fmter.encoding = inencoding
        else:
            # else use terminal encoding
            fmter.encoding = terminal_encoding(sys.stdout)

    # provide coloring under Windows, if possible
    if not outfn and sys.platform in ('win32', 'cygwin') and \
       fmter.name in ('Terminal', 'Terminal256'):
        # unfortunately colorama doesn't support binary streams on Py3
        if sys.version_info > (3,):
            from pygments.util import UnclosingTextIOWrapper
            outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
            fmter.encoding = None
        try:
            import colorama.initialise
        except ImportError:
            pass
        else:
            outfile = colorama.initialise.wrap_stream(
                outfile, convert=None, strip=None, autoreset=False, wrap=True)

    # When using the LaTeX formatter and the option `escapeinside` is
    # specified, we need a special lexer which collects escaped text
    # before running the chosen language lexer.
    escapeinside = parsed_opts.get('escapeinside', '')
    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
        left = escapeinside[0]
        right = escapeinside[1]
        lexer = LatexEmbeddedLexer(left, right, lexer)

    # process filters
    for fname, fopts in F_opts:
        try:
            lexer.add_filter(fname, **fopts)
        except ClassNotFound as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # ... and do it!
    if '-s' not in opts:
        # process whole input as per normal...
        highlight(code, lexer, fmter, outfile)
        return 0
    else:
        if not lexer:
            print('Error: when using -s a lexer has to be selected with -l',
                  file=sys.stderr)
            return 1
        # line by line processing of stdin (eg: for 'tail -f')...
        try:
            while 1:
                if sys.version_info > (3,):
                    # Python 3: we have to use .buffer to get a binary stream
                    line = sys.stdin.buffer.readline()
                else:
                    line = sys.stdin.readline()
                if not line:
                    break
                if not inencoding:
                    line = guess_decode_from_terminal(line, sys.stdin)[0]
                highlight(line, lexer, fmter, outfile)
                if hasattr(outfile, 'flush'):
                    outfile.flush()
        except KeyboardInterrupt:
            return 0
def main(args=sys.argv):
    """
    Main command line entry point.
    """
    usage = USAGE % ((args[0],) * 6)

    # Parse the command line; an unknown flag or a missing option argument
    # is a usage error (exit status 2 by convention).
    try:
        option_pairs, remaining = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHgs")
    except getopt.GetoptError:
        print(usage, file=sys.stderr)
        return 2

    # Delegate the real work; any unexpected failure is reported as a
    # highlighting error with the most relevant frame of the traceback.
    try:
        return main_inner(option_pairs, remaining, usage)
    except Exception:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print(file=sys.stderr)
        print('*** Error while highlighting:', file=sys.stderr)
        print(msg, file=sys.stderr)
        return 1
|
iabdalkader/micropython
|
refs/heads/master
|
tests/float/math_isclose.py
|
15
|
# test math.isclose (appeared in Python 3.5)
try:
    from math import isclose
except ImportError:
    # This MicroPython port does not provide math.isclose; printing SKIP
    # tells the test harness to ignore this test instead of failing it.
    print("SKIP")
    raise SystemExit
def test(a, b, **kwargs):
print(isclose(a, b, **kwargs))
def test_combinations(a, b, **kwargs):
    """Exercise test() on all four ordered pairings of *a* and *b*."""
    for lhs, rhs in ((a, a), (a, b), (b, a), (b, b)):
        test(lhs, rhs, **kwargs)
# Special numbers
test_combinations(float("nan"), 1)
test_combinations(float("inf"), 1)
test_combinations(float("-inf"), 1)
# Equality
test(1.0, 1.0, rel_tol=0.0, abs_tol=0.0)
test(2.35e-100, 2.35e-100, rel_tol=0.0, abs_tol=0.0)
test(2.1234e100, 2.1234e100, rel_tol=0.0, abs_tol=0.0)
# Relative tolerance
test(1000.0, 1001.0, rel_tol=1e-3)
test(1000.0, 1001.0, rel_tol=1e-4)
# Same relative-tolerance cases with integer arguments.
test(1000, 1001, rel_tol=1e-3)
test(1000, 1001, rel_tol=1e-4)
test_combinations(0, 1, rel_tol=1.0)
# Absolute tolerance
test(0.0, 1e-10, abs_tol=1e-10, rel_tol=0.1)
test(0.0, 1e-10, abs_tol=0.0, rel_tol=0.1)
# Bad parameters
# Negative tolerances must raise ValueError per the CPython semantics.
try:
    isclose(0, 0, abs_tol=-1)
except ValueError:
    print("ValueError")
try:
    isclose(0, 0, rel_tol=-1)
except ValueError:
    print("ValueError")
|
SCSSG/Odoo-SCS
|
refs/heads/master
|
addons/crm_claim/report/__init__.py
|
446
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Jusedawg/SickRage
|
refs/heads/develop
|
lib/stevedore/example2/setup.py
|
32
|
from setuptools import setup, find_packages
# Packaging metadata for the second stevedore example distribution; its
# only purpose is to register the FieldList formatter under the
# 'stevedore.example.formatter' entry-point namespace for demonstration.
setup(
    name='stevedore-examples2',
    version='1.0',
    description='Demonstration package for stevedore',
    author='Doug Hellmann',
    author_email='doug@doughellmann.com',
    url='http://git.openstack.org/cgit/openstack/stevedore',
    classifiers=['Development Status :: 3 - Alpha',
                 'License :: OSI Approved :: Apache Software License',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 3.4',
                 'Intended Audience :: Developers',
                 'Environment :: Console',
                 ],
    platforms=['Any'],
    scripts=[],
    provides=['stevedore.examples2',
              ],
    packages=find_packages(),
    include_package_data=True,
    # Plugin registration consumed by stevedore's ExtensionManager.
    entry_points={
        'stevedore.example.formatter': [
            'field = stevedore.example2.fields:FieldList',
        ],
    },
    zip_safe=False,
)
|
luistorresm/odoo
|
refs/heads/8.0
|
addons/website_report/controllers/__init__.py
|
7372
|
import main
|
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/google_recaptcha/models/ir_http.py
|
3
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
from odoo import api, models, _
from odoo.http import request
from odoo.exceptions import UserError, ValidationError
logger = logging.getLogger(__name__)
class Http(models.AbstractModel):
    _inherit = 'ir.http'

    @api.model
    def _verify_request_recaptcha_token(self, action):
        """ Verify the recaptcha token for the current request.

        If no recaptcha private key is set the recaptcha verification
        is considered inactive and this method will return True.

        :param action: reCaptcha action name the token was minted for.
        :return: True when the user is trusted (or verification is off),
            False when the user looks like a bot or the action mismatches.
        :raise ValidationError: invalid private key or invalid token.
        :raise UserError: verification timed out or request was malformed.
        """
        ip_addr = request.httprequest.remote_addr
        # pop() so the token is consumed and never reaches later request
        # handling (e.g. ORM writes built from request.params).
        token = request.params.pop('recaptcha_token_response', False)
        recaptcha_result = request.env['ir.http']._verify_recaptcha_token(ip_addr, token, action)
        if recaptcha_result in ['is_human', 'no_secret']:
            return True
        if recaptcha_result == 'wrong_secret':
            raise ValidationError(_("The reCaptcha private key is invalid."))
        elif recaptcha_result == 'wrong_token':
            raise ValidationError(_("The reCaptcha token is invalid."))
        elif recaptcha_result == 'timeout':
            raise UserError(_("Your request has timed out, please retry."))
        elif recaptcha_result == 'bad_request':
            raise UserError(_("The request is invalid or malformed."))
        else:
            # 'is_bot' and 'wrong_action' end up here.
            return False

    @api.model
    def _verify_recaptcha_token(self, ip_addr, token, action=False):
        """
        Verify a recaptchaV3 token and returns the result as a string.
        RecaptchaV3 verify DOC: https://developers.google.com/recaptcha/docs/verify

        :return: The result of the call to the google API:
                 is_human: The token is valid and the user trustworthy.
                 is_bot: The user is not trustworthy and most likely a bot.
                 no_secret: No reCaptcha secret set in settings.
                 wrong_action: the action performed to obtain the token does not match the one we are verifying.
                 wrong_token: The token provided is invalid or empty.
                 wrong_secret: The private key provided in settings is invalid.
                 timeout: The request has timout or the token provided is too old.
                 bad_request: The request is invalid or malformed.
        :rtype: str
        """
        private_key = request.env['ir.config_parameter'].sudo().get_param('recaptcha_private_key')
        if not private_key:
            return 'no_secret'
        min_score = request.env['ir.config_parameter'].sudo().get_param('recaptcha_min_score')
        try:
            r = requests.post('https://www.recaptcha.net/recaptcha/api/siteverify', {
                'secret': private_key,
                'response': token,
                'remoteip': ip_addr,
            }, timeout=2)  # it takes ~50ms to retrieve the response
            result = r.json()
            res_success = result['success']
            # Only trust the returned action when the call succeeded and an
            # expected action was supplied.
            res_action = res_success and action and result['action']
        except requests.exceptions.Timeout:
            logger.error("Trial captcha verification timeout for ip address %s", ip_addr)
            return 'timeout'
        except Exception:
            logger.error("Trial captcha verification bad request response")
            return 'bad_request'
        if res_success:
            # NOTE(review): score defaults to False when absent, which then
            # compares as 0.0 against min_score and classifies the hit as a
            # bot — presumably intended; confirm against the API contract.
            score = result.get('score', False)
            if score < float(min_score):
                logger.warning("Trial captcha verification for ip address %s failed with score %f.", ip_addr, score)
                return 'is_bot'
            if res_action and res_action != action:
                # BUG FIX: the original passed the numeric score (with %f)
                # into the "action" placeholder; log the mismatching action
                # strings instead so the message is actually diagnostic.
                logger.warning("Trial captcha verification for ip address %s failed with action %s, expected: %s.", ip_addr, res_action, action)
                return 'wrong_action'
            logger.info("Trial captcha verification for ip address %s succeeded with score %f.", ip_addr, score)
            return 'is_human'
        errors = result.get('error-codes', [])
        logger.warning("Trial captcha verification for ip address %s failed error codes %r. token was: [%s]", ip_addr, errors, token)
        # Map the google error codes onto our internal result strings.
        for error in errors:
            if error in ['missing-input-secret', 'invalid-input-secret']:
                return 'wrong_secret'
            if error in ['missing-input-response', 'invalid-input-response']:
                return 'wrong_token'
            if error == 'timeout-or-duplicate':
                return 'timeout'
            if error == 'bad-request':
                return 'bad_request'
        return 'is_bot'
|
mrquim/repository.mrquim
|
refs/heads/master
|
script.module.unidecode/lib/unidecode/x07b.py
|
252
|
data = (
'Mang ', # 0x00
'Zhu ', # 0x01
'Utsubo ', # 0x02
'Du ', # 0x03
'Ji ', # 0x04
'Xiao ', # 0x05
'Ba ', # 0x06
'Suan ', # 0x07
'Ji ', # 0x08
'Zhen ', # 0x09
'Zhao ', # 0x0a
'Sun ', # 0x0b
'Ya ', # 0x0c
'Zhui ', # 0x0d
'Yuan ', # 0x0e
'Hu ', # 0x0f
'Gang ', # 0x10
'Xiao ', # 0x11
'Cen ', # 0x12
'Pi ', # 0x13
'Bi ', # 0x14
'Jian ', # 0x15
'Yi ', # 0x16
'Dong ', # 0x17
'Shan ', # 0x18
'Sheng ', # 0x19
'Xia ', # 0x1a
'Di ', # 0x1b
'Zhu ', # 0x1c
'Na ', # 0x1d
'Chi ', # 0x1e
'Gu ', # 0x1f
'Li ', # 0x20
'Qie ', # 0x21
'Min ', # 0x22
'Bao ', # 0x23
'Tiao ', # 0x24
'Si ', # 0x25
'Fu ', # 0x26
'Ce ', # 0x27
'Ben ', # 0x28
'Pei ', # 0x29
'Da ', # 0x2a
'Zi ', # 0x2b
'Di ', # 0x2c
'Ling ', # 0x2d
'Ze ', # 0x2e
'Nu ', # 0x2f
'Fu ', # 0x30
'Gou ', # 0x31
'Fan ', # 0x32
'Jia ', # 0x33
'Ge ', # 0x34
'Fan ', # 0x35
'Shi ', # 0x36
'Mao ', # 0x37
'Po ', # 0x38
'Sey ', # 0x39
'Jian ', # 0x3a
'Qiong ', # 0x3b
'Long ', # 0x3c
'Souke ', # 0x3d
'Bian ', # 0x3e
'Luo ', # 0x3f
'Gui ', # 0x40
'Qu ', # 0x41
'Chi ', # 0x42
'Yin ', # 0x43
'Yao ', # 0x44
'Xian ', # 0x45
'Bi ', # 0x46
'Qiong ', # 0x47
'Gua ', # 0x48
'Deng ', # 0x49
'Jiao ', # 0x4a
'Jin ', # 0x4b
'Quan ', # 0x4c
'Sun ', # 0x4d
'Ru ', # 0x4e
'Fa ', # 0x4f
'Kuang ', # 0x50
'Zhu ', # 0x51
'Tong ', # 0x52
'Ji ', # 0x53
'Da ', # 0x54
'Xing ', # 0x55
'Ce ', # 0x56
'Zhong ', # 0x57
'Kou ', # 0x58
'Lai ', # 0x59
'Bi ', # 0x5a
'Shai ', # 0x5b
'Dang ', # 0x5c
'Zheng ', # 0x5d
'Ce ', # 0x5e
'Fu ', # 0x5f
'Yun ', # 0x60
'Tu ', # 0x61
'Pa ', # 0x62
'Li ', # 0x63
'Lang ', # 0x64
'Ju ', # 0x65
'Guan ', # 0x66
'Jian ', # 0x67
'Han ', # 0x68
'Tong ', # 0x69
'Xia ', # 0x6a
'Zhi ', # 0x6b
'Cheng ', # 0x6c
'Suan ', # 0x6d
'Shi ', # 0x6e
'Zhu ', # 0x6f
'Zuo ', # 0x70
'Xiao ', # 0x71
'Shao ', # 0x72
'Ting ', # 0x73
'Ce ', # 0x74
'Yan ', # 0x75
'Gao ', # 0x76
'Kuai ', # 0x77
'Gan ', # 0x78
'Chou ', # 0x79
'Kago ', # 0x7a
'Gang ', # 0x7b
'Yun ', # 0x7c
'O ', # 0x7d
'Qian ', # 0x7e
'Xiao ', # 0x7f
'Jian ', # 0x80
'Pu ', # 0x81
'Lai ', # 0x82
'Zou ', # 0x83
'Bi ', # 0x84
'Bi ', # 0x85
'Bi ', # 0x86
'Ge ', # 0x87
'Chi ', # 0x88
'Guai ', # 0x89
'Yu ', # 0x8a
'Jian ', # 0x8b
'Zhao ', # 0x8c
'Gu ', # 0x8d
'Chi ', # 0x8e
'Zheng ', # 0x8f
'Jing ', # 0x90
'Sha ', # 0x91
'Zhou ', # 0x92
'Lu ', # 0x93
'Bo ', # 0x94
'Ji ', # 0x95
'Lin ', # 0x96
'Suan ', # 0x97
'Jun ', # 0x98
'Fu ', # 0x99
'Zha ', # 0x9a
'Gu ', # 0x9b
'Kong ', # 0x9c
'Qian ', # 0x9d
'Quan ', # 0x9e
'Jun ', # 0x9f
'Chui ', # 0xa0
'Guan ', # 0xa1
'Yuan ', # 0xa2
'Ce ', # 0xa3
'Ju ', # 0xa4
'Bo ', # 0xa5
'Ze ', # 0xa6
'Qie ', # 0xa7
'Tuo ', # 0xa8
'Luo ', # 0xa9
'Dan ', # 0xaa
'Xiao ', # 0xab
'Ruo ', # 0xac
'Jian ', # 0xad
'Xuan ', # 0xae
'Bian ', # 0xaf
'Sun ', # 0xb0
'Xiang ', # 0xb1
'Xian ', # 0xb2
'Ping ', # 0xb3
'Zhen ', # 0xb4
'Sheng ', # 0xb5
'Hu ', # 0xb6
'Shi ', # 0xb7
'Zhu ', # 0xb8
'Yue ', # 0xb9
'Chun ', # 0xba
'Lu ', # 0xbb
'Wu ', # 0xbc
'Dong ', # 0xbd
'Xiao ', # 0xbe
'Ji ', # 0xbf
'Jie ', # 0xc0
'Huang ', # 0xc1
'Xing ', # 0xc2
'Mei ', # 0xc3
'Fan ', # 0xc4
'Chui ', # 0xc5
'Zhuan ', # 0xc6
'Pian ', # 0xc7
'Feng ', # 0xc8
'Zhu ', # 0xc9
'Hong ', # 0xca
'Qie ', # 0xcb
'Hou ', # 0xcc
'Qiu ', # 0xcd
'Miao ', # 0xce
'Qian ', # 0xcf
'[?] ', # 0xd0
'Kui ', # 0xd1
'Sik ', # 0xd2
'Lou ', # 0xd3
'Yun ', # 0xd4
'He ', # 0xd5
'Tang ', # 0xd6
'Yue ', # 0xd7
'Chou ', # 0xd8
'Gao ', # 0xd9
'Fei ', # 0xda
'Ruo ', # 0xdb
'Zheng ', # 0xdc
'Gou ', # 0xdd
'Nie ', # 0xde
'Qian ', # 0xdf
'Xiao ', # 0xe0
'Cuan ', # 0xe1
'Gong ', # 0xe2
'Pang ', # 0xe3
'Du ', # 0xe4
'Li ', # 0xe5
'Bi ', # 0xe6
'Zhuo ', # 0xe7
'Chu ', # 0xe8
'Shai ', # 0xe9
'Chi ', # 0xea
'Zhu ', # 0xeb
'Qiang ', # 0xec
'Long ', # 0xed
'Lan ', # 0xee
'Jian ', # 0xef
'Bu ', # 0xf0
'Li ', # 0xf1
'Hui ', # 0xf2
'Bi ', # 0xf3
'Di ', # 0xf4
'Cong ', # 0xf5
'Yan ', # 0xf6
'Peng ', # 0xf7
'Sen ', # 0xf8
'Zhuan ', # 0xf9
'Pai ', # 0xfa
'Piao ', # 0xfb
'Dou ', # 0xfc
'Yu ', # 0xfd
'Mie ', # 0xfe
'Zhuan ', # 0xff
)
|
akanimax/CL-2_lab_2016
|
refs/heads/master
|
Assignment_B4/NN.py
|
1
|
''' Implementation of simple NN without tool '''
''' Coded by botMan '''
import csv
import random
import math
import operator
def loadDataset(filename, split, trainingSet=None, testSet=None):
    """Load the iris CSV file and randomly split rows into training/test sets.

    Each row's first 4 columns are converted to float; rows land in
    trainingSet with probability *split*, otherwise in testSet. Both
    output lists are mutated in place (callers pass their own lists).
    """
    # BUG FIX: the defaults were mutable literals ([]), which Python
    # evaluates once at definition time — repeated calls without explicit
    # lists would silently accumulate rows across calls.
    if trainingSet is None:
        trainingSet = []
    if testSet is None:
        testSet = []
    # 'rb' is the Python 2 csv convention; this module is Python 2 code
    # (print statements elsewhere in the file).
    with open(filename, 'rb') as csvfile:
        lines = csv.reader(csvfile)
        dataset = list(lines)
        for x in range(len(dataset)-1):
            for y in range(4):
                dataset[x][y] = float(dataset[x][y])
            if random.random() < split:
                trainingSet.append(dataset[x])
            else:
                testSet.append(dataset[x])
def euclideanDistance(instance1, instance2, length):
    """Return the Euclidean distance over the first *length* features."""
    squared_sum = sum((instance1[i] - instance2[i]) ** 2 for i in range(length))
    return math.sqrt(squared_sum)
def getNeighbors(trainingSet, testInstance, k):
    """Return the *k* training instances closest to *testInstance*.

    The last column of testInstance is assumed to be the class label and
    is excluded from the distance computation. Ties keep training order
    (stable sort).
    """
    feature_count = len(testInstance) - 1
    ranked = [(sample, euclideanDistance(testInstance, sample, feature_count))
              for sample in trainingSet]
    ranked.sort(key=operator.itemgetter(1))
    return [sample for sample, _ in ranked[:k]]
def getResponse(neighbors):
    """Return the majority class label among *neighbors*.

    Each neighbor is a row whose last element is its class label; ties are
    broken by the (stable) sort order of first occurrence.
    """
    classVotes = {}
    for x in range(len(neighbors)):
        response = neighbors[x][-1]
        if response in classVotes:
            classVotes[response] += 1
        else:
            classVotes[response] = 1
    # BUG FIX: dict.iteritems() exists only on Python 2; dict.items()
    # behaves identically here on Python 2 and also works on Python 3.
    sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1), reverse=True)
    return sortedVotes[0][0]
def getAccuracy(testSet, predictions):
    """Return the percentage of rows whose label (last column) was predicted."""
    correct = 0
    for idx, row in enumerate(testSet):
        if row[-1] == predictions[idx]:
            correct += 1
    return (correct / float(len(testSet))) * 100.0
def main():
    """Run the full KNN pipeline: load data, classify the test set, report accuracy."""
    # NOTE: this file is Python 2 (print statements below).
    # prepare data
    trainingSet=[]
    testSet=[]
    split = 0.67
    loadDataset('iris.data', split, trainingSet, testSet)
    print 'Train set: ' + repr(len(trainingSet))
    print 'Test set: ' + repr(len(testSet))
    # generate predictions
    predictions=[]
    k = 3
    for x in range(len(testSet)):
        neighbors = getNeighbors(trainingSet, testSet[x], k)
        result = getResponse(neighbors)
        predictions.append(result)
        print('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: ' + repr(accuracy) + '%')
main()
|
hexpl0it/plugin.video.genesi-ita
|
refs/heads/master
|
resources/lib/sources/yify_mv.py
|
3
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
class source:
    """Genesis scraper/resolver for yify.tv (Python 2: urllib/urlparse).

    Methods follow the Genesis source protocol: get_movie finds the page
    URL, get_sources extracts stream candidates, resolve finalizes a URL.
    All methods deliberately swallow exceptions and return None/[] so a
    broken site never crashes the addon (best-effort scraping).
    """
    def __init__(self):
        # Site endpoints: search goes through the WordPress admin-ajax
        # search page, pk_link resolves the embedded "pk" player streams.
        self.base_link = 'http://yify.tv'
        self.search_link = '/wp-admin/admin-ajax.php'
        self.search_link2 = '?s=%s'
        self.pk_link = '/player/pk/pk/plugins/player_p2.php'
    def get_movie(self, imdb, title, year):
        """Search the site for *title*/*year*; return the page path or None."""
        try:
            query = self.search_link2 % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            # Retry up to 5 times; client.source returns None on failure.
            for i in range(5):
                result = client.source(query, close=False)
                if not result == None: break
            result = client.parseDOM(result, 'section', attrs = {'id': 'contentrea'})[0]
            title = cleantitle.movie(title)
            # Accept the requested year and its immediate neighbours.
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            # Pair each anchor's href with its text, then progressively
            # narrow down to entries matching "<title> <year>".
            result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
            result = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+? \d{4})').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (\d{4})$').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if any(x in i[2] for x in years)]
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]
            # Strip the scheme/host so only the path is stored, if possible.
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect GVideo/YIFY stream dicts (1080p and HD) for a movie page."""
        try:
            sources = []
            if url == None: return sources
            base = urlparse.urljoin(self.base_link, url)
            for i in range(5):
                result = client.source(base, close=False)
                if not result == None: break
            # The player parameters are computed by inline JavaScript;
            # evaluate it with js2py to recover the "pic" stream ids.
            result = client.parseDOM(result, 'script', attrs = {'type': 'text/javascript'})
            result = [i for i in result if 'parametros;' in i][0]
            result = 'function' + result.split('function', 1)[-1]
            result = result.rsplit('parametros;', 1)[0] + 'parametros;'
            from resources.lib.libraries import js2py
            result = js2py.evaljs.eval_js(result)
            result = str(result)
            links = re.compile('pic=([^&]+)').findall(result)
            # De-duplicate while preserving first-seen order.
            links = [x for y,x in enumerate(links) if x not in links[:y]]
            for i in links:
                try:
                    # Ask the pk player endpoint for the actual stream URLs.
                    url = urlparse.urljoin(self.base_link, self.pk_link)
                    post = urllib.urlencode({'url': i, 'fv': '16', 'sou': 'pic'})
                    result = client.source(url, post=post, referer=base)
                    result = json.loads(result)
                    # width 1920 -> 1080p, width 1280 -> HD; 'google' in the
                    # URL distinguishes GVideo hosting from direct YIFY.
                    try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'YIFY', 'url': [i['url'] for i in result if i['width'] == 1920 and 'google' in i['url']][0]})
                    except: pass
                    try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'YIFY', 'url': [i['url'] for i in result if i['width'] == 1280 and 'google' in i['url']][0]})
                    except: pass
                    try: sources.append({'source': 'YIFY', 'quality': '1080p', 'provider': 'YIFY', 'url': [i['url'] for i in result if i['width'] == 1920 and not 'google' in i['url']][0]})
                    except: pass
                    try: sources.append({'source': 'YIFY', 'quality': 'HD', 'provider': 'YIFY', 'url': [i['url'] for i in result if i['width'] == 1280 and not 'google' in i['url']][0]})
                    except: pass
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """Normalize a stream URL; append a User-Agent for direct YIFY links."""
        try:
            if 'google' in url:
                # Follow redirects, then match the scheme to the
                # requiressl flag in the resolved URL.
                url = client.request(url, output='geturl')
                if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
                else: url = url.replace('https://', 'http://')
            else:
                url = '%s|User-Agent=%s' % (url, urllib.quote_plus(client.agent()))
            return url
        except:
            return
|
Open-Party/python-beaver
|
refs/heads/master
|
beaver/transports/sqs_transport.py
|
4
|
# -*- coding: utf-8 -*-
import boto.sqs
import uuid
from boto.sqs.message import Message
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class SqsTransport(BaseTransport):
    """Beaver transport shipping log lines to an AWS SQS queue (Python 2 / boto)."""
    def __init__(self, beaver_config, logger=None):
        super(SqsTransport, self).__init__(beaver_config, logger=logger)
        # AWS credentials and queue settings from the beaver config.
        self._access_key = beaver_config.get('sqs_aws_access_key')
        self._secret_key = beaver_config.get('sqs_aws_secret_key')
        self._region = beaver_config.get('sqs_aws_region')
        self._queue_name = beaver_config.get('sqs_aws_queue')
        self._queue_owner_acct_id = beaver_config.get('sqs_aws_queue_owner_acct_id')
        try:
            # With no explicit keys, defer to boto's own credential chain.
            if self._access_key is None and self._secret_key is None:
                self._connection = boto.sqs.connect_to_region(self._region)
            else:
                self._connection = boto.sqs.connect_to_region(self._region,
                                                              aws_access_key_id=self._access_key,
                                                              aws_secret_access_key=self._secret_key)
            if self._connection is None:
                self._logger.warn('Unable to connect to AWS - check your AWS credentials')
                raise TransportException('Unable to connect to AWS - check your AWS credentials')
            # Cross-account queues need the owner account id for lookup.
            if self._queue_owner_acct_id is None:
                self._queue = self._connection.get_queue(self._queue_name)
            else:
                self._queue = self._connection.get_queue(self._queue_name,
                                                         owner_acct_id=self._queue_owner_acct_id)
            if self._queue is None:
                raise TransportException('Unable to access queue with name {0}'.format(self._queue_name))
        except Exception, e:
            raise TransportException(e.message)
    def callback(self, filename, lines, **kwargs):
        """Format *lines* and batch-send them (<=10 messages / <250KB per batch)."""
        timestamp = self.get_timestamp(**kwargs)
        if kwargs.get('timestamp', False):
            del kwargs['timestamp']
        message_batch = []
        message_batch_size = 0
        message_batch_size_max = 250000 # Max 256KiB but leave some headroom
        for line in lines:
            # Build a Message only to measure the formatted payload size.
            m = Message()
            m.set_body(self.format(filename, line, timestamp, **kwargs))
            message_size = len(m)
            if (message_size > message_batch_size_max):
                self._logger.debug('Dropping the message as it is too large to send ({0} bytes)'.format(message_size))
                continue
            # SQS can only handle up to 10 messages in batch send and it can not exceed 256KiB (see above)
            # Check the new total size before adding a new message and don't try to send an empty batch
            if (len(message_batch) > 0) and (((message_batch_size + message_size) >= message_batch_size_max) or (len(message_batch) == 10)):
                self._logger.debug('Flushing {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
                self._send_message_batch(message_batch)
                message_batch = []
                message_batch_size = 0
            message_batch_size = message_batch_size + message_size
            # Batch entries are (id, body, delay_seconds) tuples.
            message_batch.append((uuid.uuid4(), self.format(filename, line, timestamp, **kwargs), 0))
        if len(message_batch) > 0:
            self._logger.debug('Flushing the last {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
            self._send_message_batch(message_batch)
        return True
    def _send_message_batch(self, message_batch):
        """Write one batch via boto; raise TransportException on any failure."""
        try:
            result = self._queue.write_batch(message_batch)
            if not result:
                self._logger.error('Error occurred sending messages to SQS queue {0}. result: {1}'.format(
                    self._queue_name, result))
                raise TransportException('Error occurred sending message to queue {0}'.format(self._queue_name))
        except Exception, e:
            self._logger.exception('Exception occurred sending batch to SQS queue')
            raise TransportException(e.message)
    def interrupt(self):
        # Nothing buffered outside callback(); nothing to flush on interrupt.
        return True
    def unhandled(self):
        return True
|
ChenJunor/hue
|
refs/heads/master
|
desktop/core/ext-py/kazoo-2.0/kazoo/tests/test_gevent_handler.py
|
36
|
import unittest
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from kazoo.protocol.states import Callback
from kazoo.testing import KazooTestCase
from kazoo.tests import test_client
class TestGeventHandler(unittest.TestCase):
    """Unit tests for kazoo's SequentialGeventHandler."""

    def setUp(self):
        # Skip the whole class when gevent is not installed.
        try:
            import gevent
        except ImportError:
            raise SkipTest('gevent not available.')

    def _makeOne(self, *args):
        from kazoo.handlers.gevent import SequentialGeventHandler
        return SequentialGeventHandler(*args)

    def _getAsync(self, *args):
        from kazoo.handlers.gevent import AsyncResult
        return AsyncResult

    def _getEvent(self):
        from gevent.event import Event
        return Event

    def test_proper_threading(self):
        h = self._makeOne()
        h.start()
        assert isinstance(h.event_object(), self._getEvent())

    def test_matching_async(self):
        h = self._makeOne()
        h.start()
        # BUG FIX: the local was previously named `async`, which became a
        # reserved keyword in Python 3.7 and makes this module a SyntaxError.
        async_result_cls = self._getAsync()
        assert isinstance(h.async_result(), async_result_cls)

    def test_exception_raising(self):
        h = self._makeOne()

        @raises(h.timeout_exception)
        def testit():
            raise h.timeout_exception("This is a timeout")
        testit()

    def test_exception_in_queue(self):
        # A callback that raises must not wedge the dispatch greenlet.
        h = self._makeOne()
        h.start()
        ev = self._getEvent()()

        def func():
            ev.set()
            raise ValueError('bang')

        call1 = Callback('completion', func, ())
        h.dispatch_callback(call1)
        ev.wait()

    def test_queue_empty_exception(self):
        # Raising gevent's own queue.Empty from a callback must also be safe.
        from gevent.queue import Empty
        h = self._makeOne()
        h.start()
        ev = self._getEvent()()

        def func():
            ev.set()
            raise Empty()

        call1 = Callback('completion', func, ())
        h.dispatch_callback(call1)
        ev.wait()
class TestBasicGeventClient(KazooTestCase):
    """Smoke tests for KazooClient when driven by the gevent handler."""
    def setUp(self):
        # Skip when gevent is not installed.
        try:
            import gevent
        except ImportError:
            raise SkipTest('gevent not available.')
        KazooTestCase.setUp(self)
    def _makeOne(self, *args):
        from kazoo.handlers.gevent import SequentialGeventHandler
        return SequentialGeventHandler(*args)
    def _getEvent(self):
        from gevent.event import Event
        return Event
    def test_start(self):
        client = self._get_client(handler=self._makeOne())
        client.start()
        self.assertEqual(client.state, 'CONNECTED')
        client.stop()
    def test_start_stop_double(self):
        client = self._get_client(handler=self._makeOne())
        client.start()
        self.assertEqual(client.state, 'CONNECTED')
        # Re-starting/stopping the handler of a live client must be harmless.
        client.handler.start()
        client.handler.stop()
        client.stop()
    def test_basic_commands(self):
        # create / get / delete / exists round-trip on a throwaway node.
        client = self._get_client(handler=self._makeOne())
        client.start()
        self.assertEqual(client.state, 'CONNECTED')
        client.create('/anode', 'fred')
        eq_(client.get('/anode')[0], 'fred')
        eq_(client.delete('/anode'), True)
        eq_(client.exists('/anode'), None)
        client.stop()
    def test_failures(self):
        client = self._get_client(handler=self._makeOne())
        client.start()
        self.assertRaises(NoNodeError, client.get, '/none')
        client.stop()
    def test_data_watcher(self):
        # The DataWatch decorator fires once on registration and once per set().
        client = self._get_client(handler=self._makeOne())
        client.start()
        client.ensure_path('/some/node')
        ev = self._getEvent()()
        @client.DataWatch('/some/node')
        def changed(d, stat):
            ev.set()
        ev.wait()
        ev.clear()
        client.set('/some/node', 'newvalue')
        ev.wait()
        client.stop()
class TestGeventClient(test_client.TestClient):
    """Re-run the generic client test suite with the gevent handler."""
    def setUp(self):
        # Skip when gevent is not installed.
        try:
            import gevent
        except ImportError:
            raise SkipTest('gevent not available.')
        KazooTestCase.setUp(self)
    def _makeOne(self, *args):
        from kazoo.handlers.gevent import SequentialGeventHandler
        return SequentialGeventHandler(*args)
    def _get_client(self, **kwargs):
        # Force every client created by the inherited tests onto gevent.
        kwargs["handler"] = self._makeOne()
        return KazooClient(self.hosts, **kwargs)
|
waynesun09/virt-test
|
refs/heads/master
|
virttest/libvirt_xml/devices/input.py
|
6
|
"""
input device support class(es)
http://libvirt.org/formatdomain.html#elementsInput
"""
from virttest.libvirt_xml.devices import base
class Input(base.TypedDeviceBase):
    """Stub XML wrapper for libvirt <input> devices (defaults to a mouse)."""
    # TODO: Write this class
    # StubDeviceMeta supplies placeholder behavior until then
    # (Python 2 metaclass assignment syntax).
    __metaclass__ = base.StubDeviceMeta
    _device_tag = 'input'      # element tag used in the domain XML
    _def_type_name = 'mouse'   # default value for the type= attribute
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/aws_service_ip_ranges.py
|
16
|
# (c) 2016 James Turner <turnerjsm@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: aws_service_ip_ranges
author:
- James Turner <turnerjsm@gmail.com>
version_added: "2.5"
requirements:
- must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
- AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
service:
description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
default: null
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
default: null
"""
EXAMPLES = """
vars:
ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
- name: "Pull S3 IP ranges, and print the default return style"
debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""
RETURN = """
_raw:
description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
    """Look up AWS public CIDR ranges, optionally filtered by region/service."""

    def run(self, terms, variables, **kwargs):
        # Fetch and decode the published ranges, translating each failure
        # mode into an AnsibleError with a specific message.
        try:
            ranges_doc = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
            prefixes = json.load(ranges_doc)['prefixes']
        except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
            # on Python 3+, json.decoder.JSONDecodeError is raised for bad
            # JSON. On 2.x it's a ValueError
            raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
        except HTTPError as e:
            raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
        except SSLValidationError as e:
            raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
        except URLError as e:
            raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
        except ConnectionError as e:
            raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))

        # Narrow lazily with generator expressions, then materialize once.
        if 'region' in kwargs:
            wanted_region = kwargs['region']
            prefixes = (entry for entry in prefixes if entry['region'] == wanted_region)
        if 'service' in kwargs:
            wanted_service = str.upper(kwargs['service'])
            prefixes = (entry for entry in prefixes if entry['service'] == wanted_service)
        return [entry['ip_prefix'] for entry in prefixes]
|
wxkdesky/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/python.py
|
120
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in Python files."""
import re
from StringIO import StringIO
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.autoinstalled import pep8
from webkitpy.thirdparty.autoinstalled.pylint import lint
from webkitpy.thirdparty.autoinstalled.pylint.reporters.text import ParseableTextReporter
class PythonChecker(object):
    """Processes text lines for checking style."""
    def __init__(self, file_path, handle_style_error):
        # handle_style_error(line_number, category, priority, message) is the
        # shared WebKit style-error callback.
        self._file_path = file_path
        self._handle_style_error = handle_style_error
    def check(self, lines):
        """Run both pep8 and pylint over the file."""
        self._check_pep8(lines)
        self._check_pylint(lines)
    def _check_pep8(self, lines):
        """Run pep8, routing its errors into the WebKit error handler."""
        # Initialize pep8.options, which is necessary for
        # Checker.check_all() to execute.
        pep8.process_options(arglist=[self._file_path])
        pep8_checker = pep8.Checker(self._file_path)
        def _pep8_handle_error(line_number, offset, text, check):
            # FIXME: Incorporate the character offset into the error output.
            # This will require updating the error handler __call__
            # signature to include an optional "offset" parameter.
            # pep8 text looks like "E501 line too long": code then message.
            pep8_code = text[:4]
            pep8_message = text[5:]
            category = "pep8/" + pep8_code
            self._handle_style_error(line_number, category, 5, pep8_message)
        # Monkey-patch pep8's reporter so errors go to our handler.
        pep8_checker.report_error = _pep8_handle_error
        pep8_errors = pep8_checker.check_all()
    def _check_pylint(self, lines):
        """Run pylint in errors-only mode and parse its parseable output."""
        pylinter = Pylinter()
        # FIXME: for now, we only report pylint errors, but we should be catching and
        # filtering warnings using the rules in style/checker.py instead.
        output = pylinter.run(['-E', self._file_path])
        # Parseable format: "<path>:<line>: [<code>, <method>] <message>".
        lint_regex = re.compile('([^:]+):([^:]+): \[([^]]+)\] (.*)')
        for error in output.getvalue().splitlines():
            match_obj = lint_regex.match(error)
            assert(match_obj)
            line_number = int(match_obj.group(2))
            category_and_method = match_obj.group(3).split(', ')
            category = 'pylint/' + (category_and_method[0])
            if len(category_and_method) > 1:
                message = '[%s] %s' % (category_and_method[1], match_obj.group(4))
            else:
                message = match_obj.group(4)
            self._handle_style_error(line_number, category, 5, message)
class Pylinter(object):
    """Thin wrapper around pylint that runs it with the WebKit rcfile and
    filters out known-bogus diagnostics from its output."""

    # We filter out these messages because they are bugs in pylint that
    # produce false positives.
    # FIXME: Does it make sense to combine these rules with the rules in
    # style/checker.py somehow?
    FALSE_POSITIVES = [
        # possibly http://www.logilab.org/ticket/98613 ?
        "Instance of 'Popen' has no 'poll' member",
        "Instance of 'Popen' has no 'returncode' member",
        "Instance of 'Popen' has no 'stdin' member",
        "Instance of 'Popen' has no 'stdout' member",
        "Instance of 'Popen' has no 'stderr' member",
        "Instance of 'Popen' has no 'wait' member",
        "Instance of 'Popen' has no 'pid' member",
    ]

    def __init__(self):
        finder = WebKitFinder(FileSystem())
        self._pylintrc = finder.path_from_webkit_base(
            'Tools', 'Scripts', 'webkitpy', 'pylintrc')

    def run(self, argv):
        """Run pylint with the given extra arguments.

        Returns the output buffer, with known false positives removed.
        """
        buffered_output = _FilteredStringIO(self.FALSE_POSITIVES)
        command = ['--rcfile', self._pylintrc] + argv
        lint.Run(command,
                 reporter=ParseableTextReporter(output=buffered_output),
                 exit=False)
        return buffered_output
class _FilteredStringIO(StringIO):
def __init__(self, bad_messages):
StringIO.__init__(self)
self.dropped_last_msg = False
self.bad_messages = bad_messages
def write(self, msg=''):
if not self._filter(msg):
StringIO.write(self, msg)
def _filter(self, msg):
if any(bad_message in msg for bad_message in self.bad_messages):
self.dropped_last_msg = True
return True
if self.dropped_last_msg:
# We drop the newline after a dropped message as well.
self.dropped_last_msg = False
if msg == '\n':
return True
return False
|
RyanYoung25/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/gradient_checker_test.py
|
1
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.gradient_checker."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GradientCheckerTest(tf.test.TestCase):
    """Checks numerical vs. analytical gradients for a few simple ops."""

    def testAddSimple(self):
        np.random.seed(1)  # Fix seed to avoid flakiness
        with self.test_session(use_gpu=False):
            # a test case for Add operation
            size = (2, 3)
            x1 = tf.constant(2.0, shape=size, name="x1")
            x2 = tf.constant(3.0, shape=size, name="x2")
            y = tf.add(x1, x2, name="y")
            # checking gradients for x1
            error = tf.test.compute_gradient_error(x1, size, y, size)
            tf.logging.info("x1 error = %f", error)
            # Use assertLess (not a bare assert) so the check survives
            # 'python -O' and matches the style of MiniMNISTTest below.
            self.assertLess(error, 1e-4)

    def testAddSimpleGPU(self):
        np.random.seed(2)  # Fix seed to avoid flakiness
        with self.test_session(use_gpu=True):
            # a test case for Add operation
            size = (2, 3)
            x1 = tf.constant(2.0, shape=size, name="x1")
            x2 = tf.constant(3.0, shape=size, name="x2")
            y = tf.add(x1, x2, name="y")
            # checking gradients for x1
            error = tf.test.compute_gradient_error(x1, size, y, size)
            tf.logging.info("x1 error = %f", error)
            self.assertLess(error, 1e-4)

    def testAddCustomized(self):
        np.random.seed(3)  # Fix seed to avoid flakiness
        with self.test_session():
            # a test case for Add operation
            size = (2, 3)
            x1 = tf.constant(2.0, shape=size, dtype=tf.float64,
                             name="x1")
            x2 = tf.constant(3.0, shape=size, dtype=tf.float64,
                             name="x2")
            y = tf.add(x1, x2, name="y")
            # checking gradients for x2 using a special init_value and delta
            x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
            error = tf.test.compute_gradient_error(x2,
                                                   size,
                                                   y,
                                                   size,
                                                   x_init_value=x_init_value,
                                                   delta=1e-2)
            tf.logging.info("x2 error = %f", error)
            self.assertLess(error, 1e-10)

    def testGather(self):
        np.random.seed(4)  # Fix seed to avoid flakiness
        with self.test_session():
            p_shape = (4, 2)
            p_size = 8
            index_values = [1, 3]
            y_shape = [2, 2]
            # np.float was removed in NumPy 1.24; the builtin float is the
            # equivalent 64-bit scalar type.
            params = tf.constant(np.arange(p_size).astype(float),
                                 shape=p_shape, name="p")
            indices = tf.constant(index_values, name="i")
            y = tf.gather(params, indices, name="y")
            error = tf.test.compute_gradient_error(params, p_shape, y, y_shape)
            tf.logging.info("gather error = %f", error)
            self.assertLess(error, 1e-4)

    def testNestedGather(self):
        np.random.seed(5)  # Fix seed to avoid flakiness
        with self.test_session():
            p_shape = (8, 2)
            p_size = 16
            index_values = [1, 3, 5, 6]
            index_values2 = [0, 2]
            y2_shape = [2, 2]
            params = tf.constant(np.arange(p_size).astype(float),
                                 shape=p_shape, name="p")
            indices = tf.constant(index_values, name="i")
            y = tf.gather(params, indices, name="y")
            indices2 = tf.constant(index_values2, name="i2")
            y2 = tf.gather(y, indices2, name="y2")
            error = tf.test.compute_gradient_error(params, p_shape, y2, y2_shape)
            tf.logging.info("nested gather error = %f", error)
            self.assertLess(error, 1e-4)
# Gradient checker for MNIST.
def BuildAndTestMiniMNIST(param_index, tag):
    """Build a tiny fixed-weight 2-layer MNIST-like graph and gradient-check it.

    Args:
      param_index: index into [inp, hidden_weight, hidden_bias,
        softmax_weight, softmax_bias] selecting which tensor to check.
      tag: human-readable name, used only for logging.

    Returns:
      The numerical-vs-analytical gradient error reported by
      tf.test.compute_gradient_error for the selected parameter.
    """
    # Fix seed to avoid occasional flakiness
    np.random.seed(6)

    # Hyperparameters
    batch = 3
    inputs = 16
    features = 32
    classes = 10

    # Define the parameters
    inp_data = np.random.random_sample(inputs * batch)
    hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
    hidden_bias_data = np.random.random_sample(features)
    sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
    sm_bias_data = np.random.random_sample(classes)

    # special care for labels since they need to be normalized per batch
    label_data = np.random.random(batch * classes).reshape((batch, classes))
    s = label_data.sum(axis=1)
    label_data /= s[:, None]

    with tf.Session():
        # We treat the inputs as "parameters" here
        inp = tf.constant(inp_data.tolist(), shape=[batch, inputs],
                          dtype=tf.float64, name="inp")
        hidden_weight = tf.constant(hidden_weight_data.tolist(),
                                    shape=[inputs, features],
                                    dtype=tf.float64,
                                    name="hidden_weight")
        hidden_bias = tf.constant(hidden_bias_data.tolist(),
                                  shape=[features],
                                  dtype=tf.float64,
                                  name="hidden_bias")
        softmax_weight = tf.constant(sm_weight_data.tolist(),
                                     shape=[features, classes],
                                     dtype=tf.float64,
                                     name="softmax_weight")
        softmax_bias = tf.constant(sm_bias_data.tolist(), shape=[classes],
                                   dtype=tf.float64,
                                   name="softmax_bias")

        # List all the parameter so that we can test them one at a time
        all_params = [inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias]
        param_sizes = [[batch, inputs],  # inp
                       [inputs, features],  # hidden_weight,
                       [features],  # hidden_bias
                       [features, classes],  # softmax_weight,
                       [classes]]  # softmax_bias

        # Now, Building MNIST
        # NOTE: 'features' is rebound here from the int hyperparameter to the
        # hidden-layer activation tensor; param_sizes above already captured
        # the integer value, so this shadowing is harmless but easy to miss.
        features = tf.nn.relu(tf.nn.xw_plus_b(inp, hidden_weight, hidden_bias),
                              name="features")
        logits = tf.nn.xw_plus_b(features, softmax_weight, softmax_bias,
                                 name="logits")
        labels = tf.constant(label_data.tolist(),
                             shape=[batch, classes],
                             dtype=tf.float64,
                             name="labels")
        cost = tf.nn.softmax_cross_entropy_with_logits(logits, labels, name="cost")

        # Test the gradients.
        err = tf.test.compute_gradient_error(all_params[param_index],
                                             param_sizes[param_index],
                                             cost,
                                             [batch],
                                             delta=1e-5)

    tf.logging.info("Mini MNIST: %s gradient error = %g", tag, err)
    return err
class MiniMNISTTest(tf.test.TestCase):
    """Gradient-checks each parameter of the mini-MNIST graph in turn."""

    def testInputGradient(self):
        err = BuildAndTestMiniMNIST(0, "input")
        self.assertLess(err, 1e-8)

    def testHiddenWeightGradient(self):
        err = BuildAndTestMiniMNIST(1, "hidden_weight")
        self.assertLess(err, 1e-8)

    def testHiddenBiasGradient(self):
        err = BuildAndTestMiniMNIST(2, "hidden_bias")
        self.assertLess(err, 1e-8)

    def testSoftmaxWeightGradient(self):
        err = BuildAndTestMiniMNIST(3, "softmax_weight")
        self.assertLess(err, 1e-8)

    def testSoftmaxBiasGradient(self):
        err = BuildAndTestMiniMNIST(4, "softmax_bias")
        self.assertLess(err, 1e-8)
if __name__ == "__main__":
    # Run all test cases in this module via the TensorFlow test runner.
    tf.test.main()
|
darkleons/lama
|
refs/heads/master
|
openerp/addons/test_convert/__openerp__.py
|
437
|
# OpenERP/Odoo module manifest: metadata for the 'test_convert' addon.
# The dict literal is the module's only content; it is evaluated by the
# addon loader, not imported as Python code.
{
    'name': 'test_convert',
    'description': "Data for xml conversion tests",
    'version': '0.0.1',
}
|
idlead/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/nmf.py
|
5
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Mathieu Blondel <mathieu@mblondel.org>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
    """Stack matrices vertically, choosing the sparse-aware routine when
    any input is a scipy sparse matrix (np.vstack would mishandle those)."""
    has_sparse = any(sp.issparse(X) for X in Xs)
    if has_sparse:
        return sp.vstack(Xs)
    return np.vstack(Xs)
def norm(x):
    """Euclidean norm of x, computed via a dot product for speed.

    See: http://fseoane.net/blog/2011/computing-the-vector-norm/
    """
    squared = squared_norm(x)
    return sqrt(squared)
def trace_dot(X, Y):
    """Trace of np.dot(X, Y.T), i.e. the sum of elementwise products,
    computed without forming the matrix product."""
    return np.dot(np.ravel(X), np.ravel(Y))
def _sparseness(x):
    """Hoyer's measure of sparsity for a vector."""
    sqrt_n = np.sqrt(len(x))
    l1 = np.linalg.norm(x, 1)
    l2 = norm(x)
    return (sqrt_n - l1 / l2) / (sqrt_n - 1)
def _check_init(A, shape, whom):
    """Validate a user-supplied initialization matrix for `whom`.

    Raises ValueError when A has the wrong shape, contains negative
    entries, or is identically zero.
    """
    A = check_array(A)
    actual_shape = np.shape(A)
    if actual_shape != shape:
        raise ValueError('Array with wrong shape passed to %s. Expected %s, '
                         'but got %s ' % (whom, shape, actual_shape))
    check_non_negative(A, whom)
    if np.max(A) == 0:
        raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
    """Frobenius norm of (X - WH), safe for sparse X.

    For sparse X the norm is expanded as
    ||X - WH||^2 = ||X||^2 + ||WH||^2 - 2<X, WH>
    so the dense product WH is never materialized.
    """
    if not sp.issparse(X):
        return norm(X - np.dot(W, H))
    norm_X = np.dot(X.data, X.data)
    norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
    cross_prod = trace_dot((X * H.T), W)
    return sqrt(norm_X + norm_WH - 2. * cross_prod)
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
                    random_state=None):
    """Algorithms for NMF initialization.

    Computes an initial guess for the non-negative
    rank k matrix approximation for X: X = WH

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix to be decomposed.

    n_components : integer
        The number of components desired in the approximation.

    init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
        Valid options:

        'random': non-negative random matrices, scaled with:
            sqrt(X.mean() / n_components)

        'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
            initialization (better for sparseness)

        'nndsvda': NNDSVD with zeros filled with the average of X
            (better when sparsity is not desired)

        'nndsvdar': NNDSVD with zeros filled with small random values
            (generally faster, less accurate alternative to NNDSVDa
            for when sparsity is not desired)

    eps: float
        Truncate all values less then this in output to zero.

    random_state : int seed, RandomState instance, or None (default)
        Random number generator seed control, used in 'nndsvdar' and
        'random' modes.

    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Initial guesses for solving X ~= WH

    H : array-like, shape (n_components, n_features)
        Initial guesses for solving X ~= WH

    References
    ----------
    C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
    nonnegative matrix factorization - Pattern Recognition, 2008
    http://tinyurl.com/nndsvd
    """
    check_non_negative(X, "NMF initialization")
    n_samples, n_features = X.shape

    if init is None:
        if n_components < n_features:
            init = 'nndsvd'
        else:
            init = 'random'

    # Random initialization
    if init == 'random':
        avg = np.sqrt(X.mean() / n_components)
        rng = check_random_state(random_state)
        H = avg * rng.randn(n_components, n_features)
        W = avg * rng.randn(n_samples, n_components)
        # we do not write np.abs(H, out=H) to stay compatible with
        # numpy 1.5 and earlier where the 'out' keyword is not
        # supported as a kwarg on ufuncs
        np.abs(H, H)
        np.abs(W, W)
        return W, H

    # NNDSVD initialization
    U, S, V = randomized_svd(X, n_components, random_state=random_state)
    W, H = np.zeros(U.shape), np.zeros(V.shape)

    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])

    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]

        # extract positive and negative parts of column vectors
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))

        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)

        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm

        # choose update: keep whichever sign pattern carries more energy
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n

        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v

    # Truncate tiny entries to exact zero so the factors start sparse.
    W[W < eps] = 0
    H[H < eps] = 0

    if init == "nndsvd":
        pass
    elif init == "nndsvda":
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif init == "nndsvdar":
        rng = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
    else:
        # NOTE: an invalid init is only detected here, after the SVD above
        # has already been computed.
        raise ValueError(
            'Invalid init parameter: got %r instead of one of %r' %
            (init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))

    return W, H
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
                    sigma=0.01, beta=0.1):
    """Non-negative least square solver

    Solves a non-negative least squares subproblem using the projected
    gradient descent algorithm.

    Parameters
    ----------
    V : array-like, shape (n_samples, n_features)
        Constant matrix.

    W : array-like, shape (n_samples, n_components)
        Constant matrix.

    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.

    tol : float
        Tolerance of the stopping condition.

    max_iter : int
        Maximum number of iterations before timing out.

    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.

    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.

    sigma : float
        Constant used in the sufficient decrease condition checked by the line
        search. Smaller values lead to a looser sufficient decrease condition,
        thus reducing the time taken by the line search, but potentially
        increasing the number of iterations of the projected gradient
        procedure. 0.01 is a commonly used value in the optimization
        literature.

    beta : float
        Factor by which the step size is decreased (resp. increased) until
        (resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow to find a better step size but lead to longer line
        search. 0.1 is a commonly used value in the optimization literature.

    Returns
    -------
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    grad : array-like, shape (n_components, n_features)
        The gradient.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    """
    # Precompute the two Gram-style products; W and V are constant, so the
    # per-iteration gradient is just dot(WtW, H) - WtV.
    WtV = safe_sparse_dot(W.T, V)
    WtW = fast_dot(W.T, W)

    # values justified in the paper (alpha is renamed gamma)
    gamma = 1
    for n_iter in range(1, max_iter + 1):
        grad = np.dot(WtW, H) - WtV
        if alpha > 0 and l1_ratio == 1.:
            grad += alpha
        elif alpha > 0:
            grad += alpha * (l1_ratio + (1 - l1_ratio) * H)

        # The following multiplication with a boolean array is more than twice
        # as fast as indexing into grad.
        if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
            break

        Hp = H

        # Armijo-style line search over the step size gamma (at most 20 trials).
        for inner_iter in range(20):
            # Gradient step.
            Hn = H - gamma * grad
            # Projection step.
            Hn *= Hn > 0
            d = Hn - H
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
            if inner_iter == 0:
                # First trial decides whether gamma shrinks or grows this round.
                decr_gamma = not suff_decr

            if decr_gamma:
                if suff_decr:
                    H = Hn
                    break
                else:
                    gamma *= beta
            elif not suff_decr or (Hp == Hn).all():
                # Step stopped improving (or stalled): keep the previous iterate.
                H = Hp
                break
            else:
                gamma /= beta
                Hp = Hn

    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.")

    return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
                                 sparseness, beta, eta):
    """Helper function for _fit_projected_gradient

    Updates W by solving the NLS subproblem on the transposed system
    (X.T ~= H.T W.T). Under 'data'/'components' sparseness, extra rows
    are stacked onto the system to encode the penalty terms: a
    beta-scaled all-ones row for 'data', an eta-scaled identity block
    for 'components' (note _update_projected_gradient_h uses the
    opposite pairing).
    """
    n_samples, n_features = X.shape  # n_features is unused here
    n_components_ = H.shape[0]
    if sparseness is None:
        Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
                                           alpha=alpha, l1_ratio=l1_ratio)
    elif sparseness == 'data':
        Wt, gradW, iterW = _nls_subproblem(
            safe_vstack([X.T, np.zeros((1, n_samples))]),
            safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
                                                       n_components_))]),
            W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
    elif sparseness == 'components':
        Wt, gradW, iterW = _nls_subproblem(
            safe_vstack([X.T,
                         np.zeros((n_components_, n_samples))]),
            safe_vstack([H.T,
                         np.sqrt(eta) * np.eye(n_components_)]),
            W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
    # NOTE(review): an unrecognized 'sparseness' value would hit a NameError
    # on Wt here; callers are expected to have run _check_string_param first.
    return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
                                 sparseness, beta, eta):
    """Helper function for _fit_projected_gradient

    Updates H by solving the NLS subproblem X ~= W H directly. Under
    'data'/'components' sparseness, extra rows encode the penalties:
    an eta-scaled identity block for 'data', a beta-scaled all-ones
    row for 'components' — the mirror image of the pairing used in
    _update_projected_gradient_w.
    """
    n_samples, n_features = X.shape  # n_samples is unused here
    n_components_ = W.shape[1]
    if sparseness is None:
        H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
                                          alpha=alpha, l1_ratio=l1_ratio)
    elif sparseness == 'data':
        H, gradH, iterH = _nls_subproblem(
            safe_vstack([X, np.zeros((n_components_, n_features))]),
            safe_vstack([W,
                         np.sqrt(eta) * np.eye(n_components_)]),
            H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
    elif sparseness == 'components':
        H, gradH, iterH = _nls_subproblem(
            safe_vstack([X, np.zeros((1, n_features))]),
            safe_vstack([W,
                         np.sqrt(beta)
                         * np.ones((1, n_components_))]),
            H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
    # NOTE(review): as in _update_projected_gradient_w, 'sparseness' is
    # assumed to be pre-validated by _check_string_param.
    return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
                            nls_max_iter, alpha, l1_ratio,
                            sparseness, beta, eta):
    """Compute Non-negative Matrix Factorization (NMF) with Projected Gradient

    Alternates NLS solves for W and H, tightening the per-factor tolerances
    whenever a subproblem converges in a single iteration.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/

    P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
    Journal of Machine Learning Research 2004.
    """
    gradW = (np.dot(W, np.dot(H, H.T))
             - safe_sparse_dot(X, H.T, dense_output=True))
    gradH = (np.dot(np.dot(W.T, W), H)
             - safe_sparse_dot(W.T, X, dense_output=True))

    init_grad = squared_norm(gradW) + squared_norm(gradH.T)
    # max(0.001, tol) to force alternating minimizations of W and H
    tolW = max(0.001, tol) * np.sqrt(init_grad)
    tolH = tolW

    for n_iter in range(1, max_iter + 1):
        # stopping condition
        # as discussed in paper
        proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
        proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))

        if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
            break

        # update W
        W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
                                                       nls_max_iter,
                                                       alpha, l1_ratio,
                                                       sparseness, beta, eta)
        if iterW == 1:
            # Subproblem converged immediately: demand more precision next time.
            tolW = 0.1 * tolW

        # update H
        H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
                                                       nls_max_iter,
                                                       alpha, l1_ratio,
                                                       sparseness, beta, eta)
        if iterH == 1:
            tolH = 0.1 * tolH

    H[H == 0] = 0  # fix up negative zeros

    if n_iter == max_iter:
        # One final W refresh at the global tolerance before giving up.
        W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
                                               alpha, l1_ratio, sparseness,
                                               beta, eta)

    return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
                               random_state):
    """Helper function for _fit_coordinate_descent

    Update W to minimize the objective function, iterating once over all
    coordinates. By symmetry, to update H, one can call
    _update_coordinate_descent(X.T, Ht, W, ...)
    """
    n_components = Ht.shape[1]

    gram = fast_dot(Ht.T, Ht)
    cross = safe_sparse_dot(X, Ht)

    # L2 regularization amounts to adding l2_reg on the diagonal of Ht.T @ Ht.
    if l2_reg != 0.:
        gram.flat[::n_components + 1] += l2_reg
    # L1 regularization amounts to subtracting l1_reg from each entry of X @ Ht.
    if l1_reg != 0.:
        cross -= l1_reg

    if shuffle:
        permutation = random_state.permutation(n_components)
    else:
        permutation = np.arange(n_components)
    # The following seems to be required on 64-bit Windows w/ Python 3.5.
    permutation = np.asarray(permutation, dtype=np.intp)
    return _update_cdnmf_fast(W, gram, cross, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
                            l1_ratio=0., regularization=None, update_H=True,
                            verbose=0, shuffle=False, random_state=None):
    """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent

    The objective function is minimized with an alternating minimization of W
    and H. Each minimization is done with a cyclic (up to a permutation of the
    features) Coordinate Descent.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.

    W : array-like, shape (n_samples, n_components)
        Initial guess for the solution.

    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.

    tol : float, default: 1e-4
        Tolerance of the stopping condition.

    max_iter : integer, default: 200
        Maximum number of iterations before timing out.

    alpha : double, default: 0.
        Constant that multiplies the regularization terms.

    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.

    regularization : 'both' | 'components' | 'transformation' | None
        Select whether the regularization affects the components (H), the
        transformation (W), both or none of them.

    update_H : boolean, default: True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.

    verbose : integer, default: 0
        The verbosity level.

    shuffle : boolean, default: False
        If true, randomize the order of coordinates in the CD solver.

    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.

    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Solution to the non-negative least squares problem.

    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    # so W and Ht are both in C order in memory
    Ht = check_array(H.T, order='C')
    X = check_array(X, accept_sparse='csr')

    # L1 and L2 regularization
    l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
    if regularization in ('both', 'components'):
        alpha = float(alpha)
        l1_H = l1_ratio * alpha
        l2_H = (1. - l1_ratio) * alpha
    if regularization in ('both', 'transformation'):
        alpha = float(alpha)
        l1_W = l1_ratio * alpha
        l2_W = (1. - l1_ratio) * alpha

    rng = check_random_state(random_state)

    for n_iter in range(max_iter):
        violation = 0.

        # Update W
        violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
                                                shuffle, rng)
        # Update H
        if update_H:
            violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
                                                    shuffle, rng)

        # Convergence is measured relative to the violation of the very first
        # sweep; violation_init is therefore always set before it is read.
        if n_iter == 0:
            violation_init = violation

        if violation_init == 0:
            break

        if verbose:
            print("violation:", violation / violation_init)

        if violation / violation_init <= tol:
            if verbose:
                print("Converged at iteration", n_iter + 1)
            break

    return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
                               init='random', update_H=True, solver='cd',
                               tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
                               regularization=None, random_state=None,
                               verbose=0, shuffle=False, nls_max_iter=2000,
                               sparseness=None, beta=1, eta=0.1):
    """Compute Non-negative Matrix Factorization (NMF)

    Find two non-negative matrices (W, H) whose product approximates the non-
    negative matrix X. This factorization can be used for example for
    dimensionality reduction, source separation or topic extraction.

    The objective function is::

        0.5 * ||X - WH||_Fro^2
        + alpha * l1_ratio * ||vec(W)||_1
        + alpha * l1_ratio * ||vec(H)||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
        + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2

    Where::

        ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
        ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)

    The objective function is minimized with an alternating minimization of W
    and H. If H is given and update_H=False, it solves for W only.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.

    W : array-like, shape (n_samples, n_components)
        If init='custom', it is used as initial guess for the solution.

    H : array-like, shape (n_components, n_features)
        If init='custom', it is used as initial guess for the solution.
        If update_H=False, it is used as a constant, to solve for W only.

    n_components : integer
        Number of components, if n_components is not set all features
        are kept.

    init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
        Valid options::

            'random': non-negative random matrices, scaled with:
                sqrt(X.mean() / n_components)

            'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
                initialization (better for sparseness)

            'nndsvda': NNDSVD with zeros filled with the average of X
                (better when sparsity is not desired)

            'nndsvdar': NNDSVD with zeros filled with small random values
                (generally faster, less accurate alternative to NNDSVDa
                for when sparsity is not desired)

            'custom': use custom matrices W and H

    update_H : boolean, default: True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.

    solver : 'pg' | 'cd'
        Numerical solver to use:
        'pg' is a (deprecated) Projected Gradient solver.
        'cd' is a Coordinate Descent solver.

    tol : float, default: 1e-4
        Tolerance of the stopping condition.

    max_iter : integer, default: 200
        Maximum number of iterations before timing out.

    alpha : double, default: 0.
        Constant that multiplies the regularization terms.

    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an elementwise L2 penalty
        (aka Frobenius Norm).
        For l1_ratio = 1 it is an elementwise L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.

    regularization : 'both' | 'components' | 'transformation' | None
        Select whether the regularization affects the components (H), the
        transformation (W), both or none of them.

    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.

    verbose : integer, default: 0
        The verbosity level.

    shuffle : boolean, default: False
        If true, randomize the order of coordinates in the CD solver.

    nls_max_iter : integer, default: 2000
        Number of iterations in NLS subproblem.
        Used only in the deprecated 'pg' solver.

    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
        Used only in the deprecated 'pg' solver.

    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness. Used only in the deprecated 'pg' solver.

    eta : double, default: 0.1
        Degree of correctness to maintain, if sparsity is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.

    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Solution to the non-negative least squares problem.

    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    n_iter : int
        Actual number of iterations.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/

    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    # Validate the inputs before doing any work.
    X = check_array(X, accept_sparse=('csr', 'csc'))
    check_non_negative(X, "NMF (input X)")
    _check_string_param(sparseness, solver)

    n_samples, n_features = X.shape
    if n_components is None:
        n_components = n_features

    if not isinstance(n_components, six.integer_types) or n_components <= 0:
        raise ValueError("Number of components must be positive;"
                         " got (n_components=%r)" % n_components)
    if not isinstance(max_iter, numbers.Number) or max_iter < 0:
        raise ValueError("Maximum number of iteration must be positive;"
                         " got (max_iter=%r)" % max_iter)
    if not isinstance(tol, numbers.Number) or tol < 0:
        raise ValueError("Tolerance for stopping criteria must be "
                         "positive; got (tol=%r)" % tol)

    # check W and H, or initialize them
    if init == 'custom':
        _check_init(H, (n_components, n_features), "NMF (input H)")
        _check_init(W, (n_samples, n_components), "NMF (input W)")
    elif not update_H:
        # H is fixed; W is solved from scratch, so start from zeros.
        _check_init(H, (n_components, n_features), "NMF (input H)")
        W = np.zeros((n_samples, n_components))
    else:
        W, H = _initialize_nmf(X, n_components, init=init,
                               random_state=random_state)

    if solver == 'pg':
        warnings.warn("'pg' solver will be removed in release 0.19."
                      " Use 'cd' solver instead.", DeprecationWarning)
        if update_H:  # fit_transform
            W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
                                                   max_iter,
                                                   nls_max_iter,
                                                   alpha, l1_ratio,
                                                   sparseness,
                                                   beta, eta)
        else:  # transform
            # NOTE(review): _update_projected_gradient_w returns
            # (W, gradient_of_W, n_iter), so H is rebound to the gradient
            # here rather than the input H. Transform callers appear to use
            # only W — confirm this is intended.
            W, H, n_iter = _update_projected_gradient_w(X, W, H,
                                                        tol, nls_max_iter,
                                                        alpha, l1_ratio,
                                                        sparseness, beta,
                                                        eta)
    elif solver == 'cd':
        W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
                                               max_iter,
                                               alpha, l1_ratio,
                                               regularization,
                                               update_H=update_H,
                                               verbose=verbose,
                                               shuffle=shuffle,
                                               random_state=random_state)
    else:
        raise ValueError("Invalid solver parameter '%s'." % solver)

    if n_iter == max_iter:
        warnings.warn("Maximum number of iteration %d reached. Increase it to"
                      " improve convergence." % max_iter, ConvergenceWarning)

    return W, H, n_iter
class NMF(BaseEstimator, TransformerMixin):
    """Non-Negative Matrix Factorization (NMF)
    Find two non-negative matrices (W, H) whose product approximates the non-
    negative matrix X. This factorization can be used for example for
    dimensionality reduction, source separation or topic extraction.
    The objective function is::
        0.5 * ||X - WH||_Fro^2
        + alpha * l1_ratio * ||vec(W)||_1
        + alpha * l1_ratio * ||vec(H)||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
        + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
    Where::
        ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
        ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
    The objective function is minimized with an alternating minimization of W
    and H.
    Read more in the :ref:`User Guide <NMF>`.
    Parameters
    ----------
    n_components : int or None
        Number of components, if n_components is not set all features
        are kept.
    init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
        Method used to initialize the procedure.
        Default: 'nndsvdar' if n_components < n_features, otherwise random.
        Valid options::
            'random': non-negative random matrices
            'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
                initialization (better for sparseness)
            'nndsvda': NNDSVD with zeros filled with the average of X
                (better when sparsity is not desired)
            'nndsvdar': NNDSVD with zeros filled with small random values
                (generally faster, less accurate alternative to NNDSVDa
                for when sparsity is not desired)
            'custom': use custom matrices W and H, given in 'fit' method.
    solver : 'pg' | 'cd'
        Numerical solver to use:
        'pg' is a Projected Gradient solver (deprecated).
        'cd' is a Coordinate Descent solver (recommended).
        .. versionadded:: 0.17
           Coordinate Descent solver.
        .. versionchanged:: 0.17
           Deprecated Projected Gradient solver.
    tol : double, default: 1e-4
        Tolerance value used in stopping conditions.
    max_iter : integer, default: 200
        Number of iterations to compute.
    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.
        .. versionadded:: 0.17
           *alpha* used in the Coordinate Descent solver.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an elementwise L2 penalty
        (aka Frobenius Norm).
        For l1_ratio = 1 it is an elementwise L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
        .. versionadded:: 0.17
           Regularization parameter *l1_ratio* used in the Coordinate Descent solver.
    verbose : integer, default: 0
        The verbosity level.
    shuffle : boolean, default: False
        If true, randomize the order of coordinates in the CD solver.
        .. versionadded:: 0.17
           *shuffle* parameter used in the Coordinate Descent solver.
    nls_max_iter : integer, default: 2000
        Number of iterations in NLS subproblem.
        Used only in the deprecated 'pg' solver.
        .. versionchanged:: 0.17
           Deprecated Projected Gradient solver. Use Coordinate Descent solver
           instead.
    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
        Used only in the deprecated 'pg' solver.
        .. versionchanged:: 0.17
           Deprecated Projected Gradient solver. Use Coordinate Descent solver
           instead.
    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness. Used only in the deprecated 'pg' solver.
        .. versionchanged:: 0.17
           Deprecated Projected Gradient solver. Use Coordinate Descent solver
           instead.
    eta : double, default: 0.1
        Degree of correctness to maintain, if sparsity is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.
        .. versionchanged:: 0.17
           Deprecated Projected Gradient solver. Use Coordinate Descent solver
           instead.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Non-negative components of the data.
    reconstruction_err_ : number
        Frobenius norm of the matrix difference between
        the training data and the reconstructed data from
        the fit produced by the model. ``|| X - WH ||_2``
    n_iter_ : int
        Actual number of iterations.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    >>> from sklearn.decomposition import NMF
    >>> model = NMF(n_components=2, init='random', random_state=0)
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
      n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
      solver='cd', sparseness=None, tol=0.0001, verbose=0)
    >>> model.components_
    array([[ 2.09783018,  0.30560234],
           [ 2.13443044,  2.13171694]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.00115993...
    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    def __init__(self, n_components=None, init=None, solver='cd',
                 tol=1e-4, max_iter=200, random_state=None,
                 alpha=0., l1_ratio=0., verbose=0, shuffle=False,
                 nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
        self.n_components = n_components
        self.init = init
        self.solver = solver
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.verbose = verbose
        self.shuffle = shuffle
        # sparseness/beta/eta only affect the deprecated 'pg' solver; warn at
        # construction time so the user sees it before fitting.
        if sparseness is not None:
            warnings.warn("Controlling regularization through the sparseness,"
                          " beta and eta arguments is only available"
                          " for 'pg' solver, which will be removed"
                          " in release 0.19. Use another solver with L1 or L2"
                          " regularization instead.", DeprecationWarning)
        self.nls_max_iter = nls_max_iter
        self.sparseness = sparseness
        self.beta = beta
        self.eta = eta
    def fit_transform(self, X, y=None, W=None, H=None):
        """Learn a NMF model for the data X and returns the transformed data.
        This is more efficient than calling fit followed by transform.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed
        W : array-like, shape (n_samples, n_components)
            If init='custom', it is used as initial guess for the solution.
        H : array-like, shape (n_components, n_features)
            If init='custom', it is used as initial guess for the solution.
        Attributes
        ----------
        components_ : array-like, shape (n_components, n_features)
            Factorization matrix, sometimes called 'dictionary'.
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        W: array, shape (n_samples, n_components)
            Transformed data.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'))
        # Delegate to the function API; update_H=True means both factors are
        # estimated, and 'both' applies the penalty to W and H alike.
        W, H, n_iter_ = non_negative_factorization(
            X=X, W=W, H=H, n_components=self.n_components,
            init=self.init, update_H=True, solver=self.solver,
            tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
            l1_ratio=self.l1_ratio, regularization='both',
            random_state=self.random_state, verbose=self.verbose,
            shuffle=self.shuffle,
            nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
            beta=self.beta, eta=self.eta)
        # Sparseness diagnostics are only defined for the deprecated 'pg'
        # solver.
        if self.solver == 'pg':
            self.comp_sparseness_ = _sparseness(H.ravel())
            self.data_sparseness_ = _sparseness(W.ravel())
        self.reconstruction_err_ = _safe_compute_error(X, W, H)
        self.n_components_ = H.shape[0]
        self.components_ = H
        self.n_iter_ = n_iter_
        return W
    def fit(self, X, y=None, **params):
        """Learn a NMF model for the data X.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed
        Attributes
        ----------
        components_ : array-like, shape (n_components, n_features)
            Factorization matrix, sometimes called 'dictionary'.
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        self
        """
        # fit is just fit_transform with the returned W discarded.
        self.fit_transform(X, **params)
        return self
    def transform(self, X):
        """Transform the data X according to the fitted NMF model
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be transformed by the model
        Attributes
        ----------
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        W: array, shape (n_samples, n_components)
            Transformed data
        """
        check_is_fitted(self, 'n_components_')
        # Keep the learned components_ fixed (update_H=False) and only solve
        # for the new W.
        W, _, n_iter_ = non_negative_factorization(
            X=X, W=None, H=self.components_, n_components=self.n_components_,
            init=self.init, update_H=False, solver=self.solver,
            tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
            l1_ratio=self.l1_ratio, regularization='both',
            random_state=self.random_state, verbose=self.verbose,
            shuffle=self.shuffle,
            nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
            beta=self.beta, eta=self.eta)
        self.n_iter_ = n_iter_
        return W
@deprecated("It will be removed in release 0.19. Use NMF instead."
            "'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
    """Deprecated backward-compatibility alias for :class:`NMF`.
    Always constructs the parent with solver='pg'; scheduled for removal
    in release 0.19.
    """
    def __init__(self, n_components=None, solver='pg', init=None,
                 tol=1e-4, max_iter=200, random_state=None,
                 alpha=0., l1_ratio=0., verbose=0,
                 nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
        # `solver` is accepted only for signature compatibility; the parent
        # is always built with the literal 'pg'.
        super(ProjectedGradientNMF, self).__init__(
            n_components=n_components, init=init, solver='pg', tol=tol,
            max_iter=max_iter, random_state=random_state, alpha=alpha,
            l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
            sparseness=sparseness, beta=beta, eta=eta)
|
silvio-giuliani/steam-userstatistics
|
refs/heads/master
|
run.py
|
1
|
# Entry point: the Flask app object lives in api.controllers.routes and is
# re-exported here so WSGI servers can import `app` from this module.
from api.controllers import routes
app = routes.app
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly.
    app.run()
|
wasade/networkx
|
refs/heads/master
|
networkx/algorithms/approximation/matching.py
|
85
|
# -*- coding: utf-8 -*-
"""
**************
Graph Matching
**************
Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent
edges; that is, no two edges share a common vertex.
http://en.wikipedia.org/wiki/Matching_(graph_theory)
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__all__ = ["min_maximal_matching"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def min_maximal_matching(G):
    r"""Returns the minimum maximal matching of G. That is, out of all maximal
    matchings of the graph G, the smallest is returned.
    Parameters
    ----------
    G : NetworkX graph
      Undirected graph
    Returns
    -------
    min_maximal_matching : set
      Returns a set of edges such that no two edges share a common endpoint
      and every edge not in the set shares some common endpoint in the set.
      Cardinality will be 2*OPT in the worst case.
    Notes
    -----
    The algorithm computes an approximate solution of the minimum maximal
    cardinality matching problem. The solution is no more than 2 * OPT in size.
    Runtime is `O(|E|)`.
    References
    ----------
    .. [1] Vazirani, Vijay Approximation Algorithms (2001)
    """
    # Any maximal matching is a 2-approximation of the minimum one, so the
    # generic maximal-matching routine suffices.
    return nx.maximal_matching(G)
|
yprez/python-social-auth
|
refs/heads/master
|
social/backends/flickr.py
|
76
|
"""
Flickr OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/flickr.html
"""
from social.backends.oauth import BaseOAuth1
class FlickrOAuth(BaseOAuth1):
    """Flickr OAuth authentication backend"""
    name = 'flickr'
    AUTHORIZATION_URL = 'https://www.flickr.com/services/oauth/authorize'
    REQUEST_TOKEN_URL = 'https://www.flickr.com/services/oauth/request_token'
    ACCESS_TOKEN_URL = 'https://www.flickr.com/services/oauth/access_token'
    EXTRA_DATA = [
        ('id', 'id'),
        ('username', 'username'),
        ('expires', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from Flickr account"""
        names = self.get_user_names(response.get('fullname'))
        fullname, first_name, last_name = names
        # Fall back to the numeric id when Flickr supplies no username.
        username = response.get('username') or response.get('id')
        return {
            'username': username,
            'email': '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        data = {
            'id': access_token['user_nsid'],
            'username': access_token['username']
        }
        data['fullname'] = access_token.get('fullname', '')
        return data

    def auth_extra_arguments(self):
        """Ask Flickr for read permission unless the caller overrides it."""
        params = super(FlickrOAuth, self).auth_extra_arguments() or {}
        params.setdefault('perms', 'read')
        return params
|
aptivate/ckanext-mapactionimporter
|
refs/heads/staging
|
ckanext/mapactionimporter/commands.py
|
1
|
import ckan.plugins.toolkit as toolkit
import paste.script
from ckanext.mapactionimporter.plugin import (
create_product_themes
)
class MapactionImporterCommand(toolkit.CkanCommand):
    """
    ckanext-mapactionimporter management commands
    Usage::
        paster mapactionimporter create_product_themes
    """
    # paster introspects these attributes to build the CLI help output.
    summary = __doc__.split('\n')[0]
    usage = __doc__
    parser = paste.script.command.Command.standard_parser(verbose=True)
    parser.add_option('-c', '--config', dest='config',
                      default='development.ini',
                      help='Config file to use.')
    def command(self):
        # Dispatch on the first positional argument; print help for anything
        # unrecognized (including no argument at all).
        cmd = None
        if self.args:
            cmd = self.args[0]
        # Load the CKAN config before touching plugin functionality.
        self._load_config()
        if cmd == 'create_product_themes':
            create_product_themes()
        else:
            print self.__doc__
|
ingve/IncludeOS
|
refs/heads/master
|
test/hw/integration/serial/test.py
|
1
|
#!/usr/bin/python
# Integration test for the IncludeOS serial port: boots a VM and writes a
# line to it once the guest prints the trigger string.
import sys
import os
# Locate the IncludeOS source tree (env var override, else derived from this
# file's path) so the vmrunner package can be imported.
includeos_src = os.environ.get('INCLUDEOS_SRC',
                               os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src)
from subprocess import call
from vmrunner import vmrunner
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def test_serial_port():
    # Callback fired when the guest prints the trigger string below.
    print "<test.py> Test triggered"
    global vm
    vm.writeline("Here is a test\n")
vm.on_output("trigger_test_serial_port", test_serial_port)
# Boot the VM
vm.make().boot(80)
|
HousekeepLtd/django
|
refs/heads/master
|
tests/middleware_exceptions/urls.py
|
390
|
from django.conf.urls import url
from . import views
# URLconf for the middleware-exception tests: each route maps to a view that
# succeeds or fails in one specific way (404, 500, returning None, raising
# PermissionDenied, failing during template rendering, ...).
urlpatterns = [
    url(r'^middleware_exceptions/view/$', views.normal_view),
    url(r'^middleware_exceptions/not_found/$', views.not_found),
    url(r'^middleware_exceptions/error/$', views.server_error),
    url(r'^middleware_exceptions/null_view/$', views.null_view),
    url(r'^middleware_exceptions/permission_denied/$', views.permission_denied),
    url(r'^middleware_exceptions/exception_in_render/$', views.exception_in_render),
    url(r'^middleware_exceptions/template_response/$', views.template_response),
    url(r'^middleware_exceptions/template_response_error/$', views.template_response_error),
]
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/ipykernel/embed.py
|
13
|
"""Simple function for embedding an IPython kernel
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.utils.frame import extract_module_locals
from .kernelapp import IPKernelApp
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def embed_kernel(module=None, local_ns=None, **kwargs):
    """Embed and start an IPython kernel in a given scope.
    Parameters
    ----------
    module : ModuleType, optional
        The module to load into IPython globals (default: caller)
    local_ns : dict, optional
        The namespace to load into IPython user namespace (default: caller)
    kwargs : various, optional
        Further keyword args are relayed to the IPKernelApp constructor,
        allowing configuration of the Kernel.  Will only have an effect
        on the first embed_kernel call for a given process.
    """
    # get the app if it exists, or set it up if it doesn't
    if IPKernelApp.initialized():
        # Already-initialized singleton: kwargs are ignored on this path.
        app = IPKernelApp.instance()
    else:
        app = IPKernelApp.instance(**kwargs)
        app.initialize([])
        # Undo unnecessary sys module mangling from init_sys_modules.
        # This would not be necessary if we could prevent it
        # in the first place by using a different InteractiveShell
        # subclass, as in the regular embed case.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if main is not None:
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main
    # load the calling scope if not given; frame depth 1 == our caller
    (caller_module, caller_locals) = extract_module_locals(1)
    if module is None:
        module = caller_module
    if local_ns is None:
        local_ns = caller_locals
    # Point the kernel at the requested globals/locals before starting.
    app.kernel.user_module = module
    app.kernel.user_ns = local_ns
    app.shell.set_completer_frame()
    # Blocks: runs the kernel's event loop.
    app.start()
|
Leo-g/Flask-Skeleton
|
refs/heads/master
|
app/__init__.py
|
3
|
from flask import Flask
#http://flask.pocoo.org/docs/0.10/patterns/appfactories/
def create_app(config_filename):
    """Application factory: build, configure and return the Flask app.
    `config_filename` is an object path handed to `config.from_object`.
    """
    app = Flask(__name__)
    app.config.from_object(config_filename)
    # Local import — presumably to avoid an import cycle at module load
    # (typical for app factories); TODO confirm against app.users.models.
    from app.users.models import db
    db.init_app(app)
    #Blueprints
    from app.users.views import users
    app.register_blueprint(users, url_prefix='/users')
    return app
|
swamireddy/python-cinderclient
|
refs/heads/master
|
cinderclient/tests/v2/test_quota_classes.py
|
6
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class QuotaClassSetsTest(utils.TestCase):
    # Exercises the quota-class API against the fake client `cs`; assertions
    # check the HTTP verb/URL recorded by the fake transport.
    def test_class_quotas_get(self):
        class_name = 'test'
        cs.quota_classes.get(class_name)
        cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)
    def test_update_quota(self):
        q = cs.quota_classes.get('test')
        q.update(volumes=2, snapshots=2)
        cs.assert_called('PUT', '/os-quota-class-sets/test')
    def test_refresh_quota(self):
        # Two independent fetches agree; after mutating one locally, a
        # .get() refresh must restore the server-side value.
        q = cs.quota_classes.get('test')
        q2 = cs.quota_classes.get('test')
        self.assertEqual(q.volumes, q2.volumes)
        q2.volumes = 0
        self.assertNotEqual(q.volumes, q2.volumes)
        q2.get()
        self.assertEqual(q.volumes, q2.volumes)
|
infraredbg/Lenovo_A820_kernel_kk
|
refs/heads/upstream
|
bionic/libc/kernel/tools/update_all.py
|
5
|
#!/usr/bin/env python
#
import sys, cpp, kernel, glob, os, re, getopt, clean_header
from defaults import *
from utils import *
def usage():
    # Print the help text (with the program name substituted) and exit 0.
    # Called both for bad options and for a wrong argument count.
    print """\
  usage: %(progname)s [kernel-original-path]
    this program is used to update all the auto-generated clean headers
    used by the Bionic C library. it assumes the following:
      - a set of source kernel headers is located in '../original',
        relative to the program's directory
      - the clean headers will be placed in '../arch-<arch>/asm',
        '../common/linux', '../common/asm-generic', etc..
""" % { "progname" : os.path.basename(sys.argv[0]) }
    sys.exit(0)
try:
optlist, args = getopt.getopt( sys.argv[1:], '' )
except:
# unrecognized option
sys.stderr.write( "error: unrecognized option\n" )
usage()
if len(optlist) > 0 or len(args) > 1:
usage()
progdir = find_program_dir()
if len(args) == 1:
original_dir = args[0]
if not os.path.isdir(original_dir):
panic( "Not a directory: %s\n" % original_dir )
else:
original_dir = kernel_original_path
if not os.path.isdir(original_dir):
panic( "Missing directory, please specify one through command-line: %s\n" % original_dir )
# find all source files in 'original'
#
sources = []
for root, dirs, files in os.walk( original_dir ):
for file in files:
base, ext = os.path.splitext(file)
if ext == ".h":
sources.append( "%s/%s" % (root,file) )
b = BatchFileUpdater()
for arch in kernel_archs:
b.readDir( os.path.normpath( progdir + "/../arch-%s" % arch ) )
b.readDir( os.path.normpath( progdir + "/../common" ) )
#print "OLD " + repr(b.old_files)
oldlen = 120
for path in sources:
dst_path, newdata = clean_header.cleanupFile(path, original_dir)
if not dst_path:
continue
b.readFile( dst_path )
r = b.editFile( dst_path, newdata )
if r == 0:
state = "unchanged"
elif r == 1:
state = "edited"
else:
state = "added"
str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
if sys.stdout.isatty():
print "%-*s" % (oldlen,str),
if (r == 0):
print "\r",
else:
print "\n",
oldlen = 0
else:
print str
oldlen = len(str)
print "%-*s" % (oldlen,"Done!")
b.updateGitFiles()
sys.exit(0)
|
omouse/staykat
|
refs/heads/master
|
app/dateplanner/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
lduarte1991/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/lms/peer_grade.py
|
40
|
"""
Students grade peer submissions.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise
class PeerGradePage(PageObject):
    """
    Students grade peer submissions.
    """
    # Reachable only through other pages, so no direct URL.
    url = None
    def is_browser_on_page(self):
        # Either the problem-list view or an individual grading panel counts
        # as being "on" this page.
        def _is_correct_page():
            is_present = (
                self.q(css='div.peer-grading-tools').present or
                self.q(css='div.grading-panel.current-state').present
            )
            return is_present, is_present
        return Promise(_is_correct_page, 'On the peer grading page.').fulfill()
    @property
    def problem_list(self):
        """
        Return the list of available problems to peer grade.
        """
        return self.q(css='a.problem-button').text
    def select_problem(self, problem_name):
        """
        Choose the problem with `problem_name` to start grading or calibrating.
        """
        # CSS nth-of-type is 1-based, hence the +1.
        index = self.problem_list.index(problem_name) + 1
        self.q(css='a.problem-button:nth-of-type({})'.format(index)).first.click()
|
CTSRD-SOAAP/chromium-42.0.2311.135
|
refs/heads/master
|
tools/gyp/test/mac/gyptest-bundle-resources.py
|
193
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies things related to bundle resources.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
    # Compare the resource copied into the .app bundle against its source:
    # the copy must have a different mtime (i.e. be a fresh copy) and its
    # user-exec bit must equal `expected_exec_bit`.  Relies on the module
    # globals `test` and `CHDIR` being assigned before this is called.
    out_path = test.built_file_path(
        os.path.join('resource.app/Contents/Resources', path), chdir=CHDIR)
    in_stat = os.stat(os.path.join(CHDIR, path))
    out_stat = os.stat(out_path)
    if in_stat.st_mtime == out_stat.st_mtime:
        test.fail_test()
    if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
        test.fail_test()
# The whole test is macOS-only: bundle resources are an Xcode/mac concept.
if sys.platform == 'darwin':
    # set |match| to ignore build stderr output.
    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
    CHDIR = 'bundle-resources'
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', test.ALL, chdir=CHDIR)
    # Plain copy keeps content as-is; the rule-processed copy upper-cases it.
    test.built_file_must_match('resource.app/Contents/Resources/secret.txt',
                               'abc\n', chdir=CHDIR)
    test.built_file_must_match('source_rule.app/Contents/Resources/secret.txt',
                               'ABC\n', chdir=CHDIR)
    test.built_file_must_match(
        'resource.app/Contents/Resources/executable-file.sh',
        '#!/bin/bash\n'
        '\n'
        'echo echo echo echo cho ho o o\n', chdir=CHDIR)
    # Exec bit must be preserved for scripts and absent for plain files.
    check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
    check_attribs('secret.txt', expected_exec_bit=0)
    # TODO(thakis): This currently fails with make.
    if test.format != 'make':
        test.built_file_must_match(
            'resource_rule.app/Contents/Resources/secret.txt', 'ABC\n', chdir=CHDIR)
    test.pass_test()
|
rrpg/engine
|
refs/heads/master
|
core/commands/attack.py
|
1
|
# -*- coding: utf-8 -*-
import core.command
from core.fight import fight
from core.localisation import _
class attack(core.command.command):
    """
    Fight command
    """
    def run(self):
        """
        Attack someone in the same area
        """
        # An active fight is required; otherwise abort with a localised error.
        f = fight.getFight()
        if f is None:
            raise core.command.exception(_('ERROR_FIGHT_NOT_FIGHTING'))
        enemy = f.getEnemy()
        attackResult = f.attack()
        return {
            'enemy': enemy,
            'attackResult': attackResult
        }
    def render(self, data):
        # Localised message templates; filled in with str.format below.
        attackConfirm = _('ATTACK_CONFIRM_PLAYER_TO_ENEMY_{enemy}_{damages}')
        attackConfirmEnemy = _('ATTACK_CONFIRM_ENEMY_TO_PLAYER_{enemy}_{damages}')
        attackVictory = _('ATTACK_VICTORY_{enemy}')
        attackLost = _('ATTACK_LOST_{enemy}')
        dataFormat = {
            'enemy': data['enemy']['name'],
            'damages': data['attackResult']['damagesToEnemy']
        }
        output = [attackConfirm.format(**dataFormat)]
        # The enemy only strikes back while still alive; None means no
        # counter-attack happened.
        if data['attackResult']['damagesToPlayer'] is not None:
            dataFormat = {
                'enemy': data['enemy']['name'],
                'damages': data['attackResult']['damagesToPlayer']
            }
            output.append(attackConfirmEnemy.format(**dataFormat))
        if data['attackResult']['fightFinished']:
            dataFormat = {
                'enemy': data['enemy']['name']
            }
            # self._player comes from the base command class — presumably the
            # acting player; TODO confirm in core.command.
            if data['attackResult']['winner'] == self._player:
                output.append(attackVictory.format(**dataFormat))
            else:
                output.append(attackLost.format(**dataFormat))
        return '\n'.join(output)
|
v1bri/gnuradio
|
refs/heads/master
|
gr-blocks/python/blocks/qa_endian_swap.py
|
47
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import ctypes
class test_endian_swap(gr_unittest.TestCase):
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_001(self):
        # 16-bit swap: 0x0001 -> 0x0100 == 256, etc.
        src_data = [1,2,3,4]
        expected_result = [256, 512, 768, 1024];
        src = blocks.vector_source_s(src_data)
        op = blocks.endian_swap(2)
        dst = blocks.vector_sink_s()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = list(dst.data())
        self.assertEqual(expected_result, result_data)
    def test_002(self):
        # 32-bit swap: 0x00000001 -> 0x01000000 == 16777216, etc.
        src_data = [1,2,3,4]
        expected_result = [16777216, 33554432, 50331648, 67108864];
        src = blocks.vector_source_i(src_data)
        op = blocks.endian_swap(4)
        dst = blocks.vector_sink_i()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = list(dst.data())
        self.assertEqual(expected_result, result_data)
if __name__ == '__main__':
    # Run through GNU Radio's unittest wrapper; the XML filename is consumed
    # by the build system's test harness.
    gr_unittest.run(test_endian_swap, "test_endian_swap.xml")
|
DavidResin/aps-aalto
|
refs/heads/master
|
stitch/cv/lib/python3.4/site-packages/pip/_vendor/html5lib/_trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Trie backed by a sorted key list.

    Prefix queries binary-search the sorted key list; the window found for
    the most recent prefix is cached so a following query for an extension
    of that prefix can restrict its search to the cached window.
    """

    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")
        self._data = data
        self._keys = sorted(data.keys())
        # (last prefix queried, (start, stop) window of matching keys).
        self._cachestr = ""
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with *prefix* (all keys if None/"")."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)
        if prefix.startswith(self._cachestr):
            # The new prefix extends the cached one, so its matches lie
            # inside the cached window.
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)
        keys = set()
        if start == len(self._keys):
            return keys
        # Bug fix: the loop previously had no bounds check and raised
        # IndexError whenever every key from `start` onward matched the
        # prefix (e.g. a prefix of the lexicographically last key).
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1
        self._cachestr = prefix
        self._cachepoints = (start, i)
        return keys

    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with *prefix*."""
        if prefix in self._data:
            return True
        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)
        if i == len(self._keys):
            return False
        # The first key >= prefix matches iff any key has this prefix.
        return self._keys[i].startswith(prefix)
|
mdakin/engine
|
refs/heads/master
|
third_party/mesa/redirectoutput.py
|
167
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import os.path
import subprocess
import sys
# Re-exec the Python interpreter on a script, capturing its stdout in a file;
# the file is removed again if the child exits non-zero.
if len(sys.argv) < 3:
    print "Usage: %s OUTPUTFILE SCRIPTNAME ARGUMENTS" % sys.argv[0]
    print "Re-execs the python interpreter against SCRIPTNAME with ARGS,"
    print "redirecting output to OUTPUTFILE."
    sys.exit(1)
abs_outputfile = os.path.abspath(sys.argv[1])
abs_outputdir = os.path.dirname(abs_outputfile)
# Create the output directory lazily so callers need not pre-create it.
if not os.path.isdir(abs_outputdir):
    os.makedirs(abs_outputdir)
ret = 0
with open(abs_outputfile, "w") as f:
    ret = subprocess.Popen([sys.executable] + sys.argv[2:], stdout=f).wait()
# Don't leave a partial/invalid output file behind on failure.
if ret:
    os.remove(abs_outputfile)
sys.exit(ret)
|
badjr/pysal
|
refs/heads/master
|
pysal/core/IOHandlers/geoda_txt.py
|
14
|
import pysal.core.Tables as Tables
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
__all__ = ['GeoDaTxtReader']
class GeoDaTxtReader(Tables.DataTable):
    """GeoDa Text File Export Format
    """
    __doc__ = Tables.DataTable.__doc__
    FORMATS = ['geoda_txt']
    MODES = ['r']

    def __init__(self, *args, **kwargs):
        """
        Examples
        --------
        >>> import pysal
        >>> f = pysal.open(pysal.examples.get_path('stl_hom.txt'),'r')
        >>> f.header
        ['FIPSNO', 'HR8488', 'HR8893', 'HC8488']
        >>> len(f)
        78
        >>> f.dat[0]
        ['17107', '1.290722', '1.624458', '2']
        >>> f.dat[-1]
        ['29223', '0', '8.451537', '0']
        >>> f._spec
        [<type 'int'>, <type 'float'>, <type 'float'>, <type 'int'>]
        """
        Tables.DataTable.__init__(self, *args, **kwargs)
        self.__idx = {}     # reserved for indexed access (unused here)
        self.__len = None   # number of data rows, set by _open()
        self.pos = 0        # cursor for sequential _read() calls
        self._open()

    def _open(self):
        # Eagerly parse the whole file: line 1 is "n,k", line 2 the quoted
        # header, remaining lines the data rows.
        if self.mode == 'r':
            self.fileObj = open(self.dataPath, 'r')
            n, k = self.fileObj.readline().strip().split(',')
            n, k = int(n), int(k)
            header = self.fileObj.readline().strip().split(',')
            self.header = [f.replace('"', '') for f in header]
            # Explicit check instead of `assert`, which is stripped when
            # Python runs with -O; raises the same TypeError as before.
            if len(self.header) != k:
                raise TypeError("This is not a valid geoda_txt file.")
            dat = self.fileObj.readlines()
            self.dat = [line.strip().split(',') for line in dat]
            self._spec = self._determineSpec(self.dat)
            self.__len = len(dat)

    def __len__(self):
        return self.__len

    def _read(self):
        """Return the next row, or signal end-of-data with StopIteration."""
        if self.pos < len(self):
            row = self.dat[self.pos]
            self.pos += 1
            return row
        else:
            # Bug fix: this used to execute `raise None`, which is itself a
            # TypeError at runtime; end-of-data is signalled with
            # StopIteration, the standard iteration-protocol sentinel.
            raise StopIteration

    def close(self):
        self.fileObj.close()
        Tables.DataTable.close(self)

    @staticmethod
    def _determineSpec(data):
        # Infer a per-column type (int/float/str) by scanning every row.
        # Heuristic: a value is "numeric" if it is all digits after removing
        # '-' and '.'; ints additionally contain no '.'.
        cols = len(data[0])
        spec = []
        for j in range(cols):
            isInt = True
            isFloat = True
            for row in data:
                val = row[j]
                if not val.strip().replace('-', '').replace('.', '').isdigit():
                    isInt = False
                    isFloat = False
                    break
                else:
                    if isInt and '.' in val:
                        isInt = False
            if isInt:
                spec.append(int)
            elif isFloat:
                spec.append(float)
            else:
                spec.append(str)
        return spec
|
fitermay/intellij-community
|
refs/heads/master
|
python/testData/psi/Nonlocal.py
|
83
|
nonlocal a, b
|
william-os4y/fapws3
|
refs/heads/master
|
fapws/__init__.py
|
12133432
| |
stochasticHydroTools/RigidMultiblobsWall
|
refs/heads/master
|
read_input/__init__.py
|
12133432
| |
DvA-leopold/CrAB
|
refs/heads/dev
|
crypto/__init__.py
|
12133432
| |
initNirvana/Easyphotos
|
refs/heads/master
|
env/lib/python3.4/site-packages/PIL/ImageFilter.py
|
20
|
#
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from functools import reduce
class Filter(object):
    """Marker base class for image filters; concrete filters provide
    filter(image)."""
    pass
class Kernel(Filter):
    """
    Create a convolution kernel.  The current version only
    supports 3x3 and 5x5 integer and floating point kernels.
    In the current version, kernels can only be applied to
    "L" and "RGB" images.
    :param size: Kernel size, given as (width, height). In the current
                    version, this must be (3,3) or (5,5).
    :param kernel: A sequence containing kernel weights.
    :param scale: Scale factor. If given, the result for each pixel is
                    divided by this value.  the default is the sum of the
                    kernel weights.
    :param offset: Offset. If given, this value is added to the result,
                    after it has been divided by the scale factor.
    """
    def __init__(self, size, kernel, scale=None, offset=0):
        # Validate the coefficient count first so a malformed kernel fails
        # with the size error rather than inside the scale computation.
        if size[0] * size[1] != len(kernel):
            raise ValueError("not enough coefficients in kernel")
        if scale is None:
            # default scale is sum of kernel (builtin sum replaces the
            # former reduce(lambda a, b: a+b, kernel))
            scale = sum(kernel)
        self.filterargs = size, scale, offset, kernel
    def filter(self, image):
        # Palette images cannot be convolved; the C layer handles the rest.
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        return image.filter(*self.filterargs)
class BuiltinFilter(Kernel):
def __init__(self):
pass
class RankFilter(Filter):
    """
    Create a rank filter.  The rank filter sorts all pixels in a window
    of the given size and returns the **rank**'th value.

    :param size: The kernel size, in pixels.
    :param rank: What pixel value to pick.  Use 0 for a min filter,
                 ``size * size / 2`` for a median filter,
                 ``size * size - 1`` for a max filter, etc.
    """
    name = "Rank"

    def __init__(self, size, rank):
        self.size = size
        self.rank = rank

    def filter(self, image):
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        margin = self.size // 2
        padded = image.expand(margin, margin)
        return padded.rankfilter(self.size, self.rank)


class MedianFilter(RankFilter):
    """
    Create a median filter.  Picks the median pixel value in a window of
    the given size.

    :param size: The kernel size, in pixels.
    """
    name = "Median"

    def __init__(self, size=3):
        window = size * size
        self.size = size
        self.rank = window // 2


class MinFilter(RankFilter):
    """
    Create a min filter.  Picks the lowest pixel value in a window of
    the given size.

    :param size: The kernel size, in pixels.
    """
    name = "Min"

    def __init__(self, size=3):
        self.size = size
        self.rank = 0


class MaxFilter(RankFilter):
    """
    Create a max filter.  Picks the largest pixel value in a window of
    the given size.

    :param size: The kernel size, in pixels.
    """
    name = "Max"

    def __init__(self, size=3):
        window = size * size
        self.size = size
        self.rank = window - 1
class ModeFilter(Filter):
    """
    Create a mode filter.  Picks the most frequent pixel value in a box
    of the given size.  Pixel values occurring only once or twice are
    ignored; if no value occurs more than twice, the original pixel
    value is preserved.

    :param size: The kernel size, in pixels.
    """
    name = "Mode"

    def __init__(self, size=3):
        self.size = size

    def filter(self, image):
        window = self.size
        return image.modefilter(window)


class GaussianBlur(Filter):
    """Gaussian blur filter.

    :param radius: Blur radius.
    """
    name = "GaussianBlur"

    def __init__(self, radius=2):
        self.radius = radius

    def filter(self, image):
        blur_radius = self.radius
        return image.gaussian_blur(blur_radius)


class UnsharpMask(Filter):
    """Unsharp mask filter.

    See Wikipedia's entry on `digital unsharp masking`_ for an
    explanation of the parameters.

    :param radius: Blur Radius
    :param percent: Unsharp strength, in percent
    :param threshold: Threshold controls the minimum brightness change that
                      will be sharpened

    .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
    """
    name = "UnsharpMask"

    def __init__(self, radius=2, percent=150, threshold=3):
        self.radius = radius
        self.percent = percent
        self.threshold = threshold

    def filter(self, image):
        args = (self.radius, self.percent, self.threshold)
        return image.unsharp_mask(*args)
# Prebuilt convolution presets.  Each class only supplies ``name`` and a
# ``filterargs`` tuple of (size, scale, offset, kernel weights), consumed
# unchanged by BuiltinFilter / Kernel.filter().
class BLUR(BuiltinFilter):
    name = "Blur"
    filterargs = (5, 5), 16, 0, (
        1, 1, 1, 1, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 1, 1, 1, 1
    )


class CONTOUR(BuiltinFilter):
    name = "Contour"
    filterargs = (3, 3), 1, 255, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1
    )


class DETAIL(BuiltinFilter):
    name = "Detail"
    filterargs = (3, 3), 6, 0, (
        0, -1, 0,
        -1, 10, -1,
        0, -1, 0
    )


class EDGE_ENHANCE(BuiltinFilter):
    name = "Edge-enhance"
    filterargs = (3, 3), 2, 0, (
        -1, -1, -1,
        -1, 10, -1,
        -1, -1, -1
    )


class EDGE_ENHANCE_MORE(BuiltinFilter):
    name = "Edge-enhance More"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 9, -1,
        -1, -1, -1
    )


class EMBOSS(BuiltinFilter):
    name = "Emboss"
    filterargs = (3, 3), 1, 128, (
        -1, 0, 0,
        0, 1, 0,
        0, 0, 0
    )


class FIND_EDGES(BuiltinFilter):
    name = "Find Edges"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1
    )


class SMOOTH(BuiltinFilter):
    name = "Smooth"
    filterargs = (3, 3), 13, 0, (
        1, 1, 1,
        1, 5, 1,
        1, 1, 1
    )


class SMOOTH_MORE(BuiltinFilter):
    name = "Smooth More"
    filterargs = (5, 5), 100, 0, (
        1, 1, 1, 1, 1,
        1, 5, 5, 5, 1,
        1, 5, 44, 5, 1,
        1, 5, 5, 5, 1,
        1, 1, 1, 1, 1
    )


class SHARPEN(BuiltinFilter):
    name = "Sharpen"
    filterargs = (3, 3), 16, 0, (
        -2, -2, -2,
        -2, 32, -2,
        -2, -2, -2
    )
|
timabbott/zulip
|
refs/heads/master
|
zerver/migrations/0206_stream_rendered_description.py
|
7
|
from django.db import migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from zerver.lib.actions import render_stream_description
def render_all_stream_descriptions(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Backfill ``rendered_description`` for every stream that has a description."""
    stream_model = apps.get_model('zerver', 'Stream')
    for stream in stream_model.objects.exclude(description=''):
        stream.rendered_description = render_stream_description(stream.description)
        stream.save(update_fields=["rendered_description"])
class Migration(migrations.Migration):
    """Add Stream.rendered_description and backfill it for existing streams."""

    dependencies = [
        ('zerver', '0205_remove_realmauditlog_requires_billing_update'),
    ]

    operations = [
        migrations.AddField(
            model_name='stream',
            name='rendered_description',
            field=models.TextField(default=''),
        ),
        # elidable=True: the data backfill may be dropped when squashing.
        migrations.RunPython(render_all_stream_descriptions,
                             reverse_code=migrations.RunPython.noop,
                             elidable=True),
    ]
|
mKeRix/home-assistant
|
refs/heads/dev
|
homeassistant/components/input_number/__init__.py
|
12
|
"""Support to set a numeric value from a slider or text box."""
import logging
import typing
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_MODE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ICON,
CONF_ID,
CONF_MODE,
CONF_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
_LOGGER = logging.getLogger(__name__)

DOMAIN = "input_number"

# YAML/storage configuration keys.
CONF_INITIAL = "initial"
CONF_MIN = "min"
CONF_MAX = "max"
CONF_STEP = "step"

# UI presentation modes.
MODE_SLIDER = "slider"
MODE_BOX = "box"

# State attribute names exposed on the entity.
ATTR_INITIAL = "initial"
ATTR_VALUE = "value"
ATTR_MIN = "min"
ATTR_MAX = "max"
ATTR_STEP = "step"

# Entity service names registered in async_setup().
SERVICE_SET_VALUE = "set_value"
SERVICE_INCREMENT = "increment"
SERVICE_DECREMENT = "decrement"
def _cv_input_number(cfg):
    """Configure validation helper for input number (voluptuous).

    Cross-field check: requires min < max and, when an initial value is
    given, that it lies inside [min, max].  Returns ``cfg`` unchanged on
    success; raises ``vol.Invalid`` otherwise.
    """
    minimum = cfg.get(CONF_MIN)
    maximum = cfg.get(CONF_MAX)
    if minimum >= maximum:
        # BUG FIX: the message previously interpolated the two values in
        # swapped positions ("Maximum ({minimum}) ... minimum ({maximum})").
        raise vol.Invalid(
            f"Maximum ({maximum}) is not greater than minimum ({minimum})"
        )
    state = cfg.get(CONF_INITIAL)
    if state is not None and (state < minimum or state > maximum):
        raise vol.Invalid(f"Initial value {state} not in range {minimum}-{maximum}")
    return cfg
# Fields accepted by the websocket CRUD API when creating an entry.
CREATE_FIELDS = {
    vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
    vol.Required(CONF_MIN): vol.Coerce(float),
    vol.Required(CONF_MAX): vol.Coerce(float),
    vol.Optional(CONF_INITIAL): vol.Coerce(float),
    vol.Optional(CONF_STEP, default=1): vol.All(vol.Coerce(float), vol.Range(min=1e-3)),
    vol.Optional(CONF_ICON): cv.icon,
    vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
    vol.Optional(CONF_MODE, default=MODE_SLIDER): vol.In([MODE_BOX, MODE_SLIDER]),
}

# Fields accepted when updating an existing entry (everything optional).
UPDATE_FIELDS = {
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_MIN): vol.Coerce(float),
    vol.Optional(CONF_MAX): vol.Coerce(float),
    vol.Optional(CONF_INITIAL): vol.Coerce(float),
    vol.Optional(CONF_STEP): vol.All(vol.Coerce(float), vol.Range(min=1e-3)),
    vol.Optional(CONF_ICON): cv.icon,
    vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
    vol.Optional(CONF_MODE): vol.In([MODE_BOX, MODE_SLIDER]),
}

# YAML configuration schema: entries keyed by slug, each cross-checked by
# _cv_input_number (min < max, initial within range).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            vol.All(
                {
                    vol.Optional(CONF_NAME): cv.string,
                    vol.Required(CONF_MIN): vol.Coerce(float),
                    vol.Required(CONF_MAX): vol.Coerce(float),
                    vol.Optional(CONF_INITIAL): vol.Coerce(float),
                    vol.Optional(CONF_STEP, default=1): vol.All(
                        vol.Coerce(float), vol.Range(min=1e-3)
                    ),
                    vol.Optional(CONF_ICON): cv.icon,
                    vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
                    vol.Optional(CONF_MODE, default=MODE_SLIDER): vol.In(
                        [MODE_BOX, MODE_SLIDER]
                    ),
                },
                _cv_input_number,
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# The reload service takes no parameters.
RELOAD_SERVICE_SCHEMA = vol.Schema({})

# Persistent storage location/version for UI-created entities.
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Set up an input slider.

    Wires up two entity sources (YAML configuration and the storage
    collection), the websocket CRUD API, and the reload / set_value /
    increment / decrement services.  Returns True once registered.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    id_manager = collection.IDManager()

    yaml_collection = collection.YamlCollection(
        logging.getLogger(f"{__name__}.yaml_collection"), id_manager
    )
    collection.attach_entity_component_collection(
        component, yaml_collection, InputNumber.from_yaml
    )

    storage_collection = NumberStorageCollection(
        Store(hass, STORAGE_VERSION, STORAGE_KEY),
        logging.getLogger(f"{__name__}.storage_collection"),
        id_manager,
    )
    collection.attach_entity_component_collection(
        component, storage_collection, InputNumber
    )

    # A YAML entry with no body parses as None, hence the ``or {}`` guard.
    await yaml_collection.async_load(
        [{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
    )
    await storage_collection.async_load()

    collection.StorageCollectionWebsocket(
        storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
    ).async_setup(hass)

    collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
    collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)

    async def reload_service_handler(service_call: ServiceCallType) -> None:
        """Reload yaml entities."""
        conf = await component.async_prepare_reload(skip_reset=True)
        if conf is None:
            conf = {DOMAIN: {}}
        # BUG FIX: mirror the initial load above.  The comprehension
        # previously shadowed ``conf`` with its loop variable and spread it
        # unguarded (``**conf``), which raises TypeError for empty YAML
        # entries (parsed as None).
        await yaml_collection.async_load(
            [
                {CONF_ID: id_, **(item or {})}
                for id_, item in conf.get(DOMAIN, {}).items()
            ]
        )

    homeassistant.helpers.service.async_register_admin_service(
        hass,
        DOMAIN,
        SERVICE_RELOAD,
        reload_service_handler,
        schema=RELOAD_SERVICE_SCHEMA,
    )

    component.async_register_entity_service(
        SERVICE_SET_VALUE,
        {vol.Required(ATTR_VALUE): vol.Coerce(float)},
        "async_set_value",
    )
    component.async_register_entity_service(SERVICE_INCREMENT, {}, "async_increment")
    component.async_register_entity_service(SERVICE_DECREMENT, {}, "async_decrement")

    return True
class NumberStorageCollection(collection.StorageCollection):
    """Input storage based collection."""

    # Schemas reuse the module-level field maps; creation additionally runs
    # the min/max/initial cross-field check.
    CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_number))
    UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)

    async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
        """Validate the config is valid."""
        return self.CREATE_SCHEMA(data)

    @callback
    def _get_suggested_id(self, info: typing.Dict) -> str:
        """Suggest an ID based on the config."""
        return info[CONF_NAME]

    async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
        """Return a new updated data object."""
        update_data = self.UPDATE_SCHEMA(update_data)
        # Re-run the cross-field validation on the merged result.
        return _cv_input_number({**data, **update_data})
class InputNumber(RestoreEntity):
    """Representation of a slider."""

    def __init__(self, config: typing.Dict):
        """Initialize an input number."""
        self._config = config
        # Storage-backed entities are editable; from_yaml() flips this off.
        self.editable = True
        self._current_value = config.get(CONF_INITIAL)

    @classmethod
    def from_yaml(cls, config: typing.Dict) -> "InputNumber":
        """Return entity instance initialized from yaml storage."""
        input_num = cls(config)
        input_num.entity_id = f"{DOMAIN}.{config[CONF_ID]}"
        input_num.editable = False
        return input_num

    @property
    def should_poll(self):
        """If entity should be polled."""
        return False

    @property
    def _minimum(self) -> float:
        """Return minimum allowed value."""
        return self._config[CONF_MIN]

    @property
    def _maximum(self) -> float:
        """Return maximum allowed value."""
        return self._config[CONF_MAX]

    @property
    def name(self):
        """Return the name of the input slider."""
        return self._config.get(CONF_NAME)

    @property
    def icon(self):
        """Return the icon to be used for this entity."""
        return self._config.get(CONF_ICON)

    @property
    def state(self):
        """Return the state of the component."""
        return self._current_value

    @property
    def _step(self) -> int:
        """Return entity's increment/decrement step."""
        return self._config[CONF_STEP]

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._config.get(ATTR_UNIT_OF_MEASUREMENT)

    @property
    def unique_id(self) -> typing.Optional[str]:
        """Return unique id of the entity."""
        return self._config[CONF_ID]

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_INITIAL: self._config.get(CONF_INITIAL),
            ATTR_EDITABLE: self.editable,
            ATTR_MIN: self._minimum,
            ATTR_MAX: self._maximum,
            ATTR_STEP: self._step,
            ATTR_MODE: self._config[CONF_MODE],
        }

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        if self._current_value is not None:
            return
        # No configured initial value: try to restore the last known state.
        state = await self.async_get_last_state()
        # NOTE(review): float() will raise if the restored state is a
        # non-numeric string — TODO confirm upstream guards this.
        value = state and float(state.state)
        # Check against None because value can be 0
        if value is not None and self._minimum <= value <= self._maximum:
            self._current_value = value
        else:
            self._current_value = self._minimum

    async def async_set_value(self, value):
        """Set new value."""
        num_value = float(value)
        if num_value < self._minimum or num_value > self._maximum:
            raise vol.Invalid(
                f"Invalid value for {self.entity_id}: {value} (range {self._minimum} - {self._maximum})"
            )
        self._current_value = num_value
        self.async_write_ha_state()

    async def async_increment(self):
        """Increment value."""
        await self.async_set_value(min(self._current_value + self._step, self._maximum))

    async def async_decrement(self):
        """Decrement value."""
        await self.async_set_value(max(self._current_value - self._step, self._minimum))

    async def async_update_config(self, config: typing.Dict) -> None:
        """Handle when the config is updated."""
        self._config = config
        # just in case min/max values changed
        self._current_value = min(self._current_value, self._maximum)
        self._current_value = max(self._current_value, self._minimum)
        self.async_write_ha_state()
|
andrew-inglis/cern-atlas-mmOptical
|
refs/heads/master
|
code/run.py
|
1
|
#
# run.py - the base code that is run
#
#
# some hardcoded locations for things
#
import os
from sys import argv
from math import copysign
import matplotlib.pyplot as plt
import pickle
import numpy
# Hard coded directories. Please change these accordingly.
imageJjarName = '/Users/ainglis/Applications/ImageJ-platInd/ij.jar'
imageJjarPath = '/Users/ainglis/Applications/ImageJ-platInd'
imageJscript = '/Users/ainglis/pycharmbase/cern-atlas-mmOptical/code/imageJscript001.txt'
imageJscript_jpg = '/Users/ainglis/pycharmbase/cern-atlas-mmOptical/code/imageJscript002_jpg.txt'

# Positional command-line arguments (18 required; all kept as strings and
# converted at use sites).
analysisDirectory = argv[1]
inputImageFileName = argv[2]
colorMode = argv[3] #R-red channel, G-green, B-blue
backgroundSmoothingParameter = argv[4]
foregroundSmoothingParameter = argv[5]
startx = argv[6]
starty = argv[7]
width = argv[8]
length = argv[9]
micronsPerPixel = argv[10]
Realpitch = argv[11]
dumpRulerData = argv[12]
adjustToRuler = argv[13] #0 no adjustment, 1 adjustment
adjustmentFile = argv[14] # this is a pickle file
adjustmentSpacing = argv[15] # this is how many microns there are between each adjustment
thicknessOfRuler = argv[16]
distanceOfCamera = argv[17]
imageType = argv[18] #0 for raw format tiff, 1 for JPG (that Fabian sent)

# Correct the nominal pitch for parallax between the ruler and the camera.
pitch = float(Realpitch)*(1-float(thicknessOfRuler)/float(distanceOfCamera))

scriptToUse = ''
if(int(imageType) == 0):
    scriptToUse = imageJscript
elif(int(imageType) == 1):
    scriptToUse = imageJscript_jpg

#print analysisDirectory, inputImageFileName, colorMode, plotMode

# Run ImageJ in batch mode; macro arguments are packed colon-separated
# into a single parameter string.
javaCommand = 'java -jar -Xmx2048m ' + imageJjarName + ' -ijpath ' + imageJjarPath + \
    ' -batch ' + scriptToUse + ' ' + inputImageFileName + ':' + colorMode + \
    ':' + backgroundSmoothingParameter + ':' + foregroundSmoothingParameter + \
    ':' + startx + ':' + starty + \
    ':' + width + ':' + length
#print javaCommand
os.system(javaCommand)

# now the information is in the file sliver_auto_pp.txt
dataFile = open('sliver_auto_pp.txt','r')
dataRaw = dataFile.readlines()
dataRaw.pop(0)
data = []
for d in dataRaw:
    d001 = d.split('\t')
    d002 = d001[1].split('\n')
    # Raw TIFF values are negated — presumably so that strip centres
    # become minima below; TODO confirm.
    if(int(imageType) == 0):
        data.append(-float(d002[0]))
    elif(int(imageType) == 1):
        data.append(float(d002[0]))
#print data
#plt.plot(data)
#plt.show()
#exit(1)

# Find local minima with negative value (candidate strip centres).
brightestValues = []
for i in range(1,len(data)-1):
    # NOTE(review): data[i+2] can raise IndexError when i == len(data)-2
    # and the plateau branch is reached — confirm input length guards.
    if( ( (data[i] < data[i-1] and data[i] < data[i+1]) or
        (data[i] == data[i+1] and data[i] < data[i-1] and data[i+1] < data[i+2]))
        and data[i] < 0): # then this is the lowest point
        brightestValues.append(i)
print brightestValues

# Centre-of-mass of each dip between consecutive minima, baseline-shifted
# by the lowest value in the window.
centersOfMass = []
numerators = []
denominators =[]
#need to be capturing the cm of the negative values
#firstEval = data[zeroCross[0]+1]
#start = 0
#if(firstEval != copysign(firstEval,-1.0)):
#    start = 1
for i in range(len(brightestValues)-1):
    startIndex = brightestValues[i]
    endIndex = brightestValues[i+1]
    numerator = 0.
    denominator = 0.
    #find the lowest value
    lowestValue = 999999999999
    # NOTE(review): the inner loops reuse the name ``i``; harmless because
    # the outer for rebinds it each iteration, but confusing.
    for i in range (startIndex,endIndex):
        if(data[i]<lowestValue):
            lowestValue = data[i]
    for i in range (startIndex,endIndex):
        numerator = numerator + i*(data[i] - lowestValue)
        denominator = denominator + data[i] - lowestValue
    numerators.append(numerator)
    denominators.append(denominator)
    centersOfMass.append(1.0*numerator/denominator)
#print numerators
#print centersOfMass

if int(dumpRulerData) == 1:
    pickle.dump( centersOfMass, open( "AUTO_ruler.p", "wb" ) )

differences = []
#find the pixels per micron
for i in range(0,len(centersOfMass)-1):
    differences.append((centersOfMass[i+1] - centersOfMass[i]))
#plt.plot(differences,marker='o', linestyle='-')
arr001 = numpy.array(differences)
mean001 = numpy.mean(arr001)
std001 = numpy.std(arr001)
print 'mean of pixel values between strips',mean001
print 'std of pixel values between strips',std001

plotYvalues = []
stripNumber = []
if int(adjustToRuler) == 1:
    #load in the adjustment
    adjustmentList = pickle.load( open( adjustmentFile, "rb" ) )
    # we scan through the centers of mass and find which adjustment lists they are between
    newCentersInMicrons = []
    print 'number of centers of masses:',len(centersOfMass)
    firstFound = False
    firstDistance = 0
    for num,i in enumerate(centersOfMass):
        for j in range(0,len(adjustmentList)-1):
            if i >= adjustmentList[j] and i < adjustmentList[j+1]:
                # Linear interpolation between calibration marks.
                distanceInMicrons = float(adjustmentSpacing)*j + float(adjustmentSpacing)*(i - adjustmentList[j])/(adjustmentList[j+1]-adjustmentList[j])
                newCentersInMicrons.append(distanceInMicrons)
                if(not firstFound):
                    firstFound = True
                    firstDistance = distanceInMicrons
                # Deviation from the ideal, evenly pitched position.
                yValue = (distanceInMicrons - (num)*float(pitch) - firstDistance)
                stripNumber.append(num*float(Realpitch)/10000.)
                plotYvalues.append(yValue)
    arr = numpy.array(plotYvalues)
    mean = numpy.mean(arr)
    std = numpy.std(arr)
    print 'mean',mean
    print 'std',std
    # Centre the deviations about zero.
    for i,value in enumerate(plotYvalues):
        plotYvalues[i] = plotYvalues[i] - mean
else:
    print 'number of centers of masses:',len(centersOfMass)
    firstFound = False
    firstDistance = 0
    for i in range(len(centersOfMass)-1):
        differencesInMicrons = ((centersOfMass[i+1]-centersOfMass[i])-float(pitch)/float(micronsPerPixel))*float(micronsPerPixel)
        distanceInMicrons = (centersOfMass[i])*float(micronsPerPixel)
        if(not firstFound):
            firstFound = True
            firstDistance = distanceInMicrons
        yValue = (distanceInMicrons - (i)*float(pitch) -firstDistance)
        stripNumber.append(i*float(Realpitch)/10000.)
        plotYvalues.append(yValue)
        #plotYvalues.append(differencesInMicrons)

plt.plot(stripNumber, plotYvalues, marker='o', linestyle='-')
plt.ylabel('Strip location vs. ideal (microns)')
plt.xlabel('Strip location (cm)')
plt.savefig('plot.png')
#plt.ylabel('Ruler Grade location minus calculated location (microns)')
#plt.xlabel('Ruler grade # in 1/64th inches')
plt.show()
|
acourtney2015/boto
|
refs/heads/develop
|
tests/unit/ec2containerservice/__init__.py
|
12133432
| |
oliverhr/odoo
|
refs/heads/8.0-pos-pademobile-payment
|
addons/hr_gamification/wizard/__init__.py
|
388
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import grant_badge
|
js0701/chromium-crosswalk
|
refs/heads/master
|
build/android/gn/zip.py
|
25
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archives a set of files.
"""
import ast
import optparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
from util import build_utils
def main():
  """Parse the command line and archive the requested files."""
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)

  parser.add_option('--inputs', help='List of files to archive.')
  parser.add_option('--output', help='Path to output archive.')
  parser.add_option('--base-dir',
                    help='If provided, the paths in the archive will be '
                    'relative to this directory', default='.')

  options, _ = parser.parse_args()

  # --inputs arrives as a python-literal list; parse it safely.
  build_utils.DoZip(ast.literal_eval(options.inputs),
                    options.output,
                    options.base_dir)

  if options.depfile:
    build_utils.WriteDepfile(
        options.depfile,
        build_utils.GetPythonDependencies())


if __name__ == '__main__':
  sys.exit(main())
|
peterlauri/django
|
refs/heads/master
|
tests/auth_tests/models/custom_user.py
|
36
|
from django.contrib.auth.models import (
AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission,
PermissionsMixin, UserManager,
)
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
    def create_user(self, email, date_of_birth, password=None):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('Users must have an email address')

        new_user = self.model(
            email=self.normalize_email(email),
            date_of_birth=date_of_birth,
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password, date_of_birth):
        """Create a user and promote it to admin."""
        superuser = self.create_user(
            email, password=password, date_of_birth=date_of_birth)
        superuser.is_admin = True
        superuser.save(using=self._db)
        return superuser
@python_2_unicode_compatible
class CustomUser(AbstractBaseUser):
    """Test user model keyed on email, with a required date_of_birth."""
    email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    date_of_birth = models.DateField()

    # Manager deliberately bound to a non-default attribute name.
    custom_objects = CustomUserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['date_of_birth']

    def get_full_name(self):
        return self.email

    def get_short_name(self):
        return self.email

    def __str__(self):
        return self.email

    # Maybe required?
    def get_group_permissions(self, obj=None):
        return set()

    def get_all_permissions(self, obj=None):
        return set()

    def has_perm(self, perm, obj=None):
        # NOTE(review): unconditionally grants every permission — fine for
        # this test fixture, never for production code.
        return True

    def has_perms(self, perm_list, obj=None):
        return True

    def has_module_perms(self, app_label):
        return True

    # Admin required fields
    @property
    def is_staff(self):
        return self.is_admin
class RemoveGroupsAndPermissions(object):
    """
    A context manager to temporarily remove the groups and user_permissions M2M
    fields from the AbstractUser class, so they don't clash with the
    related_name sets.
    """
    def __enter__(self):
        # Save the original M2M field lists so __exit__ can restore them.
        self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
        self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
        # Contribute fresh M2M fields (without related_name) to the mixin.
        groups = models.ManyToManyField(Group, blank=True)
        groups.contribute_to_class(PermissionsMixin, "groups")
        user_permissions = models.ManyToManyField(Permission, blank=True)
        user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
        PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
        AbstractUser._meta.local_many_to_many = [groups, user_permissions]

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the saved field lists regardless of exceptions.
        AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
        PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
class CustomUserWithoutIsActiveField(AbstractBaseUser):
    """Minimal user model lacking the conventional is_active field."""
    username = models.CharField(max_length=150, unique=True)
    email = models.EmailField(unique=True)

    objects = UserManager()

    USERNAME_FIELD = 'username'
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
# Declared inside the context manager so the M2M fields don't clash.
with RemoveGroupsAndPermissions():
    class ExtensionUser(AbstractUser):
        date_of_birth = models.DateField()

        custom_objects = UserManager()

        REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
|
GoogleCloudPlatform/professional-services-data-validator
|
refs/heads/develop
|
samples/bq_result_handler.py
|
1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from data_validation import data_validation
from data_validation.result_handlers import bigquery as bqhandler
# Target GCP project; required environment variable.
PROJECT_ID = os.environ["PROJECT_ID"]

# Source and target both point at BigQuery in the same project, so this
# validation compares the public table against itself (a smoke test).
BQ_CONN = {"source_type": "BigQuery", "project_id": PROJECT_ID}

CONFIG_COUNT_VALID = {
    # BigQuery Specific Connection Config
    "source_conn": BQ_CONN,
    "target_conn": BQ_CONN,
    # Validation Type
    "type": "Column",
    # Configuration Required Depending on Validator Type
    "schema_name": "bigquery-public-data.new_york_citibike",
    "table_name": "citibike_trips",
    # Aggregates to compare; source/target columns of None mean "all rows".
    "aggregates": [
        {
            "field_alias": "count",
            "source_column": None,
            "target_column": None,
            "type": "count",
        },
        {
            "field_alias": "count__tripduration",
            "source_column": "tripduration",
            "target_column": "tripduration",
            "type": "count",
        },
        {
            "field_alias": "count__start_station_name",
            "source_column": "start_station_name",
            "target_column": "start_station_name",
            "type": "count",
        },
        {
            "field_alias": "sum__tripduration",
            "source_column": "tripduration",
            "target_column": "tripduration",
            "type": "sum",
        },
        {
            "field_alias": "max__tripduration",
            "source_column": "tripduration",
            "target_column": "tripduration",
            "type": "max",
        },
        {
            "field_alias": "min__tripduration",
            "source_column": "tripduration",
            "target_column": "tripduration",
            "type": "min",
        },
        {
            "field_alias": "avg__tripduration",
            "source_column": "tripduration",
            "target_column": "tripduration",
            "type": "avg",
        },
    ],
}

# Persist results to BigQuery, run the validation, collect a DataFrame.
result_handler = bqhandler.BigQueryResultHandler.get_handler_for_project(PROJECT_ID)
validator = data_validation.DataValidation(
    CONFIG_COUNT_VALID, verbose=True, result_handler=result_handler
)
df = validator.execute()
|
grayark/osquery
|
refs/heads/master
|
tools/tests/test_base.py
|
14
|
#!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
#from __future__ import unicode_literals
import copy
import os
import psutil
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import threading
import unittest
import pexpect
try:
from pexpect.replwrap import REPLWrapper
except ImportError as e:
print("Could not import pexpect.replwrap: %s" % (str(e)))
print(" Need pexpect version 3.3, installed version: %s" % (
str(pexpect.__version__)))
print(" pexpect location: %s" % (str(pexpect.__file__)))
exit(1)
try:
import argparse
except ImportError:
print ("Cannot import argparse: pip install argparse?")
exit(1)
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
except ImportError:
print ("Cannot import thrift: pip install thrift?")
exit(1)
'''Defaults that should be used in integration tests.'''
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = "/tmp/osquery-tests/"
CONFIG_NAME = CONFIG_DIR + "tests"
# Baseline osqueryd/osqueryi flags; per-test paths derive from CONFIG_NAME.
DEFAULT_CONFIG = {
    "options": {
        "database_path": "%s.db" % CONFIG_NAME,
        "pidfile": "%s.pid" % CONFIG_NAME,
        "config_path": "%s.conf" % CONFIG_NAME,
        "extensions_socket": "%s.em" % CONFIG_NAME,
        "extensions_interval": "1",
        "extensions_timeout": "0",
        "watchdog_level": "3",
        "disable_logging": "true",
        "force": "true",
    },
    "schedule": {},
}

# osquery-specific python tooling and utilities
import utils

'''Expect CONFIG to be set during Tester.main() to a python dict.'''
CONFIG = None
'''Expect ARGS to contain the argparsed namespace.'''
ARGS = None
# Raised by OsqueryWrapper.run_query when output cannot be parsed.
class OsqueryUnknownException(Exception):
    '''Exception thrown for unknown output from the shell'''
    pass


# Raised by OsqueryWrapper.run_query when the shell prints an Error: line.
class OsqueryException(Exception):
    '''Exception thrown when the shell returns an error'''
    pass
class OsqueryWrapper(REPLWrapper):
    '''A pexpect wrapper intended for interacting with the osqueryi REPL'''
    PROMPT = u'osquery> '
    CONTINUATION_PROMPT = u' ...> '
    ERROR_PREFIX = 'Error:'

    def __init__(self, command='../osqueryi', args={}, env={}):
        global CONFIG_NAME, CONFIG
        # Start from the shared test defaults; caller flags override.
        options = copy.deepcopy(CONFIG)["options"]
        for option in args.keys():
            options[option] = args[option]
        # Randomize the DB path so concurrent test runs don't share state.
        options["database_path"] += str(random.randint(1000, 9999))
        # BUG FIX: dict.iteritems() is Python 2-only; items() behaves the
        # same here and also works on Python 3.
        command = command + " " + " ".join(["--%s=%s" % (k, v) for
                                            k, v in options.items()])
        proc = pexpect.spawn(command, env=env)
        super(OsqueryWrapper, self).__init__(
            proc,
            self.PROMPT,
            None,
            continuation_prompt=self.CONTINUATION_PROMPT)

    def run_query(self, query):
        '''Run a query, returning the results as a list of dictionaries

        When unknown output is encountered, OsqueryUnknownException is thrown.
        When osqueryi returns an error, OsqueryException is thrown.
        '''
        query = query + ';'  # Extra semicolon causes no harm
        result = self.run_command(query)
        # On Mac, the query appears first in the string. Remove it if so.
        result = re.sub(re.escape(query), '', result).strip()
        result_lines = result.splitlines()
        if len(result_lines) < 1:
            raise OsqueryUnknownException(
                'Unexpected output:\n %s' % result_lines)
        if result_lines[0].startswith(self.ERROR_PREFIX):
            raise OsqueryException(result_lines[0])
        try:
            header = result_lines[1]
            columns = re.findall('[^ |]+', header)
            rows = []
            for line in result_lines[3:-1]:
                values = re.findall('[^ |]+', line)
                rows.append(
                    dict((col, val) for col, val in zip(columns, values)))
            return rows
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so that
            # KeyboardInterrupt/SystemExit still propagate.
            raise OsqueryUnknownException(
                'Unexpected output:\n %s' % result_lines)
class ProcRunner(object):
    '''A helper class to open a subprocess and perform testing actions.
    The subprocess is opened in a new thread and state is tracked using
    this class wrapper.
    '''
    def __init__(self, name, path, _args=[], interval=0.02, silent=False):
        # NOTE(review): _args=[] is a shared mutable default; safe only while
        # no caller mutates it in place.
        self.started = False  # becomes True once the child was spawned
        self.proc = None      # subprocess.Popen handle, cleared on exit
        self.name = name      # label used in error messages
        self.path = path      # binary to execute
        self.args = _args
        self.interval = interval  # polling granularity in seconds
        self.silent = silent      # capture stdout/stderr instead of inheriting
        # Spawn and monitor the child from a daemon thread so the caller is
        # not blocked while the process runs.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()
    def run(self):
        '''Thread body: spawn the child, then poll until it exits.'''
        pid = 0
        code = -1
        try:
            if self.silent:
                self.proc = subprocess.Popen([self.path] + self.args,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            else:
                self.proc = subprocess.Popen([self.path] + self.args)
            pid = self.proc.pid
            self.started = True
        except Exception as e:
            print (utils.red("Process start failed:") + " %s" % self.name)
            print (str(e))
            sys.exit(1)
        try:
            while self.proc.poll() is None:
                self.started = True
                time.sleep(self.interval)
            self.started = True
            code = -1 if self.proc is None else self.proc.poll()
            # Clearing proc signals "exited" to pid/isAlive below.
            self.proc = None
        except Exception as e:
            return
    def requireStarted(self, timeout=2):
        '''Block until the spawn thread reports the child started (or timeout).'''
        delay = 0
        while delay < timeout:
            if self.started is True:
                break
            time.sleep(self.interval * 10)
            delay += self.interval * 10
    def getChildren(self, timeout=1):
        '''Get the child pids.'''
        self.requireStarted()
        if not self.proc:
            return []
        try:
            proc = psutil.Process(pid=self.proc.pid)
            delay = 0
            # Children may not exist immediately after spawn; poll briefly.
            while len(proc.children()) == 0:
                if delay > timeout:
                    return []
                time.sleep(self.interval)
                delay += self.interval
            return [p.pid for p in proc.children()]
        except:
            pass
        return []
    @property
    def pid(self):
        # None once the monitor thread observed the child exit.
        self.requireStarted()
        return self.proc.pid if self.proc is not None else None
    def kill(self, children=False):
        '''SIGKILL the child (and optionally its children), best-effort.'''
        self.requireStarted()
        if children:
            for child in self.getChildren():
                try:
                    os.kill(child, 9)
                except:
                    pass
        if self.proc:
            try:
                os.kill(self.pid, 9)
            except:
                pass
        self.proc = None
    def isAlive(self, timeout=3):
        self.requireStarted()
        '''Check if the process is alive.'''
        # Wait for the monitor thread to publish self.proc before deciding.
        delay = 0
        while self.proc is None:
            if delay > timeout:
                break
            time.sleep(self.interval)
            delay += self.interval
        if self.proc is None:
            return False
        return self.proc.poll() is None
    def isDead(self, pid, timeout=5):
        self.requireStarted()
        '''Check if the process was killed.
        This is different than `isAlive` in that the timeout is an expectation
        that the process will die before the timeout, `isAlive`'s timeout is
        an expectation that the process will be scheduled before the timeout.
        '''
        try:
            proc = psutil.Process(pid=pid)
        except psutil.NoSuchProcess as e:
            return True
        delay = 0
        while delay < timeout:
            if not proc.is_running():
                return True
            time.sleep(self.interval)
            delay += self.interval
        return False
class ProcessGenerator(object):
    '''Helper methods to patch into a unittest'''
    # Every spawned ProcRunner is recorded here so tearDown can reap it.
    # NOTE(review): class-level list, shared across instances and subclasses.
    generators = []
    def setUp(self):
        # Start each test with a fresh config directory.
        shutil.rmtree(CONFIG_DIR)
        os.makedirs(CONFIG_DIR)
    def _run_daemon(self, options={}, silent=False, options_only={},
                    overwrite={}):
        '''Spawn an osquery daemon process'''
        # options: merged into config AND passed as command-line flags.
        # options_only: written to the config file only (merged after flags
        #   are built below).
        # overwrite: top-level config keys replaced wholesale.
        # NOTE(review): the {} defaults are mutable but only read here.
        global ARGS, CONFIG_NAME, CONFIG
        config = copy.deepcopy(CONFIG)
        # Randomize paths so parallel daemons do not collide.
        config["options"]["database_path"] += str(random.randint(1000, 9999))
        config["options"]["extensions_socket"] += str(random.randint(1000, 9999))
        for option in options.keys():
            config["options"][option] = options[option]
        # Flags snapshot happens BEFORE options_only/overwrite are merged,
        # so those affect only the written config file.
        flags = ["--%s=%s" % (k, v) for k, v in config["options"].items()]
        for option in options_only.keys():
            config["options"][option] = options_only[option]
        for key in overwrite:
            config[key] = overwrite[key]
        utils.write_config(config)
        binary = os.path.join(ARGS.build, "osquery", "osqueryd")
        daemon = ProcRunner("daemon", binary, flags, silent=silent)
        daemon.options = config["options"]
        self.generators.append(daemon)
        return daemon
    def _run_extension(self, timeout=0, path=None, silent=False):
        '''Spawn an osquery extension (example_extension)'''
        global ARGS, CONFIG
        config = copy.deepcopy(CONFIG)
        config["options"]["extensions_socket"] += str(random.randint(1000, 9999))
        binary = os.path.join(ARGS.build, "osquery", "example_extension.ext")
        if path is not None:
            # An explicit socket path overrides the randomized one.
            config["options"]["extensions_socket"] = path
        extension = ProcRunner("extension",
                               binary,
                               [
                                   "--socket=%s" % config["options"]["extensions_socket"],
                                   "--verbose" if not silent else "",
                                   "--timeout=%d" % timeout,
                                   "--interval=%d" % 0,
                               ],
                               silent=silent)
        self.generators.append(extension)
        extension.options = config["options"]
        return extension
    def tearDown(self):
        '''When the unit test stops, clean up child-generated processes.
        Iterate through the generated daemons and extensions, and kill -9 them.
        Unittest should stop processes they generate, but on failure the
        tearDown method will cleanup.
        '''
        for generator in self.generators:
            if generator.pid is not None:
                try:
                    os.kill(generator.pid, signal.SIGKILL)
                except Exception as e:
                    pass
class EXClient(object):
    '''An osquery Thrift/extensions python client generator.'''
    transport = None
    '''The instance transport object.'''
    _manager = None
    '''The client class's reference to run-time discovered manager.'''
    _client = None
    '''The client class's reference to run-time discovered client.'''
    def __init__(self, path=None, uuid=None):
        '''Create a extensions client to a UNIX path and optional UUID.'''
        global CONFIG
        if path is None:
            path = CONFIG["options"]["extensions_socket"]
        self.path = path
        if uuid:
            # Per-extension sockets are suffixed with the extension UUID.
            self.path += ".%s" % str(uuid)
        transport = TSocket.TSocket(unix_socket=self.path)
        transport = TTransport.TBufferedTransport(transport)
        self.protocol = TBinaryProtocol.TBinaryProtocol(transport)
        self.transport = transport
    @classmethod
    def setUp(cls, manager, client):
        '''Set the manager and client modules to generate clients from.'''
        cls._manager = manager
        cls._client = client
    def close(self):
        '''Close the transport if it was ever opened.'''
        if self.transport:
            self.transport.close()
    def open(self, timeout=0.1, interval=0.01):
        '''Attempt to open the UNIX domain socket.

        Retries every *interval* seconds up to *timeout*; returns True on
        success, False otherwise.
        '''
        delay = 0
        while delay < timeout:
            try:
                self.transport.open()
                return True
            except Exception as e:
                pass
            delay += interval
            time.sleep(interval)
        return False
    def getEM(self):
        '''Return an extension manager (osquery core) client.'''
        if self._manager is None:
            # Fixed: the original ``raise(Exception, "...")`` raised a tuple,
            # which drops the message in Python 2 and is a TypeError in
            # Python 3.
            raise Exception("The EXClient must be 'setUp' with a manager")
        return self._manager.Client(self.protocol)
    def getEX(self):
        '''Return an extension (osquery extension) client.'''
        if self._client is None:
            raise Exception("The EXClient must be 'setUp' with a client")
        return self._client.Client(self.protocol)
class Autoloader(object):
    '''Helper class to write a module or extension autoload file.'''
    def __init__(self, autoloads=None):
        '''Write *autoloads* (list of paths) newline-joined to a random file.'''
        global CONFIG_DIR
        # A mutable default ([]) would be shared between instances; use a
        # None sentinel instead.
        autoloads = [] if autoloads is None else autoloads
        self.path = CONFIG_DIR + "ext.load" + str(random.randint(1000, 9999))
        with open(self.path, "w") as fh:
            fh.write("\n".join(autoloads))
    def __del__(self):
        # Best-effort cleanup; the file may already be gone.
        try:
            os.unlink(self.path)
        except:
            pass
class TimeoutRunner(object):
    '''Run a shell command, killing it after *timeout_sec* seconds.

    stdout/stderr of the (possibly killed) process are captured on the
    instance as bytes.
    '''
    def __init__(self, cmd=None, timeout_sec=1):
        # A mutable default ([]) would be shared across calls.
        cmd = [] if cmd is None else cmd
        self.stdout = None
        self.stderr = None
        # NOTE(review): shell=True with a joined string is shell-injection
        # prone if cmd ever carries untrusted input; kept because callers
        # may rely on shell syntax in cmd.
        self.proc = subprocess.Popen(" ".join(cmd),
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        timer = threading.Timer(timeout_sec, self.proc.kill)
        timer.start()
        try:
            self.stdout, self.stderr = self.proc.communicate()
        finally:
            # Always cancel, even if communicate() raises; otherwise the
            # timer could fire later against a dead process handle.
            timer.cancel()
class Tester(object):
    '''Parses test arguments, prepares the config dir, then runs unittest.'''
    def __init__(self):
        # Populates the module-level ARGS/CONFIG used by the test helpers.
        global ARGS, CONFIG, CONFIG_DIR
        parser = argparse.ArgumentParser(description=(
            "osquery python integration testing."
        ))
        parser.add_argument(
            "--config", metavar="FILE", default=None,
            help="Use special options from a config."
        )
        parser.add_argument(
            "--verbose", default=False, action="store_true",
            help="Run daemons and extensions with --verbose"
        )
        # Directory structure options
        parser.add_argument(
            "--build", metavar="PATH", default=".",
            help="Path to osquery build (./build/<sys>/)."
        )
        ARGS = parser.parse_args()
        if not os.path.exists(ARGS.build):
            print ("Cannot find --build: %s" % ARGS.build)
            print ("You must first run: make")
            exit(1)
        # Write config
        random.seed(time.time())
        try:
            shutil.rmtree(CONFIG_DIR)
        except:
            # Allow the tester to fail
            pass
        os.makedirs(CONFIG_DIR)
        CONFIG = read_config(ARGS.config) if ARGS.config else DEFAULT_CONFIG
    def run(self):
        '''Detach into a new process group and hand control to unittest.'''
        os.setpgrp()
        # unittest.main parses argv itself; forward only the program name
        # (plus -v when requested).
        unittest_args = [sys.argv[0]]
        if ARGS.verbose:
            unittest_args += ["-v"]
        unittest.main(argv=unittest_args)
def expect(functional, expected, interval=0.01, timeout=4):
    """Poll *functional* until its result has length *expected*.

    Returns the matching result, or None if *functional* raised or the
    timeout elapsed first.
    """
    waited = 0
    while True:
        try:
            outcome = functional()
            if len(outcome) == expected:
                return outcome
        except Exception as e:
            print ("Expect exception (%s): %s not %s" % (
                str(e), str(functional), expected))
            return None
        if waited >= timeout:
            return None
        time.sleep(interval)
        waited += interval
class QueryTester(ProcessGenerator, unittest.TestCase):
    '''Base case: runs a daemon and issues queries over its extensions API.'''
    def setUp(self):
        self.binary = os.path.join(ARGS.build, "osquery", "osqueryi")
        self.daemon = self._run_daemon({
            # The set of queries will hammer the daemon process.
            "disable_watchdog": True,
            # Enable the 'hidden' flag "registry_exceptions" to prevent catching.
            "registry_exceptions": True,
        })
        self.assertTrue(self.daemon.isAlive())
        # The sets of example tests will use the extensions APIs.
        self.client = EXClient(self.daemon.options["extensions_socket"])
        # Wait for the daemon's socket to come up before asserting on it.
        expectTrue(self.client.open)
        self.assertTrue(self.client.open())
        self.em = self.client.getEM()
    def tearDown(self):
        self.client.close()
        self.daemon.kill()
    def _execute(self, query):
        '''Run one query through the extension manager; return its rows.'''
        try:
            result = self.em.query(query)
            # Status code 0 means the daemon executed the query successfully.
            self.assertEqual(result.status.code, 0)
            return result.response
        except Exception as e:
            print("General exception executing query: %s" % (
                utils.lightred(query)))
            raise e
    def _execute_set(self, queries):
        '''Run each query, reporting per-query wall time and row counts.'''
        for example in queries:
            start_time = time.time()
            result = self._execute(example)
            end_time = time.time()
            duration_ms = int((end_time - start_time) * 1000)
            if duration_ms > 2000:
                # Query took longer than 2 seconds.
                duration_ms = utils.lightred(duration_ms)
            print("Query (%sms): %s, rows: %d" % (
                duration_ms, example, len(result)))
def expectTrue(functional, interval=0.01, timeout=8):
    """Poll *functional* until it returns a truthy value.

    Returns True as soon as the callable succeeds, False once *timeout*
    seconds of polling (in *interval* steps) have elapsed.
    """
    elapsed = 0
    while elapsed < timeout:
        if functional():
            return True
        time.sleep(interval)
        elapsed += interval
    return False
def assertPermissions():
    '''Exit unless the current directory is owned by the executing user.

    Prevents loading modules/extensions from a repository owned by someone
    else.
    '''
    stat_info = os.stat('.')
    if stat_info.st_uid != os.getuid():
        print (utils.lightred("Will not load modules/extensions in tests."))
        print (utils.lightred("Repository owner (%d) executer (%d) mismatch" % (
            stat_info.st_uid, os.getuid())))
        exit(1)
def loadThriftFromBuild(build_dir):
    '''Find and import the thrift-generated python interface.

    Registers the discovered ExtensionManager/Extension modules on EXClient;
    exits with an error if the generated code is missing (build not run).
    '''
    thrift_path = build_dir + "/generated/gen-py"
    try:
        sys.path.append(thrift_path)
        sys.path.append(thrift_path + "/osquery")
        from osquery import ExtensionManager, Extension
        EXClient.setUp(ExtensionManager, Extension)
    except ImportError as e:
        print ("Cannot import osquery thrift API from %s" % (thrift_path))
        print ("Exception: %s" % (str(e)))
        print ("You must first run: make")
        exit(1)
|
LumPenPacK/NetworkExtractionFromImages
|
refs/heads/master
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/distutils/fcompiler/compaq.py
|
82
|
#http://www.compaq.com/fortran/docs/
from __future__ import division, absolute_import, print_function
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
# Compiler classes provided by this module; the Visual variant is only
# offered off-posix (or on cygwin).
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
    # Otherwise we'd get a false positive on posix systems with
    # case-insensitive filesystems (like darwin), because we'll pick
    # up /bin/df
    compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
    """Compaq Fortran compiler description for numpy.distutils."""
    compiler_type = 'compaq'
    description = 'Compaq Fortran Compiler'
    # Extracts the version token from the compiler's banner output.
    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'

    # The executable is named 'fort' on Linux; 'f90' elsewhere.
    if sys.platform[:5]=='linux':
        fc_exe = 'fort'
    else:
        fc_exe = 'f90'

    executables = {
        'version_cmd'  : ['<F90>', "-version"],
        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
        'compiler_fix' : [fc_exe, "-fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    module_dir_switch = '-module ' # not tested
    module_include_switch = '-I'

    def get_flags(self):
        # Baseline flags applied to every compile.
        return ['-assume no2underscore', '-nomixed_str_len_arg']
    def get_flags_debug(self):
        return ['-g', '-check bounds']
    def get_flags_opt(self):
        return ['-O4', '-align dcommons', '-assume bigarrays',
                '-assume nozsize', '-math_library fast']
    def get_flags_arch(self):
        return ['-arch host', '-tune host']
    def get_flags_linker_so(self):
        if sys.platform[:5]=='linux':
            return ['-shared']
        # Non-linux (e.g. Tru64) also tolerates unresolved symbols at link.
        return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
    """DIGITAL/Compaq Visual Fortran (Windows) compiler description."""
    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\
                      ' Version (?P<version>[^\s]*).*'

    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:'      #No space after /OUT:!

    static_lib_extension = ".lib"
    static_lib_format = "%s%s"
    module_dir_switch = '/module:'
    module_include_switch = '/I'

    ar_exe = 'lib.exe'
    fc_exe = 'DF'

    if sys.platform=='win32':
        from numpy.distutils.msvccompiler import MSVCCompiler

        try:
            # Prefer MSVC's librarian so archives match the MSVC toolchain;
            # fall back to the plain 'lib.exe' default on any known failure.
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError:
            msg = get_exception()
            if '_MSVCCompiler__root' in str(msg):
                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
            else:
                raise
        except IOError:
            e = get_exception()
            if not "vcvarsall.bat" in str(e):
                print("Unexpected IOError in", __file__)
                raise e
        except ValueError:
            e = get_exception()
            if not "path']" in str(e):
                print("Unexpected ValueError in", __file__)
                raise e

    executables = {
        'version_cmd'  : ['<F90>', "/what"],
        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
        'compiler_fix' : [fc_exe, "/fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : [ar_exe, "/OUT:"],
        'ranlib'       : None
        }

    def get_flags(self):
        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
                '/names:lowercase', '/assume:underscore']
    def get_flags_opt(self):
        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
    def get_flags_arch(self):
        return ['/threads']
    def get_flags_debug(self):
        return ['/debug']
if __name__ == '__main__':
    # Smoke test: instantiate the compaq compiler and print its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='compaq')
    compiler.customize()
    print(compiler.get_version())
|
jfterpstra/bluebottle
|
refs/heads/develop
|
bluebottle/payments_mock/__init__.py
|
12133432
| |
benschmaus/catapult
|
refs/heads/master
|
third_party/gsutil/gslib/third_party/__init__.py
|
12133432
| |
giovaneliberato/python_birds_fp
|
refs/heads/simples
|
fases/rodar_fase_exemplo.py
|
12
|
# -*- coding: utf-8 -*-
from os import path
import sys
# Make the project root (one level above this file) importable.
# Fixed: the original reassigned project_dir = path.join('..'), discarding
# the dirname and appending a CWD-relative '..' instead.
project_dir = path.dirname(__file__)
project_dir = path.join(project_dir, '..')
sys.path.append(project_dir)
from atores import PassaroAmarelo, PassaroVermelho, Obstaculo, Porco
from fase import Fase
from placa_grafica_tkinter import rodar_fase
if __name__ == '__main__':
    # Build a demo level and run it with the Tkinter UI.
    fase = Fase(intervalo_de_colisao=32)
    # Add red birds
    for i in range(5):
        fase.adicionar_passaro(PassaroVermelho(30, 30))
    # Add yellow birds
    for i in range(30):
        fase.adicionar_passaro(PassaroAmarelo(30, 30))
    # Obstacles: a column at the first coordinate 300
    for i in range(30, 480, 32):
        fase.adicionar_obstaculo(Obstaculo(300, i))
    # Pigs: a column at the first coordinate 600
    for i in range(30, 300, 32):
        fase.adicionar_porco(Porco(600, i))
    rodar_fase(fase)
|
phlax/pootle
|
refs/heads/master
|
pootle/apps/virtualfolder/receivers.py
|
7
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from pootle.core.delegate import revision_updater
from pootle_app.models import Directory
from pootle_store.models import Store
from .delegate import vfolder_finder
from .models import VirtualFolder
@receiver(post_save, sender=Store)
def handle_store_save(sender, instance, created, **kwargs):
    """On Store creation, add the new store to all matching vfolders."""
    # Only creation matters; updates to existing stores are ignored here.
    if not created:
        return
    vfolder_finder.get(
        instance.__class__)(instance).add_to_vfolders()
@receiver(post_save, sender=VirtualFolder)
def handle_vfolder_save(sender, instance, created, **kwargs):
    """Re-evaluate store membership whenever a vfolder is saved."""
    instance.path_matcher.update_stores()
@receiver(pre_delete, sender=VirtualFolder)
def handle_vfolder_delete(sender, instance, **kwargs):
    """Detach all stores from a vfolder being deleted and refresh stats."""
    # Capture the stores' parent directories before mutating the relation.
    dirs = set(instance.stores.values_list("parent", flat=True))
    for store in instance.stores.all():
        instance.stores.remove(store)
        # If the store carried this vfolder's priority, recompute it.
        if store.priority == instance.priority:
            store.set_priority()
    updater = revision_updater.get(Directory)(
        object_list=Directory.objects.filter(pk__in=dirs))
    updater.update(keys=["stats"])
|
fabian4/ceilometer
|
refs/heads/master
|
ceilometer/tests/unit/image/__init__.py
|
12133432
| |
lgscofield/odoo
|
refs/heads/8.0
|
addons/mrp/tests/test_multicompany.py
|
374
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMrpMulticompany(common.TransactionCase):
    """Checks stock location defaults under a multicompany user."""
    def setUp(self):
        super(TestMrpMulticompany, self).setUp()
        cr, uid = self.cr, self.uid
        # Useful models
        self.ir_model_data = self.registry('ir.model.data')
        self.res_users = self.registry('res.users')
        self.stock_location = self.registry('stock.location')
        group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
        group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
        company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
        # A stock-manager user bound to the second company, used to
        # exercise multicompany access rules below.
        self.multicompany_user_id = self.res_users.create(cr, uid,
            {'name': 'multicomp', 'login': 'multicomp',
             'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
             'company_id': company_2_id, 'company_ids': [(6,0,[company_2_id])]})
    def test_00_multicompany_user(self):
        """check no error on getting default mrp.production values in multicompany setting"""
        # Run as the multicompany user rather than the admin.
        cr, uid, context = self.cr, self.multicompany_user_id, {}
        fields = ['location_src_id', 'location_dest_id']
        defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
        for field in fields:
            if defaults.get(field):
                try:
                    self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
                except Exception, exc:
                    assert False, "unreadable location %s: %s" % (field, exc)
|
freesmartphone/framework
|
refs/heads/master
|
framework/subsystems/odeviced/audio.py
|
1
|
#!/usr/bin/env python
"""
Open Device Daemon - A plugin for audio device peripherals
(C) 2008-2009 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
(C) 2008 Openmoko, Inc.
GPLv2 or later
Package: odeviced
Module: audio
"""
MODULE_NAME = "odeviced.audio"
__version__ = "0.5.9.11"
from framework.config import config
from framework.patterns import asyncworker, processguard
from helpers import DBUS_INTERFACE_PREFIX, DBUS_PATH_PREFIX, readFromFile, writeToFile, cleanObjectName
APLAY_COMMAND = "/usr/bin/aplay"
import gobject
import dbus.service
import sys, os, time, types, subprocess
import logging
logger = logging.getLogger( "odeviced.audio" )
#----------------------------------------------------------------------------#
class UnknownFormat( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when a sound file's format cannot be determined."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.UnknownFormat"
#----------------------------------------------------------------------------#
class UnsupportedFormat( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when a sound file's format is recognized but not playable."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.UnsupportedFormat"
#----------------------------------------------------------------------------#
class PlayerError( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised on an internal player (e.g. pipeline) failure."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.PlayerError"
#----------------------------------------------------------------------------#
class NotPlaying( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when stopping a sound that is not currently playing."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.NotPlaying"
#----------------------------------------------------------------------------#
class AlreadyPlaying( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when starting a sound that is already playing."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.AlreadyPlaying"
#----------------------------------------------------------------------------#
class ScenarioInvalid( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised for a scenario name that is not available."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.ScenarioInvalid"
#----------------------------------------------------------------------------#
class ScenarioStackUnderflow( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when pulling a scenario from an empty scenario stack."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.ScenarioStackUnderflow"
#----------------------------------------------------------------------------#
class DeviceFailed( dbus.DBusException ):
#----------------------------------------------------------------------------#
    """Raised when the underlying audio device reports a failure."""
    _dbus_error_name = "org.freesmartphone.Device.Audio.DeviceFailed"
#----------------------------------------------------------------------------#
class Player( asyncworker.AsyncWorker ):
#----------------------------------------------------------------------------#
    """
    Base class implementing common logic for all Players.

    Tasks (play/stop/panic) are enqueued with their result callbacks and
    dispatched serially to the matching ``task_<name>`` method.
    """
    def __init__( self, dbus_object ):
        asyncworker.AsyncWorker.__init__( self )
        # Owning dbus object; subclasses use it to emit SoundStatus signals.
        self._object = dbus_object
    def enqueueTask( self, ok_cb, error_cb, task, *args ):
        """Queue a named task with its callbacks for async processing."""
        self.enqueue( ok_cb, error_cb, task, args )
    def onProcessElement( self, element ):
        """Dequeue one element and dispatch to the matching task_* method."""
        logger.debug( "getting task from queue..." )
        ok_cb, error_cb, task, args = element
        logger.debug( "got task: %s %s" % ( task, args ) )
        try:
            method = getattr( self, "task_%s" % task )
        except AttributeError:
            # Unknown task names are logged and dropped, not errors.
            logger.debug( "unhandled task: %s %s" % ( task, args ) )
        else:
            method( ok_cb, error_cb, *args )
        return True
    def task_play( self, ok_cb, error_cb, name, loop, length ):
        # Default implementations: report success without doing anything.
        ok_cb()
    def task_stop( self, ok_cb, error_cb, name ):
        ok_cb()
    def task_panic( self, ok_cb, error_cb ):
        ok_cb()
    @classmethod
    def supportedFormats( cls ):
        """Return the list of file extensions this player can handle."""
        return []
#----------------------------------------------------------------------------#
class NullPlayer( Player ):
#----------------------------------------------------------------------------#
    """
    A do-nothing fallback player: every task logs and reports success,
    useful e.g. if no audio subsystem is available.
    """
    def task_play( self, ok_cb, error_cb, name, loop, length ):
        message = "NullPlayer [not] playing sound %s" % name
        logger.info( message )
        ok_cb()
    def task_stop( self, ok_cb, error_cb, name ):
        message = "NullPlayer [not] stopping sound %s" % name
        logger.info( message )
        ok_cb()
    def task_panic( self, ok_cb, error_cb ):
        logger.info( "NullPlayer [not] stopping all sounds" )
        ok_cb()
#----------------------------------------------------------------------------#
class GStreamerPlayer( Player ):
#----------------------------------------------------------------------------#
    """
    A Gstreamer based Player.

    Each playing sound is one GStreamer pipeline, tracked in self.pipelines
    as name -> ( pipeline, status, loop, length, ok_cb, error_cb ).
    """
    # file extension -> gst-launch decoder description
    decoderMap = {}
    @classmethod
    def supportedFormats( cls ):
        # gst is imported lazily so the module loads without python-gst.
        try:
            global gst
            import gst as gst
        except ImportError:
            logger.warning( "Could not setup gstreamer player (python-gst not installed?)" )
            return []
        # set up decoder map
        if cls.decoderMap == {}:
            cls._trySetupDecoder( "mod", "modplug" )
            cls._trySetupDecoder( "mp3", "mad" )
            cls._trySetupDecoder( "sid", "siddec" )
            cls._trySetupDecoder( "wav", "wavparse" )
            # ogg w/ integer vorbis decoder, found on embedded systems
            haveit = cls._trySetupDecoder( "ogg", "oggdemux ! ivorbisdec ! audioconvert" )
            if not haveit:
                # ogg w/ floating point vorbis decoder, found on desktop systems
                cls._trySetupDecoder( "ogg", "oggdemux ! vorbisdec ! audioconvert" )
        return cls.decoderMap.keys()
    @classmethod
    def _trySetupDecoder( cls, ext, dec ):
        """Register decoder *dec* for extension *ext* if GST can parse it."""
        # FIXME might even save the bin's already, not just the description
        try:
            gst.parse_bin_from_description( dec, 0 )
        except gobject.GError, e:
            logger.warning( "GST can't parse %s; Not adding %s to decoderMap" % ( dec, ext ) )
            return False
        else:
            cls.decoderMap[ext] = dec
            return True
    def __init__( self, *args, **kwargs ):
        Player.__init__( self, *args, **kwargs )
        # name -> ( pipeline, status, loop, length, ok_cb, error_cb )
        self.pipelines = {}
    def _onMessage( self, bus, message, name ):
        """GStreamer bus callback: handle EOS, errors, and state changes."""
        pipeline, status, loop, length, ok_cb, error_cb = self.pipelines[name]
        logger.debug( "GST message received while file status = %s" % status )
        t = message.type
        if t == gst.MESSAGE_EOS:
            # shall we restart?
            if loop:
                logger.debug( "G: EOS -- restarting stream" )
                pipeline.seek_simple( gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, 0 )
            else:
                logger.debug( "G: EOS" )
                self._updateSoundStatus( name, "stopped" )
                pipeline.set_state( gst.STATE_NULL )
                del self.pipelines[name]
        elif t == gst.MESSAGE_ERROR:
            pipeline.set_state(gst.STATE_NULL)
            del self.pipelines[name]
            err, debug = message.parse_error()
            logger.debug( "G: ERROR: %s %s" % ( err, debug ) )
            error_cb( PlayerError( err.message ) )
        elif t == gst.MESSAGE_STATE_CHANGED:
            previous, current, pending = message.parse_state_changed()
            logger.debug( "G: STATE NOW: (%s) -> %s -> (%s)" % ( previous, current, pending ) )
            # READY->PAUSED heading for PLAYING: treat as "now playing" and
            # arm the optional length timeout.
            if ( previous, current, pending ) == ( gst.STATE_READY, gst.STATE_PAUSED, gst.STATE_PLAYING ):
                self._updateSoundStatus( name, "playing" )
                ok_cb()
                if length:
                    logger.debug( "adding timeout for %s of %d seconds" % ( name, length ) )
                    gobject.timeout_add_seconds( length, self._playTimeoutReached, name )
            elif ( previous, current, pending ) == ( gst.STATE_PLAYING, gst.STATE_PAUSED, gst.STATE_READY ):
                self._updateSoundStatus( name, "stopped" )
                pipeline.set_state( gst.STATE_NULL )
                del self.pipelines[name]
                # ok_cb()
            else: # uninteresting state change
                pass
        else:
            logger.debug( "G: UNHANDLED: %s" % t )
    def _playTimeoutReached( self, name ):
        """glib timeout callback: restart (loop) or stop the named sound."""
        try:
            pipeline, status, loop, length, ok_cb, error_cb = self.pipelines[name]
        except KeyError: # might have vanished in the meantime?
            logger.warning( "audio pipeline for %s has vanished before timer could fire" % name )
            return False
        previous, current, next = pipeline.get_state()
        logger.debug( "custom player timeout for %s reached, state is %s" % ( name, current ) )
        if loop:
            # Tear down and replay from scratch (callbacks are no-ops here).
            pipeline.set_state( gst.STATE_NULL )
            del self.pipelines[name]
            self.task_play( lambda: None, lambda foo: None, name, loop, length )
            #pipeline.seek_simple( gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, 0 )
        else:
            self.task_stop( lambda: None, lambda foo: None, name )
        return False # don't call us again, mainloop
    def _updateSoundStatus( self, name, newstatus ):
        """Record the new status and emit the SoundStatus dbus signal."""
        pipeline, status, loop, length, ok_cb, error_cb = self.pipelines[name]
        if newstatus != status:
            self.pipelines[name] = pipeline, newstatus, loop, length, ok_cb, error_cb
            self._object.SoundStatus( name, newstatus, {} )
    def task_play( self, ok_cb, error_cb, name, loop, length ):
        if name in self.pipelines:
            error_cb( AlreadyPlaying( name ) )
        else:
            # Split options from filename, these may be useful for advanced
            # settings on MOD and SID files.
            try:
                base, ext = name.rsplit( '.', 1 )
            except ValueError: # no extension provided
                return error_cb( UnknownFormat( "Can't guess format from extension" ) )
            options = ext.split( ';' )
            ext = options.pop( 0 )
            # Escape spaces so the filename survives gst.parse_launch below.
            file = ".".join( [ base, ext ] ).replace( ' ', r'\ ' )
            try:
                decoder = GStreamerPlayer.decoderMap[ ext ]
            except KeyError:
                return error_cb( UnknownFormat( "Known formats are %s" % self.decoderMap.keys() ) )
            else:
                if len(options) > 0:
                    decoder = decoder + " " + " ".join( options )
                # parse_launch may burn a few cycles compared to element_factory_make,
                # however it should still be faster than creating the pipeline from
                # individual elements in python, since it's all happening in compiled code
                try:
                    pipeline = gst.parse_launch( 'filesrc location="%s" ! %s ! alsasink' % ( file, decoder ) )
                except gobject.GError, e:
                    logger.exception( "could not instanciate pipeline: %s" % e )
                    return error_cb( PlayerError( "Could not instanciate pipeline due to an internal error." ) )
                else:
                    # everything ok, go play; ok_cb fires from _onMessage
                    # once the pipeline actually reaches the playing state.
                    bus = pipeline.get_bus()
                    bus.add_signal_watch()
                    bus.connect( "message", self._onMessage, name )
                    self.pipelines[name] = ( pipeline, "unknown", loop, length, ok_cb, error_cb )
                    pipeline.set_state( gst.STATE_PLAYING )
    def task_stop( self, ok_cb, error_cb, name ):
        try:
            pipeline = self.pipelines[name][0]
        except KeyError:
            error_cb( NotPlaying( name ) )
        else:
            # The PLAYING->READY transition is observed in _onMessage, which
            # performs the bookkeeping and emits "stopped".
            pipeline.set_state( gst.STATE_READY )
            ok_cb()
    def task_panic( self, ok_cb, error_cb ):
        # Stop every pipeline; cleanup happens via the bus callbacks.
        for name in self.pipelines:
            self.pipelines[name][0].set_state( gst.STATE_READY )
        ok_cb()
#----------------------------------------------------------------------------#
class AlsaPlayer( Player ):
#----------------------------------------------------------------------------#
    """
    An alsa player, useful for wav format, when the latency of the GStreamerPlayer
    is too heavy.

    Each playing sound is an aplay subprocess wrapped in a ProcessGuard.
    """
    @classmethod
    def supportedFormats( cls ):
        # Only usable when the aplay binary is installed.
        if os.path.exists( APLAY_COMMAND ):
            return [ "wav" ]
        else:
            return []
    # name -> ( ProcessGuard, loop, length )
    # NOTE(review): class-level dict, shared by all AlsaPlayer instances.
    sounds = {}
    def task_play( self, ok_cb, error_cb, name, loop, length ):
        if name in self.sounds:
            error_cb( AlreadyPlaying() )
        else:
            p = processguard.ProcessGuard( [ "/usr/bin/aplay", str(name) ] )
            p.execute( onExit = self._onPlayingFinished )
            self.sounds[name] = p, loop, length
            ok_cb()
            logger.info( "AlsaPlayer playing sound %s" % name )
            self._object.SoundStatus( name, "playing", {} )
    def task_stop( self, ok_cb, error_cb, name ):
        if name not in self.sounds:
            error_cb( NotPlaying() )
        else:
            p, loop, length = self.sounds[name]
            p.shutdown()
            del self.sounds[name]
            ok_cb()
            logger.info( "AlsaPlayer stopped sound %s" % name )
            self._object.SoundStatus( name, "stopped", {} )
    def task_panic( self, ok_cb, error_cb ):
        logger.info( "AlsaPlayer stopping all sounds" )
        # items() returns a list copy in Python 2, so deleting entries
        # inside the loop is safe here.
        for key, value in self.sounds.items():
            p, loop, length = value
            p.shutdown()
            del self.sounds[key]
            self._object.SoundStatus( key, "stopped", {} )
        ok_cb()
    def _onPlayingFinished( self, pid, exitcode, exitsignal ):
        """ProcessGuard exit hook: restart looping sounds, reap the rest."""
        logger.info( "AlsaPlayer %d exited with exitcode %d (signal %d)" % ( pid, exitcode, exitsignal ) )
        normalShutdown = ( exitcode == 0 )
        for key, value in self.sounds.items():
            p, loop, length = value
            # Match on current or previous pid: the guard may already hold a
            # respawned child's pid.
            if p.hadpid == pid or p.pid == pid:
                if normalShutdown and loop:
                    logger.debug( "AlsaPlayer restarting sound %s due to loop value" % key )
                    p.execute( onExit = self._onPlayingFinished )
                else:
                    del self.sounds[key]
                    self._object.SoundStatus( key, "stopped", {} )
#----------------------------------------------------------------------------#
class AlsaScenarios( object ):
#----------------------------------------------------------------------------#
    """
    Controls alsa audio scenarios.

    A scenario is an alsactl state file in *statedir*; scenarios can be
    set directly or pushed/pulled on a stack.
    """
    def __init__( self, dbus_object, statedir, defaultscene ):
        self._object = dbus_object      # dbus object used to emit Scenario
        self._statedir = statedir       # directory holding *.state files
        self._default = defaultscene    # scenario restored at startup
        self._statenames = None         # cached scenario names; None = stale
        # FIXME set default profile (from configuration)
        # FIXME should be set when this audio object initializes
        self._current = "unknown"
        self._stack = []
        # Defer restoring the default scenario to the main loop.
        gobject.idle_add( self._initScenario )
        logger.info( " ::: using alsa scenarios in %s, default = %s" % ( statedir, defaultscene ) )
    def _initScenario( self ):
        # gather default profile from preferences
        if os.path.exists( "%s/%s.state" % ( self._statedir, self._default ) ):
            self.setScenario( self._default )
            logger.info( "default alsa scenario restored" )
        else:
            logger.warning( "default alsa scenario '%s' not found in '%s'. device may start uninitialized" % ( self._default, self._statedir ) )
        # Returning False removes the idle handler after one run.
        return False
    def pushScenario( self, scenario ):
        """Activate *scenario*, remembering the previous one on the stack."""
        current = self._current
        if self.setScenario( scenario ):
            self._stack.append( current )
            return True
        else:
            return False
    def pullScenario( self ):
        """Pop and re-activate the previously pushed scenario.

        Returns the restored scenario name, or False on failure.
        Raises IndexError if the stack is empty.
        """
        previous = self._stack.pop()
        result = self.setScenario( previous )
        if result is False:
            return result
        else:
            return previous
    def getScenario( self ):
        """Return the name of the currently active scenario."""
        return self._current
    def storeScenario( self, scenario ):
        """Persist the current mixer state as *scenario* via alsactl."""
        statename = "%s/%s.state" % ( self._statedir, scenario )
        result = subprocess.call( [ "alsactl", "-f", statename, "store" ] )
        if result != 0:
            logger.error( "can't store alsa scenario to %s" % statename )
            return False
        else:
            # reload scenarios next time
            self._statenames = None
            return True
    def getAvailableScenarios( self ):
        """Return the scenario names found in the state directory (cached)."""
        # FIXME might check timestamp or use inotify
        if self._statenames is None:
            try:
                files = os.listdir( self._statedir )
            except OSError:
                logger.warning( "no state files in %s found" % self._statedir )
                self._statenames = []
            else:
                self._statenames = [ state[:-6] for state in files if state.endswith( ".state" ) ]
        return self._statenames
    def setScenario( self, scenario ):
        """Restore *scenario* via alsactl; emit the Scenario signal on success."""
        if not scenario in self.getAvailableScenarios():
            return False
        statename = "%s/%s.state" % ( self._statedir, scenario )
        result = subprocess.call( [ "alsactl", "-f", statename, "restore" ] )
        if result == 0:
            # work around ASoC DAPM problem
            if scenario == "gsmbluetooth":
                result += subprocess.call( [ "amixer", "sset", "Capture Left Mixer", "Analogue Mix Right" ] )
                result += subprocess.call( [ "amixer", "sset", "Capture Left Mixer", "Analogue Mix Left" ] )
        if result == 0:
            self._current = scenario
            self._object.Scenario( scenario, "user" )
            return True
        else:
            logger.error( "can't set alsa scenario from %s" % statename )
            return False
    def hasScenario( self, scenario ):
        """Return True if *scenario* exists in the state directory."""
        return scenario in self.getAvailableScenarios()
#----------------------------------------------------------------------------#
class Audio( dbus.service.Object ):
#----------------------------------------------------------------------------#
    """
    A Dbus Object implementing org.freesmartphone.Device.Audio

    Exposes sound playback (delegated to per-format player backends) and
    alsa scenario management over DBus.
    """
    DBUS_INTERFACE = DBUS_INTERFACE_PREFIX + ".Audio"
    # Maps file format/extension -> player instance handling that format.
    # NOTE(review): class-level mutable dict, shared across instances; only
    # safe because factory() below creates exactly one Audio object.
    players = {}
    def __init__( self, bus, index, node ):
        self.interface = self.DBUS_INTERFACE
        self.path = DBUS_PATH_PREFIX + "/Audio"
        dbus.service.Object.__init__( self, bus, self.path )
        # Register every backend for each format it supports; the first
        # backend claiming a format wins.
        for player in ( AlsaPlayer, GStreamerPlayer, ):
            supportedFormats = player.supportedFormats()
            instance = player( self )
            for format in supportedFormats:
                if format not in self.players:
                    self.players[format] = instance
        scenario_dir = config.getValue( MODULE_NAME, "scenario_dir", "/etc/alsa/scenario" )
        default_scenario = config.getValue( MODULE_NAME, "default_scenario", "default" )
        self.scenario = AlsaScenarios( self, scenario_dir, default_scenario )
        logger.info( "%s %s initialized. Serving %s at %s" % ( self.__class__.__name__, __version__, self.interface, self.path ) )
        logger.debug( "^^^ found players for following formats: '%s'" % self.players.keys() )
    def playerForFile( self, name ):
        """Return the player registered for the file's extension.

        Raises UnknownFormat when 'name' has no extension, UnsupportedFormat
        when no player handles it. Extra options appended to the extension
        after ';' separators are stripped and ignored here.
        """
        try:
            base, ext = name.rsplit( '.', 1 )
        except ValueError: # no extension provided
            raise UnknownFormat( "Can't guess format from extension" )
        options = ext.split( ';' )
        ext = options.pop( 0 )
        try:
            player = self.players[ext]
        except KeyError:
            raise UnsupportedFormat( "No player registered for format '%s'" % ext )
        return player
    #
    # dbus info methods
    #
    @dbus.service.method( DBUS_INTERFACE, "", "a{sv}",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def GetInfo( self, dbus_ok, dbus_error ):
        """Return a dict describing this device: name, supported formats,
        current scenario and available scenarios."""
        info = {}
        formats = []
        for player in self.players.values():
            formats += player.supportedFormats()
        info["name"] = "Default Audio Device"
        info["formats"] = list( set( formats ) )
        info["scenario"] = self.scenario.getScenario()
        info["scenarios"] = dbus.Array( self.scenario.getAvailableScenarios(), "as" )
        dbus_ok( info )
    #
    # dbus sound methods
    #
    @dbus.service.method( DBUS_INTERFACE, "sii", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def PlaySound( self, name, loop, length, dbus_ok, dbus_error ):
        """Play 'name', looping 'loop' times, for at most 'length' seconds
        (semantics of loop/length are handled by the player backend)."""
        self.playerForFile( name ).enqueueTask( dbus_ok, dbus_error, "play", name, loop, length )
    @dbus.service.method( DBUS_INTERFACE, "s", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def StopSound( self, name, dbus_ok, dbus_error ):
        """Stop the sound previously started for 'name'."""
        self.playerForFile( name ).enqueueTask( dbus_ok, dbus_error, "stop", name )
    @dbus.service.method( DBUS_INTERFACE, "", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def StopAllSounds( self, dbus_ok, dbus_error ):
        """Stop every sound on every registered player backend."""
        for player in self.players.values():
            player.enqueueTask( dbus_ok, dbus_error, "panic" )
    #
    # dbus scenario methods
    #
    # FIXME ugly. error handling should be done by the scenario itself
    @dbus.service.method( DBUS_INTERFACE, "", "as",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def GetAvailableScenarios( self, dbus_ok, dbus_error ):
        """Return the list of scenario names found in the state directory."""
        dbus_ok( self.scenario.getAvailableScenarios() )
    @dbus.service.method( DBUS_INTERFACE, "", "s",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def GetScenario( self, dbus_ok, dbus_error ):
        """Return the name of the currently active scenario."""
        dbus_ok( self.scenario.getScenario() )
    @dbus.service.method( DBUS_INTERFACE, "s", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def SetScenario( self, name, dbus_ok, dbus_error ):
        """Activate scenario 'name'; errors with ScenarioInvalid when it
        does not exist, DeviceFailed when alsactl fails."""
        if not self.scenario.hasScenario( name ):
            dbus_error( ScenarioInvalid( "available scenarios are: %s" % self.scenario.getAvailableScenarios() ) )
        else:
            if self.scenario.setScenario( name ):
                dbus_ok()
            else:
                dbus_error( DeviceFailed( "unknown error while setting scenario" ) )
    @dbus.service.method( DBUS_INTERFACE, "s", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def PushScenario( self, name, dbus_ok, dbus_error ):
        """Activate 'name', remembering the current scenario for PullScenario."""
        if not self.scenario.hasScenario( name ):
            dbus_error( ScenarioInvalid( "available scenarios are: %s" % self.scenario.getAvailableScenarios() ) )
        else:
            if self.scenario.pushScenario( name ):
                dbus_ok()
            else:
                dbus_error( DeviceFailed( "unknown error while pushing scenario" ) )
    @dbus.service.method( DBUS_INTERFACE, "", "s",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def PullScenario( self, dbus_ok, dbus_error ):
        """Revert to the last pushed scenario and return its name; errors
        with ScenarioStackUnderflow when nothing was pushed."""
        try:
            previousScenario = self.scenario.pullScenario()
        except IndexError:
            dbus_error( ScenarioStackUnderflow( "forgot to push a scenario?" ) )
        else:
            if previousScenario is False:
                dbus_error( DeviceFailed( "unknown error while pulling scenario" ) )
            else:
                dbus_ok( previousScenario )
    @dbus.service.method( DBUS_INTERFACE, "s", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    def StoreScenario( self, name, dbus_ok, dbus_error ):
        """Save the current mixer state as scenario 'name'."""
        if self.scenario.storeScenario( name ):
            dbus_ok()
        else:
            dbus_error( DeviceFailed( "unknown error while storing scenario" ) )
    #
    # dbus signals
    #
    @dbus.service.signal( DBUS_INTERFACE, "ssa{sv}" )
    def SoundStatus( self, name, status, properties ):
        # Emitted whenever a sound changes state ("playing"/"stopped"/...).
        logger.info( "sound status %s %s %s" % ( name, status, properties ) )
    @dbus.service.signal( DBUS_INTERFACE, "ss" )
    def Scenario( self, scenario, reason ):
        # Emitted whenever the active scenario changes.
        logger.info( "sound scenario %s %s" % ( scenario, reason ) )
#----------------------------------------------------------------------------#
def factory( prefix, controller ):
#----------------------------------------------------------------------------#
    """Instantiate and return the plugin objects provided by this module."""
    audio_object = Audio( controller.bus, 0, "" )
    return [ audio_object ]
if __name__ == "__main__":
import dbus
bus = dbus.SystemBus()
|
beheadedmyway/gity
|
refs/heads/master
|
python/erroremail.py
|
2
|
# Copyright Aaron Smith 2009
#
# This file is part of Gity.
#
# Gity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gity. If not, see <http://www.gnu.org/licenses/>.
from _util import *
from _argv import *
try:
import sys,re,os,subprocess,smtplib
from email.mime.text import MIMEText
except Exception,e:
sys.stderr.write(str(e))
exit(84)
errors=open(os.environ['gitConfigPath'] + "/vendor/gity/tmp/errors","r")
content=errors.read()
errors.close()
try:
exit(0)
msg=MIMEText(content)
msg['Subject'] = "Gity Bot: Error"
mailServer=smtplib.SMTP("smtp.gmail.com",587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login('gitybot@macendeavor.com','')
mailServer.sendmail('gitybot@macendeavor.com','support@macendeavor.com',msg.as_string())
mailServer.close()
except Exception,e:
pass
|
AMOboxTV/AMOBox.LegoBuild
|
refs/heads/master
|
plugin.program.super.favourites/launcher.py
|
18
|
#
# Copyright (C) 2014
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
# Launch the Super Favourites addon, then close whatever dialog is on top so
# the addon window is immediately visible.
xbmc.executebuiltin('RunAddon(plugin.program.super.favourites)')
xbmc.executebuiltin('Dialog.Close(all, true)')
|
peterlauri/django
|
refs/heads/master
|
tests/signed_cookies_tests/tests.py
|
28
|
from __future__ import unicode_literals
from django.core import signing
from django.http import HttpRequest, HttpResponse
from django.test import SimpleTestCase, override_settings
from django.test.utils import freeze_time
class SignedCookieTest(SimpleTestCase):

    def test_can_set_and_read_signed_cookies(self):
        """A signed cookie round-trips from response to request."""
        resp = HttpResponse()
        resp.set_signed_cookie('c', 'hello')
        self.assertIn('c', resp.cookies)
        self.assertTrue(resp.cookies['c'].value.startswith('hello:'))
        req = HttpRequest()
        req.COOKIES['c'] = resp.cookies['c'].value
        self.assertEqual(req.get_signed_cookie('c'), 'hello')

    def test_can_use_salt(self):
        """Reading requires the same salt the cookie was signed with."""
        resp = HttpResponse()
        resp.set_signed_cookie('a', 'hello', salt='one')
        req = HttpRequest()
        req.COOKIES['a'] = resp.cookies['a'].value
        self.assertEqual(req.get_signed_cookie('a', salt='one'), 'hello')
        with self.assertRaises(signing.BadSignature):
            req.get_signed_cookie('a', salt='two')

    def test_detects_tampering(self):
        """A modified cookie value fails signature validation."""
        resp = HttpResponse()
        resp.set_signed_cookie('c', 'hello')
        req = HttpRequest()
        req.COOKIES['c'] = resp.cookies['c'].value[:-2] + '$$'
        with self.assertRaises(signing.BadSignature):
            req.get_signed_cookie('c')

    def test_default_argument_suppresses_exceptions(self):
        """Passing default= returns it instead of raising on a bad signature."""
        resp = HttpResponse()
        resp.set_signed_cookie('c', 'hello')
        req = HttpRequest()
        req.COOKIES['c'] = resp.cookies['c'].value[:-2] + '$$'
        self.assertIsNone(req.get_signed_cookie('c', default=None))

    def test_max_age_argument(self):
        """max_age counts from signing time; older cookies raise SignatureExpired."""
        payload = 'hello'
        with freeze_time(123456789):
            resp = HttpResponse()
            resp.set_signed_cookie('c', payload)
            req = HttpRequest()
            req.COOKIES['c'] = resp.cookies['c'].value
            self.assertEqual(req.get_signed_cookie('c'), payload)
        # 11 seconds later: max_age >= 11 still validates, 10 is too strict.
        with freeze_time(123456800):
            self.assertEqual(req.get_signed_cookie('c', max_age=12), payload)
            self.assertEqual(req.get_signed_cookie('c', max_age=11), payload)
            with self.assertRaises(signing.SignatureExpired):
                req.get_signed_cookie('c', max_age=10)

    @override_settings(SECRET_KEY=b'\xe7')
    def test_signed_cookies_with_binary_key(self):
        """Signing also works when SECRET_KEY is bytes rather than text."""
        resp = HttpResponse()
        resp.set_signed_cookie('c', 'hello')
        req = HttpRequest()
        req.COOKIES['c'] = resp.cookies['c'].value
        self.assertEqual(req.get_signed_cookie('c'), 'hello')
|
albertomurillo/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_facts.py
|
13
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqldatabase_facts
version_added: "2.7"
short_description: Get Azure MySQL Database facts.
description:
- Get facts of MySQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description: A list of dictionaries containing facts for MySQL Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMySqlDatabaseFacts(AzureRMModuleBase):
    """Facts module: fetch one MySQL database or list all databases on a server."""

    def __init__(self):
        # user-facing module arguments
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            server_name=dict(type='str', required=True),
            name=dict(type='str')
        )
        # results of the module operation (facts modules never change state)
        self.results = dict(changed=False)
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMySqlDatabaseFacts, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to single-database or whole-server listing."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        have_server = self.resource_group is not None and self.server_name is not None
        if have_server and self.name is not None:
            self.results['databases'] = self.get()
        elif have_server:
            self.results['databases'] = self.list_by_server()
        return self.results

    def get(self):
        """Fetch facts for a single named database; empty list on failure."""
        found = None
        try:
            found = self.mysql_client.databases.get(resource_group_name=self.resource_group,
                                                    server_name=self.server_name,
                                                    database_name=self.name)
            self.log("Response : {0}".format(found))
        except CloudError as e:
            self.log('Could not get facts for Databases.')
        return [] if found is None else [self.format_item(found)]

    def list_by_server(self):
        """Fetch facts for every database on the server; fails the module on error."""
        listing = None
        try:
            listing = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group,
                                                                 server_name=self.server_name)
            self.log("Response : {0}".format(listing))
        except CloudError as e:
            self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
        if listing is None:
            return []
        return [self.format_item(entry) for entry in listing]

    def format_item(self, item):
        """Reduce an SDK database object to the documented fact dictionary."""
        raw = item.as_dict()
        return {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'name': raw['name'],
            'charset': raw['charset'],
            'collation': raw['collation']
        }
def main():
    """Module entry point: instantiating the facts class runs the module."""
    AzureRMMySqlDatabaseFacts()
if __name__ == '__main__':
main()
|
justintweaver/mtchi-cert-game
|
refs/heads/master
|
makahiki/apps/utils/__init__.py
|
9
|
"""utility module."""
|
b-carter/numpy
|
refs/heads/master
|
numpy/linalg/info.py
|
264
|
"""\
Core Linear Algebra Tools
-------------------------
Linear algebra basics:
- norm Vector or matrix norm
- inv Inverse of a square matrix
- solve Solve a linear system of equations
- det Determinant of a square matrix
- lstsq Solve linear least-squares problem
- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
- matrix_power Integer power of a square matrix
Eigenvalues and decompositions:
- eig Eigenvalues and vectors of a square matrix
- eigh Eigenvalues and eigenvectors of a Hermitian matrix
- eigvals Eigenvalues of a square matrix
- eigvalsh Eigenvalues of a Hermitian matrix
- qr QR decomposition of a matrix
- svd Singular value decomposition of a matrix
- cholesky Cholesky decomposition of a matrix
Tensor operations:
- tensorsolve Solve a linear tensor equation
- tensorinv Calculate an inverse of a tensor
Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
|
Grirrane/odoo
|
refs/heads/master
|
addons/analytic/wizard/account_analytic_cost_ledger_for_journal_report.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_cost_ledger_journal_report(osv.osv_memory):
    """Wizard collecting the parameters (date range, optional analytic
    journals) for the analytic cost ledger quantity report."""
    _name = 'account.analytic.cost.ledger.journal.report'
    _description = 'Account Analytic Cost Ledger For Journal Report'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
        'journal': fields.many2many('account.analytic.journal', 'ledger_journal_rel', 'ledger_id', 'journal_id', 'Journals'),
    }
    _defaults = {
        # default period: from the start of the current year up to today
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }
    def check_report(self, cr, uid, ids, context=None):
        """Launch the report for the analytic accounts currently selected
        in the client (passed in via context['active_ids'])."""
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'account.analytic.account',
            'form': data
        }
        datas['form']['active_ids'] = context.get('active_ids', False)
        return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticcostledgerquantity', data=datas, context=context)
|
Ditmar/plugin.video.pelisalacarta
|
refs/heads/master
|
servers/userporn.py
|
33
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para userporn
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import os,re
import base64
from core import scrapertools
from core import logger
from core import config
HOSTER_KEY="NTI2NzI5Cgo="
# Returns an array of possible video url's from the page_url
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Return the playable video URLs for a userporn page.

    Returns a list of [label, url] pairs, one per available resolution.
    The premium/user/password/video_password parameters belong to the
    common connector interface and are ignored for this host.
    """
    logger.info("[userporn.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    # Wait a bit, mimicking what the flash player does
    logger.info("[userporn.py] waiting 3 secs")
    import time
    time.sleep(3)
    # Extract the video id from the page url
    code = Extract_id(page_url)
    # Download the json with the video details
    #http://www.userporn.com/player_control/settings.php?v=dvthddkC7l4J&em=TRUE&fv=v1.1.45
    controluri = "http://userporn.com/player_control/settings.php?v=" + code + "&em=TRUE&fv=v1.1.45"
    datajson = scrapertools.cachePage(controluri)
    #logger.info("response="+datajson);
    # SECURITY: parse the response with json instead of eval() -- eval'ing a
    # remote server reply allows arbitrary code execution on a malicious page.
    import json
    datadict = json.loads(datajson)
    # videobb.build_url() still receives the response with the literals
    # rewritten to Python style, exactly as before.
    datajson = datajson.replace("false","False").replace("true","True")
    datajson = datajson.replace("null","None")
    # One entry per available resolution
    formatos = datadict["settings"]["res"]
    for formato in formatos:
        uri = base64.decodestring(formato["u"])
        resolucion = formato["l"]
        import videobb
        video_url = videobb.build_url(uri,HOSTER_KEY,datajson)
        video_urls.append( ["%s [userporn]" % resolucion , video_url.replace(":80","") ])
    for video_url in video_urls:
        logger.info("[userporn.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
def Extract_id(url):
    """Extract the 12-character video id from any supported userporn URL.

    Returns "" (and logs an error) when the url does not match.
    """
    _VALID_URL = r'^((?:http://)?(?:\w+\.)?userporn\.com/(?:(?:(?:e/)|(?:video/))|(?:(?:flash/)|(?:f/)))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
    match = re.match(_VALID_URL, url)
    if match is None:
        logger.info('[userporn.py] ERROR: URL invalida: %s' % url)
        return ""
    video_id = match.group(2)
    logger.info("[userporn.py] extracted code="+video_id)
    return video_id
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan 'data' for userporn video links.

    Returns a list of [title, url, server] entries, deduplicated by url
    while preserving the order in which matches were found. The four
    previously copy-pasted search loops are now driven by a pattern table.
    """
    encontrados = set()
    devuelve = []
    # (pattern, re flags, title) -- tried from most to least specific:
    # .swf embeds, /video/ links, /e/ embeds, then a permissive catch-all.
    patterns = [
        ( 'userporn.com\/f\/([A-Z0-9a-z]{12}).swf', 0, "[userporn]" ),
        ( 'userporn.com\/video\/([A-Z0-9a-z]{12})', 0, "[userporn]" ),
        ( 'userporn.com\/e\/([A-Z0-9a-z]{12})', 0, "[userporn]" ),
        ( "http\:\/\/(?:www\.)?userporn.com\/(?:(?:e/|flash/)|(?:(?:video/|f/)))?([a-zA-Z0-9]{0,12})", re.DOTALL, "[Userporn]" ),
    ]
    for patronvideos, flags, titulo in patterns:
        logger.info("[userporn.py] find_videos #"+patronvideos+"#")
        matches = re.compile(patronvideos, flags).findall(data)
        for match in matches:
            url = "http://www.userporn.com/video/"+match
            if url not in encontrados:
                logger.info("  url="+url)
                devuelve.append( [ titulo , url , 'userporn' ] )
                encontrados.add(url)
            else:
                logger.info("  url duplicada="+url)
    return devuelve
|
NSnietol/Py
|
refs/heads/master
|
PythonIntermediate/Regex/ejercicio1.py
|
2
|
#-*- coding: utf-8 -*-
import re
"""
Palabras con a lo sumo una pareja de 0's consecutivos y a lo sumo una pareja de 1's consecutivos.
Cadenas en las que toda pareja de 0's contiguos aparece antes de cualquier pareja de 1's contiguos.
Cadenas que no contienen a 101 como subcadena.
Cadenas equilibradas con igual número de 0's y de 1's tales que ningún prefijo de cualquiera de ellas posee más de dos 0's que 1's ni más de dos 1's que 0's.
"""
#Busqueda simple
patron1 = "(\S*(00|11)?\S*(11|00)?\S*)" # Se explota 00001211
patron2 = ".*(00)+(11)+" # Nada =(
#patron3 = ".^[(101)].*"
patron3 = "a{2,}"
regex = re.compile(patron3)
cadena = "aaa"
if regex.search(cadena):
print "Cumplió el patrón :"+str(regex.search(cadena).group())
else:
print "No cumplió "
|
psiinon/addons-server
|
refs/heads/master
|
src/olympia/bandwagon/migrations/0001_initial.py
|
7
|
# Generated by Django 2.2.5 on 2019-09-12 13:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.fields
import olympia.amo.models
import olympia.translations.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration for the bandwagon (collections) app.

    Creates Collection, FeaturedCollection and CollectionAddon plus the
    indexes/constraints matching the legacy MySQL schema. Generated by
    Django 2.2.5; do not hand-edit the operation bodies.
    """
    initial = True
    dependencies = [
        ('translations', '__first__'),
        ('addons', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Main Collection model (db table 'collections').
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('uuid', models.UUIDField(blank=True, null=True, unique=True)),
                ('nickname', models.CharField(blank=True, max_length=30, null=True, unique=True)),
                ('slug', models.CharField(blank=True, max_length=30, null=True)),
                ('default_locale', models.CharField(db_column='defaultlocale', default='en-US', max_length=10)),
                ('type', models.PositiveIntegerField(choices=[(0, 'Normal'), (1, 'Synchronized'), (2, 'Featured'), (3, 'Generated Recommendations'), (4, 'Favorites'), (5, 'Mobile'), (6, 'Anonymous')], db_column='collection_type', default=0)),
                ('listed', models.BooleanField(default=True, help_text='Collections are either listed or private.')),
                ('application', models.PositiveIntegerField(blank=True, choices=[(1, 'Firefox'), (61, 'Firefox for Android')], db_column='application_id', null=True)),
                ('addon_count', models.PositiveIntegerField(db_column='addonCount', default=0)),
            ],
            options={
                'db_table': 'collections',
                'get_latest_by': 'created',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Per-application/locale featuring of a collection.
        migrations.CreateModel(
            name='FeaturedCollection',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('application', models.PositiveIntegerField(choices=[(1, 'Firefox'), (61, 'Firefox for Android')], db_column='application_id')),
                ('locale', models.CharField(max_length=10, null=True)),
                ('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bandwagon.Collection')),
            ],
            options={
                'db_table': 'featured_collections',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Through model linking add-ons to collections, with ordering/comments.
        migrations.CreateModel(
            name='CollectionAddon',
            fields=[
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('ordering', models.PositiveIntegerField(default=0, help_text='Add-ons are displayed in ascending order based on this field.')),
                ('addon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
                ('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bandwagon.Collection')),
                ('comments', olympia.translations.fields.LinkifiedField(blank=True, db_column='comments', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='CollectionAddon_comments_set+', require_locale=True, short=True, to='translations.LinkifiedTranslation', to_field='id', unique=True)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'addons_collections',
                'get_latest_by': 'created',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # Relations and translated fields added to Collection after creation.
        migrations.AddField(
            model_name='collection',
            name='addons',
            field=models.ManyToManyField(related_name='collections', through='bandwagon.CollectionAddon', to='addons.Addon'),
        ),
        migrations.AddField(
            model_name='collection',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='collections', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='collection',
            name='description',
            field=olympia.translations.fields.NoLinksNoMarkupField(blank=True, db_column='description', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Collection_description_set+', require_locale=False, short=True, to='translations.NoLinksNoMarkupTranslation', to_field='id', unique=True),
        ),
        migrations.AddField(
            model_name='collection',
            name='name',
            field=olympia.translations.fields.TranslatedField(blank=True, db_column='name', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Collection_name_set+', require_locale=False, short=True, to='translations.Translation', to_field='id', unique=True),
        ),
        # Indexes and constraints mirroring the legacy schema's names.
        migrations.AddIndex(
            model_name='featuredcollection',
            index=models.Index(fields=['application'], name='application_id_idx'),
        ),
        migrations.AddIndex(
            model_name='collectionaddon',
            index=models.Index(fields=['collection', 'created'], name='created_idx'),
        ),
        migrations.AddIndex(
            model_name='collectionaddon',
            index=models.Index(fields=['addon'], name='addon_id'),
        ),
        migrations.AddIndex(
            model_name='collectionaddon',
            index=models.Index(fields=['collection'], name='collection_id'),
        ),
        migrations.AddIndex(
            model_name='collectionaddon',
            index=models.Index(fields=['user'], name='user_id'),
        ),
        migrations.AddConstraint(
            model_name='collectionaddon',
            constraint=models.UniqueConstraint(fields=('addon', 'collection'), name='addon_id_2'),
        ),
        migrations.AddIndex(
            model_name='collection',
            index=models.Index(fields=['application'], name='application_id'),
        ),
        migrations.AddIndex(
            model_name='collection',
            index=models.Index(fields=['created'], name='created_idx'),
        ),
        migrations.AddIndex(
            model_name='collection',
            index=models.Index(fields=['listed'], name='listed'),
        ),
        migrations.AddIndex(
            model_name='collection',
            index=models.Index(fields=['slug'], name='slug_idx'),
        ),
        migrations.AddIndex(
            model_name='collection',
            index=models.Index(fields=['type'], name='type_idx'),
        ),
        migrations.AddConstraint(
            model_name='collection',
            constraint=models.UniqueConstraint(fields=('author', 'slug'), name='author_id'),
        ),
    ]
|
pythonprobr/pythonpro-website
|
refs/heads/master
|
pythonpro/cohorts/__init__.py
|
12133432
| |
tareqalayan/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/rackspace/__init__.py
|
12133432
| |
luci/luci-py
|
refs/heads/master
|
client/third_party/infra_libs/experiments.py
|
7
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tools for gradually enabling a feature on a deterministic set of machines.
Add a flag to your program to control the percentage of machines that a new
feature should be enabled on::
def add_argparse_options(self, parser):
parser.add_argument('--myfeature-percent', type=int, default=0)
def main(self, opts):
if experiments.is_active_for_host('myfeature', opts.myfeature_percent):
# do myfeature
"""
import hashlib
import logging
import socket
import struct
def _is_active(labels, percent):
h = hashlib.md5()
for label, value in sorted(labels.iteritems()):
h.update(label)
h.update(value)
# The first 8 bytes of the hash digest as an unsigned integer.
hash_num = struct.unpack_from('Q', h.digest())[0]
return (hash_num % 100) < percent
def is_active_for_host(experiment_name, percent):
    """Return True when *experiment_name* is enabled on this machine.

    The decision is a deterministic function of the experiment name and the
    local FQDN, so a given host always lands on the same side of a rollout
    for a given percentage.
    """
    labels = {'name': experiment_name, 'host': socket.getfqdn()}
    active = _is_active(labels, percent)
    if active:
        logging.info('Experiment "%s" is active', experiment_name)
    return active
|
hachreak/invenio-oaiharvester
|
refs/heads/master
|
invenio_oaiharvester/manage.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CLI tool to harvest records from an OAI-PMH repository.
The output can be directed to files in a directory, passed into a "workflow"
or printed to stdout (default).
"""
from __future__ import absolute_import, print_function, unicode_literals
from invenio_ext.script import Manager
from .errors import IdentifiersOrDates
from .tasks import get_specific_records, list_records_from_dates
# Flask-Script-style command manager; the module docstring becomes the CLI help.
manager = Manager(description=__doc__)
# The option stack below is intentionally identical to the one on ``queue``;
# keep both in sync.
@manager.option('-m', '--metadataprefix', dest='metadata_prefix', default=None,
                help="The prefix for the metadata return (e.g. 'oai_dc')")
@manager.option('-n', '--name', dest='name', default=None,
                help="The name of the OaiHARVEST object that we want to use to create the endpoint.")
@manager.option('-s', '--setSpec', dest='setSpec', default=None,
                help="The 'set' criteria for the harvesting (optional).")
@manager.option('-i', '--identifiers', dest='identifiers', default=None,
                help="A list of unique identifiers for records to be harvested.")
@manager.option('-f', '--from', dest='from_date', default=None,
                help="The lower bound date for the harvesting (optional).")
@manager.option('-t', '--to', dest='until_date', default=None,
                help="The upper bound date for the harvesting (optional).")
@manager.option('-u', '--url', dest='url', default=None,
                help="The URL to be used to create the endpoint (optional).")
@manager.option('-o', '--output', dest='output', default='stdout',
                help="The type of the output (stdout, workflow, dir/directory).")
@manager.option('-w', '--workflow', dest='workflow', default=None,
                help="The workflow that should process the output.")
@manager.option('-d', '--dir', dest='directory', default='records_harvested',
                help="The directory that we want to send the harvesting results.")
def get(metadata_prefix, name, setSpec, identifiers, from_date,
        until_date, url, output, workflow, directory):
    """Harvest records from an OAI repository immediately, without scheduling."""
    # Bug fix: the ``--url`` help text was a copy/paste of the ``--to`` help
    # ("The upper bound date..."); it now describes the URL option.
    # is_queue=False runs the harvest synchronously in this process.
    begin_harvesting_action(metadata_prefix, name, setSpec, identifiers, from_date,
                            until_date, url, output, workflow, directory, is_queue=False)
# The option stack below is intentionally identical to the one on ``get``;
# keep both in sync.
@manager.option('-m', '--metadataprefix', dest='metadata_prefix', default=None,
                help="The prefix for the metadata return (e.g. 'oai_dc')")
@manager.option('-n', '--name', dest='name', default=None,
                help="The name of the OaiHARVEST object that we want to use to create the endpoint.")
@manager.option('-s', '--setSpec', dest='setSpec', default=None,
                help="The 'set' criteria for the harvesting (optional).")
@manager.option('-i', '--identifiers', dest='identifiers', default=None,
                help="A list of unique identifiers for records to be harvested.")
@manager.option('-f', '--from', dest='from_date', default=None,
                help="The lower bound date for the harvesting (optional).")
@manager.option('-t', '--to', dest='until_date', default=None,
                help="The upper bound date for the harvesting (optional).")
@manager.option('-u', '--url', dest='url', default=None,
                help="The URL to be used to create the endpoint (optional).")
@manager.option('-o', '--output', dest='output', default='stdout',
                help="The type of the output (stdout, workflow, dir/directory).")
@manager.option('-w', '--workflow', dest='workflow', default=None,
                help="The workflow that should process the output.")
@manager.option('-d', '--dir', dest='directory', default='records_harvested',
                help="The directory that we want to send the harvesting results.")
def queue(metadata_prefix, name, setSpec, identifiers, from_date,
          until_date, url, output, workflow, directory):
    """Schedule a run to harvest records from an OAI repository."""
    # Bug fix: the ``--url`` help text was a copy/paste of the ``--to`` help
    # ("The upper bound date..."); it now describes the URL option.
    # is_queue=True dispatches the harvest to Celery instead of running it here.
    begin_harvesting_action(metadata_prefix, name, setSpec, identifiers, from_date,
                            until_date, url, output, workflow, directory, is_queue=True)
def begin_harvesting_action(metadata_prefix, name, setSpec, identifiers, from_date,
                            until_date, url, output, workflow, directory, is_queue=False):
    """Select and run (or queue) the right harvesting task for the parameters.

    With no ``identifiers``, a date/lastrun based harvest is performed via
    ``list_records_from_dates``; with ``identifiers``, specific records are
    fetched via ``get_specific_records``.  Identifiers and dates are mutually
    exclusive.

    :param metadata_prefix: The prefix for the metadata return (e.g. 'oai_dc').
    :param name: The name of the OaiHARVEST object used to create the endpoint.
    :param setSpec: The 'set' criteria for the harvesting (optional).
    :param identifiers: A list of unique identifiers for records to be harvested.
    :param from_date: The lower bound date for the harvesting (optional).
    :param until_date: The upper bound date for the harvesting (optional).
    :param url: The url to be used to create the endpoint.
    :param output: The type of the output (stdout, workflow, dir/directory).
    :param workflow: The workflow that should process the output.
    :param directory: The directory that receives the harvesting results.
    :param is_queue: Queue the harvest with Celery instead of running it now.
    :raises IdentifiersOrDates: if identifiers are combined with dates.
    """
    if identifiers is None:
        # Date-based harvest: url/name pick the endpoint, from_date/lastrun
        # (plus optionally until_date) pick the range.
        task = list_records_from_dates
        task_args = (metadata_prefix, from_date, until_date, url,
                     name, setSpec, output, workflow, directory)
    else:
        if (from_date is not None) or (until_date is not None):
            raise IdentifiersOrDates("Identifiers cannot be used in combination with dates.")
        # Identifier-based harvest of specific records.
        task = get_specific_records
        task_args = (identifiers, metadata_prefix, url,
                     name, output, workflow, directory)

    if is_queue:
        job = task.delay(*task_args)
        print("Scheduled job {0}".format(job.id))
    else:
        task(*task_args)
def main():
    """Run manager."""
    # Imported lazily so merely importing this module does not build the
    # full Invenio application.
    from invenio_base.factory import create_app
    app = create_app()
    manager.app = app
    manager.run()


if __name__ == '__main__':
    main()
|
shujunqiao/cocos2d-python
|
refs/heads/master
|
cocos/actions/tiledgrid_actions.py
|
3
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Implementation of TiledGrid3DAction actions
'''
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import random
from cocos.euclid import *
from .basegrid_actions import *
from cocos.director import director
# Short alias used heavily by the random-jitter actions below.
rr = random.randrange

__all__ = [ 'FadeOutTRTiles',   # actions that don't modify the z coordinate
            'FadeOutBLTiles',
            'FadeOutUpTiles',
            'FadeOutDownTiles',
            'ShuffleTiles',
            'TurnOffTiles',
            'SplitRows',
            'SplitCols',

            'ShakyTiles3D',     # actions that modify the z coordinate
            'ShatteredTiles3D',
            'WavesTiles3D',
            'JumpTiles3D',
            ]
# Don't export this class
class Tile(object):
    """Bookkeeping record for one grid tile (deliberately not exported).

    Tracks where a tile started, where it currently is, and the total
    displacement it travels over the course of an action.
    """

    def __init__(self, position=(0, 0), start_position=(0, 0), delta=(0, 0)):
        super(Tile, self).__init__()
        self.position = position
        self.start_position = start_position
        self.delta = delta

    def __repr__(self):
        return "(start_pos: %s pos: %s delta:%s)" % (
            self.start_position, self.position, self.delta)
class ShakyTiles3D(TiledGrid3DAction):
    """Simulates a shaky floor composed of tiles.

    Example::

        scene.do( ShakyTiles3D( randrange=6, grid=(4,4), duration=10) )
    """

    def init(self, randrange=6, *args, **kw):
        """
        :Parameters:
            `randrange` : int
                Number that will be used in random.randrange( -randrange, randrange) to do the effect
        """
        super(ShakyTiles3D, self).init(*args, **kw)
        self.randrange = randrange

    def update(self, t):
        jitter = self.randrange
        for col in range(self.grid.x):
            for row in range(self.grid.y):
                coords = self.get_original_tile(col, row)
                # Nudge every vertex (x, y, z triple) by a fresh random offset,
                # drawing x, y, z in that order to keep the RNG sequence stable.
                for base in range(0, len(coords), 3):
                    dx = rr(-jitter, jitter + 1)
                    dy = rr(-jitter, jitter + 1)
                    dz = rr(-jitter, jitter + 1)
                    coords[base] += dx
                    coords[base + 1] += dy
                    coords[base + 2] += dz
                self.set_tile(col, row, coords)
class ShatteredTiles3D( TiledGrid3DAction ):
    '''ShatterTiles shatters the tiles according to a random value.
    It is similar to shakes (see `ShakyTiles3D`) the tiles just one frame, and then continue with
    that state for duration time.

    Example::

        scene.do( ShatteredTiles3D( randrange=12 ) )
    '''

    def init( self, randrange=6, *args, **kw ):
        '''
        :Parameters:
            `randrange` : int
                Number that will be used in random.randrange( -randrange, randrange) to do the effect
        '''
        super(ShatteredTiles3D,self).init(*args,**kw)
        self.randrange = randrange
        # One-shot guard: the random displacement is applied only on the
        # first update; later frames keep the shattered state frozen.
        self._once = False

    def update( self, t ):
        if not self._once:
            for i in range(0, self.grid.x):
                for j in range(0, self.grid.y):
                    coords = self.get_original_tile(i,j)
                    # Offset every vertex (x, y, z triple) once by a random amount.
                    for k in range(0,len(coords),3):
                        x = rr(-self.randrange, self.randrange+1)
                        y = rr(-self.randrange, self.randrange+1)
                        z = rr(-self.randrange, self.randrange+1)
                        coords[k] += x
                        coords[k+1] += y
                        coords[k+2] += z
                    self.set_tile(i,j,coords)
            self._once = True
class ShuffleTiles( TiledGrid3DAction ):
    '''ShuffleTiles moves the tiles randomly across the screen.
    To put them back use: Reverse( ShuffleTiles() ) with the same seed parameter.

    Example::

        scene.do( ShuffleTiles( grid=(4,4), seed=1, duration=10) )
    '''

    def init(self, seed=-1, *args, **kw):
        '''
        :Parameters:
            `seed` : float
                Seed for the random in the shuffle.
        '''
        super(ShuffleTiles,self).init(*args, **kw)
        # seed == -1 means "do not reseed": each run shuffles differently
        # and cannot be reversed deterministically.
        self.seed = seed

    def start(self):
        super(ShuffleTiles,self).start()
        self.tiles = {}
        self._once = False  # NOTE(review): set but never read in this class.
        if self.seed != -1:
            random.seed( self.seed )

        # random positions
        # tiles_order[origin_index] is the flat index of the destination cell.
        self.nr_of_tiles = self.grid.x * self.grid.y
        self.tiles_order = list(range(self.nr_of_tiles))
        random.shuffle( self.tiles_order )

        # One Tile record per grid cell; ``delta`` is the total displacement
        # (in grid units) to this cell's shuffled destination.
        for i in range(self.grid.x):
            for j in range(self.grid.y):
                self.tiles[(i,j)] = Tile( position = Point2(i,j),
                                          start_position = Point2(i,j),
                                          delta= self._get_delta(i,j) )

    def place_tile(self, i, j):
        # Offset the tile's original vertices by its current (fractional)
        # grid position, converted to pixels via the grid step sizes.
        t = self.tiles[(i,j)]
        coords = self.get_original_tile(i,j)

        for k in range(0,len(coords),3):
            coords[k] += int( t.position.x * self.target.grid.x_step )
            coords[k+1] += int( t.position.y * self.target.grid.y_step )
        self.set_tile(i,j,coords)

    def update(self, t ):
        # Advance every tile a fraction ``t`` of the way along its delta.
        for i in range(0, self.grid.x):
            for j in range(0, self.grid.y):
                self.tiles[(i,j)].position = self.tiles[(i,j)].delta * t
                self.place_tile(i,j)

    # private method
    def _get_delta(self, x, y):
        # Destination comes from the shuffled order; the delta is destination
        # minus origin, both in grid coordinates.
        idx = x * self.grid.y + y
        i,j = divmod( self.tiles_order[idx], self.grid.y )
        return Point2(i,j)-Point2(x,y)
class FadeOutTRTiles( TiledGrid3DAction ):
    '''Fades out each tile following a diagonal Top-Right path until all the tiles are faded out.

    Example::

        scene.do( FadeOutTRTiles( grid=(16,12), duration=10) )
    '''

    def update( self, t ):
        # direction right - up
        # test_func maps each tile to a "distance" from the moving wavefront:
        # 0 -> fully hidden, (0, 1) -> partially shrunk, >= 1 -> untouched.
        for i in range(self.grid.x):
            for j in range(self.grid.y):
                distance = self.test_func(i,j,t)
                if distance == 0:
                    self.turn_off_tile(i,j)
                elif distance < 1:
                    self.transform_tile(i,j,distance)
                else:
                    self.turn_on_tile(i,j)

    def turn_on_tile(self, x,y):
        # Restore the tile's untouched vertex coordinates.
        self.set_tile(x,y, self.get_original_tile(x,y) )

    def transform_tile(self, x, y, t ):
        # Move two opposite vertices inward along x and the other two inward
        # along y by step/2 * (1-t): at t=1 the tile is untouched, as t -> 0
        # it presumably collapses toward its center (depends on the quad's
        # vertex ordering — TODO confirm against the grid implementation).
        # The quad is 4 vertices of 3 floats each (12 values total).
        coords = self.get_original_tile(x,y)

        for c in range( len(coords) ):
            # x
            if c == 0*3 or c == 3*3:
                coords[c] = coords[c] + (self.target.grid.x_step / 2.0) * (1-t)
            elif c == 1*3 or c == 2*3:
                coords[c] = coords[c] - (self.target.grid.x_step / 2.0) * (1-t)

            # y
            if c == 0*3+1 or c == 1*3+1:
                coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
            elif c == 2*3+1 or c == 3*3+1:
                coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)

        self.set_tile(x,y,coords)

    def turn_off_tile( self,x,y):
        # A degenerate (all-zero) quad renders nothing.
        self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )

    def test_func(self, i,j, t ):
        # Compare the tile's diagonal index (i+j) against the moving
        # threshold diagonal (x+y); the 6th power sharpens the falloff.
        x,y = self.grid * t
        if x+y==0:
            return 1
        return pow( (i+j) / (x+y), 6 )
class FadeOutBLTiles(FadeOutTRTiles):
    """Fades out each tile following an Bottom-Left path until all the tiles are faded out.

    Example::

        scene.do( FadeOutBLTiles( grid=(16,12), duration=5) )
    """

    def test_func(self, i, j, t):
        # Mirror of the TR variant: the threshold diagonal shrinks from the
        # top-right corner toward the origin as t advances.
        x, y = self.grid * (1 - t)
        if i + j == 0:
            return 1
        return ((x + y) / (i + j)) ** 6
class FadeOutUpTiles( FadeOutTRTiles):
    '''Fades out each tile following an upwards path until all the tiles are faded out.

    Example::

        scene.do( FadeOutUpTiles( grid=(16,12), duration=5) )
    '''

    def test_func(self, i,j, t):
        # Only the row index matters: compare row j against the moving
        # threshold row y (sweeping upward with t).
        x,y = self.grid * t
        if y==0:
            return 1
        return pow( (j) / y, 6 )

    def transform_tile(self, x, y, t ):
        # Unlike the base class, only squash vertices along y; x is untouched.
        coords = self.get_original_tile(x,y)

        for c in range( len(coords) ):
            # y
            if c == 0*3+1 or c == 1*3+1:
                coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
            elif c == 2*3+1 or c == 3*3+1:
                coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)

        self.set_tile(x,y,coords)
class FadeOutDownTiles(FadeOutUpTiles):
    """Fades out each tile following an downwards path until all the tiles are faded out.

    Example::

        scene.do( FadeOutDownTiles( grid=(16,12), duration=5) )
    """

    def test_func(self, i, j, t):
        # Like FadeOutUpTiles, but the threshold row sweeps downward.
        x, y = self.grid * (1 - t)
        if j == 0:
            return 1
        return (y / j) ** 6
class TurnOffTiles( TiledGrid3DAction ):
    '''TurnOffTiles turns off each in random order

    Example::

        scene.do( TurnOffTiles( grid=(16,12), seed=1, duration=10) )
    '''

    def init(self, seed=-1, *args, **kw):
        # seed == -1 means "do not reseed" (non-repeatable order).
        super(TurnOffTiles,self).init( *args, **kw )
        self.seed = seed

    def start(self):
        super(TurnOffTiles,self).start()
        if self.seed != -1:
            random.seed( self.seed )
        # Shuffled flat tile indices; tiles are switched off in this order.
        self.nr_of_tiles = self.grid.x * self.grid.y
        self.tiles_order = list(range(self.nr_of_tiles))
        random.shuffle( self.tiles_order )

    def update( self, t ):
        # Turn off the first l tiles of the shuffled order, keep the rest on.
        # NOTE(review): the loop reuses the name ``t`` for the tile index,
        # shadowing the elapsed-fraction parameter — intentional but fragile.
        l = int( t * self.nr_of_tiles )
        for i in range( self.nr_of_tiles):
            t = self.tiles_order[i]
            if i < l:
                self.turn_off_tile(t)
            else:
                self.turn_on_tile(t)

    def get_tile_pos(self, idx):
        # Map a flat index back to a (column, row) pair.
        return divmod(idx, self.grid.y)

    def turn_on_tile(self, t):
        # Restore the tile's original vertices.
        x,y = self.get_tile_pos(t)
        self.set_tile(x,y, self.get_original_tile(x,y) )

    def turn_off_tile(self,t):
        # A degenerate (all-zero) quad renders nothing.
        x,y = self.get_tile_pos(t)
        self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )
class WavesTiles3D( TiledGrid3DAction ):
    '''Simulates waves using the math.sin() function in the z-axis of each tile

    Example::

        scene.do( WavesTiles3D( waves=5, amplitude=120, grid=(16,16), duration=10) )
    '''

    def init( self, waves=4, amplitude=120, *args, **kw ):
        '''
        :Parameters:
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        '''
        super(WavesTiles3D, self).init( *args, **kw )

        #: Total number of waves to perform
        self.waves=waves

        #: amplitude rate. Default: 1.0
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0
        self.amplitude=amplitude

    def update( self, t ):
        for i in range(0, self.grid.x):
            for j in range(0, self.grid.y):
                coords = self.get_original_tile(i,j)

                # The phase depends on the tile's first vertex (x, y), so the
                # wave travels across the grid instead of bobbing uniformly.
                x = coords[0]
                y = coords[1]
                z = (math.sin(t*math.pi*self.waves*2 + (y+x) * .01) * self.amplitude * self.amplitude_rate )

                # Apply the same z offset to every vertex of the tile.
                for k in range( 0,len(coords),3 ):
                    coords[k+2] += z

                self.set_tile( i,j, coords )
class JumpTiles3D( TiledGrid3DAction ):
    '''Odd tiles will perform a jump in the z-axis using the sine function,
    while the even tiles will perform a jump using sine+pi function

    Example::

        scene.do( JumpTiles3D( jumps=5, amplitude=40, grid=(16,16), duration=10) )
    '''

    def init( self, jumps=4, amplitude=20, *args, **kw ):
        '''
        :Parameters:
            `jumps` : int
                Number of jumps(2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        '''
        super(JumpTiles3D, self).init( *args, **kw )

        #: Total number of jumps to perform
        self.jumps=jumps

        #: amplitude rate. Default: 1.0
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0
        self.amplitude=amplitude

    def update( self, t ):
        # Two z offsets exactly out of phase (sin vs sin shifted by pi);
        # computed once per frame and shared by all tiles of each parity.
        sinz = (math.sin(t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )
        sinz2= (math.sin(math.pi+t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )

        for i in range(0, self.grid.x):
            for j in range(0, self.grid.y):
                coords = self.get_original_tile(i,j)

                for k in range( 0,len(coords),3 ):
                    # Checkerboard parity decides which phase a tile follows.
                    if (i+j) % 2 == 0:
                        coords[k+2] += sinz
                    else:
                        coords[k+2] += sinz2

                self.set_tile( i,j, coords )
class SplitRows(TiledGrid3DAction):
    """Split the screen into horizontal rows and slide them off-screen.

    Even rows exit to the left, odd rows to the right.

    Example::

        scene.do( SplitRows( rows=3, duration=2) )
    """

    def init(self, rows=9, grid=(-1, -1), *args, **kw):
        """
        :Parameters:
            `rows` : int
                Number of rows that will have the effect. Default: 9
        """
        # The grid is derived from ``rows``; passing one explicitly is an error.
        if grid != (-1, -1):
            raise Exception("This action doesn't receives the grid argument")
        self.rows = rows
        super(SplitRows, self).init((1, rows), *args, **kw)

    def update(self, t):
        win_width, _ = director.get_window_size()
        for row in range(self.grid.y):
            coords = self.get_original_tile(0, row)
            # Even rows travel left (-1), odd rows travel right (+1).
            direction = -1 if row % 2 == 0 else 1
            for c in range(0, len(coords), 3):
                coords[c] += direction * win_width * t
            self.set_tile(0, row, coords)
class SplitCols(TiledGrid3DAction):
    """Split the screen into vertical columns and slide them off-screen.

    Even columns exit downwards, odd columns upwards.

    Example::

        scene.do( SplitCols( cols=3, duration=2) )
    """

    def init(self, cols=9, grid=(-1, -1), *args, **kw):
        """
        :Parameters:
            `cols` : int
                Number of columns that will have the effect. Default: 9
        """
        # The grid is derived from ``cols``; passing one explicitly is an error.
        if grid != (-1, -1):
            raise Exception("This action doesn't receives the grid argument")
        self.cols = cols
        super(SplitCols, self).init((cols, 1), *args, **kw)

    def update(self, t):
        _, win_height = director.get_window_size()
        for col in range(self.grid.x):
            coords = self.get_original_tile(col, 0)
            # Even columns travel down (-1), odd columns travel up (+1).
            direction = -1 if col % 2 == 0 else 1
            for c in range(0, len(coords), 3):
                coords[c + 1] += direction * win_height * t
            self.set_tile(col, 0, coords)
|
zanderle/django
|
refs/heads/master
|
tests/defer/tests.py
|
338
|
from __future__ import unicode_literals
from django.db.models.query_utils import DeferredAttribute, InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
    """Shared helper for counting deferred fields on fetched instances."""

    def assert_delayed(self, obj, num):
        """
        Instances with deferred fields look the same as normal instances when
        we examine attribute values. Therefore, this method returns the number
        of deferred fields on returned instances.
        """
        deferred_count = sum(
            1 for field in obj._meta.fields
            if isinstance(obj.__class__.__dict__.get(field.attname),
                          DeferredAttribute)
        )
        self.assertEqual(deferred_count, num)
class DeferTests(AssertionMixin, TestCase):
    # Core defer()/only() behaviour on a simple model with a FK to Secondary.

    @classmethod
    def setUpTestData(cls):
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)

    def test_defer(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        # Deferring a related field without select_related defers nothing.
        self.assert_delayed(qs.defer("related__first")[0], 0)
        self.assert_delayed(qs.defer("name").defer("value")[0], 2)

    def test_only(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name")[0], 2)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
        self.assert_delayed(qs.only("name").only("value")[0], 2)
        self.assert_delayed(qs.only("related__first")[0], 2)
        # Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key see #15494
        self.assert_delayed(qs.only("pk")[0], 3)
        # You can use 'pk' with reverse foreign key lookups.
        self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 3)

    def test_defer_only_chaining(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value")[0], 2)
        self.assert_delayed(qs.only("name").defer("value")[0], 2)

    def test_defer_on_an_already_deferred_field(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        # Deferring the same field twice is idempotent.
        self.assert_delayed(qs.defer("name").defer("name")[0], 1)

    def test_defer_none_to_clear_deferred_set(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name", "value")[0], 2)
        # defer(None) resets the deferred set entirely.
        self.assert_delayed(qs.defer(None)[0], 0)
        self.assert_delayed(qs.only("name").defer(None)[0], 0)

    def test_only_none_raises_error(self):
        msg = 'Cannot pass None as an argument to only().'
        with self.assertRaisesMessage(TypeError, msg):
            Primary.objects.only(None)

    def test_defer_extra(self):
        # defer() composes with extra() in either order.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
        self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)

    def test_defer_values_does_not_defer(self):
        # User values() won't defer anything (you get the full list of
        # dictionaries back), but it still works.
        self.assertEqual(Primary.objects.defer("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_only_values_does_not_defer(self):
        self.assertEqual(Primary.objects.only("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_get(self):
        # Using defer() and only() with get() is also valid.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)

    def test_defer_with_select_related(self):
        obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
        self.assert_delayed(obj.related, 2)
        self.assert_delayed(obj, 0)

    def test_only_with_select_related(self):
        obj = Primary.objects.select_related().only("related__first")[0]
        self.assert_delayed(obj, 2)
        self.assert_delayed(obj.related, 1)
        self.assertEqual(obj.related_id, self.s1.pk)
        self.assertEqual(obj.name, "p1")

    def test_defer_select_related_raises_invalid_query(self):
        # When we defer a field and also select_related it, the query is
        # invalid and raises an exception.
        with self.assertRaises(InvalidQuery):
            Primary.objects.defer("related").select_related("related")[0]

    def test_only_select_related_raises_invalid_query(self):
        with self.assertRaises(InvalidQuery):
            Primary.objects.only("name").select_related("related")[0]

    def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
        # With a depth-based select_related, all deferred ForeignKeys are
        # deferred instead of traversed.
        with self.assertNumQueries(3):
            obj = Primary.objects.defer("related").select_related()[0]
            self.assert_delayed(obj, 1)
            self.assertEqual(obj.related.id, self.s1.pk)

    def test_saving_object_with_deferred_field(self):
        # Saving models with deferred fields is possible (but inefficient,
        # since every field has to be retrieved first).
        Primary.objects.create(name="p2", value="xy", related=self.s1)
        obj = Primary.objects.defer("value").get(name="p2")
        obj.name = "a new name"
        obj.save()
        self.assertQuerysetEqual(
            Primary.objects.all(), [
                "p1", "a new name",
            ],
            lambda p: p.name,
            ordered=False,
        )

    def test_defer_baseclass_when_subclass_has_no_added_fields(self):
        # Regression for #10572 - A subclass with no extra fields can defer
        # fields from the base class
        Child.objects.create(name="c1", value="foo", related=self.s1)
        # You can defer a field on a baseclass when the subclass has no fields
        obj = Child.objects.defer("value").get(name="c1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")

    def test_only_baseclass_when_subclass_has_no_added_fields(self):
        # You can retrieve a single column on a base class with no fields
        Child.objects.create(name="c1", value="foo", related=self.s1)
        obj = Child.objects.only("name").get(name="c1")
        # on an inherited model, its PK is also fetched, hence '3' deferred fields.
        self.assert_delayed(obj, 3)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
    # defer()/only() behaviour on an inherited model that adds a field.

    @classmethod
    def setUpTestData(cls):
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")

    def test_defer_baseclass_when_subclass_has_added_field(self):
        # You can defer a field on a baseclass
        obj = BigChild.objects.defer("value").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_defer_subclass(self):
        # You can defer a field on a subclass
        obj = BigChild.objects.defer("other").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_baseclass_when_subclass_has_added_field(self):
        # You can retrieve a single field on a baseclass
        obj = BigChild.objects.only("name").get(name="b1")
        # when inherited model, its PK is also fetched, hence '4' deferred fields.
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_subclass(self):
        # Typo fix: method was named ``test_only_sublcass``; still discovered
        # via the ``test_`` prefix, so no callers are affected.
        # You can retrieve a single field on a subclass
        obj = BigChild.objects.only("other").get(name="b1")
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
    # Assorted regressions around deferred loading (proxies, inheritance,
    # equality, refresh_from_db interaction).

    def test_defer_proxy(self):
        """
        Ensure select_related together with only on a proxy model behaves
        as expected. See #17876.
        """
        related = Secondary.objects.create(first='x1', second='x2')
        ChildProxy.objects.create(name='p1', value='xx', related=related)
        children = ChildProxy.objects.all().select_related().only('id', 'name')
        self.assertEqual(len(children), 1)
        child = children[0]
        self.assert_delayed(child, 2)
        self.assertEqual(child.name, 'p1')
        self.assertEqual(child.value, 'xx')

    def test_defer_inheritance_pk_chaining(self):
        """
        When an inherited model is fetched from the DB, its PK is also fetched.
        When getting the PK of the parent model it is useful to use the already
        fetched parent model PK if it happens to be available. Tests that this
        is done.
        """
        s1 = Secondary.objects.create(first="x1", second="y1")
        bc = BigChild.objects.create(name="b1", value="foo", related=s1,
                                     other="bar")
        bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
        # Accessing the parent PK must not hit the database.
        with self.assertNumQueries(0):
            bc_deferred.id
        self.assertEqual(bc_deferred.pk, bc_deferred.id)

    def test_eq(self):
        # A partially-loaded instance compares equal to the full one
        # (equality is symmetric).
        s1 = Secondary.objects.create(first="x1", second="y1")
        s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
        self.assertEqual(s1, s1_defer)
        self.assertEqual(s1_defer, s1)

    def test_refresh_not_loading_deferred_fields(self):
        s = Secondary.objects.create()
        rf = Primary.objects.create(name='foo', value='bar', related=s)
        rf2 = Primary.objects.only('related', 'value').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        # refresh_from_db() reloads only the non-deferred fields (1 query)...
        with self.assertNumQueries(1):
            rf2.refresh_from_db()
            self.assertEqual(rf2.value, 'new bar')
        # ...and touching a still-deferred field costs one more query.
        with self.assertNumQueries(1):
            self.assertEqual(rf2.name, 'new foo')

    def test_custom_refresh_on_deferred_loading(self):
        s = Secondary.objects.create()
        rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
        rf2 = RefreshPrimaryProxy.objects.only('related').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        with self.assertNumQueries(1):
            # Customized refresh_from_db() reloads all deferred fields on
            # access of any of them.
            self.assertEqual(rf2.name, 'new foo')
            self.assertEqual(rf2.value, 'new bar')
|
mcecchi/SuperOcto
|
refs/heads/master
|
roboOctoprint/src/octoprint/server/api/connection.py
|
6
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask import request, jsonify, make_response
from octoprint.settings import settings
from octoprint.server import printer, printerProfileManager, NO_CONTENT
from octoprint.server.api import api
from octoprint.server.util.flask import restricted_access, get_json_command_from_request
@api.route("/connection", methods=["GET"])
def connectionState():
    """Report the printer's current connection plus the selectable options."""
    state, port, baudrate, printer_profile = printer.get_current_connection()

    profile_id = "_default"
    if printer_profile is not None and "id" in printer_profile:
        profile_id = printer_profile["id"]

    current = {
        "state": state,
        "port": port,
        "baudrate": baudrate,
        "printerProfile": profile_id
    }
    return jsonify({"current": current, "options": _get_options()})
@api.route("/connection", methods=["POST"])
@restricted_access
def connectionCommand():
    """Handle connect/disconnect/fake_ack commands POSTed by the UI.

    For "connect", optional ``port``/``baudrate``/``printerProfile`` entries
    are validated against the available options; with ``save`` the chosen
    values are persisted as defaults before connecting.

    Returns NO_CONTENT on success or a 400 response on invalid input.
    """
    valid_commands = {
        "connect": [],
        "disconnect": [],
        "fake_ack": []
    }

    command, data, response = get_json_command_from_request(request, valid_commands)
    if response is not None:
        return response

    if command == "connect":
        connection_options = printer.__class__.get_connection_options()

        port = None
        baudrate = None
        printerProfile = None
        # Idiom fix: membership tests use ``in data`` rather than
        # ``in data.keys()`` (equivalent, but avoids the extra view object).
        if "port" in data:
            port = data["port"]
            if port not in connection_options["ports"] and port != "AUTO":
                return make_response("Invalid port: %s" % port, 400)
        if "baudrate" in data:
            baudrate = data["baudrate"]
            if baudrate not in connection_options["baudrates"] and baudrate != 0:
                return make_response("Invalid baudrate: %d" % baudrate, 400)
        if "printerProfile" in data:
            printerProfile = data["printerProfile"]
            if not printerProfileManager.exists(printerProfile):
                return make_response("Invalid printer profile: %s" % printerProfile, 400)
        if "save" in data and data["save"]:
            # Persist the chosen connection parameters as the new defaults.
            settings().set(["serial", "port"], port)
            settings().setInt(["serial", "baudrate"], baudrate)
            printerProfileManager.set_default(printerProfile)
            if "autoconnect" in data:
                settings().setBoolean(["serial", "autoconnect"], data["autoconnect"])
            # NOTE(review): save() is nested under the "save" branch, matching
            # upstream OctoPrint — confirm against the original indentation,
            # which was mangled in this copy.
            settings().save()
        printer.connect(port=port, baudrate=baudrate, profile=printerProfile)
    elif command == "disconnect":
        printer.disconnect()
    elif command == "fake_ack":
        printer.fake_ack()

    return NO_CONTENT
def _get_options():
    """Collect selectable connection options (ports, baudrates, profiles)
    together with the currently preferred defaults."""
    connection_options = printer.__class__.get_connection_options()
    profile_options = printerProfileManager.get_all()
    default_profile = printerProfileManager.get_default()
    # Expose each profile as {id, name}, falling back to the id as name.
    profiles = []
    for printer_profile in profile_options.values():
        if "id" not in printer_profile:
            continue
        name = printer_profile["name"] if "name" in printer_profile else printer_profile["id"]
        profiles.append(dict(id=printer_profile["id"], name=name))
    return dict(
        ports=connection_options["ports"],
        baudrates=connection_options["baudrates"],
        printerProfiles=profiles,
        portPreference=connection_options["portPreference"],
        baudratePreference=connection_options["baudratePreference"],
        printerProfilePreference=default_profile["id"] if "id" in default_profile else None
    )
|
shownomercy/django
|
refs/heads/master
|
tests/webdesign_tests/tests.py
|
251
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Context, Template
from django.test import SimpleTestCase, modify_settings
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.webdesign'})
class WebdesignTest(SimpleTestCase):
    """Smoke test for the django.contrib.webdesign ``lorem`` template tag."""

    def test_lorem_tag(self):
        # {% lorem 3 w %} renders exactly three lorem-ipsum words.
        template = Template("{% load webdesign %}{% lorem 3 w %}")
        rendered = template.render(Context({}))
        self.assertEqual(rendered, 'lorem ipsum dolor')
|
intel-ctrlsys/actsys
|
refs/heads/master
|
actsys/control/commands/services/tests/test_services_start.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Intel Corp.
#
"""
Test the ServicesCheckCommand Plugin.
"""
import unittest
from mock import patch, MagicMock
from .. import ServicesStartCommand
from ....plugin.manager import PluginManager
class TestServicesStartCommand(unittest.TestCase):
    """Test case for the ServicesStartCommand class."""

    @patch("control.plugin.manager.PluginManager", spec=PluginManager)
    def setUp(self, mock_plugin_manager):
        """Wire up a mocked plugin manager / ssh plugin and build the command."""
        self.setup_mock_config()
        self.node_name = "knl-123"
        self.mock_plugin_manager = mock_plugin_manager
        # The ssh plugin instance the command will obtain from the manager;
        # execute() reports success ([0, None]) by default.
        self.ssh_mock = self.mock_plugin_manager.create_instance.return_value
        self.ssh_mock.execute.return_value = [0, None]
        self.configuration = dict(
            device_name=self.node_name,
            configuration=self.configuration_manager,
            plugin_manager=self.mock_plugin_manager,
            logger=None,
            arguments=None,
        )
        self.services_check = ServicesStartCommand(self.configuration)

    def setup_mock_config(self):
        """Build a MagicMock standing in for the configuration manager."""
        self.configuration_manager = MagicMock()
        device = self.configuration_manager.get_device.return_value
        device.ip_address = "192.168.1.1"
        device.port = "22"
        device.user = "user"
        device.password = "pass"
        device.device_type = "compute"
        device.service_list = []
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
OCA/l10n-brazil
|
refs/heads/12.0
|
l10n_br_account_payment_order/models/__init__.py
|
1
|
# Copyright (C) 2016-Today - KMEE (<http://kmee.com.br>).
# Luis Felipe Miléo - mileo@kmee.com.br
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import res_company
from . import account_invoice
from . import account_journal
from . import account_move
from . import l10n_br_cnab_change_methods
from . import account_move_line
from . import l10n_br_cnab_boleto_fields
from . import l10n_br_cnab_payment_fields
from . import account_payment_mode
from . import account_payment_order
from . import account_payment_line
from . import account_payment
from . import bank_payment_line
# TODO - Separação dos dados de importação para um objeto especifico
# cnab.return.log armazenando o LOG do Arquivo de Retorno CNAB
# de forma separada e permitindo a integração com a alteração feita no
# modulo do BRCobranca onde se esta utilizando o modulo
# account_base_move_import para fazer essa tarefa de wizard de importação,
# o objeto l10n_br_cnab esta comentado para permitir, caso seja necessário,
# a implementação de outra forma de importação pois tem os metodos que eram
# usados pela KMEE e o historico git do arquivo
# from . import l10n_br_cnab
from . import l10n_br_cnab_event
from . import l10n_br_cnab_lot
from . import l10n_br_cnab_return_log
from . import ir_attachment
from . import l10n_br_cnab_data_abstract
from . import l10n_br_cnab_return_move_code
from . import l10n_br_cnab_mov_intruction_code
|
darfire/screp
|
refs/heads/master
|
screp/termactions.py
|
1
|
import lxml
from lxml.etree import (
XPath,
)
from lxml.cssselect import CSSSelector
import re
from .utils import (
raise_again,
generic_translator,
preprocess_selector,
)
class BaseTermAction(object):
    """Base class for term actions: typed, chainable processing steps.

    ``in_type``/``out_type`` of None act as wildcards compatible with any type.
    """
    in_type = None
    out_type = None

    @staticmethod
    def _check_types_match(t1, t2):
        # None is a wildcard; otherwise require equality.
        if t1 is None or t2 is None:
            return True
        return t1 == t2

    def can_precede(self, other):
        """True if this action's output type can feed *other*'s input type."""
        return BaseTermAction._check_types_match(self.out_type, other.in_type)

    def can_follow(self, other):
        """True if this action can consume *other*'s output."""
        return BaseTermAction._check_types_match(self.in_type, other.out_type)

    def execute(self, value):
        """Process *value*; subclasses override. The base implementation is a no-op."""
        pass
class GenericTermAction(BaseTermAction):
    """Term action wrapping a plain callable plus optional extra arguments."""

    def __init__(self, f, in_type=None, out_type=None, args=None, identification=None):
        self._f = f
        self._id = identification
        self.in_type = in_type
        self.out_type = out_type
        # Copy the extra arguments so later mutation of the caller's list
        # cannot change this action's behaviour.
        self._args = [] if args is None else list(args)

    def sub_execute(self, value):
        """Apply the wrapped callable; hook point overridden by subclasses."""
        return self._f(value, *self._args)

    def execute(self, value):
        """Run the action, re-raising any failure tagged with this action's id."""
        try:
            return self.sub_execute(value)
        except Exception as e:
            raise_again('%s: %s' % (self._id, e))
class GenericSelectorTermAction(GenericTermAction):
    """Generic term action whose callable additionally receives a selector."""

    def __init__(self, f, selector, in_type=None, out_type=None, identification=None, args=None):
        super(GenericSelectorTermAction, self).__init__(
            f,
            in_type=in_type,
            out_type=out_type,
            identification=identification,
            args=args)
        self._selector = selector

    def sub_execute(self, value):
        # The selector is threaded in right after the value being processed.
        return self._f(value, self._selector, *self._args)
class AnchorTermAction(BaseTermAction):
    """Term action that resolves a named anchor from the evaluation context."""
    in_type = 'context'
    # out_type is set at instantiation, since it can vary
    def __init__(self, anchor, out_type, identification=None):
        self._anchor = anchor
        self._id = identification
        self.out_type = out_type
    def execute(self, context):
        # value must be a context
        return context.get_anchor(self._anchor)
class RegexTermAction(BaseTermAction):
    """Term action implementing 'resub': regex substitution on a string.

    ``args`` is ``[pattern, replacement]`` or ``[pattern, replacement, flags]``
    where ``flags`` is a string of single-character options (see
    ``char_to_flag``, plus 'g' = replace all and 'f' = first match only).
    """
    in_type = 'string'
    out_type = 'string'

    # Single-character user flags mapped to `re` module flags.
    char_to_flag = {
        'i': re.IGNORECASE,
        'l': re.LOCALE,
        'm': re.MULTILINE,
        's': re.DOTALL,
        'u': re.UNICODE,
    }

    # 'g'/'f' are handled separately as a substitution count.  list() is
    # required here: on Python 3 dict.keys() returns a view, and
    # `view + list` raises TypeError at class-creation time.
    allowed_flags = frozenset(list(char_to_flag.keys()) + ['g', 'f'])

    def __init__(self, args, identification=None):
        """Validate args, parse flags and compile the pattern.

        Raises ValueError for a wrong argument count or unknown flags.
        """
        self._id = identification
        if len(args) < 2 or len(args) > 3:
            raise ValueError("Invalid number of arguments for 'resub'!")
        self._replace = args[1]
        if len(args) == 3:
            sflags = args[2]
        else:
            sflags = ''
        (self._count, flags) = self._handle_flags(sflags)
        self._re = re.compile(args[0], flags=flags)

    def _handle_flags(self, flags):
        """Translate a flag string into (substitution count, re flag bits)."""
        flags = set(flags)
        bflags = 0
        if not flags <= self.allowed_flags:
            raise ValueError("Unknown flags: '%s'!" % (''.join(list(flags - self.allowed_flags)),))
        for (k, f) in self.char_to_flag.items():
            if k in flags:
                bflags |= f
        if 'g' in flags and 'f' in flags:
            raise ValueError("Only one of 'g' and 'f' is allowed!")
        # count=0 asks re.sub to replace every occurrence ('g' behaviour).
        count = 0
        if 'f' in flags:
            count = 1
        return (count, bflags)

    def execute(self, value):
        """Return *value* with pattern matches substituted."""
        return self._re.sub(self._replace, value, count=self._count)
def make_action_of_class(cls, f, in_type, out_type):
    """Return a builder(identification, args) that constructs *cls* actions
    wrapping callable *f* with the given input/output types."""
    def builder(identification, args):
        return cls(f,
                   in_type=in_type,
                   out_type=out_type,
                   identification=identification,
                   args=args)
    return builder
# Convenience wrapper: builders producing GenericTermAction for a plain callable.
def make_generic_action(f, in_type, out_type):
    return make_action_of_class(GenericTermAction, f, in_type, out_type)
def make_custom_selector_action(f, selector_ctor, in_type, out_type):
    """Return a builder whose first argument is compiled into a selector via
    *selector_ctor*; remaining arguments are passed through to the action."""
    def builder(identification, args):
        compiled = selector_ctor(preprocess_selector(args[0]))
        remaining = args[1:]
        return GenericSelectorTermAction(f,
                                         compiled,
                                         in_type=in_type,
                                         out_type=out_type,
                                         identification=identification,
                                         args=remaining)
    return builder
# Selector actions whose selector argument uses CSS selector syntax.
def make_selector_action(f, in_type, out_type):
    return make_custom_selector_action(f, CSSSelector, in_type, out_type)
# Selector actions evaluated along a fixed XPath axis prefix (e.g. 'descendant').
def make_axis_selector_action(f, axis, in_type, out_type):
    return make_custom_selector_action(f, lambda spec: XPath(generic_translator.css_to_xpath(spec, prefix=axis)), in_type, out_type)
|
glyph/cryptography
|
refs/heads/master
|
cryptography/hazmat/bindings/openssl/opensslv.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# C snippets consumed by the cffi binding generator: expose the
# compile-time OpenSSL version macros as constants.
INCLUDES = """
#include <openssl/opensslv.h>
"""
TYPES = """
static const int OPENSSL_VERSION_NUMBER;
static const char *const OPENSSL_VERSION_TEXT;
"""
# No functions, macros or customizations are bound from this header.
FUNCTIONS = """
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
# No conditionally-available names in this module.
CONDITIONAL_NAMES = {}
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/test/test_error.py
|
18
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.internet import error
import socket
class TestStringification(unittest.TestCase):
    """Test that the exceptions have useful stringifications.
    """

    listOfTests = [
        #(output, exception[, args[, kwargs]]),
        ("An error occurred binding to an interface.",
         error.BindError),
        ("An error occurred binding to an interface: foo.",
         error.BindError, ['foo']),
        ("An error occurred binding to an interface: foo bar.",
         error.BindError, ['foo', 'bar']),
        ("Couldn't listen on eth0:4242: Foo.",
         error.CannotListenError,
         ('eth0', 4242, socket.error('Foo'))),
        ("Message is too long to send.",
         error.MessageLengthError),
        ("Message is too long to send: foo bar.",
         error.MessageLengthError, ['foo', 'bar']),
        ("DNS lookup failed.",
         error.DNSLookupError),
        ("DNS lookup failed: foo bar.",
         error.DNSLookupError, ['foo', 'bar']),
        ("An error occurred while connecting.",
         error.ConnectError),
        ("An error occurred while connecting: someOsError.",
         error.ConnectError, ['someOsError']),
        ("An error occurred while connecting: foo.",
         error.ConnectError, [], {'string': 'foo'}),
        ("An error occurred while connecting: someOsError: foo.",
         error.ConnectError, ['someOsError', 'foo']),
        ("Couldn't bind.",
         error.ConnectBindError),
        ("Couldn't bind: someOsError.",
         error.ConnectBindError, ['someOsError']),
        ("Couldn't bind: someOsError: foo.",
         error.ConnectBindError, ['someOsError', 'foo']),
        ("Hostname couldn't be looked up.",
         error.UnknownHostError),
        ("No route to host.",
         error.NoRouteError),
        ("Connection was refused by other side.",
         error.ConnectionRefusedError),
        ("TCP connection timed out.",
         error.TCPTimedOutError),
        ("File used for UNIX socket is no good.",
         error.BadFileError),
        ("Service name given as port is unknown.",
         error.ServiceNameUnknownError),
        ("User aborted connection.",
         error.UserError),
        ("User timeout caused connection failure.",
         error.TimeoutError),
        ("An SSL error occurred.",
         error.SSLError),
        ("Connection to the other side was lost in a non-clean fashion.",
         error.ConnectionLost),
        ("Connection to the other side was lost in a non-clean fashion: foo bar.",
         error.ConnectionLost, ['foo', 'bar']),
        ("Connection was closed cleanly.",
         error.ConnectionDone),
        ("Connection was closed cleanly: foo bar.",
         error.ConnectionDone, ['foo', 'bar']),
        ("Uh.", #TODO nice docstring, you've got there.
         error.ConnectionFdescWentAway),
        ("Tried to cancel an already-called event.",
         error.AlreadyCalled),
        ("Tried to cancel an already-called event: foo bar.",
         error.AlreadyCalled, ['foo', 'bar']),
        ("Tried to cancel an already-cancelled event.",
         error.AlreadyCancelled),
        ("A process has ended without apparent errors: process finished with exit code 0.",
         error.ProcessDone,
         [None]),
        ("A process has ended with a probable error condition: process ended.",
         error.ProcessTerminated),
        ("A process has ended with a probable error condition: process ended with exit code 42.",
         error.ProcessTerminated,
         [],
         {'exitCode': 42}),
        ("A process has ended with a probable error condition: process ended by signal SIGBUS.",
         error.ProcessTerminated,
         [],
         {'signal': 'SIGBUS'}),
        ("The Connector was not connecting when it was asked to stop connecting.",
         error.NotConnectingError),
        ("The Port was not listening when it was asked to stop listening.",
         error.NotListeningError),
        ]

    def testThemAll(self):
        """Instantiate each exception with its args/kwargs and compare str()."""
        for entry in self.listOfTests:
            output = entry[0]
            exception = entry[1]
            try:
                args = entry[2]
            except IndexError:
                args = ()
            try:
                kwargs = entry[3]
            except IndexError:
                kwargs = {}
            # failUnlessEqual is a deprecated alias (removed in Python 3.12);
            # use the modern assertEqual spelling instead.
            self.assertEqual(
                str(exception(*args, **kwargs)),
                output)

    def test_connectionLostSubclassOfConnectionClosed(self):
        """
        L{error.ConnectionClosed} is a superclass of L{error.ConnectionLost}.
        """
        self.assertTrue(issubclass(error.ConnectionLost,
                                   error.ConnectionClosed))

    def test_connectionDoneSubclassOfConnectionClosed(self):
        """
        L{error.ConnectionClosed} is a superclass of L{error.ConnectionDone}.
        """
        self.assertTrue(issubclass(error.ConnectionDone,
                                   error.ConnectionClosed))
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27_32/Lib/site-packages/pypm/client/store.py
|
2
|
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.client.store
~~~~~~~~~~~~~~~~~
Store contains two things:
1) repository index cache (~/.pypm/idx/<url-md5>/index)
2) installed database (<pyenv>/_pypm/installed.db)
"""
import logging
from datetime import datetime
from datetime import timedelta
from operator import attrgetter
from sqlalchemy import and_
from sqlalchemy import or_
from applib import _simpledb
from pypm.common.util import url_join
from pypm.common.package import RepoPackage
from pypm.common.package import InstalledPackage
from pypm.common.repository import RemoteRepositoryManager
from pypm.client import error
LOG = logging.getLogger('pypm.client')
class InstalledPackageDatabase(_simpledb.SimpleDatabase):
    """Database of installed packages; mapping configured by _simpledb.setup below."""
    pass
# Map InstalledPackage objects onto InstalledPackageDatabase rows; the
# composite primary key uniquely identifies one installed package build.
_simpledb.setup(InstalledPackageDatabase, InstalledPackage,
                primary_keys=['name', 'version',
                              'pyver', 'osarch',
                              'pkg_version'])
class RepoPackageStore(object):
    """Store to manage repositories"""
    def __init__(self, rrmanager, repository_list):
        """
        rrmanager - pypm.common.repository.RemoteRepositoryManager
        repository_list - list<pypm.common.repository.RemoteRepository>
        """
        self.rrmanager = rrmanager
        self.repository_list = repository_list
    def sync(self, force=True, interactive=True):
        """Sync the local cache of remote repositories
        - force: if True, ignore cache.. and forcefully redownload index
        - interactive: if False, don't show progress bars.
        Return a sequence of bools denoting if the repositories are actually
        downloaded or not.
        """
        retvals = []
        for repo in self.repository_list:
            retvals.append(self.rrmanager.sync_repository(
                repo, force, interactive=interactive))
        return tuple(retvals)
    def autosync(self):
        """Check for outdated repositories and update them if necessary
        This method works like `sync` (above) does except for the fact that it
        only attempts (cache-friendly as sync(force=True)) to sync the
        repository when last sync/autosync was run more than a day ago.
        This is useful to do every day for the user typically do not remember to
        run sync every day. Honestly, do you think you run 'sudo apt-get
        update' every day? It is for this reason Ubuntu came with their auto
        update thing (update-manager).
        """
        utcnow = datetime.utcnow()
        single_day = timedelta(days=1)
        retvals = [] # return values of rrmanager.sync_repository
        outdateds = [] # was the repository out-of-date, or local cache missing?
        for repo in self.repository_list:
            # Only re-sync a repository whose last download attempt is
            # missing or more than a day old.
            prevtime = self.rrmanager.get_remote_index_last_download_attempt_time(repo)
            if prevtime is None or prevtime + single_day < utcnow:
                outdateds.append(True)
                retvals.append(
                    self.rrmanager.sync_repository(
                        repo, force=False, verbose=False))
            else:
                outdateds.append(False)
                retvals.append(None)
        if True in retvals:
            LOG.info('autosync: synced %s repositories', retvals.count(True))
        elif True in outdateds:
            LOG.info('autosync: nothing new to sync')
        else:
            LOG.debug('autosync: nothing was outdated')
        return retvals
    def search(self, *keywords):
        """Search for ``keywords`` in all repositories"""
        if not keywords:
            q = self._query()
        else:
            # Every keyword must match either the package name or its summary.
            q = self._query(
                lambda C: and_(*[or_(C.name.contains(keyword),
                                     C.summary.contains(keyword))
                                 for keyword in keywords]))
        for r in q:
            yield r
    def find_package(self, name, version=None):
        """Find a package"""
        for pkg in self.find_package_releases(name):
            if version:
                if version == pkg.version:
                    return pkg
            else:
                # No version requested: releases are sorted newest-first,
                # so the first hit is the latest release.
                return pkg
        raise error.PackageNotFound(
            '{0}=={1}'.format(name, version) if version else name)
    def find_package_releases(self, name):
        """Find all available releases of a package
        Return a sorted (by version) list of packages
        """
        packages = list(self._query(lambda C: C.name == name.lower()))
        packages.sort(key=attrgetter('version_key'), reverse=True)
        return packages
    def _query(self, *expr_list):
        """Run the given filter expressions against every repository index,
        yielding each matching package at most once (first repository wins)."""
        found = set()
        # search every repository
        for repo in self.repository_list:
            db = self.rrmanager.get_index_db(repo)
            with db.transaction() as session:
                query = session.query(db.mapper_class)
                for expr_fn in expr_list:
                    query = query.filter(expr_fn(db.mapper_class))
                for pkg in query:
                    # return newly found items
                    if pkg.full_name not in found:
                        found.add(pkg.full_name)
                        # set download URL now
                        pkg.set_download_url(
                            url_join(repo.url, [pkg.relpath]))
                        yield pkg
class InstalledPackageStore:
    """Package store that contains all installed packages"""
    def __init__(self, storepath):
        # touch=True creates the database file if it does not exist yet.
        self.storepath = storepath
        self._db = InstalledPackageDatabase(self.storepath, touch=True)
    def add_packages(self, packages):
        """Record the given packages as installed."""
        with self._db.transaction() as session:
            session.add_all(packages)
            session.commit()
    def remove_package(self, installed_package):
        """Remove one installed-package record."""
        with self._db.transaction() as session:
            session.delete(installed_package)
            session.commit()
    def find_all_packages(self):
        """Return all installed packages"""
        return self._findby([])
    def find_only_package(self, name):
        """Return the given installed package"""
        packages = list(self._findby_with_name(name, []))
        if not packages:
            raise error.NoPackageInstalled(name, None)
        assert len(packages) == 1, 'expecting 1 package, but got %d' % len(packages)
        return packages[0]
    def _findby(self, expression_list):
        """Return a query filtered by each expression in *expression_list*."""
        with self._db.transaction() as session:
            query = session.query(self._db.mapper_class)
            for expr in expression_list:
                query = query.filter(expr)
            return query
    def _findby_with_name(self, name, expression_list):
        """Like _findby, additionally matching the (escaped) package name."""
        escape_char = '\\'
        # escape sqlite LIKE's special chars - _ and %
        escaped_name = _simpledb.sqlalchemy_escape(name, escape_char, '%_')
        search_expr = self._db.mapper_class.name.like(
            escaped_name, escape=escape_char)
        return self._findby([search_expr]+expression_list)
|
Itxaka/st2
|
refs/heads/master
|
st2client/st2client/formatters/doc.py
|
7
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from st2client import formatters
from st2client.utils import jsutil
LOG = logging.getLogger(__name__)
class Json(formatters.Formatter):
    """Formatter rendering subjects (models, dicts, lists or raw JSON
    strings) as pretty-printed JSON, optionally restricted to selected
    attributes (kwargs['attributes']; 'all' or None means every key)."""

    @classmethod
    def format(cls, subject, *args, **kwargs):
        attributes = kwargs.get('attributes', None)
        # Decode a raw JSON string first so it is dispatched like any other
        # subject below.  Previously a str subject fell through every branch
        # and `docs` was never bound, raising NameError at json.dumps().
        if isinstance(subject, str):
            subject = json.loads(subject)
        if isinstance(subject, dict) or (
                not isinstance(subject, (list, tuple)) and
                not hasattr(subject, '__iter__')):
            # Single document: a plain dict or a model object.
            doc = subject if isinstance(subject, dict) else subject.__dict__
            keys = doc.keys() if not attributes or 'all' in attributes else attributes
            docs = jsutil.get_kvps(doc, keys)
        else:
            # Collection of documents.
            docs = []
            for item in subject:
                doc = item if isinstance(item, dict) else item.__dict__
                keys = doc.keys() if not attributes or 'all' in attributes else attributes
                docs.append(jsutil.get_kvps(doc, keys))
        return json.dumps(docs, indent=4, sort_keys=True)
|
svanschalkwyk/datafari
|
refs/heads/master
|
windows/python/Lib/test/test_coercion.py
|
122
|
import copy
import unittest
from test.test_support import run_unittest, TestFailed, check_warnings
# Fake a number that implements numeric methods through __coerce__
class CoerceNumber:
    # Classic (old-style) class delegating numeric ops to its wrapped value
    # via Python 2's __coerce__ protocol.
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        return '<CoerceNumber %s>' % repr(self.arg)
    def __coerce__(self, other):
        # Unwrap both sides where possible so arithmetic runs on raw values.
        if isinstance(other, CoerceNumber):
            return self.arg, other.arg
        else:
            return (self.arg, other)
# New-style class version of CoerceNumber
class CoerceTo(object):
    # New-style counterpart of CoerceNumber (same __coerce__ behaviour).
    def __init__(self, arg):
        self.arg = arg
    def __coerce__(self, other):
        if isinstance(other, CoerceTo):
            return self.arg, other.arg
        else:
            return self.arg, other
# Fake a number that implements numeric ops through methods.
class MethodNumber:
    # Fake number implementing each numeric operator (and its reflected
    # variant) explicitly, instead of relying on __coerce__.
    def __init__(self,arg):
        self.arg = arg
    def __repr__(self):
        return '<MethodNumber %s>' % repr(self.arg)
    def __add__(self,other):
        return self.arg + other
    def __radd__(self,other):
        return other + self.arg
    def __sub__(self,other):
        return self.arg - other
    def __rsub__(self,other):
        return other - self.arg
    def __mul__(self,other):
        return self.arg * other
    def __rmul__(self,other):
        return other * self.arg
    def __div__(self,other):
        return self.arg / other
    def __rdiv__(self,other):
        return other / self.arg
    def __truediv__(self,other):
        return self.arg / other
    def __rtruediv__(self,other):
        return other / self.arg
    def __floordiv__(self,other):
        return self.arg // other
    def __rfloordiv__(self,other):
        return other // self.arg
    def __pow__(self,other):
        return self.arg ** other
    def __rpow__(self,other):
        return other ** self.arg
    def __mod__(self,other):
        return self.arg % other
    def __rmod__(self,other):
        return other % self.arg
    def __cmp__(self, other):
        # Python 2 three-way comparison; delegates to the wrapped value.
        return cmp(self.arg, other)
# Operand values under test; (row, col) indices into this list key the
# expected-result tables below.
candidates = [2, 2L, 4.0, 2+0j, [1], (2,), None,
              MethodNumber(2), CoerceNumber(2)]
infix_binops = [ '+', '-', '*', '**', '%', '//', '/' ]
# Shorthand for "this combination raises TypeError".
TE = TypeError
# b = both normal and augmented give same result list
# s = single result lists for normal and augmented
# e = equals other results
# result lists: ['+', '-', '*', '**', '%', '//', ('classic /', 'new /')]
#                                                 ^^^^^^^^^^^^^^^^^^^^^^
#                                                 2-tuple if results differ
#                                                 else only one value
infix_results = {
    # 2
    (0,0): ('b', [4, 0, 4, 4, 0, 1, (1, 1.0)]),
    (0,1): ('e', (0,0)),
    (0,2): ('b', [6.0, -2.0, 8.0, 16.0, 2.0, 0.0, 0.5]),
    (0,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (0,4): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
    (0,5): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
    (0,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (0,7): ('e', (0,0)),
    (0,8): ('e', (0,0)),
    # 2L
    (1,0): ('e', (0,0)),
    (1,1): ('e', (0,1)),
    (1,2): ('e', (0,2)),
    (1,3): ('e', (0,3)),
    (1,4): ('e', (0,4)),
    (1,5): ('e', (0,5)),
    (1,6): ('e', (0,6)),
    (1,7): ('e', (0,7)),
    (1,8): ('e', (0,8)),
    # 4.0
    (2,0): ('b', [6.0, 2.0, 8.0, 16.0, 0.0, 2.0, 2.0]),
    (2,1): ('e', (2,0)),
    (2,2): ('b', [8.0, 0.0, 16.0, 256.0, 0.0, 1.0, 1.0]),
    (2,3): ('b', [6+0j, 2+0j, 8+0j, 16+0j, 0+0j, 2+0j, 2+0j]),
    (2,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (2,5): ('e', (2,4)),
    (2,6): ('e', (2,4)),
    (2,7): ('e', (2,0)),
    (2,8): ('e', (2,0)),
    # (2+0j)
    (3,0): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (3,1): ('e', (3,0)),
    (3,2): ('b', [6+0j, -2+0j, 8+0j, 16+0j, 2+0j, 0+0j, 0.5+0j]),
    (3,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (3,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (3,5): ('e', (3,4)),
    (3,6): ('e', (3,4)),
    (3,7): ('e', (3,0)),
    (3,8): ('e', (3,0)),
    # [1]
    (4,0): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
    (4,1): ('e', (4,0)),
    (4,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,3): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,4): ('b', [[1, 1], TE, TE, TE, TE, TE, TE]),
    (4,5): ('s', [TE, TE, TE, TE, TE, TE, TE], [[1, 2], TE, TE, TE, TE, TE, TE]),
    (4,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,7): ('e', (4,0)),
    (4,8): ('e', (4,0)),
    # (2,)
    (5,0): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
    (5,1): ('e', (5,0)),
    (5,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (5,3): ('e', (5,2)),
    (5,4): ('e', (5,2)),
    (5,5): ('b', [(2, 2), TE, TE, TE, TE, TE, TE]),
    (5,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (5,7): ('e', (5,0)),
    (5,8): ('e', (5,0)),
    # None
    (6,0): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (6,1): ('e', (6,0)),
    (6,2): ('e', (6,0)),
    (6,3): ('e', (6,0)),
    (6,4): ('e', (6,0)),
    (6,5): ('e', (6,0)),
    (6,6): ('e', (6,0)),
    (6,7): ('e', (6,0)),
    (6,8): ('e', (6,0)),
    # MethodNumber(2)
    (7,0): ('e', (0,0)),
    (7,1): ('e', (0,1)),
    (7,2): ('e', (0,2)),
    (7,3): ('e', (0,3)),
    (7,4): ('e', (0,4)),
    (7,5): ('e', (0,5)),
    (7,6): ('e', (0,6)),
    (7,7): ('e', (0,7)),
    (7,8): ('e', (0,8)),
    # CoerceNumber(2)
    (8,0): ('e', (0,0)),
    (8,1): ('e', (0,1)),
    (8,2): ('e', (0,2)),
    (8,3): ('e', (0,3)),
    (8,4): ('e', (0,4)),
    (8,5): ('e', (0,5)),
    (8,6): ('e', (0,6)),
    (8,7): ('e', (0,7)),
    (8,8): ('e', (0,8)),
}
def process_infix_results():
    """Normalize infix_results in place: resolve 'e' aliases and collapse
    the ('classic /', 'new /') tuples to the division mode in effect."""
    for key in sorted(infix_results):
        val = infix_results[key]
        if val[0] == 'e':
            # Alias entry: point at the referenced (already processed) result.
            infix_results[key] = infix_results[val[1]]
        else:
            if val[0] == 's':
                res = (val[1], val[2])
            elif val[0] == 'b':
                res = (val[1], val[1])
            # NOTE(review): only res[0] is normalized here.  For 'b' entries
            # res[0] and res[1] are the same list object, so fixing res[0]
            # fixes both; presumably no 's' entry carries a tuple result —
            # confirm if new table entries are added.
            for i in range(1):
                if isinstance(res[i][6], tuple):
                    if 1/2 == 0:
                        # testing with classic (floor) division
                        res[i][6] = res[i][6][0]
                    else:
                        # testing with -Qnew
                        res[i][6] = res[i][6][1]
            infix_results[key] = res
# Normalizing the table performs classic divisions, which may warn; silence them.
with check_warnings(("classic (int|long) division", DeprecationWarning),
                    quiet=True):
    process_infix_results()
# now infix_results has two lists of results for every pairing.
prefix_binops = [ 'divmod' ]
# prefix_results[ia][ib] == expected divmod(candidates[ia], candidates[ib]).
prefix_results = [
    [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)],
    [(1L,0L), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1L,0L)],
    [(2.0,0.0), (2.0,0.0), (1.0,0.0), ((2+0j),0j), TE, TE, TE, TE, (2.0,0.0)],
    [((1+0j),0j), ((1+0j),0j), (0j,(2+0j)), ((1+0j),0j), TE, TE, TE, TE, ((1+0j),0j)],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)]
]
def format_float(value):
    """Render *value* to one decimal place, collapsing near-zero to '0.0'."""
    return '0.0' if abs(value) < 0.01 else '%.1f' % value
# avoid testing platform fp quirks
def format_result(value):
    """Format an operation result for comparison, normalizing float output
    so platform floating-point quirks do not cause spurious mismatches."""
    if isinstance(value, complex):
        return '(%s + %sj)' % (format_float(value.real),
                               format_float(value.imag))
    if isinstance(value, float):
        return format_float(value)
    return str(value)
class CoercionTest(unittest.TestCase):
    # Exercises Python 2 numeric coercion across all candidate pairings.
    def test_infix_binops(self):
        # Every (a, b) candidate pair is run through every infix operator,
        # both in normal and augmented (a op= b) form, against the
        # precomputed infix_results table.
        for ia, a in enumerate(candidates):
            for ib, b in enumerate(candidates):
                results = infix_results[(ia, ib)]
                for op, res, ires in zip(infix_binops, results[0], results[1]):
                    if res is TE:
                        self.assertRaises(TypeError, eval,
                                          'a %s b' % op, {'a': a, 'b': b})
                    else:
                        self.assertEqual(format_result(res),
                                         format_result(eval('a %s b' % op)),
                                         '%s %s %s == %s failed' % (a, op, b, res))
                    try:
                        z = copy.copy(a)
                    except copy.Error:
                        z = a # assume it has no inplace ops
                    if ires is TE:
                        try:
                            exec 'z %s= b' % op
                        except TypeError:
                            pass
                        else:
                            self.fail("TypeError not raised")
                    else:
                        exec('z %s= b' % op)
                        self.assertEqual(ires, z)
    def test_prefix_binops(self):
        # Same idea for prefix operators (currently only divmod).
        for ia, a in enumerate(candidates):
            for ib, b in enumerate(candidates):
                for op in prefix_binops:
                    res = prefix_results[ia][ib]
                    if res is TE:
                        self.assertRaises(TypeError, eval,
                                          '%s(a, b)' % op, {'a': a, 'b': b})
                    else:
                        self.assertEqual(format_result(res),
                                         format_result(eval('%s(a, b)' % op)),
                                         '%s(%s, %s) == %s failed' % (op, a, b, res))
    def test_cmptypes(self):
        # Built-in tp_compare slots expect their arguments to have the
        # same type, but a user-defined __coerce__ doesn't have to obey.
        # SF #980352
        evil_coercer = CoerceTo(42)
        # Make sure these don't crash any more
        self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
        self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
        # ...but that this still works
        class WackyComparer(object):
            def __cmp__(slf, other):
                self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
                return 0
            __hash__ = None # Invalid cmp makes this unhashable
        self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
        # ...and classic classes too, since that code path is a little different
        class ClassicWackyComparer:
            def __cmp__(slf, other):
                self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
                return 0
        self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
    def test_infinite_rec_classic_classes(self):
        # if __coerce__() returns its arguments reversed it causes an infinite
        # recursion for classic classes.
        class Tester:
            def __coerce__(self, other):
                return other, self
        exc = TestFailed("__coerce__() returning its arguments reverse "
                         "should raise RuntimeError")
        try:
            Tester() + 1
        except (RuntimeError, TypeError):
            return
        except:
            raise exc
        else:
            raise exc
def test_main():
    # Coercion tests exercise deprecated classic-division and complex
    # divmod paths; suppress those warnings for the duration of the run.
    with check_warnings(("complex divmod.., // and % are deprecated",
                         DeprecationWarning),
                        ("classic (int|long) division", DeprecationWarning),
                        quiet=True):
        run_unittest(CoercionTest)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/tensorflow/python/ops/gen_sparse_ops.py
|
3
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
def _add_many_sparse_to_tensors_map(sparse_indices, sparse_values,
                                    sparse_shape, container=None,
                                    shared_name=None, name=None):
  r"""Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.

  A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
  `sparse_values`, and `sparse_shape`, where

  ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```

  An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
  having a first `sparse_indices` column taking values between `[0, N)`, where
  the minibatch size `N == sparse_shape[0]`.

  The input `SparseTensor` must have rank `R` greater than 1, and the first
  dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension.  The stored
  `SparseTensor` objects pointed to by each row of the output `sparse_handles`
  will have rank `R-1`.

  The `SparseTensor` values can then be read out as part of a minibatch by passing
  the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
  the correct `SparseTensorsMap` is accessed, ensure that the same
  `container` and `shared_name` are passed to that Op.  If no `shared_name`
  is provided here, instead use the *name* of the Operation created by calling
  `AddManySparseToTensorsMap` as the `shared_name` passed to
  `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.

  Args:
    sparse_indices: A `Tensor` of type `int64`.
      2-D.  The `indices` of the minibatch `SparseTensor`.
      `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
    sparse_values: A `Tensor`.
      1-D.  The `values` of the minibatch `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`.
      1-D.  The `shape` of the minibatch `SparseTensor`.
      The minibatch size `N == sparse_shape[0]`.
    container: An optional `string`. Defaults to `""`.
      The container name for the `SparseTensorsMap` created by this op.
    shared_name: An optional `string`. Defaults to `""`.
      The shared name for the `SparseTensorsMap` created by this op.
      If blank, the new Operation's unique name is used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
    1-D.  The handles of the `SparseTensor` now stored in the
    `SparseTensorsMap`.  Shape: `[N]`.
  """
  # Machine-generated wrapper: delegates to the registered op definition,
  # which performs all argument validation.
  result = _op_def_lib.apply_op("AddManySparseToTensorsMap",
                                sparse_indices=sparse_indices,
                                sparse_values=sparse_values,
                                sparse_shape=sparse_shape,
                                container=container, shared_name=shared_name,
                                name=name)
  return result
def _add_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape,
                               container=None, shared_name=None, name=None):
  r"""Adds a `SparseTensor` to a `SparseTensorsMap` and returns its handle.

  The `SparseTensor`, given by its three component tensors, is stored in a
  `SparseTensorsMap` container, and a freshly generated `int64` key for it is
  returned.  The stored tensor can later be read back as part of a minibatch
  by passing that key as a vector element to `TakeManySparseFromTensorsMap`;
  to reach the same map, pass the same `container` and `shared_name` to that
  Op.  When `shared_name` is left blank, the *name* of the Operation created
  here acts as the shared name, and the two Operations must be colocated.

  Args:
    sparse_indices: An `int64` `Tensor`; 2-D `indices` of the `SparseTensor`.
    sparse_values: A `Tensor`; 1-D `values` of the `SparseTensor`.
    sparse_shape: An `int64` `Tensor`; 1-D `shape` of the `SparseTensor`.
    container: An optional `string`, defaults to `""`; container name for the
      `SparseTensorsMap` created by this op.
    shared_name: An optional `string`, defaults to `""`; shared name for the
      `SparseTensorsMap`.  If blank, the new Operation's unique name is used.
    name: A name for the operation (optional).

  Returns:
    A 0-D `int64` `Tensor`: the handle of the `SparseTensor` now stored in
    the `SparseTensorsMap`.
  """
  # Thin wrapper: forward everything to the registered op definition.
  return _op_def_lib.apply_op(
      "AddSparseToTensorsMap",
      sparse_indices=sparse_indices,
      sparse_values=sparse_values,
      sparse_shape=sparse_shape,
      container=container,
      shared_name=shared_name,
      name=name)
# Output field names and namedtuple wrapper for the multi-output
# DeserializeManySparse op.  The name list is kept as a separate module-level
# binding because this generated module may reference it elsewhere.
__deserialize_many_sparse_outputs = ["sparse_indices", "sparse_values",
                                     "sparse_shape"]
_DeserializeManySparseOutput = _collections.namedtuple(
    "DeserializeManySparse", __deserialize_many_sparse_outputs)
def _deserialize_many_sparse(serialized_sparse, dtype, name=None):
  r"""Deserializes and concatenates `SparseTensors` from a serialized minibatch.

  `serialized_sparse` must be a string matrix of shape `[N x 3]`, where `N`
  is the minibatch size and each row is a packed output of `SerializeSparse`.
  All original `SparseTensor` objects must have the same rank; the resulting
  `SparseTensor` has rank one higher, the inputs having been concatenated
  along a new leading (minibatch) dimension.  For every dimension but the
  first, the output shape value is the maximum of the inputs' corresponding
  shape values; the first shape value is `N`.

  The input objects' indices are assumed to be in standard lexicographic
  order.  If they are not, run `SparseReorder` afterwards to restore the
  canonical ordering.

  Args:
    serialized_sparse: A `string` `Tensor`; 2-D with 3 columns, holding the
      `N` serialized `SparseTensor` objects.
    dtype: A `tf.DType`; the `dtype` of the serialized `SparseTensor` objects.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(sparse_indices, sparse_values,
    sparse_shape)` where `sparse_indices` and `sparse_shape` are `int64` and
    `sparse_values` has type `dtype`.
  """
  # Wrap the raw op result in the generated namedtuple for named access.
  return _DeserializeManySparseOutput._make(
      _op_def_lib.apply_op(
          "DeserializeManySparse",
          serialized_sparse=serialized_sparse,
          dtype=dtype,
          name=name))
def _serialize_many_sparse(sparse_indices, sparse_values, sparse_shape,
                           name=None):
  r"""Serializes an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.

  The input `SparseTensor` must have rank `R` greater than 1; its first
  dimension is treated as the minibatch dimension, and elements must be
  sorted in increasing order of that dimension.  Each row of the serialized
  output then represents a `SparseTensor` of rank `R-1`.  The minibatch size
  `N` is taken from `sparse_shape[0]`.

  Args:
    sparse_indices: An `int64` `Tensor`; 2-D `indices` of the minibatch
      `SparseTensor`.
    sparse_values: A `Tensor`; 1-D `values` of the minibatch `SparseTensor`.
    sparse_shape: An `int64` `Tensor`; 1-D `shape` of the minibatch
      `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `string` `Tensor`.
  """
  return _op_def_lib.apply_op(
      "SerializeManySparse",
      sparse_indices=sparse_indices,
      sparse_values=sparse_values,
      sparse_shape=sparse_shape,
      name=name)
def _serialize_sparse(sparse_indices, sparse_values, sparse_shape, name=None):
  r"""Serializes a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.

  Args:
    sparse_indices: An `int64` `Tensor`; 2-D `indices` of the `SparseTensor`.
    sparse_values: A `Tensor`; 1-D `values` of the `SparseTensor`.
    sparse_shape: An `int64` `Tensor`; 1-D `shape` of the `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `string` `Tensor`.
  """
  return _op_def_lib.apply_op(
      "SerializeSparse",
      sparse_indices=sparse_indices,
      sparse_values=sparse_values,
      sparse_shape=sparse_shape,
      name=name)
# Output field names and namedtuple wrapper for the multi-output SparseAdd op.
__sparse_add_outputs = ["sum_indices", "sum_values", "sum_shape"]
_SparseAddOutput = _collections.namedtuple(
    "SparseAdd", __sparse_add_outputs)
def _sparse_add(a_indices, a_values, a_shape, b_indices, b_values, b_shape,
                thresh, name=None):
  r"""Adds two `SparseTensor` objects to produce another `SparseTensor`.

  Both inputs' indices are assumed to be in standard lexicographic order; if
  not, run `SparseReorder` first.  By default an index whose values sum to
  zero is still present in the output with a stored zero.  Supplying a
  positive `thresh` drops every output entry whose magnitude is strictly
  smaller than `thresh`; `thresh == 0` (the default) keeps everything.  In
  the shapes below, `nnz` is the count after `thresh` is applied.

  Args:
    a_indices: An `int64` `Tensor`; 2-D `indices` of the first
      `SparseTensor`, a `[nnz, ndims]` matrix.
    a_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D `values` of the first
      `SparseTensor`, a `[nnz]` vector.
    a_shape: An `int64` `Tensor`; 1-D `shape` of the first `SparseTensor`,
      an `[ndims]` vector.
    b_indices: An `int64` `Tensor`; 2-D `indices` of the second
      `SparseTensor`, a `[nnz, ndims]` matrix.
    b_values: A `Tensor` with the same type as `a_values`; 1-D `values` of
      the second `SparseTensor`, a `[nnz]` vector.
    b_shape: An `int64` `Tensor`; 1-D `shape` of the second `SparseTensor`,
      an `[ndims]` vector.
    thresh: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `uint8`, `int16`, `int8`, `uint16`, or `half`; 0-D magnitude threshold
      deciding whether an output value/index pair takes space.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(sum_indices, sum_values, sum_shape)` where
    `sum_indices` and `sum_shape` are `int64` and `sum_values` has the same
    type as `a_values`.
  """
  return _SparseAddOutput._make(
      _op_def_lib.apply_op(
          "SparseAdd",
          a_indices=a_indices,
          a_values=a_values,
          a_shape=a_shape,
          b_indices=b_indices,
          b_values=b_values,
          b_shape=b_shape,
          thresh=thresh,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseAddGrad op.
__sparse_add_grad_outputs = ["a_val_grad", "b_val_grad"]
_SparseAddGradOutput = _collections.namedtuple(
    "SparseAddGrad", __sparse_add_grad_outputs)
def _sparse_add_grad(backprop_val_grad, a_indices, b_indices, sum_indices,
                     name=None):
  r"""The gradient operator for the SparseAdd op.

  SparseAdd computes A + B with A, B and the sum all represented as
  `SparseTensor` objects.  This op consumes the upstream gradient with
  respect to the non-empty values of the sum and emits the gradients with
  respect to the non-empty values of A and B.

  Args:
    backprop_val_grad: A `Tensor` of type `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`,
      `qint8`, `quint8`, `qint32`, or `half`; 1-D with shape `[nnz(sum)]`,
      the gradient with respect to the non-empty values of the sum.
    a_indices: An `int64` `Tensor`; 2-D `indices` of `SparseTensor` A, size
      `[nnz(A), ndims]`.
    b_indices: An `int64` `Tensor`; 2-D `indices` of `SparseTensor` B, size
      `[nnz(B), ndims]`.
    sum_indices: An `int64` `Tensor`; 2-D `indices` of the sum
      `SparseTensor`, size `[nnz(sum), ndims]`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(a_val_grad, b_val_grad)`, both with the
    same type as `backprop_val_grad`: 1-D gradients with shapes `[nnz(A)]`
    and `[nnz(B)]` with respect to the non-empty values of A and B.
  """
  return _SparseAddGradOutput._make(
      _op_def_lib.apply_op(
          "SparseAddGrad",
          backprop_val_grad=backprop_val_grad,
          a_indices=a_indices,
          b_indices=b_indices,
          sum_indices=sum_indices,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseConcat op.
__sparse_concat_outputs = ["output_indices", "output_values", "output_shape"]
_SparseConcatOutput = _collections.namedtuple(
    "SparseConcat", __sparse_concat_outputs)
def _sparse_concat(indices, values, shapes, concat_dim, name=None):
  r"""Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of the sparse tensors,
  each of which is assumed to have its elements ordered along increasing
  dimension number.  All input shapes must match except along `concat_dim`;
  the `indices`, `values`, and `shapes` lists must have the same length.
  The output shape equals the inputs' except along the concat dimension,
  where it is the sum of the inputs' sizes.  Output elements are re-sorted
  to preserve ordering along increasing dimension number, which makes this
  op run in `O(M log M)` time for `M` total non-empty input values.

  For example, with `concat_dim = 1` and inputs

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  the output is

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  i.e. graphically

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Args:
    indices: A list of at least 2 `int64` `Tensor` objects; 2-D indices of
      each input `SparseTensor`.
    values: A list, the same length as `indices`, of `Tensor` objects all of
      one type; 1-D non-empty values of each `SparseTensor`.
    shapes: A list, the same length as `indices`, of `int64` `Tensor`
      objects; 1-D shapes of each `SparseTensor`.
    concat_dim: An `int`; the dimension to concatenate along, in
      `[-rank, rank)` where rank is the number of dimensions of each input.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values,
    output_shape)`: `int64` 2-D indices, values with the same type as
    `values`, and the `int64` 1-D shape of the concatenated `SparseTensor`.
  """
  return _SparseConcatOutput._make(
      _op_def_lib.apply_op(
          "SparseConcat",
          indices=indices,
          values=values,
          shapes=shapes,
          concat_dim=concat_dim,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseCross op.
__sparse_cross_outputs = ["output_indices", "output_values", "output_shape"]
_SparseCrossOutput = _collections.namedtuple(
    "SparseCross", __sparse_cross_outputs)
def _sparse_cross(indices, values, shapes, dense_inputs, hashed_output,
                  num_buckets, hash_key, out_type, internal_type, name=None):
  r"""Generates sparse cross from a list of sparse and dense tensors.

  Takes a list of 2-D `SparseTensor`s and a list of 2-D dense `Tensor`s,
  each representing one feature column, and outputs a 2-D `SparseTensor`
  holding the batchwise crosses of those features.

  For example, for inputs

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  the output is

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  and with `hashed_output=true` the string crosses are replaced by nested
  `FingerprintCat64` combinations of the individual `Fingerprint64` values,
  e.g. `[0, 0]` becomes
  `FingerprintCat64(Fingerprint64("f"),
  FingerprintCat64(Fingerprint64("d"), Fingerprint64("a")))`.

  Args:
    indices: A list of `int64` `Tensor` objects; 2-D indices of each input
      `SparseTensor`.
    values: A list of `Tensor` objects with types from `int64`, `string`;
      1-D values of each `SparseTensor`.
    shapes: A list, the same length as `indices`, of `int64` `Tensor`
      objects; 1-D shapes of each `SparseTensor`.
    dense_inputs: A list of `Tensor` objects with types from `int64`,
      `string`; 2-D columns represented by dense `Tensor`.
    hashed_output: A `bool`; if true, return the hash of the cross instead
      of the string, avoiding string manipulations.
    num_buckets: An `int >= 0`, used when `hashed_output` is true:
      output = hashed_value % num_buckets if num_buckets > 0 else
      hashed_value.
    hash_key: An `int`; the hash_key used by `FingerprintCat64` to combine
      the cross fingerprints.
    out_type: A `tf.DType` from `tf.int64`, `tf.string`.
    internal_type: A `tf.DType` from `tf.int64`, `tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values,
    output_shape)`: `int64` 2-D indices, 1-D values of type `out_type`
    (concatenated or hashed), and the `int64` 1-D shape of the resulting
    `SparseTensor`.
  """
  return _SparseCrossOutput._make(
      _op_def_lib.apply_op(
          "SparseCross",
          indices=indices,
          values=values,
          shapes=shapes,
          dense_inputs=dense_inputs,
          hashed_output=hashed_output,
          num_buckets=num_buckets,
          hash_key=hash_key,
          out_type=out_type,
          internal_type=internal_type,
          name=name))
def sparse_dense_cwise_add(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) the dense side is broadcast to the sparse side's shape, if eligible;
  (2) only the dense values addressed by the SparseTensor's indices take
  part in the element-wise addition.

  The result is therefore a logical SparseTensor with identical indices and
  shape but possibly different non-zero values; this Op returns those
  resultant non-zero values.

  Args:
    sp_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    sp_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    dense: A `Tensor` with the same type as `sp_values`; the `R`-D dense
      operand.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` with the same type as `sp_values`: the `N` values that
    are operated on.
  """
  return _op_def_lib.apply_op(
      "SparseDenseCwiseAdd",
      sp_indices=sp_indices,
      sp_values=sp_values,
      sp_shape=sp_shape,
      dense=dense,
      name=name)
def sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise divides a SparseTensor by a dense Tensor.

  *Limitation*: this Op only broadcasts the dense side to the sparse side,
  never the other direction.

  Args:
    sp_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    sp_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    dense: A `Tensor` with the same type as `sp_values`; the `R`-D dense
      operand.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` with the same type as `sp_values`: the `N` values that
    are operated on.
  """
  return _op_def_lib.apply_op(
      "SparseDenseCwiseDiv",
      sp_indices=sp_indices,
      sp_values=sp_values,
      sp_shape=sp_shape,
      dense=dense,
      name=name)
def sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise multiplies a SparseTensor by a dense Tensor.

  Output locations corresponding to implicitly zero elements of the sparse
  tensor stay zero (taking no storage space), regardless of the dense
  tensor's contents — even if it is +/-INF and INF*0 == NaN.

  *Limitation*: this Op only broadcasts the dense side to the sparse side,
  never the other direction.

  Args:
    sp_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    sp_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    dense: A `Tensor` with the same type as `sp_values`; the `R`-D dense
      operand.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` with the same type as `sp_values`: the `N` values that
    are operated on.
  """
  return _op_def_lib.apply_op(
      "SparseDenseCwiseMul",
      sp_indices=sp_indices,
      sp_values=sp_values,
      sp_shape=sp_shape,
      dense=dense,
      name=name)
def sparse_reduce_sum(input_indices, input_values, input_shape,
                      reduction_axes, keep_dims=None, name=None):
  r"""Computes the sum of elements across dimensions of a SparseTensor.

  The sparse counterpart to `tf.reduce_sum()`, except that it returns a
  dense `Tensor` rather than a sparse one.  Reduces along the dimensions in
  `reduction_axes`; unless `keep_dims` is true, the rank drops by 1 per
  entry in `reduction_axes`, otherwise the reduced dimensions are retained
  with length 1.  With no entries in `reduction_axes`, all dimensions are
  reduced and a single-element tensor results.  Axes may be negative,
  interpreted per Python indexing rules.

  Args:
    input_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    input_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `input_indices`.
    input_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    reduction_axes: An `int32` `Tensor`; 1-D length-`K` vector of reduction
      axes.
    keep_dims: An optional `bool`, defaults to `False`; if true, retain
      reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    An `R-K`-D `Tensor` with the same type as `input_values`: the reduced
    Tensor.
  """
  return _op_def_lib.apply_op(
      "SparseReduceSum",
      input_indices=input_indices,
      input_values=input_values,
      input_shape=input_shape,
      reduction_axes=reduction_axes,
      keep_dims=keep_dims,
      name=name)
# Output field names and namedtuple wrapper for the multi-output
# SparseReduceSumSparse op.
_sparse_reduce_sum_sparse_outputs = ["output_indices", "output_values",
                                     "output_shape"]
_SparseReduceSumSparseOutput = _collections.namedtuple(
    "SparseReduceSumSparse", _sparse_reduce_sum_sparse_outputs)
def sparse_reduce_sum_sparse(input_indices, input_values, input_shape,
                             reduction_axes, keep_dims=None, name=None):
  r"""Computes the sum of elements across dimensions of a SparseTensor.

  The sparse counterpart to `tf.reduce_sum()`; in contrast to
  SparseReduceSum, this Op returns a SparseTensor.  Reduces along the
  dimensions in `reduction_axes`; unless `keep_dims` is true, the rank drops
  by 1 per entry in `reduction_axes`, otherwise the reduced dimensions are
  retained with length 1.  With no entries in `reduction_axes`, all
  dimensions are reduced and a single-element tensor results.  Axes may be
  negative, interpreted per Python indexing rules.

  Args:
    input_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    input_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `input_indices`.
    input_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    reduction_axes: An `int32` `Tensor`; 1-D length-`K` vector of reduction
      axes.
    keep_dims: An optional `bool`, defaults to `False`; if true, retain
      reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values,
    output_shape)` where the indices and shape are `int64` and the values
    have the same type as `input_values`.
  """
  return _SparseReduceSumSparseOutput._make(
      _op_def_lib.apply_op(
          "SparseReduceSumSparse",
          input_indices=input_indices,
          input_values=input_values,
          input_shape=input_shape,
          reduction_axes=reduction_axes,
          keep_dims=keep_dims,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseReorder op.
__sparse_reorder_outputs = ["output_indices", "output_values"]
_SparseReorderOutput = _collections.namedtuple(
    "SparseReorder", __sparse_reorder_outputs)
def _sparse_reorder(input_indices, input_values, input_shape, name=None):
  r"""Reorders a SparseTensor into the canonical, row-major ordering.

  By convention all sparse ops preserve the canonical ordering along
  increasing dimension number; ordering can only be violated by manual
  manipulation of the indices and values vectors.  Reordering never affects
  the SparseTensor's shape.  For a tensor of rank `R` with `N` non-empty
  values, `input_indices` has shape `[N, R]`, `input_values` has length `N`,
  and `input_shape` has length `R`.

  Args:
    input_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical
      ordering.
    input_values: A `Tensor`; 1-D, the `N` non-empty values corresponding
      to `input_indices`.
    input_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values)`:
    `output_indices` is an `int64` 2-D `N x R` matrix holding the same
    indices as `input_indices` but in canonical row-major ordering, and
    `output_values` (same type as `input_values`) holds the `N` non-empty
    values corresponding to `output_indices`.
  """
  return _SparseReorderOutput._make(
      _op_def_lib.apply_op(
          "SparseReorder",
          input_indices=input_indices,
          input_values=input_values,
          input_shape=input_shape,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseReshape op.
__sparse_reshape_outputs = ["output_indices", "output_shape"]
_SparseReshapeOutput = _collections.namedtuple(
    "SparseReshape", __sparse_reshape_outputs)
def _sparse_reshape(input_indices, input_shape, new_shape, name=None):
  r"""Reshapes a SparseTensor to represent values in a new dense shape.

  Same semantics as reshape on the represented dense tensor; the
  `input_indices` are recomputed for the requested `new_shape`.  At most one
  component of `new_shape` may be the special value -1, in which case that
  dimension's size is computed so the total dense size stays constant.  The
  number of dense elements implied by `new_shape` must equal the number
  implied by `input_shape`.  Reshaping does not affect the order of values.
  For an input of rank `R_in` with `N` non-empty values and `new_shape` of
  length `R_out`: `input_indices` has shape `[N, R_in]`, `input_shape` has
  length `R_in`, `output_indices` has shape `[N, R_out]`, and `output_shape`
  has length `R_out`.

  Args:
    input_indices: An `int64` `Tensor`; 2-D `N x R_in` matrix of indices of
      non-empty values in a SparseTensor.
    input_shape: An `int64` `Tensor`; 1-D `R_in` vector, the input
      SparseTensor's dense shape.
    new_shape: An `int64` `Tensor`; 1-D `R_out` vector, the requested new
      dense shape.
    name: A name for the operation (optional).

  Returns:
    A tuple of `int64` `Tensor` objects `(output_indices, output_shape)`:
    the 2-D `N x R_out` matrix of updated indices, and the 1-D `R_out`
    vector holding the full dense shape of the output SparseTensor — the
    same as `new_shape` but with any -1 dimensions filled in.
  """
  return _SparseReshapeOutput._make(
      _op_def_lib.apply_op(
          "SparseReshape",
          input_indices=input_indices,
          input_shape=input_shape,
          new_shape=new_shape,
          name=name))
def sparse_softmax(sp_indices, sp_values, sp_shape, name=None):
  r"""Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor of logical shape `[..., B, C]`
  (`N >= 2`) with indices in canonical lexicographic order.  Equivalent to
  applying `tf.nn.softmax()` to each innermost `[B, C]` logical submatrix,
  except that *implicitly zero elements do not participate*.  Concretely the
  algorithm is:

  (1) apply `tf.nn.softmax()` to a densified view of each innermost `[B, C]`
      submatrix along the size-C dimension;
  (2) mask out the original implicitly-zero locations;
  (3) renormalize the remaining elements.

  The resulting `SparseTensor` therefore has exactly the same non-zero
  indices and shape as the input.

  Args:
    sp_indices: An `int64` `Tensor`; 2-D `NNZ x R` matrix of indices of
      non-empty values in a SparseTensor, in canonical ordering.
    sp_values: A `Tensor` of type `float32` or `float64`; 1-D, the `NNZ`
      non-empty values corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` with the same type as `sp_values`: the `NNZ` values of
    the result `SparseTensor`.
  """
  return _op_def_lib.apply_op(
      "SparseSoftmax",
      sp_indices=sp_indices,
      sp_values=sp_values,
      sp_shape=sp_shape,
      name=name)
# Output field names and namedtuple wrapper for the multi-output
# SparseSparseMaximum op.
_sparse_sparse_maximum_outputs = ["output_indices", "output_values"]
_SparseSparseMaximumOutput = _collections.namedtuple(
    "SparseSparseMaximum", _sparse_sparse_maximum_outputs)
def sparse_sparse_maximum(a_indices, a_values, a_shape, b_indices, b_values,
                          b_shape, name=None):
  r"""Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape — no broadcasting.

  Args:
    a_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, in the canonical lexicographic
      ordering.
    a_values: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `uint8`, `int16`, `int8`, `uint16`, or `half`; 1-D, the `N` non-empty
      values corresponding to `a_indices`.
    a_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    b_indices: An `int64` `Tensor`; counterpart to `a_indices` for the
      other operand.
    b_values: A `Tensor` with the same type as `a_values`; counterpart to
      `a_values` for the other operand; must be of the same dtype.
    b_shape: An `int64` `Tensor`; counterpart to `a_shape` for the other
      operand; the two shapes must be equal.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values)`: the
    `int64` 2-D indices and (same type as `a_values`) 1-D values of the
    output SparseTensor.
  """
  return _SparseSparseMaximumOutput._make(
      _op_def_lib.apply_op(
          "SparseSparseMaximum",
          a_indices=a_indices,
          a_values=a_values,
          a_shape=a_shape,
          b_indices=b_indices,
          b_values=b_values,
          b_shape=b_shape,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseSparseMinimum op.
_sparse_sparse_minimum_outputs = ["output_indices", "output_values"]
_SparseSparseMinimumOutput = _collections.namedtuple(
    "SparseSparseMinimum", _sparse_sparse_minimum_outputs)
def sparse_sparse_minimum(a_indices, a_values, a_shape, b_indices, b_values,
                          b_shape, name=None):
  r"""Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape — no broadcasting.

  Args:
    a_indices: An `int64` `Tensor`; 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, in the canonical lexicographic
      ordering.
    a_values: A `Tensor` of type `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`,
      `quint8`, `qint32`, or `half`; 1-D, the `N` non-empty values
      corresponding to `a_indices`.
    a_shape: An `int64` `Tensor`; 1-D shape of the input SparseTensor.
    b_indices: An `int64` `Tensor`; counterpart to `a_indices` for the
      other operand.
    b_values: A `Tensor` with the same type as `a_values`; counterpart to
      `a_values` for the other operand; must be of the same dtype.
    b_shape: An `int64` `Tensor`; counterpart to `a_shape` for the other
      operand; the two shapes must be equal.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values)`: the
    `int64` 2-D indices and (same type as `a_values`) 1-D values of the
    output SparseTensor.
  """
  return _SparseSparseMinimumOutput._make(
      _op_def_lib.apply_op(
          "SparseSparseMinimum",
          a_indices=a_indices,
          a_values=a_values,
          a_shape=a_shape,
          b_indices=b_indices,
          b_values=b_values,
          b_shape=b_shape,
          name=name))
# Output field names and namedtuple wrapper for the multi-output
# SparseSplit op.
__sparse_split_outputs = ["output_indices", "output_values", "output_shape"]
_SparseSplitOutput = _collections.namedtuple(
    "SparseSplit", __sparse_split_outputs)
def _sparse_split(split_dim, indices, values, shape, num_split, name=None):
r"""Split a `SparseTensor` into `num_split` tensors along one dimension.
If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices
`[0 : shape[split_dim] % num_split]` gets one extra dimension.
For example, if `split_dim = 1` and `num_split = 2` and the input is
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] = shape = [2, 4]
[ a ]
[b c ]
output_tensor[1] = shape = [2, 3]
[ d e ]
[ ]
Args:
split_dim: A `Tensor` of type `int64`.
0-D. The dimension along which to split. Must be in the range
`[0, rank(shape))`.
indices: A `Tensor` of type `int64`.
2-D tensor represents the indices of the sparse tensor.
values: A `Tensor`. 1-D tensor represents the values of the sparse tensor.
shape: A `Tensor` of type `int64`.
1-D. tensor represents the shape of the sparse tensor.
output indices: A list of 1-D tensors represents the indices of the output
sparse tensors.
num_split: An `int` that is `>= 1`. The number of ways to split.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output_indices, output_values, output_shape).
output_indices: A list of `num_split` `Tensor` objects with type `int64`.
output_values: A list of `num_split` `Tensor` objects with the same type as `values`. A list of 1-D tensors represents the values of the output sparse
tensors.
output_shape: A list of `num_split` `Tensor` objects with type `int64`. A list of 1-D tensors represents the shape of the output sparse
tensors.
"""
result = _op_def_lib.apply_op("SparseSplit", split_dim=split_dim,
indices=indices, values=values, shape=shape,
num_split=num_split, name=name)
return _SparseSplitOutput._make(result)
def _sparse_tensor_dense_add(a_indices, a_values, a_shape, b, name=None):
  r"""Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.

  This Op does not require `a_indices` be sorted in standard lexicographic order.

  Args:
    a_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
    a_values: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`,
      `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
    a_shape: A `Tensor`. Must have the same type as `a_indices`.
      1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
    b: A `Tensor`. Must have the same type as `a_values`.
      `ndims`-D Tensor. With shape `a_shape`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a_values`.
  """
  # Single-output op: hand the result straight back to the caller.
  return _op_def_lib.apply_op(
      "SparseTensorDenseAdd",
      a_indices=a_indices,
      a_values=a_values,
      a_shape=a_shape,
      b=b,
      name=name)
def _sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b,
                                 adjoint_a=None, adjoint_b=None, name=None):
  r"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of A. However, the following
  input format is recommended for optimal behavior:

  if adjoint_a == false:
    A should be sorted in lexicographically increasing order. Use SparseReorder
    if you're not sure.
  if adjoint_a == true:
    A should be sorted in order of increasing dimension 1 (i.e., "column major"
    order instead of "row major" order).

  Args:
    a_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
    a_values: A `Tensor`.
      1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.
    a_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.
    b: A `Tensor`. Must have the same type as `a_values`.
      2-D. A dense Matrix.
    adjoint_a: An optional `bool`. Defaults to `False`.
      Use the adjoint of A in the matrix multiply. If A is complex, this
      is transpose(conj(A)). Otherwise it's transpose(A).
    adjoint_b: An optional `bool`. Defaults to `False`.
      Use the adjoint of B in the matrix multiply. If B is complex, this
      is transpose(conj(B)). Otherwise it's transpose(B).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a_values`.
  """
  # Single-output op: return the product tensor directly.
  return _op_def_lib.apply_op(
      "SparseTensorDenseMatMul",
      a_indices=a_indices,
      a_values=a_values,
      a_shape=a_shape,
      b=b,
      adjoint_a=adjoint_a,
      adjoint_b=adjoint_b,
      name=name)
def _sparse_to_dense(sparse_indices, output_shape, sparse_values,
                     default_value, validate_indices=None, name=None):
  r"""Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```prettyprint
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`. If `sparse_values` is a
  scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is true, these properties
  are checked during execution.

  Args:
    sparse_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete
      index where `sparse_values[i]` will be placed.
    output_shape: A `Tensor`. Must have the same type as `sparse_indices`.
      1-D. Shape of the dense output tensor.
    sparse_values: A `Tensor`.
      1-D. Values corresponding to each row of `sparse_indices`,
      or a scalar value to be used for all sparse indices.
    default_value: A `Tensor`. Must have the same type as `sparse_values`.
      Scalar value to set for indices not specified in `sparse_indices`.
    validate_indices: An optional `bool`. Defaults to `True`.
      If true, indices are checked to make sure they are sorted in
      lexicographic order and that there are no repeats.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sparse_values`.
    Dense output tensor of shape `output_shape`.
  """
  # Single-output op: return the dense tensor directly.
  return _op_def_lib.apply_op(
      "SparseToDense",
      sparse_indices=sparse_indices,
      output_shape=output_shape,
      sparse_values=sparse_values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)
__take_many_sparse_from_tensors_map_outputs = ["sparse_indices",
                                               "sparse_values", "sparse_shape"]
_TakeManySparseFromTensorsMapOutput = _collections.namedtuple(
    "TakeManySparseFromTensorsMap",
    __take_many_sparse_from_tensors_map_outputs)


def _take_many_sparse_from_tensors_map(sparse_handles, dtype, container=None,
                                       shared_name=None, name=None):
  r"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.

  The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
  `N` is the minibatch size and the rows correspond to the output handles of
  `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
  original `SparseTensor` objects that went into the given input ops must all
  match. When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects
  (they have been concatenated along a new row dimension on the left).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the handles represent an input, which is a `[2, 3]` matrix
  representing two original `SparseTensor` objects:

  ```
  index = [ 0]
          [10]
          [20]
  values = [1, 2, 3]
  shape = [50]
  ```

  and

  ```
  index = [ 2]
          [10]
  values = [4, 5]
  shape = [30]
  ```

  then the final `SparseTensor` will be:

  ```
  index = [0  0]
          [0 10]
          [0 20]
          [1  2]
          [1 10]
  values = [1, 2, 3, 4, 5]
  shape = [2 50]
  ```

  Args:
    sparse_handles: A `Tensor` of type `int64`.
      1-D, The `N` serialized `SparseTensor` objects.
      Shape: `[N]`.
    dtype: A `tf.DType`.
      The `dtype` of the `SparseTensor` objects stored in the
      `SparseTensorsMap`.
    container: An optional `string`. Defaults to `""`.
      The container name for the `SparseTensorsMap` read by this op.
    shared_name: An optional `string`. Defaults to `""`.
      The shared name for the `SparseTensorsMap` read by this op.
      It should not be blank; rather the `shared_name` or unique Operation name
      of the Op that created the original `SparseTensorsMap` should be used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shape).
    sparse_indices: A `Tensor` of type `int64`. 2-D. The `indices` of the minibatch `SparseTensor`.
    sparse_values: A `Tensor` of type `dtype`. 1-D. The `values` of the minibatch `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`. 1-D. The `shape` of the minibatch `SparseTensor`.
  """
  # Multi-output op: wrap the results in the named tuple declared above.
  op_outputs = _op_def_lib.apply_op(
      "TakeManySparseFromTensorsMap",
      sparse_handles=sparse_handles,
      dtype=dtype,
      container=container,
      shared_name=shared_name,
      name=name)
  return _TakeManySparseFromTensorsMapOutput._make(op_outputs)
def _InitOpDefLibrary():
  """Parse the embedded op definitions and build an `OpDefLibrary`.

  Reads the protobuf text attached as `_InitOpDefLibrary.op_list_ascii`,
  registers the parsed ops globally, and returns a library configured
  with them.
  """
  parsed_ops = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, parsed_ops)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
_InitOpDefLibrary.op_list_ascii = """op {
name: "AddManySparseToTensorsMap"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "sparse_handles"
type: DT_INT64
}
attr {
name: "T"
type: "type"
}
attr {
name: "container"
type: "string"
default_value {
s: ""
}
}
attr {
name: "shared_name"
type: "string"
default_value {
s: ""
}
}
is_stateful: true
}
op {
name: "AddSparseToTensorsMap"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "sparse_handle"
type: DT_INT64
}
attr {
name: "T"
type: "type"
}
attr {
name: "container"
type: "string"
default_value {
s: ""
}
}
attr {
name: "shared_name"
type: "string"
default_value {
s: ""
}
}
is_stateful: true
}
op {
name: "DeserializeManySparse"
input_arg {
name: "serialized_sparse"
type: DT_STRING
}
output_arg {
name: "sparse_indices"
type: DT_INT64
}
output_arg {
name: "sparse_values"
type_attr: "dtype"
}
output_arg {
name: "sparse_shape"
type: DT_INT64
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "SerializeManySparse"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "serialized_sparse"
type: DT_STRING
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SerializeSparse"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "serialized_sparse"
type: DT_STRING
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseAdd"
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "b_values"
type_attr: "T"
}
input_arg {
name: "b_shape"
type: DT_INT64
}
input_arg {
name: "thresh"
type_attr: "Treal"
}
output_arg {
name: "sum_indices"
type: DT_INT64
}
output_arg {
name: "sum_values"
type_attr: "T"
}
output_arg {
name: "sum_shape"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Treal"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_UINT8
type: DT_INT16
type: DT_INT8
type: DT_UINT16
type: DT_HALF
}
}
}
}
op {
name: "SparseAddGrad"
input_arg {
name: "backprop_val_grad"
type_attr: "T"
}
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "sum_indices"
type: DT_INT64
}
output_arg {
name: "a_val_grad"
type_attr: "T"
}
output_arg {
name: "b_val_grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseConcat"
input_arg {
name: "indices"
type: DT_INT64
number_attr: "N"
}
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
input_arg {
name: "shapes"
type: DT_INT64
number_attr: "N"
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
output_arg {
name: "output_shape"
type: DT_INT64
}
attr {
name: "concat_dim"
type: "int"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseCross"
input_arg {
name: "indices"
type: DT_INT64
number_attr: "N"
}
input_arg {
name: "values"
type_list_attr: "sparse_types"
}
input_arg {
name: "shapes"
type: DT_INT64
number_attr: "N"
}
input_arg {
name: "dense_inputs"
type_list_attr: "dense_types"
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "out_type"
}
output_arg {
name: "output_shape"
type: DT_INT64
}
attr {
name: "N"
type: "int"
has_minimum: true
}
attr {
name: "hashed_output"
type: "bool"
}
attr {
name: "num_buckets"
type: "int"
has_minimum: true
}
attr {
name: "hash_key"
type: "int"
}
attr {
name: "sparse_types"
type: "list(type)"
has_minimum: true
allowed_values {
list {
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "dense_types"
type: "list(type)"
has_minimum: true
allowed_values {
list {
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "out_type"
type: "type"
allowed_values {
list {
type: DT_INT64
type: DT_STRING
}
}
}
attr {
name: "internal_type"
type: "type"
allowed_values {
list {
type: DT_INT64
type: DT_STRING
}
}
}
}
op {
name: "SparseDenseCwiseAdd"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
input_arg {
name: "dense"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseDenseCwiseDiv"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
input_arg {
name: "dense"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseDenseCwiseMul"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
input_arg {
name: "dense"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseReduceSum"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_values"
type_attr: "T"
}
input_arg {
name: "input_shape"
type: DT_INT64
}
input_arg {
name: "reduction_axes"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "keep_dims"
type: "bool"
default_value {
b: false
}
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseReduceSumSparse"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_values"
type_attr: "T"
}
input_arg {
name: "input_shape"
type: DT_INT64
}
input_arg {
name: "reduction_axes"
type: DT_INT32
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
output_arg {
name: "output_shape"
type: DT_INT64
}
attr {
name: "keep_dims"
type: "bool"
default_value {
b: false
}
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseReorder"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_values"
type_attr: "T"
}
input_arg {
name: "input_shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseReshape"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_shape"
type: DT_INT64
}
input_arg {
name: "new_shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_shape"
type: DT_INT64
}
}
op {
name: "SparseSoftmax"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
}
op {
name: "SparseSparseMaximum"
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "b_values"
type_attr: "T"
}
input_arg {
name: "b_shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_UINT8
type: DT_INT16
type: DT_INT8
type: DT_UINT16
type: DT_HALF
}
}
}
}
op {
name: "SparseSparseMinimum"
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "b_values"
type_attr: "T"
}
input_arg {
name: "b_shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseSplit"
input_arg {
name: "split_dim"
type: DT_INT64
}
input_arg {
name: "indices"
type: DT_INT64
}
input_arg {
name: "values"
type_attr: "T"
}
input_arg {
name: "shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
number_attr: "num_split"
}
output_arg {
name: "output_values"
type_attr: "T"
number_attr: "num_split"
}
output_arg {
name: "output_shape"
type: DT_INT64
number_attr: "num_split"
}
attr {
name: "num_split"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseTensorDenseAdd"
input_arg {
name: "a_indices"
type_attr: "Tindices"
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type_attr: "Tindices"
}
input_arg {
name: "b"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "SparseTensorDenseMatMul"
input_arg {
name: "a_indices"
type_attr: "Tindices"
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b"
type_attr: "T"
}
output_arg {
name: "product"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
default_value {
type: DT_INT64
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "adjoint_a"
type: "bool"
default_value {
b: false
}
}
attr {
name: "adjoint_b"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseToDense"
input_arg {
name: "sparse_indices"
type_attr: "Tindices"
}
input_arg {
name: "output_shape"
type_attr: "Tindices"
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "default_value"
type_attr: "T"
}
output_arg {
name: "dense"
type_attr: "T"
}
attr {
name: "validate_indices"
type: "bool"
default_value {
b: true
}
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "TakeManySparseFromTensorsMap"
input_arg {
name: "sparse_handles"
type: DT_INT64
}
output_arg {
name: "sparse_indices"
type: DT_INT64
}
output_arg {
name: "sparse_values"
type_attr: "dtype"
}
output_arg {
name: "sparse_shape"
type: DT_INT64
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "container"
type: "string"
default_value {
s: ""
}
}
attr {
name: "shared_name"
type: "string"
default_value {
s: ""
}
}
is_stateful: true
}
"""
# Module-level OpDefLibrary shared by every op wrapper function above.
_op_def_lib = _InitOpDefLibrary()
|
GreatFruitOmsk/nativeconfig
|
refs/heads/master
|
test/configs/__init__.py
|
1
|
from abc import ABC, abstractmethod
import json
import os
from unittest.mock import MagicMock
from nativeconfig.options import StringOption, IntOption, ArrayOption, DictOption, ValueSource
from nativeconfig.exceptions import DeserializationError, ValidationError
class ConfigMixin(ABC):
    """Backend-agnostic test suite for config implementations.

    Concrete test cases mix this in alongside ``unittest.TestCase`` and set
    ``CONFIG_TYPE`` to the config class under test; every test then defines a
    throwaway subclass of that type.
    """
    # Concrete config class under test; must be overridden by subclasses.
    CONFIG_TYPE = None
def tearDown(self):
os.environ.pop('FIRST_NAME', None)
super().tearDown()
    def test_exception_is_raised_for_duplicate_options(self):
        """Declaring two options that share one backend name must be rejected."""
        with self.assertRaises(AttributeError):
            class MyConfig(self.CONFIG_TYPE):
                first_name = StringOption('Name')
                last_name = StringOption('Name')
            # Only reached if the duplicate was not rejected at class creation.
            MyConfig.get_instance()
    def test_default_values_are_not_written_to_config(self):
        """An option's default must live in code only, never in the backend."""
        class MyConfig(self.CONFIG_TYPE):
            first_name = StringOption('FirstName', default='Ilya')
        # After deleting any stored value, a direct backend read must yield
        # None, proving the default was never persisted.
        MyConfig.get_instance().del_value_for_option_name('FirstName')
        self.assertEqual(MyConfig.get_instance().get_value('FirstName'), None)
def test_get_value_for_option_name_returns_python(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
self.assertEqual(c.get_value_for_option_name('FirstName'), 'Ilya')
def test_get_raw_value_for_option_name_returns_raw(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
self.assertEqual(c.option_for_name('FirstName').deserialize(c.get_raw_value_for_option_name('FirstName')), 'Ilya')
def test_get_json_value_for_option_name_returns_json(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
self.assertEqual(json.loads(c.get_json_value_for_option_name('FirstName')), 'Ilya')
def test_get_value_for_option_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
self.assertEqual(c.get_value_for_option_name('LastName'), None)
def test_get_raw_value_for_option_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.get_raw_value_for_option_name('LastName')
def test_get_json_value_for_option_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.get_json_value_for_option_name('LastName')
def test_set_value_for_option_name_accepts_python(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.set_value_for_option_name('FirstName', 'Artem')
self.assertEqual(c.first_name, 'Artem')
def test_set_raw_value_for_option_name_accepts_raw(self):
class MyConfig(self.CONFIG_TYPE):
age = IntOption('Age', default=42)
c = MyConfig.get_instance()
c.set_raw_value_for_option_name('Age', c.option_for_name('Age').serialize(9000))
self.assertEqual(c.age, 9000)
with self.assertRaises(DeserializationError):
c.set_raw_value_for_option_name('Age', 'Artem')
def test_set_json_value_for_option_name_accepts_json(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.set_json_value_for_option_name('FirstName', json.dumps('Artem'))
self.assertEqual(c.first_name, 'Artem')
with self.assertRaises(DeserializationError):
c.set_json_value_for_option_name('FirstName', 'Artem')
def test_set_None_value_for_option_name_deletes_value(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.first_name = 'Artem'
self.assertEqual(c.get_value_for_option_name('FirstName'), 'Artem')
c.set_value_for_option_name('FirstName', None)
self.assertEqual(c.get_value('FirstName'), None)
def test_set_null_json_value_for_option_name_deletes_value(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.first_name = 'Artem'
self.assertEqual(c.get_json_value_for_option_name('FirstName'), '"Artem"')
c.set_json_value_for_option_name('FirstName', json.dumps(None))
self.assertEqual(c.get_value('FirstName'), None)
def test_set_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_value_for_option_name('LastName', 'Kulakov')
def test_set_raw_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_json_value_for_option_name('LastName', 'Kulakov')
def test_set_json_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_json_value_for_option_name('LastName', '"Kulakov"')
def test_set_one_shot_value_for_option_name_accepts_python(self):
class MyConfig(self.CONFIG_TYPE):
age = IntOption('Age', default=42)
c = MyConfig.get_instance()
c.set_one_shot_value_for_option_name('Age', 9000)
self.assertEqual(c.age, 9000)
with self.assertRaises(ValidationError):
c.set_one_shot_value_for_option_name('Age', '9000')
def test_set_one_shot_raw_value_for_option_name_accepts_raw(self):
class MyConfig(self.CONFIG_TYPE):
age = IntOption('Age', default=42)
c = MyConfig.get_instance()
c.set_one_shot_raw_value_for_option_name('Age', c.option_for_name('Age').serialize(9000))
self.assertEqual(c.age, 9000)
with self.assertRaises(DeserializationError):
c.set_one_shot_raw_value_for_option_name('Age', 'fortytwo')
def test_set_one_shot_json_value_for_option_name_accepts_json(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.set_one_shot_json_value_for_option_name('FirstName', json.dumps('Artem'))
self.assertEqual(c.first_name, 'Artem')
with self.assertRaises(DeserializationError):
c.set_one_shot_json_value_for_option_name('FirstName', 'Artem')
def test_set_one_shot_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_one_shot_value_for_option_name('LastName', 'Kulakov')
def test_set_one_shot_raw_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_one_shot_raw_value_for_option_name('LastName', c.option_for_name('FirstName').serialize('Kulakov'))
def test_set_one_shot_json_value_for_option_name_raises_key_error_if_option_not_found(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
with self.assertRaises(KeyError):
c.set_one_shot_json_value_for_option_name('LastName', '"Kulakov"')
def test_one_shot_value_overrides_config(self):
class MyConfig(self.CONFIG_TYPE):
first_name = StringOption('FirstName', default='Ilya')
c = MyConfig.get_instance()
c.set_value_for_option_name('FirstName', 'Artem')
self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)
c.set_one_shot_value_for_option_name('FirstName', 'Ivan')
self.assertEqual(c.first_name, 'Ivan')
self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
# --- One-shot values: a one-shot setter shadows whatever is stored in the
# --- config backend, but environment-provided values always win over it.

def test_one_shot_raw_value_overrides_config(self):
    # One-shot raw value shadows a previously stored raw value.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)
    c.set_one_shot_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Ivan'))
    self.assertEqual(c.first_name, 'Ivan')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)

def test_one_shot_json_value_overrides_config(self):
    # Same as above, via the JSON accessor pair.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_json_value_for_option_name('FirstName', json.dumps('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)
    c.set_one_shot_json_value_for_option_name('FirstName', json.dumps('Ivan'))
    self.assertEqual(c.first_name, 'Ivan')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)

def test_one_shot_value_does_not_override_env(self):
    # An env-provided value takes precedence over a one-shot python value.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', env_name='FIRST_NAME')
    c = MyConfig.get_instance()
    os.environ['FIRST_NAME'] = json.dumps('Ivan')
    c.set_one_shot_value_for_option_name('FirstName', 'Artem')
    self.assertEqual(c.first_name, 'Ivan')

def test_one_shot_raw_value_does_not_override_env(self):
    # An env-provided value takes precedence over a one-shot raw value.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', env_name='FIRST_NAME')
    c = MyConfig.get_instance()
    os.environ['FIRST_NAME'] = json.dumps('Ivan')
    c.set_one_shot_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Artem'))
    self.assertEqual(c.first_name, 'Ivan')

def test_one_shot_json_value_does_not_override_env(self):
    # An env-provided value takes precedence over a one-shot JSON value.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', env_name='FIRST_NAME')
    c = MyConfig.get_instance()
    os.environ['FIRST_NAME'] = json.dumps('Ivan')
    c.set_one_shot_json_value_for_option_name('FirstName', json.dumps('Artem'))
    self.assertEqual(c.first_name, 'Ivan')
# --- A one-shot value is discarded as soon as the option is plainly set
# --- (falls back to the newly set value) or deleted (falls back to default).

def test_one_shot_value_reset_by_set(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_value_for_option_name('FirstName', 'Artem')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    c.first_name = 'Ivan'
    self.assertEqual(c.first_name, 'Ivan')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)

def test_one_shot_raw_value_reset_by_set(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    c.first_name = 'Ivan'
    self.assertEqual(c.first_name, 'Ivan')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)

def test_one_shot_json_value_reset_by_set(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_json_value_for_option_name('FirstName', json.dumps('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    c.first_name = 'Ivan'
    self.assertEqual(c.first_name, 'Ivan')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)

def test_one_shot_value_reset_by_del(self):
    # After delete, the default value ('Ilya') shows through again.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_value_for_option_name('FirstName', 'Artem')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    del c.first_name
    self.assertEqual(c.first_name, 'Ilya')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)

def test_one_shot_raw_value_reset_by_del(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    del c.first_name
    self.assertEqual(c.first_name, 'Ilya')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)

def test_one_shot_json_value_reset_by_del(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.set_one_shot_json_value_for_option_name('FirstName', json.dumps('Artem'))
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, True)
    self.assertEqual(c.first_name, 'Artem')
    del c.first_name
    self.assertEqual(c.first_name, 'Ilya')
    self.assertEqual(c.option_for_name('FirstName')._is_one_shot_value_set, False)
# --- A one-shot None (or JSON null) masks the stored value and exposes
# --- the option's default instead.

def test_one_shot_value_set_to_None_forces_default(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.first_name = 'Artem'
    self.assertEqual(c.first_name, 'Artem')
    c.set_one_shot_value_for_option_name('FirstName', None)
    self.assertEqual(c.first_name, 'Ilya')

def test_one_shot_json_value_set_to_null_forces_default(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.first_name = 'Artem'
    self.assertEqual(c.first_name, 'Artem')
    c.set_one_shot_json_value_for_option_name('FirstName', json.dumps(None))
    self.assertEqual(c.first_name, 'Ilya')
# --- del_value_for_option_name removes the stored value; unknown option
# --- names raise KeyError.

def test_del_value_for_option_name_deletes_value(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.first_name = 'Ivan'
    c.del_value_for_option_name('FirstName')
    self.assertEqual(c.get_value('FirstName'), None)

def test_del_value_for_option_name_raises_warn_if_option_not_found(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    with self.assertRaises(KeyError):
        c.del_value_for_option_name('LastName')
# --- validate_*_for_option_name: accepts well-formed python/raw/JSON
# --- values, raises ValidationError for invalid values and
# --- DeserializationError for malformed serialized input.

def test_validate_value_for_option_name_accepts_python(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.validate_value_for_option_name('FirstName', 'Artem')

def test_validate_raw_value_for_option_name_accepts_raw(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.validate_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Artem'))

def test_validate_json_value_for_option_name_accepts_json(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    c.validate_json_value_for_option_name('FirstName', json.dumps('Artem'))

def test_validate_value_for_option_name_raises_validation_error_for_invalid_value(self):
    # 42 is not a string, so a StringOption must reject it.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    with self.assertRaises(ValidationError):
        c.validate_value_for_option_name('FirstName', 42)

def test_validate_raw_value_for_option_name_raises_validation_error_for_invalid_value(self):
    # 'Ivan' is outside the declared choices.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', choices=['Ilya', 'Artem'])
    c = MyConfig.get_instance()
    with self.assertRaises(ValidationError):
        c.validate_raw_value_for_option_name('FirstName', c.option_for_name('FirstName').serialize('Ivan'))

def test_validate_json_value_for_option_name_raises_validation_error_for_invalid_value(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', choices=['Ilya', 'Artem'])
    c = MyConfig.get_instance()
    with self.assertRaises(ValidationError):
        c.validate_json_value_for_option_name('FirstName', json.dumps('Ivan'))

def test_validate_raw_value_for_option_name_raises_deserialization_error_for_malformed_raw(self):
    # 'fortytwo' cannot be deserialized into an int at all.
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig.get_instance()
    with self.assertRaises(DeserializationError):
        c.validate_raw_value_for_option_name('Age', 'fortytwo')

def test_validate_json_value_for_option_name_raises_deserialization_error_for_malformed_json(self):
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig.get_instance()
    with self.assertRaises(DeserializationError):
        c.validate_json_value_for_option_name('Age', '"fortytwo"')
# --- Enumeration APIs yield (name, (value, source)) pairs in python, raw
# --- and JSON representations; snapshot serializes the whole config.
# NOTE(review): the loops only assert when option_name == 'Age2', which the
# declared config never defines — these bodies look unreachable; verify
# whether 'Age' was intended.

def test_items_enumerates_values(self):
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig.get_instance()
    for option_name, (python_value, value_source) in c.python_items():
        if option_name == 'Age2':
            self.assertEqual((option_name, (python_value, value_source)), ('Age2', (42, ValueSource.default)))

def test_raw_items_enumerates_raw(self):
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig.get_instance()
    for option_name, (raw_value, value_source) in c.raw_items():
        if option_name == 'Age2':
            self.assertEqual((option_name, (raw_value, value_source)), ('Age2', ('42', ValueSource.default)))

def test_json_items_enumerates_raw(self):
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig.get_instance()
    for option_name, (json_value, value_source) in c.json_items():
        if option_name == 'Age2':
            self.assertEqual((option_name, (json_value, value_source)), ('Age2', ('42', ValueSource.default)))

def test_snapshot_returns_json_dict(self):
    # Full snapshot includes ConfigVersion; a filtered snapshot does not.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
        last_name = StringOption('LastName', default='Kulakov')
    c = MyConfig.get_instance()
    self.assertEqual(json.loads(c.snapshot()), {'ConfigVersion': '1.0', 'FirstName': 'Ilya', 'LastName': 'Kulakov'})
    self.assertEqual(json.loads(c.snapshot(['FirstName'])), {'FirstName': 'Ilya'})
# --- option_for_name lookup and the resolve_value fallback hook.

def test_option_for_name_returns_property(self):
    # The option object is the class-level property descriptor itself.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    self.assertEqual(c.option_for_name('FirstName'), getattr(MyConfig, 'first_name'))

def test_option_for_name_returns_None_if_option_not_found(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = MyConfig.get_instance()
    self.assertEqual(c.option_for_name('LastName'), None)

def test_resolve_value_is_called_to_resolve_broken_value(self):
    # Reading an option whose stored value cannot be deserialized must
    # delegate to resolve_value with the error, name and broken value.
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.resolve_value = MagicMock()
    c.set_value('LuckyNumber', 'NotANumber')
    c.lucky_number
    self.assertEqual(c.resolve_value.call_count, 1)
    self.assertIsInstance(c.resolve_value.call_args[0][0][1], DeserializationError)
    self.assertEqual(c.resolve_value.call_args[0][1], 'LuckyNumber')
    self.assertEqual(c.resolve_value.call_args[0][2], 'NotANumber')
# --- get_value/set_value/del_value operate on the raw (serialized string)
# --- representation; setting None is equivalent to deleting.

def test_get_value_returns_raw_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.lucky_number = 1
    self.assertEqual(c.get_value('LuckyNumber'), '1')

def test_get_value_returns_None_if_option_does_not_exist(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.lucky_number = 1
    self.assertEqual(c.get_value('UnluckyNumber'), None)

def test_set_value_accepts_raw_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.set_value('LuckyNumber', '2')
    self.assertEqual(c.lucky_number, 2)

def test_set_None_value_deletes_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.lucky_number = 10
    self.assertEqual(c.get_value('LuckyNumber'), '10')
    c.set_value('LuckyNumber', None)
    self.assertEqual(c.get_value('LuckyNumber'), None)

def test_del_value_deletes_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber')
    c = MyConfig.get_instance()
    c.lucky_number = 1
    c.del_value('LuckyNumber')
    self.assertEqual(c.get_value('LuckyNumber'), None)
# --- Array and dict accessors mirror the scalar API: get returns the
# --- container type (or None if absent), set accepts any iterable/dict of
# --- raw element values, and setting None deletes.

def test_get_array_value_returns_list(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.lucky_numbers = [7, 42]
    self.assertIsInstance(c.get_array_value('LuckyNumber'), list)

def test_get_array_value_returns_None_if_option_does_not_exist(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    self.assertEqual(c.get_array_value('FirstName'), None)

def test_set_array_value_accepts_iterable(self):
    # Both lists and tuples of raw element values are accepted.
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.set_array_value('LuckyNumber', ['7', '42'])
    self.assertEqual(c.lucky_numbers, [7, 42])
    c.set_array_value('LuckyNumber', ('7', '42'))
    self.assertEqual(c.lucky_numbers, [7, 42])

def test_set_None_array_value_deletes_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.lucky_numbers = [7, 42]
    self.assertEqual(c.lucky_numbers, [7, 42])
    c.set_array_value('LuckyNumber', None)
    self.assertEqual(c.lucky_numbers, None)

def test_get_dict_value_returns_dict(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.lucky_numbers = {'a': 1}
    self.assertIsInstance(c.get_dict_value('LuckyNumber'), dict)

def test_remove_fields_from_dict(self):
    # Re-assigning a smaller dict must drop keys absent from the new value.
    class MyConfig(self.CONFIG_TYPE):
        test_dict = DictOption('TestDict', value_option=StringOption('_'))
    c = MyConfig.get_instance()
    c.test_dict = {"key1": "value1", "key2": "value2"}
    c.test_dict = {"key2": "value2"}
    self.assertEqual(c.test_dict, {"key2": "value2"})

def test_get_dict_value_returns_None_if_option_does_not_exist(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    self.assertEqual(c.get_dict_value('FirstName'), None)

def test_set_dict_value_accepts_dict(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.set_dict_value('LuckyNumber', {'a': '1'})
    self.assertEqual(c.lucky_numbers, {'a': 1})

def test_set_None_dict_value_deletes_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'))
    c = MyConfig.get_instance()
    c.set_dict_value('LuckyNumber', {'a': '1'})
    self.assertEqual(c.lucky_numbers, {'a': 1})
    c.set_dict_value('LuckyNumber', None)
    self.assertEqual(c.lucky_numbers, None)
# --- Defaults, option ordering under inheritance, and class-definition
# --- diagnostics.

def test_default_value_is_used_when_no_value_in_config(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber', default=42)
    c = MyConfig.get_instance()
    c.del_value_for_option_name('LuckyNumber')
    self.assertEqual(c.lucky_number, 42)

def test_overriding_base_option_moves_it_to_the_end(self):
    # Redefining an inherited option should change its position in
    # _ordered_options (it is appended, not kept in place).
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber', default=42)
        first_name = StringOption('FirstName')
        last_name = StringOption('LastName')
    class SubMyConfig(MyConfig):
        lucky_number = IntOption('LuckyNumber', default=9000)
    old_index = 0
    for i, option in enumerate(MyConfig._ordered_options):
        if option._name == 'LuckyNumber':
            old_index = i
            break
    new_index = 0
    for i, option in enumerate(SubMyConfig._ordered_options):
        if option._name == 'LuckyNumber':
            new_index = i
            break
    self.assertNotEqual(old_index, new_index)

def test_custom_properties_are_allowed(self):
    # A plain @property on the config class must not break metaclass setup.
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber', default=42)
        @property
        def custom_property(self):
            return '9000'
    c = MyConfig.get_instance()

def test_ordered_options_supports_multiple_inheritance(self):
    # Options contributed by non-config mixin bases are still collected.
    class MyConfigMixin1:
        first_name = StringOption('FirstName', default='Ilya')
    class MyConfigMixin2:
        last_name = StringOption('LastName', default='Kulakov')
    class MyConfig(self.CONFIG_TYPE, MyConfigMixin1, MyConfigMixin2):
        age = IntOption('Age', default=42)
    self.assertIn(MyConfigMixin1.first_name._name, [o._name for o in MyConfig._ordered_options])
    self.assertIn(MyConfigMixin2.last_name, MyConfig._ordered_options)
    self.assertIn(MyConfig.age, MyConfig._ordered_options)

def test_overriding_option_type_raises_warn_if_not_subclass(self):
    # Overriding with an incompatible option type warns at class creation.
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    with self.assertWarns(UserWarning):
        class MyConfig2(MyConfig):
            first_name = IntOption('FirstName', default=42)
# --- reset() wipes stored values; with allow_cache=True each accessor goes
# --- through its *_cache_free counterpart at most once for repeated
# --- identical operations.

def test_reset_deletes_from_config(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_number = IntOption('LuckyNumber', default=42)
    c = MyConfig.get_instance()
    c.lucky_number = 9000
    self.assertEqual(c.get_value('LuckyNumber'), '9000')
    c.reset()
    self.assertEqual(c.get_value('LuckyNumber'), None)

def test_get_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', allow_cache=True)
    c = MyConfig.get_instance()
    c.get_value_cache_free = MagicMock(return_value='Ilya')
    c.first_name
    c.first_name
    c.first_name
    self.assertLessEqual(c.get_value_cache_free.call_count, 1)

def test_set_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', allow_cache=True)
    c = MyConfig.get_instance()
    c.set_value_cache_free = MagicMock(return_value='Ilya')
    c.first_name = 'Ilya'
    c.first_name = 'Ilya'
    c.first_name = 'Ilya'
    self.assertLessEqual(c.set_value_cache_free.call_count, 1)

def test_get_array_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'), allow_cache=True)
    c = MyConfig.get_instance()
    c.get_array_value_cache_free = MagicMock(return_value=[1, 2, 3])
    c.lucky_numbers
    c.lucky_numbers
    c.lucky_numbers
    self.assertLessEqual(c.get_array_value_cache_free.call_count, 1)

def test_set_array_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'), allow_cache=True)
    c = MyConfig.get_instance()
    c.set_array_value_cache_free = MagicMock(return_value=[1, 2, 3])
    c.lucky_numbers = [1, 2, 3]
    c.lucky_numbers = [1, 2, 3]
    c.lucky_numbers = [1, 2, 3]
    self.assertLessEqual(c.set_array_value_cache_free.call_count, 1)

def test_get_dict_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'), allow_cache=True)
    c = MyConfig.get_instance()
    c.get_dict_value_cache_free = MagicMock(return_value={'a': 1, 'b': 2, 'c': 3})
    c.lucky_numbers
    c.lucky_numbers
    c.lucky_numbers
    self.assertLessEqual(c.get_dict_value_cache_free.call_count, 1)

def test_set_dict_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'), allow_cache=True)
    c = MyConfig.get_instance()
    c.set_dict_value_cache_free = MagicMock(return_value={'a': 1, 'b': 2, 'c': 3})
    c.lucky_numbers = {'a': 1, 'b': 2, 'c': 3}
    c.lucky_numbers = {'a': 1, 'b': 2, 'c': 3}
    c.lucky_numbers = {'a': 1, 'b': 2, 'c': 3}
    self.assertLessEqual(c.set_dict_value_cache_free.call_count, 1)

def test_del_value_is_cached(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'), allow_cache=True)
    c = MyConfig.get_instance()
    c.del_value_cache_free = MagicMock()
    del c.lucky_numbers
    del c.lucky_numbers
    del c.lucky_numbers
    self.assertLessEqual(c.del_value_cache_free.call_count, 1)
# --- The cache is write-through: distinct successive writes (and deletes)
# --- must reach the cache-free backend, not just the cache.

def test_set_value_writes_new_value(self):
    class MyConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=True)
    c = MyConfig.get_instance()
    c.first_name = 'Artem'
    c.first_name = 'Konstantin'
    c.first_name = 'Kirill'
    self.assertEqual(c.get_value_cache_free('FirstName'), 'Kirill')

def test_set_array_value_writes_new_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = ArrayOption('LuckyNumber', IntOption('_'), default=(1, 2, 3), allow_cache=True)
    c = MyConfig.get_instance()
    c.lucky_numbers = [4, 5, 6]
    c.lucky_numbers = [7, 8, 9]
    c.lucky_numbers = [10, 11, 12]
    self.assertEqual(c.get_array_value_cache_free('LuckyNumber'), ['10', '11', '12'])

def test_set_dict_value_writes_new_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'), default={'a': 1, 'b': 2, 'c': 3}, allow_cache=True)
    c = MyConfig.get_instance()
    c.lucky_numbers = {'a': 4, 'b': 5, 'c': 6}
    c.lucky_numbers = {'a': 7, 'b': 8, 'c': 9}
    c.lucky_numbers = {'a': 10, 'b': 11, 'c': 12}
    self.assertEqual(c.get_dict_value_cache_free('LuckyNumber'), {'a': '10', 'b': '11', 'c': '12'})

def test_del_value_writes_new_value(self):
    class MyConfig(self.CONFIG_TYPE):
        lucky_numbers = DictOption('LuckyNumber', IntOption('_'), default={'a': 1, 'b': 2, 'c': 3}, allow_cache=True)
    c = MyConfig.get_instance()
    c.lucky_numbers = {'a': 4, 'b': 5, 'c': 6}
    del c.lucky_numbers
    c.lucky_numbers = {'a': 10, 'b': 11, 'c': 12}
    del c.lucky_numbers
    self.assertEqual(c.get_dict_value_cache_free('LuckyNumber'), None)
# --- Cache policy resolution: per-option allow_cache overrides the
# --- class-level ALLOW_CACHE, whose absence defaults to False.

def test_allow_cache(self):
    class AllowCacheConfig(self.CONFIG_TYPE):
        ALLOW_CACHE = True
        first_name = StringOption('FirstName', default='Ilya')
    class DisallowCacheConfig(self.CONFIG_TYPE):
        ALLOW_CACHE = False
        first_name = StringOption('FirstName', default='Ilya')
    class DefaultCacheConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya')
    c = AllowCacheConfig.get_instance()
    c.get_value = MagicMock(return_value='Ilya')
    c.first_name
    c.get_value.assert_called_with('FirstName', allow_cache=True)
    c = DisallowCacheConfig.get_instance()
    c.get_value = MagicMock(return_value='Ilya')
    c.first_name
    c.get_value.assert_called_with('FirstName', allow_cache=False)
    c = DefaultCacheConfig.get_instance()
    c.get_value = MagicMock(return_value='Ilya')
    c.first_name
    c.get_value.assert_called_with('FirstName', allow_cache=False)

def test_per_option_allow_cache(self):
    class AllowCacheConfig(self.CONFIG_TYPE):
        ALLOW_CACHE = True
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    class DisallowCacheConfig(self.CONFIG_TYPE):
        ALLOW_CACHE = False
        first_name = StringOption('FirstName', default='Ilya', allow_cache=True)
    c = AllowCacheConfig.get_instance()
    c.get_value = MagicMock(return_value='Ilya')
    c.first_name
    c.get_value.assert_called_with('FirstName', allow_cache=False)
    c = DisallowCacheConfig.get_instance()
    c.get_value = MagicMock(return_value='Ilya')
    c.first_name
    c.get_value.assert_called_with('FirstName', allow_cache=True)
# --- Mapping protocol: the config behaves like a dict keyed by option name
# --- (__len__, __getitem__, __setitem__, __delitem__, __iter__).

def test_magic_len(self):
    class ZeroItemsConfig(self.CONFIG_TYPE):
        pass
    class OneItemConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    class TwoItemsConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
        last_name = StringOption('LastName', default='Kulakov', allow_cache=False)
    self.assertEqual(len(ZeroItemsConfig.get_instance()), len(ZeroItemsConfig._ordered_options))
    self.assertEqual(len(OneItemConfig.get_instance()), len(OneItemConfig._ordered_options))
    self.assertEqual(len(TwoItemsConfig.get_instance()), len(TwoItemsConfig._ordered_options))

def test_magic_getitem(self):
    class OneItemConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    c = OneItemConfig.get_instance()
    self.assertEqual(c['FirstName'], c.first_name)
    with self.assertRaises(KeyError):
        c['SecondName']

def test_magic_setitem(self):
    class OneItemConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    c = OneItemConfig.get_instance()
    c['FirstName'] = 'Tamara'
    self.assertEqual(c.first_name, 'Tamara')
    with self.assertRaises(KeyError):
        c['LastName'] = 'Fedorova'

def test_magic_delitem(self):
    # Deleting by key restores the default value.
    class OneItemConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    c = OneItemConfig.get_instance()
    c['FirstName'] = 'Tamara'
    self.assertEqual(c.first_name, 'Tamara')
    del c['FirstName']
    self.assertEqual(c.first_name, 'Ilya')

def test_magic_iter(self):
    class OneItemConfig(self.CONFIG_TYPE):
        first_name = StringOption('FirstName', default='Ilya', allow_cache=False)
    c = OneItemConfig.get_instance()
    self.assertSetEqual(set(c.keys()), set(iter(c)))
# --- Cache invalidation on reset, mixin support, and the abstract hook
# --- concrete backends must implement.

def test_reset_clears_cache(self):
    class MyConfig(self.CONFIG_TYPE):
        age = IntOption('Age', default=42)
    c = MyConfig()
    c.age = 99
    self.assertEqual(c._cache['Age'], '99')
    c.reset()
    self.assertEqual(c._cache['Age'], None)

def test_option_mixins(self):
    class MyConfigMixin:
        age = IntOption('Age', default=42)
    class MyConfig(MyConfigMixin, self.CONFIG_TYPE):
        pass
    c = MyConfig()
    self.assertIn('Age', c)

@abstractmethod
def test_config_is_created_if_not_found(self):
    # Backend-specific: each concrete subclass must verify its storage is
    # created on first use.
    pass
|
praserocking/google-gtags
|
refs/heads/master
|
gtags.py
|
7
|
#!/usr/bin/python2.4
#
# Copyright 2004 Google Inc.
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A module for talking to gtags server via python.
This module does the equivalent of gtags.el, but is implemented into python
for possible integration into VIM or other editors.
Because those editors usually don't understand google3 programming conventions,
this module was written as a standard python module.
To use add this to your editor's config:
import sys
import gtags
"""
import socket
import signal
import os
import time
from cStringIO import StringIO
# Server list
# Customize list of server to be something like:
# "c++" : { "definition" : [("gtags.google.com", 2222), (host2, port)],
# Maps language -> call type ("definition"/"callgraph") -> ordered list of
# (host, port) pairs to try.  Empty by default; fill in per deployment.
lang_call_to_server = {
  "c++" : { "definition" : [],
            "callgraph" : [] },
  "java" : { "definition" : [],
             "callgraph" : [] },
  "python" : { "definition" : [],
               "callgraph" : [] } }
# A list of functions that can potentially map a generated file name to its
# source file name
genfile_to_source = []
# String containing characters which are acceptable in a symbol.
SYMBOL_CHARS = "abcdefghijklmnopqrstuvwxyz" + \
               "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + \
               "0123456789" + \
               "+-*/@$%^&_=<>~.?![]{}"
# Version number reported to the server in each query.
PYCLIENT_VERSION = 3
PY_CLIENT_IDENTIFIER = "python"
default_corpus = 'google3'
default_language = 'c++'
# Socket timeouts, in seconds.
CONNECT_TIMEOUT = 5.0
DATA_TIMEOUT = 5.0
# GTags mixer settings.
# Command to launch mixer.
MIXER_CMD = "/path/to/gtagsmixer"
# Number of tries before we declare mixer is unreachable.
MIXER_RETRIES = 5
# Waiting time between retries (in ms).
MIXER_RETRY_DELAY = 100
# Default mixer port
MIXER_PORT = 2220
def send_to_server(host, port, command, timeout=DATA_TIMEOUT, proxy=None):
  '''
  Sends command to the specified port of a gtags server and returns an
  object from which the response can be read.

  Use this function only if you want to send a command to a specific data
  center/gtags server. Otherwise, use send_to_server in connection_manager
  which does automatic data center selection and failover based on query
  language and call type.

  host, port: address of the gtags server.
  command: s-expression command string; '\r\n' is appended automatically.
  timeout: data timeout in seconds, applied once connected.
  proxy: optional HTTP proxy as "host:port" (requires the socks module).

  Returns a SocketReader; call its GetResponse() to read the full reply,
  or ignore it if the response is not needed.
  '''
  if proxy:
    import socks
    s = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
    i = proxy.find(":")
    if i != -1:
      s.setproxy(socks.PROXY_TYPE_HTTP, proxy[0:i], int(proxy[i+1:]))
    else:
      s.setproxy(socks.PROXY_TYPE_HTTP, proxy)
  else:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.setblocking(1)
  s.settimeout(CONNECT_TIMEOUT)
  address = socket.getaddrinfo(host, port, socket.AF_INET,
                               socket.SOCK_STREAM)
  s.connect(address[0][4])
  s.settimeout(timeout)
  # need \r\n to match telnet protocol
  s.sendall(command + '\r\n')
  class SocketReader:
    """SocketReader exposes a single function 'GetResponse' to the caller
    of send_to_server. If the caller is interested in the server response,
    it can call GetResponse(). Otherwise, the caller is free ignore the
    return value"""
    def __init__(self, socket):
      self._socket = socket
    def GetResponse(self):
      buf = StringIO()
      # Fix: read from the socket stored on this reader instead of the
      # enclosing function's 's'; previously self._socket was never used,
      # so the class silently depended on the closure.
      data = self._socket.recv(1024)
      # accumulate all data
      while data:
        buf.write(data)
        data = self._socket.recv(1024)
      # presumably cancels a read-deadline alarm a caller may have set
      # with signal.alarm() -- TODO confirm; no alarm is set in this module.
      signal.alarm(0)
      return buf.getvalue()
  return SocketReader(s)
# a class to store tags related information
class ETag:
  """Record describing one tag match: symbol, file, line, offset, snippet."""
  def __init__(self, filename):
    self.filename_ = filename
  def set_tag(self, tag):
    self.tag_ = tag
  def set_lineno(self, lineno):
    self.lineno_ = lineno
  def set_fileoffset(self, offset):
    self.offset_ = offset
  def set_snippet(self, snippet):
    self.snippet_ = snippet
  def __str__(self):
    # Same layout as before: one "label: value" field per line.
    fields = ['tag: ' + self.tag_,
              'filename: ' + self.filename_,
              'lineno: ' + str(self.lineno_),
              'offset: ' + str(self.offset_),
              'snippet: ' + self.snippet_]
    return '\n'.join(fields)
class Error(Exception):
  "Root of this module's exception hierarchy."
  pass
class UnparseableSexp(Error):
  "Indicates that a string does not contain a valid s-exp."
class UnexpectedEndOfString(Error):
  "Indicates that a string is terminated by an unescaped backslash."
class NoAvailableServer(Error):
  "Indicates that no gtags server is available to take the request."
def server_type(callgraph):
  "Map a truthy/falsy callgraph flag onto the matching server-table key."
  if not callgraph:
    return "definition"
  return "callgraph"
class TagsConnectionManager(object):
'''
Handles communication with gtags server. Client only needs to supply
the language and whether the request if for callgraph and
TagsConnectionManager will determine which server to talk to. It also
automatically switch to the next available server if the current one
cannont be reached.
'''
def __init__(self):
self.lang_to_server = {"callgraph" : {},
"definition" : {}}
self.current_server = {"callgraph" : {},
"definition" : {}}
self.indexes = {"callgraph" : {},
"definition" : {}}
# A http proxy, e.g: webcache:8080
self.proxy = None
self.use_mixer = False
self.mixer_port = MIXER_PORT
self.mixer_launched = False
# Add a server to the list of known servers
def add_server(self, language, callgraph, host, port):
if (not self.lang_to_server[callgraph].has_key(language)):
self.lang_to_server[callgraph][language] = []
self.lang_to_server[callgraph][language].append((host, port))
# Remove a server from the list
def remove_server(self, language, callgraph, host, port):
if (not self.lang_to_server[callgraph].has_key(language)):
return
self.lang_to_server[callgraph][language].remove((host, port))
# Return the current selected server for the language and callgraph
def selected_server(self, language, callgraph):
if (not self.current_server[callgraph].has_key(language)):
self.current_server[callgraph][language] = \
self.next_server(language, callgraph)
return self.current_server[callgraph][language]
# Switch to the next available server for the language and callgraph
# Throws NoAvailableServer exception if there is no 'next' avaiable server
def next_server(self, language, callgraph):
if (not self.indexes[callgraph].has_key(language)):
self.indexes[callgraph][language] = 0
if (not self.lang_to_server[callgraph].has_key(language)):
self.lang_to_server[callgraph][language] = []
if (len(self.lang_to_server[callgraph][language]) <=
self.indexes[callgraph][language]):
raise NoAvailableServer
self.current_server[callgraph][language] = \
self.lang_to_server[callgraph][language] \
[self.indexes[callgraph][language]]
self.indexes[callgraph][language] += 1
return self.current_server[callgraph][language]
# Send a command to server
# When self.use_mixer is True, try starting the mixer if called for the
# first time. After that, send queries to the mixer. If self.user_mixer is not
# True, figure out hostname and port base on language and callgraph and
# call send_to_server. If we can't reach current selected sever, move to the
# next one and try again
def send_to_server(self, language, is_callgraph, command):
if not self.proxy and self.use_mixer:
if not self.mixer_launched:
os.system(MIXER_CMD + " --port %s" % self.mixer_port + " &")
time.sleep(0.5)
self.mixer_launched = True
for retry_count in xrange(MIXER_RETRIES):
try:
return send_to_server("localhost", self.mixer_port,
command).GetResponse()
except socket.error, socket_error:
if retry_count < MIXER_RETRIES - 1:
time.sleep(MIXER_RETRY_DELAY / 1000.0)
else:
raise socket_error
callgraph = server_type(is_callgraph)
while 1:
host, port = self.selected_server(language, callgraph)
try:
return send_to_server(host, port, command, proxy=self.proxy).GetResponse()
except socket.error:
self.next_server(language, callgraph)
# Instance of connection_manager that forwards client requests to gtags server
connection_manager = TagsConnectionManager()
# Register gtags sever with connection_manager as follows
#
# Seed the manager from the static lang_call_to_server table above; with the
# default (empty) table this registers nothing.
for lang in lang_call_to_server.keys():
  for call in lang_call_to_server[lang].keys():
    for host, port in lang_call_to_server[lang][call]:
      connection_manager.add_server(lang, call, host, port)
def string_lisp_repr(string):
  """
  Prints a string as a lisp-readable string literal surrounded by
  double quotes. (Python's repr would do the job, except it likes to
  use single quotes and therefore won't escape double quotes.)
  """
  chunks = ['"']
  for ch in string:
    # Backslash-escape the two characters lisp readers treat specially.
    if ch == '\\' or ch == '"':
      chunks.append('\\')
    chunks.append(ch)
  chunks.append('"')
  return ''.join(chunks)
def boolean_lisp_repr(boolean_value):
  """
  Convert a python boolean value (basically anything that evaluates to true or
  false) to s-expression equivalents (nil or t).
  """
  if not boolean_value:
    return "nil"
  return "t"
def make_command(command_type,
                 extra_params = (),
                 language = default_language,
                 callgraph = 0,
                 client_type = "python",
                 client_version = PYCLIENT_VERSION,
                 corpus = default_corpus,
                 current_file = None):
  """
  Given a command-type (string) and a sequence of (name, value) extra
  parameters, construct a query string that we can send to the server.

  e.g.
  make_command('foo', [['tag', 40], ['file', 'gtags.py']])
  => '(foo (client-type "python") (client-version 2) (protocol-version 2)
      (tag 40) (file "gtags.py"))'

  String parameter values are emitted as lisp string literals; all
  other values are emitted with repr().

  Note: the extra_params default is an (immutable) empty tuple rather
  than the previous mutable [] -- same behavior, without the shared
  mutable-default pitfall.
  """
  client_string = ('(client-type %s) (client-version %s) (protocol-version 2)'
                   % (string_lisp_repr(client_type), client_version))
  corp_lang_call_string = '(corpus %s) (language %s) (callers %s)' \
                          % (string_lisp_repr(corpus),
                             string_lisp_repr(language),
                             boolean_lisp_repr(callgraph))
  current_file_string = ''
  if current_file:
    current_file_string = '(current-file %s)' % string_lisp_repr(current_file)
  # Build the extra-parameter clauses in one pass; ''.join avoids the
  # quadratic repeated string concatenation of the old loop.
  clauses = []
  for name, value in extra_params:
    if isinstance(value, str):
      value_repr = string_lisp_repr(value)
    else:
      value_repr = repr(value)
    clauses.append(' (%s %s)' % (name, value_repr))
  param_string = ''.join(clauses)
  return '(%s %s %s %s %s)' \
         % (command_type, client_string, corp_lang_call_string,
            current_file_string, param_string)
# Returns a list of tags matching id for language `language`.
# Deprecated -- use find_matching_tags_exact instead.
# TODO(leandrog): delete if no one uses this
def find_tag(language,
             id,
             callgraph = 0,
             decipher_genfiles = 0,
             client = 'py',
             corpus = default_corpus,
             current_file=None):
  # Thin forwarding shim: exact-match lookup with output parsing enabled.
  return find_matching_tags_exact(language, id, callgraph, decipher_genfiles,
                                  client, corpus, current_file, 0)
def parse_sexp(str):
  """
  Parses a string representation of a s-exp and converts it to a
  python data structure. Can parse strings, integers, symbols and
  lists of those.

  Examples:
  '1450' => 1450
  '"string"' => "string"
  '()' => []
  '(("a" "b") (105 205))' => [["a", "b"], [105, 205]]

  Transforms improper lists into lists:
  '(1 2 . 3)' => [1, 2, 3]

  Symbols are represented with the symbol name in a one-item tuple:
  'symbol' => ("symbol",)

  Throws UnparseableSexp if there is an error during parsing.
  """
  # NOTE: the parameter shadows the builtin str() throughout this
  # function (kept as-is to preserve the public signature).
  def unescape_string(str):
    """
    Requires: str does not contain any escaped characters other than
    \\, \t and \".
    Returns str, with escaped characters replaced with their original versions.
    """
    chars = []
    i = 0
    while i < len(str):
      if str[i] == "\\":
        if i == len(str) - 1:
          raise UnexpectedEndOfString  # String ends in a bare backslash.
        if str[i+1] in ["\\", "\""]:
          chars.append(str[i+1])
        elif str[i+1] == "t":
          chars.append("\t");
        i += 1  # Skip the backslash; the shared increment below skips the
                # escaped character itself.
      else:
        chars.append(str[i])
      i += 1
    return ''.join(chars)
  def parse_sexp_helper(str, start_index):
    """
    Parses a string representation of a s-exp (starting at position
    start_index) into a string, an integer, a symbol-- represented by
    a one-item tuple ("symbolname",)-- or a list. Returns (len,
    answer) containing the number of characters read from the string,
    and the sexp itself.

    Throws UnparseableSexp if there is an error during parsing.
    """
    i = start_index
    while i < len(str) and str[i] == " ": # Skip leading spaces.
      i += 1
    if i == len(str):
      raise UnparseableSexp, "No s-exp was found in input."
    data_start_index = i # This is where the token/list actually starts.
    if str[i] == "(":
      # Parse list
      elements = []
      i += 1
      if i == len(str):
        raise UnparseableSexp, "Input unexpectedly ended inside a list."
      while str[i] != ")":
        while str[i] == " ":
          i += 1
          if i == len(str):
            raise UnparseableSexp, "Input unexpectedly ended inside a list."
        if str[i] == ")":
          # Only trailing spaces before the close paren; the outer loop
          # test will now terminate.
          pass
        else:
          if str[i] == ".": # Just ignore "." (in improper lists).
            # TODO(psung): Check for spurious "."'s (e.g. more than
            # one, or one anywhere other than the second-to-last
            # position) and throw an exception
            i += 1
          else:
            # Recursively parse the next element and advance past it.
            (skip_ahead, result) = parse_sexp_helper(str, i)
            i += skip_ahead
            elements.append(result)
        if i >= len(str): # No room for closing parenthesis
          raise UnparseableSexp, "Input unexpectedly ended inside a list."
      i += 1  # Consume the ")".
      return (i - start_index, elements)
    elif str[i] == '"':
      # Parse string literal
      i += 1
      if i == len(str):
        raise UnparseableSexp, "Input contains unterminated quoted string."
      while str[i] != '"':
        if str[i] == "\\":
          i += 1 # Skip over the slash.
          if i == len(str):
            raise UnparseableSexp, "Quoted string ends in unescaped backslash."
        i += 1
        if i == len(str):
          raise UnparseableSexp, "Input contains unterminated quoted string."
      try:
        s = unescape_string(str[data_start_index+1:i])
      except UnexpectedEndOfString:
        raise UnparseableSexp, "Quoted string ends in unescaped backslash."
      i += 1  # Consume the closing quote.
      return (i - start_index, s)
    elif str[i] in "0123456789":
      # Parse integer literal
      # TODO(psung): Handle negative ints, floats, etc. This code is
      # also bad in that it assumes a token is a number if the first
      # char is a digit.
      while i < len(str) and str[i] in "0123456789":
        i += 1
      return (i - start_index, int(str[data_start_index:i]))
    elif str[i] in SYMBOL_CHARS or str[i] == "\\":
      # Parse symbol
      while i < len(str):
        if str[i] not in SYMBOL_CHARS + "\\":
          break
        if str[i] == "\\":
          i += 1 # Skip over the slash.
          if i == len(str):
            raise UnparseableSexp, "Symbol ends in unescaped backslash."
        i += 1
      try:
        s = unescape_string(str[data_start_index:i])
      except UnexpectedEndOfString:
        raise UnparseableSexp, "Symbol ends in unescaped backslash."
      return (i - start_index, (s,))
    raise UnparseableSexp, ("Unexpected character '%s' was found in input; " +
                            "beginning of token/list was expected") % str[i]
  return parse_sexp_helper(str, 0)[1]
def alist_to_dictionary(alist):
  """
  Convert a parsed alist of the form [[("key1",), value1], ...]
  -- which is what you get when you parse ((key1 value1) ...) --
  into a dictionary keyed by the symbol names:
  result["key1"] => value1
  """
  # Each entry pairs a one-tuple symbol with its value; unwrap the tuple.
  return dict((key[0], value) for key, value in alist)
# Parameters shared by the query helpers below
# (find_matching_tags, find_matching_tags_exact, search_tag_snippets,
#  check_server_status, reload_tagfile, list_tags_for_file,
#  do_gtags_command):
#
#   lang              = language
#   id                = tag you are searching for
#   callgraph         = 0: definitions mode, 1: callgraph mode
#   decipher_genfiles = 0: No, 1: Yes
#   client            = name of the client using this library; defaults to
#                       PY_CLIENT_IDENTIFIER (= "python")
#   ignore_output     = 0: parse the output, 1: do not parse the output

# Regular-expression (prefix) match.
def find_matching_tags(lang,
                       id,
                       callgraph = 0,
                       decipher_genfiles = 0,
                       client = PY_CLIENT_IDENTIFIER,
                       corpus = default_corpus,
                       current_file = None,
                       ignore_output = 0):
  return do_gtags_command('lookup-tag-prefix-regexp', [["tag", id]],
                          lang, callgraph, decipher_genfiles, client,
                          corpus, current_file, ignore_output)
# Exact match.
def find_matching_tags_exact(lang,
                             id,
                             callgraph = 0,
                             decipher_genfiles = 0,
                             client = PY_CLIENT_IDENTIFIER,
                             corpus = default_corpus,
                             current_file = None,
                             ignore_output = 0):
  return do_gtags_command('lookup-tag-exact', [["tag", id]],
                          lang, callgraph, decipher_genfiles, client,
                          corpus, current_file, ignore_output)
# Snippets search.
def search_tag_snippets(lang,
                        id,
                        callgraph = 0,
                        client = PY_CLIENT_IDENTIFIER,
                        corpus = default_corpus,
                        current_file = None,
                        ignore_output = 0):
  # Snippet lookups never decipher genfiles (fifth argument fixed at 0).
  return do_gtags_command('lookup-tag-snippet-regexp', [["tag", id]],
                          lang, callgraph, 0, client,
                          corpus, current_file, ignore_output)
# Check server status with the 'ping' command.
def check_server_status(lang,
                        callgraph = 0,
                        client = PY_CLIENT_IDENTIFIER):
  ping = make_command('ping', [], lang, callgraph, client)
  return connection_manager.send_to_server(lang, callgraph, ping)
# Ask the server to reload the tags file `file`.
def reload_tagfile(file,
                   lang,
                   callgraph = 0,
                   client = PY_CLIENT_IDENTIFIER):
  reload_cmd = make_command('reload-tags-file', [["file", file]],
                            lang, callgraph, client)
  return connection_manager.send_to_server(lang, callgraph, reload_cmd)
# List all tags defined in a single file.
def list_tags_for_file(lang, filename, client = PY_CLIENT_IDENTIFIER):
  return do_gtags_command('lookup-tags-in-file', [["file", filename]],
                          lang, 0, 0, client)
class ErrorMessageFromServer(Exception):
  """Raised when the server reply contains an (error ((message "...")))
  form; the exception argument carries the server-supplied message."""
def do_gtags_command(command,
                     parameters,
                     lang,
                     callgraph = 0,
                     decipher_genfiles = 0,
                     client = PY_CLIENT_IDENTIFIER,
                     corpus = default_corpus,
                     current_file = None,
                     ignore_output = 0):
  """
  Send `command` (with its extra `parameters`) to the gtags server and
  return the result.

  If ignore_output is true, the raw server response string is returned.
  Otherwise the response is parsed into a list of ETag objects; tags
  derived from generated files are prepended when decipher_genfiles is
  true and appended otherwise.

  Raises ErrorMessageFromServer if the server reports an error.
  """
  tags_data = connection_manager.send_to_server(
      lang, callgraph, make_command(command,
                                    parameters,
                                    lang,
                                    callgraph,
                                    client,
                                    corpus = corpus,
                                    current_file = current_file))
  if ignore_output:
    return tags_data
  # reserved for genfile-derived tags when decipher_genfiles is true
  prepend_tag_list = []
  tag_list = []
  response_dict = alist_to_dictionary(parse_sexp(tags_data))
  for tag_sexp in response_dict["value"]:
    # Check for error message in the form of
    # [("error",) [[("message",), message], ...]]
    if "error" == tag_sexp[0][0]:
      error_dict = alist_to_dictionary(tag_sexp[1])
      raise ErrorMessageFromServer(error_dict["message"])
    tag_dict = alist_to_dictionary(tag_sexp)
    filename = tag_dict["filename"]
    tag = ETag(filename)
    tag.set_snippet(tag_dict["snippet"])
    tag.set_tag(tag_dict["tag"])
    tag.set_lineno(tag_dict["lineno"])
    tag.set_fileoffset(tag_dict["offset"])
    tag_list.append(tag)
    for f in genfile_to_source:
      gentag = f(filename, tag_dict)
      if (gentag):
        # BUG FIX: this previously tested `decipher_genfiles != '0'`,
        # comparing against the *string* '0'; since callers pass the
        # documented integer flag (0/1), that was always true and genfile
        # tags were prepended even with deciphering disabled.
        if decipher_genfiles: # prepend
          prepend_tag_list.append(gentag)
        else: # append
          tag_list.append(gentag)
  return prepend_tag_list + tag_list
def server_reload(host, port, filename):
  """Tell the gtags server at (host, port) to reload `filename`."""
  # timeout=0: server reloading can take a long time, so never time out.
  reload_cmd = make_command('reload-tags-file', [['file', filename]],
                            'nil', 'nil') + '\r\n'
  send_to_server(host, port, reload_cmd, timeout = 0)
def server_load_update_file(host, port, filename):
  """Tell the gtags server at (host, port) to load the update file."""
  # timeout=0: updates can take a few seconds, so never time out.
  update_cmd = make_command('load-update-file', [['file', filename]],
                            'nil', 'nil') + '\r\n'
  send_to_server(host, port, update_cmd, timeout = 0)
|
xxsergzzxx/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/plat-os2emx/IN.py
|
57
|
# Generated by h2py from f:/emx/include/netinet/in.h
# Platform constants for the OS/2 EMX port; values mirror the C header.
# Included from sys/param.h
PAGE_SIZE = 0x1000
HZ = 100
MAXNAMLEN = 260
MAXPATHLEN = 260
# Byte-order conversion helpers.
# NOTE(review): _swapl/_swaps are not defined in this file -- presumably
# provided by the surrounding platform module at runtime; verify.
def htonl(X): return _swapl(X)
def ntohl(X): return _swapl(X)
def htons(X): return _swaps(X)
def ntohs(X): return _swaps(X)
# IP protocol numbers (IPPROTO_*).
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
# Reserved port ranges.
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
# Classful-address predicates and masks (class A: leading bit 0).
def IN_CLASSA(i): return (((int)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
# Class B: leading bits 10.
def IN_CLASSB(i): return (((int)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
# Class C: leading bits 110.
def IN_CLASSC(i): return (((int)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
# Class D (multicast): leading bits 1110.
def IN_CLASSD(i): return (((int)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((int)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((int)(i) & 0xf0000000) == 0xf0000000)
# Well-known addresses.
INADDR_ANY = 0x00000000
INADDR_LOOPBACK = 0x7f000001
INADDR_BROADCAST = 0xffffffff
INADDR_NONE = 0xffffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
IN_LOOPBACKNET = 127
# IP-level socket options (values overlap: two historical numbering sets).
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
# Multicast defaults and limits.
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
|
wanghe4096/website
|
refs/heads/master
|
aliyun/api/rest/Rds20130528DescribeDBInstancesRequest.py
|
1
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Rds20130528DescribeDBInstancesRequest(RestApi):
	"""Aliyun RDS DescribeDBInstances request (API version 2013-05-28)."""
	def __init__(self,domain='rds.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Optional query filters; None means the filter is not sent.
		self.DBInstanceId = None
		self.DBInstanceNetType = None
		self.DBInstanceStatus = None
		self.Engine = None
	def getapiname(self):
		# Fully-qualified API name consumed by the RestApi base class.
		return 'rds.aliyuncs.com.DescribeDBInstances.2013-05-28'
|
tseaver/google-cloud-python
|
refs/heads/master
|
securitycenter/google/cloud/securitycenter_v1beta1/proto/asset_pb2.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/securitycenter_v1beta1/proto/asset.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.securitycenter_v1beta1.proto import (
security_marks_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2,
)
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/securitycenter_v1beta1/proto/asset.proto",
package="google.cloud.securitycenter.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n'com.google.cloud.securitycenter.v1beta1P\001ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenter"
),
serialized_pb=_b(
"\n5google/cloud/securitycenter_v1beta1/proto/asset.proto\x12#google.cloud.securitycenter.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a>google/cloud/securitycenter_v1beta1/proto/security_marks.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xf7\x04\n\x05\x41sset\x12\x0c\n\x04name\x18\x01 \x01(\t\x12g\n\x1asecurity_center_properties\x18\x02 \x01(\x0b\x32\x43.google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties\x12_\n\x13resource_properties\x18\x07 \x03(\x0b\x32\x42.google.cloud.securitycenter.v1beta1.Asset.ResourcePropertiesEntry\x12J\n\x0esecurity_marks\x18\x08 \x01(\x0b\x32\x32.google.cloud.securitycenter.v1beta1.SecurityMarks\x12/\n\x0b\x63reate_time\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x94\x01\n\x18SecurityCenterProperties\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x17\n\x0fresource_parent\x18\x03 \x01(\t\x12\x18\n\x10resource_project\x18\x04 \x01(\t\x12\x17\n\x0fresource_owners\x18\x05 \x03(\t\x1aQ\n\x17ResourcePropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\x42~\n'com.google.cloud.securitycenter.v1beta1P\x01ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenterb\x06proto3"
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_ASSET_SECURITYCENTERPROPERTIES = _descriptor.Descriptor(
name="SecurityCenterProperties",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="resource_name",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties.resource_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_type",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties.resource_type",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_parent",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties.resource_parent",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_project",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties.resource_project",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_owners",
full_name="google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties.resource_owners",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=652,
serialized_end=800,
)
_ASSET_RESOURCEPROPERTIESENTRY = _descriptor.Descriptor(
name="ResourcePropertiesEntry",
full_name="google.cloud.securitycenter.v1beta1.Asset.ResourcePropertiesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.securitycenter.v1beta1.Asset.ResourcePropertiesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.securitycenter.v1beta1.Asset.ResourcePropertiesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=802,
serialized_end=883,
)
_ASSET = _descriptor.Descriptor(
name="Asset",
full_name="google.cloud.securitycenter.v1beta1.Asset",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.Asset.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="security_center_properties",
full_name="google.cloud.securitycenter.v1beta1.Asset.security_center_properties",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_properties",
full_name="google.cloud.securitycenter.v1beta1.Asset.resource_properties",
index=2,
number=7,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="security_marks",
full_name="google.cloud.securitycenter.v1beta1.Asset.security_marks",
index=3,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="create_time",
full_name="google.cloud.securitycenter.v1beta1.Asset.create_time",
index=4,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_time",
full_name="google.cloud.securitycenter.v1beta1.Asset.update_time",
index=5,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_ASSET_SECURITYCENTERPROPERTIES, _ASSET_RESOURCEPROPERTIESENTRY,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=252,
serialized_end=883,
)
_ASSET_SECURITYCENTERPROPERTIES.containing_type = _ASSET
_ASSET_RESOURCEPROPERTIESENTRY.fields_by_name[
"value"
].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_ASSET_RESOURCEPROPERTIESENTRY.containing_type = _ASSET
_ASSET.fields_by_name[
"security_center_properties"
].message_type = _ASSET_SECURITYCENTERPROPERTIES
_ASSET.fields_by_name[
"resource_properties"
].message_type = _ASSET_RESOURCEPROPERTIESENTRY
_ASSET.fields_by_name[
"security_marks"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS
)
_ASSET.fields_by_name[
"create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ASSET.fields_by_name[
"update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["Asset"] = _ASSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Asset = _reflection.GeneratedProtocolMessageType(
"Asset",
(_message.Message,),
dict(
SecurityCenterProperties=_reflection.GeneratedProtocolMessageType(
"SecurityCenterProperties",
(_message.Message,),
dict(
DESCRIPTOR=_ASSET_SECURITYCENTERPROPERTIES,
__module__="google.cloud.securitycenter_v1beta1.proto.asset_pb2",
__doc__="""Cloud SCC managed properties. These properties are managed by Cloud SCC
and cannot be modified by the user.
Attributes:
resource_name:
The full resource name of the GCP resource this asset
represents. This field is immutable after create time. See: ht
tps://cloud.google.com/apis/design/resource\_names#full\_resou
rce\_name
resource_type:
The type of the GCP resource. Examples include: APPLICATION,
PROJECT, and ORGANIZATION. This is a case insensitive field
defined by Cloud SCC and/or the producer of the resource and
is immutable after create time.
resource_parent:
The full resource name of the immediate parent of the
resource. See: https://cloud.google.com/apis/design/resource\_
names#full\_resource\_name
resource_project:
The full resource name of the project the resource belongs to.
See: https://cloud.google.com/apis/design/resource\_names#full
\_resource\_name
resource_owners:
Owners of the Google Cloud resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.Asset.SecurityCenterProperties)
),
),
ResourcePropertiesEntry=_reflection.GeneratedProtocolMessageType(
"ResourcePropertiesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_ASSET_RESOURCEPROPERTIESENTRY,
__module__="google.cloud.securitycenter_v1beta1.proto.asset_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.Asset.ResourcePropertiesEntry)
),
),
DESCRIPTOR=_ASSET,
__module__="google.cloud.securitycenter_v1beta1.proto.asset_pb2",
__doc__="""Cloud Security Command Center's (Cloud SCC) representation of a Google
Cloud Platform (GCP) resource.
The Asset is a Cloud SCC resource that captures information about a
single GCP resource. All modifications to an Asset are only within the
context of Cloud SCC and don't affect the referenced GCP resource.
Attributes:
name:
The relative resource name of this asset. See: https://cloud.g
oogle.com/apis/design/resource\_names#relative\_resource\_name
Example: "organizations/123/assets/456".
security_center_properties:
Cloud SCC managed properties. These properties are managed by
Cloud SCC and cannot be modified by the user.
resource_properties:
Resource managed properties. These properties are managed and
defined by the GCP resource and cannot be modified by the
user.
security_marks:
User specified security marks. These marks are entirely
managed by the user and come from the SecurityMarks resource
that belongs to the asset.
create_time:
The time at which the asset was created in Cloud SCC.
update_time:
The time at which the asset was last updated, added, or
deleted in Cloud SCC.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.Asset)
),
)
_sym_db.RegisterMessage(Asset)
_sym_db.RegisterMessage(Asset.SecurityCenterProperties)
_sym_db.RegisterMessage(Asset.ResourcePropertiesEntry)
DESCRIPTOR._options = None
_ASSET_RESOURCEPROPERTIESENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/fromImportStarReassignment/FromImportStarReassignment.py
|
83
|
from m1 import *
foo = foo
# <ref>
|
ennoborg/gramps
|
refs/heads/master
|
gramps/plugins/db/dbapi/test/__init__.py
|
12133432
| |
Gaurav-S-Thakur/PythonSqlFlask
|
refs/heads/master
|
Demos/Module 5 - MVATrivia/TriviaMVAApp/models/__init__.py
|
12133432
| |
snowcloud/django-flatpagewiki
|
refs/heads/master
|
flatpagewiki/__init__.py
|
12133432
| |
chromium/chromium
|
refs/heads/master
|
third_party/blink/web_tests/external/wpt/webdriver/tests/get_window_handles/__init__.py
|
12133432
| |
mayfield/shellish
|
refs/heads/master
|
test/rendering.py
|
1
|
import itertools
import unittest
from shellish import rendering as R
class VTMLBufferTests(unittest.TestCase):
    # clip() wider than the content must return the content unchanged.
    def test_overclip_plain(self):
        startval = 'A' * 10
        s = R.vtmlrender(startval)
        self.assertEqual(s.clip(11), startval)
        self.assertEqual(s.clip(11).text(), startval)
        self.assertEqual(s.clip(20), startval)
        self.assertEqual(s.clip(20).text(), startval)
    # clip() at exactly the content width is a no-op.
    def test_noclip_plain(self):
        startval = 'A' * 10
        s = R.vtmlrender(startval)
        self.assertEqual(s.clip(10), startval)
        self.assertEqual(s.clip(10).text(), startval)
    # clip() narrower than the content truncates; negative widths raise.
    def test_underclip_plain(self):
        startval = 'A' * 10
        s = R.vtmlrender(startval)
        self.assertEqual(s.clip(9), startval[:9])
        self.assertEqual(s.clip(9).text(), startval[:9])
        self.assertEqual(s.clip(4), startval[:4])
        self.assertEqual(s.clip(4).text(), startval[:4])
        self.assertEqual(s.clip(1), startval[:1])
        self.assertEqual(s.clip(1).text(), startval[:1])
        self.assertEqual(s.clip(0), '')
        self.assertEqual(s.clip(0).text(), '')
        self.assertRaises(ValueError, s.clip, -10)
    # Same clip semantics with VTML markup: width counts visible chars only.
    def test_overclip_vtml(self):
        startval = 'A' * 10
        s = R.vtmlrender('<b>%s</b>' % startval)
        self.assertEqual(s.clip(11).text(), startval)
        self.assertEqual(s.clip(20).text(), startval)
        self.assertEqual(s.clip(11), s)
        self.assertEqual(s.clip(20), s)
    def test_noclip_vtml(self):
        startval = 'A' * 10
        s = R.vtmlrender('<b>%s</b>' % startval)
        self.assertEqual(s.clip(10).text(), startval)
        self.assertEqual(s.clip(10), s)
    # Truncation of marked-up text keeps exactly `width` visible chars.
    def test_underclip_vtml(self):
        startval = 'A' * 10
        s = R.vtmlrender('<b>%s</b>' % startval)
        self.assertEqual(s.clip(9).text(), startval[:9])
        self.assertEqual(str(s.clip(9)).count('A'), 9)
        self.assertEqual(s.clip(4).text(), startval[:4])
        self.assertEqual(str(s.clip(4)).count('A'), 4)
        self.assertEqual(s.clip(1).text(), startval[:1])
        self.assertEqual(str(s.clip(1)).count('A'), 1)
        self.assertEqual(s.clip(0).text(), '')
        self.assertEqual(s.clip(0), '')
        self.assertRaises(ValueError, s.clip, -10)
    # A clipped styled string must still end with the ANSI reset sequence.
    def test_underclip_vtml_reset(self):
        s = R.vtmlrender('<b>%s</b>' % 'AAAA')
        self.assertTrue(str(s.clip(2)).endswith('\033[0m'))
    # When the content fits, the cliptext marker must not be appended.
    def test_overclip_with_cliptextt(self):
        startval = 'A' * 10
        s = R.vtmlrender(startval)
        self.assertEqual(s.clip(12, '.'), startval)
        self.assertEqual(s.clip(11, '.'), startval)
        self.assertEqual(s.clip(10, '.'), startval)
        self.assertEqual(s.clip(12, '..'), startval)
        self.assertEqual(s.clip(11, '..'), startval)
        self.assertEqual(s.clip(10, '..'), startval)
        self.assertEqual(s.clip(12, '...'), startval)
        self.assertEqual(s.clip(11, '...'), startval)
        self.assertEqual(s.clip(10, '...'), startval)
    # When truncating, the cliptext marker counts toward the total width.
    def test_underclip_with_cliptextt(self):
        startval = 'A' * 10
        s = R.vtmlrender(startval)
        self.assertEqual(s.clip(9, '.'), startval[:8] + '.')
        self.assertEqual(s.clip(8, '.'), startval[:7] + '.')
        self.assertEqual(s.clip(7, '.'), startval[:6] + '.')
        self.assertEqual(s.clip(9, '..'), startval[:7] + '..')
        self.assertEqual(s.clip(8, '..'), startval[:6] + '..')
        self.assertEqual(s.clip(7, '..'), startval[:5] + '..')
        self.assertEqual(s.clip(9, '...'), startval[:6] + '...')
        self.assertEqual(s.clip(8, '...'), startval[:5] + '...')
        self.assertEqual(s.clip(7, '...'), startval[:4] + '...')
        self.assertEqual(s.clip(6, '...'), startval[:3] + '...')
    # Trailing whitespace/newlines are stripped before clipping; the marker
    # appears only when visible content was actually cut.
    def test_clip_strip(self):
        self.assertEqual(R.vtmlrender('\n').clip(10, '!').text(), '!')
        self.assertEqual(R.vtmlrender(' \n').clip(10, '!').text(), '!')
        self.assertEqual(R.vtmlrender(' ').clip(10, '!').text(), '')
        self.assertEqual(R.vtmlrender('A\n').clip(10, '!').text(), 'A!')
        self.assertEqual(R.vtmlrender('A\nB').clip(10, '!').text(), 'A!')
        self.assertEqual(R.vtmlrender('A\n').clip(1, '!').text(), '!')
        self.assertEqual(R.vtmlrender('A ').clip(1, '!').text(), 'A')
        self.assertEqual(R.vtmlrender('A ').clip(1, '!').text(), 'A')
        self.assertEqual(R.vtmlrender('A \n').clip(1, '!').text(), '!')
        self.assertEqual(R.vtmlrender('A \n ').clip(1, '!').text(), '!')
        self.assertEqual(R.vtmlrender('A\n').clip(2, '!').text(), 'A!')
        self.assertEqual(R.vtmlrender('A ').clip(2, '!').text(), 'A')
        self.assertEqual(R.vtmlrender('A ').clip(2, '!').text(), 'A')
        self.assertEqual(R.vtmlrender('A \n').clip(2, '!').text(), 'A!')
        self.assertEqual(R.vtmlrender('A \n ').clip(2, '!').text(), 'A!')
        self.assertEqual(R.vtmlrender(' ').clip(5, '!').text(), '')
        self.assertEqual(R.vtmlrender(' ').clip(4, '!').text(), '')
        self.assertEqual(R.vtmlrender(' ').clip(3, '!').text(), '')
        self.assertEqual(R.vtmlrender(' ').clip(2, '!').text(), '')
        self.assertEqual(R.vtmlrender(' ').clip(1, '!').text(), '')
    # Clipping the empty buffer yields the empty string, never the marker.
    def test_clip_empty(self):
        self.assertEqual(R.vtmlrender('').clip(10, '!').text(), '')
    # Wrapping the empty buffer yields a single empty line.
    def test_wrap_empty(self):
        buf = R.vtmlrender('')
        self.assertListEqual(buf.wrap(10), [''])
    # wrap() always returns a list; non-positive widths raise ValueError.
    def test_wrap_identity(self):
        buf = R.vtmlrender('abcdefgh')
        self.assertIsInstance(buf.wrap(10), list)
        self.assertIsInstance(buf.wrap(8), list)
        self.assertIsInstance(buf.wrap(1), list)
        self.assertRaises(ValueError, buf.wrap, -1)
        self.assertRaises(ValueError, buf.wrap, 0)
        self.assertRaises(ValueError, buf.wrap, -2)
        self.assertRaises(ValueError, buf.wrap, -8)
        self.assertRaises(ValueError, buf.wrap, -10)
    # An unbroken word is hard-split at the wrap width.
    def test_wrap_boundries_packed_stronly(self):
        buf = R.vtmlrender('abcdefgh')
        self.assertListEqual(buf.wrap(10), ['abcdefgh'])
        self.assertListEqual(buf.wrap(8), ['abcdefgh'])
        self.assertListEqual(buf.wrap(7), ['abcdefg', 'h'])
        self.assertListEqual(buf.wrap(2), ['ab', 'cd', 'ef', 'gh'])
        self.assertListEqual(buf.wrap(1), list('abcdefgh'))
    # A hyphen is a preferred break point (kept on the first line).
    def test_wrap_boundries_hypens_stronly(self):
        buf = R.vtmlrender('abcd-efgh')
        self.assertListEqual(buf.wrap(10), ['abcd-efgh'])
        self.assertListEqual(buf.wrap(8), ['abcd-', 'efgh'])
    # Whitespace break points are consumed by the wrap.
    def test_wrap_whitespace_stronly(self):
        buf = R.vtmlrender('abcd efgh')
        self.assertListEqual(buf.wrap(10), ['abcd efgh'])
        self.assertListEqual(buf.wrap(8), ['abcd', 'efgh'])
        self.assertListEqual(buf.wrap(7), ['abcd', 'efgh'])
        self.assertListEqual(buf.wrap(2), ['ab', 'cd', 'ef', 'gh'])
        self.assertListEqual(buf.wrap(1), list('abcdefgh'))
    # Leading indent is preserved; interior runs collapse at break points.
    def test_wrap_multi_whitespace_stronly(self):
        buf = R.vtmlrender(' A BB CCC DDDD EEEEE ')
        self.assertListEqual(buf.wrap(10), [' A BB CCC', 'DDDD EEEEE'])
        self.assertListEqual(buf.wrap(8), [' A BB', 'CCC', 'DDDD', 'EEEEE'])
        self.assertListEqual(buf.wrap(7), [' A BB', 'CCC', 'DDDD', 'EEEEE'])
        self.assertListEqual(buf.wrap(2), [
            ' A', 'BB', 'CC', 'C', 'DD', 'DD', 'EE', 'EE', 'E'])
        self.assertListEqual(buf.wrap(1), [
            ' ', 'A', 'B', 'B', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'E', 'E',
            'E', 'E', 'E'])
def test_wrap_overflow_whitespace_stronly(self):
buf = R.vtmlrender(' A BB ')
self.assertListEqual(buf.wrap(20), [' A BB'])
self.assertListEqual(buf.wrap(5), [' ', 'A BB'])
self.assertListEqual(buf.wrap(3), [' ', 'A', 'BB'])
self.assertListEqual(buf.wrap(2), [' ', 'A', 'BB'])
buf = R.vtmlrender(' A BB ')
self.assertListEqual(buf.wrap(20), [' A BB'])
self.assertListEqual(buf.wrap(5), [' ', 'A BB'])
self.assertListEqual(buf.wrap(3), [' ', 'A', 'BB'])
self.assertListEqual(buf.wrap(2), [' ', 'A', 'BB'])
buf = R.vtmlrender(' A BB ')
self.assertListEqual(buf.wrap(30), [' A BB'])
self.assertListEqual(buf.wrap(5), [' ', 'A', 'BB'])
self.assertListEqual(buf.wrap(3), [' ', 'A', 'BB'])
self.assertListEqual(buf.wrap(2), [' ', 'A', 'BB'])
def test_wrap_newlines(self):
for width in 1, 2, 80:
with self.subTest('width=%d' % width):
buf = R.vtmlrender('A\nB')
self.assertListEqual(buf.wrap(width), ['A', 'B'])
buf = R.vtmlrender('A\n\nB')
self.assertListEqual(buf.wrap(width), ['A', '', 'B'])
buf = R.vtmlrender('A\n\n\nB')
self.assertListEqual(buf.wrap(width), ['A', '', '', 'B'])
buf = R.vtmlrender('A\n\n\nB')
self.assertListEqual(buf.wrap(width), ['A', '', '', 'B'])
buf = R.vtmlrender('A\n B')
self.assertListEqual(buf.wrap(width), ['A', 'B'])
buf = R.vtmlrender('A \n B')
self.assertListEqual(buf.wrap(width), ['A', 'B'])
buf = R.vtmlrender('A \nB')
self.assertListEqual(buf.wrap(width), ['A', 'B'])
def test_wrap_no_indent(self):
buf = R.vtmlrender(' A\nB')
self.assertListEqual(buf.wrap(20, strip_leading_indent=True),
['A', 'B'])
@unittest.skip
def test_wrap_expand_tabs(self):
buf = R.vtmlrender('A\tB')
self.assertListEqual(buf.wrap(20), ['A B'])
self.assertListEqual(buf.wrap(3), ['A', 'B'])
self.assertListEqual(buf.wrap(2), ['A', 'B'])
self.assertListEqual(buf.wrap(20, expand_tabs=False), ['A\tB'])
self.assertListEqual(buf.wrap(3, expand_tabs=False), ['A\tB'])
self.assertListEqual(buf.wrap(2, expand_tabs=False), ['A', 'B'])
def test_bad_data(self):
bad = [
None,
0,
1,
['asdf', 'asdf'],
[None, None],
'<>asdf',
'</b>asdf</notit>',
]
for x in bad:
self.assertEqual(R.vtmlrender(x), x)
def test_ordering(self):
self.assertGreater(R.vtmlrender('bbbb'), R.vtmlrender('aaaa'))
self.assertLess(R.vtmlrender('aaaa'), R.vtmlrender('bbbb'))
self.assertGreaterEqual(R.vtmlrender('bbbb'), R.vtmlrender('aaaa'))
self.assertGreaterEqual(R.vtmlrender('aaaa'), R.vtmlrender('aaaa'))
self.assertLessEqual(R.vtmlrender('aaaa'), R.vtmlrender('bbbb'))
self.assertLessEqual(R.vtmlrender('aaaa'), R.vtmlrender('aaaa'))
self.assertEqual(R.vtmlrender('aaaa'), R.vtmlrender('aaaa'))
def test_add_same_type(self):
a = R.vtmlrender('aaaa')
b = R.vtmlrender('BBBB')
c = a + b
self.assertIsNot(c, a)
self.assertIsNot(c, b)
self.assertEqual(c, R.vtmlrender('aaaaBBBB'))
self.assertEqual(str(c), 'aaaaBBBB')
def test_add_str_type(self):
a = R.vtmlrender('aaaa')
b = 'BBBB'
c = a + b
self.assertIsNot(c, a)
self.assertEqual(c, R.vtmlrender('aaaaBBBB'))
self.assertEqual(str(c), 'aaaaBBBB')
def test_iadd_same_type(self):
a1 = a1_save = R.vtmlrender('aaaa')
a1 += R.vtmlrender('BBBB')
self.assertIs(a1, a1_save)
self.assertEqual(a1, R.vtmlrender('aaaaBBBB'))
self.assertEqual(str(a1), 'aaaaBBBB')
def test_iadd_str_type(self):
a1 = a1_save = R.vtmlrender('aaaa')
a1 += 'BBBB'
self.assertIs(a1, a1_save)
self.assertEqual(a1, R.vtmlrender('aaaaBBBB'))
self.assertEqual(str(a1), 'aaaaBBBB')
def test_right_add_str_type(self):
a = R.vtmlrender('aaaa')
b = 'BBBB' + a
self.assertEqual(b, 'BBBBaaaa')
self.assertIsInstance(b, str)
def test_add_ident(self):
a1 = R.vtmlrender('aaaa')
a2 = a1 + 'BBBB'
self.assertIsNot(a1, a2)
self.assertNotEqual(a1, a2)
def test_iadd_unsupport_type(self):
a1 = R.vtmlrender('foo')
self.assertRaises(TypeError, lambda: a1 + 1)
self.assertRaises(TypeError, lambda: a1 + b'bar')
def test_amp_tail_single_char(self):
""" Without a workaround this hits a bug in HTMLParser. """
t = 'a&b'
self.assertEqual(R.vtmlrender(t, strict=True), t)
def test_amp_tail_double_char(self):
t = 'a&bc'
self.assertEqual(R.vtmlrender(t, strict=True), t)
def test_amp_tail(self):
t = 'a&'
self.assertEqual(R.vtmlrender(t, strict=True), t)
def test_amp_normal(self):
for t in ('a>', '<', '<ss;', '&;', '&abc;<other>'):
self.assertEqual(R.vtmlrender(t, strict=True), t)
def test_wrong_tag_tolerance(self):
bad = ('foobar', 'foo', 'bar')
perms = itertools.permutations(bad)
for starts in perms:
suffix = '<b>valid</b>'
valid = str(R.vtmlrender(suffix, strict=True))
self.assertIn('\033', valid, 'test to validate next portion')
ends = next(perms) # makes for bad closes
buf = ["<%s>asdf</%s>" % (x, y) for x, y in zip(starts, ends)]
line = ''.join(buf)
ugly = str(R.vtmlrender(line + suffix, strict=True))
self.assertIn(valid, ugly)
self.assertEqual(ugly, line + valid, 'partial conv did not work')
def test_pound(self):
for t in ('a#bc', 'a'):
self.assertEqual(R.vtmlrender(t, strict=True), t)
def test_multiply(self):
a = R.vtmlrender('A')
self.assertEqual(a * 2, R.vtmlrender('AA'))
self.assertEqual(a * 3, R.vtmlrender('AAA'))
self.assertEqual((a * 3).text(), 'AAA')
a = R.vtmlrender('AB')
self.assertEqual(a * 2, R.vtmlrender('ABAB'))
self.assertEqual(a * 3, R.vtmlrender('ABABAB'))
self.assertEqual((a * 3).text(), 'ABABAB')
self.assertRaises(TypeError, lambda: 1.5 * a)
self.assertRaises(TypeError, lambda: '' * a)
self.assertRaises(TypeError, lambda: a * a)
def test_inplace_multiply(self):
a = a_save = R.vtmlrender('A ')
a *= 3
self.assertIs(a, a_save)
self.assertEqual(a, R.vtmlrender('A A A '))
self.assertRaises(TypeError, lambda: 1.5 * a)
self.assertRaises(TypeError, lambda: '' * a)
self.assertRaises(TypeError, lambda: a * a)
def test_right_multiply(self):
a = R.vtmlrender('A')
self.assertEqual(2 * a, R.vtmlrender('AA'))
self.assertEqual(3 * a, R.vtmlrender('AAA'))
self.assertEqual((3 * a).text(), 'AAA')
a = R.vtmlrender('AB')
self.assertEqual(2 * a, R.vtmlrender('ABAB'))
self.assertEqual(3 * a, R.vtmlrender('ABABAB'))
self.assertEqual((3 * a).text(), 'ABABAB')
self.assertRaises(TypeError, lambda: 1.5 * a)
self.assertRaises(TypeError, lambda: '' * a)
self.assertRaises(TypeError, lambda: a * a)
def test_left_justify(self):
a = R.vtmlrender('A')
self.assertEqual(a.ljust(0), 'A')
self.assertEqual(a.ljust(1), 'A')
self.assertEqual(a.ljust(2), 'A ')
self.assertEqual(a.ljust(3), 'A ')
def test_startswith(self):
abc = R.vtmlrender('abc')
self.assertIs(abc.startswith(''), True)
self.assertIs(abc.startswith('a'), True)
self.assertIs(abc.startswith('ab'), True)
self.assertIs(abc.startswith('abc'), True)
self.assertIs(abc.startswith('A'), False)
self.assertIs(abc.startswith('ABC'), False)
self.assertIs(abc.startswith('ABCD'), False)
a = R.vtmlrender('<b>a')
b = R.vtmlrender('b')
c = R.vtmlrender('<u>c')
self.assertIs((a + b + c).startswith('abc'), True)
self.assertIs((a + b + c).startswith(''), True)
self.assertIs((a + b + c).startswith('<b>'), False)
self.assertIs((a + b + c).startswith('A'), False)
self.assertIs((a + b + c).startswith('ABC'), False)
self.assertIs((a + b + c).startswith('ABCD'), False)
def test_endswith(self):
abc = R.vtmlrender('abc')
self.assertIs(abc.endswith(''), True)
self.assertIs(abc.endswith('c'), True)
self.assertIs(abc.endswith('bc'), True)
self.assertIs(abc.endswith('abc'), True)
self.assertIs(abc.endswith('C'), False)
self.assertIs(abc.endswith('ABC'), False)
self.assertIs(abc.endswith('ABCD'), False)
abc = R.vtmlrender('<b>a')
abc += R.vtmlrender('b')
abc += R.vtmlrender('<u>c</u>')
self.assertIs(abc.endswith('abc'), True)
self.assertIs(abc.endswith(''), True)
self.assertIs(abc.endswith('</u>'), False)
self.assertIs(abc.endswith('C'), False)
self.assertIs(abc.endswith('ABC'), False)
self.assertIs(abc.endswith('ABCD'), False)
def test_in(self):
abc = R.vtmlrender('abc')
self.assertIn('', abc)
self.assertIn('a', abc)
self.assertIn('abc', abc)
self.assertIn('abc', abc)
self.assertNotIn('A', abc)
self.assertNotIn('ABC', abc)
self.assertNotIn('ABCD', abc)
abc = R.vtmlrender('<b>a') + R.vtmlrender('bc') + R.vtmlrender('<u>c')
self.assertIn('a', abc)
self.assertIn('abc', abc)
self.assertNotIn('<b>', abc)
self.assertNotIn('A', abc)
self.assertNotIn('ABC', abc)
self.assertNotIn('ABCD', abc)
self.assertNotIn('abcd', abc)
def test_split_maxsplit(self):
ref = 'abc ABC xyz XYZ'
vs = R.vtmlrender(ref)
self.assertListEqual(vs.split(), ref.split(' '))
for i in range(6):
with self.subTest(i):
self.assertListEqual(vs.split(maxsplit=i), ref.split(' ', i))
def test_split_leading(self):
for ref in (' ', ' abc', ' ', ' abc'):
vs = R.vtmlrender(ref)
self.assertListEqual(vs.split(), ref.split(' '))
def test_split_trailing(self):
for ref in ('abc ', 'abc ', ' abc ', ' abc '):
vs = R.vtmlrender(ref)
self.assertListEqual(vs.split(), ref.split(' '))
def test_split_notfound(self):
ref = 'abc'
vs = R.vtmlrender(ref)
self.assertListEqual(vs.split('D'), ref.split('D'))
self.assertListEqual(vs.split('abcd'), ref.split('abcd'))
self.assertListEqual(vs.split('bcd'), ref.split('bcd'))
def test_split_multichar(self):
for ref in ('abcGAPABC', 'abcGAP', 'GAPabc', 'abGAPABGAP',
'aGAPbGAPc'):
vs = R.vtmlrender(ref)
self.assertListEqual(vs.split('GAP'), ref.split('GAP'))
class HTMLConversion(unittest.TestCase):
    """Conversion of (a subset of) HTML markup into VTML."""

    # Expected VTML wrapper produced for an <a> tag; %s is the link body.
    a_format = '<blue><u>%s</u></blue>'

    def test_empty(self):
        # Must not raise on empty input.
        R.htmlrender('')

    def test_parity(self):
        # Tags shared by HTML and VTML pass through unchanged.
        for tag in ('b', 'u', 'i'):
            markup = '<%s>stuff</%s>' % (tag, tag)
            self.assertEqual(R.html2vtml(markup), markup)

    def test_noop(self):
        self.assertEqual(R.html2vtml('<script>nope</script>'), '')

    def test_strip(self):
        # <script> content is dropped entirely, keeping surrounding text.
        self.assertEqual(R.html2vtml('<script>nope</script>'), '')
        self.assertEqual(R.html2vtml('before<script>nope</script>after'),
                         'beforeafter')

    def test_icase_tag(self):
        # Tag name matching is case-insensitive.
        t = R.vtmlrender('<b>foo</b>')
        self.assertEqual(R.htmlrender('<B>foo</b>'), t)
        self.assertEqual(R.htmlrender('<B>foo</B>'), t)
        self.assertEqual(R.htmlrender('<b>foo</B>'), t)

    def test_a_tag_no_href(self):
        self.assertEqual(R.html2vtml('<a>foo</a>'), self.a_format % 'foo')

    def test_empty_href(self):
        self.assertEqual(R.html2vtml('<a href>foo</a>'), self.a_format % 'foo')

    def test_unquoted_href(self):
        self.assertEqual(R.html2vtml('<a href=link.here>foo</a>'),
                         self.a_format % 'foo (link.here)')

    def test_quoted_href(self):
        # Single- and double-quoted href values are equivalent.
        self.assertEqual(R.html2vtml('<a href="link.here">foo</a>'),
                         self.a_format % 'foo (link.here)')
        self.assertEqual(R.html2vtml("<a href='link.here'>foo</a>"),
                         self.a_format % 'foo (link.here)')

    def test_icase_href(self):
        # Attribute names match case-insensitively too, in any tag casing.
        for x in ('HREF', 'Href', 'hreF', 'href'):
            self.assertEqual(R.html2vtml('<a %s="link.here">foo</a>' % x),
                             self.a_format % 'foo (link.here)', x)
            self.assertEqual(R.html2vtml('<A %s="link.here">foo</a>' % x),
                             self.a_format % 'foo (link.here)')
            self.assertEqual(R.html2vtml('<A %s="link.here">foo</A>' % x),
                             self.a_format % 'foo (link.here)')
            self.assertEqual(R.html2vtml('<a %s="link.here">foo</A>' % x),
                             self.a_format % 'foo (link.here)')
class MDConversion(unittest.TestCase):
    """Markdown rendering produces the equivalent VTML output."""

    def test_empty(self):
        # Must not raise on an empty document.
        R.mdrender('')

    def test_bold(self):
        # Both ** and __ emphasis map to <b>; mdrender surrounds the output
        # with newlines.
        self.assertEqual(R.mdrender('**foo**'), R.vtmlrender('\n<b>foo</b>\n'))
        self.assertEqual(R.mdrender('__foo__'), R.vtmlrender('\n<b>foo</b>\n'))
|
jypeitao/cppcheck
|
refs/heads/master
|
addons/cppcheckdata.py
|
9
|
## @mainpage cppcheckdata
#
# @brief This is a Python module that helps you access Cppcheck dump data.<br><br>
#
# License: No restrictions, use this as you need.<br><br>
#
import xml.etree.ElementTree as ET
## Token class. Contains information about each token in the source code.
#
# The CppcheckData.tokenlist is a list of Token items
#
# C++ class: http://cppcheck.sourceforge.net/devinfo/doxyoutput/classToken.html
#
# To iterate through all tokens use such code:
# @code
# data = CppcheckData.parsedump(...)
# code = ''
# for token in data.tokenlist:
# code = code + token.str + ' '
# print(code)
# @endcode
#
class Token:
    ## XML 'id' attribute; unique key used by setId() to resolve references.
    Id = None
    ## Token string
    str = None
    ## Next token in tokenlist. For last token, next is None.
    next = None
    ## Previous token in tokenlist. For first token, previous is None.
    previous = None
    ## Id of the linked token; resolved into 'link' by setId().
    linkId = None
    ## Linked token in tokenlist. Each '(', '[' and '{' are linked to the
    # corresponding '}', ']' and ')'. For templates, the '<' is linked to
    # the corresponding '>'.
    link = None
    ## Id of this token's scope; resolved into 'scope' by setId().
    scopeId = None
    ## Scope information for this token. See the Scope class.
    scope = None
    ## Is this token a symbol name
    isName = False
    ## Is this token a number, for example 123, 12.34
    isNumber = False
    ## Is this token a int value such as 1234
    isInt = False
    ## Is this token a float value such as 12.34
    isFloat = False
    ## Is this token a string literal such as "hello"
    isString = False
    ## string length for string literal
    strlen = None
    ## Is this token a char literal such as 'x'
    isChar = False
    ## Is this token a operator
    isOp = False
    ## Is this token a arithmetic operator
    isArithmeticalOp = False
    ## Is this token a assignment operator
    isAssignmentOp = False
    ## Is this token a comparison operator
    isComparisonOp = False
    ## Is this token a logical operator: && ||
    isLogicalOp = False
    ## varId for token, each variable has a unique non-zero id
    varId = None
    ## Id of the Variable; resolved into 'variable' by setId().
    variableId = None
    ## Variable information for this token. See the Variable class.
    #
    # Example code:
    # @code
    # data = CppcheckData.parsedump(...)
    # code = ''
    # for token in data.tokenlist:
    #     code = code + token.str
    #     if token.variable:
    #         if token.variable.isLocal:
    #             code = code + ':localvar'
    #         if token.variable.isArgument:
    #             code = code + ':arg'
    #     code = code + ' '
    # print(code)
    # @endcode
    variable = None
    ## Id of the Function; resolved into 'function' by setId().
    functionId = None
    ## If this token points at a function call, this attribute has the Function
    # information. See the Function class.
    function = None
    ## Id of the value set; resolved into 'values' by setId().
    valuesId = None
    ## Possible values of token
    #
    # Example code:
    # @code
    # data = CppcheckData.parsedump(...)
    # code = ''
    # for token in data.tokenlist:
    #     code = code + token.str
    #     if token.values:
    #         # print values..
    #         code = code + '{'
    #         for value in token.values:
    #             if value.intvalue:
    #                 code = code + str(value.intvalue) + ' '
    #         code = code + '}'
    #     code = code + ' '
    # print(code)
    # @endcode
    values = None
    typeScopeId = None
    ## type scope (token->type()->classScope)
    typeScope = None
    astParentId = None
    ## syntax tree parent
    astParent = None
    astOperand1Id = None
    ## syntax tree operand1
    #
    # Example code:
    # @code
    # data = CppcheckData.parsedump(...)
    # for token in data.tokenlist:
    #
    #     # is this a addition?
    #     if token.str == '+':
    #
    #         # print LHS operand
    #         print(token.astOperand1.str)
    #
    # @endcode
    astOperand1 = None
    astOperand2Id = None
    ## syntax tree operand2
    #
    # Example code:
    # @code
    # data = CppcheckData.parsedump(...)
    # for token in data.tokenlist:
    #
    #     # is this a division?
    #     if token.str == '/':
    #
    #         # print RHS operand
    #         print(token.astOperand2.str)
    #
    # @endcode
    astOperand2 = None
    ## file name
    file = None
    ## line number
    linenr = None

    ## Populate the token from a <token> dump element. Cross references
    # (scope, link, variable, ...) are kept as id strings here and turned
    # into object references later by setId().
    def __init__(self, element):
        self.Id = element.get('id')
        self.str = element.get('str')
        self.next = None
        self.previous = None
        self.scopeId = element.get('scope')
        self.scope = None
        # Token classification: 'name', 'number', 'string', 'char' or 'op'.
        type = element.get('type')
        if type == 'name':
            self.isName = True
        elif type == 'number':
            self.isNumber = True
            if element.get('isInt'):
                self.isInt = True
            elif element.get('isFloat'):
                self.isFloat = True
        elif type == 'string':
            self.isString = True
            self.strlen = int(element.get('strlen'))
        elif type == 'char':
            self.isChar = True
        elif type == 'op':
            self.isOp = True
            if element.get('isArithmeticalOp'):
                self.isArithmeticalOp = True
            elif element.get('isAssignmentOp'):
                self.isAssignmentOp = True
            elif element.get('isComparisonOp'):
                self.isComparisonOp = True
            elif element.get('isLogicalOp'):
                self.isLogicalOp = True
        self.linkId = element.get('link')
        self.link = None
        self.varId = element.get('varId')
        self.variableId = element.get('variable')
        self.variable = None
        self.functionId = element.get('function')
        self.function = None
        self.valuesId = element.get('values')
        self.values = None
        self.typeScopeId = element.get('type-scope')
        self.typeScope = None
        self.astParentId = element.get('astParent')
        self.astParent = None
        self.astOperand1Id = element.get('astOperand1')
        self.astOperand1 = None
        self.astOperand2Id = element.get('astOperand2')
        self.astOperand2 = None
        self.file = element.get('file')
        self.linenr = element.get('linenr')

    ## Replace each stored id with the corresponding object from IdMap.
    # Must be called only after every dump section has been parsed.
    def setId(self, IdMap):
        self.scope = IdMap[self.scopeId]
        self.link = IdMap[self.linkId]
        self.variable = IdMap[self.variableId]
        self.function = IdMap[self.functionId]
        self.values = IdMap[self.valuesId]
        self.typeScope = IdMap[self.typeScopeId]
        self.astParent = IdMap[self.astParentId]
        self.astOperand1 = IdMap[self.astOperand1Id]
        self.astOperand2 = IdMap[self.astOperand2Id]

    ## Get value if it exists
    # Returns None if it doesn't exist
    def getValue(self, v):
        if not self.values:
            return None
        for value in self.values:
            if value.intvalue == v:
                return value
        return None
## Scope. Information about global scope, function scopes, class scopes, inner scopes, etc.
# C++ class: http://cppcheck.sourceforge.net/devinfo/doxyoutput/classScope.html
class Scope:
    ## XML 'id' attribute; unique key used by setId().
    Id = None
    ## Id of the '{' token; resolved into classStart by setId().
    classStartId = None
    ## The { Token for this scope
    classStart = None
    ## Id of the '}' token; resolved into classEnd by setId().
    classEndId = None
    ## The } Token for this scope
    classEnd = None
    ## Name of this scope. For a function scope, this is the function name,
    # For a class scope, this is the class name.
    className = None
    ## Type of scope: Global, Function, Class, If, While
    type = None

    ## Populate the scope from a <scope> dump element; references are kept
    # as id strings until setId() is called.
    def __init__(self, element):
        self.Id = element.get('id')
        self.className = element.get('className')
        self.classStartId = element.get('classStart')
        self.classStart = None
        self.classEndId = element.get('classEnd')
        self.classEnd = None
        # NOTE(review): the dump attribute is probably named 'nestedIn' —
        # reading 'nestedId' looks like a typo and would leave nestedIn
        # permanently None; confirm against the cppcheck dump format.
        self.nestedInId = element.get('nestedId')
        self.nestedIn = None
        self.type = element.get('type')

    ## Replace stored ids with the referenced objects from IdMap.
    def setId(self, IdMap):
        self.classStart = IdMap[self.classStartId]
        self.classEnd = IdMap[self.classEndId]
        self.nestedIn = IdMap[self.nestedInId]
## Information about a function
# C++ class: http://cppcheck.sourceforge.net/devinfo/doxyoutput/classFunction.html
class Function:
    # XML 'id' attribute; unique key used by setId().
    Id = None
    # Maps argument number (string) -> resolved argument object.
    argument = None
    # Maps argument number (string) -> argument id string from the dump.
    argumentId = None
    # Token where the function is defined (resolved by setId()).
    tokenDef = None
    # Id of the definition token.
    tokenDefId = None
    # Function name.
    name = None

    def __init__(self, element):
        # Scalar attributes come straight off the <function> element; each
        # child element describes one argument as (nr, id).
        self.Id = element.get('id')
        self.tokenDefId = element.get('tokenDef')
        self.name = element.get('name')
        self.argument = {}
        self.argumentId = {}
        for child in element:
            self.argumentId[child.get('nr')] = child.get('id')

    def setId(self, IdMap):
        # Resolve every stored id into the object it names.
        self.argument = {nr: IdMap[argid]
                         for nr, argid in self.argumentId.items()}
        self.tokenDef = IdMap[self.tokenDefId]
## Information about a variable
# C++ class: http://cppcheck.sourceforge.net/devinfo/doxyoutput/classVariable.html
class Variable:
    # XML 'id' attribute; unique key used by setId().
    Id = None
    # Id of the name token in the variable declaration.
    nameTokenId = None
    # Name token in the declaration (resolved by setId()).
    nameToken = None
    # Id of the first token of the declaration.
    typeStartTokenId = None
    # First token of the declaration (resolved by setId()).
    typeStartToken = None
    # Id of the last token of the declaration.
    typeEndTokenId = None
    # Last token of the declaration (resolved by setId()).
    typeEndToken = None
    # Classification flags parsed from the dump ('true'/'false' strings).
    isArgument = False
    isArray = False
    isClass = False
    isLocal = False
    isPointer = False
    isReference = False
    isStatic = False

    # Token references shared by __init__ and setId().
    _TOKEN_REFS = ('nameToken', 'typeStartToken', 'typeEndToken')
    _FLAGS = ('isArgument', 'isArray', 'isClass', 'isLocal',
              'isPointer', 'isReference', 'isStatic')

    def __init__(self, element):
        # Token references are stored as '<name>Id' strings and resolved
        # later by setId(); boolean flags are true only for the literal
        # string 'true'.
        self.Id = element.get('id')
        for ref in self._TOKEN_REFS:
            setattr(self, ref + 'Id', element.get(ref))
            setattr(self, ref, None)
        for flag in self._FLAGS:
            setattr(self, flag, element.get(flag) == 'true')

    def setId(self, IdMap):
        # Swap each stored id string for the object it names.
        for ref in self._TOKEN_REFS:
            setattr(self, ref, IdMap[getattr(self, ref + 'Id')])
## ValueFlow class
class ValueFlow:
    ## ValueFlow::Value class
    # Each possible value has a ValueFlow::Value item.
    # Each ValueFlow::Value either has a intvalue or tokvalue
    # C++ class: http://cppcheck.sourceforge.net/devinfo/doxyoutput/classValueFlow_1_1Value.html
    class Value:
        # integer value
        intvalue = None
        # token value
        tokvalue = None
        # condition where this Value comes from
        condition = None

        ## Build one value from a <value> dump element; numeric attributes
        # arrive as strings and are converted to int when present.
        def __init__(self, element):
            self.intvalue = element.get('intvalue')
            if self.intvalue:
                self.intvalue = int(self.intvalue)
            self.tokvalue = element.get('tokvalue')
            self.condition = element.get('condition-line')
            if self.condition:
                self.condition = int(self.condition)

    ## XML 'id' attribute of the <values> element.
    Id = None
    ## Possible values
    values = None

    ## Collect every child <value> element into self.values.
    def __init__(self, element):
        self.Id = element.get('id')
        self.values = []
        for value in element:
            self.values.append(ValueFlow.Value(value))
## Class that makes cppcheck dump data available
#
# To iterate through all tokens use such code:
# @code
# data = CppcheckData.parsedump(...)
# code = ''
# for token in data.tokenlist:
# code = code + token.str + ' '
# print(code)
# @endcode
#
# To iterate through all scopes (functions, types, etc) use such code:
# @code
# data = CppcheckData.parsedump(...)
# for scope in data.scopes:
# print('type:' + scope.type + ' name:' + scope.className)
# @endcode
#
class CppcheckData:
    ## List of Token items
    tokenlist = []
    ## List of Scope items
    scopes = []
    ## List of Function items
    functions = []
    ## List of Variable items
    variables = []
    ## List of ValueFlow values
    valueflow = []

    ## Parse a cppcheck dump file (XML) and resolve all cross references.
    # Works in two passes: first every section is read into objects that
    # hold raw id strings, then an id -> object map is built and each
    # object's setId() swaps the id strings for real references.
    def __init__(self, filename):
        self.tokenlist = []
        self.scopes = []
        self.functions = []
        self.variables = []
        self.valueflow = []
        data = ET.parse(filename)
        for element in data.getroot():
            if element.tag == 'tokenlist':
                for token in element:
                    self.tokenlist.append(Token(token))
                # set next/previous..
                prev = None
                for token in self.tokenlist:
                    token.previous = prev
                    if prev:
                        prev.next = token
                    prev = token
            if element.tag == 'scopes':
                for scope in element:
                    self.scopes.append(Scope(scope))
                    # Functions are nested inside their scope element.
                    for functionList in scope:
                        if functionList.tag == 'functionList':
                            for function in functionList:
                                self.functions.append(Function(function))
            if element.tag == 'variables':
                for variable in element:
                    self.variables.append(Variable(variable))
            if element.tag == 'valueflow':
                for values in element:
                    self.valueflow.append(ValueFlow(values))
        # Second pass: build the id -> object map. None and '0' both mean
        # "no reference".
        IdMap = {}
        IdMap[None] = None
        IdMap['0'] = None
        for token in self.tokenlist:
            IdMap[token.Id] = token
        for scope in self.scopes:
            IdMap[scope.Id] = scope
        for function in self.functions:
            IdMap[function.Id] = function
        for variable in self.variables:
            IdMap[variable.Id] = variable
        for values in self.valueflow:
            # A values-id maps straight to the list of Value objects,
            # not to the ValueFlow wrapper.
            IdMap[values.Id] = values.values
        # Let every parsed object resolve its stored ids.
        for token in self.tokenlist:
            token.setId(IdMap)
        for scope in self.scopes:
            scope.setId(IdMap)
        for function in self.functions:
            function.setId(IdMap)
        for variable in self.variables:
            variable.setId(IdMap)
## parse a cppcheck dump file
# filename -- path of a .dump file produced by 'cppcheck --dump'
# Returns a fully cross-referenced CppcheckData instance.
def parsedump(filename):
    return CppcheckData(filename)
## Check if type of ast node is float/double
# token -- a Token (AST node) or None
# Returns True when the expression rooted at `token` has float/double type.
def astIsFloat(token):
    if not token:
        return False
    if token.str == '.':
        # Member access: the type is decided by the member (astOperand2).
        return astIsFloat(token.astOperand2)
    if token.str in ('+', '-', '*', '/', '%'):
        # Arithmetic: the result is float if either operand is float.
        # BUG FIX: the original used "'+-*/%'.find(token.str) == 0", which
        # is only true for '+', so '-', '*', '/' and '%' expressions were
        # never recognized as float.
        return astIsFloat(token.astOperand1) or astIsFloat(token.astOperand2)
    if not token.variable:
        # Not a variable: check for a float literal such as 12.34, 1.0f, 1E3.
        # ([:1] also guards against an empty token string.)
        # NOTE(review): lowercase exponents ('1e3') and hex literals are not
        # handled; kept as-is to preserve the original classification.
        if token.str[:1].isdigit():
            return any(c in ('f', '.', 'E') for c in token.str)
        return False
    # Variable: scan the declaration tokens for 'float'/'double'.
    typeToken = token.variable.typeStartToken
    endToken = token.variable.typeEndToken
    while True:
        if typeToken.str == 'float' or typeToken.str == 'double':
            return True
        if typeToken == endToken:
            return False
        typeToken = typeToken.next
|
srkiyengar/NewGripper
|
refs/heads/master
|
src/shutter.py
|
1
|
import serial
import struct
import time
# j = 2 means open, j = 1 means close shutter
def command_shutter(port, j):
    # Send a single-byte shutter command to the Arduino over `port`.
    #
    # port -- an already-constructed serial port object (pyserial-style:
    #         provides isOpen() and write()) -- assumed, not created here
    # j    -- command code packed as one unsigned byte ('>B'); per the
    #         comment above, 2 opens the shutter and 1 closes it
    # Returns 1 when the byte was written, 0 when the port is not open.
    # NOTE: Python 2 source (print statement syntax).
    # first, start the serial port to communicate with the arduino
    if port.isOpen():
        print "port open"
        port.write(struct.pack('>B', j))
        return 1
    else:
        return 0
#while(1 == 1):
#cover_or_not = int(input('Enter a number. 1 will cover the Lenses of the NDI, while 2 will open the blinds.'))
#data.write(struct.pack('>B',cover_or_not))
|
napalm-automation/napalm-yang
|
refs/heads/develop
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import prefixes
class ipv4_external_reachability(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-external-reachability. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines list of IPv4 external reachability
information.
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__prefixes")
_yang_name = "ipv4-external-reachability"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__prefixes = YANGDynClass(
base=prefixes.prefixes,
is_container="container",
yang_name="prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv4-external-reachability",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/state (container)
YANG Description: This container describes IPv4 external reachability state
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container describes IPv4 external reachability state
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_prefixes(self):
"""
Getter method for prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes (container)
YANG Description: This container describes IS neighbors.
"""
return self.__prefixes
def _set_prefixes(self, v, load=False):
"""
Setter method for prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefixes() directly.
YANG Description: This container describes IS neighbors.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=prefixes.prefixes,
is_container="container",
yang_name="prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prefixes must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=prefixes.prefixes, is_container='container', yang_name="prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__prefixes = t
if hasattr(self, "_set"):
self._set()
    def _unset_prefixes(self):
        """Reset prefixes to a fresh, default container instance."""
        # Mirrors the construction performed in __init__ for this element.
        self.__prefixes = YANGDynClass(
            base=prefixes.prefixes,
            is_container="container",
            yang_name="prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
state = __builtin__.property(_get_state)
prefixes = __builtin__.property(_get_prefixes)
_pyangbind_elements = OrderedDict([("state", state), ("prefixes", prefixes)])
from . import state
from . import prefixes
class ipv4_external_reachability(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-external-reachability. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines list of IPv4 external reachability
    information.
    """

    # Restrict instances to exactly these attributes.  "__state" and
    # "__prefixes" are name-mangled at class-definition time to
    # _ipv4_external_reachability__state / _ipv4_external_reachability__prefixes.
    __slots__ = ("_path_helper", "_extmethods", "__state", "__prefixes")

    # YANG node name of this container and the pyangbind construct that
    # generated the class.
    _yang_name = "ipv4-external-reachability"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so later assignments
        # are validated against the generated child classes; the whole
        # subtree is read-only state data (is_config=False).
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        self.__prefixes = YANGDynClass(
            base=prefixes.prefixes,
            is_container="container",
            yang_name="prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: a single object exposing the same
            # element attributes may be supplied to initialise this container.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements still at their default value.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                # Forward the optional load flag to the setter when given.
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a data tree; otherwise
        # return this node's absolute YANG path as a list of segments.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "ipv4-external-reachability",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/state (container)

        YANG Description: This container describes IPv4 external reachability state
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: This container describes IPv4 external reachability state
        """
        # Unwrap pyangbind union-helper values before validation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap so the value is type-checked against the generated
            # 'state' container class.
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            # NOTE: the "generated-type" string intentionally mirrors the
            # YANGDynClass construction above, verbatim.
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        # Propagate the change upward when a parent-supplied _set() hook
        # is present (attached by YANGDynClass for child nodes).
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        """Reset state to a fresh, default container instance."""
        # Mirrors the construction performed in __init__ for this element.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    def _get_prefixes(self):
        """
        Getter method for prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes (container)

        YANG Description: This container describes IS neighbors.
        """
        return self.__prefixes

    def _set_prefixes(self, v, load=False):
        """
        Setter method for prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_prefixes is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_prefixes() directly.

        YANG Description: This container describes IS neighbors.
        """
        # Unwrap pyangbind union-helper values before validation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap so the value is type-checked against the generated
            # 'prefixes' container class.
            t = YANGDynClass(
                v,
                base=prefixes.prefixes,
                is_container="container",
                yang_name="prefixes",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            # NOTE: the "generated-type" string intentionally mirrors the
            # YANGDynClass construction above, verbatim.
            raise ValueError(
                {
                    "error-string": """prefixes must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=prefixes.prefixes, is_container='container', yang_name="prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__prefixes = t
        # Propagate the change upward when a parent-supplied _set() hook
        # is present.
        if hasattr(self, "_set"):
            self._set()

    def _unset_prefixes(self):
        """Reset prefixes to a fresh, default container instance."""
        # Mirrors the construction performed in __init__ for this element.
        self.__prefixes = YANGDynClass(
            base=prefixes.prefixes,
            is_container="container",
            yang_name="prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property wrappers (no setters: this subtree is config-false
    # state data; backends populate it through the private _set_* methods).
    state = __builtin__.property(_get_state)
    prefixes = __builtin__.property(_get_prefixes)

    # Ordered registry of the YANG child elements, used by the PybindBase
    # machinery for iteration and serialisation.
    _pyangbind_elements = OrderedDict([("state", state), ("prefixes", prefixes)])