text
stringlengths
4
1.02M
meta
dict
"""Tests for the command line interface.""" import json import logging import os import traceback import unittest from click.testing import CliRunner from pybel import Manager, cli from pybel.constants import METADATA_NAME, PYBEL_CONTEXT_TAG from pybel.io import from_bel_script, from_nodelink, from_pickle from pybel.manager.database_io import from_database from pybel.testing.cases import FleetingTemporaryCacheMixin from pybel.testing.constants import test_bel_simple, test_bel_thorough from pybel.testing.mocks import mock_bel_resources from tests.constants import BelReconstitutionMixin, expected_test_thorough_metadata log = logging.getLogger(__name__) @unittest.skip class TestCli(FleetingTemporaryCacheMixin, BelReconstitutionMixin): def setUp(self): super(TestCli, self).setUp() self.runner = CliRunner() @mock_bel_resources def test_convert(self, mock_get): """Test conversion via the CLI.""" with self.runner.isolated_filesystem(): test_csv = os.path.abspath("test.csv") test_gpickle = os.path.abspath("test.gpickle") test_canon = os.path.abspath("test.bel") args = [ "convert", # Input "--path", test_bel_thorough, "--connection", self.connection, # Outputs "--csv", test_csv, "--pickle", test_gpickle, "--bel", test_canon, "--store", "--allow-nested", ] result = self.runner.invoke(cli.main, args) self.assertEqual( 0, result.exit_code, msg="{}\n{}\n{}".format( result.exc_info[0], result.exc_info[1], traceback.format_tb(result.exc_info[2]), ), ) self.assertTrue(os.path.exists(test_csv)) self.bel_thorough_reconstituted(from_pickle(test_gpickle)) self.bel_thorough_reconstituted(from_bel_script(test_canon)) manager = Manager(connection=self.connection) self.bel_thorough_reconstituted( from_database(expected_test_thorough_metadata[METADATA_NAME], manager=manager) ) @mock_bel_resources def test_convert_json(self, mock_get): with self.runner.isolated_filesystem(): test_json = os.path.abspath("test.json") args = [ "convert", "--path", test_bel_thorough, "--json", test_json, 
"--connection", self.connection, "--allow-nested", ] result = self.runner.invoke(cli.main, args) self.assertEqual(0, result.exit_code, msg=result.exc_info) with open(test_json) as f: self.bel_thorough_reconstituted(from_nodelink(json.load(f))) @unittest.skipUnless("NEO_PATH" in os.environ, "Need environmental variable $NEO_PATH") @mock_bel_resources def test_neo4j_remote(self, mock_get): from py2neo import Graph from py2neo.database.status import GraphError test_context = "PYBEL_TEST_CTX" neo_path = os.environ["NEO_PATH"] try: neo = Graph(neo_path) neo.data('match (n)-[r]->() where r.{}="{}" detach delete n'.format(PYBEL_CONTEXT_TAG, test_context)) except GraphError: self.skipTest("Can't query Neo4J ") except Exception: self.skipTest("Can't connect to Neo4J server") else: with self.runner.isolated_filesystem(): args = [ "convert", "--path", test_bel_simple, "--connection", self.connection, "--neo", neo_path, "--neo-context", test_context, ] self.runner.invoke(cli.main, args) q = 'match (n)-[r]->() where r.{}="{}" return count(n) as count'.format(PYBEL_CONTEXT_TAG, test_context) count = neo.data(q)[0]["count"] self.assertEqual(14, count)
{ "content_hash": "3872e0873e01589ff68bec895126eaa8", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 120, "avg_line_length": 33.61832061068702, "alnum_prop": 0.534741144414169, "repo_name": "pybel/pybel", "id": "22f7e64e70bf8d37cb2bcc1b915ecb07da1e4bcb", "size": "4429", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_cli.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "880" }, { "name": "JavaScript", "bytes": "9473" }, { "name": "Jupyter Notebook", "bytes": "52170" }, { "name": "Python", "bytes": "1475429" } ], "symlink_target": "" }
import sys
import enum
import http
import time
import asyncio
import warnings

from types import TracebackType
from typing import Generic, Optional, Union, Type, List, Sequence, Any, cast
from typing import Dict, TYPE_CHECKING

try:
    import ssl as _ssl
except ImportError:
    _ssl = None  # type: ignore

from h2.config import H2Configuration
from multidict import MultiDict

from .utils import Wrapper, DeadlineWrapper
from .const import Status, Cardinality
from .config import Configuration
from .stream import send_message, recv_message, StreamIterator
from .stream import _RecvType, _SendType
from .events import _DispatchChannelEvents
from .protocol import H2Protocol, AbstractHandler, Stream as _Stream, Peer
from .metadata import Deadline, USER_AGENT, decode_grpc_message, encode_timeout
from .metadata import encode_metadata, decode_metadata, _MetadataLike, _Metadata
from .metadata import _STATUS_DETAILS_KEY, decode_bin_value
from .exceptions import GRPCError, ProtocolError, StreamTerminatedError
from .encoding.base import GRPC_CONTENT_TYPE, CodecBase, StatusDetailsCodecBase
from .encoding.proto import ProtoCodec, ProtoStatusDetailsCodec
from .encoding.proto import _googleapis_available
from ._registry import channels as _channels

if TYPE_CHECKING:
    from ._typing import IReleaseStream  # noqa


_H2_OK = '200'

# https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md
_H2_TO_GRPC_STATUS_MAP = {
    # 400
    str(http.HTTPStatus.BAD_REQUEST.value): Status.INTERNAL,
    # 401
    str(http.HTTPStatus.UNAUTHORIZED.value): Status.UNAUTHENTICATED,
    # 403
    str(http.HTTPStatus.FORBIDDEN.value): Status.PERMISSION_DENIED,
    # 404
    str(http.HTTPStatus.NOT_FOUND.value): Status.UNIMPLEMENTED,
    # 502
    str(http.HTTPStatus.BAD_GATEWAY.value): Status.UNAVAILABLE,
    # 503
    str(http.HTTPStatus.SERVICE_UNAVAILABLE.value): Status.UNAVAILABLE,
    # 504
    str(http.HTTPStatus.GATEWAY_TIMEOUT.value): Status.UNAVAILABLE,
    # 429
    str(http.HTTPStatus.TOO_MANY_REQUESTS.value): Status.UNAVAILABLE,
}


class Handler(AbstractHandler):
    # Client-side protocol handler: a client connection never accepts
    # inbound requests; it only tracks whether the transport was lost.
    connection_lost = False

    def accept(self, stream: Any, headers: Any, release_stream: Any) -> None:
        raise NotImplementedError('Client connection can not accept requests')

    def cancel(self, stream: Any) -> None:
        pass

    def close(self) -> None:
        self.connection_lost = True


class Stream(StreamIterator[_RecvType], Generic[_SendType, _RecvType]):
    """
    Represents gRPC method call - HTTP/2 request/stream, and everything you
    need to communicate with server in order to get response.

    In order to work directly with stream, you should
    :py:meth:`ServiceMethod.open` request like this:

    .. code-block:: python3

        request = cafe_pb2.LatteOrder(
            size=cafe_pb2.SMALL,
            temperature=70,
            sugar=3,
        )
        async with client.MakeLatte.open() as stream:
            await stream.send_message(request, end=True)
            reply: empty_pb2.Empty = await stream.recv_message()
    """
    # stream state
    _send_request_done = False
    _send_message_done = False
    _end_done = False
    _recv_initial_metadata_done = False
    _recv_trailing_metadata_done = False
    _cancel_done = False
    _trailers_only: Optional[bool] = None

    _stream: _Stream
    _release_stream: 'IReleaseStream'
    _wrapper_ctx = None

    #: This property contains initial metadata, received with headers from
    #: the server. It equals to ``None`` initially, and to a multi-dict object
    #: after :py:meth:`recv_initial_metadata` coroutine succeeds.
    initial_metadata: Optional[_Metadata] = None

    #: This property contains trailing metadata, received with trailers from
    #: the server. It equals to ``None`` initially, and to a multi-dict object
    #: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
    trailing_metadata: Optional[_Metadata] = None

    #: Connection's peer info of type :py:class:`~grpclib.protocol.Peer`
    peer: Optional[Peer] = None

    # stats
    _messages_sent = 0
    _messages_received = 0

    def __init__(
        self,
        channel: 'Channel',
        method_name: str,
        metadata: _Metadata,
        cardinality: Cardinality,
        send_type: Type[_SendType],
        recv_type: Type[_RecvType],
        *,
        codec: CodecBase,
        status_details_codec: Optional[StatusDetailsCodecBase],
        dispatch: _DispatchChannelEvents,
        deadline: Optional[Deadline] = None,
    ) -> None:
        self._channel = channel
        self._method_name = method_name
        self._metadata = metadata
        self._cardinality = cardinality
        self._send_type = send_type
        self._recv_type = recv_type
        self._codec = codec
        self._status_details_codec = status_details_codec
        self._dispatch = dispatch
        self._deadline = deadline

    async def send_request(self, *, end: bool = False) -> None:
        """Coroutine to send request headers with metadata to the server.

        New HTTP/2 stream will be created during this coroutine call.

        .. note:: This coroutine will be called implicitly during first
            :py:meth:`send_message` coroutine call, if not called before
            explicitly.

        :param end: end outgoing stream if there are no messages to send in
            a streaming request
        """
        if self._send_request_done:
            raise ProtocolError('Request is already sent')

        if end and not self._cardinality.client_streaming:
            raise ProtocolError('Unary request requires a message to be sent '
                                'before ending outgoing stream')

        with self._wrapper:
            protocol = await self._channel.__connect__()
            stream = protocol.processor.connection\
                .create_stream(wrapper=self._wrapper)

            headers = [
                (':method', 'POST'),
                (':scheme', self._channel._scheme),
                (':path', self._method_name),
                (':authority', self._channel._authority),
            ]
            if self._deadline is not None:
                timeout = self._deadline.time_remaining()
                headers.append(('grpc-timeout', encode_timeout(timeout)))
            # FIXME: remove this check after this issue gets resolved:
            # https://github.com/googleapis/googleapis.github.io/issues/27
            if self._codec.__content_subtype__ == 'proto':
                content_type = GRPC_CONTENT_TYPE
            else:
                content_type = (GRPC_CONTENT_TYPE
                                + '+' + self._codec.__content_subtype__)
            headers.extend((
                ('te', 'trailers'),
                ('content-type', content_type),
                ('user-agent', USER_AGENT),
            ))
            metadata, = await self._dispatch.send_request(
                self._metadata,
                method_name=self._method_name,
                deadline=self._deadline,
                content_type=content_type,
            )
            headers.extend(encode_metadata(metadata))

            release_stream = await stream.send_request(
                headers, end_stream=end, _processor=protocol.processor,
            )
            self._stream = stream
            self._release_stream = release_stream
            self.peer = self._stream.connection.get_peer()
            self._send_request_done = True
            if end:
                self._end_done = True

    async def send_message(
        self,
        message: _SendType,
        *,
        end: bool = False,
    ) -> None:
        """Coroutine to send message to the server.

        If client sends UNARY request, then you should call this coroutine
        only once. If client sends STREAM request, then you can call this
        coroutine as many times as you need.

        .. warning:: It is important to finally end stream from the
            client-side when you finished sending messages.

        You can do this in two ways:

        - specify ``end=True`` argument while sending last message - and last
          DATA frame will include END_STREAM flag;
        - call :py:meth:`end` coroutine after sending last message - and extra
          HEADERS frame with END_STREAM flag will be sent.

        First approach is preferred, because it doesn't require sending
        additional HTTP/2 frame.
        """
        if not self._send_request_done:
            await self.send_request()

        end_stream = end
        if not self._cardinality.client_streaming:
            if self._send_message_done:
                raise ProtocolError('Message was already sent')
            else:
                # a unary request is always a single message, so the DATA
                # frame can carry END_STREAM implicitly
                end_stream = True

        if self._end_done:
            raise ProtocolError('Stream is ended')

        with self._wrapper:
            message, = await self._dispatch.send_message(message)
            await send_message(self._stream, self._codec, message,
                               self._send_type, end=end_stream)
            self._send_message_done = True
            self._messages_sent += 1
            self._stream.connection.messages_sent += 1
            self._stream.connection.last_message_sent = time.monotonic()
            if end:
                self._end_done = True

    async def end(self) -> None:
        """Coroutine to end stream from the client-side.

        It should be used to finally end stream from the client-side when
        we're finished sending messages to the server and stream wasn't
        closed with last DATA frame. See :py:meth:`send_message` for more
        details.

        HTTP/2 stream will have half-closed (local) state after this coroutine
        call.
        """
        if not self._send_request_done:
            raise ProtocolError('Request was not sent')
        if self._end_done:
            raise ProtocolError('Stream was already ended')
        if not self._cardinality.client_streaming:
            if not self._send_message_done:
                raise ProtocolError('Unary request requires a single message '
                                    'to be sent')
            else:
                # `send_message` must already ended stream
                self._end_done = True
                return
        else:
            await self._stream.end()
            self._end_done = True

    def _raise_for_status(self, headers_map: Dict[str, str]) -> None:
        # FIX: use .get() so a response without a ":status" pseudo-header is
        # tolerated; previously headers_map[':status'] would raise KeyError,
        # making the subsequent "is not None" guard dead code.
        status = headers_map.get(':status')
        if status is not None and status != _H2_OK:
            grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
            raise GRPCError(grpc_status,
                            'Received :status = {!r}'.format(status))

    def _raise_for_content_type(self, headers_map: Dict[str, str]) -> None:
        content_type = headers_map.get('content-type')
        if content_type is None:
            raise GRPCError(Status.UNKNOWN, 'Missing content-type header')

        base_content_type, _, sub_type = content_type.partition('+')
        sub_type = sub_type or ProtoCodec.__content_subtype__
        if (
            base_content_type != GRPC_CONTENT_TYPE
            or sub_type != self._codec.__content_subtype__
        ):
            raise GRPCError(Status.UNKNOWN, 'Invalid content-type: {!r}'
                                            .format(content_type))

    def _raise_for_grpc_status(self, headers_map: Dict[str, str]) -> None:
        grpc_status = headers_map.get('grpc-status')
        if grpc_status is None:
            raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
        try:
            status = Status(int(grpc_status))
        except ValueError:
            raise GRPCError(Status.UNKNOWN, ('Invalid grpc-status: {!r}'
                                             .format(grpc_status)))
        else:
            if status is not Status.OK:
                message = headers_map.get('grpc-message')
                if message is not None:
                    message = decode_grpc_message(message)
                details = None
                if self._status_details_codec is not None:
                    details_bin = headers_map.get(_STATUS_DETAILS_KEY)
                    if details_bin is not None:
                        details = self._status_details_codec.decode(
                            status,
                            message,
                            decode_bin_value(details_bin.encode('ascii'))
                        )
                raise GRPCError(status, message, details)

    async def recv_initial_metadata(self) -> None:
        """Coroutine to wait for headers with initial metadata from the
        server.

        .. note:: This coroutine will be called implicitly during first
            :py:meth:`recv_message` coroutine call, if not called before
            explicitly.

        May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
        non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
        response.

        When this coroutine finishes, you can access received initial metadata
        by using :py:attr:`initial_metadata` attribute.
        """
        if not self._send_request_done:
            raise ProtocolError('Request was not sent yet')
        if self._recv_initial_metadata_done:
            raise ProtocolError('Initial metadata was already received')

        with self._wrapper:
            headers = await self._stream.recv_headers()
            self._recv_initial_metadata_done = True
            headers_map = dict(headers)
            self._raise_for_status(headers_map)
            self._raise_for_content_type(headers_map)
            if 'grpc-status' in headers_map:  # trailers-only response
                self._trailers_only = True

                im = cast(_Metadata, MultiDict())
                im, = await self._dispatch.recv_initial_metadata(im)
                self.initial_metadata = im

                tm = decode_metadata(headers)
                tm, = await self._dispatch.recv_trailing_metadata(tm)
                self.trailing_metadata = tm

                self._raise_for_grpc_status(headers_map)
            else:
                im = decode_metadata(headers)
                im, = await self._dispatch.recv_initial_metadata(im)
                self.initial_metadata = im

    async def recv_message(self) -> Optional[_RecvType]:
        """Coroutine to receive incoming message from the server.

        If server sends UNARY response, then you can call this coroutine
        only once. If server sends STREAM response, then you should call this
        coroutine several times, until it returns None. To simplify you code
        in this case, :py:class:`Stream` implements async iterations protocol,
        so you can use it like this:

        .. code-block:: python3

            async for message in stream:
                do_smth_with(message)

        or even like this:

        .. code-block:: python3

            messages = [msg async for msg in stream]

        HTTP/2 has flow control mechanism, so client will acknowledge received
        DATA frames as a message only after user consumes this coroutine.

        :returns: message
        """
        if not self._recv_initial_metadata_done:
            await self.recv_initial_metadata()

        with self._wrapper:
            message = await recv_message(self._stream, self._codec,
                                         self._recv_type)
            if message is not None:
                message, = await self._dispatch.recv_message(message)
                self._messages_received += 1
                self._stream.connection.messages_received += 1
                self._stream.connection.last_message_received = \
                    time.monotonic()
                return message
            else:
                return None

    async def recv_trailing_metadata(self) -> None:
        """Coroutine to wait for trailers with trailing metadata from the
        server.

        .. note:: This coroutine will be called implicitly at exit from
            this call (context manager's exit), if not called before
            explicitly.

        May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
        non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.

        When this coroutine finishes, you can access received trailing
        metadata by using :py:attr:`trailing_metadata` attribute.
        """
        if (not self._end_done  # explicit end
                and not (not self._cardinality.client_streaming  # implicit
                         and self._send_message_done)):
            raise ProtocolError('Outgoing stream was not ended')
        if not self._recv_initial_metadata_done:
            raise ProtocolError('Initial metadata was not received before '
                                'waiting for trailing metadata')
        if self._recv_trailing_metadata_done:
            raise ProtocolError('Trailing metadata was already received')

        if self._trailers_only:
            # trailing metadata already arrived with the headers
            self._recv_trailing_metadata_done = True
        else:
            with self._wrapper:
                trailers = await self._stream.recv_trailers()
                self._recv_trailing_metadata_done = True

                tm = decode_metadata(trailers)
                tm, = await self._dispatch.recv_trailing_metadata(tm)
                self.trailing_metadata = tm

                self._raise_for_grpc_status(dict(trailers))

    async def cancel(self) -> None:
        """Coroutine to cancel this request/stream.

        Client will send RST_STREAM frame to the server, so it will be
        explicitly informed that there is nothing to expect from the client
        regarding this request/stream.
        """
        if not self._send_request_done:
            raise ProtocolError('Request was not sent yet')
        if self._cancel_done:
            raise ProtocolError('Stream was already cancelled')

        with self._wrapper:
            await self._stream.reset()  # TODO: specify error code
            self._cancel_done = True

    async def __aenter__(self) -> 'Stream[_SendType, _RecvType]':
        if self._deadline is None:
            self._wrapper = Wrapper()
        else:
            self._wrapper = DeadlineWrapper()
            self._wrapper_ctx = self._wrapper.start(self._deadline)
            self._wrapper_ctx.__enter__()
        self._channel._calls_started += 1
        self._channel._last_call_started = time.monotonic()
        return self

    async def _maybe_finish(self) -> None:
        # Drain the response on clean context-manager exit so the stream
        # ends in a well-defined state.
        if (
            not self._cancel_done
            and not self._stream._transport.is_closing()
        ):
            if not self._recv_initial_metadata_done:
                await self.recv_initial_metadata()
            if not self._recv_trailing_metadata_done:
                await self.recv_trailing_metadata()

    def _maybe_raise(self) -> None:
        # Translate whatever status information the server managed to send
        # before termination into a GRPCError.
        if self._stream.headers is not None:
            self._raise_for_status(dict(self._stream.headers))
        if self._stream.trailers is not None:
            self._raise_for_grpc_status(dict(self._stream.trailers))
        elif self._stream.headers is not None:
            headers_map = dict(self._stream.headers)
            if 'grpc-status' in headers_map:
                self._raise_for_grpc_status(headers_map)

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        if not self._send_request_done:
            return
        try:
            reraise = False
            if exc_val is None:
                try:
                    await self._maybe_finish()
                except Exception:
                    exc_type, exc_val, exc_tb = sys.exc_info()
                    reraise = True
            if isinstance(exc_val, StreamTerminatedError):
                # prefer a GRPCError built from server-sent status over the
                # low-level termination error
                self._maybe_raise()
            if reraise:
                assert exc_val is not None
                raise exc_val
        finally:
            if self._stream.closable:
                self._stream.reset_nowait()
            self._release_stream()
            if self._wrapper_ctx is not None:
                self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
            if exc_val is None:
                self._channel._calls_succeeded += 1
            else:
                self._channel._calls_failed += 1


class _ChannelState(enum.IntEnum):
    IDLE = 1
    CONNECTING = 2
    READY = 3
    TRANSIENT_FAILURE = 4


class Channel:
    """
    Represents a connection to the server, which can be used with generated
    stub classes to perform gRPC calls.

    .. code-block:: python3

        channel = Channel()
        client = cafe_grpc.CoffeeMachineStub(channel)

        ...

        request = cafe_pb2.LatteOrder(
            size=cafe_pb2.SMALL,
            temperature=70,
            sugar=3,
        )
        reply: empty_pb2.Empty = await client.MakeLatte(request)

        ...

        channel.close()
    """
    _protocol = None

    # stats
    _calls_started = 0
    _calls_succeeded = 0
    _calls_failed = 0
    _last_call_started: Optional[float] = None

    def __init__(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        *,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        path: Optional[str] = None,
        codec: Optional[CodecBase] = None,
        status_details_codec: Optional[StatusDetailsCodecBase] = None,
        ssl: Union[None, bool, '_ssl.SSLContext'] = None,
        config: Optional[Configuration] = None,
    ):
        """Initialize connection to the server

        :param host: server host name.

        :param port: server port number.

        :param loop: (deprecated) asyncio-compatible event loop

        :param path: server socket path. If specified, host and port should be
            omitted (must be None).

        :param codec: instance of a codec to encode and decode messages,
            if omitted ``ProtoCodec`` is used by default

        :param status_details_codec: instance of a status details codec to
            decode error details in a trailing metadata, if omitted
            ``ProtoStatusDetailsCodec`` is used by default

        :param ssl: ``True`` or :py:class:`~python:ssl.SSLContext` object; if
            ``True``, default SSL context is used.

        :param config: configuration object, uses default configuration if
            omitted
        """
        if path is not None and (host is not None or port is not None):
            raise ValueError("The 'path' parameter can not be used with the "
                             "'host' or 'port' parameters.")
        else:
            if host is None:
                host = '127.0.0.1'

            if port is None:
                port = 50051

        if ssl is True:
            ssl = self._get_default_ssl_context()

        if codec is None:
            codec = ProtoCodec()
            if status_details_codec is None and _googleapis_available():
                status_details_codec = ProtoStatusDetailsCodec()

        if loop:
            warnings.warn("The loop argument is deprecated and scheduled "
                          "for removal in grpclib 0.5",
                          DeprecationWarning, stacklevel=2)

        self._host = host
        self._port = port
        self._loop = loop or asyncio.get_event_loop()
        self._path = path
        self._codec = codec
        self._status_details_codec = status_details_codec
        self._ssl = ssl or None
        self._scheme = 'https' if self._ssl else 'http'
        # FIX: this assignment was duplicated (it appeared both before and
        # after the _scheme assignment); one copy is enough.
        self._authority = '{}:{}'.format(self._host, self._port)
        self._h2_config = H2Configuration(
            client_side=True,
            header_encoding='ascii',
            validate_inbound_headers=False,
            validate_outbound_headers=False,
            normalize_inbound_headers=False,
            normalize_outbound_headers=False,
        )
        self._connect_lock = asyncio.Lock()
        self._state = _ChannelState.IDLE
        config = Configuration() if config is None else config
        self._config = config.__for_client__()
        self.__dispatch__ = _DispatchChannelEvents()
        _channels.add(self)

    def __repr__(self) -> str:
        return ('Channel({!r}, {!r}, ..., path={!r})'
                .format(self._host, self._port, self._path))

    def _protocol_factory(self) -> H2Protocol:
        return H2Protocol(Handler(), self._config, self._h2_config)

    async def _create_connection(self) -> H2Protocol:
        if self._path is not None:
            _, protocol = await self._loop.create_unix_connection(
                self._protocol_factory, self._path, ssl=self._ssl,
            )
        else:
            _, protocol = await self._loop.create_connection(
                self._protocol_factory, self._host, self._port,
                ssl=self._ssl,
            )
        return cast(H2Protocol, protocol)

    @property
    def _connected(self) -> bool:
        return (self._protocol is not None
                and not self._protocol.handler.connection_lost)

    async def __connect__(self) -> H2Protocol:
        if not self._connected:
            async with self._connect_lock:
                self._state = _ChannelState.CONNECTING
                # double-check under the lock: another task may have
                # connected while we were waiting
                if not self._connected:
                    try:
                        self._protocol = await self._create_connection()
                    except Exception:
                        self._state = _ChannelState.TRANSIENT_FAILURE
                        raise
                    else:
                        self._state = _ChannelState.READY
        return cast(H2Protocol, self._protocol)

    # https://python-hyper.org/projects/h2/en/stable/negotiating-http2.html
    def _get_default_ssl_context(self) -> '_ssl.SSLContext':
        if _ssl is None:
            raise RuntimeError('SSL is not supported.')

        try:
            import certifi
        except ImportError:
            cafile = None
        else:
            cafile = certifi.where()

        ctx = _ssl.create_default_context(
            purpose=_ssl.Purpose.SERVER_AUTH,
            cafile=cafile,
        )
        ctx.options |= (_ssl.OP_NO_TLSv1 | _ssl.OP_NO_TLSv1_1)
        ctx.set_ciphers('ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20')
        ctx.set_alpn_protocols(['h2'])
        try:
            ctx.set_npn_protocols(['h2'])
        except NotImplementedError:
            pass
        return ctx

    def request(
        self,
        name: str,
        cardinality: Cardinality,
        request_type: Type[_SendType],
        reply_type: Type[_RecvType],
        *,
        timeout: Optional[float] = None,
        deadline: Optional[Deadline] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> Stream[_SendType, _RecvType]:
        if timeout is not None and deadline is None:
            deadline = Deadline.from_timeout(timeout)
        elif timeout is not None and deadline is not None:
            # both given: use the earlier one
            deadline = min(Deadline.from_timeout(timeout), deadline)

        metadata = cast(_Metadata, MultiDict(metadata or ()))

        return Stream(self, name, metadata, cardinality,
                      request_type, reply_type,
                      codec=self._codec,
                      status_details_codec=self._status_details_codec,
                      dispatch=self.__dispatch__,
                      deadline=deadline)

    def close(self) -> None:
        """Closes connection to the server.
        """
        if self._protocol is not None:
            self._protocol.processor.close()
            del self._protocol
        self._state = _ChannelState.IDLE

    def __del__(self) -> None:
        if self._protocol is not None:
            message = 'Unclosed connection: {!r}'.format(self)
            warnings.warn(message, ResourceWarning)
            if self._loop.is_closed():
                return
            else:
                self.close()
                self._loop.call_exception_handler({'message': message})

    async def __aenter__(self) -> 'Channel':
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.close()


class ServiceMethod(Generic[_SendType, _RecvType]):
    """
    Base class for all gRPC method types
    """
    def __init__(
        self,
        channel: Channel,
        name: str,
        request_type: Type[_SendType],
        reply_type: Type[_RecvType],
    ):
        self.channel = channel
        self.name = name
        self.request_type = request_type
        self.reply_type = reply_type

    @property
    def _cardinality(self) -> Cardinality:
        raise NotImplementedError

    def open(
        self,
        *,
        timeout: Optional[float] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> Stream[_SendType, _RecvType]:
        """Creates and returns :py:class:`Stream` object to perform request
        to the server.

        Nothing will happen to the current underlying HTTP/2 connection during
        this method call. It just initializes :py:class:`Stream` object for
        you. Actual request will be sent only during
        :py:meth:`Stream.send_request` or :py:meth:`Stream.send_message`
        coroutine call.

        :param float timeout: request timeout (seconds)

        :param metadata: custom request metadata, dict or list of pairs

        :return: :py:class:`Stream` object
        """
        return self.channel.request(self.name, self._cardinality,
                                    self.request_type, self.reply_type,
                                    timeout=timeout, metadata=metadata)


class UnaryUnaryMethod(ServiceMethod[_SendType, _RecvType]):
    """
    Represents UNARY-UNARY gRPC method type.

    .. automethod:: __call__
    .. automethod:: open
    """
    _cardinality = Cardinality.UNARY_UNARY

    async def __call__(
        self,
        message: _SendType,
        *,
        timeout: Optional[float] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> _RecvType:
        """Coroutine to perform defined call.

        :param message: message

        :param float timeout: request timeout (seconds)

        :param metadata: custom request metadata, dict or list of pairs

        :return: message
        """
        async with self.open(timeout=timeout, metadata=metadata) as stream:
            await stream.send_message(message, end=True)
            reply = await stream.recv_message()
        assert reply is not None
        return reply


class UnaryStreamMethod(ServiceMethod[_SendType, _RecvType]):
    """
    Represents UNARY-STREAM gRPC method type.

    .. automethod:: __call__
    .. automethod:: open
    """
    _cardinality = Cardinality.UNARY_STREAM

    async def __call__(
        self,
        message: _SendType,
        *,
        timeout: Optional[float] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> List[_RecvType]:
        """Coroutine to perform defined call.

        :param message: message

        :param float timeout: request timeout (seconds)

        :param metadata: custom request metadata, dict or list of pairs

        :return: sequence of messages
        """
        async with self.open(timeout=timeout, metadata=metadata) as stream:
            await stream.send_message(message, end=True)
            return [message async for message in stream]


class StreamUnaryMethod(ServiceMethod[_SendType, _RecvType]):
    """
    Represents STREAM-UNARY gRPC method type.

    .. automethod:: __call__
    .. automethod:: open
    """
    _cardinality = Cardinality.STREAM_UNARY

    async def __call__(
        self,
        messages: Sequence[_SendType],
        *,
        timeout: Optional[float] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> _RecvType:
        """Coroutine to perform defined call.

        :param messages: sequence of messages

        :param float timeout: request timeout (seconds)

        :param metadata: custom request metadata, dict or list of pairs

        :return: message
        """
        async with self.open(timeout=timeout, metadata=metadata) as stream:
            for message in messages[:-1]:
                await stream.send_message(message)
            if messages:
                await stream.send_message(messages[-1], end=True)
            else:
                await stream.send_request(end=True)
            reply = await stream.recv_message()
        assert reply is not None
        return reply


class StreamStreamMethod(ServiceMethod[_SendType, _RecvType]):
    """
    Represents STREAM-STREAM gRPC method type.

    .. automethod:: __call__
    .. automethod:: open
    """
    _cardinality = Cardinality.STREAM_STREAM

    async def __call__(
        self,
        messages: Sequence[_SendType],
        *,
        timeout: Optional[float] = None,
        metadata: Optional[_MetadataLike] = None,
    ) -> List[_RecvType]:
        """Coroutine to perform defined call.

        :param messages: sequence of messages

        :param float timeout: request timeout (seconds)

        :param metadata: custom request metadata, dict or list of pairs

        :return: sequence of messages
        """
        async with self.open(timeout=timeout, metadata=metadata) as stream:
            for message in messages[:-1]:
                await stream.send_message(message)
            if messages:
                await stream.send_message(messages[-1], end=True)
            else:
                await stream.send_request(end=True)
            return [message async for message in stream]
{ "content_hash": "0a4040bf2b16e011d2f164ee00b8e17f", "timestamp": "", "source": "github", "line_count": 949, "max_line_length": 80, "avg_line_length": 35.6986301369863, "alnum_prop": 0.5865753586398252, "repo_name": "vmagamedov/asyncgrpc", "id": "c6da36db1ccfca1bfd9125750ba3021a603df27e", "size": "33878", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "grpclib/client.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "585" }, { "name": "Protocol Buffer", "bytes": "2287" }, { "name": "Python", "bytes": "31945" } ], "symlink_target": "" }
import pyexpat from iptest import IronPythonTestCase, run_test class PyExpatTest(IronPythonTestCase): def test_incremental_parsing(self): """https://github.com/IronLanguages/ironpython3/pull/680""" res = [] def start_element(name, attrs): res.append("<{}>".format(name)) def end_element(name): res.append("</{}>".format(name)) def char_data(data): res.append(data) parser = pyexpat.ParserCreate() parser.StartElementHandler = start_element parser.EndElementHandler = end_element parser.CharacterDataHandler = char_data data = "<e>abc\U00010000xyz</e>" parser.Parse(data[:7], False) parser.Parse(data[7:], True) self.assertEqual(data, "".join(res)) run_test(__name__)
{ "content_hash": "0b441be36e9105b6c34be89bac97d99a", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 67, "avg_line_length": 31.14814814814815, "alnum_prop": 0.5945303210463734, "repo_name": "IronLanguages/ironpython3", "id": "5a6d4d171f2bca4b2c60ba398e673b89412fa3ce", "size": "1049", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Tests/test_pyexpat.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "6855" }, { "name": "C", "bytes": "239473" }, { "name": "C#", "bytes": "12619304" }, { "name": "C++", "bytes": "28403" }, { "name": "CSS", "bytes": "96" }, { "name": "HTML", "bytes": "13157428" }, { "name": "Makefile", "bytes": "332" }, { "name": "PLSQL", "bytes": "22886" }, { "name": "PowerShell", "bytes": "84504" }, { "name": "Python", "bytes": "29490541" }, { "name": "Roff", "bytes": "21080" }, { "name": "Shell", "bytes": "4872" }, { "name": "VBScript", "bytes": "481" } ], "symlink_target": "" }
""" this was an attempt to achieve motion blur hoping that fading out will do the job. this is not quite motion blur - to see what i mean, try moving the mouse around for a longer while - instead of motion blur what you get is motion tail, which is unwanted. still this example teaches something too. """ from gi.repository import Gtk as gtk from lib import graphics class Scene(graphics.Scene): def __init__(self): graphics.Scene.__init__(self) self.mouse_cursor = False self.coords = [] self.x, self.y = 0, 0 self.radius = 30 self.connect("on-mouse-move", self.on_mouse_move) self.connect("on-enter-frame", self.on_enter_frame) self.fade_tick = 0 def on_mouse_move(self, area, event): # oh i know this should not be performed using tweeners, but hey - a demo! self.coords.insert(0, (event.x, event.y)) self.coords = self.coords[:10] # limit trail length def on_enter_frame(self, scene, context): g = graphics.Graphics(context) for i, coords in enumerate(reversed(self.coords)): x, y = coords if i == len(self.coords) - 1: alpha = 1 else: alpha = float(i+1) / len(self.coords) / 2 g.rectangle(x - self.radius, y - self.radius, self.radius * 2, self.radius * 2, 3) #print alpha g.fill("#999", alpha) self.fade_tick += 1 if len(self.coords) > 1 and self.fade_tick > 2: self.fade_tick = 0 self.coords.pop(-1) self.redraw() # constant redraw (maintaining the requested frame rate) class BasicWindow: def __init__(self): window = gtk.Window() window.set_default_size(300, 300) window.connect("delete_event", lambda *args: gtk.main_quit()) window.add(Scene()) window.show_all() if __name__ == "__main__": example = BasicWindow() import signal signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c gtk.main()
{ "content_hash": "ded17a401382623734a74380dc1987c9", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 82, "avg_line_length": 30.577464788732396, "alnum_prop": 0.5674804237678489, "repo_name": "projecthamster/experiments", "id": "feb9a7ffc6df06b8d6f8b71b2fb6447d6ecb96ee", "size": "2274", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mouse_fade_out.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "734313" } ], "symlink_target": "" }
from typing import Optional

from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.dms import DmsHook


class DmsStartTaskOperator(BaseOperator):
    """
    Starts an AWS DMS replication task.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DmsStartTaskOperator`

    :param replication_task_arn: ARN of the replication task to start
    :type replication_task_arn: str
    :param start_replication_task_type: How the task should be started
        ('start-replication'|'resume-processing'|'reload-target')
    :type start_replication_task_type: Optional[str]
    :param start_task_kwargs: Extra keyword arguments forwarded to
        ``start_replication_task``
    :type start_task_kwargs: Optional[dict]
    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :type aws_conn_id: Optional[str]
    """

    template_fields = (
        'replication_task_arn',
        'start_replication_task_type',
        'start_task_kwargs',
    )
    template_ext = ()
    template_fields_renderers = {'start_task_kwargs': 'json'}

    def __init__(
        self,
        *,
        replication_task_arn: str,
        start_replication_task_type: Optional[str] = 'start-replication',
        start_task_kwargs: Optional[dict] = None,
        aws_conn_id: str = 'aws_default',
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.replication_task_arn = replication_task_arn
        self.start_replication_task_type = start_replication_task_type
        # Normalize a missing/empty kwargs mapping to an empty dict so the
        # ``**`` expansion in execute() always works.
        self.start_task_kwargs = {} if not start_task_kwargs else start_task_kwargs
        self.aws_conn_id = aws_conn_id

    def execute(self, context):
        """Start the configured AWS DMS replication task from Airflow."""
        hook = DmsHook(aws_conn_id=self.aws_conn_id)
        hook.start_replication_task(
            replication_task_arn=self.replication_task_arn,
            start_replication_task_type=self.start_replication_task_type,
            **self.start_task_kwargs,
        )
        self.log.info("DMS replication task(%s) is starting.", self.replication_task_arn)
{ "content_hash": "619067ec9a8a7399ba4666cddf16c73a", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 89, "avg_line_length": 36.54545454545455, "alnum_prop": 0.6550580431177446, "repo_name": "apache/incubator-airflow", "id": "a2ce635dc9c2408bf7388833fe65654289ff2ebb", "size": "3200", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "airflow/providers/amazon/aws/operators/dms_start_task.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "69070" }, { "name": "Dockerfile", "bytes": "2001" }, { "name": "HTML", "bytes": "283783" }, { "name": "JavaScript", "bytes": "1387552" }, { "name": "Mako", "bytes": "1284" }, { "name": "Python", "bytes": "5482822" }, { "name": "Shell", "bytes": "40957" } ], "symlink_target": "" }
"""Sphinx configuration for the UUTracking documentation build."""

import os
import sys

# Make the project package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('../../'))

# -- General configuration ------------------------------------------------

# Sphinx extension modules used by this build.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'UUTracking'
copyright = '2017, Aquiles Carattino'
author = 'Aquiles Carattino'

# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# Language for content autogenerated by Sphinx (None = default/English).
language = None

# Patterns, relative to the source directory, to ignore when looking for
# source files.  Also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# Prefer the Read the Docs theme when it is installed; otherwise fall back
# to the builtin 'sphinxdoc' theme.  Only a missing package is tolerated:
# the previous bare ``except:`` silently swallowed *every* error raised
# while configuring the theme, which could hide real misconfiguration.
try:
    import sphinx_rtd_theme

    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    html_theme = 'sphinx_rtd_theme'
except ImportError:
    html_theme = 'sphinxdoc'

# Custom static files (such as style sheets); copied after the builtin
# static files, so a file named "default.css" overrides the builtin one.
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'UUTrackingdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # 'papersize': 'letterpaper',
    # 'pointsize': '10pt',
    # 'preamble': '',
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'UUTracking.tex', 'UUTracking Documentation',
     'Aquiles Carattino', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'uutracking', 'UUTracking Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'UUTracking', 'UUTracking Documentation',
     author, 'UUTracking', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# Show document authors (sectionauthor and codeauthor) in the output.
show_authors = True
{ "content_hash": "c4171b626a41101d78067cbaada3bdd8", "timestamp": "", "source": "github", "line_count": 165, "max_line_length": 100, "avg_line_length": 29.575757575757574, "alnum_prop": 0.6620901639344262, "repo_name": "aquilesC/UUTrack", "id": "b1cf318c719c90a49f4b156e3748e7716430e19e", "size": "5566", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/source/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "368149" } ], "symlink_target": "" }
import numpy as np import pandas as pd import pytest from distributed import Nanny from distributed.utils_test import gen_cluster from packaging import version from scipy.stats import loguniform from sklearn.datasets import make_classification from dask_ml.model_selection import IncrementalSearchCV try: import scikeras import tensorflow as tf from scikeras.wrappers import KerasClassifier from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential pytestmark = [ pytest.mark.skipif( version.parse(tf.__version__) < version.parse("2.3.0"), reason="pickle support", ), pytest.mark.skipif( version.parse(scikeras.__version__) < version.parse("0.1.8"), reason="partial_fit support", ), ] except ImportError: pytestmark = pytest.mark.skip(reason="Missing tensorflow or scikeras") def _keras_build_fn(lr=0.01): layers = [ Dense(512, input_shape=(784,), activation="relu"), Dense(10, input_shape=(512,), activation="softmax"), ] model = Sequential(layers) opt = tf.keras.optimizers.SGD(learning_rate=lr) model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"]) return model @gen_cluster(client=True, Worker=Nanny, timeout=20) async def test_keras(c, s, a, b): # Mirror the mnist dataset X, y = make_classification(n_classes=10, n_features=784, n_informative=100) X = X.astype("float32") assert y.dtype == np.dtype("int64") model = KerasClassifier( model=_keras_build_fn, lr=0.01, verbose=False, loss="categorical_crossentropy", ) params = {"lr": loguniform(1e-3, 1e-1)} search = IncrementalSearchCV( model, params, max_iter=3, n_initial_parameters=5, decay_rate=None ) await search.fit(X, y) # search.fit(X, y) assert search.best_score_ >= 0 # Make sure the model trains, and scores aren't constant scores = { ident: [h["score"] for h in hist] for ident, hist in search.model_history_.items() } assert all(len(hist) == 3 for hist in scores.values()) nuniq_scores = [pd.Series(v).nunique() for v in scores.values()] assert max(nuniq_scores) > 1
{ "content_hash": "cbebd9bd3e992b84d3e8262d37d778a6", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 87, "avg_line_length": 30.13157894736842, "alnum_prop": 0.6554585152838428, "repo_name": "dask/dask-ml", "id": "c3cbe645999a128a638404b34a29ce4d64e8a1e5", "size": "2290", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/model_selection/test_keras.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "798280" }, { "name": "Shell", "bytes": "633" } ], "symlink_target": "" }
import json from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException from tencentcloud.common.abstract_client import AbstractClient from tencentcloud.ocr.v20181119 import models class OcrClient(AbstractClient): _apiVersion = '2018-11-19' _endpoint = 'ocr.tencentcloudapi.com' _service = 'ocr' def AdvertiseOCR(self, request): """本接口支持广告商品图片内文字的检测和识别,返回文本框位置与文字内容。 产品优势:针对广告商品图片普遍存在较多繁体字、艺术字的特点,进行了识别能力的增强。支持中英文、横排、竖排以及倾斜场景文字识别。文字识别的召回率和准确率能达到96%以上。 :param request: Request instance for AdvertiseOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.AdvertiseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.AdvertiseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("AdvertiseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.AdvertiseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ArithmeticOCR(self, request): """本接口支持作业算式题目的自动识别和判分,目前覆盖 K12 学力范围内的 11 种题型,包括加减乘除四则、加减乘除已知结果求运算因子、判断大小、约等于估算、带余数除法、分数四则运算、单位换算、竖式加减法、竖式乘除法、脱式计算和解方程,平均识别精度达到93%以上。 :param request: Request instance for ArithmeticOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.ArithmeticOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.ArithmeticOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("ArithmeticOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.ArithmeticOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def BankCardOCR(self, request): """本接口支持对中国大陆主流银行卡正反面关键字段的检测与识别,包括卡号、卡类型、卡名字、银行信息、有效期。支持竖排异形卡识别、多角度旋转图片识别。支持对复印件、翻拍件、边框遮挡的银行卡进行告警,可应用于各种银行卡信息有效性校验场景,如金融行业身份认证、第三方支付绑卡等场景。 默认接口请求频率限制:10次/秒。 :param request: Request instance for BankCardOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.BankCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.BankCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("BankCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.BankCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def BankSlipOCR(self, request): """本接口支持银行回单全字段的识别,包括付款开户行、收款开户行、付款账号、收款账号、回单类型、回单编号、币种、流水号、凭证号码、交易机构、交易金额、手续费、日期等字段信息。 :param request: Request instance for BankSlipOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.BankSlipOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.BankSlipOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("BankSlipOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.BankSlipOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def BizLicenseOCR(self, request): """本接口支持快速精准识别营业执照上的字段,包括统一社会信用代码、公司名称、经营场所、主体类型、法定代表人、注册资金、组成形式、成立日期、营业期限和经营范围等字段。 默认接口请求频率限制:10次/秒。 :param request: Request instance for BizLicenseOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.BizLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.BizLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("BizLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.BizLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def BusInvoiceOCR(self, request): """本接口支持识别公路汽车客票的发票代码、发票号码、日期、姓名、票价等字段。 :param request: Request instance for BusInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.BusInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.BusInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("BusInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.BusInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def BusinessCardOCR(self, request): """本接口支持名片各字段的自动定位与识别,包含姓名、电话、手机号、邮箱、公司、部门、职位、网址、地址、QQ、微信、MSN等。 :param request: Request instance for BusinessCardOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.BusinessCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.BusinessCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("BusinessCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.BusinessCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CarInvoiceOCR(self, request): """本接口支持机动车销售统一发票和二手车销售统一发票的识别,包括发票号码、发票代码、合计金额、合计税额等二十多个字段。 :param request: Request instance for CarInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.CarInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.CarInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("CarInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.CarInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ClassifyDetectOCR(self, request): """支持身份证、护照、名片、银行卡、行驶证、驾驶证、港澳台通行证、户口本、港澳台来往内地通行证、港澳台居住证、不动产证、营业执照的智能分类。 :param request: Request instance for ClassifyDetectOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.ClassifyDetectOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.ClassifyDetectOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("ClassifyDetectOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.ClassifyDetectOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DriverLicenseOCR(self, request): """本接口支持驾驶证主页和副页所有字段的自动定位与识别,重点字段的识别准确度达到99%以上。 驾驶证主页:包括证号、姓名、性别、国籍、住址、出生日期、初次领证日期、准驾车型、有效期限、发证单位 驾驶证副页:包括证号、姓名、档案编号、记录。 另外,本接口还支持复印件、翻拍和PS告警功能。同时支持识别交管12123APP发放的电子驾驶证正页。 电子驾驶证正页:包括证号、姓名、性别、国籍、出生日期、初次领证日期、准驾车型、有效期开始时间、有效期截止时间、档案编号、状态、累积记分。 
默认接口请求频率限制:10次/秒。 :param request: Request instance for DriverLicenseOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.DriverLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.DriverLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("DriverLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.DriverLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DutyPaidProofOCR(self, request): """本接口支持对完税证明的税号、纳税人识别号、纳税人名称、金额合计大写、金额合计小写、填发日期、税务机关、填票人等关键字段的识别。 :param request: Request instance for DutyPaidProofOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.DutyPaidProofOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.DutyPaidProofOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("DutyPaidProofOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.DutyPaidProofOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def EduPaperOCR(self, request): """本接口支持数学试题内容的识别和结构化输出,包括通用文本解析和小学/初中/高中数学公式解析能力(包括91种题型,180种符号),公式返回格式为 Latex 格式文本。 :param request: Request instance for EduPaperOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.EduPaperOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.EduPaperOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("EduPaperOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.EduPaperOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def EnglishOCR(self, request): """本接口支持图像英文文字的检测和识别,返回文字框位置与文字内容。支持多场景、任意版面下的英文、字母、数字和常见字符的识别,同时覆盖英文印刷体和英文手写体识别。 默认接口请求频率限制:10次/秒。 :param request: Request instance for EnglishOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.EnglishOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.EnglishOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("EnglishOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.EnglishOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def EnterpriseLicenseOCR(self, request): """本接口支持智能化识别各类企业登记证书、许可证书、企业执照、三证合一类证书,结构化输出统一社会信用代码、公司名称、法定代表人、公司地址、注册资金、企业类型、经营范围等关键字段。 :param request: Request instance for EnterpriseLicenseOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.EnterpriseLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.EnterpriseLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("EnterpriseLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.EnterpriseLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def EstateCertOCR(self, request): """本接口支持不动产权证关键字段的识别,包括使用期限、面积、用途、权利性质、权利类型、坐落、共有情况、权利人、权利其他状况等。 :param request: Request instance for EstateCertOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.EstateCertOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.EstateCertOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("EstateCertOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.EstateCertOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def FinanBillOCR(self, request): """本接口支持常见银行票据的自动分类和识别。整单识别包括支票(含现金支票、普通支票、转账支票),承兑汇票(含银行承兑汇票、商业承兑汇票)以及进账单等,适用于中国人民银行印发的 2010 版银行票据凭证版式(银发[2010]299 号)。 :param request: Request instance for FinanBillOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.FinanBillOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.FinanBillOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("FinanBillOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.FinanBillOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def FinanBillSliceOCR(self, request): """本接口支持常见银行票据的自动分类和识别。切片识别包括金融行业常见票据的重要切片字段识别,包括金额、账号、日期、凭证号码等。(金融票据切片:金融票据中待识别字段及其周围局部区域的裁剪图像。) :param request: Request instance for FinanBillSliceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.FinanBillSliceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.FinanBillSliceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("FinanBillSliceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.FinanBillSliceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def FlightInvoiceOCR(self, request): """本接口支持机票行程单关键字段的识别,包括旅客姓名、有效身份证件号码、电子客票号码、验证码、填开单位、其他税费、燃油附加费、民航发展基金、保险费、销售单位代号、始发地、目的地、航班号、时间、日期、座位等级、承运人、发票消费类型、票价、合计金额、填开日期、国内国际标签、印刷序号、客票级别/类别、客票生效日期、有效期截止日期、免费行李等字段,支持航班信息多行明细输出。 :param request: Request instance for 
FlightInvoiceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.FlightInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.FlightInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("FlightInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.FlightInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def FormulaOCR(self, request): """本接口支持识别主流初高中数学符号和公式,返回公式的 Latex 格式文本。 :param request: Request instance for FormulaOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.FormulaOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.FormulaOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("FormulaOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.FormulaOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def GeneralAccurateOCR(self, request): """本接口支持图像整体文字的检测和识别。支持中文、英文、中英文、数字和特殊字符号的识别,并返回文字框位置和文字内容。 适用于文字较多、版式复杂、对识别准召率要求较高的场景,如试卷试题、网络图片、街景店招牌、法律卷宗等场景。 产品优势:与通用印刷体识别相比,提供更高精度的文字识别服务,在文字较多、长串数字、小字、模糊字、倾斜文本等困难场景下,高精度版的准确率和召回率更高。 通用印刷体识别不同版本的差异如下: <table style="width:715px"> <thead> <tr> <th style="width:150px"></th> <th 
>【荐】通用印刷体识别(高精度版)</th> <th style="width:200px"><a href="https://cloud.tencent.com/document/product/866/33526">【荐】通用印刷体识别</a></th> <th><a href="https://cloud.tencent.com/document/product/866/37831">通用印刷体识别(精简版)</a></th> </tr> </thead> <tbody> <tr> <td> 适用场景</td> <td>适用于文字较多、长串数字、小字、模糊字、倾斜文本等困难场景</td> <td>适用于所有通用场景的印刷体识别</td> <td>适用于快速文本识别场景,准召率有一定损失,价格更优惠</td> </tr> <tr> <td>识别准确率</td> <td>99%</td> <td>96%</td> <td>91%</td> </tr> <tr> <td>价格</td> <td>高</td> <td>中</td> <td>低</td> </tr> <tr> <td>支持的语言</td> <td>中文、英文、中英文</td> <td>中文、英文、中英文、日语、韩语、西班牙语、法语、德语、葡萄牙语、越南语、马来语、俄语、意大利语、荷兰语、瑞典语、芬兰语、丹麦语、挪威语、匈牙利语、泰语</td> <td>中文、英文、中英文</td> </tr> <tr> <td>自动语言检测</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>返回文本行坐标</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>自动旋转纠正</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> </tr> </tbody> </table> 默认接口请求频率限制:10次/秒。 :param request: Request instance for GeneralAccurateOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.GeneralAccurateOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralAccurateOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("GeneralAccurateOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.GeneralAccurateOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def GeneralBasicOCR(self, request): """本接口支持图像整体文字的检测和识别。可以识别中文、英文、中英文、日语、韩语、西班牙语、法语、德语、葡萄牙语、越南语、马来语、俄语、意大利语、荷兰语、瑞典语、芬兰语、丹麦语、挪威语、匈牙利语、泰语,阿拉伯语20种语言,且各种语言均支持与英文混合的文字识别。 适用于印刷文档识别、网络图片识别、广告图文字识别、街景店招牌识别、菜单识别、视频标题识别、头像文字识别等场景。 
产品优势:支持自动识别语言类型,可返回文本框坐标信息,对于倾斜文本支持自动旋转纠正。 通用印刷体识别不同版本的差异如下: <table style="width:715px"> <thead> <tr> <th style="width:150px"></th> <th style="width:200px">【荐】通用印刷体识别</th> <th ><a href="https://cloud.tencent.com/document/product/866/34937">【荐】通用印刷体识别(高精度版)</a></th> <th><a href="https://cloud.tencent.com/document/product/866/37831">通用印刷体识别(精简版)</a></th> </tr> </thead> <tbody> <tr> <td> 适用场景</td> <td>适用于所有通用场景的印刷体识别</td> <td>适用于文字较多、长串数字、小字、模糊字、倾斜文本等困难场景</td> <td>适用于快速文本识别场景,准召率有一定损失,价格更优惠</td> </tr> <tr> <td>识别准确率</td> <td>96%</td> <td>99%</td> <td>91%</td> </tr> <tr> <td>价格</td> <td>中</td> <td>高</td> <td>低</td> </tr> <tr> <td>支持的语言</td> <td>中文、英文、中英文、日语、韩语、西班牙语、法语、德语、葡萄牙语、越南语、马来语、俄语、意大利语、荷兰语、瑞典语、芬兰语、丹麦语、挪威语、匈牙利语、泰语</td> <td>中文、英文、中英文</td> <td>中文、英文、中英文</td> </tr> <tr> <td>自动语言检测</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>返回文本行坐标</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>自动旋转纠正</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> </tr> </tbody> </table> 默认接口请求频率限制:20次/秒。 :param request: Request instance for GeneralBasicOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("GeneralBasicOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.GeneralBasicOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def GeneralEfficientOCR(self, request): """本接口支持图像整体文字的检测和识别。支持中文、英文、中英文、数字和特殊字符号的识别,并返回文字框位置和文字内容。 适用于快速文本识别场景。 产品优势:与通用印刷体识别接口相比,精简版虽然在准确率和召回率上有一定损失,但价格更加优惠。 通用印刷体识别不同版本的差异如下: <table style="width:715px"> <thead> <tr> <th style="width:150px"></th> <th >通用印刷体识别(精简版)</th> <th style="width:200px"><a href="https://cloud.tencent.com/document/product/866/33526">【荐】通用印刷体识别</a></th> <th><a href="https://cloud.tencent.com/document/product/866/34937">【荐】通用印刷体识别(高精度版)</a></th> </tr> </thead> <tbody> <tr> <td> 适用场景</td> <td>适用于快速文本识别场景,准召率有一定损失,价格更优惠</td> <td>适用于所有通用场景的印刷体识别</td> <td>适用于文字较多、长串数字、小字、模糊字、倾斜文本等困难场景</td> </tr> <tr> <td>识别准确率</td> <td>91%</td> <td>96%</td> <td>99%</td> </tr> <tr> <td>价格</td> <td>低</td> <td>中</td> <td>高</td> </tr> <tr> <td>支持的语言</td> <td>中文、英文、中英文</td> <td>中文、英文、中英文、日语、韩语、西班牙语、法语、德语、葡萄牙语、越南语、马来语、俄语、意大利语、荷兰语、瑞典语、芬兰语、丹麦语、挪威语、匈牙利语、泰语</td> <td>中文、英文、中英文</td> </tr> <tr> <td>自动语言检测</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>返回文本行坐标</td> <td>支持</td> <td>支持</td> <td>支持</td> </tr> <tr> <td>自动旋转纠正</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> <td>支持旋转识别,返回角度信息</td> </tr> </tbody> </table> 默认接口请求频率限制:10次/秒。 :param request: Request instance for GeneralEfficientOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.GeneralEfficientOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralEfficientOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("GeneralEfficientOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.GeneralEfficientOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def GeneralFastOCR(self, request): """本接口支持图片中整体文字的检测和识别,返回文字框位置与文字内容。相比通用印刷体识别接口,识别速度更快。 默认接口请求频率限制:10次/秒。 :param request: Request instance for GeneralFastOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("GeneralFastOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.GeneralFastOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def GeneralHandwritingOCR(self, request): """本接口支持图片内手写体文字的检测和识别,针对手写字体无规则、字迹潦草、模糊等特点进行了识别能力的增强。 默认接口请求频率限制:10次/秒。 :param request: Request instance for GeneralHandwritingOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.GeneralHandwritingOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralHandwritingOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("GeneralHandwritingOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.GeneralHandwritingOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def HKIDCardOCR(self, request): """本接口支持中国香港身份证人像面中关键字段的识别,包括中文姓名、英文姓名、姓名电码、出生日期、性别、证件符号、首次签发日期、最近领用日期、身份证号、是否是永久性居民身份证;具备防伪识别、人像照片裁剪等扩展功能。 默认接口请求频率限制:5次/秒。 :param request: Request instance for HKIDCardOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.HKIDCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.HKIDCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("HKIDCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.HKIDCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def HmtResidentPermitOCR(self, request): """港澳台居住证OCR支持港澳台居住证正反面全字段内容检测识别功能,包括姓名、性别、出生日期、地址、身份证ID、签发机关、有效期限、签发次数、通行证号码关键字段识别。可以应用于港澳台居住证信息有效性校验场景,例如银行开户、用户注册等场景。 :param request: Request instance for HmtResidentPermitOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.HmtResidentPermitOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.HmtResidentPermitOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("HmtResidentPermitOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.HmtResidentPermitOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def IDCardOCR(self, request): """本接口支持中国大陆居民二代身份证正反面所有字段的识别,包括姓名、性别、民族、出生日期、住址、公民身份证号、签发机关、有效期限,识别准确度达到99%以上。 另外,本接口还支持多种增值能力,满足不同场景的需求。如身份证照片、人像照片的裁剪功能,同时具备9种告警功能,如下表所示。 <table style="width:650px"> <thead> <tr> <th width="150">增值能力</th> <th width="500">能力项</th> </tr> </thead> <tbody> <tr> <td rowspan="2">裁剪功能</td> <td>身份证照片裁剪(去掉证件外多余的边缘、自动矫正拍摄角度)</td> </tr> <tr> <td>人像照片裁剪(自动抠取身份证头像区域)</td> </tr> <tr> <td rowspan="9">告警功能</td> <td>身份证有效日期不合法告警</td> </tr> <tr> <td>身份证边框不完整告警</td> </tr> <tr> <td>身份证复印件告警</td> </tr> <tr> <td>身份证翻拍告警</td> </tr> <tr> <td>身份证框内遮挡告警</td> </tr> <tr> <td>临时身份证告警</td> </tr> <tr> <td>身份证 PS 告警</td> </tr> <tr> <td>图片模糊告警(可根据图片质量分数判断)</td> </tr> </tbody> </table> 默认接口请求频率限制:20次/秒。 :param request: Request instance for IDCardOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("IDCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.IDCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ImageEnhancement(self, request): """文本图像增强是面向文档类图片提供的图像增强处理能力,包括切边增强、图像矫正、阴影去除、摩尔纹去除等;可以有效优化文档类的图片质量,提升文字的清晰度。 :param request: Request instance for ImageEnhancement. :type request: :class:`tencentcloud.ocr.v20181119.models.ImageEnhancementRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.ImageEnhancementResponse` """ try: params = request._serialize() headers = request.headers body = self.call("ImageEnhancement", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.ImageEnhancementResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def InstitutionOCR(self, request): """本接口支持事业单位法人证书关键字段识别,包括注册号、有效期、住所、名称、法定代表人等。 :param request: Request instance for InstitutionOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.InstitutionOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.InstitutionOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("InstitutionOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.InstitutionOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def InsuranceBillOCR(self, request): """本接口支持病案首页、费用清单、结算单、医疗发票四种保险理赔单据的文本识别和结构化输出。 :param request: Request instance for InsuranceBillOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.InsuranceBillOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.InsuranceBillOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("InsuranceBillOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.InsuranceBillOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def InvoiceGeneralOCR(self, request): """本接口支持对通用机打发票的发票代码、发票号码、日期、购买方识别号、销售方识别号、校验码、小写金额等关键字段的识别。 :param request: Request instance for InvoiceGeneralOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.InvoiceGeneralOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.InvoiceGeneralOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("InvoiceGeneralOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.InvoiceGeneralOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def LicensePlateOCR(self, request): """本接口支持对中国大陆机动车车牌的自动定位和识别,返回地域编号和车牌号码与车牌颜色信息。 默认接口请求频率限制:10次/秒。 :param request: Request instance for LicensePlateOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.LicensePlateOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.LicensePlateOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("LicensePlateOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.LicensePlateOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def MLIDCardOCR(self, request): """本接口支持马来西亚身份证识别,识别字段包括身份证号、姓名、性别、地址;具备身份证人像照片的裁剪功能和翻拍、复印件告警功能。 本接口暂未完全对外开放,如需咨询,请[联系商务](https://cloud.tencent.com/about/connect) :param request: Request instance for MLIDCardOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.MLIDCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.MLIDCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("MLIDCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.MLIDCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def MLIDPassportOCR(self, request): """本接口支持中国港澳台地区以及其他国家、地区的护照识别。识别字段包括护照ID、姓名、出生日期、性别、有效期、发行国、国籍,具备护照人像照片的裁剪功能和翻拍、复印件告警功能。 默认接口请求频率限制:5次/秒。 :param request: Request instance for MLIDPassportOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.MLIDPassportOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.MLIDPassportOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("MLIDPassportOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.MLIDPassportOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def MainlandPermitOCR(self, request): """智能识别并结构化港澳台居民来往内地通行证正面全部字段,包含中文姓名、英文姓名、性别、出生日期、签发机关、有效期限、证件号、签发地点、签发次数、证件类别。 :param request: Request instance for MainlandPermitOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.MainlandPermitOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.MainlandPermitOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("MainlandPermitOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.MainlandPermitOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def MixedInvoiceDetect(self, request): """本接口支持多张、多类型票据的混合检测和自动分类,返回对应票据类型。目前已支持增值税发票、增值税发票(卷票)、定额发票、通用机打发票、购车发票、火车票、出租车发票、机票行程单、汽车票、轮船票、过路过桥费发票、酒店账单、客运限额发票、购物小票、完税证明共15种票据。 :param request: Request instance for MixedInvoiceDetect. 
:type request: :class:`tencentcloud.ocr.v20181119.models.MixedInvoiceDetectRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.MixedInvoiceDetectResponse` """ try: params = request._serialize() headers = request.headers body = self.call("MixedInvoiceDetect", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.MixedInvoiceDetectResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def MixedInvoiceOCR(self, request): """本接口支持 单张、多张、多类型 票据的混合识别,同时支持自选需要识别的票据类型,已支持票种包括:增值税发票(专票、普票、卷票)、全电发票、非税发票、定额发票、通用机打发票、购车发票、火车票、出租车发票、机票行程单、汽车票、轮船票、过路过桥费发票共14种标准报销发票,并支持其他类发票的识别。 默认接口请求频率限制:5次/秒。 :param request: Request instance for MixedInvoiceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.MixedInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.MixedInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("MixedInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.MixedInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def OrgCodeCertOCR(self, request): """本接口支持组织机构代码证关键字段的识别,包括代码、有效期、地址、机构名称等。 :param request: Request instance for OrgCodeCertOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.OrgCodeCertOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.OrgCodeCertOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("OrgCodeCertOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.OrgCodeCertOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def PassportOCR(self, request): """本接口支持中国大陆地区护照个人资料页多个字段的检测与识别。已支持字段包括英文姓名、中文姓名、国家码、护照号、出生地、出生日期、国籍英文、性别英文、有效期、签发地点英文、签发日期、持证人签名、护照机读码(MRZ码)等。 :param request: Request instance for PassportOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.PassportOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.PassportOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("PassportOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.PassportOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def PermitOCR(self, request): """本接口支持对卡式港澳台通行证的识别,包括签发地点、签发机关、有效期限、性别、出生日期、英文姓名、姓名、证件号等字段。 :param request: Request instance for PermitOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.PermitOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.PermitOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("PermitOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.PermitOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def PropOwnerCertOCR(self, request): """本接口支持房产证关键字段的识别,包括房地产权利人、共有情况、登记时间、规划用途、房屋性质、房屋坐落等。 目前接口对合肥、成都、佛山三个城市的房产证版式识别较好。 :param request: Request instance for PropOwnerCertOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.PropOwnerCertOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.PropOwnerCertOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("PropOwnerCertOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.PropOwnerCertOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def QrcodeOCR(self, request): """本接口支持条形码和二维码的识别(包括 DataMatrix 和 PDF417)。 :param request: Request instance for QrcodeOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.QrcodeOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.QrcodeOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("QrcodeOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.QrcodeOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def QueryBarCode(self, request): """本接口支持条形码备案信息查询,返回条形码查询结果的相关信息,包括产品名称、产品英文名称、品牌名称、规格型号、宽度、高度、深度、关键字、产品描述、厂家名称、厂家地址、企业社会信用代码13个字段信息。 产品优势:直联中国物品编码中心,查询结果更加准确、可靠。 :param request: Request instance for QueryBarCode. :type request: :class:`tencentcloud.ocr.v20181119.models.QueryBarCodeRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.QueryBarCodeResponse` """ try: params = request._serialize() headers = request.headers body = self.call("QueryBarCode", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.QueryBarCodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def QuotaInvoiceOCR(self, request): """本接口支持定额发票的发票号码、发票代码、金额(大小写)、发票消费类型、地区及是否有公司印章等关键字段的识别。 :param request: Request instance for QuotaInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.QuotaInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.QuotaInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("QuotaInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.QuotaInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeContainerOCR(self, request): """本接口支持集装箱箱门信息识别,识别字段包括集装箱箱号、类型、总重量、有效承重、容量、自身重量,具备集装箱箱号、类型不完整或者不清晰的告警功能。 :param request: Request instance for RecognizeContainerOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeContainerOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeContainerOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeContainerOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeContainerOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeHealthCodeOCR(self, request): """本接口支持北京、上海、广东、江苏、吉林、黑龙江、天津、辽宁、浙江、河南、四川、贵州、山东、安徽、福建、江西、湖北、湖南等省份健康码的识别,包括持码人姓名、持码人身份证号、健康码更新时间、健康码颜色、核酸检测结果、核酸检测间隔时长、核酸检测时间,疫苗接种信息,八个字段的识别结果输出。不同省市健康码显示的字段信息有所不同,上述字段的识别结果可能为空,以图片上具体展示的信息为准。 
默认接口请求频率限制:10次/秒。 :param request: Request instance for RecognizeHealthCodeOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeHealthCodeOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeHealthCodeOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeHealthCodeOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeHealthCodeOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeIndonesiaIDCardOCR(self, request): """印尼身份证识别 :param request: Request instance for RecognizeIndonesiaIDCardOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeIndonesiaIDCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeIndonesiaIDCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeIndonesiaIDCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeIndonesiaIDCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeMedicalInvoiceOCR(self, request): """医疗发票识别目前支持全国统一门诊发票、全国统一住院发票、以及部分地方的门诊和住院发票的识别。 :param request: Request instance for RecognizeMedicalInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeMedicalInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeMedicalInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeMedicalInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeMedicalInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeOnlineTaxiItineraryOCR(self, request): """本接口支持网约车行程单关键字段的识别,包括行程起止日期、上车时间、起点、终点、里程、金额等字段。 :param request: Request instance for RecognizeOnlineTaxiItineraryOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeOnlineTaxiItineraryOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeOnlineTaxiItineraryOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeOnlineTaxiItineraryOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeOnlineTaxiItineraryOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizePhilippinesDrivingLicenseOCR(self, request): """菲律宾驾驶证识别 :param request: Request instance for RecognizePhilippinesDrivingLicenseOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.RecognizePhilippinesDrivingLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizePhilippinesDrivingLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizePhilippinesDrivingLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizePhilippinesDrivingLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizePhilippinesVoteIDOCR(self, request): """菲律宾VoteID识别 :param request: Request instance for RecognizePhilippinesVoteIDOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizePhilippinesVoteIDOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizePhilippinesVoteIDOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizePhilippinesVoteIDOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizePhilippinesVoteIDOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeTableOCR(self, request): """本接口支持中英文图片/ PDF内常规表格、无线表格、多表格的检测和识别,支持日文有线表格识别,返回每个单元格的文字内容,支持旋转的表格图片识别,且支持将识别结果保存为 Excel 格式。 :param request: Request instance for 
RecognizeTableOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeTableOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeTableOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeTableOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeTableOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeThaiIDCardOCR(self, request): """本接口支持泰国身份证识别,识别字段包括泰文姓名、英文姓名、地址、出生日期、身份证号码。 本接口暂未完全对外开放,如需咨询,请[联系商务](https://cloud.tencent.com/about/connect) :param request: Request instance for RecognizeThaiIDCardOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeThaiIDCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeThaiIDCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeThaiIDCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeThaiIDCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RecognizeTravelCardOCR(self, request): """本接口支持通信大数据行程卡识别,包括行程卡颜色、更新时间、途经地、存在中高风险地区的城市、电话号码,五个字段的识别结果输出。 默认接口请求频率限制:20次/秒。 :param request: Request instance for RecognizeTravelCardOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RecognizeTravelCardOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RecognizeTravelCardOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RecognizeTravelCardOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RecognizeTravelCardOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ResidenceBookletOCR(self, request): """本接口支持居民户口簿户主页及成员页关键字段的识别,包括姓名、户别、地址、籍贯、身份证号码等。 :param request: Request instance for ResidenceBookletOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.ResidenceBookletOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.ResidenceBookletOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("ResidenceBookletOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.ResidenceBookletOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RideHailingDriverLicenseOCR(self, request): """本接口支持网约车驾驶证关键字段的识别,包括姓名、证号、起始日期、截止日期、发证日期。 :param request: Request instance for RideHailingDriverLicenseOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.RideHailingDriverLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RideHailingDriverLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RideHailingDriverLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RideHailingDriverLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RideHailingTransportLicenseOCR(self, request): """本接口支持网约车运输证关键字段的识别,包括交运管许可字号、车辆所有人、车辆号牌、起始日期、截止日期、发证日期。 :param request: Request instance for RideHailingTransportLicenseOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.RideHailingTransportLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.RideHailingTransportLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("RideHailingTransportLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.RideHailingTransportLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SealOCR(self, request): """本接口支持各类印章识别,包括发票章,财务章等,适用于公文,票据等场景。 :param request: Request instance for SealOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.SealOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.SealOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("SealOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.SealOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ShipInvoiceOCR(self, request): """本接口支持识别轮船票的发票代码、发票号码、日期、姓名、票价、始发地、目的地、姓名、时间、发票消费类型、省、市、币种字段。 :param request: Request instance for ShipInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.ShipInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.ShipInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("ShipInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.ShipInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SmartStructuralOCR(self, request): """本接口支持识别并提取各类证照、票据、表单、合同等结构化场景的字段信息。无需任何配置,灵活高效。适用于各类结构化信息录入场景。 默认接口请求频率限制:5次/秒。 :param request: Request instance for SmartStructuralOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.SmartStructuralOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.SmartStructuralOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("SmartStructuralOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.SmartStructuralOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def TableOCR(self, request): """<b>此接口为表格识别的旧版本服务,不再进行服务升级,建议您使用识别能力更强、服务性能更优的<a href="https://cloud.tencent.com/document/product/866/49525">新版表格识别</a>。</b> 本接口支持图片内表格文档的检测和识别,返回每个单元格的文字内容,支持将识别结果保存为 Excel 格式。 :param request: Request instance for TableOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.TableOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.TableOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("TableOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.TableOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def TaxiInvoiceOCR(self, request): """本接口支持出租车发票关键字段的识别,包括发票号码、发票代码、金额、日期、上下车时间、里程、车牌号、发票类型及所属地区等字段。 :param request: Request instance for TaxiInvoiceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.TaxiInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.TaxiInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("TaxiInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.TaxiInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def TextDetect(self, request): """本接口通过检测图片中的文字信息特征,快速判断图片中有无文字并返回判断结果,帮助用户过滤无文字的图片。 :param request: Request instance for TextDetect. 
:type request: :class:`tencentcloud.ocr.v20181119.models.TextDetectRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.TextDetectResponse` """ try: params = request._serialize() headers = request.headers body = self.call("TextDetect", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.TextDetectResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def TollInvoiceOCR(self, request): """本接口支持对过路过桥费发票的发票代码、发票号码、日期、小写金额等关键字段的识别。 :param request: Request instance for TollInvoiceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.TollInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.TollInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("TollInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.TollInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def TrainTicketOCR(self, request): """本接口支持火车票全字段的识别,包括编号、票价、姓名、座位号、出发时间、出发站、到达站、车次、席别、发票类型及序列号等。 :param request: Request instance for TrainTicketOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.TrainTicketOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.TrainTicketOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("TrainTicketOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.TrainTicketOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VatInvoiceOCR(self, request): """本接口支持增值税专用发票、增值税普通发票、增值税电子发票全字段的内容检测和识别,包括发票代码、发票号码、打印发票代码、打印发票号码、开票日期、合计金额、校验码、税率、合计税额、价税合计、购买方识别号、复核、销售方识别号、开票人、密码区1、密码区2、密码区3、密码区4、发票名称、购买方名称、销售方名称、服务名称、备注、规格型号、数量、单价、金额、税额、收款人等字段。 默认接口请求频率限制:10次/秒。 :param request: Request instance for VatInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VatInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VatInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VatInvoiceVerify(self, request): """本接口支持增值税发票的准确性核验,您可以通过输入增值税发票的关键字段提供所需的验证信息,接口返回真实的票面相关信息,包括发票代码、发票号码、开票日期、金额、消费类型、购方名称、购方税号、销方名称、销方税号等多个常用字段。支持多种发票类型核验,包括增值税专用发票、增值税普通发票(含电子普通发票、卷式发票、通行费发票)、全电发票、机动车销售统一发票、货物运输业增值税专用发票、二手车销售统一发票。 :param request: Request instance for VatInvoiceVerify. 
:type request: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceVerifyRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceVerifyResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VatInvoiceVerify", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VatInvoiceVerifyResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VatInvoiceVerifyNew(self, request): """本接口支持增值税发票的准确性核验,您可以通过输入增值税发票的关键字段提供所需的验证信息,接口返回真实的票面相关信息,包括发票代码、发票号码、开票日期、金额、消费类型、购方名称、购方税号、销方名称、销方税号等多个常用字段。支持多种发票类型核验,包括增值税专用发票、增值税普通发票(含电子普通发票、卷式发票、通行费发票)、全电发票、机动车销售统一发票、货物运输业增值税专用发票、二手车销售统一发票、通用机打电子发票(广东和浙江)。 :param request: Request instance for VatInvoiceVerifyNew. 
:type request: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceVerifyNewRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VatInvoiceVerifyNewResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VatInvoiceVerifyNew", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VatInvoiceVerifyNewResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VatRollInvoiceOCR(self, request): """本接口支持对增值税发票(卷票)的发票代码、发票号码、日期、校验码、合计金额(小写)等关键字段的识别。 :param request: Request instance for VatRollInvoiceOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.VatRollInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VatRollInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VatRollInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VatRollInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VehicleLicenseOCR(self, request): """本接口支持行驶证主页和副页所有字段的自动定位与识别。 行驶证主页:车牌号码、车辆类型、所有人、住址、使用性质、品牌型号、识别代码、发动机号、注册日期、发证日期、发证单位。 行驶证副页:号牌号码、档案编号、核定载人数、总质量、整备质量、核定载质量、外廓尺寸、准牵引总质量、备注、检验记录。 另外,本接口还支持复印件、翻拍和PS告警功能。 默认接口请求频率限制:10次/秒。 :param request: Request instance for 
VehicleLicenseOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.VehicleLicenseOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VehicleLicenseOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VehicleLicenseOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VehicleLicenseOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VehicleRegCertOCR(self, request): """本接口支持国内机动车登记证书主要字段的结构化识别,包括机动车所有人、身份证明名称、号码、车辆型号、车辆识别代号、发动机号、制造厂名称等。 :param request: Request instance for VehicleRegCertOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.VehicleRegCertOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VehicleRegCertOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VehicleRegCertOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VehicleRegCertOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VerifyBasicBizLicense(self, request): """本接口支持营业执照信息的识别与准确性核验。 您可以通过输入营业执照注册号或营业执照图片(若两者都输入则只用注册号做查询)进行核验,接口返回查询到的工商照面信息,并比对要校验的字段与查询结果的一致性。查询到工商信息包括:统一社会信用代码、经营期限、法人姓名、经营状态、经营业务范围、注册资本等。 :param request: Request instance for 
VerifyBasicBizLicense. :type request: :class:`tencentcloud.ocr.v20181119.models.VerifyBasicBizLicenseRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VerifyBasicBizLicenseResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VerifyBasicBizLicense", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VerifyBasicBizLicenseResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VerifyBizLicense(self, request): """本接口支持营业执照信息的识别与准确性核验,返回的真实工商照面信息比营业执照识别及核验(基础版)接口更详细。 您可以输入营业执照注册号或营业执照图片(若两者都输入则只用注册号做查询),接口返回查询到的工商照面信息,并比对要校验的字段与查询结果的一致性。 查询到工商信息包括:统一社会信用代码、组织机构代码、经营期限、法人姓名、经营状态、经营业务范围及方式、注册资金、注册币种、登记机关、开业日期、企业(机构)类型、注销日期、吊销日期、许可经营项目、一般经营项目、核准时间、省、地级市、区/县、住所所在行政区划代码、行业门类代码、行业门类名称、国民经济行业代码、国民经济行业名称、经营(业务)范围等。 :param request: Request instance for VerifyBizLicense. 
:type request: :class:`tencentcloud.ocr.v20181119.models.VerifyBizLicenseRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VerifyBizLicenseResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VerifyBizLicense", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VerifyBizLicenseResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VerifyEnterpriseFourFactors(self, request): """此接口基于企业四要素授权“姓名、证件号码、企业标识、企业全称”,验证企业信息是否一致。 :param request: Request instance for VerifyEnterpriseFourFactors. :type request: :class:`tencentcloud.ocr.v20181119.models.VerifyEnterpriseFourFactorsRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VerifyEnterpriseFourFactorsResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VerifyEnterpriseFourFactors", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VerifyEnterpriseFourFactorsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VerifyOfdVatInvoiceOCR(self, request): """本接口支持OFD格式的增值税电子普通发票和增值税电子专用发票的识别,返回发票代码、发票号码、开票日期、验证码、机器编号、密码区,购买方和销售方信息,包括名称、纳税人识别号、地址电话、开户行及账号,以及价税合计、开票人、收款人、复核人、税额、不含税金额等字段信息。 :param request: Request instance for VerifyOfdVatInvoiceOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.VerifyOfdVatInvoiceOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VerifyOfdVatInvoiceOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VerifyOfdVatInvoiceOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VerifyOfdVatInvoiceOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def VinOCR(self, request): """本接口支持图片内车辆识别代号(VIN)的检测和识别。 :param request: Request instance for VinOCR. :type request: :class:`tencentcloud.ocr.v20181119.models.VinOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.VinOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("VinOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.VinOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def WaybillOCR(self, request): """本接口支持市面上主流版式电子运单的识别,包括收件人和寄件人的姓名、电话、地址以及运单号等字段,精度均处于业界领先水平,识别准确率达到99%以上。 默认接口请求频率限制:10次/秒。 :param request: Request instance for WaybillOCR. 
:type request: :class:`tencentcloud.ocr.v20181119.models.WaybillOCRRequest` :rtype: :class:`tencentcloud.ocr.v20181119.models.WaybillOCRResponse` """ try: params = request._serialize() headers = request.headers body = self.call("WaybillOCR", params, headers=headers) response = json.loads(body) if "Error" not in response["Response"]: model = models.WaybillOCRResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
{ "content_hash": "eb5f3c05a29ad4fcccd7b8706b554985", "timestamp": "", "source": "github", "line_count": 2543, "max_line_length": 224, "avg_line_length": 41.6283916633897, "alnum_prop": 0.5801003202312466, "repo_name": "tzpBingo/github-trending", "id": "91c217705bea6397927d3cd20a431f0aba71c3f2", "size": "122605", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "codespace/python/tencentcloud/ocr/v20181119/ocr_client.py", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "11470" }, { "name": "HTML", "bytes": "1543" }, { "name": "Python", "bytes": "49985109" }, { "name": "Shell", "bytes": "18039" } ], "symlink_target": "" }
import sys
from bottle import route, run, template, static_file, request
import subprocess
import time
import datetime
import multiprocessing as mp
import threading
import socket
import correlate_with_ref
import shlex
import argparse
import collections
import numpy as np

# The record and correlate tasks run in alternance.
# Maybe later we want to run them simultaneously in a small
# pipeline.


class RTLSDR_Receiver(threading.Thread):
    """Connection between the rtlsdr and our script is done using a TCP
    socket. This class handles running the rtl_tcp tool, and reads the
    incoming data stream into a local buffer.

    The buffer size is capped, and works as a FIFO, because analysis of
    the data is slower than capturing it. We therefore want to lose some
    data.
    """

    def __init__(self, options):
        """Prepare the receiver thread.

        options: parsed argparse namespace; the freq, rate, samps and
        gain attributes are read here (freq/gain coerced to float,
        rate/samps to int).
        """
        threading.Thread.__init__(self)
        self.freq = float(options.freq)
        self.rate = int(options.rate)
        self.samps = int(options.samps)
        self.gain = float(options.gain)
        # We want to keep twice the amount of samples
        # in the queue to have some margin. Samples are
        # two bytes because they are I/Q interleaved u8
        self.max_num_bytes = self.samps * 2 * 2
        # Set to ask run() to terminate its read loop.
        self.event_stop = threading.Event()
        self.rtl_tcp_port = 59152  # chosen randomly
        # FIFO of raw bytes read from the rtl_tcp socket.
        self.data_queue = collections.deque()
        # While the data_queue is itself thread-safe, we need to make sure
        # the consumer cannot preempt the little housekeeping we do in run()
        # to keep the maximum queue length.
        self.data_lock = threading.Lock()
        # Handle on the rtl_tcp subprocess, created in run().
        self.rtlsdr_proc = None

    def run(self):
        """Spawn rtl_tcp, connect to it, and stream samples into
        data_queue until stop() is called or the socket fails.
        """
        rtl_tcp_cmdline = shlex.split("rtl_tcp -f {} -s {} -g {} -p {}".format(
            self.freq, self.rate, self.gain, self.rtl_tcp_port))
        self.rtlsdr_proc = subprocess.Popen(rtl_tcp_cmdline)
        # Give rtl_tcp some time to start listening before we connect.
        time.sleep(1.5)

        self.sock = socket.socket()
        self.sock.connect(("localhost", self.rtl_tcp_port))

        while not self.event_stop.is_set():
            # NOTE(review): bare except — any error (not only socket
            # errors) ends the read loop here.
            try:
                samples = self.sock.recv(1024)
                self.data_queue.extend(samples)
            except:
                print('Socket error')
                break

            self.data_lock.acquire()
            # try/catch/except to make sure we release the lock, and
            # re-raise any exception up
            try:
                n_bytes = len(self.data_queue)
                if n_bytes > self.max_num_bytes:
                    # Drop the oldest bytes so the FIFO stays capped at
                    # max_num_bytes.
                    num_to_delete = n_bytes - self.max_num_bytes
                    for i in range(num_to_delete):
                        self.data_queue.popleft()
            except:
                raise
            finally:
                self.data_lock.release()

        # Tear down: close the socket and stop the rtl_tcp subprocess.
        print("Receiver leaving")
        self.sock.close()
        self.rtlsdr_proc.terminate()
        self.rtlsdr_proc.wait()
        print("Receiver thread ends")

    def stop(self):
        """Signal the read loop to finish and wait for the thread to end."""
        self.event_stop.set()
        self.join()

    def get_samples(self, num_samples):
        """Return a string containing num_bytes if that is available,
        or return None if not enough data available.

        num_samples is a count of complex samples; two bytes (I and Q)
        are consumed from the FIFO per sample.
        """
        ret = None
        num_bytes = num_samples * 2
        self.data_lock.acquire()
        try:
            n_bytes = len(self.data_queue)
            if n_bytes > num_bytes:
                # NOTE(review): joining deque elements with "" assumes the
                # queue holds one-character strings (Python 2 bytes-as-str
                # semantics) — confirm before running under Python 3.
                ret = "".join(
                    self.data_queue.popleft() for i in range(num_bytes))
        except:
            raise
        finally:
            self.data_lock.release()
        return ret


class RTLSDR_CIR_Runner(mp.Process):
    def __init__(self, options, iq_file, fig_file):
        """Initialise a new runner, which runs rtl_tcp that will save to
        iq_file, and run the CIR analysis that will save to fig_file.

        options must contain freq, rate and samps fields
        """
        mp.Process.__init__(self)
        self.freq = float(options.freq)
        self.samps = int(options.samps)
        self.receiver = RTLSDR_Receiver(options)
        # Cross-process mailbox used by stop() to ask run() to quit.
        self.events = mp.Queue()
        # NOTE(review): iq_file is stored but never written in this file —
        # confirm whether saving the IQ capture is still intended.
        self.iq_file = iq_file
        self.fig_file = fig_file

    def stop(self):
        """Ask the process to quit and wait for it to terminate."""
        self.events.put("quit")
        self.join()

    def run(self):
        """Main loop: once per second, pull a block of samples from the
        receiver and run one correlation pass on it.
        """
        self.receiver.start()
        while True:
            time.sleep(1)
            try:
                samps = self.receiver.get_samples(self.samps)
                if samps:
                    print("Got {} samples".format(len(samps)))
                    # The RTLSDR outputs u8 format
                    iq_data = np.array(
                        [ord(c) for c in samps], np.uint8
                    )
                    self.do_one_cir_run(iq_data)
                else:
                    print("Got 0 samples")
            except Exception as e:
                # Keep the loop alive on analysis errors; only report them.
                print("Exception occurred: {}".format(e))
            except KeyboardInterrupt:
                print("Keyhoard Interrupt")
                break

            # Drain the control mailbox; quit when stop() asked us to.
            try:
                ev = self.events.get_nowait()
                if ev == "quit":
                    break
            except mp.queues.Empty:
                pass

        self.receiver.stop()

    def do_one_cir_run(self, iq_data):
        """Correlate one block of u8 I/Q data against the reference and
        save the resulting plot to fig_file.
        """
        print("Starting correlation")
        cir_corr = correlate_with_ref.CIR_Correlate(iq_data=iq_data,
                                                    iq_format="u8")

        title = "Correlation on {}kHz done at {}".format(
            int(self.freq / 1000),
            datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))

        cir_corr.plot(self.fig_file, title)


@route('/')
def index():
    # Render the main page with the current receiver settings and the
    # location of the latest correlation figure.
    return template('index',
                    freq=cli_args.freq,
                    rate=cli_args.rate,
                    gain=cli_args.gain,
                    fig_file=FIG_FILE)


@route('/static/<filename:path>')
def send_static(filename):
    # Serve the generated figure (and any other assets) from ./static.
    return static_file(filename, root='./static')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DAB Channel Impulse Measurement for RTL-SDR')

    # Options for the webserver
    parser.add_argument('--host', default='127.0.0.1', help='socket host (default: 127.0.0.1)', required=False)
    parser.add_argument('--port', default='8000', help='socket port (default: 8000)', required=False)

    # Options for RTLSDR reception
    parser.add_argument('--freq', help='Receive frequency', required=True)
    parser.add_argument('--samps', default=10*196608, help='Number of samples to analyse in one run, one transmission frame at 2048000 samples per second is 196608 samples', required=False)
    parser.add_argument('--gain', default=20, help='Gain setting for rtl_sdr', required=False)
    parser.add_argument('--rate', default='2048000', help='Samplerate for RTLSDR receiver (2048000)', required=False)

    cli_args = parser.parse_args()

    # File to save the recorded IQ file to
    IQ_FILE = "static/rtlsdr.iq"

    # The figures are saved to a file
    FIG_FILE = "static/rtlsdr.svg"

    # Start the capture/analysis process, then serve the web UI; make
    # sure the process is stopped even if the server exits with an error.
    rtlsdr_cir = RTLSDR_CIR_Runner(cli_args, IQ_FILE, FIG_FILE)
    rtlsdr_cir.start()

    try:
        run(host=cli_args.host, port=int(cli_args.port), debug=True, reloader=False)
    finally:
        rtlsdr_cir.stop()
{ "content_hash": "4ac29391bf8c2226d3c3d14255f4ec90", "timestamp": "", "source": "github", "line_count": 228, "max_line_length": 131, "avg_line_length": 31.32456140350877, "alnum_prop": 0.5817698123774853, "repo_name": "mpbraendli/dab-autocorrelation", "id": "8b7a3b6c61de9e364ab9a8f04ae0dd957079df34", "size": "7523", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cir_measure.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "958" }, { "name": "Python", "bytes": "159247" }, { "name": "Smarty", "bytes": "675" } ], "symlink_target": "" }
'''
http://explorer.viacoin.org/
'''
import logging

from lib import config, util, util_bitcoin


def get_host():
    """Return the base URL of the Insight instance to talk to.

    Uses config.BLOCKCHAIN_SERVICE_CONNECT when set, otherwise a
    localhost default whose port depends on config.TESTNET.
    """
    if config.BLOCKCHAIN_SERVICE_CONNECT:
        return config.BLOCKCHAIN_SERVICE_CONNECT
    else:
        return 'http://localhost:3001' if config.TESTNET else 'http://localhost:3000'


def check():
    """Verify that Insight is reachable and synced.

    Raises Exception when Insight returns nothing or reports an error;
    logs a warning when Insight is still syncing.
    """
    result = util.get_url(get_host() + '/api/sync/', abort_on_error=True)
    if not result:
        # Bug fix: the previous message formatting indexed into the empty
        # result (result['error']), which raised TypeError and masked the
        # real problem. Report the absence of a response instead.
        raise Exception('Insight reports error: no result')
    if result['status'] == 'error':
        raise Exception('Insight reports error: %s' % result['error'])
    if result['status'] == 'syncing':
        logging.warning("WARNING: Insight is not fully synced to the blockchain: %s%% complete" % result['syncPercentage'])


def getinfo():
    """Return Insight's getInfo status payload."""
    return util.get_url(get_host() + '/api/status?q=getInfo', abort_on_error=True)


def listunspent(address):
    """Return the unspent outputs (UTXOs) for *address*."""
    return util.get_url(get_host() + '/api/addr/' + address + '/utxo/', abort_on_error=True)


def getaddressinfo(address):
    """Return Insight's address summary for *address*."""
    return util.get_url(get_host() + '/api/addr/' + address + '/', abort_on_error=True)


def gettransaction(tx_hash):
    """Return the decoded transaction for *tx_hash* (None-tolerant:
    does not abort on error)."""
    return util.get_url(get_host() + '/api/tx/' + tx_hash + '/', abort_on_error=False)


def get_pubkey_for_address(address):
    """Recover the public key for *address* from its transaction history.

    Scans the address's transactions, extracts the pubkey from the first
    input's scriptSig, and returns it (hex) when it hashes back to the
    given address. Returns None when the address has no transactions or
    no matching pubkey was found.
    """
    # first, get a list of transactions for the address
    address_info = getaddressinfo(address)

    # if no transactions, we can't get the pubkey
    if not address_info['transactions']:
        return None

    # for each transaction we got back, extract the vin, pubkey, go through,
    # convert it to binary, and see if it reduces down to the given address
    for tx_id in address_info['transactions']:
        # parse the pubkey out of the first sent transaction
        # NOTE(review): assumes vin[0] has a standard pay-to-pubkey-hash
        # scriptSig ("<sig> <pubkey>"); coinbase or non-standard inputs
        # would raise here — confirm whether callers rely on that.
        tx = gettransaction(tx_id)
        pubkey_hex = tx['vin'][0]['scriptSig']['asm'].split(' ')[1]
        if util_bitcoin.pubkey_to_address(pubkey_hex) == address:
            return pubkey_hex
    return None
{ "content_hash": "88d1c57be661eeefee0a83ddb00e8e77", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 145, "avg_line_length": 38.42, "alnum_prop": 0.659552316501822, "repo_name": "ClearingHouse/clearblockd", "id": "1a15c68e3d89bbfdb5ce2ea97532d0132fc287fc", "size": "1921", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/blockchain/insight.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "326740" }, { "name": "Shell", "bytes": "5106" } ], "symlink_target": "" }
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *


class TestCABase (TestCase):
    """Exercise the CABase.h function bindings exposed by QuartzCore."""

    @min_os_level('10.5')
    def testFunctions(self):
        # CACurrentMediaTime returns the host clock in seconds as a
        # C double, which must surface in Python as a float.
        now = CACurrentMediaTime()
        self.assertIsInstance(now, float)


if __name__ == "__main__":
    main()
{ "content_hash": "1b3eb6db83bac54a87ada60bed158b4e", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 39, "avg_line_length": 24.272727272727273, "alnum_prop": 0.6479400749063671, "repo_name": "albertz/music-player", "id": "5f241945a3ad088dbb7f487896c92c178ca2b061", "size": "268", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mac/pyobjc-framework-Quartz/PyObjCTest/test_cabase.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Assembly", "bytes": "47481" }, { "name": "C", "bytes": "435926" }, { "name": "C++", "bytes": "149133" }, { "name": "CSS", "bytes": "16435" }, { "name": "HTML", "bytes": "914432" }, { "name": "JavaScript", "bytes": "52869" }, { "name": "M", "bytes": "10808" }, { "name": "Makefile", "bytes": "13304" }, { "name": "Mathematica", "bytes": "61418" }, { "name": "Objective-C", "bytes": "2082720" }, { "name": "Objective-C++", "bytes": "62427" }, { "name": "PostScript", "bytes": "2783" }, { "name": "Prolog", "bytes": "217" }, { "name": "Python", "bytes": "7789845" }, { "name": "QMake", "bytes": "9667" }, { "name": "Roff", "bytes": "8329" }, { "name": "Shell", "bytes": "3521" } ], "symlink_target": "" }
import os

from setuptools import find_packages, setup

pkgname = "vdt.versionplugin.gitchain"


def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Used below to load README.rst as the long description regardless of
    the current working directory.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous version relied on garbage collection to close it.
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()


setup(name=pkgname,
      version="0.0.4",
      description="Instead of building packages as a freezed state, propagate tags between git repositories.",
      long_description=read('README.rst'),
      author="Lars van de Kerkhof",
      author_email="lars@permanentmarkers.nl",
      maintainer="Lars van de Kerkhof",
      maintainer_email="lars@permanentmarkers.nl",
      packages=find_packages(),
      include_package_data=True,
      namespace_packages=['vdt', 'vdt.versionplugin'],
      zip_safe=True,
      install_requires=[
          "setuptools",
          "vdt.version",
          "vdt.versionplugin.default",
      ],
      entry_points={},
      )
{ "content_hash": "21daf03db8bb194ac7e7a7a65bcf4343", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 110, "avg_line_length": 31.22222222222222, "alnum_prop": 0.6524317912218268, "repo_name": "devopsconsulting/vdt.versionplugin.gitchain", "id": "69678af229f7c0d3dab82a1a7fb45dee1cbabe57", "size": "843", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "828" } ], "symlink_target": "" }
"""Integration tests for the Wia Spaces API and space-scoped devices.

These tests hit the live Wia service: they require network access plus the
WIA_TEST_USERNAME / WIA_TEST_PASSWORD and device_secret_key environment
variables.
"""
try:
    import unittest2 as unittest
except ImportError:
    import unittest
import logging
import time
import os
import datetime
import random

from wia import Wia
from wia.error import WiaError, WiaValidationError, WiaUnauthorisedError, WiaForbiddenError, WiaNotFoundError


class SpacesTest(unittest.TestCase):
    def test_spaces_create_and_retrieve(self):
        """A created space can be retrieved by id with the same name."""
        wia = Wia()
        wia.access_token_create(username=os.environ['WIA_TEST_USERNAME'], password=os.environ['WIA_TEST_PASSWORD'])
        # Date + 128 random bits keeps names unique across test runs.
        random_name = str(datetime.date.today()) + str(random.getrandbits(128))
        space = wia.Space.create(name=random_name)
        self.assertTrue(space.name == random_name)
        space_retrieve = wia.Space.retrieve(space.id)
        self.assertTrue(space.name == space_retrieve.name)

    def test_spaces_list(self):
        """Listing spaces returns a positive count for this account."""
        wia = Wia()
        spaces = wia.Space.list()
        self.assertTrue(spaces['count'] > 0)

    def test_spaces_create_retrieve_update_list_delete_device(self):
        """Full device lifecycle (create/retrieve/update/list/delete)
        inside the account's first space."""
        wia = Wia()
        spaces = wia.Space.list()
        testSpace = spaces['spaces'][0]
        # "space.id" is a dotted API field name, hence the **{...} form.
        testDevice = wia.Device.create(**{"name": "testDevice", "space.id": testSpace.id})
        self.assertIsInstance(testDevice, type(Wia().Device))
        retrieveDevice = wia.Device.retrieve(testDevice.id)
        self.assertTrue(retrieveDevice.name == "testDevice")
        deviceUpdated = wia.Device.update(id=retrieveDevice.id, name="testDeviceUpdated")
        self.assertTrue(deviceUpdated.name == "testDeviceUpdated")
        list_return = wia.Device.list(**{"space.id": testSpace.id})
        self.assertTrue('devices' in list_return)
        self.assertTrue(type(list_return['devices']) == list)
        self.assertTrue('count' in list_return)
        # Clean up the device created above.
        self.assertTrue(wia.Device.delete(deviceUpdated.id))

    def test_spaces_devices_list_order_desc(self):
        """Devices can be listed ordered by createdAt descending."""
        wia = Wia()
        spaces = wia.Space.list()
        testSpace = spaces['spaces'][0]
        list_return_desc = wia.Device.list(**{"space.id": testSpace.id, "order": "createdAt", "sort": "desc"})
        self.assertTrue('devices' in list_return_desc)
        self.assertTrue(type(list_return_desc['devices']) == list)
        self.assertTrue('count' in list_return_desc)

    def test_spaces_devices_list_order_asc(self):
        """Devices can be listed ordered by createdAt ascending."""
        wia = Wia()
        spaces = wia.Space.list()
        testSpace = spaces['spaces'][0]
        list_return_asc = wia.Device.list(**{"space.id": testSpace.id, "order": "createdAt", "sort": "asc"})
        self.assertTrue('devices' in list_return_asc)
        self.assertTrue(type(list_return_asc['devices']) == list)
        self.assertTrue('count' in list_return_asc)

    # ERROR TESTS
    # These use a device-scoped token, which must not be allowed to
    # perform the operations below; each resets the token afterwards.

    def test_create_device_not_authorized(self):
        """Creating a device with a device token is rejected."""
        wia = Wia()
        wia.access_token = os.environ['device_secret_key']
        device = wia.Device.create(name='fail', public=True)
        self.assertIsInstance(device, WiaError)
        wia.access_token = None

    def test_create_device_invalid_params(self):
        """An invalid 'public' value yields an error response."""
        wia = Wia()
        wia.access_token = os.environ['device_secret_key']
        device = wia.Device.create(name='fail', public='Oops')
        self.assertIsInstance(device, WiaError)
        wia.access_token = None

    def test_create_device_wrong_params(self):
        """An unexpected 'data' parameter yields an error response."""
        wia = Wia()
        wia.access_token = os.environ['device_secret_key']
        device = wia.Device.create(name='fail', data=100)
        self.assertIsInstance(device, WiaError)
        wia.access_token = None

    def test_device_delete_unknown(self):
        """Deleting a nonexistent device yields an error response."""
        wia = Wia()
        wia.access_token = os.environ['device_secret_key']
        device = wia.Device.delete('nonexisting')
        self.assertIsInstance(device, WiaError)
        wia.access_token = None

    def test_device_retrieve_unknown(self):
        """Retrieving a nonexistent device yields an error response."""
        wia = Wia()
        wia.access_token = os.environ['device_secret_key']
        device = wia.Device.retrieve('dev_nonexisting')
        self.assertIsInstance(device, WiaError)
        wia.access_token = None


if __name__ == '__main__':
    unittest.main()
{ "content_hash": "fa99358415516b8ee4beac2330475dd6", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 112, "avg_line_length": 38.101851851851855, "alnum_prop": 0.6376670716889429, "repo_name": "wiaio/wia-python-sdk", "id": "72f5c7626cca729f35f9b48a915339274bead5fb", "size": "4115", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wia/test/resources/test_spaces.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6757" }, { "name": "HTML", "bytes": "11086816" }, { "name": "JavaScript", "bytes": "23025" }, { "name": "Python", "bytes": "40583" } ], "symlink_target": "" }
"Filter definition for underline_columns"
from .abstract import AbstractFilter


class UnderlineColumns(AbstractFilter):
    "Create columns from underlined text file"
    name = "Colonne par souligné"
    description = """Recherche des colonnes soulignées dans un fichier texte"""
    node_in = ['fichier']
    node_out = ['resultat']
    parameters = [
        {
            'name': 'Colonne cible',
            'key': 'target',
            'type': 'string'
        },
        {
            'name': 'Colonne par défaut',
            'key': 'default',
            'type': 'string'
        },
        {
            'name': 'Caractères de soulignage',
            'key': 'underline',
            'type': 'string'
        }
    ]

    def run(self):
        """Find columns.

        Scans the target column of the input flux. A cell made entirely of
        underline characters marks the cell just above it as the title of a
        new column; all other cells are data for the current column. The
        first column (values before any underlined title) gets the 'default'
        parameter as its header. The result is emitted as rows padded with
        '' where columns differ in length.

        Bug fix vs. previous version: the number of output rows is now
        computed after the trailing cell has been appended, so the last
        data row is no longer dropped when the final column is the longest.
        """
        flux = self._flux_in['fichier']
        underline_chars = self._param('underline')
        col_index = flux['headers'].index(self._param('target'))

        headers = [self._param('default')]
        columns = [[]]
        # 'pending' holds the previous cell: it becomes either a column
        # title (when the next cell is an underline) or plain data.
        pending = None
        for cell in (row[col_index] for row in flux['rows']):
            # all() is vacuously True on '', hence the explicit emptiness
            # check before treating the cell as an underline.
            if cell and all(ch in underline_chars for ch in cell):
                headers.append(pending)
                columns.append([])
                pending = None
            else:
                if pending is not None:
                    columns[-1].append(pending)
                pending = cell
        # Flush the last cell of the input into the current column.
        if pending is not None:
            columns[-1].append(pending)

        # Depth of the longest column determines the number of output rows;
        # computed *after* the flush above (this was the dropped-row bug).
        depth = max(len(col) for col in columns)

        self._flux_out['resultat'] = {
            'headers': headers,
            'rows': [
                [col[idx] if idx < len(col) else '' for col in columns]
                for idx in range(depth)
            ],
        }
{ "content_hash": "384856d6e7b9c2067b69dbdd4a8a938a", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 79, "avg_line_length": 29.484848484848484, "alnum_prop": 0.4773895169578623, "repo_name": "Exanis/cannelloni", "id": "da6fc69c4b27fe738b34c6f9503765a6bde06ea2", "size": "1974", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "backend/filters/underline_columns.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "105" }, { "name": "CSS", "bytes": "663" }, { "name": "HTML", "bytes": "35557" }, { "name": "JavaScript", "bytes": "39704" }, { "name": "Python", "bytes": "69684" }, { "name": "Shell", "bytes": "72" } ], "symlink_target": "" }
from userflow.forms import signin, signup, password, profile
{ "content_hash": "f8b51e879667e263a3146b1c272e56c2", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 60, "avg_line_length": 61, "alnum_prop": 0.819672131147541, "repo_name": "alexey-grom/django-userflow", "id": "8efaf4d09bb9199b41076afcf905ac965fb1dd9d", "size": "80", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "userflow/forms/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "28584" }, { "name": "Python", "bytes": "63319" } ], "symlink_target": "" }
import operator import sys import unittest import numpy import six import chainer from chainer.backends import cuda from chainer import basic_math from chainer import gradient_check from chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestBinaryOp(unittest.TestCase): def setUp(self): self.x1 = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.x2 = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx1 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx2 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) def check_forward(self, op, x1_data, x2_data): x1 = chainer.Variable(x1_data) x2 = chainer.Variable(x2_data) y = op(x1, x2) options = {} if self.dtype == numpy.float16: options = {'atol': 1e-4, 'rtol': 1e-3} testing.assert_allclose(op(self.x1, self.x2), y.data, **options) def forward_cpu(self, op): self.check_forward(op, self.x1, self.x2) def test_add_forward_cpu(self): self.forward_cpu(lambda x, y: x + y) def test_sub_forward_cpu(self): self.forward_cpu(lambda x, y: x - y) def test_mul_forward_cpu(self): self.forward_cpu(lambda x, y: x * y) def test_div_forward_cpu(self): self.forward_cpu(lambda x, y: x / y) def test_floordiv_forward_cpu(self): self.forward_cpu(lambda x, y: x // y) def test_pow_forward_cpu(self): self.forward_cpu(lambda x, y: x ** y) def test_radd_forward_cpu(self): self.forward_cpu(lambda x, y: y.__radd__(x)) def test_rsub_forward_cpu(self): self.forward_cpu(lambda x, y: y.__rsub__(x)) def test_rmul_forward_cpu(self): self.forward_cpu(lambda x, y: y.__rmul__(x)) def test_rdiv_forward_cpu(self): self.forward_cpu(lambda x, y: y.__rtruediv__(x)) def test_rfloordiv_forward_cpu(self): self.forward_cpu(lambda x, y: y.__rfloordiv__(x)) def test_rpow_forward_cpu(self): self.forward_cpu(lambda x, 
y: y.__rpow__(x)) def forward_gpu(self, op): self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2)) @attr.gpu def test_add_forward_gpu(self): self.forward_gpu(lambda x, y: x + y) @attr.gpu def test_sub_forward_gpu(self): self.forward_gpu(lambda x, y: x - y) @attr.gpu def test_mul_forward_gpu(self): self.forward_gpu(lambda x, y: x * y) @attr.gpu def test_div_forward_gpu(self): self.forward_gpu(lambda x, y: x / y) @attr.gpu def test_floordiv_forward_gpu(self): self.forward_gpu(lambda x, y: x // y) @attr.gpu def test_pow_forward_gpu(self): self.forward_gpu(lambda x, y: x ** y) @attr.gpu def test_radd_forward_gpu(self): self.forward_gpu(lambda x, y: y.__radd__(x)) @attr.gpu def test_rsub_forward_gpu(self): self.forward_gpu(lambda x, y: y.__rsub__(x)) @attr.gpu def test_rmul_forward_gpu(self): self.forward_gpu(lambda x, y: y.__rmul__(x)) @attr.gpu def test_rdiv_forward_gpu(self): self.forward_gpu(lambda x, y: y.__rtruediv__(x)) @attr.gpu def test_rfloordiv_forward_gpu(self): self.forward_gpu(lambda x, y: y.__rfloordiv__(x)) @attr.gpu def test_rpow_forward_gpu(self): self.forward_gpu(lambda x, y: y.__rpow__(x)) @attr.gpu def test_add_constant_allocation(self): x = 0 y = chainer.Variable(cuda.cupy.ones((1,))) z = y + x self.assertEqual(1, z.data.get()[0]) def check_backward(self, op, x1_data, x2_data, y_grad): options = {} if self.dtype == numpy.float16: options = {'atol': 5e-3, 'rtol': 5e-2} gradient_check.check_backward(op, (x1_data, x2_data), y_grad, dtype=numpy.float64, **options) def backward_cpu(self, op): self.check_backward(op, self.x1, self.x2, self.gy) def test_add_backward_cpu(self): self.backward_cpu(lambda x, y: x + y) def test_sub_backward_cpu(self): self.backward_cpu(lambda x, y: x - y) def test_mul_backward_cpu(self): self.backward_cpu(lambda x, y: x * y) def test_div_backward_cpu(self): self.backward_cpu(lambda x, y: x / y) def test_pow_backward_cpu(self): self.backward_cpu(lambda x, y: x ** y) def backward_gpu(self, op): self.check_backward( 
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2), cuda.to_gpu(self.gy)) @attr.gpu def test_add_backward_gpu(self): self.backward_gpu(lambda x, y: x + y) @attr.gpu def test_sub_backward_gpu(self): self.backward_gpu(lambda x, y: x - y) @attr.gpu def test_mul_backward_gpu(self): self.backward_gpu(lambda x, y: x * y) @attr.gpu def test_div_backward_gpu(self): self.backward_gpu(lambda x, y: x / y) @attr.gpu def test_pow_backward_gpu(self): self.backward_gpu(lambda x, y: x ** y) def check_double_backward( self, op, x1_data, x2_data, y_grad, ggx1_data, ggx2_data, **args): options = {} if self.dtype == numpy.float16: options = {'atol': 5e-3, 'rtol': 5e-2} options.update(args) def _op(*xs): y = op(*xs) return y * y gradient_check.check_double_backward( _op, (x1_data, x2_data), y_grad, (ggx1_data, ggx2_data), dtype=numpy.float64, **options) def double_backward_cpu(self, op, **options): self.check_double_backward( op, self.x1, self.x2, self.gy, self.ggx1, self.ggx2, **options) def test_div_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2) def test_pow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: x ** y) def test_rpow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: y.__rpow__(x)) def double_backward_gpu(self, op, **options): self.check_double_backward( op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx1), cuda.to_gpu(self.ggx2), **options) @attr.gpu def test_div_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2) @attr.gpu def test_pow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: x ** y) @attr.gpu def test_rpow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: y.__rpow__(x)) @testing.parameterize(*testing.product({ 'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestMultipleAdd(unittest.TestCase): def setUp(self): self.x1 = 
numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.x2 = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.x3 = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx1 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx2 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx3 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) def check_forward(self, func, x1_data, x2_data, x3_data): x1 = chainer.Variable(x1_data) x2 = chainer.Variable(x2_data) x3 = chainer.Variable(x3_data) y = func(x1, x2, x3) options = {} if self.dtype == numpy.float16: options = {'atol': 1e-4, 'rtol': 1e-3} testing.assert_allclose( (self.x1 + self.x2 + self.x3), y.data, **options) def forward_cpu(self, func): self.check_forward(func, self.x1, self.x2, self.x3) def test_add_forward_cpu(self): func = chainer.functions.add self.forward_cpu(func) def check_backward(self, func, x1_data, x2_data, x3_data, y_grad): options = {} if self.dtype == numpy.float16: options = {'atol': 5e-3, 'rtol': 5e-2} gradient_check.check_backward(func, (x1_data, x2_data, x3_data), y_grad, dtype=numpy.float64, **options) def backward_cpu(self, func): self.check_backward(func, self.x1, self.x2, self.x3, self.gy) def test_add_backward_cpu(self): func = chainer.functions.add self.backward_cpu(func) def check_double_backward( self, func, x1_data, x2_data, x3_data, y_grad, ggx1_data, ggx2_data, ggx3_data, **args): options = {} if self.dtype == numpy.float16: options = {'atol': 5e-3, 'rtol': 5e-2} options.update(args) def _func(*xs): y = func(*xs) return y * y gradient_check.check_double_backward( _func, (x1_data, x2_data, x3_data), y_grad, (ggx1_data, ggx2_data, ggx3_data), dtype=numpy.float64, **options) def double_backward_cpu(self, func, **options): self.check_double_backward( func, self.x1, self.x2, self.x3, self.gy, self.ggx1, self.ggx2, self.ggx3, **options) def 
test_double_backward_cpu(self): func = chainer.functions.add self.double_backward_cpu(func, atol=5e-2, rtol=5e-2) @testing.parameterize(*testing.product({ 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestBinaryOpConstant(unittest.TestCase): def _test_constant_one(self, func, lhs, rhs, gpu=False): if gpu: lhs = cuda.to_gpu(lhs) x = chainer.Variable(lhs) y = func(x, rhs) self.assertEqual(y.data.dtype, self.dtype) y.backward() self.assertEqual(x.grad.dtype, self.dtype) def _test_constant(self, func): x_data = numpy.array(1, self.dtype) self._test_constant_one(func, x_data, 1) self._test_constant_one(func, x_data, 1.0) self._test_constant_one(func, x_data, numpy.int64(1)) self._test_constant_one(func, x_data, numpy.float64(1.0)) def _test_constant_gpu(self, func): x_data = numpy.array(1, self.dtype) self._test_constant_one(func, x_data, 1, True) self._test_constant_one(func, x_data, 1.0, True) self._test_constant_one(func, x_data, numpy.int64(1), True) self._test_constant_one(func, x_data, numpy.float64(1), True) def _test_constant_array_one(self, func, lhs, rhs): x = chainer.Variable(lhs) y = func(x, rhs) self.assertEqual(y.data.dtype, self.dtype) y.grad = numpy.ones_like(y.data, self.dtype) y.backward() self.assertEqual(x.grad.dtype, self.dtype) def _test_constant_array(self, func): x_data = numpy.array([1.0, 2.0], self.dtype) self._test_constant_array_one( func, x_data, numpy.array([3.0, 4.0], numpy.int32)) self._test_constant_array_one( func, x_data, numpy.array([3.0, 4.0], numpy.int64)) self._test_constant_array_one( func, x_data, numpy.array([3.0, 4.0], numpy.float32)) self._test_constant_array_one( func, x_data, numpy.array([3.0, 4.0], numpy.float64)) with self.assertRaises(ValueError): self._test_constant_array_one(func, x_data, [3.0, 4.0]) with self.assertRaises(ValueError): self._test_constant_array_one(func, x_data, (3.0, 4.0)) with self.assertRaises(ValueError): self._test_constant_array_one(func, x_data, [3.0, 4.0, 5.0]) with 
self.assertRaises(ValueError): self._test_constant_array_one(func, x_data, (3.0, 4.0, 5.0)) with self.assertRaises(ValueError): self._test_constant_array_one( func, x_data, numpy.array([3.0, 4.0, 5.0], self.dtype)) def _test_constant_array_gpu_one(self, func, lhs, rhs): x = chainer.Variable(cuda.to_gpu(lhs)) y = func(x, rhs) self.assertEqual(y.data.dtype, self.dtype) y.grad = cuda.cupy.ones_like(y.data).astype(self.dtype) y.backward() self.assertEqual(x.grad.dtype, self.dtype) def _test_constant_array_gpu(self, func, exception=TypeError): x_data = numpy.array([1.0, 2.0], self.dtype) self._test_constant_array_gpu_one( func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int32))) self._test_constant_array_gpu_one( func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int64))) self._test_constant_array_gpu_one( func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float32))) self._test_constant_array_gpu_one( func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float64))) with self.assertRaises(exception): self._test_constant_array_one( func, x_data, cuda.to_gpu( numpy.array([3.0, 4.0, 5.0], self.dtype))) with six.assertRaisesRegex(self, ValueError, 'broadcast'): self._test_constant_array_gpu_one( func, x_data, cuda.to_gpu( numpy.array([[3.0, 4.0], [5.0, 6.0]], self.dtype))) def test_add_constant(self): self._test_constant(lambda x, y: x + y) @attr.gpu def test_add_constant_gpu(self): self._test_constant_gpu(lambda x, y: x + y) def test_add_constant_array(self): self._test_constant_array(lambda x, y: x + y) @attr.gpu def test_add_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: x + y) def test_radd_constant(self): self._test_constant(lambda x, y: y + x) @attr.gpu def test_radd_constant_gpu(self): self._test_constant_gpu(lambda x, y: y + x) def test_radd_constant_array(self): self._test_constant_array(lambda x, y: y + x) @attr.gpu def test_radd_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: y + x) def 
test_sub_constant(self): self._test_constant(lambda x, y: x - y) @attr.gpu def test_sub_constant_gpu(self): self._test_constant_gpu(lambda x, y: x - y) def test_sub_constant_array(self): self._test_constant_array(lambda x, y: x - y) @attr.gpu def test_sub_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: x - y) def test_rsub_constant(self): self._test_constant(lambda x, y: y - x) @attr.gpu def test_rsub_constant_gpu(self): self._test_constant_gpu(lambda x, y: y - x) def test_rsub_constant_array(self): self._test_constant_array(lambda x, y: y - x) @attr.gpu def test_rsub_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: y - x) def test_mul_constant(self): self._test_constant(lambda x, y: x * y) @attr.gpu def test_mul_constant_gpu(self): self._test_constant_gpu(lambda x, y: x * y) def test_mul_constant_array(self): self._test_constant_array(lambda x, y: x * y) @attr.gpu def test_mul_constant_array_gpu(self): self._test_constant_array(lambda x, y: x * y) def test_rmul_constant(self): self._test_constant(lambda x, y: y * x) @attr.gpu def test_rmul_constant_gpu(self): self._test_constant_gpu(lambda x, y: y * x) def test_rmul_constant_array(self): self._test_constant_array(lambda x, y: y * x) @attr.gpu def test_rmul_constant_array_gpu(self): # _test_constant_array_one throws pycuda._pvt_struct.error self._test_constant_array_gpu(lambda x, y: y * x, exception=Exception) def test_div_constant(self): self._test_constant(lambda x, y: x / y) @attr.gpu def test_div_constant_gpu(self): self._test_constant_gpu(lambda x, y: x / y) def test_div_constant_array(self): self._test_constant_array(lambda x, y: x / y) @attr.gpu def test_div_constant_array_gpu(self): # _test_constant_array_one throws pycuda._pvt_struct.error self._test_constant_array_gpu(lambda x, y: x / y, exception=Exception) def test_rdiv_constant(self): self._test_constant(lambda x, y: y / x) @attr.gpu def test_rdiv_constant_gpu(self): self._test_constant_gpu(lambda x, y: y / x) 
    # Remaining Variable-vs-constant cases of the enclosing binary-op test
    # class: true division and power, against scalar and array constants,
    # on CPU and GPU.
    def test_rdiv_constant_array(self): self._test_constant_array(lambda x, y: y / x)

    @attr.gpu
    def test_rdiv_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: y / x)

    def test_pow_constant(self): self._test_constant(lambda x, y: x ** y)

    @attr.gpu
    def test_pow_constant_gpu(self): self._test_constant_gpu(lambda x, y: x ** y)

    def test_pow_constant_array(self): self._test_constant_array(lambda x, y: x ** y)

    @attr.gpu
    def test_pow_constant_array_gpu(self): self._test_constant_array_gpu(lambda x, y: x ** y, exception=TypeError)

    def test_rpow_constant(self): self._test_constant(lambda x, y: y ** x)

    @attr.gpu
    def test_rpow_constant_gpu(self): self._test_constant_gpu(lambda x, y: y ** x)

    def test_rpow_constant_array(self): self._test_constant_array(lambda x, y: y ** x)

    @attr.gpu
    def test_rpow_constant_array_gpu(self):
        # _test_constant_array_one throws pycuda._pvt_struct.error
        self._test_constant_array_gpu(lambda x, y: y ** x, exception=Exception)


@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantOp(unittest.TestCase):
    """Forward/backward/double-backward tests of Variable <op> Python scalar."""

    def make_date(self):
        # NOTE(review): never overridden in this view and never called by the
        # tests below — looks like dead code; confirm before removing.
        raise NotImplementedError()

    def setUp(self):
        # Samples are drawn from [.5, 1) so div/pow stay away from 0.
        self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
        self.value = 0.5

    def check_forward(self, op, x_data):
        # Compare op applied to a Variable against op applied to the raw
        # ndarray; float16 gets looser tolerances.
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        if self.dtype == numpy.float16:
            atol = 5e-4
            rtol = 5e-4
        else:
            atol = 1e-7
            rtol = 1e-7
        testing.assert_allclose(
            op(self.x, self.value), y.data, atol=atol, rtol=rtol)

    def forward_cpu(self, op): self.check_forward(op, self.x)

    def test_add_forward_cpu(self): self.forward_cpu(lambda x, y: x + y)
    def test_radd_forward_cpu(self): self.forward_cpu(lambda x, y: y + x)
    def test_sub_forward_cpu(self): self.forward_cpu(lambda x, y: x - y)
    def test_rsub_forward_cpu(self): self.forward_cpu(lambda x, y: y - x)
    def test_mul_forward_cpu(self): self.forward_cpu(lambda x, y: x * y)
    def test_rmul_forward_cpu(self): self.forward_cpu(lambda x, y: y * x)
    def test_div_forward_cpu(self): self.forward_cpu(lambda x, y: x / y)
    def test_rdiv_forward_cpu(self): self.forward_cpu(lambda x, y: y / x)
    def test_pow_forward_cpu(self): self.forward_cpu(lambda x, y: x ** y)
    def test_rpow_forward_cpu(self): self.forward_cpu(lambda x, y: y ** x)

    def forward_gpu(self, op): self.check_forward(op, cuda.to_gpu(self.x))

    @attr.gpu
    def test_add_forward_gpu(self): self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    def test_radd_forward_gpu(self): self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    def test_sub_forward_gpu(self): self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    def test_rsub_forward_gpu(self): self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    def test_mul_forward_gpu(self): self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    def test_rmul_forward_gpu(self): self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    def test_div_forward_gpu(self): self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    def test_rdiv_forward_gpu(self): self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    def test_pow_forward_gpu(self): self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_forward_gpu(self): self.forward_gpu(lambda x, y: y ** x)

    def check_backward(self, op, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}
        gradient_check.check_backward(lambda x: op(x, self.value),
                                      x_data, y_grad, dtype=numpy.float64,
                                      **options)

    def backward_cpu(self, op): self.check_backward(op, self.x, self.gy)

    def test_add_backward_cpu(self): self.backward_cpu(lambda x, y: x + y)
    def test_radd_backward_cpu(self): self.backward_cpu(lambda x, y: y + x)
    def test_sub_backward_cpu(self): self.backward_cpu(lambda x, y: x - y)
    def test_rsub_backward_cpu(self): self.backward_cpu(lambda x, y: y - x)
    def test_mul_backward_cpu(self): self.backward_cpu(lambda x, y: x * y)
    def test_rmul_backward_cpu(self): self.backward_cpu(lambda x, y: y * x)
    def test_div_backward_cpu(self): self.backward_cpu(lambda x, y: x / y)
    def test_rdiv_backward_cpu(self): self.backward_cpu(lambda x, y: y / x)
    def test_pow_backward_cpu(self): self.backward_cpu(lambda x, y: x ** y)
    def test_rpow_backward_cpu(self): self.backward_cpu(lambda x, y: y ** x)

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    def test_add_backward_gpu(self): self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    def test_radd_backward_gpu(self): self.backward_gpu(lambda x, y: y + x)

    @attr.gpu
    def test_sub_backward_gpu(self): self.backward_gpu(lambda x, y: x - y)

    @attr.gpu
    def test_rsub_backward_gpu(self): self.backward_gpu(lambda x, y: y - x)

    @attr.gpu
    def test_mul_backward_gpu(self): self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    def test_rmul_backward_gpu(self): self.backward_gpu(lambda x, y: y * x)

    @attr.gpu
    def test_div_backward_gpu(self): self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    def test_rdiv_backward_gpu(self): self.backward_gpu(lambda x, y: y / x)

    @attr.gpu
    def test_pow_backward_gpu(self): self.backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_backward_gpu(self): self.backward_gpu(lambda x, y: y ** x)

    def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}

        # Square the output so the first gradient depends on x, making the
        # second-order check meaningful.
        def _op(x):
            y = op(x, self.value)
            return y * y

        gradient_check.check_double_backward(
            _op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)

    def double_backward_cpu(self, op):
        self.check_double_backward(op, self.x, self.gy, self.ggx)

    def test_pow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: x ** y)
    def test_rpow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: y ** x)
    def test_rdiv_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: y / x)

    def double_backward_gpu(self, op):
        self.check_double_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
            cuda.to_gpu(self.ggx))

    @attr.gpu
    def test_pow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: y ** x)

    @attr.gpu
    def test_rdiv_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: y / x)


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantArrayOp(unittest.TestCase):
    """Same operator matrix as above, but the constant is an ndarray."""

    def setUp(self):
        self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
        self.ggx = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
        self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)

    def check_forward(self, op, x_data, gpu, positive):
        # positive=True forces a non-negative constant (needed for rpow,
        # where the constant is the base).
        value = self.value
        if positive:
            value = numpy.abs(value)
        v = value
        if gpu:
            v = cuda.to_gpu(v)
        x = chainer.Variable(x_data)
        y = op(x, v)
        if self.dtype == numpy.float16:
            tol = 1e-3
        else:
            tol = 1e-6
        testing.assert_allclose(
            op(self.x, value), y.data, atol=tol, rtol=tol)

    def forward_cpu(self, op, positive=False):
        self.check_forward(op, self.x, False, positive)

    def test_add_forward_cpu(self): self.forward_cpu(lambda x, y: x + y)
    def test_radd_forward_cpu(self): self.forward_cpu(lambda x, y: y + x)
    def test_sub_forward_cpu(self): self.forward_cpu(lambda x, y: x - y)
    def test_rsub_forward_cpu(self): self.forward_cpu(lambda x, y: y - x)
    def test_mul_forward_cpu(self): self.forward_cpu(lambda x, y: x * y)
    def test_rmul_forward_cpu(self): self.forward_cpu(lambda x, y: y * x)
    def test_div_forward_cpu(self): self.forward_cpu(lambda x, y: x / y)
    def test_rdiv_forward_cpu(self): self.forward_cpu(lambda x, y: y / x)
    def test_pow_forward_cpu(self): self.forward_cpu(lambda x, y: x ** y)
    def test_rpow_forward_cpu(self): self.forward_cpu(lambda x, y: y ** x, positive=True)

    def forward_gpu(self, op, positive=False):
        self.check_forward(op, cuda.to_gpu(self.x), True, positive)

    @attr.gpu
    def test_add_forward_gpu(self): self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    def test_radd_forward_gpu(self): self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    def test_sub_forward_gpu(self): self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    def test_rsub_forward_gpu(self): self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    def test_mul_forward_gpu(self): self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    def test_rmul_forward_gpu(self): self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    def test_div_forward_gpu(self): self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    def test_rdiv_forward_gpu(self): self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    def test_pow_forward_gpu(self): self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_forward_gpu(self): self.forward_gpu(lambda x, y: y ** x, positive=True)

    def check_backward(self, op, x_data, y_grad, gpu, positive):
        value = self.value
        if positive:
            value = numpy.abs(value)
        if gpu:
            value = cuda.to_gpu(value)
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}
        gradient_check.check_backward(lambda x: op(x, value), x_data, y_grad,
                                      dtype=numpy.float64, **options)

    def backward_cpu(self, op, positive=False):
        self.check_backward(op, self.x, self.gy, False, positive)

    def test_add_backward_cpu(self): self.backward_cpu(lambda x, y: x + y)
    def test_radd_backward_cpu(self): self.backward_cpu(lambda x, y: y + x)
    def test_sub_backward_cpu(self): self.backward_cpu(lambda x, y: x - y)
    def test_rsub_backward_cpu(self): self.backward_cpu(lambda x, y: y - x)
    def test_mul_backward_cpu(self): self.backward_cpu(lambda x, y: x * y)
    def test_rmul_backward_cpu(self): self.backward_cpu(lambda x, y: y * x)
    def test_div_backward_cpu(self): self.backward_cpu(lambda x, y: x / y)
    def test_rdiv_backward_cpu(self): self.backward_cpu(lambda x, y: y / x)
    def test_pow_backward_cpu(self): self.backward_cpu(lambda x, y: x ** y)
    def test_rpow_backward_cpu(self): self.backward_cpu(lambda x, y: y ** x, positive=True)

    def backward_gpu(self, op, positive=False):
        self.check_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy), True, positive)

    @attr.gpu
    def test_add_backward_gpu(self): self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    def test_radd_backward_gpu(self): self.backward_gpu(lambda x, y: y + x)

    @attr.gpu
    def test_sub_backward_gpu(self): self.backward_gpu(lambda x, y: x - y)

    # NOTE(review): there is no test_rsub_backward_gpu here, unlike the CPU
    # list above — looks like a gap in coverage; confirm upstream.

    @attr.gpu
    def test_mul_backward_gpu(self): self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    def test_rmul_backward_gpu(self): self.backward_gpu(lambda x, y: y * x)

    @attr.gpu
    def test_div_backward_gpu(self): self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    def test_rdiv_backward_gpu(self): self.backward_gpu(lambda x, y: y / x)

    @attr.gpu
    def test_pow_backward_gpu(self): self.backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_backward_gpu(self): self.backward_gpu(lambda x, y: y ** x, positive=True)

    def check_double_backward(self, op, x_data, y_grad, x_grad_grad,
                              gpu, positive):
        value = self.value
        if positive:
            value = numpy.abs(value)
        if gpu:
            value = cuda.to_gpu(value)
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}

        def _op(x):
            y = op(x, value)
            return y * y

        gradient_check.check_double_backward(
            _op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)

    def double_backward_cpu(self, op, positive=False):
        self.check_double_backward(
            op, self.x, self.gy, self.ggx, False, positive)

    def test_pow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: x ** y)
    def test_rpow_double_backward_cpu(self): self.double_backward_cpu(lambda x, y: y ** x, positive=True)

    def double_backward_gpu(self, op, positive=False):
        self.check_double_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
            cuda.to_gpu(self.ggx), True, positive)

    @attr.gpu
    def test_pow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    def test_rpow_double_backward_gpu(self): self.double_backward_gpu(lambda x, y: y ** x, positive=True)
@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUnaryFunctions(unittest.TestCase):
    """Forward/backward/double-backward tests for unary -x and abs(x)."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Push samples away from 0 where abs is not differentiable.
        for i in numpy.ndindex(self.shape):
            if -0.1 < self.x[i] < 0.1:
                self.x[i] = 0.5
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)

    def check_forward(self, op, op_np, x_data):
        x = chainer.Variable(x_data)
        y = op(x)
        testing.assert_allclose(
            op_np(self.x), y.data, atol=1e-7, rtol=1e-7)

    def forward_cpu(self, op, op_np):
        self.check_forward(op, op_np, self.x)

    def test_neg_forward_cpu(self): self.forward_cpu(lambda x: -x, lambda x: -x)
    def test_abs_forward_cpu(self): self.forward_cpu(lambda x: abs(x), lambda x: abs(x))

    def forward_gpu(self, op, op_np):
        self.check_forward(op, op_np, cuda.to_gpu(self.x))

    @attr.gpu
    def test_neg_forward_gpu(self): self.forward_gpu(lambda x: -x, lambda x: -x)

    @attr.gpu
    def test_abs_forward_gpu(self): self.forward_gpu(lambda x: abs(x), lambda x: abs(x))

    def check_backward(self, op, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}
        gradient_check.check_backward(
            op, x_data, y_grad, dtype=numpy.float64, **options)

    def backward_cpu(self, op):
        self.check_backward(op, self.x, self.gy)

    def test_neg_backward_cpu(self): self.backward_cpu(lambda x: -x)
    def test_abs_backward_cpu(self): self.backward_cpu(lambda x: abs(x))

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    def test_neg_backward_gpu(self): self.backward_gpu(lambda x: -x)

    @attr.gpu
    def test_abs_backward_gpu(self): self.backward_gpu(lambda x: abs(x))

    def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}

        # Square the output so the first gradient depends on x.
        def f(x):
            x = op(x)
            return x * x

        gradient_check.check_double_backward(
            f, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)

    def double_backward_cpu(self, op):
        self.check_double_backward(op, self.x, self.gy, self.ggx)

    def test_neg_double_backward_cpu(self): self.double_backward_cpu(lambda x: -x)
    def test_abs_double_backward_cpu(self): self.double_backward_cpu(lambda x: abs(x))

    def double_backward_gpu(self, op):
        self.check_double_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
            cuda.to_gpu(self.ggx))

    @attr.gpu
    def test_neg_double_backward_gpu(self): self.double_backward_gpu(lambda x: -x)

    @attr.gpu
    def test_abs_double_backward_gpu(self): self.double_backward_gpu(lambda x: abs(x))


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestNegativePow(unittest.TestCase):
    """Gradient checks for x ** 2 on strictly negative inputs."""

    def setUp(self):
        # Base values drawn from [-1, 0).
        self.x = numpy.random.uniform(-1, 0, (3, 2)).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)

    def check_backward(self, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}
        gradient_check.check_backward(
            lambda x: x ** 2, x_data, y_grad, dtype=numpy.float64, **options)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    def check_double_backward(self, x_data, y_grad, x_grad_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-3, 'rtol': 5e-2}
        gradient_check.check_double_backward(
            lambda x: x ** 2, x_data, y_grad, x_grad_grad,
            dtype=numpy.float64, **options)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.gy, self.ggx)

    @attr.gpu
    def test_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))


@testing.parameterize(*testing.product_dict(
    [
        {'left_const': False, 'right_const': False},
        {'left_const': True, 'right_const': False},
        {'left_const': False, 'right_const': True},
    ],
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64},
    ],
    [
        {'x_shape': (3, 2), 'y_shape': (2, 4), 'z_shape': (3, 4)},
        {'x_shape': (2, 3, 2), 'y_shape': (2, 2, 4), 'z_shape': (2, 3, 4)},
        {'x_shape': (3,), 'y_shape': (3,), 'z_shape': ()},
    ]
))
@unittest.skipUnless(sys.version_info >= (3, 5),
                     'Only for Python3.5 or higher')
class TestMatMulVarVar(unittest.TestCase):
    """Tests the @ operator (operator.matmul) on Variable/ndarray operands."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
        self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
        self.gz = numpy.random.uniform(-1, 1, self.z_shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(
            -1, 1, self.x_shape).astype(self.dtype)
        self.ggy = numpy.random.uniform(
            -1, 1, self.y_shape).astype(self.dtype)

    def _get_forward_answer(self, x, y):
        # Reference result: plain dot for <=2-D, batched matmul otherwise.
        if x.ndim <= 2:
            return numpy.dot(x, y)
        else:
            return numpy.einsum('...ij,...jk->...ik', x, y)

    def check_forward(self, x_data, y_data):
        # left_const / right_const select which operand stays a raw ndarray.
        if self.left_const:
            x = x_data
        else:
            x = chainer.Variable(x_data)
        if self.right_const:
            y = y_data
        else:
            y = chainer.Variable(y_data)
        z = operator.matmul(x, y)
        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-3}
        else:
            options = {'atol': 1e-7, 'rtol': 1e-7}
        testing.assert_allclose(
            self._get_forward_answer(self.x, self.y), z.data, **options)

    def test_forward_cpu(self):
        self.check_forward(self.x, self.y)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.y))

    def check_backward(self, x_data, y_data, z_grad):
        # Only the non-constant operand(s) are differentiated.
        if self.right_const:
            def op(x):
                return operator.matmul(x, y_data)
            data = x_data,
        elif self.left_const:
            def op(y):
                return operator.matmul(x_data, y)
            data = y_data,
        else:
            op = operator.matmul
            data = x_data, y_data
        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-2}
        else:
            options = {'atol': 1e-4, 'rtol': 1e-4}
        gradient_check.check_backward(
            op, data, z_grad, dtype=numpy.float64, **options)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.y, self.gz)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz))

    def check_double_backward(
            self, x_data, y_data, z_grad, x_grad_grad, y_grad_grad):
        if self.right_const:
            def op(x):
                z = operator.matmul(x, y_data)
                return z * z
            data = x_data,
            grad_grad = x_grad_grad,
        elif self.left_const:
            def op(y):
                z = operator.matmul(x_data, y)
                return z * z
            data = y_data,
            grad_grad = y_grad_grad,
        else:
            def op(x, y):
                z = operator.matmul(x, y)
                return z * z
            data = x_data, y_data
            grad_grad = x_grad_grad, y_grad_grad
        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-2}
        else:
            options = {'atol': 1e-4, 'rtol': 1e-4}
        gradient_check.check_double_backward(
            op, data, z_grad, grad_grad, dtype=numpy.float64, **options)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.y, self.gz,
                                   self.ggx, self.ggy)

    @attr.gpu
    def test_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz),
            cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggy))


class TestNotSupportOperation(unittest.TestCase):
    """Comparisons and truth-testing on Variable must raise."""

    def setUp(self):
        self.x = chainer.Variable(numpy.zeros(10))
        self.y = chainer.Variable(numpy.zeros(10))

    def test_lt(self):
        with self.assertRaises(NotImplementedError):
            self.x < self.y

    def test_le(self):
        with self.assertRaises(NotImplementedError):
            self.x <= self.y

    def test_eq(self):
        with self.assertRaises(NotImplementedError):
            self.x == self.y

    def test_ne(self):
        with self.assertRaises(NotImplementedError):
            self.x != self.y

    def test_gt(self):
        with self.assertRaises(NotImplementedError):
            self.x > self.y

    def test_ge(self):
        with self.assertRaises(NotImplementedError):
            self.x >= self.y

    def test_nonzero(self):
        # bool(Variable) is also unsupported.
        with self.assertRaises(NotImplementedError):
            if self.x:
                pass


class ConvertValueToStringTest(unittest.TestCase):
    """Tests basic_math._convert_value_to_string formatting rules."""

    def _check_scalar(self, value, string):
        self.assertEqual(basic_math._convert_value_to_string(value), string)

    def test_integer_positive(self): self._check_scalar(2, '2')
    def test_integer_zero(self): self._check_scalar(0, '0')
    def test_integer_negative(self): self._check_scalar(-2, '(-2)')
    def test_float_positive(self): self._check_scalar(2.0, '2.0')
    def test_float_zero(self): self._check_scalar(0.0, '0.0')
    def test_float_negative(self): self._check_scalar(-2.0, '(-2.0)')
    def test_numpy_scalar(self): self._check_scalar(numpy.float32(2), '2.0')

    def _check_array(self, value, string):
        # Arrays format the same whether raw or wrapped in a Variable.
        self.assertEqual(basic_math._convert_value_to_string(value), string)
        value = chainer.Variable(value)
        self.assertEqual(basic_math._convert_value_to_string(value), string)

    def test_array_cpu(self):
        self._check_array(numpy.array([1, 2]), 'constant array')

    @attr.gpu
    def test_array_gpu(self):
        self._check_array(cuda.ndarray([1, 2]), 'constant array')


class TestLabel(unittest.TestCase):
    """Tests the human-readable .label of each basic_math function node."""

    def test_neg(self): self.assertEqual(basic_math.Neg().label, '__neg__')
    def test_absolute(self): self.assertEqual(basic_math.Absolute().label, '|_|')
    def test_add(self): self.assertEqual(basic_math.Add().label, '_ + _')
    def test_add_constant(self): self.assertEqual(basic_math.AddConstant(2.0).label, '_ + 2.0')
    def test_sub(self): self.assertEqual(basic_math.Sub().label, '_ - _')
    def test_sub_from_constant(self): self.assertEqual(basic_math.SubFromConstant(2.0).label, '2.0 - _')
    def test_mul(self): self.assertEqual(basic_math.Mul().label, '_ * _')
    def test_mul_constant(self): self.assertEqual(basic_math.MulConstant(2.0).label, '_ * 2.0')
    def test_div(self): self.assertEqual(basic_math.Div().label, '_ / _')
    def test_div_from_constant(self): self.assertEqual(basic_math.DivFromConstant(2.0).label, '2.0 / _')
    def test_pow_var_var(self): self.assertEqual(basic_math.PowVarVar().label, '_ ** _')
    def test_pow_var_const(self): self.assertEqual(basic_math.PowVarConst(2.0).label, '_ ** 2.0')
    def test_pow_const_var(self): self.assertEqual(basic_math.PowConstVar(2.0).label, '2.0 ** _')

    def test_matmul_var_var(self):
        self.assertEqual(basic_math.MatMulVarVar().label, '_ @ _')

    def test_matmul_var_const(self):
        self.assertEqual(
            basic_math.MatMulVarConst(numpy.zeros((2, 2))).label,
            '_ @ constant array')

    def test_matmul_const_var(self):
        self.assertEqual(
            basic_math.MatMulConstVar(numpy.zeros((2, 2))).label,
            'constant array @ _')


testing.run_module(__name__, __file__)
{ "content_hash": "59cfce1a416c0ff313234fb7fbe51f5e", "timestamp": "", "source": "github", "line_count": 1402, "max_line_length": 79, "avg_line_length": 31.420114122681884, "alnum_prop": 0.5805316564890695, "repo_name": "aonotas/chainer", "id": "45dc32e88c5844f86d9bc755f5c8f038d0220f8c", "size": "44051", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/chainer_tests/functions_tests/math_tests/test_basic_math.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "3368" }, { "name": "PowerShell", "bytes": "7197" }, { "name": "Python", "bytes": "3357320" } ], "symlink_target": "" }
"""Command-line entry point for the fetchkeys Django project."""
import os
import sys

from django.core.management import execute_from_command_line


def _main(argv):
    # Point Django at the project settings unless the caller already set them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fetchkeys.settings")
    execute_from_command_line(argv)


if __name__ == "__main__":
    _main(sys.argv)
{ "content_hash": "113605e70406fb62b7efb2dff821f477", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 73, "avg_line_length": 28.125, "alnum_prop": 0.7288888888888889, "repo_name": "vicaroni/fetch", "id": "c91a869eee05e1147ef7019f69e47dc55e00bb4d", "size": "247", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fetchkeys/manage.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "570" }, { "name": "HTML", "bytes": "2689" }, { "name": "Python", "bytes": "18989" } ], "symlink_target": "" }
__author__ = "Rastislav Szabo <raszabo@cisco.com>, Lukas Macko <lmacko@cisco.com>" __copyright__ = "Copyright 2016, Cisco Systems, Inc." __license__ = "Apache 2.0" # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from random import randint import os, time import unittest import traceback import sys class Tester(object): """ Class instance simulates the steps executed by tester. A step can be added by add_step function. Steps are executed in synchronized order with other testers. Attributes: steps (list) - list of steps to be executed _current_step(int) - index of the step to be executed """ def __init__(self, name=None): self.name = name self.tc = unittest.TestCase('__init__') self.steps = [] def setup(self): """Method executed before steps""" pass def cleanup(self): """Method executed after steps""" pass def add_step(self, step, *args): """Adds the step to the end of the list""" self.steps.append((step, args)) def has_next(self): """Checks whether there is a step to be executed""" return self._current_step < (len(self.steps)) def report_pid(self, pid): """Insert pid into notification queue""" if self.sub_proc is not None: self.sub_proc.put(pid) def print_with_lock(self, *args): """Print to stdout with acquired lock""" if self.lock is not None: self.lock.acquire() print(args) self.lock.release() else: print(args) def execute_step(self, index): step, args = self.steps[index] if len(args) != 0: step(*args) else: step() def run_sync(self, done, next_step, rand_sleep, id, queue): """run 
steps in sync with other tester and inform test manager""" if self.name is None: self.name = id #empty steps check if len(self.steps) == 0: next_step.acquire() queue.put((id, self.name, False)) done.release() for step in range(len(self.steps)): err = None next_step.acquire() if rand_sleep: time.sleep(randint(1,1000)*0.00001) #self.print_with_lock('Step: ', step, ", pid: ", os.getpid()) try: self.execute_step(step) except Exception as e: _, _, tb = sys.exc_info() traceback.print_tb(tb) # Print traceback print("\n\n", file=sys.stderr) err = e finally: self._current_step += 1 #report to test manager if there is any step left if err is not None: queue.put((id, self.name, err)) elif self.has_next(): queue.put((id, self.name, True)) else: queue.put((id, self.name, False)) done.release() def run_without_sync(self): """run steps independently""" for step in range(len(self.steps)): self.execute_step(step) def run(self, done=None, next_step=None, rand_sleep=False, lock=None, sub_proc=None, pids=None, id=-1, queue=None): """Executes the tester steps Arguments: done (Semaphore) - to be acquired before step execution next_step (Semaphore) - to be released on step completion rand_sleep (bool) - flag whether sleep before each step lock (Lock) - stdout synchronization sub_proc (Queue) - queue to report pids of created process in tester to be terminated by testmanager pids (Array) - pids of other testers id (int) - identification used for queue messages index of the tester pid in pids queue (Queue) - message for notification whether there is a step to be executed """ self.setup() self.lock = lock self._current_step = 0 self.pids = pids self.sub_proc = sub_proc if done is not None and next_step is not None and queue is not None: self.run_sync(done, next_step, rand_sleep, id, queue) else: self.run_without_sync() self.cleanup() def waitStep(self): """Step that can be used by tester, to do nothing""" pass def runTest(self): """This needs to be here to make unit test happy""" pass
{ "content_hash": "495229bcb02f20c4ae6fc1a67c520c56", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 123, "avg_line_length": 34.31333333333333, "alnum_prop": 0.5655721779677482, "repo_name": "morganzhh/sysrepo", "id": "f414bf8990b568deb790b8a8289b0f14c84c68c8", "size": "5193", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "swig/python3/tests/ConcurrentHelpers/Tester.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "3566610" }, { "name": "C++", "bytes": "135368" }, { "name": "CMake", "bytes": "52482" }, { "name": "Dockerfile", "bytes": "3295" }, { "name": "Go", "bytes": "13128" }, { "name": "Java", "bytes": "20086" }, { "name": "Lua", "bytes": "118071" }, { "name": "Python", "bytes": "209790" }, { "name": "Ruby", "bytes": "1481" }, { "name": "Shell", "bytes": "7640" } ], "symlink_target": "" }
from pyIGTLink import * # noqa
{ "content_hash": "d9df952dc13292cedd0068d0fe4b516a", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 31, "avg_line_length": 32, "alnum_prop": 0.71875, "repo_name": "Danielhiversen/pyIGTLink", "id": "6da0aaf112b30f218c517e8a07567cff4cccfb6e", "size": "32", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyIGTLink/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "MATLAB", "bytes": "15295" }, { "name": "Python", "bytes": "25519" } ], "symlink_target": "" }
import os
import re

from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from rally.openstack.common.db import exception
from rally.openstack.common.gettextutils import _


def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    :param table: sqlalchemy Table whose UNIQUE constraints are wanted
    :returns: list of sqlalchemy UniqueConstraint objects reconstructed
              from the table's CREATE TABLE statement in sqlite_master
    """
    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    # FIX: pattern must be a raw string -- "\w" and "\(" are regex escapes,
    # not valid Python string escapes (DeprecationWarning since 3.6,
    # SyntaxError in 3.12).
    UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    :param table: table being altered
    :param column: column being added/altered (passed to _modify_table)
    :param delta: column delta (passed to _modify_table)
    :param omit_uniques: names of unique constraints to drop from the
                         recreated table
    """
    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    # SQLite cannot ALTER in place: rename the old table aside, create the
    # new schema, copy the rows across, then drop the renamed original.
    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error
    """
    self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

        - information about unique constraints of a table
          is not retrieved. So if you have a table with one
          unique constraint and a migration adding another one
          you will end up with a table that has only the
          latter unique constraint, and the former will be lost

        - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.
    """
    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)


def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    :raises DbMigrationError: if *version* is not an integer
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)
    # Direction is decided by comparing the requested version against the
    # version the database currently reports.
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository, version)


def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine:  SQLAlchemy engine instance for a given database
    """
    if engine.name == 'mysql':
        # Only MySQL needs this check: every table must be utf8-collated.
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
                                                        engine.url.database)]
        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))


def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        # An empty DB (or one managed by alembic) can safely be stamped
        # with the initial version; anything else needs manual attention.
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))


def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    versioning_api.version_control(engine, repository, version)
    return version


def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    :raises DbMigrationError: if *abs_path* does not exist
    """
    if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
    return Repository(abs_path)
{ "content_hash": "23835f68505de777243c1024c121e02b", "timestamp": "", "source": "github", "line_count": 230, "max_line_length": 79, "avg_line_length": 36.27391304347826, "alnum_prop": 0.6629509768668345, "repo_name": "ytsarev/rally", "id": "73ef73c45fe325f417c9ded47c7540479a88d19e", "size": "10212", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rally/openstack/common/db/sqlalchemy/migration.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "984256" }, { "name": "Shell", "bytes": "14201" } ], "symlink_target": "" }
"""Functional tests for ``dvc experiments`` (run/apply/branch/list/...)."""
import itertools
import logging
import os
import stat

import pytest
from funcy import first

from dvc.dvcfile import PIPELINE_FILE
from dvc.exceptions import DvcException
from dvc.repo.experiments.utils import exp_refs_by_rev
from dvc.scm import resolve_rev
from dvc.utils.serialize import PythonFileCorruptedError
from tests.func.test_repro_multistage import COPY_SCRIPT


@pytest.mark.parametrize(
    "name,workspace",
    [(None, True), (None, False), ("foo", True), ("foo", False)],
)
def test_new_simple(tmp_dir, scm, dvc, exp_stage, mocker, name, workspace):
    """Running an experiment creates a ref on the current baseline."""
    baseline = scm.get_rev()
    tmp_dir.gen("params.yaml", "foo: 2")

    new_mock = mocker.spy(dvc.experiments, "new")
    results = dvc.experiments.run(
        exp_stage.addressing, name=name, tmp_dir=not workspace
    )
    exp = first(results)
    ref_info = first(exp_refs_by_rev(scm, exp))
    assert ref_info and ref_info.baseline_sha == baseline

    new_mock.assert_called_once()
    fs = scm.get_fs(exp)
    with fs.open(tmp_dir / "metrics.yaml", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "foo: 2"

    if workspace:
        assert (tmp_dir / "metrics.yaml").read_text().strip() == "foo: 2"

    exp_name = name if name else ref_info.name
    assert dvc.experiments.get_exact_name(exp) == exp_name
    assert resolve_rev(scm, exp_name) == exp


@pytest.mark.parametrize("workspace", [True, False])
def test_experiment_exists(tmp_dir, scm, dvc, exp_stage, mocker, workspace):
    """Re-using an experiment name fails unless --force is given."""
    from dvc.repo.experiments.base import ExperimentExistsError

    dvc.experiments.run(
        exp_stage.addressing,
        name="foo",
        params=["foo=2"],
        tmp_dir=not workspace,
    )

    new_mock = mocker.spy(dvc.experiments, "_stash_exp")
    with pytest.raises(ExperimentExistsError):
        dvc.experiments.run(
            exp_stage.addressing,
            name="foo",
            params=["foo=3"],
            tmp_dir=not workspace,
        )
    new_mock.assert_not_called()

    results = dvc.experiments.run(
        exp_stage.addressing,
        name="foo",
        params=["foo=3"],
        force=True,
        tmp_dir=not workspace,
    )
    exp = first(results)

    fs = scm.get_fs(exp)
    with fs.open(tmp_dir / "metrics.yaml", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "foo: 3"


@pytest.mark.skipif(os.name == "nt", reason="Not supported for Windows.")
def test_file_permissions(tmp_dir, scm, dvc, exp_stage, mocker):
    """Experiment runs preserve executable file permissions."""
    mode = 0o755
    os.chmod(tmp_dir / "copy.py", mode)
    scm.add(["copy.py"])
    scm.commit("set exec")

    tmp_dir.gen("params.yaml", "foo: 2")
    dvc.experiments.run(exp_stage.addressing)
    assert stat.S_IMODE(os.stat(tmp_dir / "copy.py").st_mode) == mode


def test_failed_exp(tmp_dir, scm, dvc, exp_stage, mocker, caplog):
    """A failed reproduction is logged as an error."""
    from dvc.exceptions import ReproductionError

    tmp_dir.gen("params.yaml", "foo: 2")

    mocker.patch(
        "concurrent.futures.Future.exception",
        return_value=ReproductionError(exp_stage.relpath),
    )
    with caplog.at_level(logging.ERROR):
        dvc.experiments.run(exp_stage.addressing, tmp_dir=True)
        assert "Failed to reproduce experiment" in caplog.text


@pytest.mark.parametrize(
    "changes, expected",
    [
        [["foo=baz"], "{foo: baz, goo: {bag: 3}, lorem: false}"],
        [["foo=baz", "goo=bar"], "{foo: baz, goo: bar, lorem: false}"],
        [
            ["goo.bag=4"],
            "{foo: [bar: 1, baz: 2], goo: {bag: 4}, lorem: false}",
        ],
        [["foo[0]=bar"], "{foo: [bar, baz: 2], goo: {bag: 3}, lorem: false}"],
        [
            ["foo[1].baz=3"],
            "{foo: [bar: 1, baz: 3], goo: {bag: 3}, lorem: false}",
        ],
        [
            ["foo[1]=[baz, goo]"],
            "{foo: [bar: 1, [baz, goo]], goo: {bag: 3}, lorem: false}",
        ],
    ],
)
def test_modify_params(tmp_dir, scm, dvc, mocker, changes, expected):
    """``--set-param`` edits nested/indexed values in params.yaml."""
    tmp_dir.gen("copy.py", COPY_SCRIPT)
    tmp_dir.gen(
        "params.yaml", "{foo: [bar: 1, baz: 2], goo: {bag: 3}, lorem: false}"
    )
    stage = dvc.run(
        cmd="python copy.py params.yaml metrics.yaml",
        metrics_no_cache=["metrics.yaml"],
        params=["foo", "goo", "lorem"],
        name="copy-file",
    )
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.yaml", "metrics.yaml"])
    scm.commit("init")

    new_mock = mocker.spy(dvc.experiments, "new")
    results = dvc.experiments.run(stage.addressing, params=changes)
    exp = first(results)

    new_mock.assert_called_once()
    fs = scm.get_fs(exp)
    with fs.open(tmp_dir / "metrics.yaml", mode="r") as fobj:
        assert fobj.read().strip() == expected


@pytest.mark.parametrize(
    "changes",
    [["lorem.ipsum=3"], ["foo[0].bazar=3"]],
)
def test_add_params(tmp_dir, scm, dvc, changes):
    """Setting a param key that does not exist raises DvcException."""
    tmp_dir.gen("copy.py", COPY_SCRIPT)
    tmp_dir.gen(
        "params.yaml", "{foo: [bar: 1, baz: 2], goo: {bag: 3}, lorem: false}"
    )
    stage = dvc.run(
        cmd="python copy.py params.yaml metrics.yaml",
        metrics_no_cache=["metrics.yaml"],
        params=["foo", "goo", "lorem"],
        name="copy-file",
    )
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.yaml", "metrics.yaml"])
    scm.commit("init")

    with pytest.raises(DvcException):
        dvc.experiments.run(stage.addressing, params=changes)


@pytest.mark.parametrize("queue", [True, False])
def test_apply(tmp_dir, scm, dvc, exp_stage, queue):
    """``exp apply`` restores an experiment's files into the workspace."""
    from dvc.exceptions import InvalidArgumentError
    from dvc.repo.experiments.base import ApplyConflictError

    metrics_original = (tmp_dir / "metrics.yaml").read_text().strip()
    results = dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], queue=queue, tmp_dir=True
    )
    exp_a = first(results)

    results = dvc.experiments.run(
        exp_stage.addressing, params=["foo=3"], queue=queue, tmp_dir=True
    )
    exp_b = first(results)

    with pytest.raises(InvalidArgumentError):
        dvc.experiments.apply("foo")

    # Queued experiments never ran, so their metrics are unchanged;
    # executed ones produced the new metrics value.
    # FIX: previously written as `assert A == B if queue else "foo: 2"`,
    # which parses as a ternary over the whole assert and was vacuously
    # true whenever queue was False.
    dvc.experiments.apply(exp_a)
    assert (tmp_dir / "params.yaml").read_text().strip() == "foo: 2"
    expected = metrics_original if queue else "foo: 2"
    assert (tmp_dir / "metrics.yaml").read_text().strip() == expected

    with pytest.raises(ApplyConflictError):
        dvc.experiments.apply(exp_b, force=False)
    # failed apply should revert everything to prior state
    assert (tmp_dir / "params.yaml").read_text().strip() == "foo: 2"
    expected = metrics_original if queue else "foo: 2"
    assert (tmp_dir / "metrics.yaml").read_text().strip() == expected

    dvc.experiments.apply(exp_b)
    assert (tmp_dir / "params.yaml").read_text().strip() == "foo: 3"
    expected = metrics_original if queue else "foo: 3"
    assert (tmp_dir / "metrics.yaml").read_text().strip() == expected


def test_get_baseline(tmp_dir, scm, dvc, exp_stage):
    """Experiments (run or queued) resolve to the commit they started from."""
    from dvc.repo.experiments.base import EXPS_STASH

    init_rev = scm.get_rev()
    assert dvc.experiments.get_baseline(init_rev) is None

    results = dvc.experiments.run(exp_stage.addressing, params=["foo=2"])
    exp_rev = first(results)
    assert dvc.experiments.get_baseline(exp_rev) == init_rev

    dvc.experiments.run(exp_stage.addressing, params=["foo=3"], queue=True)
    assert dvc.experiments.get_baseline(f"{EXPS_STASH}@{{0}}") == init_rev

    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.yaml", "metrics.yaml"])
    scm.commit("promote exp")
    promote_rev = scm.get_rev()
    assert dvc.experiments.get_baseline(promote_rev) is None

    results = dvc.experiments.run(exp_stage.addressing, params=["foo=4"])
    exp_rev = first(results)
    assert dvc.experiments.get_baseline(exp_rev) == promote_rev

    dvc.experiments.run(exp_stage.addressing, params=["foo=5"], queue=True)
    assert dvc.experiments.get_baseline(f"{EXPS_STASH}@{{0}}") == promote_rev
    # FIX: removed stray debug `print("stash 1")` left over from development.
    assert dvc.experiments.get_baseline(f"{EXPS_STASH}@{{1}}") == init_rev


def test_update_py_params(tmp_dir, scm, dvc):
    """Python params files can be edited by --set-param (incl. classes)."""
    tmp_dir.gen("copy.py", COPY_SCRIPT)
    tmp_dir.gen("params.py", "INT = 1\n")
    stage = dvc.run(
        cmd="python copy.py params.py metrics.py",
        metrics_no_cache=["metrics.py"],
        params=["params.py:INT"],
        name="copy-file",
    )
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.py", "metrics.py"])
    scm.commit("init")

    results = dvc.experiments.run(
        stage.addressing, params=["params.py:INT=2"], tmp_dir=True
    )
    exp_a = first(results)

    fs = scm.get_fs(exp_a)
    with fs.open(tmp_dir / "params.py", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "INT = 2"
    with fs.open(tmp_dir / "metrics.py", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "INT = 2"

    tmp_dir.gen(
        "params.py",
        "INT = 1\nFLOAT = 0.001\nDICT = {'a': 1}\n\n"
        "class Train:\n    seed = 2020\n\n"
        "class Klass:\n    def __init__(self):\n        self.a = 111\n",
    )
    stage = dvc.run(
        cmd="python copy.py params.py metrics.py",
        metrics_no_cache=["metrics.py"],
        params=["params.py:INT,FLOAT,DICT,Train,Klass"],
        name="copy-file",
    )
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.py", "metrics.py"])
    scm.commit("init")

    results = dvc.experiments.run(
        stage.addressing,
        params=[
            "params.py:FLOAT=0.1",
            "params.py:Train.seed=2121",
            "params.py:Klass.a=222",
        ],
        tmp_dir=True,
    )
    exp_a = first(results)

    result = (
        "INT = 1\nFLOAT = 0.1\nDICT = {'a': 1}\n\n"
        "class Train:\n    seed = 2121\n\n"
        "class Klass:\n    def __init__(self):\n        self.a = 222"
    )

    def _dos2unix(text):
        if os.name != "nt":
            return text
        # NOTE: git on windows will use CRLF, so we have to convert it to LF
        # in order to compare with the original
        return text.replace("\r\n", "\n")

    fs = scm.get_fs(exp_a)
    with fs.open(tmp_dir / "params.py", mode="r", encoding="utf-8") as fobj:
        assert _dos2unix(fobj.read().strip()) == result
    with fs.open(tmp_dir / "metrics.py", mode="r", encoding="utf-8") as fobj:
        assert _dos2unix(fobj.read().strip()) == result

    tmp_dir.gen("params.py", "INT = 1\n")
    stage = dvc.run(
        cmd="python copy.py params.py metrics.py",
        metrics_no_cache=["metrics.py"],
        params=["params.py:INT"],
        name="copy-file",
    )
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.py", "metrics.py"])
    scm.commit("init")

    with pytest.raises(PythonFileCorruptedError):
        dvc.experiments.run(
            stage.addressing, params=["params.py:INT=2a"], tmp_dir=True
        )


def test_detached_parent(tmp_dir, scm, dvc, exp_stage, mocker):
    """Experiments can be run from a detached HEAD."""
    detached_rev = scm.get_rev()

    tmp_dir.gen("params.yaml", "foo: 2")
    dvc.reproduce(exp_stage.addressing)
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.yaml", "metrics.yaml"])
    scm.commit("v2")

    scm.checkout(detached_rev)
    assert scm.gitpython.repo.head.is_detached
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=3"])

    exp_rev = first(results)
    assert dvc.experiments.get_baseline(exp_rev) == detached_rev
    assert (tmp_dir / "params.yaml").read_text().strip() == "foo: 3"


def test_branch(tmp_dir, scm, dvc, exp_stage):
    """``exp branch`` promotes an experiment ref to a git branch."""
    from dvc.exceptions import InvalidArgumentError

    with pytest.raises(InvalidArgumentError):
        dvc.experiments.branch("foo", "branch")

    scm.branch("branch-exists")

    results = dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], name="foo"
    )
    exp_a = first(results)
    ref_a = dvc.experiments.get_branch_by_rev(exp_a)

    with pytest.raises(InvalidArgumentError):
        dvc.experiments.branch("foo", "branch-exists")
    dvc.experiments.branch("foo", "branch-name")
    dvc.experiments.branch(exp_a, "branch-rev")
    dvc.experiments.branch(ref_a, "branch-ref")

    for name in ["branch-name", "branch-rev", "branch-ref"]:
        assert name in scm.list_branches()
        assert scm.resolve_rev(name) == exp_a

    tmp_dir.scm_gen({"new_file": "new_file"}, commit="new baseline")
    results = dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], name="foo"
    )
    exp_b = first(results)
    ref_b = dvc.experiments.get_branch_by_rev(exp_b)

    # "foo" is now ambiguous across baselines -- only the full ref works.
    with pytest.raises(InvalidArgumentError):
        dvc.experiments.branch("foo", "branch-name")
    dvc.experiments.branch(ref_b, "branch-ref-b")

    assert "branch-ref-b" in scm.list_branches()
    assert scm.resolve_rev("branch-ref-b") == exp_b


def test_no_scm(tmp_dir):
    """All experiments subcommands require git."""
    from dvc.repo import Repo as DvcRepo
    from dvc.scm import NoSCMError

    dvc = DvcRepo.init(no_scm=True)

    for cmd in [
        "apply",
        "branch",
        "diff",
        "show",
        "run",
        "gc",
        "push",
        "pull",
        "ls",
    ]:
        with pytest.raises(NoSCMError):
            getattr(dvc.experiments, cmd)()


@pytest.mark.parametrize("workspace", [True, False])
def test_untracked(tmp_dir, scm, dvc, caplog, workspace):
    """Untracked deps fail; staged-but-uncommitted files are included."""
    tmp_dir.gen("copy.py", COPY_SCRIPT)
    tmp_dir.scm_gen("params.yaml", "foo: 1", commit="track params")
    stage = dvc.run(
        cmd="python copy.py params.yaml metrics.yaml",
        metrics_no_cache=["metrics.yaml"],
        params=["foo"],
        deps=["copy.py"],
        name="copy-file",
        no_exec=True,
    )

    # copy.py is untracked
    with caplog.at_level(logging.ERROR):
        results = dvc.experiments.run(
            stage.addressing, params=["foo=2"], tmp_dir=True
        )
        assert "Failed to reproduce experiment" in caplog.text
        assert not results

    # dvc.yaml, copy.py are staged as new file but not committed
    scm.add(["dvc.yaml", "copy.py"])
    results = dvc.experiments.run(
        stage.addressing, params=["foo=2"], tmp_dir=not workspace
    )
    exp = first(results)
    fs = scm.get_fs(exp)
    assert fs.exists("dvc.yaml")
    assert fs.exists("dvc.lock")
    assert fs.exists("copy.py")
    with fs.open(tmp_dir / "metrics.yaml", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "foo: 2"


def test_packed_args_exists(tmp_dir, scm, dvc, exp_stage, caplog):
    """A leftover packed-args file triggers a warning, not a failure."""
    from dvc.repo.experiments.executor.base import BaseExecutor

    tmp_dir.scm_gen(
        tmp_dir / ".dvc" / "tmp" / BaseExecutor.PACKED_ARGS_FILE,
        "",
        commit="commit args file",
    )

    with caplog.at_level(logging.WARNING):
        dvc.experiments.run(exp_stage.addressing)
        assert "Temporary DVC file" in caplog.text


def test_list(tmp_dir, scm, dvc, exp_stage):
    """``exp list`` groups experiments by their baseline commit."""
    baseline_a = scm.get_rev()

    results = dvc.experiments.run(exp_stage.addressing, params=["foo=2"])
    exp_a = first(results)
    ref_info_a = first(exp_refs_by_rev(scm, exp_a))

    results = dvc.experiments.run(exp_stage.addressing, params=["foo=3"])
    exp_b = first(results)
    ref_info_b = first(exp_refs_by_rev(scm, exp_b))

    tmp_dir.scm_gen("new", "new", commit="new")
    baseline_c = scm.get_rev()
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=4"])
    exp_c = first(results)
    ref_info_c = first(exp_refs_by_rev(scm, exp_c))

    assert dvc.experiments.ls() == {baseline_c: [ref_info_c.name]}

    exp_list = dvc.experiments.ls(rev=ref_info_a.baseline_sha)
    assert {key: set(val) for key, val in exp_list.items()} == {
        baseline_a: {ref_info_a.name, ref_info_b.name}
    }

    exp_list = dvc.experiments.ls(all_=True)
    assert {key: set(val) for key, val in exp_list.items()} == {
        baseline_a: {ref_info_a.name, ref_info_b.name},
        baseline_c: {ref_info_c.name},
    }


@pytest.mark.parametrize("workspace", [True, False])
def test_subdir(tmp_dir, scm, dvc, workspace):
    """Experiments work for pipelines defined in a subdirectory."""
    subdir = tmp_dir / "dir"
    subdir.gen("copy.py", COPY_SCRIPT)
    subdir.gen("params.yaml", "foo: 1")

    with subdir.chdir():
        dvc.run(
            cmd="python copy.py params.yaml metrics.yaml",
            metrics_no_cache=["metrics.yaml"],
            params=["foo"],
            name="copy-file",
            no_exec=True,
        )

    scm.add(
        [subdir / "dvc.yaml", subdir / "copy.py", subdir / "params.yaml"]
    )
    scm.commit("init")

    results = dvc.experiments.run(
        PIPELINE_FILE, params=["foo=2"], tmp_dir=not workspace
    )
    assert results

    exp = first(results)
    ref_info = first(exp_refs_by_rev(scm, exp))

    fs = scm.get_fs(exp)
    for fname in ["metrics.yaml", "dvc.lock"]:
        assert fs.exists(subdir / fname)
    with fs.open(subdir / "metrics.yaml", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "foo: 2"

    assert dvc.experiments.get_exact_name(exp) == ref_info.name
    assert resolve_rev(scm, ref_info.name) == exp


@pytest.mark.parametrize("workspace", [True, False])
def test_subrepo(tmp_dir, scm, workspace):
    """Experiments work inside a DVC subrepo of a larger git repo."""
    from tests.unit.fs.test_repo import make_subrepo

    subrepo = tmp_dir / "dir" / "repo"
    make_subrepo(subrepo, scm)

    subrepo.gen("copy.py", COPY_SCRIPT)
    subrepo.gen("params.yaml", "foo: 1")

    with subrepo.chdir():
        subrepo.dvc.run(
            cmd="python copy.py params.yaml metrics.yaml",
            metrics_no_cache=["metrics.yaml"],
            params=["foo"],
            name="copy-file",
            no_exec=True,
        )

    scm.add(
        [
            subrepo / "dvc.yaml",
            subrepo / "copy.py",
            subrepo / "params.yaml",
        ]
    )
    scm.commit("init")

    results = subrepo.dvc.experiments.run(
        PIPELINE_FILE, params=["foo=2"], tmp_dir=not workspace
    )
    assert results

    exp = first(results)
    ref_info = first(exp_refs_by_rev(scm, exp))

    fs = scm.get_fs(exp)
    for fname in ["metrics.yaml", "dvc.lock"]:
        assert fs.exists(subrepo / fname)
    with fs.open(subrepo / "metrics.yaml", mode="r", encoding="utf-8") as fobj:
        assert fobj.read().strip() == "foo: 2"

    assert subrepo.dvc.experiments.get_exact_name(exp) == ref_info.name
    assert resolve_rev(scm, ref_info.name) == exp


def test_queue(tmp_dir, scm, dvc, exp_stage, mocker):
    """Queued experiments are stashed and then executed by run_all."""
    dvc.experiments.run(exp_stage.addressing, params=["foo=2"], queue=True)
    dvc.experiments.run(exp_stage.addressing, params=["foo=3"], queue=True)
    assert len(dvc.experiments.stash_revs) == 2

    repro_mock = mocker.spy(dvc.experiments, "_reproduce_revs")
    results = dvc.experiments.run(run_all=True)
    assert len(results) == 2
    repro_mock.assert_called_with(jobs=1)

    expected = {"foo: 2", "foo: 3"}
    metrics = set()
    for exp in results:
        fs = scm.get_fs(exp)
        with fs.open(
            tmp_dir / "metrics.yaml", mode="r", encoding="utf-8"
        ) as fobj:
            metrics.add(fobj.read().strip())
    assert expected == metrics


def test_run_metrics(tmp_dir, scm, dvc, exp_stage, mocker):
    """``exp run -m`` shows metrics after the run."""
    from dvc.main import main

    mocker.patch.object(
        dvc.experiments, "run", return_value={"abc123": "abc123"}
    )
    show_mock = mocker.patch.object(dvc.metrics, "show", return_value={})
    main(["exp", "run", "-m"])
    # FIX: `show_mock.called_once()` auto-creates a child mock and is
    # always truthy; `assert_called_once()` is the real assertion.
    show_mock.assert_called_once()


def test_checkout_targets_deps(tmp_dir, scm, dvc, exp_stage):
    """Only deps of the run stage are checked out into the workspace."""
    from dvc.utils.fs import remove

    tmp_dir.dvc_gen({"foo": "foo", "bar": "bar"}, commit="add files")
    stage = dvc.stage.add(
        cmd="python copy.py params.yaml metrics.yaml",
        metrics_no_cache=["metrics.yaml"],
        params=["foo"],
        name="copy-file",
        deps=["copy.py", "foo"],
        force=True,
    )
    remove("foo")
    remove("bar")

    dvc.experiments.run(stage.addressing, params=["foo=2"])
    assert (tmp_dir / "foo").exists()
    assert (tmp_dir / "foo").read_text() == "foo"
    assert not (tmp_dir / "bar").exists()


@pytest.mark.parametrize("tail", ["", "~1", "^"])
def test_fix_exp_head(tmp_dir, scm, tail):
    """HEAD-relative revs are rewritten to the executor baseline ref."""
    from dvc.repo.experiments.base import EXEC_BASELINE
    from dvc.repo.experiments.utils import fix_exp_head

    head = "HEAD" + tail
    assert head == fix_exp_head(scm, head)

    scm.set_ref(EXEC_BASELINE, "refs/heads/master")
    assert EXEC_BASELINE + tail == fix_exp_head(scm, head)
    assert "foo" + tail == fix_exp_head(scm, "foo" + tail)


@pytest.mark.parametrize(
    "workspace, params, target",
    itertools.product((True, False), ("foo: 1", "foo: 2"), (True, False)),
)
def test_modified_data_dep(tmp_dir, scm, dvc, workspace, params, target):
    """Modified (dvc-tracked) data deps are picked up by experiment runs."""
    tmp_dir.dvc_gen("data", "data")
    tmp_dir.gen("copy.py", COPY_SCRIPT)
    tmp_dir.gen("params.yaml", "foo: 1")
    exp_stage = dvc.run(
        cmd="python copy.py params.yaml metrics.yaml",
        metrics_no_cache=["metrics.yaml"],
        params=["foo"],
        name="copy-file",
        deps=["copy.py", "data"],
    )
    scm.add(
        [
            "dvc.yaml",
            "dvc.lock",
            "copy.py",
            "params.yaml",
            "metrics.yaml",
            "data.dvc",
            ".gitignore",
        ]
    )
    scm.commit("init")

    tmp_dir.gen("params.yaml", params)
    tmp_dir.gen("data", "modified")

    results = dvc.experiments.run(
        exp_stage.addressing if target else None, tmp_dir=not workspace
    )
    exp = first(results)

    for rev in dvc.brancher(revs=[exp]):
        if rev != exp:
            continue
        with dvc.repo_fs.open((tmp_dir / "metrics.yaml").fs_path) as fobj:
            assert fobj.read().strip() == params
        with dvc.repo_fs.open((tmp_dir / "data").fs_path) as fobj:
            assert fobj.read().strip() == "modified"

    if workspace:
        assert (tmp_dir / "metrics.yaml").read_text().strip() == params
        assert (tmp_dir / "data").read_text().strip() == "modified"


def test_exp_run_recursive(tmp_dir, scm, dvc, run_copy_metrics):
    """``exp run --recursive .`` discovers and runs stages recursively."""
    tmp_dir.dvc_gen("metric_t.json", '{"foo": 1}')
    run_copy_metrics(
        "metric_t.json", "metric.json", metrics=["metric.json"], no_exec=True
    )
    assert dvc.experiments.run(".", recursive=True)
    assert (tmp_dir / "metric.json").parse() == {"foo": 1}


def test_experiment_name_invalid(tmp_dir, scm, dvc, exp_stage, mocker):
    """Invalid ref characters in an experiment name are rejected early."""
    from dvc.exceptions import InvalidArgumentError

    new_mock = mocker.spy(dvc.experiments, "_stash_exp")
    with pytest.raises(InvalidArgumentError):
        dvc.experiments.run(
            exp_stage.addressing,
            name="fo^o",
            params=["foo=3"],
        )
    new_mock.assert_not_called()
{ "content_hash": "d3f42316f4fa75961ba99e5b373cc3a8", "timestamp": "", "source": "github", "line_count": 705, "max_line_length": 79, "avg_line_length": 32.13758865248227, "alnum_prop": 0.5942975680805049, "repo_name": "efiop/dvc", "id": "a1c051df1f00f29378ca06506834e6fa3e8e1fff", "size": "22657", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/func/experiments/test_experiments.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "53" }, { "name": "Inno Setup", "bytes": "10158" }, { "name": "PowerShell", "bytes": "2686" }, { "name": "Python", "bytes": "2231040" }, { "name": "Shell", "bytes": "695" } ], "symlink_target": "" }
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

# -*- coding: utf-8 -*-
"""
An example of uploading firmware to the iLO Repository for flashing
"""

import os
import sys
import json
import argparse
from random import randint

from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError

import logging
from redfish import redfish_logger

from ilorest_util import get_resource_directory

LOGGERFILE = "RedfishApiExamples.log"
LOGGERFORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
LOGGER = redfish_logger(LOGGERFILE, LOGGERFORMAT, logging.DEBUG)

# Module-level default so upload_firmware() is usable when this file is
# imported; the __main__ section below overrides it for script use.
# Resource directory and associated operations are intended for HPE servers.
DISABLE_RESOURCE_DIR = True


def upload_firmware(_redfishobj, firmware_loc, comp_type, update_repo=True,
                    update_target=False):
    """Upload a firmware image to the iLO repository via HttpPushUri.

    :param _redfishobj: logged-in RedfishClient instance
    :param firmware_loc: path to the firmware file to upload
    :param comp_type: component type tag (currently unused here; kept for
                      interface compatibility with callers)
    :param update_repo: store the component in the iLO repository
    :param update_target: flash the component immediately after upload
    """
    resource_instances = get_resource_directory(_redfishobj)
    if DISABLE_RESOURCE_DIR or not resource_instances:
        # resource directory is not available so we will navigate through
        # paths manually
        update_service_uri = _redfishobj.root.obj['UpdateService']['@odata.id']
    else:
        # obtain all account instances from resource directory
        for instance in resource_instances:
            if '#UpdateService.' in instance['@odata.type']:
                update_service_uri = instance['@odata.id']

    print(update_service_uri)
    update_service_response = _redfishobj.get(update_service_uri)

    # HttpPushUri is the endpoint that accepts multipart firmware uploads.
    path = update_service_response.obj.HttpPushUri
    print(path)

    body = []
    json_data = {'UpdateRepository': update_repo,
                 'UpdateTarget': update_target,
                 'ETag': 'atag',
                 'Section': 0}
    session_key = _redfishobj.session_key

    # FIX: the original referenced an undefined name `filename` (the line
    # computing it had been commented out), raising NameError on every call.
    file_name = os.path.basename(firmware_loc)
    with open(firmware_loc, 'rb') as fle:
        output = fle.read()

    session_tuple = ('sessionKey', session_key)
    parameters_tuple = ('parameters', json.dumps(json_data))
    file_tuple = ('file', (file_name, output, 'application/octet-stream'))

    # Build the payload from each multipart-form data tuple
    body.append(session_tuple)
    body.append(parameters_tuple)
    body.append(file_tuple)

    # Create our header dictionary
    header = {'Cookie': 'sessionKey=' + session_key}

    print('Begin upload....please wait...')

    # We pass the whole list payload to post
    resp = _redfishobj.post(path, body, headers=header)

    if resp.status == 400:
        sys.stderr.write("Failed to upload firmware...")
    elif resp.status not in (200, 201):
        sys.stderr.write("An http response of '%s' was returned.\n"
                         % resp.status)
    else:
        print("Upload complete!\n")


def create_task(_redfishobj, firmware_loc, tpm_flag=True):
    """Queue an ApplyUpdate task for the uploaded component.

    Only creates a task when the update task queue is currently empty.

    :param _redfishobj: logged-in RedfishClient instance
    :param firmware_loc: path of the uploaded firmware file (its basename
                         must match the component in the repository)
    :param tpm_flag: set TPMOverride on the task
    """
    session_key = _redfishobj.session_key
    # Create our header dictionary
    header = {'Cookie': 'sessionKey=' + session_key}

    updatable_by = ['Uefi']
    task_path = '/redfish/v1/UpdateService/UpdateTaskQueue/'
    file_name = os.path.basename(firmware_loc)

    task_resp = _redfishobj.get(task_path)
    if not task_resp.obj["Members@odata.count"]:
        print("No current tasks, proceed to create new task")
        # Random suffix keeps the task name unique across invocations.
        update_task = {'Name': 'Update-%s-%s' % (str(randint(0, 1000000)),
                                                 file_name),
                       'Command': 'ApplyUpdate',
                       'Filename': file_name,
                       'UpdatableBy': updatable_by,
                       'TPMOverride': tpm_flag}
        resp = _redfishobj.post(task_path, update_task, headers=header)

        if resp.status == 400:
            sys.stderr.write("Failed to create update task queue...")
        elif resp.status not in (200, 201):
            sys.stderr.write("An http response of '%s' was returned.\n"
                             % resp.status)
        else:
            print("Task Queue Creation complete!\n")


if __name__ == "__main__":
    # Initialize parser
    parser = argparse.ArgumentParser(
        description="Script to upload and flash NVMe FW")

    parser.add_argument(
        '-c', '--component', dest='comp_path', action="store", required=True,
        help="The path to the firmware file to upload", default=None)
    parser.add_argument(
        '-s', '--session_key', dest='session_key', action="store",
        required=False, help="Http session key for the server", default=None)
    parser.add_argument(
        '-i', '--ilo', dest='ilo_ip', action="store", required=False,
        help="iLO IP of the server", default=None)
    parser.add_argument(
        '-u', '--user', dest='ilo_user', action="store", required=False,
        help="iLO username to login", default=None)
    parser.add_argument(
        '-p', '--password', dest='ilo_pass', action="store", required=False,
        help="iLO password to log in.", default=None)

    options = parser.parse_args()

    system_url = "https://" + options.ilo_ip
    print(system_url)

    # Upload the firmware file to the iLO Repository
    update_repo_flag = True
    # Update the system with the firmware file
    update_target_flag = False

    comp_type = 'C'
    tpm_flag = True

    # flag to force disable resource directory. Resource directory and
    # associated operations are intended for HPE servers.
    DISABLE_RESOURCE_DIR = True

    try:
        # Create a Redfish client object
        if options.session_key:
            redfish_obj = RedfishClient(base_url=system_url,
                                        session_key=options.session_key)
        else:
            redfish_obj = RedfishClient(base_url=system_url,
                                        username=options.ilo_user,
                                        password=options.ilo_pass)
        # Login with the Redfish client
        redfish_obj.login()
    except ServerDownOrUnreachableError as excp:
        sys.stderr.write("ERROR: server not reachable or does not support "
                         "RedFish.\n")
        sys.exit()

    upload_firmware(redfish_obj, options.comp_path, comp_type,
                    update_repo_flag, update_target_flag)
    if comp_type == 'C':
        create_task(redfish_obj, options.comp_path, tpm_flag)
    redfish_obj.logout()
{ "content_hash": "1e1114078bc120af06ce56e009167f3d", "timestamp": "", "source": "github", "line_count": 204, "max_line_length": 114, "avg_line_length": 33.181372549019606, "alnum_prop": 0.6497266952282464, "repo_name": "HewlettPackard/python-ilorest-library", "id": "7aa7e86c0a8457e14089805d7e714fc6ef81cec1", "size": "6769", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/Redfish/flash_firmware_by_uefi.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "160" }, { "name": "Makefile", "bytes": "13995" }, { "name": "PowerShell", "bytes": "3135" }, { "name": "Python", "bytes": "352505" }, { "name": "Shell", "bytes": "119" } ], "symlink_target": "" }
from nova.compute import stats as nova_stats
from nova.compute import task_states
from nova.compute import vm_states


class Stats(nova_stats.Stats):
    """Handler for updates to compute node workload stats.

    Extends the upstream nova stats handler so that the I/O workload
    figure also counts the extra task states tracked by this driver.
    """

    def __init__(self):
        super(Stats, self).__init__()

    @property
    def io_workload(self):
        """Calculate an I/O based load by counting I/O heavy operations.

        Adds REBUILD_SPAWNING and IMAGE_UPLOADING task counts on top of
        the workload number computed by the base class.
        """
        def _count_tasks(task_state):
            # Stats behaves like a dict of counters; a state that has never
            # been recorded simply contributes zero.
            return self.get("num_task_%s" % task_state, 0)

        extra_states = (task_states.REBUILD_SPAWNING,
                        task_states.IMAGE_UPLOADING)
        base_workload = super(Stats, self).io_workload
        return base_workload + sum(_count_tasks(s) for s in extra_states)
{ "content_hash": "3e167a91956898ce50309284fefa6f68", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 79, "avg_line_length": 33.583333333333336, "alnum_prop": 0.6439205955334988, "repo_name": "nash-x/hws", "id": "64bb87a1d8b4bb3a35dee992266729a170cf07ce", "size": "1446", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova/huawei/compute/stats.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1043" }, { "name": "PLpgSQL", "bytes": "12782" }, { "name": "Python", "bytes": "20443623" }, { "name": "Shell", "bytes": "4643" } ], "symlink_target": "" }
import subprocess

from gitviewfs_objects import CommitTreeSymLink
from tests.structs.default import paths
from tests.structs.default.utils import (BaseDefaultDirStructTest,
                                         BaseDefaultDirStructIntegrationTest)


def _rev_parse(ref):
    """Return the SHA-1 string that *ref* resolves to in the current repo."""
    return subprocess.check_output(['git', 'rev-parse', ref]).strip()


class CommitTreeSymLinkPathTest(BaseDefaultDirStructTest):
    """Check the path-to-object mapping for the commit 'tree' symlink."""

    def test_path(self):
        self.assertPathIs(paths.COMMIT_TREE_SYMLINK, CommitTreeSymLink)


class CommitTreeSymLinkIntegrationTest(BaseDefaultDirStructIntegrationTest):
    """Exercise the commit 'tree' symlink against a real git repository."""

    def test_readlink(self):
        self.create_and_commit_file()
        head_sha1 = _rev_parse('HEAD')
        tree_sha1 = _rev_parse('HEAD^{tree}')

        symlink_path = self.make_commit_tree_symlink_path(head_sha1)
        expected_target = self.make_tree_dir_path(tree_sha1)
        self.assertSymLink(expected_target, symlink_path)
{ "content_hash": "416626dcb02c5ba395364a4c6134a373", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 82, "avg_line_length": 35.91304347826087, "alnum_prop": 0.7905569007263923, "repo_name": "erdavila/gitviewfs", "id": "cdc59d358e279967f893e0f6c848628db0c96a1f", "size": "826", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/structs/default/test_commit_tree_symlink.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "59815" } ], "symlink_target": "" }
""" The innovative bucketlist app is an application that allows users  to record and share things they want to achieve or experience before reaching a certain age meeting the needs of keeping track of their dreams and goals """ class BucketList: def __init__(self, bl_username, bl_name=None): self.bl_name = bl_name self.conn = mysql.connect() self.cursor = conn.cursor() self.bl_category_user_id = None self.username = bl_username[1] self.get_user_id() def get_user_id(self): self.bl_category_user_id = 1 global conn, cursor try: conn = mysql.connect() cursor = conn.cursor() cursor.callproc('sp_user_id', [self.username]) # data = cursor.fetchall() # self.bl_category_user_id = data[0][0] return json.dumps({'user_id': self.bl_category_user_id}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() def add_category(self): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_Add_Bl_Category', (self.bl_name, self.bl_category_user_id)) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Category Created successfully !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() def edit_category(self, id, name): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_update_category', (id, name, self.bl_category_user_id)) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Category Updated successfully !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) def delete_category(self, category_delete_id): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': 
self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_delete_Category', [category_delete_id]) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Delete !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() def add_activity(self, bl_id, bl_activity_name): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_Add_Bl_Activity', (bl_activity_name, bl_id)) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Activity Created successfully !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() def delete_activity(self, _activity_id): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_delete_Bl_activity', [_activity_id]) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Delete !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() def edit_activity(self, id, name, date, category_id): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() # return json.dumps({'id': self.bl_category_user_id,'name':self.bl_name}) cursor.callproc('sp_update_activity', (id, name, date, category_id)) data = cursor.fetchall() if len(data) is 0: conn.commit() return json.dumps({'message': 'Activity Updated successfully !'}) else: return json.dumps({'error': str(data[0])}) except Exception as e: return json.dumps({'error': str(e)}) finally: cursor.close() conn.close() # session will be used to set a session for a user # json will be used to return data to 
the browser from flask import Flask, render_template, json, request, session # Library for using MySQL from flask.ext.mysql import MySQL # We need this for hashing our password before saving it in the database import hashlib import os mysql = MySQL() app = Flask(__name__) app.secret_key = os.urandom(24) # MySQL configurations app.config['MYSQL_DATABASE_USER'] = 'root' app.config['MYSQL_DATABASE_PASSWORD'] = '' app.config['MYSQL_DATABASE_DB'] = 'BucketList' app.config['MYSQL_DATABASE_HOST'] = 'localhost' mysql.init_app(app) @app.route('/showBucketListEdit') def showBucketListEdit(): return render_template('bucketListManagement.html') @app.route('/') def main(): return render_template('index.html') @app.route('/showSignUp') def showSignUp(): return render_template('signup.html') @app.route('/showBucketListView') @app.route('/showHome') def showHome(): return render_template('home.html') @app.route('/showSignIn') def showSignIn(): return render_template('signin.html') # Set up the check for Sign In @app.route('/getsession') def getsession(): try: if session['bl_user'] == None: return json.dumps({'loggedin': 0}) elif 'bl_user' in session: return json.dumps({'loggedin': 1, 'user': session['bl_user']}) else: return json.dumps({'loggedin': 0}) except Exception as e: return json.dumps({'error': str(e)}) @app.route('/allActivity', methods=['POST', 'GET']) def allActivity(): try: global conn, cursor conn = mysql.connect() cursor = conn.cursor() _id = request.form['id'] cursor.callproc('sp_all_bl_activity', [_id]) data = cursor.fetchall() if data[0][0] == 'Nothing Found !!': return json.dumps({'empty_data': 1, 'data': data}) else: return json.dumps({'empty_data': 0, 'data': data}) except Exception as e: return json.dumps({'empty_data': 1, 'error': str(e)}) finally: cursor.close() conn.close() @app.route('/allBlCategory') def allBlCategory(): global conn, cursor try: conn = mysql.connect() cursor = conn.cursor() cursor.callproc('sp_all_bl_category', [session['bl_user'][0]]) 
data = cursor.fetchall() if data[0][0] == 'Nothing Found !!': return json.dumps({'empty_data': 1, 'data': data}) else: return json.dumps({'empty_data': 0, 'data': data}) except Exception as e: return json.dumps({'empty_data': 1, 'error': str(e)}) finally: cursor.close() conn.close() @app.route('/addNewCategory', methods=['POST', 'GET']) def addNewCategory(): try: _blNewName = request.form['bl_new_name'] if _blNewName: myNewBucketList = BucketList(session['bl_user'], _blNewName) return myNewBucketList.add_category() except Exception as e: return json.dumps({'error': str(e)}) @app.route('/categoryUpdate', methods=['POST', 'GET']) def categoryUpdate(): try: _id = request.form['category-id'] _name = request.form['new-name'] if _id and _name: myUpdatedBucketList = BucketList(session['bl_user']) return myUpdatedBucketList.edit_category(_id, _name) else: return json.dumps({'error': "All inputs not entered"}) except Exception as e: return json.dumps({'error': str(e)}) @app.route('/activityUpdate', methods=['POST', 'GET']) def activityUpdate(): try: _date = request.form['activity-new-date'] _id = request.form['activity-id'] _name = request.form['new-name'] _category_id = request.form['category-id'] if _date and _id and _name: myUpdatedBucketList = BucketList(session['bl_user']) return myUpdatedBucketList.edit_activity(_id, _name, _date, _category_id) else: return json.dumps({'error': "All inputs not entered"}) except Exception as e: return json.dumps({'error': str(e)}) @app.route('/addNewActivity', methods=['POST', 'GET']) def addNewActivity(): _bl_id = request.form['bl_id'] _bl_activity_new_name = request.form['bl_activity_new_name'] if _bl_id and _bl_activity_new_name: myNewBucketList = BucketList(session['bl_user']) return myNewBucketList.add_activity(_bl_id, _bl_activity_new_name) # _date = request.form['date'] # Date feature still pending return "okay"; @app.route('/deleteActivity', methods=['POST', 'GET']) def deleteActivity(): _activity_id = 
request.form['activity_id'] if _activity_id: myDeleteActivity = BucketList(session['bl_user']) return myDeleteActivity.delete_activity(_activity_id) @app.route('/deleteCategory', methods=['POST', 'GET']) def deleteCategory(): _category_delete_id = request.form['category_delete_id'] if _category_delete_id: myDeleteCategory = BucketList(session['bl_user']) return myDeleteCategory.delete_category(_category_delete_id) @app.route('/logOut') def logOut(): session['bl_user'] = None return json.dumps({'logged_in': '0'}) @app.route('/signIn', methods=['POST', 'GET']) def signIn(): global conn, cursor try: _email = request.form['inputEmail'] _password = request.form['inputPassword'] # validate the received values if _email and _password: conn = mysql.connect() cursor = conn.cursor() # Hash the password before saving it to the database _hashed_password = hashlib.md5(_password.encode('utf-8')).hexdigest() cursor.callproc('sp_loginUser', (_email, _hashed_password)) data = cursor.fetchall() if data[0][0] == 'Invalid username or password !': return json.dumps({'loggedin': 0, 'user': data[0]}) else: session['bl_user'] = data[0] return json.dumps({'loggedin': 1, 'user': data[0]}) else: return json.dumps({'html': '<span>Enter the required fields</span>'}) except Exception as e: return json.dumps({'error': str(e)}) finally: try: cursor.close() conn.close() except Exception as e: return json.dumps({'error': str(e)}) @app.route('/signUp', methods=['POST', 'GET']) def signUp(): global conn, cursor try: conn = mysql.connect() cursor = conn.cursor() _name = request.form['inputName'] _email = request.form['inputEmail'] _password = request.form['inputPassword'] # validate the received values if _name and _email and _password: # All Good, let's call MySQL _hashed_password = hashlib.md5(_password.encode('utf-8')).hexdigest() cursor.callproc('sp_createUser', (_name, _email, _hashed_password)) data = cursor.fetchall() if len(data) is 0: # myNewBucketList = BucketList(session['bl_user'], 
_blNewName) # new_user_bucket_list = BucketList([1,_email]) # user_id = new_user_bucket_list.get_user_id(); # user_id = 1; # conn.commit() user_id = 1 session['bl_user'] = [user_id, _email] new_user_bucket_list = BucketList(session['bl_user']) user_id = new_user_bucket_list.bl_category_user_id session['bl_user'] = [user_id, _email] return json.dumps({'message': 'User created successfully !', 'id': 1}) else: return json.dumps({'error': str(data[0])}) else: return json.dumps({'html': '<span>Enter the required fields</span>'}) except Exception as e: return json.dumps({'error': str(e)}) finally: try: cursor.close() conn.close() except Exception as e: "" if __name__ == "__main__": app.run(port=5006)
{ "content_hash": "15174c7377ac8678314ef7b81d62d38b", "timestamp": "", "source": "github", "line_count": 489, "max_line_length": 91, "avg_line_length": 27.897750511247445, "alnum_prop": 0.5555637003371939, "repo_name": "okotieno/BucketList", "id": "8f5b1b8704462c9954153cee374c23caf0b841b4", "size": "13643", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "767" }, { "name": "HTML", "bytes": "20237" }, { "name": "JavaScript", "bytes": "147878" }, { "name": "Python", "bytes": "13826" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add a ``group`` field to the NewExamQuestion model."""

    # Must run after the previous auto-generated editor migration.
    dependencies = [
        ('editor', '0019_auto_20160601_1528'),
    ]

    operations = [
        migrations.AddField(
            model_name='newexamquestion',
            name='group',
            # Existing rows are backfilled with group 0.
            field=models.PositiveIntegerField(default=0),
        ),
    ]
{ "content_hash": "3b0e63c7e26e7b1f92fb6493a83c2f89", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 57, "avg_line_length": 21.666666666666668, "alnum_prop": 0.6051282051282051, "repo_name": "numbas/editor", "id": "aae8a409f61a40b8c4dfaf8f51d60bccedeb704c", "size": "414", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "editor/migrations/0020_newexamquestion_group.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "44056" }, { "name": "HTML", "bytes": "548468" }, { "name": "JavaScript", "bytes": "2344000" }, { "name": "Less", "bytes": "205670" }, { "name": "Makefile", "bytes": "10028" }, { "name": "Python", "bytes": "551931" } ], "symlink_target": "" }
"""Copyright 2011 Urban Airship""" import errno import os import signal import socket import sys import time import traceback RETRY_ERRORS = frozenset(( None, # socket.timeout().errno is None errno.ENOENT, errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET, )) def connect(url): s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) s.settimeout(0.2) s.connect(url) return s def punish(me, num, url): s = connect(url) for i in range(num): logline = '%s %8s %s' % (str(me)*22, i, 'a'*64) reconnect = False while 1: if reconnect: # Throttle time.sleep(0.1) try: s = connect(url) reconnect = False except (socket.error, socket.timeout) as e: print '{%s:%s exc connecting (%s)}' % (me, i, e) s.close() if e.errno in RETRY_ERRORS: continue # Reconnect again else: raise # Unable to continue try: s.send(logline) break except (socket.error, socket.timeout) as e: print '[%s:%s exc sending (%s)]' % (me, i, e) s.close() if e.errno in RETRY_ERRORS: reconnect = True else: raise def spawn(con, num, url, logd_pid): for i in range(con): pid = os.fork() if pid: print 'CHILD %d Started (%d)' % (i, pid) continue else: try: punish(i, num, url) except: traceback.print_exc() raise return True if logd_pid: for i in range(20): os.kill(logd_pid, signal.SIGHUP) print 'SIGHUP Sent' time.sleep(0.1) def main(): if len(sys.argv) == 5: _, concurrency, messages, url, logd_pid = sys.argv elif len(sys.argv) == 4: _, concurrency, messages, url = sys.argv logd_pid = 0 else: print 'usage: %s <concurrency> <messages> <url> [logd pid]' % ( sys.argv[0],) sys.exit(1) concurrency = int(concurrency) messages = int(messages) logd_pid = int(logd_pid) s = time.time() child = spawn(concurrency, messages, url, logd_pid) if child: return print 'SIGHUP Loop done, waiting on punishers' while 1: try: os.wait() except OSError: print 'CHILD processes already done' break e = time.time() total = concurrency * messages print 'FIN - Sent %d messages in %d seconds (%f per second)' % ( total, e - s, total / (e - s)) if __name__ == 
'__main__': main()
{ "content_hash": "7e9a8d39a741970b4f29f3f468c4c26f", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 71, "avg_line_length": 25.723214285714285, "alnum_prop": 0.4842068726136758, "repo_name": "schmichael/gologd", "id": "1c7573c1e6bcc9c26aa227dc1d575a4a310d1af8", "size": "2881", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "punish_logd.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Go", "bytes": "3854" }, { "name": "Python", "bytes": "12210" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):
    """Record whether (and when) airtime was sent for a baseline survey."""

    dependencies = [
        ("mqr", "0002_baselinesurveyresult"),
    ]

    operations = [
        migrations.AddField(
            model_name="baselinesurveyresult",
            name="airtime_sent",
            # Defaults to False for all existing rows.
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="baselinesurveyresult",
            name="airtime_sent_at",
            # Null until airtime is actually sent.
            field=models.DateTimeField(null=True),
        ),
    ]
{ "content_hash": "d2e43c1e360548043f8ebab4b66613cb", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 53, "avg_line_length": 25.238095238095237, "alnum_prop": 0.5830188679245283, "repo_name": "praekeltfoundation/ndoh-hub", "id": "b8f1df03a7a857b1493a309b71bdbc330c7681f7", "size": "580", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "mqr/migrations/0003_auto_20220322_0539.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "450" }, { "name": "HTML", "bytes": "2200" }, { "name": "Python", "bytes": "957306" }, { "name": "Shell", "bytes": "2796" } ], "symlink_target": "" }
import unittest

from coveragelink import CoverageLink


class CoverageLinkTestsCase(unittest.TestCase):
    """Unit tests for the CoverageLink value object."""

    def test_object(self):
        """Construction stores project/url; an unrecognised type is
        normalised to 'unknown' and the link reports itself invalid."""
        project = 'demo'
        url = 'invalid'
        # NOTE(review): the builtin ``type`` is passed as the link type --
        # presumably any non-string value maps to 'unknown'; confirm intent.
        link = CoverageLink(project, url, type)
        # BUG FIX: the original called bare ``assertEquals``/``assertFalse``
        # (NameError at runtime); they must be invoked on ``self``.  Also
        # use the non-deprecated ``assertEqual`` spelling.
        self.assertEqual(link.project, project)
        self.assertEqual(link.url, url)
        self.assertEqual(link.type, 'unknown')
        self.assertFalse(link.isValid())
{ "content_hash": "8f1aa8960d20b667c577a2eefdb7ed1d", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 47, "avg_line_length": 25.933333333333334, "alnum_prop": 0.6658097686375322, "repo_name": "ronaldbradford/os-demo", "id": "b0f50abd8b49f179e4b4abff5bfba1459ffbdabb", "size": "389", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "coverage/tests/unit/coveragelink.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "15208" } ], "symlink_target": "" }
from __future__ import print_function, division
import os
import sys
import numpy as np
from numpy import zeros, empty
from scipy.sparse import csr_matrix
import warnings


class gpaw_hsx_c():
    """Adapter that pulls Hamiltonian/overlap data out of a GPAW LCAO
    calculation into a SciPy CSR overlap matrix (``self.s4_csr``).

    NOTE(review): much of the Hamiltonian extraction is stubbed out
    (commented code below); only the overlap path is functional.
    """

    def __init__(self, sv, calc):
        """ Gather information on the Hamiltonian

        :param sv: system-variables object providing norbs/norbs_sc/nspin
                   and an ``overlap_coo()`` fallback.
        :param calc: a GPAW calculator; must be in LCAO mode.
        """
        from gpaw.utilities import unpack
        # Only LCAO-mode calculations carry the S_qMM overlap array.
        assert calc.wfs.mode.lower()=='lcao'
        self.norbs = sv.norbs
        self.norbs_sc = sv.norbs_sc
        self.nspin = sv.nspin
        self.telec = 0.001 # Don't know how to get it?? calc.atoms.get_temperature()
        self.nelec = calc.setups.nvalence
        #print(dir(calc.setups))
        #print("nelec = ", self.nelec, "telec = ", self.telec)
        # print(dir(calc.hamiltonian))
        # print(calc.hamiltonian)
        # print(calc.hamiltonian.vt_sG.shape)
        # print(calc.hamiltonian.vHt_g.shape)
        # print(calc.hamiltonian.vt_sg.shape)
        # for k in calc.hamiltonian.dH_asp.keys():
        #     print( k, unpack(calc.hamiltonian.dH_asp[k]))
        #self.nnz =
        # self.is_gamma = (dat[i]>0); i=i+1;
        # self.nelec = dat[i]; i=i+1;
        # self.telec = dat[i]; i=i+1;
        #
        # self.h4 = np.reshape(dat[i:i+self.nnz*self.nspin], (self.nspin,self.nnz)); i=i+self.nnz*self.n
        # self.s4 = dat[i:i+self.nnz]; i = i + self.nnz;
        # self.x4 = np.reshape(dat[i:i+self.nnz*3], (self.nnz,3)); i = i + self.nnz*3;
        # self.row_ptr = np.array(dat[i:i+self.norbs+1]-1, dtype='int'); i = i + self.norbs+1;
        # self.col_ind = np.array(dat[i:i+self.nnz]-1, dtype='int'); i = i + self.nnz;
        # self.spin2h4_csr = []
        # for s in range(self.nspin):
        #     self.spin2h4_csr.append(csr_matrix((self.h4[s,:], self.col_ind, self.row_ptr), dtype=np.floa
        # self.s4_csr = csr_matrix((self.s4, self.col_ind, self.row_ptr), dtype=np.float32)
        #
        #self.orb_sc2orb_uc=None
        #if(i<len(dat)):
        #  if(self.is_gamma): raise SystemError('i<len(dat) && gamma')
        #  self.orb_sc2orb_uc = np.array(dat[i:i+self.norbs_sc]-1, dtype='int'); i = i + self.norbs_sc
        """
        Comment about the overlap matrix in Gpaw:
            Marc:
            > From what I see the overlap matrix is not written in the GPAW output. Do
            > there is an option to write it or it is not implemented?
            Ask answer (asklarsen@gmail.com):
            It is not implemented.  Just write it manually from the calculation script.

            If you use ScaLAPACK (or band/orbital parallelization) the array will be
            distributed, so each core will have only a chunk.  One needs to call a
            function to collect it on master then.  But probably you won't need
            that.

            The array will exist after you call calc.set_positions(atoms), in
            case you want to generate it without triggering a full calculation.
        """
        self.S_qMM = calc.wfs.S_qMM
        if calc.wfs.S_qMM is None:
            # Overlaps were not kept by the calculator; fall back to the
            # sv-provided sparse matrix (sv.overlap_coo returns csr sparse).
            self.s4_csr = sv.overlap_coo()
        else:
            # Use the first q/k point only; warn if there are more.
            self.s4_csr = csr_matrix(calc.wfs.S_qMM[0, :, :])
            if calc.wfs.S_qMM.shape[0] >1:
                warnings.warn("""
                GPAW overlaps has more than one kpts
                """, UserWarning)

    def check_overlaps(self, pyscf_overlaps):
        """Compare GPAW overlaps against *pyscf_overlaps*.

        Returns the summed absolute difference, or -1 when no GPAW overlap
        data is available or the shapes disagree (error-code convention
        kept for callers).
        """
        # overlap not in gpaw output, this routine can be used only
        # after a direct call to gpaw calculator
        if self.S_qMM is None:
            return -1
        s4 = self.s4_csr.toarray()
        if s4.shape != pyscf_overlaps.shape:
            warnings.warn("""
            Gpaw and Pyscf overlaps have different shapes.
            Something should be wrong!
            """, UserWarning)
            print("Shape: overlaps gpaw: ", s4.shape)
            print("Shape: overlaps pyscf: ", pyscf_overlaps)
            return -1
        return np.sum(abs(s4-pyscf_overlaps))

    def deallocate(self):
        """Release the (potentially large) sparse overlap matrix."""
        del self.s4_csr
{ "content_hash": "6ead043ad45f250e7d560b4017c1051a", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 101, "avg_line_length": 36, "alnum_prop": 0.6240560949298813, "repo_name": "gkc1000/pyscf", "id": "2d1faa9705dbe7ed7997950483bb702e6e9887f7", "size": "4320", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyscf/nao/m_gpaw_hsx.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "2749942" }, { "name": "C++", "bytes": "20522" }, { "name": "CMake", "bytes": "29300" }, { "name": "Common Lisp", "bytes": "40269" }, { "name": "Cuda", "bytes": "12405" }, { "name": "Fortran", "bytes": "1104054" }, { "name": "Jupyter Notebook", "bytes": "42844" }, { "name": "Makefile", "bytes": "6797" }, { "name": "Python", "bytes": "10739278" }, { "name": "Shell", "bytes": "5480" }, { "name": "VBA", "bytes": "577" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Resize three CharFields on the Volunteer model."""

    dependencies = [
        ('api', '0008_merge_20161114_0228'),
    ]

    operations = [
        migrations.AlterField(
            model_name='volunteer',
            name='email',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='volunteer',
            name='jacket',
            field=models.CharField(max_length=30),
        ),
        migrations.AlterField(
            model_name='volunteer',
            name='years_of_service',
            field=models.CharField(max_length=3),
        ),
    ]
{ "content_hash": "17557cb44c94a9a8e08a9a8a6cc2623b", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 50, "avg_line_length": 24.928571428571427, "alnum_prop": 0.5544412607449857, "repo_name": "jumbocodespring2017/bostonathleticsassociation", "id": "2b415380261e34d510fb8b47a7448850a1452aa3", "size": "771", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "back-end/api/migrations/0009_auto_20161114_0228.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "583725" }, { "name": "HTML", "bytes": "24194" }, { "name": "JavaScript", "bytes": "3394213" }, { "name": "Python", "bytes": "67089" } ], "symlink_target": "" }
from __future__ import absolute_import

import sys
import traceback
import cStringIO

from unittest import suite
from unittest import case
from unittest import runner

import mut
from . import actions


class MpiTestSuite(suite.TestSuite):
    """A TestSuite that distributes leaf suites over MPI ranks.

    Rank 0 acts as the master (hands out work, collects results via
    ``actions``); all other ranks loop requesting work until told to stop.
    """

    # Singleton: the most recently constructed suite, looked up by workers.
    _instance = None

    def __init__(self, *args, **kwargs):
        suite.TestSuite.__init__(self, *args, **kwargs)
        MpiTestSuite._instance = self
        self._result = None
        self._flattened_suites = _SuiteCollection()
        self._debug = False

    @classmethod
    def get_instance(cls):
        """Return the singleton suite created by the loader."""
        return cls._instance

    def run(self, result, debug=False):
        """Entry point: flatten, sanity-check, then run as master or worker
        depending on this process's MPI rank."""
        self._debug = debug
        self._result = result
        suites = self._flatten()
        self._confirm_process_loaded_same_tests()
        if mut.RANK == 0:
            self._run_as_master()
        else:
            self._run_as_worker()

    def _flatten(self):
        """Collect leaf suites (those containing only TestCases) into
        self._flattened_suites, recursing through nested MpiTestSuites."""
        for ss in self:
            if isinstance(ss, MpiTestSuite):
                self._flattened_suites.update(ss._flatten())
        if len(self._tests) > 0 and all(isinstance(ss, case.TestCase) for ss in self):
            self._flattened_suites.add(self)
        return self._flattened_suites

    def _confirm_process_loaded_same_tests(self):
        """Gather every rank's suite names on rank 0 and fail loudly if any
        worker discovered a different set than the master."""
        loaded_suite_names = mut.COMM_WORLD.gather(self._flattened_suites.keys(), root=0)
        if mut.RANK == 0:
            master_suite_names = loaded_suite_names[0]
            if self._debug:
                sys.__stderr__.write('[{:0>3}] Checking that all processors loaded same tests.\n'.format(mut.RANK))
            for worker_suite_names in loaded_suite_names[1:]:
                if master_suite_names != worker_suite_names:
                    if self._debug:
                        sys.__stderr__.write('[{:0>3}] shit failed, \n{} vs.\n{}.\n'.format(mut.RANK, master_suite_names, worker_suite_names))
                    raise Exception('Worker and master did not load the same tests... this is an issue.')

    def _run_as_master(self):
        """Rank-0 loop: queue one RunSuiteAction per suite, service worker
        messages until the backlog drains and all workers are idle, then
        broadcast StopAction."""
        self._result.stream.writeln('Found {} suites, will distribute across {} processors.'
                                    .format(len(self._flattened_suites), mut.SIZE - 1))
        for suite_name in self._flattened_suites.keys():
            actions.RequestWorkAction.add_work(RunSuiteAction(suite_name))
        # waiting[rank] is True while that rank is idle; index 0 (master)
        # is permanently True.
        waiting = [True] + [False for _ in range(1, mut.SIZE)]
        while actions.RequestWorkAction._backlog or not all(waiting):
            for rank in range(1, mut.SIZE):
                # Non-blocking poll so one slow worker doesn't stall others.
                if not mut.COMM_WORLD.Iprobe(source=rank):
                    continue
                mpi_action = mut.COMM_WORLD.recv(source=rank)
                if not isinstance(mpi_action, actions.Action):
                    raise actions.MpiActionError(mpi_action)
                mpi_action.invoke()
                if self._debug:
                    sys.__stderr__.write('[{:0<3}] backlog: {}, waiting: {}\n'.format(mut.RANK, len(actions.RequestWorkAction._backlog), all(waiting)))
                # A work request means the worker is now idle.
                waiting[rank] = isinstance(mpi_action, actions.RequestWorkAction)
        for rank in range(1, mut.SIZE):
            mut.COMM_WORLD.send(actions.StopAction(), dest=rank)
            if self._debug:
                sys.__stderr__.write('[{:0<3}] telling {} to stop.\n'.format(mut.RANK, rank))
        return self._result

    def _run_as_worker(self):
        """Worker loop: request work from rank 0 and invoke whatever Action
        comes back until one returns falsy (StopAction).  Any failure aborts
        the whole MPI job so the master cannot deadlock."""
        not_done = True
        try:
            while not_done:
                if self._debug:
                    sys.__stderr__.write('[{:0>3}] waiting...\n'.format(mut.RANK))
                mut.COMM_WORLD.send(actions.RequestWorkAction())
                action = mut.COMM_WORLD.recv(None, source=0)
                if self._debug:
                    sys.__stderr__.write('[{:0>3}] sent {}\n'.format(mut.RANK, action))
                if not isinstance(action, actions.Action):
                    raise actions.MpiActionError(action)
                if self._debug:
                    sys.__stderr__.write('[{:0>3}] {}.\n'.format(mut.RANK, action))
                not_done = action.invoke()
        except:
            traceback.print_exc(file=sys.__stderr__)
            if self._debug:
                sys.__stderr__.write('[{:0>3}] worker node failed, quitting.\n'.format(mut.RANK))
            mut.COMM_WORLD.Abort(-1)


class _SuiteCollection(dict):
    """Dict of fully-qualified class name -> leaf suite; rejects duplicates
    so the same suite cannot be scheduled twice."""

    def __setitem__(self, key, value):
        if key in self:
            raise KeyError('Item with key {} already exists'.format(key))
        dict.__setitem__(self, key, value)

    def add(self, suite):
        """Register *suite* under its (single) TestCase class's qualified
        name, verifying all its tests share that class."""
        tests = iter(suite)
        first_test = tests.next()
        fully_qualified_class = '{}.{}'.format(first_test.__module__, first_test.__class__.__name__)
        for test in tests:
            if test.__class__ != first_test.__class__:
                raise Exception('A flattened suite should only contain instances of the same type, but '
                                'found {} and {}'.format(first_test, test))
        self[fully_qualified_class] = suite

    def ordered(self):
        # Placeholder: deterministic ordering not implemented yet.
        pass


class RunSuiteAction(actions.Action):
    """Action sent to a worker: run one named leaf suite locally."""

    def __init__(self, suite_name):
        self._suite_name = suite_name

    def invoke(self):
        """Run the named suite against the worker's local result object.
        Returns False (stop) when the result requests a stop."""
        local_suite = MpiTestSuite.get_instance()
        result = local_suite._result
        for test in local_suite._flattened_suites[self._suite_name]:
            # setUpClass/tearDownClass are driven manually because tests of
            # one class may be split across suites/ranks.
            self.tryClassSetUpOrTearDown(test, result, 'setUpClass')
            if result.shouldStop:
                break
            test(result)
            self.tryClassSetUpOrTearDown(test, result, 'tearDownClass')
        return not result.shouldStop

    def tryClassSetUpOrTearDown(self, test, result, methodName):
        """Invoke *methodName* on *test* (if present) with stdout/stderr
        captured; report any exception through *result* and always restore
        the real streams."""
        method = getattr(test, methodName, None)
        if method is not None:
            sys.stdout = cStringIO.StringIO()
            sys.stderr = cStringIO.StringIO()
            try:
                method()
            except:
                # The method may have replaced our capture buffers; restore
                # something with getvalue() before reporting.
                if not hasattr(sys.stdout, 'getvalue'):
                    sys.stdout = cStringIO.StringIO()
                    sys.stdout.write('[mut.{:0>3}] sys.stdout has been modified'.format(mut.RANK))
                if not hasattr(sys.stderr, 'getvalue'):
                    sys.stderr = cStringIO.StringIO()
                    sys.stderr.write('[mut.{:0>3}] sys.stderr has been modified'.format(mut.RANK))
                result.addError(test, sys.exc_info())
            finally:
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
{ "content_hash": "79a4c051fae3bb2da5ab0479fe7ff960", "timestamp": "", "source": "github", "line_count": 159, "max_line_length": 141, "avg_line_length": 36.61635220125786, "alnum_prop": 0.623497080041223, "repo_name": "SimplyKnownAsG/mpiunittest", "id": "7d04c224bc4688c212a2a64c75859c8b0f19b91a", "size": "5823", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mut/suites.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "23120" } ], "symlink_target": "" }
"""Generate fake OpenStack-style scale-test data as JSON files.

Compute-node records are generated first (collecting the generated host
names), then instance records are generated and distributed round-robin
over those hosts at HOST_TO_VM_RATIO VMs per host. Files are written in
batches of DATA_BATCH records by a multiprocessing pool.

ScaleDataProvider reads the generated files back into memory.

Fixes over the previous revision:
  * Pool.apply_async results are now collected and .get() is called, so
    exceptions raised inside worker processes are surfaced instead of
    being silently swallowed.
  * print statements use the single-argument call form and integer maths
    uses // so the script behaves identically under Python 2 and parses
    under Python 3.
"""
from argparse import ArgumentParser
from multiprocessing import Pool, Manager
from tests import faker
import datetime
import json
import time
import uuid
import sys
import random

# Both templates are mutated in place per record: every varying field is
# overwritten on each iteration before the record is dumped, so reuse of a
# single dict per worker process is safe.
INSTANCE_TEMPLATE = {
    'uuid': 'uuid_replace',
    'display_name': 'display_name_replace',
    'host': 'h1',
    'power_state': 1,
    'vm_state': 'active',
    'vcpus': 2,
    'memory_mb': 10,
    'disk_gb': 1,
    'key_name': 'key1',
    'launched_at': 'replace',
    'user_id': 'u1',
    'project_id': 'p1',
    'image_ref': 'i1',
    'flavor_name': 'flavor1',
    'ip': '0.0.0.0'
}

COMPUTENODE_TEMPLATE = {
    'id': -1,  # replaced per record
    'local_gb': 259,
    'vcpus_used': 0,
    'deleted': 0,
    'hypervisor_type': 'powervm',
    'created_at': '2013-04-01T00:27:06.000000',
    'local_gb_used': 0,
    'updated_at': '2013-04-03T00:35:41.000000',
    'hypervisor_hostname': 'hostname_replace',
    'memory_mb_used': 512,
    'memory_mb': 131072,
    'current_workload': 0,
    'vcpus': 16,
    'cpu_info': 'ppc64,powervm,3940',
    'running_vms': 0,
    'free_disk_gb': 259,
    'service_id': 7,
    'hypervisor_version': 7,
    'disk_available_least': 265856,
    'deleted_at': None,
    'free_ram_mb': 130560
}

# power_state code -> human-readable vm_state.
STATE_MAP = {
    0: 'pending',
    1: 'active',
    3: 'paused',
    4: 'shutdown',
    6: 'crashed',
    7: 'suspended',
    9: 'building',
}

DATA_BATCH = 1000       # records per output file
MAX_PROCESSES = 10      # worker pool size
HOST_TO_VM_RATIO = 20   # VMs generated per compute node


class ScaleDataProvider():
    """Loads previously generated data files back into a single list."""

    def __init__(self, file_names):
        self.file_names = file_names

    def execute(self, query):
        """Load and concatenate all files; ``query`` is currently ignored.

        'launched_at' strings are parsed back into datetime objects so that
        downstream consumers (e.g. InstanceHandler) receive real datetimes.
        """
        def datetime_hook(data):
            # Expensive translation that causes a ~4x multiple in data read
            # time; necessary to keep consumers of 'launched_at' happy.
            if 'launched_at' in data:
                raw = data['launched_at']
                fmt = '%Y-%m-%d %H:%M:%S.%f' if '.' in raw else '%Y-%m-%d %H:%M:%S'
                data['launched_at'] = datetime.datetime.strptime(raw, fmt)
            return data

        data = []
        for file_name in self.file_names:
            with open(file_name, 'r') as data_file:
                data.extend(json.load(data_file, object_hook=datetime_hook))
        return data


def ensure_dir(dirname):
    """Create ``dirname`` (including parents) if it does not already exist."""
    import os
    if not os.path.exists(dirname):
        print('Creating %s.' % dirname)
        os.makedirs(dirname)


def _generate_instance_batch(file_name, data_range, host_names):
    """Write one JSON file of fake instance records.

    ``data_range`` supplies the instance indices for this file and
    ``host_names`` the hosts to spread them over (round-robin).
    Runs inside a pool worker process.
    """
    sys.stdout.write('.')
    if data_range and not host_names:
        raise ValueError('cannot generate instances without any host names')
    host_names_index = 0
    with open(file_name, 'w') as data_file:
        add_separator = False
        data_file.write('[')
        for index in data_range:
            INSTANCE_TEMPLATE['uuid'] = str(uuid.uuid4())
            INSTANCE_TEMPLATE['display_name'] = 'i-%d.' % index + faker.domain()
            INSTANCE_TEMPLATE['launched_at'] = str(datetime.datetime.now())
            INSTANCE_TEMPLATE['host'] = host_names[host_names_index]
            INSTANCE_TEMPLATE['flavor_name'] = random.choice(
                ('m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'))
            # Weighted towards running VMs (3 in 5 chances of state 1).
            INSTANCE_TEMPLATE['power_state'] = random.choice((1, 1, 1, 3, 7))
            INSTANCE_TEMPLATE['vm_state'] = STATE_MAP[INSTANCE_TEMPLATE['power_state']]
            INSTANCE_TEMPLATE['vcpus'] = random.randint(1, 4)
            INSTANCE_TEMPLATE['memory_mb'] = random.randint(1, 2048)
            INSTANCE_TEMPLATE['disk_gb'] = random.randint(1, 256)
            INSTANCE_TEMPLATE['project_id'] = 'p' + str(random.randint(0, 9))
            INSTANCE_TEMPLATE['user_id'] = 'u' + str(random.randint(0, 9))
            INSTANCE_TEMPLATE['image_ref'] = 'i' + str(random.randint(0, 9))
            INSTANCE_TEMPLATE['ip'] = faker.ip()
            if add_separator:
                data_file.write(',\n')
            else:
                add_separator = True
            json.dump(INSTANCE_TEMPLATE, data_file)
            # Even host distribution: round-robin over the supplied hosts.
            host_names_index = (host_names_index + 1) % len(host_names)
        data_file.write(']')


def _generate_instance_data(location, size, host_names):
    """Fan out instance-file generation over a process pool.

    Returns the list of file names written under ``<location>/instance``.
    """
    instance_location = '%s/%s' % (location, 'instance')
    ensure_dir(instance_location)
    sys.stdout.write('generating instance data')
    file_names = []
    num_file = size // DATA_BATCH
    if size % DATA_BATCH:
        num_file = num_file + 1
    # Hosts consumed per file so batches stay aligned with the host list.
    HOST_BATCH = DATA_BATCH // HOST_TO_VM_RATIO
    pp = Pool(processes=MAX_PROCESSES)
    results = []
    for file_index in range(num_file):
        file_name = '%s/data_%d.dat' % (instance_location, file_index)
        file_names.append(file_name)
        data_start_index = file_index * DATA_BATCH
        is_last_page = (size - data_start_index) <= DATA_BATCH
        data_end_index = size if is_last_page else data_start_index + DATA_BATCH
        # Even batch distribution over the pool.
        results.append(pp.apply_async(
            _generate_instance_batch,
            args=(file_name,
                  range(data_start_index, data_end_index),
                  host_names[file_index * HOST_BATCH:file_index * HOST_BATCH + HOST_BATCH])))
    pp.close()
    pp.join()
    # Re-raise any exception that occurred inside a worker; without this
    # apply_async failures would be silently dropped.
    for result in results:
        result.get()
    sys.stdout.write('\n')
    return file_names


def _generate_computenode_batch(file_name, data_range, host_names):
    """Write one JSON file of fake compute-node records.

    Generated host names are appended to the shared ``host_names`` list
    (a multiprocessing.Manager proxy) for later instance placement.
    Runs inside a pool worker process.
    """
    sys.stdout.write('.')
    with open(file_name, 'w') as data_file:
        add_separator = False
        data_file.write('[')
        for index in data_range:
            COMPUTENODE_TEMPLATE['id'] = index
            host_name = faker.domain()
            COMPUTENODE_TEMPLATE['hypervisor_hostname'] = host_name
            vcpus = random.randint(8, 24)
            COMPUTENODE_TEMPLATE['vcpus'] = vcpus
            COMPUTENODE_TEMPLATE['memory_mb'] = random.randint(32768, 98304)
            COMPUTENODE_TEMPLATE['local_gb'] = random.randint(1, 2048)
            # TODO: find a way to compute vcpus_used, vcpus_assigned,
            # memory_active, memory_assigned, storage_used and storage_free.
            # All of them should be calculated after all vm's for the
            # particular host are generated.
            COMPUTENODE_TEMPLATE['vcpus_used'] = random.randint(30, 5 * vcpus + 20)
            COMPUTENODE_TEMPLATE['memory_mb_used'] = random.randint(1, 3 * COMPUTENODE_TEMPLATE['memory_mb'])
            COMPUTENODE_TEMPLATE['local_gb_used'] = random.randint(1, COMPUTENODE_TEMPLATE['local_gb'])
            COMPUTENODE_TEMPLATE['free_disk_gb'] = COMPUTENODE_TEMPLATE['local_gb'] - COMPUTENODE_TEMPLATE['local_gb_used']
            host_names.append(host_name)
            if add_separator:
                data_file.write(',\n')
            else:
                add_separator = True
            json.dump(COMPUTENODE_TEMPLATE, data_file)
        data_file.write(']')


def _generate_computenode_data(location, size):
    """Fan out compute-node-file generation over a process pool.

    Returns ``(file_names, host_names)`` where ``host_names`` is the shared
    list of all generated hypervisor host names.
    """
    computenode_location = '%s/%s' % (location, 'computenode')
    ensure_dir(computenode_location)
    sys.stdout.write('generating computenode data')
    file_names = []
    num_file = size // DATA_BATCH
    if size % DATA_BATCH:
        num_file = num_file + 1
    # Manager holds the host_names list and lets worker processes append to
    # it via proxy; initial testing suggests this is process safe.
    manager = Manager()
    host_names = manager.list()
    pp = Pool(processes=MAX_PROCESSES)
    results = []
    for file_index in range(num_file):
        file_name = '%s/data_%d.dat' % (computenode_location, file_index)
        file_names.append(file_name)
        data_start_index = file_index * DATA_BATCH
        is_last_page = (size - data_start_index) <= DATA_BATCH
        data_end_index = size if is_last_page else data_start_index + DATA_BATCH
        results.append(pp.apply_async(
            _generate_computenode_batch,
            args=(file_name, range(data_start_index, data_end_index), host_names)))
    pp.close()
    pp.join()
    # Surface worker exceptions (see _generate_instance_data).
    for result in results:
        result.get()
    sys.stdout.write('\n')
    return (file_names, host_names)


def generate_data(location, size):
    """Generate ``size`` instances plus the matching compute nodes.

    Returns ``(computenode_files, instance_files)``.
    """
    # Compute nodes first, rounded up so every instance has a host.
    computenode_size = size // HOST_TO_VM_RATIO + (1 if size % HOST_TO_VM_RATIO > 0 else 0)
    start_time = time.time()
    computenodes_files, host_names = _generate_computenode_data(location, computenode_size)
    print('generated %s compute_nodes in %f.' % (len(host_names), time.time() - start_time))

    # Then the instances, placed on the hosts generated above.
    start_time = time.time()
    instances_files = _generate_instance_data(location, size, host_names)
    print('generated %s instance_nodes in %f.' % (size, time.time() - start_time))
    return (computenodes_files, instances_files)


def main():
    """CLI entry point: ``scale_gen.py -l <dir> -n <count>``."""
    parser = ArgumentParser()
    parser.add_argument("-l", "--location", help="location of the file.")
    parser.add_argument("-n", "--number", help="number of resources.")
    args = parser.parse_args()
    generate_data(args.location, int(args.number))


if __name__ == '__main__':
    main()
{ "content_hash": "dbc9b52ab07cdf1ffd63b241cb2b8ecd", "timestamp": "", "source": "github", "line_count": 235, "max_line_length": 211, "avg_line_length": 42.4936170212766, "alnum_prop": 0.5923292609653515, "repo_name": "StackStorm/search", "id": "31faf5e6b2b62d3e3bba98937fc463a0b8c50d47", "size": "9986", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/scale_gen.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "88711" }, { "name": "Shell", "bytes": "4731" } ], "symlink_target": "" }
import six

from django.core.exceptions import ObjectDoesNotExist
from django.views import generic
from django.db.models import Q
from django.http import HttpResponseRedirect, Http404
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string

from oscar.core.loading import get_classes, get_model
from oscar.views import sort_queryset
from oscar.views.generic import ObjectLookupView

# Forms and models are loaded dynamically so projects can override them.
(ProductForm, ProductSearchForm, CategoryForm, StockRecordFormSet,
 StockAlertSearchForm, ProductCategoryFormSet, ProductImageFormSet,
 ProductRecommendationFormSet) \
    = get_classes('dashboard.catalogue.forms',
                  ('ProductForm', 'ProductSearchForm', 'CategoryForm',
                   'StockRecordFormSet', 'StockAlertSearchForm',
                   'ProductCategoryFormSet', 'ProductImageFormSet',
                   'ProductRecommendationFormSet'))
Product = get_model('catalogue', 'Product')
Category = get_model('catalogue', 'Category')
ProductImage = get_model('catalogue', 'ProductImage')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
StockRecord = get_model('partner', 'StockRecord')
StockAlert = get_model('partner', 'StockAlert')
Partner = get_model('partner', 'Partner')


def filter_products(queryset, user):
    """
    Restrict the queryset to products the given user has access to.
    A staff user is allowed to access all Products.
    A non-staff user is only allowed access to a product if they are in at
    least one stock record's partner user list.
    """
    if user.is_staff:
        return queryset
    return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()


class ProductListView(generic.ListView):
    """
    Dashboard view of the product list.
    Supports the permission-based dashboard.
    """
    template_name = 'dashboard/catalogue/product_list.html'
    model = Product
    context_object_name = 'products'
    form_class = ProductSearchForm
    # Rendered as the human-readable queryset description; the two filter
    # placeholders are filled in by apply_search().
    description_template = _(u'Products %(upc_filter)s %(title_filter)s')
    paginate_by = 20
    recent_products = 5

    def get_context_data(self, **kwargs):
        # Expose the search form and a description of the current filters.
        ctx = super(ProductListView, self).get_context_data(**kwargs)
        ctx['product_classes'] = ProductClass.objects.all()
        ctx['form'] = self.form
        if 'recently_edited' in self.request.GET:
            ctx['queryset_description'] \
                = _("Last %(num_products)d edited products") \
                % {'num_products': self.recent_products}
        else:
            ctx['queryset_description'] = self.description
        return ctx

    def filter_queryset(self, queryset):
        """
        Apply any filters to restrict the products that appear on the list
        """
        return filter_products(queryset, self.request.user)

    def get_queryset(self):
        """
        Build the queryset for this list
        """
        queryset = Product.objects.base_queryset()
        queryset = self.filter_queryset(queryset)
        queryset = self.apply_search(queryset)
        queryset = self.apply_ordering(queryset)
        return queryset

    def apply_ordering(self, queryset):
        if 'recently_edited' in self.request.GET:
            # Just show recently edited
            queryset = queryset.order_by('-date_updated')
            queryset = queryset[:self.recent_products]
        else:
            # Allow sorting when all
            queryset = sort_queryset(queryset, self.request,
                                     ['title'], '-date_created')
        return queryset

    def apply_search(self, queryset):
        """
        Filter the queryset and set the description according to the search
        parameters given
        """
        description_ctx = {'upc_filter': '',
                           'title_filter': ''}
        self.form = self.form_class(self.request.GET)
        if not self.form.is_valid():
            # Invalid search: show the unfiltered list with a plain
            # description.
            self.description = self.description_template % description_ctx
            return queryset
        data = self.form.cleaned_data
        if data.get('upc'):
            queryset = queryset.filter(upc=data['upc'])
            description_ctx['upc_filter'] = _(
                " including an item with UPC '%s'") % data['upc']
        if data.get('title'):
            queryset = queryset.filter(
                title__icontains=data['title']).distinct()
            description_ctx['title_filter'] = _(
                " including an item with title matching '%s'") % data['title']
        self.description = self.description_template % description_ctx
        return queryset


class ProductCreateRedirectView(generic.RedirectView):
    # Redirects "create product" requests to the create view for the chosen
    # product class, or back to the list with an error if none was chosen.
    permanent = False

    def get_product_create_url(self, product_class):
        """ Allow site to provide custom URL """
        return reverse('dashboard:catalogue-product-create',
                       kwargs={'product_class_slug': product_class.slug})

    def get_invalid_product_class_url(self):
        messages.error(self.request, _("Please choose a product class"))
        return reverse('dashboard:catalogue-product-list')

    def get_redirect_url(self, **kwargs):
        product_class_slug = self.request.GET.get('product_class')
        if product_class_slug is None:
            return self.get_invalid_product_class_url()
        try:
            product_class = ProductClass.objects.get(slug=product_class_slug)
        except ProductClass.DoesNotExist:
            return self.get_invalid_product_class_url()
        else:
            return self.get_product_create_url(product_class)


class ProductCreateUpdateView(generic.UpdateView):
    """
    Dashboard view that bundles both creating and updating single products.
    Supports the permission-based dashboard.
    """
    template_name = 'dashboard/catalogue/product_update.html'
    model = Product
    context_object_name = 'product'

    form_class = ProductForm
    category_formset = ProductCategoryFormSet
    image_formset = ProductImageFormSet
    recommendations_formset = ProductRecommendationFormSet
    stockrecord_formset = StockRecordFormSet

    def __init__(self, *args, **kwargs):
        super(ProductCreateUpdateView, self).__init__(*args, **kwargs)
        # Maps context names to formset classes; iterated when building the
        # context and when validating/saving submitted data.
        self.formsets = {'category_formset': self.category_formset,
                         'image_formset': self.image_formset,
                         'recommended_formset': self.recommendations_formset,
                         'stockrecord_formset': self.stockrecord_formset}

    def get_queryset(self):
        """
        Filter products that the user doesn't have permission to update
        """
        return filter_products(Product.objects.all(), self.request.user)

    def get_object(self, queryset=None):
        """
        This parts allows generic.UpdateView to handle creating products as
        well. The only distinction between an UpdateView and a CreateView
        is that self.object is None. We emulate this behavior.
        Additionally, self.product_class is set.
        """
        self.creating = not 'pk' in self.kwargs
        if self.creating:
            try:
                product_class_slug = self.kwargs.get('product_class_slug',
                                                     None)
                self.product_class = ProductClass.objects.get(
                    slug=product_class_slug)
            except ObjectDoesNotExist:
                raise Http404
            else:
                return None  # success
        else:
            product = super(ProductCreateUpdateView, self).get_object(queryset)
            self.product_class = product.product_class
            return product

    def get_context_data(self, **kwargs):
        ctx = super(ProductCreateUpdateView, self).get_context_data(**kwargs)
        ctx['product_class'] = self.product_class
        # Only instantiate formsets that were not already passed in (e.g.
        # bound formsets re-supplied by forms_invalid()).
        for ctx_name, formset_class in six.iteritems(self.formsets):
            if ctx_name not in ctx:
                ctx[ctx_name] = formset_class(self.product_class,
                                              self.request.user,
                                              instance=self.object)
        if self.object is None:
            ctx['title'] = _('Create new %s product') % self.product_class.name
        else:
            ctx['title'] = ctx['product'].get_title()
        return ctx

    def get_form_kwargs(self):
        kwargs = super(ProductCreateUpdateView, self).get_form_kwargs()
        kwargs['product_class'] = self.product_class
        return kwargs

    def process_all_forms(self, form):
        """
        Short-circuits the regular logic to have one place to have our
        logic to check all forms
        """
        # Need to create the product here because the inline forms need it
        # can't use commit=False because ProductForm does not support it
        if self.creating and form.is_valid():
            self.object = form.save()

        formsets = {}
        for ctx_name, formset_class in six.iteritems(self.formsets):
            formsets[ctx_name] = formset_class(self.product_class,
                                               self.request.user,
                                               self.request.POST,
                                               self.request.FILES,
                                               instance=self.object)

        is_valid = form.is_valid() and all([formset.is_valid()
                                            for formset in formsets.values()])

        cross_form_validation_result = self.clean(form, formsets)
        if is_valid and cross_form_validation_result:
            return self.forms_valid(form, formsets)
        else:
            return self.forms_invalid(form, formsets)

    # form_valid and form_invalid are called depending on the validation result
    # of just the product form and redisplay the form respectively return a
    # redirect to the success URL. In both cases we need to check our formsets
    # as well, so both methods do the same. process_all_forms then calls
    # forms_valid or forms_invalid respectively, which do the redisplay or
    # redirect.
    form_valid = form_invalid = process_all_forms

    def clean(self, form, formsets):
        """
        Perform any cross-form/formset validation. If there are errors, attach
        errors to a form or a form field so that they are displayed to the
        user and return False. If everything is valid, return True. This method
        will be called regardless of whether the individual forms are valid.
        """
        return True

    def forms_valid(self, form, formsets):
        """
        Save all changes and display a success url.
        """
        if not self.creating:
            # a just created product was already saved in process_all_forms()
            self.object = form.save()

        # Save formsets
        for formset in formsets.values():
            formset.save()

        return HttpResponseRedirect(self.get_success_url())

    def forms_invalid(self, form, formsets):
        # delete the temporary product again
        if self.creating and self.object and self.object.pk is not None:
            self.object.delete()
            self.object = None

        messages.error(self.request,
                       _("Your submitted data was not valid - please "
                         "correct the errors below"))
        ctx = self.get_context_data(form=form, **formsets)
        return self.render_to_response(ctx)

    def get_url_with_querystring(self, url):
        # Preserve the incoming querystring (e.g. list filters) on redirect.
        url_parts = [url]
        if self.request.GET.urlencode():
            url_parts += [self.request.GET.urlencode()]
        return "?".join(url_parts)

    def get_success_url(self):
        msg = render_to_string(
            'dashboard/catalogue/messages/product_saved.html',
            {
                'product': self.object,
                'creating': self.creating,
            })
        messages.success(self.request, msg)
        url = reverse('dashboard:catalogue-product-list')
        # "Save and continue editing" stays on the product page.
        if self.request.POST.get('action') == 'continue':
            url = reverse('dashboard:catalogue-product',
                          kwargs={"pk": self.object.id})
        return self.get_url_with_querystring(url)


class ProductDeleteView(generic.DeleteView):
    """
    Dashboard view to delete a product.
    Supports the permission-based dashboard.
    """
    template_name = 'dashboard/catalogue/product_delete.html'
    model = Product
    context_object_name = 'product'

    def get_queryset(self):
        """
        Filter products that the user doesn't have permission to update
        """
        return filter_products(Product.objects.all(), self.request.user)

    def get_success_url(self):
        msg = _("Deleted product '%s'") % self.object.title
        messages.success(self.request, msg)
        return reverse('dashboard:catalogue-product-list')


class StockAlertListView(generic.ListView):
    # Dashboard list of stock alerts, optionally filtered by status.
    template_name = 'dashboard/catalogue/stockalert_list.html'
    model = StockAlert
    context_object_name = 'alerts'
    paginate_by = 20

    def get_context_data(self, **kwargs):
        ctx = super(StockAlertListView, self).get_context_data(**kwargs)
        ctx['form'] = self.form
        ctx['description'] = self.description
        return ctx

    def get_queryset(self):
        if 'status' in self.request.GET:
            self.form = StockAlertSearchForm(self.request.GET)
            if self.form.is_valid():
                status = self.form.cleaned_data['status']
                self.description = _('Alerts with status "%s"') % status
                return self.model.objects.filter(status=status)
        # No (valid) status filter: show everything with an unbound form.
        self.description = _('All alerts')
        self.form = StockAlertSearchForm()
        return self.model.objects.all()


class CategoryListView(generic.TemplateView):
    # Top level of the category tree.
    template_name = 'dashboard/catalogue/category_list.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryListView, self).get_context_data(*args, **kwargs)
        ctx['child_categories'] = Category.get_root_nodes()
        return ctx


class CategoryDetailListView(generic.DetailView):
    # One level of the category tree: a category with its children.
    template_name = 'dashboard/catalogue/category_list.html'
    model = Category
    context_object_name = 'category'

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryDetailListView, self).get_context_data(*args,
                                                                   **kwargs)
        ctx['child_categories'] = self.object.get_children()
        ctx['ancestors'] = self.object.get_ancestors()
        return ctx


class CategoryListMixin(object):
    # Shared success-URL logic for the category CRUD views: return to the
    # parent category's listing, or the root list for top-level categories.

    def get_success_url(self):
        parent = self.object.get_parent()
        if parent is None:
            return reverse("dashboard:catalogue-category-list")
        else:
            return reverse("dashboard:catalogue-category-detail-list",
                           args=(parent.pk,))


class CategoryCreateView(CategoryListMixin, generic.CreateView):
    template_name = 'dashboard/catalogue/category_form.html'
    model = Category
    form_class = CategoryForm

    def get_context_data(self, **kwargs):
        ctx = super(CategoryCreateView, self).get_context_data(**kwargs)
        ctx['title'] = _("Add a new category")
        return ctx

    def get_success_url(self):
        messages.info(self.request, _("Category created successfully"))
        return super(CategoryCreateView, self).get_success_url()

    def get_initial(self):
        # set child category if set in the URL kwargs
        initial = super(CategoryCreateView, self).get_initial()
        if 'parent' in self.kwargs:
            initial['_ref_node_id'] = self.kwargs['parent']
        return initial


class CategoryUpdateView(CategoryListMixin, generic.UpdateView):
    template_name = 'dashboard/catalogue/category_form.html'
    model = Category
    form_class = CategoryForm

    def get_context_data(self, **kwargs):
        ctx = super(CategoryUpdateView, self).get_context_data(**kwargs)
        ctx['title'] = _("Update category '%s'") % self.object.name
        return ctx

    def get_success_url(self):
        messages.info(self.request, _("Category updated successfully"))
        return super(CategoryUpdateView, self).get_success_url()


class CategoryDeleteView(CategoryListMixin, generic.DeleteView):
    template_name = 'dashboard/catalogue/category_delete.html'
    model = Category

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryDeleteView, self).get_context_data(*args, **kwargs)
        ctx['parent'] = self.object.get_parent()
        return ctx

    def get_success_url(self):
        messages.info(self.request, _("Category deleted successfully"))
        return super(CategoryDeleteView, self).get_success_url()


class ProductLookupView(ObjectLookupView):
    # AJAX product lookup used by select widgets; searches product and
    # parent-product titles.
    model = Product

    def get_query_set(self):
        return self.model.browsable.all()

    def lookup_filter(self, qs, term):
        return qs.filter(Q(title__icontains=term)
                         | Q(parent__title__icontains=term))
{ "content_hash": "ff4582b395db29c50ea27b0601d8c602", "timestamp": "", "source": "github", "line_count": 469, "max_line_length": 79, "avg_line_length": 36.97867803837953, "alnum_prop": 0.6193853427895981, "repo_name": "MrReN/django-oscar", "id": "c83d6a5c6708b3fcddad01a7e784fd4ba96fc69d", "size": "17343", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "oscar/apps/dashboard/catalogue/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
"""Tests for feature_column."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import copy

import numpy as np

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import serialization
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import rmsprop
from tensorflow_estimator.python.estimator.inputs import numpy_io


def _initialized_session(config=None):
  # Returns a Session with global variables and lookup tables initialized,
  # ready for evaluating feature-column outputs.
  sess = session.Session(config=config)
  sess.run(variables_lib.global_variables_initializer())
  sess.run(lookup_ops.tables_initializer())
  return sess


def get_linear_model_bias(name='linear_model'):
  # Fetches the bias variable created by a linear model under `name`.
  with variable_scope.variable_scope(name, reuse=True):
    return variable_scope.get_variable('bias_weights')


def get_linear_model_column_var(column, name='linear_model'):
  # Fetches the per-column weight variable of a linear model by collection.
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                            name + '/' + column.name)[0]


class BaseFeatureColumnForTests(fc.FeatureColumn):
  """A base FeatureColumn useful to avoid boiler-plate in tests.

  Provides dummy implementations for abstract methods that raise ValueError in
  order to avoid re-defining all abstract methods for each test sub-class.
  """

  @property
  def parents(self):
    raise ValueError('Should not use this method.')

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    raise ValueError('Should not use this method.')

  def _get_config(self):
    raise ValueError('Should not use this method.')


class LazyColumnTest(test.TestCase):
  # Tests for fc.FeatureTransformationCache: transformation caching, error
  # handling and input normalization.

  def test_transformations_called_once(self):

    class TransformCounter(BaseFeatureColumnForTests):

      def __init__(self):
        self.num_transform = 0

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'TransformCounter'

      def transform_feature(self, transformation_cache, state_manager):
        self.num_transform += 1  # Count transform calls.
        return transformation_cache.get('a', state_manager)

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    column = TransformCounter()
    self.assertEqual(0, column.num_transform)
    transformation_cache.get(column, None)
    self.assertEqual(1, column.num_transform)
    # Second get() must be served from the cache, not re-transform.
    transformation_cache.get(column, None)
    self.assertEqual(1, column.num_transform)

  def test_returns_transform_output(self):

    class Transformer(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'Transformer'

      def transform_feature(self, transformation_cache, state_manager):
        return 'Output'

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    column = Transformer()
    self.assertEqual('Output', transformation_cache.get(column, None))
    self.assertEqual('Output', transformation_cache.get(column, None))

  def test_does_not_pollute_given_features_dict(self):

    class Transformer(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'Transformer'

      def transform_feature(self, transformation_cache, state_manager):
        return 'Output'

      @property
      def parse_example_spec(self):
        pass

    features = {'a': [[2], [3.]]}
    transformation_cache = fc.FeatureTransformationCache(features=features)
    transformation_cache.get(Transformer(), None)
    # The caller's dict must not gain cached transformation outputs.
    self.assertEqual(['a'], list(features.keys()))

  def test_error_if_feature_is_not_found(self):
    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      transformation_cache.get('bbb', None)
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      transformation_cache.get(u'bbb', None)

  def test_not_supported_feature_column(self):

    class NotAProperColumn(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'NotAProperColumn'

      def transform_feature(self, transformation_cache, state_manager):
        # It should return not None.
        pass

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'NotAProperColumn is not supported'):
      transformation_cache.get(NotAProperColumn(), None)

  def test_key_should_be_string_or_feature_colum(self):

    class NotAFeatureColumn(object):
      pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(
        TypeError, '"key" must be either a "str" or "FeatureColumn".'):
      transformation_cache.get(NotAFeatureColumn(), None)

  @test_util.run_deprecated_v1
  def test_expand_dim_rank_1_sparse_tensor_empty_batch(self):
    # empty 1-D sparse tensor:
    transformation_cache = fc.FeatureTransformationCache(
        features={
            'a':
                sparse_tensor.SparseTensor(
                    indices=np.reshape(np.array([], dtype=np.int64), (0, 1)),
                    dense_shape=[0],
                    values=np.array([]))
        })

    spv = self.evaluate(transformation_cache.get('a', None))
    # Rank-1 sparse input is expanded to rank 2 even for an empty batch.
    self.assertAllEqual(np.array([0, 1], dtype=np.int64), spv.dense_shape)
    self.assertAllEqual(
        np.reshape(np.array([], dtype=np.int64), (0, 2)), spv.indices)


class NumericColumnTest(test.TestCase):
  # Tests for fc.numeric_column. (Class continues beyond this file chunk.)

  @test_util.run_deprecated_v1
  def test_defaults(self):
    a = fc.numeric_column('aaa')
    self.assertEqual('aaa', a.key)
    self.assertEqual('aaa', a.name)
    self.assertEqual((1,), a.shape)
    self.assertIsNone(a.default_value)
    self.assertEqual(dtypes.float32, a.dtype)
    self.assertIsNone(a.normalizer_fn)
    self.assertTrue(a._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.numeric_column(key=('aaa',))

  def test_shape_saved_as_tuple(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
    self.assertEqual((1, 2), a.shape)

  def test_default_value_saved_as_tuple(self):
    a = fc.numeric_column('aaa', default_value=4.)
self.assertEqual((4.,), a.default_value) a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]]) self.assertEqual(((3., 2.),), a.default_value) def test_shape_and_default_value_compatibility(self): fc.numeric_column('aaa', shape=[2], default_value=[1, 2.]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.]) fc.numeric_column( 'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc.numeric_column( 'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc.numeric_column( 'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]]) def test_default_value_type_check(self): fc.numeric_column( 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32) fc.numeric_column( 'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'): fc.numeric_column( 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32) with self.assertRaisesRegexp(TypeError, 'default_value must be compatible with dtype'): fc.numeric_column('aaa', default_value=['string']) def test_shape_must_be_positive_integer(self): with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'): fc.numeric_column( 'aaa', shape=[ 1.0, ]) with self.assertRaisesRegexp(ValueError, 'shape dimensions must be greater than 0'): fc.numeric_column( 'aaa', shape=[ 0, ]) def test_dtype_is_convertible_to_float(self): with self.assertRaisesRegexp(ValueError, 'dtype must be convertible to float'): fc.numeric_column('aaa', dtype=dtypes.string) def test_scalar_default_value_fills_the_shape(self): a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.) 
self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value) def test_parse_spec(self): a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32) self.assertEqual({ 'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32) }, a.parse_example_spec) @test_util.run_deprecated_v1 def test_parse_example_no_default_value(self): price = fc.numeric_column('price', shape=[2]) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'price': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[20., 110.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([price])) self.assertIn('price', features) self.assertAllEqual([[20., 110.]], self.evaluate(features['price'])) @test_util.run_deprecated_v1 def test_parse_example_with_default_value(self): price = fc.numeric_column('price', shape=[2], default_value=11.) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'price': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[20., 110.])) })) no_data = example_pb2.Example( features=feature_pb2.Features( feature={ 'something_else': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[20., 110.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString(), no_data.SerializeToString()], features=fc.make_parse_example_spec_v2([price])) self.assertIn('price', features) self.assertAllEqual([[20., 110.], [11., 11.]], self.evaluate(features['price'])) def test_normalizer_fn_must_be_callable(self): with self.assertRaisesRegexp(TypeError, 'must be a callable'): fc.numeric_column('price', normalizer_fn='NotACallable') @test_util.run_deprecated_v1 def test_normalizer_fn_transform_feature(self): def _increment_two(input_tensor): return input_tensor + 2. 
    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    output = fc._transform_features_v2({
        'price': [[1., 2.], [5., 6.]]
    }, [price], None)
    # Each input value is incremented by 2 by the normalizer.
    self.assertAllEqual([[3., 4.], [7., 8.]], self.evaluate(output[price]))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor(self):
    """get_dense_tensor() returns the cached transformed (normalized) tensor."""

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    transformation_cache = fc.FeatureTransformationCache({
        'price': [[1., 2.], [5., 6.]]
    })
    self.assertEqual(
        transformation_cache.get(price, None),
        price.get_dense_tensor(transformation_cache, None))

  def test_sparse_tensor_not_supported(self):
    # numeric_column only accepts dense Tensor inputs.
    price = fc.numeric_column('price')
    transformation_cache = fc.FeatureTransformationCache({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      price.transform_feature(transformation_cache, None)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
    a_copy = copy.deepcopy(a)
    self.assertEqual(a_copy.name, 'aaa')
    self.assertEqual(a_copy.shape, (1, 2))
    self.assertEqual(a_copy.default_value, ((3., 2.),))

  def test_numpy_default_value(self):
    # A numpy array default is normalized to nested tuples like a list.
    a = fc.numeric_column(
        'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
    self.assertEqual(a.default_value, ((3., 2.),))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """LinearModel output is weight * feature value (weights start at 0)."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      model = fc.LinearModel([price])
      predictions = model(features)
      price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        # With weight 10: predictions = 10 * [1., 5.].
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[10.], [50.]], self.evaluate(predictions))

  def test_old_linear_model(self):
    # Same check against the v1 (fc_old) linear_model API.
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc_old.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[10.], [50.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    """NumericColumn round-trips through _get_config()/_from_config()."""

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', normalizer_fn=_increment_two)
    self.assertEqual(['price'], price.parents)

    config = price._get_config()
    self.assertEqual({
        'key': 'price',
        'shape': (1,),
        'default_value': None,
        'dtype': 'float32',
        'normalizer_fn': '_increment_two'
    }, config)

    # The normalizer is serialized by name; custom_objects maps it back.
    self.assertEqual(
        price,
        fc.NumericColumn._from_config(
            config, custom_objects={'_increment_two': _increment_two}))


class BucketizedColumnTest(test.TestCase):
  """Tests fc.bucketized_column: validation, transform, and linear models."""

  def test_invalid_source_column_type(self):
    a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
    with self.assertRaisesRegexp(
        ValueError,
        'source_column must be a column generated with numeric_column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_source_column_shape(self):
    a = fc.numeric_column('aaa', shape=[2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'source_column must be one-dimensional column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_boundaries(self):
    """boundaries must be a non-empty, strictly sorted sequence."""
    a = fc.numeric_column('aaa')
    with self.assertRaisesRegexp(ValueError, 'boundaries must not be empty'):
      fc.bucketized_column(a, boundaries=None)
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=1.)
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 0])
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      # Duplicate boundaries are rejected too.
      fc.bucketized_column(a, boundaries=[1, 1])

  def test_name(self):
    a = fc.numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertTrue(b._is_v2_column)
    # Name is derived from the source column's name.
    self.assertEqual('aaa_bucketized', b.name)

  def test_is_v2_column_old_numeric(self):
    # Wrapping a v1 numeric column makes the bucketized column non-v2.
    a = fc_old._numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertFalse(b._is_v2_column)
    self.assertEqual('aaa_bucketized', b.name)

  def test_parse_spec(self):
    # The parse spec is delegated to the source numeric column.
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
    }, b.parse_example_spec)

  def test_variable_shape(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa` has shape [2] times three buckets -> variable_shape=[2, 3].
    self.assertAllEqual((2, 3), b.variable_shape)

  def test_num_buckets(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa` has shape [2] times three buckets -> num_buckets=6.
    self.assertEqual(6, b.num_buckets)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    # Parsing goes through the source numeric column's spec unchanged.
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([bucketized_price]))
    self.assertIn('price', features)
    self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    """Transformation maps each value to its bucket index."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformed_tensor = fc._transform_features_v2({
          'price': [[-1., 1.], [5., 6.]]
      }, [bucketized_price], None)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # -1.<0 -> bucket 0; 1. in [0,2) -> 1; 5. in [4,6) -> 3; 6.>=6 -> 4.
      self.assertAllEqual([[0, 1], [3, 4]],
                          self.evaluate(transformed_tensor[bucketized_price]))

  def test_get_dense_tensor_one_input_value(self):
    """Tests _get_dense_tensor() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1.], [1.], [5.], [6.]]
      })
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      bucketized_price_tensor = bucketized_price.get_dense_tensor(
          transformation_cache, None)
      self.assertAllClose(
          # One-hot tensor.
          [[[1., 0., 0., 0., 0.]], [[0., 1., 0., 0., 0.]],
           [[0., 0., 0., 1., 0.]], [[0., 0., 0., 0., 1.]]],
          self.evaluate(bucketized_price_tensor))

  def test_get_dense_tensor_two_input_values(self):
    """Tests _get_dense_tensor() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1., 1.], [5., 6.]]
      })
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      bucketized_price_tensor = bucketized_price.get_dense_tensor(
          transformation_cache, None)
      self.assertAllClose(
          # One-hot tensor.
          [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
           [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
          self.evaluate(bucketized_price_tensor))

  def test_get_sparse_tensors_one_input_value(self):
    """Tests _get_sparse_tensors() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1.], [1.], [5.], [6.]]
      })
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price.get_sparse_tensors(
            transformation_cache, None)
        # Bucketized columns are unweighted categorical outputs.
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual([[0, 0], [1, 0], [2, 0], [3, 0]],
                            id_tensor_value.indices)
        self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
        self.assertAllEqual([4, 1], id_tensor_value.dense_shape)

  def test_get_sparse_tensors_two_input_values(self):
    """Tests _get_sparse_tensors() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1., 1.], [5., 6.]]
      })
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price.get_sparse_tensors(
            transformation_cache, None)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1]],
                            id_tensor_value.indices)
        # Values 0-4 correspond to the first column of the input price.
        # Values 5-9 correspond to the second column of the input price.
        self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
        self.assertAllEqual([2, 2], id_tensor_value.dense_shape)

  def test_sparse_tensor_input_not_supported(self):
    # Bucketization requires a dense Tensor source.
    price = fc.numeric_column('price')
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
    transformation_cache = fc.FeatureTransformationCache({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      bucketized_price.transform_feature(transformation_cache, None)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[2])
    a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
    a_bucketized_copy = copy.deepcopy(a_bucketized)
    self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
    self.assertAllEqual(a_bucketized_copy.variable_shape, (2, 3))
    self.assertEqual(a_bucketized_copy.boundaries, (0, 1))

  def test_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      model = fc.LinearModel([bucketized_price])
      predictions = model(features)
      bucketized_price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  def test_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      model = fc.LinearModel([bucketized_price])
      predictions = model(features)
      bucketized_price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))

  def test_old_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    # Same as test_linear_model_one_input_value, via the v1 fc_old API.
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  def test_old_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    # Same as test_linear_model_two_input_values, via the v1 fc_old API.
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))

  def test_old_linear_model_one_input_value_old_numeric(self):
    """Tests linear_model() for input with shape=[1]."""
    # Variant with a v1 source numeric column wrapped by a v2 bucketizer.
    price = fc_old._numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]], self.evaluate(predictions)) sess.run(bias.assign([1.])) self.assertAllClose([[11.], [21.], [41.], [51.]], self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_serialization(self): price = fc.numeric_column('price', shape=[2]) bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6]) self.assertEqual([price], bucketized_price.parents) config = bucketized_price._get_config() self.assertEqual({ 'source_column': { 'class_name': 'NumericColumn', 'config': { 'key': 'price', 'shape': (2,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } }, 'boundaries': (0, 2, 4, 6) }, config) new_bucketized_price = fc.BucketizedColumn._from_config(config) self.assertEqual(bucketized_price, new_bucketized_price) self.assertIsNot(price, new_bucketized_price.source_column) new_bucketized_price = fc.BucketizedColumn._from_config( config, columns_by_name={price.name: price}) self.assertEqual(bucketized_price, new_bucketized_price) self.assertIs(price, new_bucketized_price.source_column) class HashedCategoricalColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): a = fc.categorical_column_with_hash_bucket('aaa', 10) self.assertEqual('aaa', a.name) self.assertEqual('aaa', a.key) self.assertEqual(10, a.hash_bucket_size) self.assertEqual(dtypes.string, a.dtype) self.assertTrue(a._is_v2_column) def test_key_should_be_string(self): with self.assertRaisesRegexp(ValueError, 'key must be a string.'): fc.categorical_column_with_hash_bucket(('key',), 10) def test_bucket_size_should_be_given(self): with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'): fc.categorical_column_with_hash_bucket('aaa', None) def test_bucket_size_should_be_positive(self): with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be at least 1'): fc.categorical_column_with_hash_bucket('aaa', 0) def test_dtype_should_be_string_or_integer(self): 
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string) fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32) with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'): fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32) @test_util.run_deprecated_v1 def test_deep_copy(self): original = fc.categorical_column_with_hash_bucket('aaa', 10) for column in (original, copy.deepcopy(original)): self.assertEqual('aaa', column.name) self.assertEqual(10, column.hash_bucket_size) self.assertEqual(10, column.num_buckets) self.assertEqual(dtypes.string, column.dtype) def test_parse_spec_string(self): a = fc.categorical_column_with_hash_bucket('aaa', 10) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.string) }, a.parse_example_spec) def test_parse_spec_int(self): a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, a.parse_example_spec) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_hash_bucket('aaa', 10) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_strings_should_be_hashed(self): hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10) wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) outputs = fc._transform_features_v2({ 'wire': wire_tensor }, [hashed_sparse], None) 
output = outputs[hashed_sparse] # Check exact hashed output. If hashing changes this test will break. expected_values = [6, 4, 1] self.assertEqual(dtypes.int64, output.values.dtype) self.assertAllEqual(expected_values, self.evaluate(output.values)) self.assertAllEqual( self.evaluate(wire_tensor.indices), self.evaluate(output.indices)) self.assertAllEqual( self.evaluate(wire_tensor.dense_shape), self.evaluate(output.dense_shape)) def test_tensor_dtype_should_be_string_or_integer(self): string_fc = fc.categorical_column_with_hash_bucket( 'a_string', 10, dtype=dtypes.string) int_fc = fc.categorical_column_with_hash_bucket( 'a_int', 10, dtype=dtypes.int32) float_fc = fc.categorical_column_with_hash_bucket( 'a_float', 10, dtype=dtypes.string) int_tensor = sparse_tensor.SparseTensor( values=[101], indices=[[0, 0]], dense_shape=[1, 1]) string_tensor = sparse_tensor.SparseTensor( values=['101'], indices=[[0, 0]], dense_shape=[1, 1]) float_tensor = sparse_tensor.SparseTensor( values=[101.], indices=[[0, 0]], dense_shape=[1, 1]) transformation_cache = fc.FeatureTransformationCache({ 'a_int': int_tensor, 'a_string': string_tensor, 'a_float': float_tensor }) transformation_cache.get(string_fc, None) transformation_cache.get(int_fc, None) with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'): transformation_cache.get(float_fc, None) def test_dtype_should_match_with_tensor(self): hashed_sparse = fc.categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor}) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): transformation_cache.get(hashed_sparse, None) @test_util.run_deprecated_v1 def test_ints_should_be_hashed(self): hashed_sparse = fc.categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=[101, 
201, 301], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor}) output = transformation_cache.get(hashed_sparse, None) # Check exact hashed output. If hashing changes this test will break. expected_values = [3, 7, 5] self.assertAllEqual(expected_values, self.evaluate(output.values)) @test_util.run_deprecated_v1 def test_int32_64_is_compatible(self): hashed_sparse = fc.categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=constant_op.constant([101, 201, 301], dtype=dtypes.int32), indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor}) output = transformation_cache.get(hashed_sparse, None) # Check exact hashed output. If hashing changes this test will break. expected_values = [3, 7, 5] self.assertAllEqual(expected_values, self.evaluate(output.values)) @test_util.run_deprecated_v1 def test_get_sparse_tensors(self): hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10) transformation_cache = fc.FeatureTransformationCache({ 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) }) id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache, None) self.assertIsNone(id_weight_pair.weight_tensor) self.assertEqual( transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor) @test_util.run_deprecated_v1 def test_get_sparse_tensors_dense_input(self): hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10) transformation_cache = fc.FeatureTransformationCache({ 'wire': (('omar', ''), ('stringer', 'marlo')) }) id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache, None) self.assertIsNone(id_weight_pair.weight_tensor) self.assertEqual( transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor) 
  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """LinearModel over a hashed column gathers weights by hashed id."""
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((wire_column,))
      predictions = model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      })
      wire_var, bias = model.variables

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Freshly initialized model predicts all zeros.
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 3: wire_var[3] = 4
      # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
      self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    """Same as test_linear_model, via the v1 fc_old.linear_model API."""
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 3: wire_var[3] = 4
      # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
      self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    # HashedCategoricalColumn round-trips through its config dict.
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(['wire'], wire_column.parents)

    config = wire_column._get_config()
    self.assertEqual({
        'key': 'wire',
        'hash_bucket_size': 4,
        'dtype': 'string'
    }, config)

    self.assertEqual(wire_column,
                     fc.HashedCategoricalColumn._from_config(config))


class CrossedColumnTest(test.TestCase):
  """Tests for fc.crossed_column construction, naming, and transformation."""

  def test_keys_empty(self):
    with self.assertRaisesRegexp(ValueError,
                                 'keys must be a list with length > 1'):
      fc.crossed_column([], 10)

  def test_keys_length_one(self):
    with self.assertRaisesRegexp(ValueError,
                                 'keys must be a list with length > 1'):
      fc.crossed_column(['a'], 10)

  def test_key_type_unsupported(self):
    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
      fc.crossed_column(['a', fc.numeric_column('c')], 10)

    with self.assertRaisesRegexp(
        ValueError, 'categorical_column_with_hash_bucket is not supported'):
      fc.crossed_column(
          ['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)

  def test_hash_bucket_size_negative(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], -1)

  def test_hash_bucket_size_zero(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], 0)

  def test_hash_bucket_size_none(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], None)

  def test_name(self):
    """The crossed column's name is the _X_-joined, sorted leaf-key names."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    self.assertTrue(crossed1._is_v2_column)

    crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
    self.assertTrue(crossed2._is_v2_column)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_is_v2_column(self):
    """A cross containing a v1 (old-style) source column is not a v2 column."""
    a = fc_old._numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    self.assertTrue(crossed1._is_v2_column)

    crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
    self.assertFalse(crossed2._is_v2_column)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)

    crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_leaf_keys_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d2', 'c'], 10)

    crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_parse_spec(self):
    """The parse spec is the union of the source columns' specs."""
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 10)
    self.assertEqual({
        'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
        'c': parsing_ops.VarLenFeature(dtypes.string),
    }, crossed.parse_example_spec)

  def test_num_buckets(self):
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 15)
    self.assertEqual(15, crossed.num_buckets)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    """copy.deepcopy preserves name, bucket size, and hash key."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    crossed2_copy = copy.deepcopy(crossed2)
    self.assertEqual(
        'a_bucketized_X_c_X_d1_X_d2',
        crossed2_copy.name,
    )
    self.assertEqual(15, crossed2_copy.hash_bucket_size)
    self.assertEqual(5, crossed2_copy.hash_key)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    """A crossed column's source features parse from a serialized tf.Example."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.])),
                'wire':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer'])),
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([price_cross_wire]))
    self.assertIn('price', features)
    self.assertIn('wire', features)

    self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))
    wire_sparse = features['wire']
    self.assertAllEqual([[0, 0], [0, 1]], self.evaluate(wire_sparse.indices))
    # Use byte constants to pass the open-source test.
    self.assertAllEqual([b'omar', b'stringer'],
                        self.evaluate(wire_sparse.values))
    self.assertAllEqual([1, 2], self.evaluate(wire_sparse.dense_shape))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    """The cross of a bucketized and a sparse column stays in bucket range."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    hash_bucket_size = 10
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'],
                                         hash_bucket_size)
    features = {
        'price':
            constant_op.constant([[1., 2.], [5., 6.]]),
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2]),
    }
    outputs = fc._transform_features_v2(features, [price_cross_wire], None)
    output = outputs[price_cross_wire]

    output_val = self.evaluate(output)
    self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]],
                        output_val.indices)
    # Hashed values are bucketed; only range membership is deterministic here.
    for val in output_val.values:
      self.assertIn(val, list(range(hash_bucket_size)))
    self.assertAllEqual([2, 4], output_val.dense_shape)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a,
                             boundaries=(0, 1))
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
          'd1':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d1A', 'd1B', 'd1C'],
                  dense_shape=(2, 2)),
          'd2':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d2A', 'd2B', 'd2C'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
      # Row 0 crosses 1 id; row 1 crosses combinations of 2/3 source values,
      # hence 16 entries in row 1.
      self.assertAllEqual(((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3),
                           (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9),
                           (1, 10), (1, 11), (1, 12), (1, 13), (1, 14),
                           (1, 15)), id_tensor_eval.indices)
      # Check exact hashed output. If hashing changes this test will break.
      # All values are within [0, hash_bucket_size).
      expected_values = (6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0,
                         10, 11)
      self.assertAllEqual(expected_values, id_tensor_eval.values)
      self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)

  def test_get_sparse_tensors_simple(self):
    """Same as test_get_sparse_tensors, but with simpler values."""
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
      self.assertAllEqual(((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
                          id_tensor_eval.indices)
      # Check exact hashed output. If hashing changes this test will break.
      # All values are within [0, hash_bucket_size).
      expected_values = (1, 0, 1, 3, 4, 2)
      self.assertAllEqual(expected_values, id_tensor_eval.values)
      self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
""" a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5) with ops.Graph().as_default(): model = fc.LinearModel((crossed,)) predictions = model({ 'a': constant_op.constant(((-1., .5), (.5, 1.))), 'c': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=['cA', 'cB', 'cC'], dense_shape=(2, 2)), }) crossed_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)), self.evaluate(crossed_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,)))) # Expected ids after cross = (1, 0, 1, 3, 4, 2) self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions)) sess.run(bias.assign((.1,))) self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions)) def test_linear_model_with_weights(self): class _TestColumnWithWeights(BaseFeatureColumnForTests, fc.CategoricalColumn): """Produces sparse IDs and sparse weights.""" @property def _is_v2_column(self): return True @property def name(self): return 'test_column' @property def parse_example_spec(self): return { self.name: parsing_ops.VarLenFeature(dtypes.int32), '{}_weights'.format(self.name): parsing_ops.VarLenFeature(dtypes.float32), } @property def num_buckets(self): return 5 def transform_feature(self, transformation_cache, state_manager): return (transformation_cache.get(self.name, state_manager), transformation_cache.get('{}_weights'.format(self.name), state_manager)) def get_sparse_tensors(self, transformation_cache, state_manager): """Populates both id_tensor and weight_tensor.""" ids_and_weights = transformation_cache.get(self, state_manager) return fc.CategoricalColumn.IdWeightPair( id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1]) t = _TestColumnWithWeights() crossed = 
fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5) with ops.Graph().as_default(): with self.assertRaisesRegexp( ValueError, 'crossed_column does not support weight_tensor.*{}'.format(t.name)): model = fc.LinearModel((crossed,)) model({ t.name: sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=[0, 1, 2], dense_shape=(2, 2)), '{}_weights'.format(t.name): sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=[1., 10., 2.], dense_shape=(2, 2)), 'c': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=['cA', 'cB', 'cC'], dense_shape=(2, 2)), }) def test_old_linear_model(self): """Tests linear_model. Uses data from test_get_sparse_tesnsors_simple. """ a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5) with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'a': constant_op.constant(((-1., .5), (.5, 1.))), 'c': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=['cA', 'cB', 'cC'], dense_shape=(2, 2)), }, (crossed,)) bias = get_linear_model_bias() crossed_var = get_linear_model_column_var(crossed) with _initialized_session() as sess: self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)), self.evaluate(crossed_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,)))) # Expected ids after cross = (1, 0, 1, 3, 4, 2) self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions)) sess.run(bias.assign((.1,))) self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions)) def test_old_linear_model_with_weights(self): class _TestColumnWithWeights(BaseFeatureColumnForTests, fc.CategoricalColumn, fc_old._CategoricalColumn): """Produces sparse IDs and sparse weights.""" @property def _is_v2_column(self): return True @property def name(self): 
        return 'test_column'

      @property
      def parse_example_spec(self):
        return {
            self.name:
                parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name):
                parsing_ops.VarLenFeature(dtypes.float32),
        }

      @property
      def _parse_example_spec(self):
        return self.parse_example_spec

      @property
      def num_buckets(self):
        return 5

      @property
      def _num_buckets(self):
        return self.num_buckets

      def transform_feature(self, transformation_cache, state_manager):
        # Only the old-API path (_transform_feature) should be exercised.
        raise ValueError('Should not be called.')

      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))

      def get_sparse_tensors(self, transformation_cache, state_manager):
        # Only the old-API path (_get_sparse_tensors) should be exercised.
        raise ValueError('Should not be called.')

      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return fc.CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])

    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        fc_old.linear_model({
            t.name:
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[0, 1, 2],
                    dense_shape=(2, 2)),
            '{}_weights'.format(t.name):
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[1., 10., 2.],
                    dense_shape=(2, 2)),
            'c':
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=['cA', 'cB', 'cC'],
                    dense_shape=(2, 2)),
        }, (crossed,))

  def test_old_linear_model_old_numeric(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
""" a = fc_old._numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5) with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'a': constant_op.constant(((-1., .5), (.5, 1.))), 'c': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=['cA', 'cB', 'cC'], dense_shape=(2, 2)), }, (crossed,)) bias = get_linear_model_bias() crossed_var = get_linear_model_column_var(crossed) with _initialized_session() as sess: self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)), self.evaluate(crossed_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,)))) # Expected ids after cross = (1, 0, 1, 3, 4, 2) self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions)) sess.run(bias.assign((.1,))) self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_serialization(self): a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5) self.assertEqual([b, 'c'], crossed.parents) config = crossed._get_config() self.assertEqual({ 'hash_bucket_size': 5, 'hash_key': 5, 'keys': ({ 'config': { 'boundaries': (0, 1), 'source_column': { 'config': { 'dtype': 'int32', 'default_value': None, 'key': 'a', 'normalizer_fn': None, 'shape': (2,) }, 'class_name': 'NumericColumn' } }, 'class_name': 'BucketizedColumn' }, 'c') }, config) new_crossed = fc.CrossedColumn._from_config(config) self.assertEqual(crossed, new_crossed) self.assertIsNot(b, new_crossed.keys[0]) new_crossed = fc.CrossedColumn._from_config( config, columns_by_name={b.name: b}) self.assertEqual(crossed, new_crossed) self.assertIs(b, new_crossed.keys[0]) class LinearModelTest(test.TestCase): def 
test_raises_if_empty_feature_columns(self): with self.assertRaisesRegexp(ValueError, 'feature_columns must not be empty'): fc.LinearModel(feature_columns=[]) def test_should_be_feature_column(self): with self.assertRaisesRegexp(ValueError, 'must be a FeatureColumn'): fc.LinearModel(feature_columns='NotSupported') def test_should_be_dense_or_categorical_column(self): class NotSupportedColumn(BaseFeatureColumnForTests): @property def _is_v2_column(self): return True @property def name(self): return 'NotSupportedColumn' def transform_feature(self, transformation_cache, state_manager): pass @property def parse_example_spec(self): pass with self.assertRaisesRegexp( ValueError, 'must be either a DenseColumn or CategoricalColumn'): fc.LinearModel(feature_columns=[NotSupportedColumn()]) def test_does_not_support_dict_columns(self): with self.assertRaisesRegexp( ValueError, 'Expected feature_columns to be iterable, found dict.'): fc.LinearModel(feature_columns={'a': fc.numeric_column('a')}) def test_raises_if_duplicate_name(self): with self.assertRaisesRegexp( ValueError, 'Duplicate feature column name found for columns'): fc.LinearModel( feature_columns=[fc.numeric_column('a'), fc.numeric_column('a')]) def test_not_dict_input_features(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = [[1.], [5.]] model = fc.LinearModel([price]) with self.assertRaisesRegexp(ValueError, 'We expected a dictionary here'): model(features) def test_dense_bias(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} model = fc.LinearModel([price]) predictions = model(features) price_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) sess.run(price_var.assign([[10.]])) sess.run(bias.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions)) def test_sparse_bias(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) 
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast])
      predictions = model(features)
      wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(wire_cast_var))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_and_sparse_bias(self):
    """Dense and sparse columns contribute additively to the prediction."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      model = fc.LinearModel([wire_cast, price])
      predictions = model(features)
      price_var, wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))

  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""

    class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn,
                                fc.CategoricalColumn):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'dense_and_sparse_column'

      @property
      def parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}

      def transform_feature(self, transformation_cache, state_manager):
        return transformation_cache.get(self.name, state_manager)

      @property
      def variable_shape(self):
        # Dense-path accessors must not be used when sparse is preferred.
        raise ValueError('Should not use this method.')

      def get_dense_tensor(self, transformation_cache, state_manager):
        raise ValueError('Should not use this method.')

      @property
      def num_buckets(self):
        return 4

      def get_sparse_tensors(self, transformation_cache, state_manager):
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return fc.CategoricalColumn.IdWeightPair(sp_tensor, None)

    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      model = fc.LinearModel([dense_and_sparse_column])
      predictions = model(features)
      dense_and_sparse_column_var, bias = model.variables
      with _initialized_session() as sess:
        sess.run(
            dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
                                                [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_multi_output(self):
    """units=3 gives a (1, 3) weight and a length-3 bias per dense feature."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      model = fc.LinearModel([price], units=3)
      predictions = model(features)
      price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[10., 100., 1000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
                            self.evaluate(predictions))

  def test_sparse_multi_output(self):
    """units=3 gives a (num_buckets, 3) weight matrix for a sparse feature."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast], units=3)
      predictions = model(features)
      wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
                                  [1000., 1100., 1200.],
                                  [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            self.evaluate(predictions))

  def test_dense_multi_dimension(self):
    """A shape-2 dense column multiplies against a (2, 1) weight."""
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      model = fc.LinearModel([price])
      predictions = model(features)
      price_var, _ = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_sparse_multi_rank(self):
    """Rank-3 sparse input is handled per batch row in the lookup."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast])
      predictions = model(features)
      wire_cast_var, _ = model.variables
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))

  def test_sparse_combiner(self):
    """sparse_combiner='mean' averages the weights of a row's ids."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar',
                  'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast], sparse_combiner='mean')
      predictions = model(features)
      wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 1 has two ids (0 and 3): mean(10, 10000) + 5 = 5010.
        self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))

  def test_sparse_combiner_sqrtn(self):
    """sparse_combiner='sqrtn' divides the row sum by sqrt(#ids)."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast], sparse_combiner='sqrtn')
      predictions = model(features)
      wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        self.evaluate(wire_cast_var.assign([[10.], [100.], [1000.],
                                            [10000.]]))
        self.evaluate(bias.assign([5.]))
        self.assertAllClose([[1005.], [7083.139]], self.evaluate(predictions))

  def test_sparse_combiner_with_negative_weights(self):
    """The 'sum' combiner honors negative weights from a weighted column."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights')

    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {
          'wire_cast': wire_tensor,
          'weights': constant_op.constant([[1., 1., -1.0]])
      }
      model = fc.LinearModel([wire_cast_weights], sparse_combiner='sum')
      predictions = model(features)
      wire_cast_var, bias = model.variables
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions))

  def test_dense_multi_dimension_multi_output(self):
    """shape=2 together with units=3 gives a (2, 3) weight matrix."""
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      model = fc.LinearModel([price], units=3)
      predictions = model(features)
      price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            self.evaluate(predictions))

  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        model = fc.LinearModel([price])
        model(features)

  def test_dense_reshaping(self):
    """A [1, 2]-shaped dense feature is flattened to 2 weights per row."""
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      model = fc.LinearModel([price])
      predictions = model(features)
      price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_dense_multi_column(self):
    """Two dense columns each get their own weight variable."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      model = fc.LinearModel([price1, price2])
      predictions = model(features)
      price1_var, price2_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
        self.assertAllClose([[0.]],
                            self.evaluate(price2_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))

  def test_dense_trainable_default(self):
    """Model variables are trainable by default (dense column)."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      model = fc.LinearModel([price])
      model(features)
      price_var, bias = model.variables
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertIn(bias, trainable_vars)
      self.assertIn(price_var, trainable_vars)

  def test_sparse_trainable_default(self):
    """Model variables are trainable by default (sparse column)."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast])
      model(features)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      wire_cast_var, bias = model.variables
      self.assertIn(bias, trainable_vars)
      self.assertIn(wire_cast_var, trainable_vars)

  def test_dense_trainable_false(self):
    """trainable=False keeps variables out of the TRAINABLE collection."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      model = fc.LinearModel([price], trainable=False)
      model(features)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_sparse_trainable_false(self):
    """trainable=False keeps variables out of the TRAINABLE collection."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      model = fc.LinearModel([wire_cast], trainable=False)
      model(features)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_column_order(self):
    """Variables are created in sorted column order, not input order."""
    price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b') wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } model = fc.LinearModel([price_a, wire_cast, price_b]) model(features) my_vars = model.variables self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) with ops.Graph().as_default(): features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } model = fc.LinearModel([wire_cast, price_b, price_a]) model(features) my_vars = model.variables self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) def test_variable_names(self): price1 = fc.numeric_column('price1') dense_feature = fc.numeric_column('dense_feature') dense_feature_bucketized = fc.bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) all_cols = [price1, dense_feature_bucketized, some_embedding_column] with ops.Graph().as_default(): model = fc.LinearModel(all_cols) features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } model(features) for var in model.variables: self.assertIsInstance(var, variables_lib.VariableV1) variable_names = [var.name for var in model.variables] self.assertItemsEqual([ 'linear_model/dense_feature_bucketized/weights:0', 'linear_model/price1/weights:0', 'linear_model/sparse_feature_embedding/embedding_weights:0', 'linear_model/sparse_feature_embedding/weights:0', 'linear_model/bias_weights:0', ], variable_names) def test_fit_and_predict(self): columns = 
[fc.numeric_column('a')] model = fc.LinearModel(columns) model.compile( optimizer=rmsprop.RMSPropOptimizer(1e-3), loss='categorical_crossentropy', metrics=['accuracy']) x = {'a': np.random.random((10, 1))} y = np.random.randint(20, size=(10, 1)) y = keras.utils.to_categorical(y, num_classes=20) model.fit(x, y, epochs=1, batch_size=5) model.fit(x, y, epochs=1, batch_size=5) model.evaluate(x, y, batch_size=5) model.predict(x, batch_size=5) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string model = fc.LinearModel([price1, price2]) model(features) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string model = fc.LinearModel([price1, price2, price3]) model(features) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } model = fc.LinearModel([price1, price2]) predictions = model(features) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'must have the same size and shape'): sess.run( predictions, 
            feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    # Companion to test_runtime_batch_size_mismatch: equal feed-time batch
    # sizes across features must run without raising.
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      model = fc.LinearModel([price1, price2])
      predictions = model(features)
      with _initialized_session() as sess:
        # Only checks that sess.run succeeds; no value assertion needed.
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  @test_util.run_deprecated_v1
  def test_with_numpy_input_fn(self):
    # End-to-end: a numpy_io input pipeline (queue-runner based) feeding a
    # bucketized column and a vocabulary column through LinearModel.
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])

    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([-1., 2., 13., 104.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    model = fc.LinearModel([price_buckets, body_style])
    net = model(features)
    # NOTE(review): dead commented-out assertion kept as-is from the original.
    # self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # numpy_input_fn enqueues via queue runners; start them and stop/join
      # before the session closes.
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)

      # Variable order observed from model.variables in this test.
      body_style_var, price_buckets_var, bias = model.variables
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      # First batch of 2: price -1. -> bucket 0 (10), 'sedan' -> -1000;
      # price 2. -> bucket 1 (100), 'hardtop' -> -10.
      self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]],
                          self.evaluate(net))

      coord.request_stop()
      coord.join(threads)

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([
            -1.,
            12.,
        ]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)), values=('sedan', 'hardtop'),
            dense_shape=(2,)),
    }
    # Inputs are deliberately rank-1 (not the usual [batch, 1]) to exercise
    # the rank-expansion path.
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])

    model = fc.LinearModel([price_buckets, body_style])
    net = model(features)
    with _initialized_session() as sess:
      # Variable order observed from model.variables in this test.
      body_style_var, price_buckets_var, bias = model.variables

      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      # Row 0: price -1. -> bucket 0 (10), 'sedan' -> -1000;
      # row 1: price 12. -> bucket 2 (1000), 'hardtop' -> -10.
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])

    # Provides 1-dim tensor and dense tensor.
features = { 'price': array_ops.placeholder(dtypes.float32), 'body-style': array_ops.sparse_placeholder(dtypes.string), 'country': array_ops.placeholder(dtypes.string), } self.assertIsNone(features['price'].shape.ndims) self.assertIsNone(features['body-style'].get_shape().ndims) price_data = np.array([-1., 12.]) body_style_data = sparse_tensor.SparseTensorValue( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)) country_data = np.array(['US', 'CA']) model = fc.LinearModel([price_buckets, body_style, country]) net = model(features) body_style_var, _, price_buckets_var, bias = model.variables with _initialized_session() as sess: sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run( net, feed_dict={ features['price']: price_data, features['body-style']: body_style_data, features['country']: country_data })) @test_util.run_deprecated_v1 def test_with_rank_0_feature(self): price = fc.numeric_column('price') features = { 'price': constant_op.constant(0), } self.assertEqual(0, features['price'].shape.ndims) # Static rank 0 should fail with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'): model = fc.LinearModel([price]) model(features) # Dynamic rank 0 should fail features = { 'price': array_ops.placeholder(dtypes.float32), } model = fc.LinearModel([price]) net = model(features) self.assertEqual(1, net.shape[1]) with _initialized_session() as sess: with self.assertRaisesOpError('Feature .* cannot have rank 0'): sess.run(net, feed_dict={features['price']: np.array(1)}) def test_multiple_linear_models(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features1 = {'price': [[1.], [5.]]} features2 = {'price': [[2.], [10.]]} model1 = fc.LinearModel([price]) model2 = fc.LinearModel([price]) predictions1 = model1(features1) predictions2 = 
model2(features2) price_var1, bias1 = model1.variables price_var2, bias2 = model2.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias1)) sess.run(price_var1.assign([[10.]])) sess.run(bias1.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions1)) self.assertAllClose([0.], self.evaluate(bias2)) sess.run(price_var2.assign([[10.]])) sess.run(bias2.assign([5.])) self.assertAllClose([[25.], [105.]], self.evaluate(predictions2)) class OldLinearModelTest(test.TestCase): def test_raises_if_empty_feature_columns(self): with self.assertRaisesRegexp(ValueError, 'feature_columns must not be empty'): fc_old.linear_model(features={}, feature_columns=[]) def test_should_be_feature_column(self): with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'): fc_old.linear_model(features={'a': [[0]]}, feature_columns='NotSupported') def test_should_be_dense_or_categorical_column(self): class NotSupportedColumn(BaseFeatureColumnForTests, fc.FeatureColumn, fc_old._FeatureColumn): @property def _is_v2_column(self): return True @property def name(self): return 'NotSupportedColumn' def transform_feature(self, transformation_cache, state_manager): pass def _transform_feature(self, inputs): pass @property def parse_example_spec(self): pass @property def _parse_example_spec(self): pass with self.assertRaisesRegexp( ValueError, 'must be either a _DenseColumn or _CategoricalColumn'): fc_old.linear_model( features={'a': [[0]]}, feature_columns=[NotSupportedColumn()]) def test_does_not_support_dict_columns(self): with self.assertRaisesRegexp( ValueError, 'Expected feature_columns to be iterable, found dict.'): fc_old.linear_model( features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')}) def test_raises_if_duplicate_name(self): with self.assertRaisesRegexp( ValueError, 'Duplicate feature column name found for columns'): fc_old.linear_model( features={'a': [[0]]}, feature_columns=[fc.numeric_column('a'), 
                         fc.numeric_column('a')])

  def test_dense_bias(self):
    # Single numeric column: prediction = price * weight + bias.
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc_old.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        # Weights start at zero before assignment.
        self.assertAllClose([0.], self.evaluate(bias))
        sess.run(price_var.assign([[10.]]))
        sess.run(bias.assign([5.]))
        # price * 10 + 5 for price in [1., 5.].
        self.assertAllClose([[15.], [55.]], self.evaluate(predictions))

  def test_sparse_bias(self):
    # Single sparse categorical column: prediction sums the weights of the
    # active hash buckets, plus bias.
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc_old.linear_model(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(wire_cast_var))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 0 hits bucket 2 (1000); row 1 hits buckets 0 and 3 (10 + 10000).
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_and_sparse_bias(self):
    # Mixes a sparse categorical column and a dense numeric column in the
    # same linear model.
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = fc_old.linear_model(features, [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.])) sess.run(price_var.assign([[10.]])) self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions)) def test_dense_and_sparse_column(self): """When the column is both dense and sparse, uses sparse tensors.""" class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn, fc.CategoricalColumn, fc_old._DenseColumn, fc_old._CategoricalColumn): @property def _is_v2_column(self): return True @property def name(self): return 'dense_and_sparse_column' @property def parse_example_spec(self): return {self.name: parsing_ops.VarLenFeature(self.dtype)} @property def _parse_example_spec(self): return self.parse_example_spec def transform_feature(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _transform_feature(self, inputs): return inputs.get(self.name) @property def variable_shape(self): return self.variable_shape @property def _variable_shape(self): return self.variable_shape def get_dense_tensor(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _get_dense_tensor(self, inputs): raise ValueError('Should not use this method.') @property def num_buckets(self): return 4 @property def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): sp_tensor = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0], [1, 1]], values=[2, 0, 3], dense_shape=[2, 2]) return fc.CategoricalColumn.IdWeightPair(sp_tensor, None) dense_and_sparse_column = _DenseAndSparseColumn() with ops.Graph().as_default(): sp_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {dense_and_sparse_column.name: sp_tensor} predictions = fc_old.linear_model(features, [dense_and_sparse_column]) bias = get_linear_model_bias() 
dense_and_sparse_column_var = get_linear_model_column_var( dense_and_sparse_column) with _initialized_session() as sess: sess.run( dense_and_sparse_column_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions)) def test_dense_multi_output(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} predictions = fc_old.linear_model(features, [price], units=3) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[10., 100., 1000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]], self.evaluate(predictions)) def test_sparse_multi_output(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model(features, [wire_cast], units=3) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var)) sess.run( wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [1000., 1100., 1200.], [10000., 11000., 12000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]], self.evaluate(predictions)) def test_dense_multi_dimension(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} predictions = 
fc_old.linear_model(features, [price]) price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], [650.]], self.evaluate(predictions)) def test_sparse_multi_rank(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = array_ops.sparse_placeholder(dtypes.string) wire_value = sparse_tensor.SparseTensorValue( values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2] indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]], dense_shape=[2, 2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model(features, [wire_cast]) wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var)) self.assertAllClose( np.zeros((2, 1)), predictions.eval(feed_dict={wire_tensor: wire_value})) sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) self.assertAllClose( [[1010.], [11000.]], predictions.eval(feed_dict={wire_tensor: wire_value})) def test_sparse_combiner(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model( features, [wire_cast], sparse_combiner='mean') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions)) def test_sparse_combiner_with_negative_weights(self): wire_cast = 
fc.categorical_column_with_hash_bucket('wire_cast', 4) wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights') with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = { 'wire_cast': wire_tensor, 'weights': constant_op.constant([[1., 1., -1.0]]) } predictions = fc_old.linear_model( features, [wire_cast_weights], sparse_combiner='sum') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions)) def test_dense_multi_dimension_multi_output(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} predictions = fc_old.linear_model(features, [price], units=3) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]])) sess.run(bias.assign([2., 3., 4.])) self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]], self.evaluate(predictions)) def test_raises_if_shape_mismatch(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} with self.assertRaisesRegexp( Exception, r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'): fc_old.linear_model(features, [price]) def test_dense_reshaping(self): price = fc.numeric_column('price', shape=[1, 2]) with ops.Graph().as_default(): features = {'price': [[[1., 2.]], [[5., 6.]]]} predictions = fc_old.linear_model(features, [price]) bias = get_linear_model_bias() price_var = 
get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], [650.]], self.evaluate(predictions)) def test_dense_multi_column(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} predictions = fc_old.linear_model(features, [price1, price2]) bias = get_linear_model_bias() price1_var = get_linear_model_column_var(price1) price2_var = get_linear_model_column_var(price2) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price1_var)) self.assertAllClose([[0.]], self.evaluate(price2_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price1_var.assign([[10.], [100.]])) sess.run(price2_var.assign([[1000.]])) sess.run(bias.assign([7.])) self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions)) def test_fills_cols_to_vars(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} cols_to_vars = {} fc_old.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars) bias = get_linear_model_bias() price1_var = get_linear_model_column_var(price1) price2_var = get_linear_model_column_var(price2) self.assertAllEqual(cols_to_vars['bias'], [bias]) self.assertAllEqual(cols_to_vars[price1], [price1_var]) self.assertAllEqual(cols_to_vars[price2], [price2_var]) def test_fills_cols_to_vars_partitioned_variables(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2', shape=3) with ops.Graph().as_default(): features = { 
'price1': [[1., 2.], [6., 7.]], 'price2': [[3., 4., 5.], [8., 9., 10.]] } cols_to_vars = {} with variable_scope.variable_scope( 'linear', partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)): fc_old.linear_model( features, [price1, price2], cols_to_vars=cols_to_vars) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertEqual([0.], self.evaluate(cols_to_vars['bias'][0])) # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables. self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][0])) self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][1])) # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and # a [1, 1] Variable. self.assertAllEqual([[0.], [0.]], self.evaluate(cols_to_vars[price2][0])) self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price2][1])) def test_fills_cols_to_output_tensors(self): # Provide three _DenseColumn's to input_layer: a _NumericColumn, a # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn # creates a Variable. 
apple_numeric_column = fc.numeric_column('apple_numeric_column') banana_dense_feature = fc.numeric_column('banana_dense_feature') banana_dense_feature_bucketized = fc.bucketized_column( banana_dense_feature, boundaries=[0.]) cherry_sparse_column = fc.categorical_column_with_hash_bucket( 'cherry_sparse_feature', hash_bucket_size=5) dragonfruit_embedding_column = fc.embedding_column( cherry_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'apple_numeric_column': [[3.], [4.]], 'banana_dense_feature': [[-1.], [4.]], 'cherry_sparse_feature': [['a'], ['x']], } cols_to_output_tensors = {} all_cols = [ apple_numeric_column, banana_dense_feature_bucketized, dragonfruit_embedding_column ] input_layer = fc_old.input_layer( features, all_cols, cols_to_output_tensors=cols_to_output_tensors) # We check the mapping by checking that we have the right keys, # and that the values (output_tensors) were indeed the ones used to # form the input layer. self.assertItemsEqual(all_cols, cols_to_output_tensors.keys()) input_layer_inputs = [tensor for tensor in input_layer.op.inputs[:-1]] output_tensors = [tensor for tensor in cols_to_output_tensors.values()] self.assertItemsEqual(input_layer_inputs, output_tensors) def test_dense_collection(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) self.assertIn(bias, my_vars) self.assertIn(price_var, my_vars) def test_sparse_collection(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast], weight_collections=['my-vars']) my_vars = 
g.get_collection('my-vars') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) self.assertIn(bias, my_vars) self.assertIn(wire_cast_var, my_vars) def test_dense_trainable_default(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertIn(bias, trainable_vars) self.assertIn(price_var, trainable_vars) def test_sparse_trainable_default(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast]) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) self.assertIn(bias, trainable_vars) self.assertIn(wire_cast_var, trainable_vars) def test_dense_trainable_false(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price], trainable=False) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_sparse_trainable_false(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast], trainable=False) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = 
fc.numeric_column('price_b') wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } fc_old.linear_model( features, [price_a, wire_cast, price_b], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) with ops.Graph().as_default() as g: features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } fc_old.linear_model( features, [wire_cast, price_b, price_a], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.linear_model(features, [price1, price2]) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.linear_model(features, [price1, 
                                               price2, price3])

  def test_runtime_batch_size_mismatch(self):
    """Batch-size mismatch only visible at run time raises an OpError."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = fc_old.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        # Graph construction succeeds; the mismatch is detected when the
        # placeholder is fed with batch size 3 against the static batch of 2.
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    """Feeding equal run-time batch sizes executes without error."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      predictions = fc_old.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    """linear_model accepts 1-d dense and 1-d sparse feature tensors."""
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                -1.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])

    net = fc_old.linear_model(features, [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)

      # Assign known weights so the expected predictions can be computed by
      # hand: bucket weight + vocabulary weight + bias.
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """linear_model works when feature ranks are unknown at graph-build time."""
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    # Placeholders deliberately carry no static shape information.
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)

    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array(['US', 'CA'])

    net = fc_old.linear_model(features, [price_buckets, body_style, country])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          sess.run(
                              net,
                              feed_dict={
                                  features['price']: price_data,
                                  features['body-style']: body_style_data,
                                  features['country']: country_data
                              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    """Rank-0 features are rejected both statically and at run time."""
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc_old.linear_model(features, [price])

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc_old.linear_model(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})

  def test_multiple_linear_models(self):
    """Two linear_model calls create independent variable scopes/weights."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features1 = {'price': [[1.], [5.]]}
      features2 = {'price': [[2.], [10.]]}
      predictions1 = fc_old.linear_model(features1, [price])
      predictions2 = fc_old.linear_model(features2, [price])
      # The second call gets a uniquified scope name ('linear_model_1').
      bias1 = get_linear_model_bias(name='linear_model')
      bias2 = get_linear_model_bias(name='linear_model_1')
      price_var1 = get_linear_model_column_var(price, name='linear_model')
      price_var2 = get_linear_model_column_var(price, name='linear_model_1')
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias1))
        sess.run(price_var1.assign([[10.]]))
        sess.run(bias1.assign([5.]))
        self.assertAllClose([[15.], [55.]], self.evaluate(predictions1))
        self.assertAllClose([0.], self.evaluate(bias2))
        sess.run(price_var2.assign([[10.]]))
        sess.run(bias2.assign([5.]))
        self.assertAllClose([[25.], [105.]], self.evaluate(predictions2))

  @test_util.run_deprecated_v1
  def test_linear_model_v1_shared_embedding_all_other_v2(self):
    """v1 shared-embedding columns mixed with otherwise-v2 columns build OK."""
    price = fc.numeric_column('price')  # v2
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)  # v2
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)  # v2
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)  # v2
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)  # v2
    shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b], dimension=2)  # v1
    all_cols = [
        price, some_embedding_column, shared_embedding_a, shared_embedding_b
    ]

    with ops.Graph().as_default():
      features = {
          'price': [[3.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc_old.linear_model(features, all_cols)
      bias = get_linear_model_bias()

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Bias starts zero-initialized, confirming the model built successfully.
      self.assertAllClose([0.], self.evaluate(bias))

  @test_util.run_deprecated_v1
  def test_linear_model_v1_shared_embedding_with_v2_cat_all_other_v2(self):
    """v1 shared embeddings over v2 categorical columns build OK."""
    price = fc.numeric_column('price')  # v2
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)  # v2
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)  # v2
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)  # v2
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)  # v2
    shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b], dimension=2)  # v1
    all_cols = [
        price, some_embedding_column, shared_embedding_a, shared_embedding_b
    ]

    with ops.Graph().as_default():
      features = {
          'price': [[3.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc_old.linear_model(features, all_cols)
      bias = get_linear_model_bias()

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([0.], self.evaluate(bias))

  @test_util.run_deprecated_v1
  def test_linear_model_v1_v2_mix(self):
    """A mix of v1 and v2 columns is accepted by the v1 linear_model."""
    price = fc.numeric_column('price')  # v2
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)  # v1
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)  # v1
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)  # v2
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)  # v2
    shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b], dimension=2)  # v1
    all_cols = [
        price, some_embedding_column, shared_embedding_a, shared_embedding_b
    ]

    with ops.Graph().as_default():
      features = {
          'price': [[3.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc_old.linear_model(features, all_cols)
      bias = get_linear_model_bias()

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([0.], self.evaluate(bias))

  @test_util.run_deprecated_v1
  def test_linear_model_v2_shared_embedding_all_other_v1(self):
    """v2 shared-embedding columns are rejected by the v1 linear_model."""
    price = fc.numeric_column('price')  # v1
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)  # v1
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)  # v1
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)  # v2
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)  # v2
    shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns_v2(
        [categorical_column_a, categorical_column_b], dimension=2)  # v2
    all_cols = [
        price, some_embedding_column, shared_embedding_a, shared_embedding_b
    ]

    with ops.Graph().as_default():
      features = {
          'price': [[3.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      with self.assertRaisesRegexp(ValueError,
                                   'SharedEmbeddingColumns are not supported'):
        fc_old.linear_model(features, all_cols)


class DenseFeaturesTest(test.TestCase):
  """Tests for the v2 `fc.DenseFeatures` layer."""

  @test_util.run_in_graph_and_eager_modes()
  def test_retrieving_input(self):
    """A single numeric column passes its input straight through."""
    features = {'a': [0.]}
    dense_features = fc.DenseFeatures(fc.numeric_column('a'))
    inputs = self.evaluate(dense_features(features))
    self.assertAllClose([[0.]], inputs)

  def test_reuses_variables(self):
    """Repeated calls on the same layer reuse the embedding variable."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        # Deterministic initializer so the looked-up rows are predictable.
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      dense_features = fc.DenseFeatures([embedding_column])
      features = {'a': sparse_input}

      inputs = dense_features(features)
      variables = dense_features.variables

      # Sanity check: test that the inputs are correct.
      self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

      # Check that only one variable was created.
      self.assertEqual(1, len(variables))

      # Check that invoking dense_features on the same features does not create
      # additional variables
      _ = dense_features(features)
      self.assertEqual(1, len(variables))
      self.assertEqual(variables[0], dense_features.variables[0])

  def test_feature_column_dense_features_gradient(self):
    """Gradients flow through DenseFeatures to the embedding variable."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      dense_features = fc.DenseFeatures([embedding_column])
      features = {'a': sparse_input}

      def scale_matrix():
        matrix = dense_features(features)
        return 2 * matrix

      # Sanity check: Verify that scale_matrix returns the correct output.
      self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

      # Check that the returned gradient is correct.
      grad_function = backprop.implicit_grad(scale_matrix)
      grads_and_vars = grad_function()
      indexed_slice = grads_and_vars[0][0]
      gradient = grads_and_vars[0][0].values

      self.assertAllEqual([0, 1, 2], indexed_slice.indices)
      self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)

  def test_raises_if_empty_feature_columns(self):
    """An empty feature_columns list is a ValueError."""
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc.DenseFeatures(feature_columns=[])(features={})

  def test_should_be_dense_column(self):
    """Categorical (non-dense) columns are rejected."""
    with self.assertRaisesRegexp(ValueError, 'must be a .*DenseColumn'):
      fc.DenseFeatures(feature_columns=[
          fc.categorical_column_with_hash_bucket('wire_cast', 4)
      ])(
          features={
              'a': [[0]]
          })

  def test_does_not_support_dict_columns(self):
    """Passing a dict (rather than an iterable) of columns is rejected."""
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})(
          features={
              'a': [[0]]
          })

  def test_bare_column(self):
    """A single column (not wrapped in a list) is accepted."""
    with ops.Graph().as_default():
      features = features = {'a': [0.]}
      net = fc.DenseFeatures(fc.numeric_column('a'))(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0.]], self.evaluate(net))

  def test_column_generator(self):
    """A generator of columns is accepted and ordered deterministically."""
    with ops.Graph().as_default():
      features = features = {'a': [0.], 'b': [1.]}
      columns = (fc.numeric_column(key) for key in features)
      net = fc.DenseFeatures(columns)(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0., 1.]], self.evaluate(net))

  def test_raises_if_duplicate_name(self):
    """Two columns with the same name raise a ValueError."""
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc.DenseFeatures(
          feature_columns=[fc.numeric_column('a'),
                           fc.numeric_column('a')])(
                               features={
                                   'a': [[0]]
                               })

  def test_one_column(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = fc.DenseFeatures([price])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = fc.DenseFeatures([price])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_compute_output_shape(self):
    """Output width is the sum of the column dimensions (2 + 4 = 6)."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2', shape=4)
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
      }
      dense_features = fc.DenseFeatures([price1, price2])
      self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
      net = dense_features(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose(
          [[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
          self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc.DenseFeatures([price])(features)

  def test_reshaping(self):
    """Multi-dim column input is flattened to (batch, total_elements)."""
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = fc.DenseFeatures([price])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_multi_column(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = fc.DenseFeatures([price1, price2])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_cols_to_output_tensors(self):
    """The optional dict argument captures each column's output tensor."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      cols_dict = {}
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      dense_features = fc.DenseFeatures([price1, price2])
      net = dense_features(features, cols_dict)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]],
                          self.evaluate(cols_dict[price1]))
      self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2]))
      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_column_order(self):
    """Output column order is canonical (by name), not construction order."""
    price_a = fc.numeric_column('price_a')
    price_b = fc.numeric_column('price_b')
    with ops.Graph().as_default():
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
      }
      net1 = fc.DenseFeatures([price_a, price_b])(features)
      net2 = fc.DenseFeatures([price_b, price_a])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Both orderings produce the same output layout.
      self.assertAllClose([[1., 3.]], self.evaluate(net1))
      self.assertAllClose([[1., 3.]], self.evaluate(net2))

  def test_fails_for_categorical_column(self):
    animal = fc.categorical_column_with_identity('animal', num_buckets=4)
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      with self.assertRaisesRegexp(Exception, 'must be a .*DenseColumn'):
        fc.DenseFeatures([animal])(features)

  def test_static_batch_size_mismatch(self):
    """Statically-known batch-size mismatch fails at graph-build time."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.DenseFeatures([price1, price2])(features)

  def test_subset_of_static_batch_size_mismatch(self):
    """A mismatch among the statically-known features is enough to fail."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    price3 = fc.numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.DenseFeatures([price1, price2, price3])(features)

  def test_runtime_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = fc.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        with self.assertRaisesRegexp(errors.OpError,
                                     'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      net = fc.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        sess.run(
            net,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  def test_multiple_layers_with_same_embedding_column(self):
    """Distinct DenseFeatures layers each create their own embedding var."""
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)

    with ops.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      fc.DenseFeatures(all_cols)(features)
      fc.DenseFeatures(all_cols)(features)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(2, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'dense_features/sparse_feature_embedding/embedding_weights:0',
          'dense_features_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column(self):
    """Shared-embedding columns share one variable across layers."""
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)

    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      fc.DenseFeatures(all_cols)(features)
      fc.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['aaa_bbb_shared_embedding:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    """Each graph gets its own single shared-embedding variable."""
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    all_cols = [embedding_column_a, embedding_column_b]

    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))

    with ops.Graph().as_default():
      features1 = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }

      fc.DenseFeatures(all_cols)(features1)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['aaa_bbb_shared_embedding:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_with_numpy_input_fn(self):
    """DenseFeatures composes with numpy_input_fn queue-based input."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # one_hot_body_style has 3 dims in dense_features.
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_body_style has 5 dims in dense_features.
    embedded_body_style = fc.embedding_column(
        body_style, dimension=5, initializer=_initializer)

    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([11., 12., 13., 14.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.DenseFeatures([price, one_hot_body_style, embedded_body_style])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual([[11., 12., 13., 14., 15., 0., 0., 1., 11.],
                           [1., 2., 3., 4., 5., 1., 0., 0., 12]],
                          sess.run(net))

      coord.request_stop()
      coord.join(threads)

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    """DenseFeatures accepts 1-d dense, sparse, and string-dense features."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                11.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country':
            constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """DenseFeatures works with rank-unknown placeholder features."""
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    """Rank-0 features are rejected statically and dynamically."""
    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.DenseFeatures([price])(features)

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.DenseFeatures([price])(features)
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})


class InputLayerTest(test.TestCase):
  """Tests for the v1 `fc_old.InputLayer` class."""

  @test_util.run_in_graph_and_eager_modes
  def test_retrieving_input(self):
    """A single numeric column passes its input straight through."""
    features = {'a': [0.]}
    input_layer = fc_old.InputLayer(fc.numeric_column('a'))
    inputs = self.evaluate(input_layer(features))
    self.assertAllClose([[0.]], inputs)

  def test_reuses_variables(self):
    """Repeated calls on the same InputLayer reuse the embedding variable."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = fc_old.InputLayer([embedding_column])
      features = {'a': sparse_input}

      inputs = input_layer(features)
      variables = input_layer.variables

      # Sanity check: test that the inputs are correct.
      self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

      # Check that only one variable was created.
      self.assertEqual(1, len(variables))

      # Check that invoking input_layer on the same features does not create
      # additional variables
      _ = input_layer(features)
      self.assertEqual(1, len(variables))
      self.assertEqual(variables[0], input_layer.variables[0])

  def test_feature_column_input_layer_gradient(self):
    """Gradients flow through InputLayer to the embedding variable."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = fc_old.InputLayer([embedding_column])
      features = {'a': sparse_input}

      def scale_matrix():
        matrix = input_layer(features)
        return 2 * matrix

      # Sanity check: Verify that scale_matrix returns the correct output.
      self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

      # Check that the returned gradient is correct.
      grad_function = backprop.implicit_grad(scale_matrix)
      grads_and_vars = grad_function()
      indexed_slice = grads_and_vars[0][0]
      gradient = grads_and_vars[0][0].values

      self.assertAllEqual([0, 1, 2], indexed_slice.indices)
      self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)


class FunctionalInputLayerTest(test.TestCase):
  """Tests for the functional v1 `fc_old.input_layer`."""

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc_old.input_layer(features={}, feature_columns=[])

  def test_should_be_dense_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
      fc_old.input_layer(
          features={'a': [[0]]},
          feature_columns=[
              fc.categorical_column_with_hash_bucket('wire_cast', 4)
          ])

  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc_old.input_layer(
          features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})

  def test_bare_column(self):
    with ops.Graph().as_default():
      features = features = {'a': [0.]}
      net = fc_old.input_layer(features, fc.numeric_column('a'))

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0.]], self.evaluate(net))

  def test_column_generator(self):
    with ops.Graph().as_default():
      features = features = {'a': [0.], 'b': [1.]}
      columns = (fc.numeric_column(key) for key in features)
      net = fc_old.input_layer(features, columns)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0., 1.]], self.evaluate(net))

  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc_old.input_layer(
          features={'a': [[0]]},
          feature_columns=[fc.numeric_column('a'),
                           fc.numeric_column('a')])

  def test_one_column(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc_old.input_layer(features, [price])

  def test_reshaping(self):
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_multi_column(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = fc_old.input_layer(features, [price1, price2])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_fills_cols_to_vars(self):
    # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn.  Only the _EmbeddingColumn
    # creates a Variable.
    price1 = fc.numeric_column('price1')
    dense_feature = fc.numeric_column('dense_feature')
    dense_feature_bucketized = fc.bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertIsInstance(cols_to_vars[some_embedding_column][0],
                            variables_lib.VariableV1)
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape,
                          [5, 10])

  @test_util.run_deprecated_v1
  def test_fills_cols_to_vars_shared_embedding(self):
    # Provide 5 DenseColumn's to input_layer: a NumericColumn, a
    # BucketizedColumn, an EmbeddingColumn, two SharedEmbeddingColumns. The
    # EmbeddingColumn creates a Variable and the two SharedEmbeddingColumns
    # shared one variable.
    price1 = fc.numeric_column('price1')
    dense_feature = fc.numeric_column('dense_feature')
    dense_feature_bucketized = fc.bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b], dimension=2)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      cols_to_vars = {}
      all_cols = [
          price1, dense_feature_bucketized, some_embedding_column,
          shared_embedding_a, shared_embedding_b
      ]
      fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertEqual(1, len(cols_to_vars[shared_embedding_a]))
      # This is a bug in the current implementation and should be fixed in the
      # new one.
self.assertEqual(0, len(cols_to_vars[shared_embedding_b])) self.assertIsInstance(cols_to_vars[some_embedding_column][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10]) self.assertIsInstance(cols_to_vars[shared_embedding_a][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2]) def test_fills_cols_to_vars_partitioned_variables(self): price1 = fc.numeric_column('price1') dense_feature = fc.numeric_column('dense_feature') dense_feature_bucketized = fc.bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } cols_to_vars = {} all_cols = [price1, dense_feature_bucketized, some_embedding_column] with variable_scope.variable_scope( 'input_from_feature_columns', partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)): fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertItemsEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(3, len(cols_to_vars[some_embedding_column])) self.assertEqual( 'input_from_feature_columns/input_layer/sparse_feature_embedding/' 'embedding_weights/part_0:0', cols_to_vars[some_embedding_column][0].name) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10]) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = fc.numeric_column('price_b') with ops.Graph().as_default(): features = { 'price_a': [[1.]], 
'price_b': [[3.]], } net1 = fc_old.input_layer(features, [price_a, price_b]) net2 = fc_old.input_layer(features, [price_b, price_a]) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 3.]], self.evaluate(net1)) self.assertAllClose([[1., 3.]], self.evaluate(net2)) def test_fails_for_categorical_column(self): animal = fc.categorical_column_with_identity('animal', num_buckets=4) with ops.Graph().as_default(): features = { 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]) } with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'): fc_old.input_layer(features, [animal]) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.input_layer(features, [price1, price2]) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.input_layer(features, [price1, price2, price3]) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # 
batchsize = 2 } net = fc_old.input_layer(features, [price1, price2]) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'Dimensions of inputs should match'): sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } net = fc_old.input_layer(features, [price1, price2]) with _initialized_session() as sess: sess.run( net, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) def test_multiple_layers_with_same_embedding_column(self): some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'sparse_feature': [['a'], ['x']], } all_cols = [some_embedding_column] fc_old.input_layer(features, all_cols) fc_old.input_layer(features, all_cols) # Make sure that 2 variables get created in this case. self.assertEqual(2, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) expected_var_names = [ 'input_layer/sparse_feature_embedding/embedding_weights:0', 'input_layer_1/sparse_feature_embedding/embedding_weights:0' ] self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) @test_util.run_deprecated_v1 def test_with_1d_sparse_tensor(self): embedding_values = ( (1., 2., 3., 4., 5.), # id 0 (6., 7., 8., 9., 10.), # id 1 (11., 12., 13., 14., 15.) # id 2 ) def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info return embedding_values # price has 1 dimension in input_layer price = fc.numeric_column('price') # one_hot_body_style has 3 dims in input_layer. 
body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) one_hot_body_style = fc.indicator_column(body_style) # embedded_body_style has 5 dims in input_layer. country = fc.categorical_column_with_vocabulary_list( 'country', vocabulary_list=['US', 'JP', 'CA']) embedded_country = fc.embedding_column( country, dimension=5, initializer=_initializer) # Provides 1-dim tensor and dense tensor. features = { 'price': constant_op.constant([ 11., 12., ]), 'body-style': sparse_tensor.SparseTensor( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)), # This is dense tensor for the categorical_column. 'country': constant_op.constant(['CA', 'US']), } self.assertEqual(1, features['price'].shape.ndims) self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0]) self.assertEqual(1, features['country'].shape.ndims) net = fc_old.input_layer(features, [price, one_hot_body_style, embedded_country]) self.assertEqual(1 + 3 + 5, net.shape[1]) with _initialized_session() as sess: # Each row is formed by concatenating `embedded_body_style`, # `one_hot_body_style`, and `price` in order. self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.], [1., 0., 0., 1., 2., 3., 4., 5., 12.]], sess.run(net)) @test_util.run_deprecated_v1 def test_with_1d_unknown_shape_sparse_tensor(self): embedding_values = ( (1., 2.), # id 0 (6., 7.), # id 1 (11., 12.) # id 2 ) def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info return embedding_values # price has 1 dimension in input_layer price = fc.numeric_column('price') # one_hot_body_style has 3 dims in input_layer. body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) one_hot_body_style = fc.indicator_column(body_style) # embedded_body_style has 5 dims in input_layer. 
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    # embedded_country has 2 dims in input_layer (dimension=2 below).
    embedded_country = fc.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    # All placeholders deliberately have UNKNOWN shape so input_layer must
    # handle rank inference at run time rather than graph-build time.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = fc_old.input_layer(features,
                             [price, one_hot_body_style, embedded_country])
    # Output width = 1 (price) + 3 (one-hot) + 2 (embedding).
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
self.assertAllEqual( [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]], sess.run( net, feed_dict={ features['price']: price_data, features['body-style']: body_style_data, features['country']: country_data })) @test_util.run_deprecated_v1 def test_with_rank_0_feature(self): # price has 1 dimension in input_layer price = fc.numeric_column('price') features = { 'price': constant_op.constant(0), } self.assertEqual(0, features['price'].shape.ndims) # Static rank 0 should fail with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'): fc_old.input_layer(features, [price]) # Dynamic rank 0 should fail features = { 'price': array_ops.placeholder(dtypes.float32), } net = fc_old.input_layer(features, [price]) self.assertEqual(1, net.shape[1]) with _initialized_session() as sess: with self.assertRaisesOpError('Feature .* cannot have rank 0'): sess.run(net, feed_dict={features['price']: np.array(1)}) class MakeParseExampleSpecTest(test.TestCase): class _TestFeatureColumn(BaseFeatureColumnForTests, collections.namedtuple('_TestFeatureColumn', ('parse_spec'))): @property def _is_v2_column(self): return True @property def name(self): return '_TestFeatureColumn' def transform_feature(self, transformation_cache, state_manager): pass def _transform_feature(self, inputs): pass @property def parse_example_spec(self): return self.parse_spec @property def _parse_example_spec(self): return self.parse_spec def test_no_feature_columns(self): actual = fc.make_parse_example_spec_v2([]) self.assertDictEqual({}, actual) def test_invalid_type(self): key1 = 'key1' parse_spec1 = parsing_ops.FixedLenFeature( shape=(2,), dtype=dtypes.float32, default_value=0.) 
    # Continuation of test_invalid_type: any non-FeatureColumn entry
    # (here the bare string 'invalid_column') must be rejected.
    with self.assertRaisesRegexp(
        ValueError,
        'All feature_columns must be FeatureColumn instances.*invalid_column'):
      fc.make_parse_example_spec_v2((self._TestFeatureColumn({
          key1: parse_spec1
      }), 'invalid_column'))

  def test_one_feature_column(self):
    """A single column's parse_spec is returned verbatim."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }),))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_two_feature_columns(self):
    """Specs from distinct columns with distinct keys are merged."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key2: parse_spec2
    })))
    self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)

  def test_equal_keys_different_parse_spec(self):
    """Same key with CONFLICTING specs across columns is an error."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    with self.assertRaisesRegexp(
        ValueError,
        'feature_columns contain different parse_spec for key key1'):
      fc.make_parse_example_spec_v2((self._TestFeatureColumn({
          key1: parse_spec1
      }), self._TestFeatureColumn({
          key1: parse_spec2
      })))

  def test_equal_keys_equal_parse_spec(self):
    """Same key with IDENTICAL specs across columns dedupes cleanly."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key1: parse_spec1
    })))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_multiple_features_dict(self):
    """parse_spc for one column is a dict with length > 1."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    key3 = 'key3'
    parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
    # Second column contributes a two-entry spec dict; all three keys merge.
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key2: parse_spec2,
        key3: parse_spec3
    })))
    self.assertDictEqual({
        key1: parse_spec1,
        key2: parse_spec2,
        key3: parse_spec3
    }, actual)


def _assert_sparse_tensor_value(test_case, expected, actual):
  """Asserts two SparseTensorValue-like objects are equal.

  Checks that `actual` has int64 indices and dense_shape, that its values
  dtype matches `expected`'s, and that indices/values/dense_shape are all
  element-wise equal.
  """
  test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
  test_case.assertAllEqual(expected.indices, actual.indices)

  test_case.assertEqual(
      np.array(expected.values).dtype, np.array(actual.values).dtype)
  test_case.assertAllEqual(expected.values, actual.values)

  test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
  test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)


class VocabularyFileCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_vocabulary_file."""

  def setUp(self):
    super(VocabularyFileCategoricalColumnTest, self).setUp()

    # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
    self._warriors_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/warriors_vocabulary.txt')
    self._warriors_vocabulary_size = 5

    # Contains strings, character names from 'The Wire': omar, stringer, marlo
    self._wire_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/wire_vocabulary.txt')
    self._wire_vocabulary_size = 3

  @test_util.run_deprecated_v1
  def test_defaults(self):
    """Default dtype is string; num_buckets equals vocabulary_size."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column.parse_example_spec)
    self.assertTrue(column._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_file( key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3) @test_util.run_deprecated_v1 def test_all_constructor_args(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path_to_file', vocabulary_size=3, num_oov_buckets=4, dtype=dtypes.int32) self.assertEqual(7, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, column.parse_example_spec) @test_util.run_deprecated_v1 def test_deep_copy(self): original = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path_to_file', vocabulary_size=3, num_oov_buckets=4, dtype=dtypes.int32) for column in (original, copy.deepcopy(original)): self.assertEqual('aaa', column.name) self.assertEqual(7, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, column.parse_example_spec) def test_vocabulary_file_none(self): with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=None, vocabulary_size=3) def test_vocabulary_file_empty_string(self): with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='', vocabulary_size=3) @test_util.run_deprecated_v1 def test_invalid_vocabulary_file(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'): self.evaluate(lookup_ops.tables_initializer()) def test_invalid_vocabulary_size(self): with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'): fc.categorical_column_with_vocabulary_file( key='aaa', 
vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=-1) with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=0) @test_util.run_deprecated_v1 def test_too_large_vocabulary_size(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size + 1) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'): self.evaluate(lookup_ops.tables_initializer()) def test_invalid_num_oov_buckets(self): with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path', vocabulary_size=3, num_oov_buckets=-1) def test_invalid_dtype(self): with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path', vocabulary_size=3, dtype=dtypes.float64) def test_invalid_buckets_and_default_value(self): with self.assertRaisesRegexp(ValueError, 'both num_oov_buckets and default_value'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=100, default_value=2) def test_invalid_input_dtype_int32(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, dtype=dtypes.string) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(12, 24, 36), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be 
compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) def test_invalid_input_dtype_string(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path_to_file', vocabulary_size=3) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_get_sparse_tensors(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( 
indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_none_vocabulary_size(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_transform_feature(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_tensor = fc._transform_features_v2({ 'aaa': inputs }, [column], None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_dense_input(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': 
(('marlo', ''), ('skywalker', 'omar')) }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((2, -1, 0), dtype=np.int64), dense_shape=(2, 2)), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_default_value_in_vocabulary(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, default_value=2) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, 2, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_with_oov_buckets(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=100) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (1, 2)), values=('marlo', 'skywalker', 'omar', 'heisenberg'), dense_shape=(2, 3)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, 
sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, 33, 0, 62), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_small_vocabulary_size(self): # 'marlo' is the last entry in our vocabulary file, so be setting # `vocabulary_size` to 1 less than number of entries in file, we take # 'marlo' out of the vocabulary. column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size - 1) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((-1, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=(11, 100, 30, 22), dense_shape=(3, 3)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0, 4), dtype=np.int64), 
dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32_dense_input(self): default_value = -100 column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32, default_value=default_value) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22)) }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=np.array((2, default_value, 0, 4), dtype=np.int64), dense_shape=(3, 3)), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32_with_oov_buckets(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32, num_oov_buckets=100) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=(11, 100, 30, 22), dense_shape=(3, 3)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, 60, 0, 4), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_linear_model(self): wire_column = fc.categorical_column_with_vocabulary_file( key='wire', vocabulary_file=self._wire_vocabulary_file_name, 
vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=1) self.assertEqual(4, wire_column.num_buckets) with ops.Graph().as_default(): model = fc.LinearModel((wire_column,)) predictions = model({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }) wire_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 2: wire_var[2] = 3 # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5 self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions)) def test_old_linear_model(self): wire_column = fc.categorical_column_with_vocabulary_file( key='wire', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=1) self.assertEqual(4, wire_column.num_buckets) with ops.Graph().as_default(): predictions = fc_old.linear_model({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }, (wire_column,)) bias = get_linear_model_bias() wire_var = get_linear_model_column_var(wire_column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 2: wire_var[2] = 3 # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5 self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions)) 
  @test_util.run_deprecated_v1
  def test_serialization(self):
    """_get_config()/_from_config() round-trips a vocabulary-file column."""
    wire_column = fc.categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(['wire'], wire_column.parents)

    config = wire_column._get_config()
    self.assertEqual({
        'default_value': -1,
        'dtype': 'string',
        'key': 'wire',
        'num_oov_buckets': 1,
        'vocabulary_file': self._wire_vocabulary_file_name,
        'vocabulary_size': 3
    }, config)

    self.assertEqual(wire_column,
                     fc.VocabularyFileCategoricalColumn._from_config(config))


class VocabularyListCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_vocabulary_list."""

  def test_defaults_string(self):
    """String vocabulary implies a string VarLenFeature parse spec."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column.parse_example_spec)
    self.assertTrue(column._is_v2_column)

  def test_key_should_be_string(self):
    """A non-string key is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_vocabulary_list(
          key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))

  def test_defaults_int(self):
    """Integer vocabulary implies an int64 VarLenFeature parse spec."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36))
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    """Explicit dtype overrides the default inferred from the vocabulary."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=(12, 24, 36),
        dtype=dtypes.int32,
        default_value=-99)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    """copy.deepcopy preserves all column attributes."""
    original = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column.num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int32)
      }, column.parse_example_spec)

  def test_invalid_dtype(self):
    """Only string or integer dtypes are accepted for the column."""
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.float32)

  def test_invalid_mapping_dtype(self):
    """Float vocabulary entries are rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 r'vocabulary dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12., 24., 36.))

  def test_mismatched_int_dtype(self):
    """String vocabulary with an integer dtype is rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 r'dtype.*and vocabulary dtype.*do not match'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.int32)

  def test_mismatched_string_dtype(self):
    """Integer vocabulary with a string dtype is rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 r'dtype.*and vocabulary dtype.*do not match'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)

  def test_none_mapping(self):
    """A None vocabulary is rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 r'vocabulary_list.*must be non-empty'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=None)

  def test_empty_mapping(self):
    """An empty vocabulary is rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 r'vocabulary_list.*must be non-empty'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=tuple([]))

  def test_duplicate_mapping(self):
    """Duplicate vocabulary entries are rejected."""
    with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 12))

  def test_invalid_num_oov_buckets(self):
    """Negative num_oov_buckets is rejected."""
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)

  def test_invalid_buckets_and_default_value(self):
    """num_oov_buckets and default_value are mutually exclusive."""
    with self.assertRaisesRegexp(ValueError,
                                 'both num_oov_buckets and default_value'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=(12, 24, 36),
          num_oov_buckets=100,
          default_value=2)

  def test_invalid_input_dtype_int32(self):
    """Integer input against a string vocabulary is rejected at lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column.get_sparse_tensors(
          fc.FeatureTransformationCache({
              'aaa': inputs
          }), None)

  def test_invalid_input_dtype_string(self):
    """String input against an integer vocabulary is rejected at lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column.get_sparse_tensors(
          fc.FeatureTransformationCache({
              'aaa': inputs
          }), None)

  @test_util.run_deprecated_v1
  def test_parse_example_string(self):
    """parse_example with the column's spec yields the raw string values."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer']))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a]))
    self.assertIn('aaa', features)

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([b'omar', b'stringer'], dtype=np.object_),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_parse_example_int(self):
    """parse_example with the column's spec yields the raw int values."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(11, 21, 31))
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[11, 21]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a]))
    self.assertIn('aaa', features)

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=[11, 21],
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    """Values are mapped to vocabulary ids; unknown values map to -1."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, -1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    """_transform_features_v2 produces the same id tensor as lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = fc._transform_features_v2({
        'aaa': inputs
    }, [column], None)[column]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, -1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape), self.evaluate(id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    """Dense string input is converted to sparse ids; '' is missing."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=np.array((2, -1, 0), dtype=np.int64),
            dense_shape=(2, 2)),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    """Out-of-vocabulary values map to default_value (here a valid id, 2)."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            # 'skywalker' is OOV and maps to default_value=2.
            values=np.array((2, 2, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_oov_buckets(self):
    """OOV string values hash into the extra OOV bucket range."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            # 'skywalker' and 'heisenberg' are OOV; they hash to buckets
            # 33 and 62 respectively (in-vocabulary ids are 0..2).
            values=np.array((2, 33, 0, 62), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32(self):
    """Int32 vocabulary lookup; OOV maps to the default -1."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=np.array((11, 100, 30, 22), dtype=np.int32),
        dense_shape=(3, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, -1, 0, 4), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_dense_input(self):
    """Dense int32 input: OOV maps to default_value, -1 entries are missing."""
    default_value = -100
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa':
                np.array(((11, -1, -1), (100, 30, -1), (-1, -1, 22)),
                         dtype=np.int32)
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1), (2, 2)),
            values=np.array((2, default_value, 0, 4), dtype=np.int64),
            dense_shape=(3, 3)),
        self.evaluate(id_weight_pair.id_tensor))
  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    """Int32 list vocabulary with OOV buckets: 100 hashes to bucket 60."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 60, 0, 4), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """LinearModel over a list-vocabulary column sums looked-up weights."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((wire_column,))
      predictions = model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      })
      wire_var, bias = model.variables

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 2: wire_var[2] = 3
      # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
      self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    """Same scenario via the v1 fc_old.linear_model API."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 2: wire_var[2] = 3
      # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
      self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    """_get_config()/_from_config() round-trips a list-vocabulary column."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(['aaa'], wire_column.parents)

    config = wire_column._get_config()
    self.assertEqual({
        'default_value': -1,
        'dtype': 'string',
        'key': 'aaa',
        'num_oov_buckets': 1,
        'vocabulary_list': ('omar', 'stringer', 'marlo')
    }, config)

    self.assertEqual(wire_column,
                     fc.VocabularyListCategoricalColumn._from_config(config))


class IdentityCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_identity."""

  def test_constructor(self):
    """Basic attributes: name/key/num_buckets and int64 parse spec."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column.parse_example_spec)
    self.assertTrue(column._is_v2_column)

  def test_key_should_be_string(self):
    """A non-string key is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    """copy.deepcopy preserves all column attributes."""
    original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column.num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, column.parse_example_spec)

  def test_invalid_num_buckets_zero(self):
    """num_buckets must be at least 1."""
    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=0)

  def test_invalid_num_buckets_negative(self):
    """Negative num_buckets is rejected."""
    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=-1)

  def test_invalid_default_value_too_small(self):
    """default_value must lie in [0, num_buckets)."""
    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=-1)

  def test_invalid_default_value_too_big(self):
    """default_value must lie in [0, num_buckets)."""
    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=3)

  def test_invalid_input_dtype(self):
    """String input to an identity column is rejected at lookup."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
      column.get_sparse_tensors(
          fc.FeatureTransformationCache({
              'aaa': inputs
          }), None)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    """parse_example with the column's spec yields the raw int values."""
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[11, 21]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a]))
    self.assertIn('aaa', features)

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([11, 21], dtype=np.int64),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    """Identity column passes in-range values through unchanged."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    """_transform_features_v2 produces the same pass-through id tensor."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_tensor = fc._transform_features_v2({
        'aaa': inputs
    }, [column], None)[column]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape), self.evaluate(id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    """Dense int input is sparsified; -1 entries are treated as missing."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': ((0, -1), (1, 0))
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=(2, 2)),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_small(self):
    """Without a default_value, a negative input fails the range assert."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    # The range check fires only when the tensor is evaluated.
    with self.assertRaisesRegexp(errors.OpError,
                                 'assert_greater_or_equal_0'):
      self.evaluate(id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_big(self):
    """Without a default_value, an input >= num_buckets fails the assert."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, 99, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    with self.assertRaisesRegexp(errors.OpError,
                                 'assert_less_than_num_buckets'):
      self.evaluate(id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value(self):
    """Out-of-range inputs (-1 and 99) are replaced by default_value=3."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 99),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((1, 3, 3), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
    """Same default_value behavior with fully-dynamic placeholder inputs."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int32)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    inputs = sparse_tensor.SparseTensorValue(
        indices=input_indices,
        values=input_values,
        dense_shape=input_shape)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    # Placeholders require an explicit session and feed dict.
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=np.array((2, 2), dtype=np.int64)),
          id_weight_pair.id_tensor.eval(
              feed_dict={
                  input_indices: ((0, 0), (1, 0), (1, 1)),
                  input_values: (1, -1, 99),
                  input_shape: (2, 2),
              }))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """LinearModel over an identity column sums looked-up weights."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((column,))
      predictions = model({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      })
      weight_var, bias = model.variables

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
      # weight_var[0] = 1
      # weight_var[2] + weight_var[1] = 3+2 = 5
      self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    """Same scenario via the v1 fc_old.linear_model API."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
      # weight_var[0] = 1
      # weight_var[2] + weight_var[1] = 3+2 = 5
      self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    """_get_config()/_from_config() round-trips an identity column.

    NOTE(review): the config key is 'number_buckets' (not 'num_buckets');
    this matches whatever IdentityCategoricalColumn._get_config emits.
    """
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(['aaa'], column.parents)

    config = column._get_config()
    self.assertEqual({
        'default_value': None,
        'key': 'aaa',
        'number_buckets': 3
    }, config)

    self.assertEqual(column,
                     fc.IdentityCategoricalColumn._from_config(config))


class TransformFeaturesTest(test.TestCase):
  """Tests for fc._transform_features_v2."""

  # All transform tests are distributed in column test.
# Here we only test multi column case and naming def transform_multi_column(self): bucketized_price = fc.bucketized_column( fc.numeric_column('price'), boundaries=[0, 2, 4, 6]) hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10) with ops.Graph().as_default(): features = { 'price': [[-1.], [5.]], 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) } transformed = fc._transform_features_v2( features, [bucketized_price, hashed_sparse], None) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertIn(bucketized_price.name, transformed[bucketized_price].name) self.assertAllEqual([[0], [3]], self.evaluate(transformed[bucketized_price])) self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name) self.assertAllEqual([6, 4, 1], self.evaluate(transformed[hashed_sparse].values)) def test_column_order(self): """When the column is both dense and sparse, uses sparse tensors.""" class _LoggerColumn(BaseFeatureColumnForTests): def __init__(self, name): self._name = name @property def _is_v2_column(self): return True @property def name(self): return self._name def transform_feature(self, transformation_cache, state_manager): self.call_order = call_logger['count'] call_logger['count'] += 1 return 'Anything' @property def parse_example_spec(self): pass with ops.Graph().as_default(): column1 = _LoggerColumn('1') column2 = _LoggerColumn('2') call_logger = {'count': 0} fc._transform_features_v2({}, [column1, column2], None) self.assertEqual(0, column1.call_order) self.assertEqual(1, column2.call_order) call_logger = {'count': 0} fc._transform_features_v2({}, [column2, column1], None) self.assertEqual(0, column1.call_order) self.assertEqual(1, column2.call_order) class IndicatorColumnTest(test.TestCase): def test_indicator_column(self): a = fc.categorical_column_with_hash_bucket('a', 4) indicator_a = fc.indicator_column(a) 
self.assertEqual(indicator_a.categorical_column.name, 'a') self.assertEqual(indicator_a.name, 'a_indicator') self.assertEqual(indicator_a.variable_shape, [1, 4]) self.assertTrue(indicator_a._is_v2_column) b = fc_old._categorical_column_with_hash_bucket('b', hash_bucket_size=100) indicator_b = fc.indicator_column(b) self.assertEqual(indicator_b.categorical_column.name, 'b') self.assertEqual(indicator_b.name, 'b_indicator') self.assertEqual(indicator_b.variable_shape, [1, 100]) self.assertFalse(indicator_b._is_v2_column) def test_1D_shape_succeeds(self): animal = fc.indicator_column( fc.categorical_column_with_hash_bucket('animal', 4)) transformation_cache = fc.FeatureTransformationCache({ 'animal': ['fox', 'fox'] }) output = transformation_cache.get(animal, None) self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], self.evaluate(output)) def test_2D_shape_succeeds(self): # TODO(ispir/cassandrax): Swith to categorical_column_with_keys when ready. animal = fc.indicator_column( fc.categorical_column_with_hash_bucket('animal', 4)) transformation_cache = fc.FeatureTransformationCache({ 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0]], values=['fox', 'fox'], dense_shape=[2, 1]) }) output = transformation_cache.get(animal, None) self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], self.evaluate(output)) def test_multi_hot(self): animal = fc.indicator_column( fc.categorical_column_with_identity('animal', num_buckets=4)) transformation_cache = fc.FeatureTransformationCache({ 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2]) }) output = transformation_cache.get(animal, None) self.assertAllEqual([[0., 2., 0., 0.]], self.evaluate(output)) def test_multi_hot2(self): animal = fc.indicator_column( fc.categorical_column_with_identity('animal', num_buckets=4)) transformation_cache = fc.FeatureTransformationCache({ 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]) 
}) output = transformation_cache.get(animal, None) self.assertAllEqual([[0., 1., 1., 0.]], self.evaluate(output)) @test_util.run_deprecated_v1 def test_deep_copy(self): a = fc.categorical_column_with_hash_bucket('a', 4) column = fc.indicator_column(a) column_copy = copy.deepcopy(column) self.assertEqual(column_copy.categorical_column.name, 'a') self.assertEqual(column.name, 'a_indicator') self.assertEqual(column.variable_shape, [1, 4]) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_indicator = fc.indicator_column(a) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_indicator])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_transform(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_indicator = fc.indicator_column(a) features = { 'aaa': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) } indicator_tensor = fc._transform_features_v2(features, [a_indicator], None)[a_indicator] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual([[0, 0, 1], [1, 0, 0]], self.evaluate(indicator_tensor)) @test_util.run_deprecated_v1 def test_transform_with_weighted_column(self): # Github issue 12557 ids = fc.categorical_column_with_vocabulary_list( key='ids', vocabulary_list=('a', 'b', 'c')) weights = 
fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
        'weights': constant_op.constant([[2., 4., 6., 1.]])
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    # Duplicated ids accumulate their weights: 'c' appears twice -> 2 + 1 = 3.
    # Output order follows the vocabulary ('a', 'b', 'c').
    self.assertAllEqual([[6., 4., 3.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_weighted_column(self):
    """Out-of-vocabulary ids in a weighted column contribute no weight."""
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    # 'unknown' is dropped; 'b' keeps weight 4, 'c' keeps weight 2.
    self.assertAllEqual([[0., 4., 2.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_categorical_column(self):
    """Out-of-vocabulary ids in a plain categorical column are ignored."""
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    indicator = fc.indicator_column(ids)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual([[0., 1., 1.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """Indicator column in LinearModel: prediction sums the active weights."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      model = fc.LinearModel([animal])
      predictions = model(features)
      weight_var, _ = model.variables
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
      self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      # ids 1 and 2 are active, so the prediction is w[1] + w[2] = 2 + 3.
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  def test_old_linear_model(self):
    """Same as test_linear_model, but through the v1 fc_old.linear_model API."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = fc_old.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
      self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  def test_old_linear_model_old_categorical(self):
    """v1 linear_model wrapping a v1 (fc_old) categorical column."""
    animal = fc.indicator_column(
        fc_old._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = fc_old.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    """DenseFeatures turns the indicator column into a multi-hot vector."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc.DenseFeatures([animal])(features)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # ids 1 and 2 are active out of 4 buckets.
      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    """v1 input_layer produces the same multi-hot vector."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc_old.input_layer(features, [animal])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  def test_input_layer_old_categorical(self):
    """v1 input_layer wrapping a v1 (fc_old) categorical column."""
    animal = fc.indicator_column(
        fc_old._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc_old.input_layer(features, [animal])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    """Indicator columns round-trip through _get_config/_from_config."""
    parent = fc.categorical_column_with_identity('animal', num_buckets=4)
    animal = fc.indicator_column(parent)

    self.assertEqual([parent], animal.parents)

    config = animal._get_config()
    self.assertEqual({
        'categorical_column': {
            'class_name': 'IdentityCategoricalColumn',
            'config': {
                'key': 'animal',
                'default_value': None,
                'number_buckets': 4
            }
        }
    }, config)

    # Without columns_by_name a fresh categorical column is deserialized.
    new_animal = fc.IndicatorColumn._from_config(config)
    self.assertEqual(animal, new_animal)
    self.assertIsNot(parent, new_animal.categorical_column)

    # With columns_by_name the existing column instance is reused.
    new_animal = fc.IndicatorColumn._from_config(
        config, columns_by_name={parent.name: parent})
    self.assertEqual(animal, new_animal)
    self.assertIs(parent, new_animal.categorical_column)


class _TestStateManager(fc.StateManager):
  """Minimal StateManager for tests: stores variables per feature column."""

  def __init__(self, trainable=True):
    # Dict of feature_column to a dict of variables.
    self._all_variables = {}
    self._trainable = trainable

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    if feature_column not in self._all_variables:
      self._all_variables[feature_column] = {}
    var_dict = self._all_variables[feature_column]
    if name in var_dict:
      # Reuse the existing variable instead of creating a duplicate.
      return var_dict[name]
    else:
      # Variable is trainable only if both the manager and the request say so.
      var = variable_scope.get_variable(
          name=name,
          shape=shape,
          dtype=dtype,
          trainable=self._trainable and trainable,
          use_resource=use_resource,
          initializer=initializer)
      var_dict[name] = var
      return var

  def get_variable(self, feature_column, name):
    if feature_column not in self._all_variables:
      raise ValueError('Do not recognize FeatureColumn.')
    if name in self._all_variables[feature_column]:
      return self._all_variables[feature_column][name]
    raise ValueError('Could not find variable.')


class EmbeddingColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    """Default attribute values of fc.embedding_column."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('mean', embedding_column.combiner)
self.assertIsNone(embedding_column.ckpt_to_load_from)
    self.assertIsNone(embedding_column.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column.max_norm)
    self.assertTrue(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column.parse_example_spec)
    self.assertTrue(embedding_column._is_v2_column)

  def test_is_v2_column(self):
    """Wrapping a v1 categorical column yields a non-v2 embedding column."""
    categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertFalse(embedding_column._is_v2_column)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    """Every constructor argument is stored verbatim on the column."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column.max_norm)
    self.assertFalse(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    """copy.deepcopy preserves all embedding column attributes."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    original = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    # The original and its deep copy must expose identical attributes.
    for embedding_column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', embedding_column.categorical_column.name)
      self.assertEqual(3, embedding_column.categorical_column.num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.categorical_column.parse_example_spec)

      self.assertEqual(embedding_dimension, embedding_column.dimension)
      self.assertEqual('my_combiner', embedding_column.combiner)
      self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column.max_norm)
      self.assertFalse(embedding_column.trainable)
      self.assertEqual('aaa_embedding', embedding_column.name)
      self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_invalid_initializer(self):
    """A non-callable initializer is rejected at construction time."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
      fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    """make_parse_example_spec_v2 parses the underlying sparse id feature."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_embedded = fc.embedding_column(a, dimension=2)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer']))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a_embedded]))
self.assertIn('aaa', features)

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([b'omar', b'stringer'], dtype=np.object_),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    """Transforming an embedding column yields the categorical ids tensor."""
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    a_embedded = fc.embedding_column(a, dimension=2)
    features = {
        'aaa':
            sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=(0, 1, 0),
                dense_shape=(2, 2))
    }
    outputs = fc._transform_features_v2(features, [a, a_embedded], None)
    output_a = outputs[a]
    output_embedded = outputs[a_embedded]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    # Both transforms must produce the identical sparse id tensor.
    _assert_sparse_tensor_value(self, self.evaluate(output_a),
                                self.evaluate(output_embedded))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor(self):
    """get_dense_tensor performs a mean-combined embedding lookup."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      # Verify the column requests the expected variable shape/dtype.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_old_categorical(self):
    """Same lookup via the v1 _get_dense_tensor / _LazyBuilder code path."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        fc_old._LazyBuilder({
            'aaa': sparse_input
        }))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_3d(self):
    """Embedding lookup on rank-3 sparse input (batch, position, ids)."""
    # Inputs.
    vocabulary_size = 4
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
        values=(2, 0, 1, 1, 2),
        dense_shape=(4, 2, 5))

    # Embedding variable.
    embedding_dimension = 3
    embedding_values = (
        (1., 2., 4.),  # id 0
        (3., 5., 1.),  # id 1
        (7., 11., 2.),  # id 2
        (2., 7., 12.)  # id 3
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
        ((7., 11., 2.), (0., 0., 0.)),
        # example 1, ids [[], [0, 1]], embedding
        # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
        ((0., 0., 0.), (2., 3.5, 2.5)),
        # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
        ((0., 0., 0.), (0., 0., 0.)),
        # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
        ((3., 5., 1.), (7., 11., 2.)),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_placeholder_inputs(self):
    """Lookup also works when the sparse input arrives via placeholders."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)
# id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result. The sparse components are
    # fed through placeholders rather than constants.
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int64)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa':
                sparse_tensor.SparseTensorValue(
                    indices=input_indices,
                    values=input_values,
                    dense_shape=input_shape)
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    with _initialized_session():
      self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
      self.assertAllEqual(
          expected_lookups,
          embedding_lookup.eval(
              feed_dict={
                  input_indices: sparse_input.indices,
                  input_values: sparse_input.values,
                  input_shape: sparse_input.dense_shape,
              }))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_restore_from_ckpt(self):
    """Embedding weights can be restored from a named checkpoint tensor."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable. The checkpoint file contains _embedding_values.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    ckpt_path = test.test_src_dir_path(
        'python/feature_column/testdata/embedding.ckpt')
    ckpt_tensor = 'my_embedding'

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        ckpt_to_load_from=ckpt_path,
        tensor_name_in_ckpt=ckpt_tensor)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """Embedding column inside LinearModel creates the expected variables."""
    # Inputs.
batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      model = fc.LinearModel((embedding_column,))
      predictions = model({categorical_column.name: sparse_input})
      # The model must create bias, per-dimension linear weights, and the
      # embedding table, all trainable.
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    """DenseFeatures layer performs the mean-combined embedding lookup."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    l = fc.DenseFeatures((embedding_column,))
    dense_features = l({'aaa': sparse_input})

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, variables_lib.Variable)
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in trainable_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_dense_features_not_trainable(self):
    """trainable=False keeps the embedding out of TRAINABLE_VARIABLES."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=False)

    # Provide sparse input and get dense result.
    dense_features = fc.DenseFeatures((embedding_column,))({
        'aaa': sparse_input
    })

    # Assert expected embedding variable and lookups. The variable exists
    # globally but must NOT be registered as trainable.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('dense_features/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    self.assertItemsEqual([],
                          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    """v1 input_layer performs the embedding lookup."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    feature_layer = fc_old.input_layer({
        'aaa': sparse_input
    }, (embedding_column,))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in trainable_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(feature_layer))

  def test_old_linear_model(self):
    """v1 fc_old.linear_model with an embedding column."""
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      # Bias, per-dimension linear weights, and embedding table expected.
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)
# id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  def test_old_linear_model_old_categorical(self):
    """v1 linear_model over a v1 (fc_old) categorical column."""
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization_with_default_initializer(self):
    """Embedding column config round-trips with the default initializer."""

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_column = fc.embedding_column(categorical_column, dimension=2)

    self.assertEqual([categorical_column], embedding_column.parents)

    config = embedding_column._get_config()
    self.assertEqual(
        {
            'categorical_column': {
                'class_name': 'IdentityCategoricalColumn',
                'config': {
                    'number_buckets': 3,
                    'key': 'aaa',
                    'default_value': None
                }
            },
            'ckpt_to_load_from': None,
            'combiner': 'mean',
            'dimension': 2,
            'initializer': {
                'class_name': 'TruncatedNormal',
                'config': {
                    'dtype': 'float32',
                    'stddev': 0.7071067811865475,
                    'seed': None,
                    'mean': 0.0
                }
            },
            'max_norm': None,
            'tensor_name_in_ckpt': None,
            'trainable': True
        }, config)

    # Without columns_by_name a fresh categorical column is deserialized.
    custom_objects = {'TruncatedNormal': init_ops.TruncatedNormal}
    new_embedding_column = fc.EmbeddingColumn._from_config(
        config, custom_objects=custom_objects)
    self.assertEqual(embedding_column._get_config(),
                     new_embedding_column._get_config())
    self.assertIsNot(categorical_column,
                     new_embedding_column.categorical_column)

    # With columns_by_name the existing column instance is reused.
    new_embedding_column = fc.EmbeddingColumn._from_config(
        config,
        custom_objects=custom_objects,
        columns_by_name={categorical_column.name: categorical_column})
    self.assertEqual(embedding_column._get_config(),
                     new_embedding_column._get_config())
    self.assertIs(categorical_column, new_embedding_column.categorical_column)

  @test_util.run_deprecated_v1
  def test_serialization_with_custom_initializer(self):

    def
_initializer(shape, dtype, partition_info): del shape, dtype, partition_info return ValueError('Not expected to be called') # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_column = fc.embedding_column( categorical_column, dimension=2, initializer=_initializer) self.assertEqual([categorical_column], embedding_column.parents) config = embedding_column._get_config() self.assertEqual({ 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'number_buckets': 3, 'key': 'aaa', 'default_value': None } }, 'ckpt_to_load_from': None, 'combiner': 'mean', 'dimension': 2, 'initializer': '_initializer', 'max_norm': None, 'tensor_name_in_ckpt': None, 'trainable': True }, config) custom_objects = { '_initializer': _initializer, } new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects) self.assertEqual(embedding_column, new_embedding_column) self.assertIsNot(categorical_column, new_embedding_column.categorical_column) new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects, columns_by_name={categorical_column.name: categorical_column}) self.assertEqual(embedding_column, new_embedding_column) self.assertIs(categorical_column, new_embedding_column.categorical_column) class SharedEmbeddingColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertIsNone(embedding_column_a.max_norm) 
self.assertIsNone(embedding_column_b.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b.parse_example_spec) @test_util.run_deprecated_v1 def test_all_constructor_args(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='shared_embedding_collection_name', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(42., embedding_column_a.max_norm) self.assertEqual(42., embedding_column_b.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b.parse_example_spec) @test_util.run_deprecated_v1 def test_deep_copy(self): 
categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 original_a, _ = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='shared_embedding_collection_name', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) for embedding_column_a in (original_a, copy.deepcopy(original_a)): self.assertEqual('aaa', embedding_column_a.categorical_column.name) self.assertEqual(3, embedding_column_a.categorical_column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.categorical_column.parse_example_spec) self.assertEqual(42., embedding_column_a.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) @test_util.run_deprecated_v1 def test_invalid_initializer(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) with self.assertRaisesRegexp(ValueError, 'initializer must be callable'): fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=2, initializer='not_fn') @test_util.run_deprecated_v1 def test_incompatible_column_type(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) categorical_column_c = fc.categorical_column_with_hash_bucket( key='ccc', hash_bucket_size=3) with self.assertRaisesRegexp( ValueError, 'all 
categorical_columns must have the same type.*' 'IdentityCategoricalColumn.*HashedCategoricalColumn'): fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b, categorical_column_c], dimension=2) @test_util.run_deprecated_v1 def test_weighted_categorical_column_ok(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) weighted_categorical_column_a = fc.weighted_categorical_column( categorical_column_a, weight_feature_key='aaa_weights') categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) weighted_categorical_column_b = fc.weighted_categorical_column( categorical_column_b, weight_feature_key='bbb_weights') fc.shared_embedding_columns_v2( [weighted_categorical_column_a, categorical_column_b], dimension=2) fc.shared_embedding_columns_v2( [categorical_column_a, weighted_categorical_column_b], dimension=2) fc.shared_embedding_columns_v2( [weighted_categorical_column_a, weighted_categorical_column_b], dimension=2) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) b = fc.categorical_column_with_vocabulary_list( key='bbb', vocabulary_list=('omar', 'stringer', 'marlo')) a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'bbb': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'stringer', b'marlo'])), })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_embedded, b_embedded])) self.assertIn('aaa', features) self.assertIn('bbb', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 
2]), self.evaluate(features['aaa'])) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'stringer', b'marlo'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['bbb'])) @test_util.run_deprecated_v1 def test_transform_feature(self): a = fc.categorical_column_with_identity(key='aaa', num_buckets=3) b = fc.categorical_column_with_identity(key='bbb', num_buckets=3) a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2) features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } outputs = fc._transform_features_v2(features, [a, a_embedded, b, b_embedded], None) output_a = outputs[a] output_a_embedded = outputs[a_embedded] output_b = outputs[b] output_b_embedded = outputs[b_embedded] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value(self, self.evaluate(output_a), self.evaluate(output_a_embedded)) _assert_sparse_tensor_value(self, self.evaluate(output_b), self.evaluate(output_b_embedded)) @test_util.run_deprecated_v1 def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. 
    expected_lookups_a = (
        # example 0:
        (7., 11.),  # ids [2], embedding = [7, 11]
        # example 1:
        (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
    )
    expected_lookups_b = (
        # example 0:
        (1., 2.),  # ids [0], embedding = [1, 2]
        # example 1:
        (0., 0.),  # ids [], embedding = [0, 0]
    )

    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    embedding_lookup_a = embedding_column_a.get_dense_tensor(
        fc.FeatureTransformationCache(input_features), None)
    embedding_lookup_b = embedding_column_b.get_dense_tensor(
        fc.FeatureTransformationCache(input_features), None)

    # Assert expected embedding variable and lookups.
    # A single embedding table is shared by both columns; its name is derived
    # from both categorical keys.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('aaa_bbb_shared_embedding:0',),
                          tuple([v.name for v in global_vars]))
    embedding_var = global_vars[0]

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(embedding_var))
    self.assertAllEqual(expected_lookups_a, self.evaluate(embedding_lookup_a))
    self.assertAllEqual(expected_lookups_b, self.evaluate(embedding_lookup_b))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_placeholder_inputs(self):
    # Inputs.
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array([
        [2, -1, -1],  # example 0, ids [2]
        [0, 1, -1]
    ])  # example 1, ids [0, 1]
    input_b = np.array([
        [0, -1, -1],  # example 0, ids [0]
        [-1, -1, -1]
    ])  # example 1, ids []

    # Specify shape, because dense input must have rank specified.
input_a_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_b_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_features = { 'aaa': input_a_placeholder, 'bbb': input_b_placeholder, } feed_dict = { input_a_placeholder: input_a, input_b_placeholder: input_b, } # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) embedding_lookup_b = embedding_column_b.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) with _initialized_session() as sess: sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict) @test_util.run_deprecated_v1 def test_linear_model(self): # Inputs. batch_size = 2 vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] # Embedding variable. 
embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): model = fc.LinearModel((embedding_column_a, embedding_column_b)) predictions = model({ categorical_column_a.name: input_a, categorical_column_b.name: input_b }) # Linear weights do not follow the column name. But this is a rare use # case, and fixing it would add too much complexity to the code. expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_shared_embedding/weights:0', 'aaa_bbb_shared_embedding:0', 'linear_model/bbb_shared_embedding/weights:0', ) self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertItemsEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars['aaa_bbb_shared_embedding:0'] linear_weights_a = trainable_vars[ 'linear_model/aaa_shared_embedding/weights:0'] linear_weights_b = trainable_vars[ 'linear_model/bbb_shared_embedding/weights:0'] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) # Predictions with all zero weights. 
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_a))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_b))
      self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights_a.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
      self.evaluate(linear_weights_b.assign(((3.,), (5.,))))
      # example 0, ids [0], embedding[0] = [1, 2]
      # example 1, ids [], embedding[1] = [0, 0]
      # sum(embeddings * linear_weights)
      # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
      # Final predictions sum both columns' contributions: [94 + 13, 29 + 0].
      self.assertAllClose([[94. + 13.], [29.]], self.evaluate(predictions))

  def _test_dense_features(self, trainable=True):
    """Shared body for the DenseFeatures tests below (trainable on/off)."""
    # Inputs.
    vocabulary_size = 3
    sparse_input_a = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 4)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_b = sparse_tensor.SparseTensorValue(
        # example 0, ids [0]
        # example 1, ids []
        indices=((0, 0),),
        values=(0,),
        dense_shape=(2, 5))
    sparse_input_c = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 1), (1, 1), (1, 3)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_d = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids []
        indices=((0, 1),),
        values=(2,),
        dense_shape=(2, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)
# id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0: # A ids [2], embedding = [7, 11] # B ids [0], embedding = [1, 2] # C ids [2], embedding = [7, 11] # D ids [2], embedding = [7, 11] (7., 11., 1., 2., 7., 11., 7., 11.), # example 1: # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] # B ids [], embedding = [0, 0] # C ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] # D ids [], embedding = [0, 0] (2., 3.5, 0., 0., 2., 3.5, 0., 0.), ) # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) categorical_column_c = fc.categorical_column_with_identity( key='ccc', num_buckets=vocabulary_size) categorical_column_d = fc.categorical_column_with_identity( key='ddd', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer, trainable=trainable) embedding_column_c, embedding_column_d = fc.shared_embedding_columns_v2( [categorical_column_c, categorical_column_d], dimension=embedding_dimension, initializer=_initializer, trainable=trainable) features = { 'aaa': sparse_input_a, 'bbb': sparse_input_b, 'ccc': sparse_input_c, 'ddd': sparse_input_d } # Provide sparse input and get dense result. dense_features = fc.DenseFeatures( feature_columns=(embedding_column_b, embedding_column_a, embedding_column_c, embedding_column_d))( features) # Assert expected embedding variable and lookups. 
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    # One embedding variable per shared group, named after the grouped keys.
    self.assertItemsEqual(
        ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
        tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, variables_lib.Variable)
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    if trainable:
      self.assertItemsEqual(
          ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
          tuple([v.name for v in trainable_vars]))
    else:
      # With trainable=False the variables exist but are not collected as
      # trainable.
      self.assertItemsEqual([], tuple([v.name for v in trainable_vars]))
    shared_embedding_vars = global_vars

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values,
                        self.evaluate(shared_embedding_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    self._test_dense_features()

  @test_util.run_deprecated_v1
  def test_dense_features_no_trainable(self):
    self._test_dense_features(trainable=False)

  @test_util.run_deprecated_v1
  def test_serialization(self):

    def _initializer(shape, dtype, partition_info):
      # The test only inspects column metadata, so this must never be called.
      del shape, dtype, partition_info
      return ValueError('Not expected to be called')

    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2(
        [categorical_column_a, categorical_column_b],
        dimension=2,
        initializer=_initializer)

    self.assertEqual([categorical_column_a], embedding_column_a.parents)
    self.assertEqual([categorical_column_b], embedding_column_b.parents)

    # TODO(rohanj): Add tests for (from|get)_config once implemented


class WeightedCategoricalColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
weight_feature_key='values') self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column.parse_example_spec) self.assertTrue(column._is_v2_column) def test_is_v2_column(self): column = fc.weighted_categorical_column( categorical_column=fc_old._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') self.assertFalse(column._is_v2_column) @test_util.run_deprecated_v1 def test_deep_copy(self): """Tests deepcopy of categorical_column_with_hash_bucket.""" original = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') for column in (original, copy.deepcopy(original)): self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column.parse_example_spec) def test_invalid_dtype_none(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values', dtype=None) def test_invalid_dtype_string(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values', dtype=dtypes.string) def test_invalid_input_dtype(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') strings = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with 
self.assertRaisesRegexp(ValueError, 'Bad dtype'): fc._transform_features_v2({ 'ids': strings, 'values': strings }, (column,), None) def test_column_name_collision(self): with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='aaa', num_buckets=3), weight_feature_key='aaa').parse_example_spec() def test_missing_weights(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'values is not in features dictionary'): fc._transform_features_v2({'ids': inputs}, (column,), None) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights') data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'weights': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[1., 10.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_weighted])) self.assertIn('aaa', features) self.assertIn('weights', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([1., 10.], dtype=np.float32), dense_shape=[1, 2]), self.evaluate(features['weights'])) @test_util.run_deprecated_v1 def 
test_transform_features(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)) weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': inputs, 'values': weights, }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_input(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': ((0, -1), (1, 0)), 'values': weights, }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((0, 1, 0), dtype=np.int64), dense_shape=(2, 2)), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), 
dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_weights(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(2, 1, 0), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': inputs, 'values': ((.5, 0.), (1., .1)), }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((.5, 1., .1), dtype=np.float32), dense_shape=(2, 2)), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_linear_model(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,)) predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }) weight_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 
= .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_linear_model_mismatched_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, r'Dimensions.*are not compatible'): model = fc.LinearModel((column,)) model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), (1, 1)), values=(.5, 11., 1., .1), dense_shape=(2, 2)) }) def test_linear_model_mismatched_dense_values(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,), sparse_combiner='mean') predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }) # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. 
config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_linear_model_mismatched_dense_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,)) predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }) weight_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), 
self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model_mismatched_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, r'Dimensions.*are not compatible'): fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), (1, 1)), values=(.5, 11., 1., .1), dense_shape=(2, 2)) }, (column,)) def test_old_linear_model_mismatched_dense_values(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }, (column,), sparse_combiner='mean') # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. 
config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_old_linear_model_mismatched_dense_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model_old_categorical(self): column = fc.weighted_categorical_column( categorical_column=fc_old._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) 
self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) # TODO(ptucker): Add test with embedding of weighted categorical. @test_util.run_deprecated_v1 def test_serialization(self): categorical_column = fc.categorical_column_with_identity( key='ids', num_buckets=3) column = fc.weighted_categorical_column( categorical_column=categorical_column, weight_feature_key='weight') self.assertEqual([categorical_column, 'weight'], column.parents) config = column._get_config() self.assertEqual({ 'categorical_column': { 'config': { 'key': 'ids', 'number_buckets': 3, 'default_value': None }, 'class_name': 'IdentityCategoricalColumn' }, 'dtype': 'float32', 'weight_feature_key': 'weight' }, config) self.assertEqual(column, fc.WeightedCategoricalColumn._from_config(config)) new_column = fc.WeightedCategoricalColumn._from_config( config, columns_by_name={categorical_column.name: categorical_column}) self.assertEqual(column, new_column) self.assertIs(categorical_column, new_column.categorical_column) class FeatureColumnForSerializationTest(BaseFeatureColumnForTests): @property def _is_v2_column(self): return True @property def name(self): return 'BadParentsFeatureColumn' def transform_feature(self, transformation_cache, state_manager): return 'Output' @property def parse_example_spec(self): pass class SerializationTest(test.TestCase): """Tests for serialization, deserialization helpers.""" def test_serialize_non_feature_column(self): class NotAFeatureColumn(object): pass with self.assertRaisesRegexp(ValueError, 'is not a FeatureColumn'): 
serialization.serialize_feature_column(NotAFeatureColumn()) def test_deserialize_invalid_config(self): with self.assertRaisesRegexp(ValueError, 'Improper config format: {}'): serialization.deserialize_feature_column({}) def test_deserialize_config_missing_key(self): config_missing_key = { 'config': { # Dtype is missing and should cause a failure. # 'dtype': 'int32', 'default_value': None, 'key': 'a', 'normalizer_fn': None, 'shape': (2,) }, 'class_name': 'NumericColumn' } with self.assertRaisesRegexp(ValueError, 'Invalid config:'): serialization.deserialize_feature_column(config_missing_key) def test_deserialize_invalid_class(self): with self.assertRaisesRegexp( ValueError, 'Unknown feature_column_v2: NotExistingFeatureColumnClass'): serialization.deserialize_feature_column({ 'class_name': 'NotExistingFeatureColumnClass', 'config': {} }) def test_deserialization_deduping(self): price = fc.numeric_column('price') bucketized_price = fc.bucketized_column(price, boundaries=[0, 1]) configs = serialization.serialize_feature_columns([price, bucketized_price]) deserialized_feature_columns = serialization.deserialize_feature_columns( configs) self.assertEqual(2, len(deserialized_feature_columns)) new_price = deserialized_feature_columns[0] new_bucketized_price = deserialized_feature_columns[1] # Ensure these are not the original objects: self.assertIsNot(price, new_price) self.assertIsNot(bucketized_price, new_bucketized_price) # But they are equivalent: self.assertEquals(price, new_price) self.assertEquals(bucketized_price, new_bucketized_price) # Check that deduping worked: self.assertIs(new_bucketized_price.source_column, new_price) def deserialization_custom_objects(self): # Note that custom_objects is also tested extensively above per class, this # test ensures that the public wrappers also handle it correctly. def _custom_fn(input_tensor): return input_tensor + 42. 
price = fc.numeric_column('price', normalizer_fn=_custom_fn) configs = serialization.serialize_feature_columns([price]) deserialized_feature_columns = serialization.deserialize_feature_columns( configs) self.assertEqual(1, len(deserialized_feature_columns)) new_price = deserialized_feature_columns[0] # Ensure these are not the original objects: self.assertIsNot(price, new_price) # But they are equivalent: self.assertEquals(price, new_price) # Check that normalizer_fn points to the correct function. self.assertIs(new_price.normalizer_fn, _custom_fn) if __name__ == '__main__': test.main()
{ "content_hash": "e166bc713df551ef52d390d2200b1ac3", "timestamp": "", "source": "github", "line_count": 8441, "max_line_length": 124, "avg_line_length": 39.32733088496624, "alnum_prop": 0.6034034015941584, "repo_name": "ghchinoy/tensorflow", "id": "1bc43ba7dd43e2baf3dbad520bfc211931b5f544", "size": "332651", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/feature_column/feature_column_v2_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "3568" }, { "name": "Batchfile", "bytes": "15317" }, { "name": "C", "bytes": "699905" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "67022491" }, { "name": "CMake", "bytes": "206499" }, { "name": "Dockerfile", "bytes": "73602" }, { "name": "Go", "bytes": "1585039" }, { "name": "HTML", "bytes": "4680118" }, { "name": "Java", "bytes": "836400" }, { "name": "Jupyter Notebook", "bytes": "1665583" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "98194" }, { "name": "Objective-C", "bytes": "94022" }, { "name": "Objective-C++", "bytes": "175222" }, { "name": "PHP", "bytes": "17600" }, { "name": "Pascal", "bytes": "3239" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "48407007" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "4733" }, { "name": "Shell", "bytes": "476920" }, { "name": "Smarty", "bytes": "27495" }, { "name": "Swift", "bytes": "56155" } ], "symlink_target": "" }
"""Tiny demo script: print a greeting via the helper module."""
import helper

helper.greeting("BLAHHHHH")
{ "content_hash": "26a3effa8fb254dcaa31b1dc56bb453c", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 27, "avg_line_length": 16.666666666666668, "alnum_prop": 0.8, "repo_name": "artpomm/cs3240-labdemo", "id": "cadedea00ecab3c4f0838f32a613244b105328d0", "size": "50", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hello.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "124" } ], "symlink_target": "" }
"""Base class and functions for compilers""" from itertools import product from collections import namedtuple, OrderedDict from compilertools._utils import import_class, BaseClass from compilertools._config import CONFIG from compilertools.processors import get_processor, get_arch __all__ = ["CompilerBase", "get_compiler"] def get_compiler(compiler=None, current_compiler=False): """Returns compiler class Parameters ---------- compiler : str of CompilerBase subclass Compiler Name or instance current_compiler : bool Compiler used to build Returns ------- CompilerBase subclass instance Compiler class instance.""" if isinstance(compiler, CompilerBase): return compiler if compiler is None: from distutils.ccompiler import get_default_compiler compiler = get_default_compiler() alias = CONFIG.get("compilers", {}).get(compiler, compiler) if alias == "unix": alias = _which_unix_compiler(compiler) return import_class("compilers", alias, "Compiler", CompilerBase)( current_compiler=current_compiler ) def _which_unix_compiler(compiler): """ Find which Unix compiler is "cc", "c++". Default to GCC if no other found. Parameters ---------- compiler : str Compiler Name Returns ------- str: Detected compiler Name """ from subprocess import Popen, PIPE try: version_str = ( Popen( [compiler if compiler != "unix" else "cc", "--version"], stdout=PIPE, universal_newlines=True, ) .stdout.read() .lower() ) except OSError: return "gcc" if "clang" in version_str: return "llvm" return "gcc" def _get_arch_and_cpu(arch=None, current_machine=False): """Returns arch and updates CPU linked to compiler. Parameters ---------- arch : str CPU Architecture. If None, use current computer arch, else use specified current_machine : bool If True returns current machine CPU Returns ------- str Architecture compilertools.processors.ProcessorBase subclass Processor instance. 
""" arch = get_arch(arch) return arch, get_processor(arch, current_machine=current_machine) def _order_args_matrix(args_matrix, current_machine=False, current_compiler=False): """Converts args matrix to args ordered dict Parameters ---------- args_matrix : list of CompilerBase.Arg result from self._compile_args_matrix or self._link_args_matrix current_machine : bool If True, return only arguments compatibles with current machine (conditions from "import_if"). current_compiler : bool If True, return only arguments compatibles with current compiler (conditions from "build_if"). Returns ------- collections.OrderedDict with keys and values as str Arguments matrix. Keys are suffixes, values are compiler arguments. """ args_combinations = OrderedDict() for args in product(*args_matrix): args_list = [] suffix_list = [] is_compatible = True for arg in args: if current_machine: is_compatible = not (not arg.import_if or not is_compatible) if current_compiler: is_compatible = not (not arg.build_if or not is_compatible) if not is_compatible: break arg_arg = arg.args if arg_arg: if isinstance(arg_arg, str): args_list.append(arg_arg) else: args_list.extend(arg_arg) arg_suffix = arg.suffix if arg_suffix: suffix_list.append(arg_suffix.replace(".", "_").replace("-", "_")) if is_compatible: args_combinations["-".join(suffix_list)] = args_list return args_combinations class CompilerBase(BaseClass): """Base class for compiler""" Arg = namedtuple("Argument", "args suffix import_if build_if") Arg.__new__.__defaults__ = ("", "", True, True) Arg.__doc__ = """ Compiler argument. Parameters ---------- args : list of str arguments sent to compiler (ex "-flto -w"). suffix : str suffix related to this argument in compiled file name. import_if : bool condition that must be True for importing file compiled with this argument (ex architecture compatibility). Default value is True. 
build_if : bool Condition that must be True for compile file with this argument and the current compiler (Ex compiler version). Default value is True.""" def __init__(self, current_compiler=False): BaseClass.__init__(self) self["current_compiler"] = current_compiler self._default["current_compiler"] = False self._default["version"] = 0.0 def _compile_args_matrix(self, arch, cpu): """Returns available compiler arguments for the specified CPU architecture as a matrix. Override to define matrix. Parameters ---------- arch : str CPU Architecture. cpu : compilertools.processors.ProcessorBase subclass Processor instance Returns ------- list of CompilerBase.Arg Arguments matrix.""" raise NotImplementedError def _compile_args_current_machine(self, arch, cpu): """Defines optimized arguments for current machine. By default, gets the best options from compile_args method. Override to define another behavior. Parameters ---------- arch : str CPU Architecture. cpu : compilertools.processors.ProcessorBase subclass Processor instance. Returns ------- str Best compiler arguments for current machine.""" args = _order_args_matrix( self._compile_args_matrix(arch, cpu), current_machine=True ) if not args: return [] return args[list(args)[0]] def compile_args(self, arch=None, current_machine=False): """Gets compiler args list for a specific architecture. Parameters ---------- arch : str Target architecture name. current_machine : bool If True, returns only arguments compatibles with current machine (conditions from "Arg.import_if"). Returns ------- collections.OrderedDict with keys and values as str Arguments matrix. 
Keys are suffixes, values are compiler arguments.""" return _order_args_matrix( self._compile_args_matrix( *_get_arch_and_cpu(arch, current_machine=current_machine) ), current_machine, self["current_compiler"], ) def compile_args_current_machine(self): """Return compiler arguments optimized by compiler for current machine Returns ------- str Best compiler arguments for current machine.""" return self._compile_args_current_machine( *_get_arch_and_cpu(current_machine=True) ) @BaseClass._memoized_property def name(self): """Compiler type name Returns ------- str Name.""" return self.__module__.rsplit(".", 1)[-1] @BaseClass._memoized_property def api(self): """Compatibles API Returns ------- dict Keys are API names, values are dict of arguments with keys in {'link', 'compile'}.""" return {} @BaseClass._memoized_property def option(self): """Compatibles Options Returns ------- dict Keys are options names, values are dict of arguments with keys in {'link', 'compile'}.""" return {}
{ "content_hash": "f4a32db172694dcee006a241774af671", "timestamp": "", "source": "github", "line_count": 288, "max_line_length": 87, "avg_line_length": 28.256944444444443, "alnum_prop": 0.5844187761120668, "repo_name": "JGoutin/compilertools", "id": "aaa61df4a82ded9033ce24b8d79a15962383a235", "size": "8138", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "compilertools/compilers/_core.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "144292" } ], "symlink_target": "" }
from ripcordclient.common import base
from ripcordclient.common import exception

# Attribute names accepted by the create and update endpoints.
CREATE_ATTRIBUTES = [
    'disabled',
    'name',
]

UPDATE_ATTRIBUTES = CREATE_ATTRIBUTES


class Domain(base.Resource):
    """A single domain resource."""

    def __repr__(self):
        return '<Domain %s>' % self._info


class DomainManager(base.Manager):
    """CRUD operations on domain resources."""

    resource_class = Domain

    @staticmethod
    def _path(uuid=None):
        # Member URL when a uuid is given, collection URL otherwise.
        if uuid:
            return '/v1/domains/%s' % uuid
        return '/v1/domains'

    def __validated(self, attributes, **kwargs):
        # Reject any keyword argument that is not a known attribute,
        # then hand back the accepted keywords as a plain dict.
        for key in kwargs:
            if key not in attributes:
                raise exception.InvalidAttribute(attr=key)
        return dict(kwargs)

    def create(self, **kwargs):
        body = self.__validated(CREATE_ATTRIBUTES, **kwargs)
        return self._create(self._path(), body)

    def delete(self, uuid):
        return self._delete(self._path(uuid=uuid))

    def get(self, uuid):
        # An empty listing for the member URL means the domain is absent.
        try:
            return self._list(self._path(uuid=uuid))[0]
        except IndexError:
            return None

    def list(self):
        return self._list(self._path())

    def update(self, uuid, **kwargs):
        body = self.__validated(UPDATE_ATTRIBUTES, **kwargs)
        return self._update(self._path(uuid=uuid), body)
{ "content_hash": "431865a9870d01af21bd19d0355b7c44", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 65, "avg_line_length": 24.746268656716417, "alnum_prop": 0.5922798552472859, "repo_name": "kickstandproject/python-ripcordclient", "id": "a8aaad6308b7a5744487bc0628ce51aab5bd992a", "size": "2267", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ripcordclient/v1/domain.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "119715" } ], "symlink_target": "" }
# -*- coding: utf-8 -*-
"""Tornado handlers for the pet pages: adoption, home, awards and daily care."""
__author__ = 'ghost'

import tornado.web

from app.helper import BaseRequestHandler, has_pet_cache, set_pet_cache, BaseApiRequestHandler
from app.helper import get_to_tomorrow
from app.libs import router
from app.models.admin import Award, Banner
from app.models.home import Pet
from settings import logger, rdb


@router.Route('/home')
class HomeHandler(BaseRequestHandler):
    """Personal home page for the user's adopted pet."""

    @tornado.web.authenticated
    def get(self, *args, **kwargs):
        uid = self.current_user
        # No adoption yet (neither cached nor stored): go to the adoption list.
        if not has_pet_cache(uid) and not Pet.findone(uid=uid):
            return self.redirect('/pets')
        # Refresh the adoption cache.
        set_pet_cache(uid)
        # Load the adopter's pet info; missing info also means no adoption.
        user_join_pet_info = Pet.get_info(uid)
        if not user_join_pet_info:
            return self.redirect('/pets')
        user_join_pet_info.update(nickname=user_join_pet_info['nickname'].decode('unicode-escape'))
        self.render('home.html', user_join_pet_info=user_join_pet_info)


@router.Route('/pets')
class PetsHandler(BaseRequestHandler):
    """Adoption list page."""

    @tornado.web.authenticated
    def get(self):
        # Render the adoption list page.
        self.render("pets.html")


@router.Route('/pet/(?P<ptype>\w+)')
class PetHandler(BaseRequestHandler):
    """Adoption endpoint; redirects to the personal page on success."""

    @tornado.web.authenticated
    def get(self, ptype):
        uid = self.current_user
        # Already adopted: redirect straight to the personal page.
        if has_pet_cache(uid) or Pet.findone(uid=uid):
            logger.info('user {} have get pet'.format(uid))
            return self.redirect('/home')
        # Create the adoption record.
        pet = Pet(type=ptype, uid=uid)
        try:
            row = pet.insert()
            logger.info('insert pet success {}'.format(row))
            set_pet_cache(uid)
        except Exception as e:
            self.set_status(500)
            logger.error('insert pet error {}'.format(e))
            return self.redirect('/pets')
        self.redirect('/home')


@router.Route('/awards')
class AwardsHandler(BaseRequestHandler):
    """Awards list page with remaining stock per award."""

    @tornado.web.authenticated
    def get(self, *args, **kwargs):
        uid = self.current_user
        pet = Pet.findone(uid=uid)
        score = pet['score'] if pet else 0
        awards = Award.get_all_use()
        for awd in awards:
            key = 'aid:{}'.format(awd['id'])
            # Remaining stock is the length of the award's code list in redis.
            # Query it once (the previous code called rdb.llen twice).
            count = int(rdb.llen(key) or 0)
            setattr(awd, 'count', count)
        banners = Banner.findall(status=1)
        logger.info('user {} current score {}'.format(uid, score))
        self.render('awards.html', awards=awards, banners=banners)


@router.Route('/awards/(?P<aid>\d+)')
class AwardsCodeHandler(BaseApiRequestHandler):
    """Redeem an award: check eligibility, pop a code, deduct points."""

    @tornado.web.authenticated
    def post(self, aid):
        uid = self.current_user
        pet = Pet.findone(uid=uid)
        # No pet adopted yet.
        if not pet:
            self.set_status(400)
            result = dict(code=40021, msg=u'尚未领取宝贝哦')
            return self.jsonify(result)
        award = Award.findone(id=aid, status=1)
        # Award has been taken offline.
        if not award:
            self.set_status(400)
            result = dict(code=40022, msg=u'奖品已经下线')
            return self.jsonify(result)
        key = 'aid:{}'.format(award['id'])
        # Out of stock.
        if int(rdb.llen(key)) == 0:
            self.set_status(400)
            result = dict(code=40022, msg=u'来晚了,被领光啦')
            return self.jsonify(result)
        # Not enough points to redeem.
        # NOTE(review): no 400 status is set on this branch — confirm intentional.
        if award['score'] > pet['score']:
            result = dict(code=40022, msg=u'点数不够哦,陪宝宝多玩几天吧')
            return self.jsonify(result)
        # Pop a code and record the win while deducting points; on failure the
        # code is pushed back so it is not lost.
        award_code = rdb.lpop(key)
        if Pet.wining(uid=uid, aid=award['id'], score=award['score'], code=award_code):
            logger.info("insert winning success award code {}".format(award_code))
            result = dict(code=40027, msg=u'兑换成功!', award_code=award_code, provide=award['provide'])
        else:
            rdb.lpush(key, award_code)
            logger.warning('insert wining failed')
            self.set_status(500)
            result = dict(code=40012, msg=u'网络错误,请重试')
        return self.jsonify(result)


@router.Route('/api/v1/keep')
class KeepHandler(BaseApiRequestHandler):
    """Daily care API.

    ``type`` is one of: eat (feed), wash (bathe), game (play).
    Each action may be performed only once per day.
    """

    @tornado.web.authenticated
    def get(self):
        uid = self.current_user
        type_ = self.get_argument('type', None)
        if not type_:
            self.set_status(400)
            result = dict(code=40011, msg=u'缺少type参数')
            return self.jsonify(result)
        keep_info = self.keep_map(type_)
        key = "uid:{}:keep:{}".format(uid, type_)
        # Atomically count today's attempts; the counter expires at midnight.
        times = rdb.incr(key)
        if times == 1:
            rdb.expire(key, get_to_tomorrow())
        else:
            logger.warning('user{} have try times {}'.format(uid, times))
            result = dict(code=40010,
                          msg=u'每天只能{}一次哦!'.format(keep_info['name']))
            return self.jsonify(result)
        try:
            row = Pet.keep(uid=uid, score=keep_info['score'])
            logger.info('user {} keep pet {}'.format(uid, row))
        except Exception as e:
            self.set_status(500)
            logger.error('keep pet error {}'.format(e))
            result = dict(code=40012, msg=u'网络错误,请重试')
            return self.jsonify(result)
        self.set_status(201)
        self.jsonify({})

    @staticmethod
    def keep_map(type_):
        # Maps an action type to its display name and the points it earns.
        maps = {
            'eat': {'name': u'投食', 'score': 2},
            'wash': {'name': u'洗澡', 'score': 3},
            'game': {'name': u'游戏', 'score': 5},
        }
        return maps[type_]
{ "content_hash": "82f912c9020b15e61f2cc1375d82acab", "timestamp": "", "source": "github", "line_count": 191, "max_line_length": 100, "avg_line_length": 30.240837696335078, "alnum_prop": 0.5618074792243767, "repo_name": "rsj217/tongdao", "id": "6acd4633696a5286aa15d6cdf11a1febc82d67a0", "size": "6208", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/views/home.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "59870" }, { "name": "HTML", "bytes": "40085" }, { "name": "JavaScript", "bytes": "203637" }, { "name": "Python", "bytes": "84144" } ], "symlink_target": "" }
import scrapy
import json
import re

from locations.items import GeojsonPointItem


class CircleKSpider(scrapy.Spider):
    """Scrapes Circle K store locations from the chain's store-locator API."""

    name = "circle_k"
    allowed_domains = ["www.circlek.com"]
    start_urls = (
        'https://www.circlek.com/copy_new_stores.php?lat=36.70099&lng=-95.93642399999999&distance=16000&services=',
    )

    def parse(self, response):
        stores = json.loads(response.body_as_unicode())

        for store in stores:
            lat = store['latitude']
            lon = store['longitude']
            # Store-detail page URL is derived from city, address and cost center.
            website = 'https://www.circlek.com/store-locator/us/{}/{}/{}'.format(
                store['city'].lower(),
                store['address'].lower().replace(' ', '-'),
                store['cost_center'],
            )

            yield GeojsonPointItem(
                ref=store['id'],
                lat=float(lat) if lat else None,
                lon=float(lon) if lon else None,
                addr_full=store['address'],
                website=website,
                city=store['city'],
            )
{ "content_hash": "c87ec5a109ae7f796707c16324dc120a", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 115, "avg_line_length": 33.25, "alnum_prop": 0.5319548872180451, "repo_name": "iandees/all-the-places", "id": "8fed8b6ec3a4d8873897b2997101465a6e6178dc", "size": "1088", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "locations/spiders/circle_k.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "2134" }, { "name": "Python", "bytes": "116132" }, { "name": "Shell", "bytes": "4477" } ], "symlink_target": "" }
from mock import MagicMock
import pytest

import f5_cccl.resource.ltm.monitor.http_monitor as target


@pytest.fixture
def http_config():
    """Monitor configuration used by the tests below."""
    return {
        "name": "test_monitor",
        "partition": "Test",
        "interval": 1,
        "timeout": 10,
        "send": "GET /\r\n",
        "recv": "SERVER",
    }


@pytest.fixture
def bigip():
    """A mocked BIG-IP management connection."""
    return MagicMock()


def test_create_w_defaults(http_config):
    monitor = target.HTTPMonitor(
        name=http_config['name'],
        partition=http_config['partition'])

    assert monitor
    assert monitor.name == "test_monitor"
    assert monitor.partition == "Test"

    # Omitted settings fall back to the monitor's built-in defaults.
    data = monitor.data
    expected_defaults = {
        'interval': 5,
        'timeout': 16,
        'send': "GET /\\r\\n",
        'recv': "",
    }
    for attr, value in expected_defaults.items():
        assert data.get(attr) == value


def test_create_w_config(http_config):
    monitor = target.HTTPMonitor(**http_config)

    assert monitor
    assert monitor.name == "test_monitor"
    assert monitor.partition == "Test"

    # Explicit settings override the defaults.
    data = monitor.data
    expected = {
        'interval': 1,
        'timeout': 10,
        'send': "GET /\r\n",
        'recv': "SERVER",
    }
    for attr, value in expected.items():
        assert data.get(attr) == value


def test_get_uri_path(bigip, http_config):
    monitor = target.HTTPMonitor(**http_config)
    assert monitor._uri_path(bigip) == bigip.tm.ltm.monitor.https.http


def test_create_icr_monitor(http_config):
    assert isinstance(target.IcrHTTPMonitor(**http_config), target.HTTPMonitor)


def test_create_api_monitor(http_config):
    assert isinstance(target.ApiHTTPMonitor(**http_config), target.HTTPMonitor)


def test_create_monitors_invalid(http_config):
    # An interval larger than the timeout is accepted by the ICR monitor
    # but rejected by the API monitor.
    http_config['interval'] = 30

    icr_monitor = target.IcrHTTPMonitor(**http_config)
    assert isinstance(icr_monitor, target.IcrHTTPMonitor)

    with pytest.raises(ValueError):
        target.ApiHTTPMonitor(**http_config)
{ "content_hash": "319d2257a21015051505be117db91554", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 58, "avg_line_length": 26.164556962025316, "alnum_prop": 0.6477987421383647, "repo_name": "f5devcentral/f5-cccl", "id": "f8e07b98c63b3b49e0c6a1322fa190c0a5eeaeb7", "size": "2681", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "f5_cccl/resource/ltm/monitor/test/test_http_monitor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "484942" }, { "name": "Shell", "bytes": "5416" } ], "symlink_target": "" }
"""Test a tflite model using random input data.""" from __future__ import print_function from absl import flags import numpy as np import tensorflow as tf flags.DEFINE_string('model_path', None, 'Path to model.') FLAGS = flags.FLAGS def main(_): flags.mark_flag_as_required('model_path') # Load TFLite model and allocate tensors. interpreter = tf.lite.Interpreter(model_path=FLAGS.model_path) interpreter.allocate_tensors() # Get input and output tensors. input_details = interpreter.get_input_details() print('input_details:', input_details) output_details = interpreter.get_output_details() print('output_details:', output_details) # Test model on random input data. input_shape = input_details[0]['shape'] # change the following line to feed into your own data. input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) print(output_data) if __name__ == '__main__': tf.app.run()
{ "content_hash": "b144546d77c2c5e2b17c0be5baaacca0", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 79, "avg_line_length": 28.86842105263158, "alnum_prop": 0.7192342752962625, "repo_name": "alexgorban/models", "id": "9725541f0f951c85a5d716f911514f31bcf1ea42", "size": "1787", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "research/lstm_object_detection/test_tflite_model.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "1619012" }, { "name": "Dockerfile", "bytes": "9821" }, { "name": "GLSL", "bytes": "976" }, { "name": "HTML", "bytes": "147010" }, { "name": "JavaScript", "bytes": "33316" }, { "name": "Jupyter Notebook", "bytes": "454746" }, { "name": "Makefile", "bytes": "4933" }, { "name": "Python", "bytes": "16363107" }, { "name": "Shell", "bytes": "144095" }, { "name": "Starlark", "bytes": "148029" } ], "symlink_target": "" }
"""Initial schema migration for the links app: creates the Link model."""
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    # First migration of this app, so it depends on nothing.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Link',
            fields=[
                # Standard auto-created surrogate primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(verbose_name='Link for the url')),
                ('url', models.URLField(verbose_name='Destination URL')),
                # Stamped once on insert (auto_now_add), never updated.
                ('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Added at')),
                ('title', models.CharField(max_length=100, verbose_name='Title')),
                ('description', models.TextField(blank=True, verbose_name='Description')),
                # Hidden from forms/admin (editable=False) and indexed,
                # so rows can be ordered/filtered by hit count cheaply.
                ('hits', models.IntegerField(db_index=True, default=0, editable=False, null=True, verbose_name='Hits')),
                ('language', models.CharField(blank=True, choices=[('he', 'Hebrew'), ('en', 'English')], max_length=5, null=True, verbose_name='Link Language')),
            ],
            options={
                'verbose_name': 'Link',
                'verbose_name_plural': 'Links',
                # Newest links first by default.
                'ordering': ('-create_date',),
            },
        ),
    ]
{ "content_hash": "246527df30e84d0b0910a73b9e14f2e0", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 161, "avg_line_length": 40.46875, "alnum_prop": 0.5606177606177606, "repo_name": "MeirKriheli/debian.org.il", "id": "3e362439c8cf3c479d533911d2312910934e4bfc", "size": "1366", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/links/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3500" }, { "name": "HTML", "bytes": "6576" }, { "name": "Python", "bytes": "28325" } ], "symlink_target": "" }
import unittest

from mock import patch, Mock

from the_ark import picard_client

picard_client_obj = None


class UtilsTestCase(unittest.TestCase):
    """Tests for PicardClient.send_to_picard over a mocked requests.post."""

    def setUp(self):
        # Fresh client instance for every test.
        self.picard_client_obj = picard_client.PicardClient()

    @staticmethod
    def _fake_response(status, body):
        """Build a Mock standing in for a requests.Response."""
        resp = Mock()
        resp.status_code = status
        resp.text = body
        return resp

    @patch('requests.post')
    def test_send_to_picard(self, requests_post):
        requests_post.return_value = self._fake_response(
            200, '{"first_name": "bill"}')
        expected = {"first_name": "bill"}

        # With explicitly constructed headers.
        self.assertEqual(
            self.picard_client_obj.send_to_picard(
                'url', 'data', self.picard_client_obj.create_headers()),
            expected)
        # With the default headers argument.
        self.assertEqual(
            self.picard_client_obj.send_to_picard('url', 'data'),
            expected)

    @patch('requests.post')
    def test_send_to_picard_400(self, requests_post):
        failure = self._fake_response(400, 'tish broke')
        requests_post.return_value = failure

        # A 400 response must raise PicardClientException.
        self.assertRaises(picard_client.PicardClientException,
                          self.picard_client_obj.send_to_picard,
                          '', '', '')

        # A non-2xx redirect status (303) must raise as well.
        failure.status_code = 303
        self.assertRaises(picard_client.PicardClientException,
                          self.picard_client_obj.send_to_picard,
                          '', '', '')
{ "content_hash": "b2249ebe47b6b221615863d651125ae3", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 113, "avg_line_length": 33.44736842105263, "alnum_prop": 0.5790715971675846, "repo_name": "meltmedia/the-ark", "id": "282d3083b7721737ad60092ff741a68dc07b9557", "size": "1271", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/test_picard_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "3371" }, { "name": "Makefile", "bytes": "341" }, { "name": "Python", "bytes": "360524" } ], "symlink_target": "" }
""" CDP tests """ from scapy.packet import Packet from scapy.all import ShortField, StrField from scapy.layers.l2 import Dot3, LLC, SNAP from scapy.contrib.cdp import CDPMsgDeviceID, CDPMsgSoftwareVersion, \ CDPMsgPlatform, CDPMsgPortID, CDPv2_HDR from framework import VppTestCase from scapy.all import raw from re import compile from time import sleep from util import ppp import platform import sys import unittest """ TestCDP is a subclass of VPPTestCase classes. CDP test. """ class CustomTLV(Packet): """ Custom TLV protocol layer for scapy """ fields_desc = [ ShortField("type", 0), ShortField("length", 4), StrField("value", "") ] class TestCDP(VppTestCase): """ CDP Test Case """ nen_ptr = compile(r"not enabled") cdp_ptr = compile(r"^([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)$") err_ptr = compile(r"^([\d]+)\s+([-\w]+)\s+([ -\.\w)(]+)$") @property def device_id(self): return platform.node() @property def version(self): return platform.release() @property def port_id(self): return self.interface.name @property def platform(self): return platform.system() @classmethod def setUpClass(cls): super(TestCDP, cls).setUpClass() try: cls.create_pg_interfaces(range(1)) cls.interface = cls.pg_interfaces[0] cls.interface.admin_up() cls.interface.config_ip4() cls.interface.resolve_arp() except Exception: super(TestCDP, cls).tearDownClass() raise @classmethod def tearDownClass(cls): super(TestCDP, cls).tearDownClass() def test_enable_cdp(self): self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1)) ret = self.vapi.cli("show cdp") self.logger.info(ret) not_enabled = self.nen_ptr.search(ret) self.assertFalse(not_enabled, "CDP isn't enabled") def test_send_cdp_packet(self): self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1)) self.send_packet(self.create_packet()) neighbors = list(self.show_cdp()) self.assertTrue(neighbors, "CDP didn't register neighbor") port, system = neighbors[0] length = min(len(system), len(self.device_id)) self.assert_equal(port, 
self.port_id, "CDP received invalid port id") self.assert_equal(system[:length], self.device_id[:length], "CDP received invalid device id") def test_cdp_underflow_tlv(self): self.send_bad_packet(3, ".") def test_cdp_overflow_tlv(self): self.send_bad_packet(8, ".") def send_bad_packet(self, l, v): self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1)) self.send_packet(self.create_bad_packet(l, v)) errors = list(self.show_errors()) self.assertTrue(errors) expected_errors = False for count, node, reason in errors: if (node == u'cdp-input' and reason == u'cdp packets with bad TLVs' and int(count) >= 1): expected_errors = True break self.assertTrue(expected_errors, "CDP didn't drop bad packet") def send_packet(self, packet): self.logger.debug(ppp("Sending packet:", packet)) self.interface.add_stream(packet) self.pg_start() def create_base_packet(self): packet = (Dot3(src=self.interface.remote_mac, dst="01:00:0c:cc:cc:cc") / LLC(dsap=0xaa, ssap=0xaa, ctrl=0x03) / SNAP()/CDPv2_HDR()) return packet def create_packet(self): packet = (self.create_base_packet() / CDPMsgDeviceID(val=self.device_id) / CDPMsgSoftwareVersion(val=self.version) / CDPMsgPortID(iface=self.port_id) / CDPMsgPlatform(val=self.platform)) return packet def create_bad_packet(self, tl=4, tv=""): packet = (self.create_base_packet() / CustomTLV(type=1, length=tl, value=tv)) return packet def process_cli(self, exp, ptr): for line in self.vapi.cli(exp).split('\n')[1:]: m = ptr.match(line.strip()) if m: yield m.groups() def show_cdp(self): for pack in self.process_cli("show cdp", self.cdp_ptr): try: port, system, _, _ = pack except ValueError: pass else: yield port, system def show_errors(self): for pack in self.process_cli("show errors", self.err_ptr): try: count, node, reason = pack except ValueError: pass else: yield count, node, reason
{ "content_hash": "ceb42765eada31c98dacfb30f2e874d4", "timestamp": "", "source": "github", "line_count": 172, "max_line_length": 77, "avg_line_length": 28.8953488372093, "alnum_prop": 0.5637826961770623, "repo_name": "vpp-dev/vpp", "id": "1bc67c45c3e3dae89bc28d0a57c79bb15c6436b5", "size": "4993", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/plugins/cdp/test/test_cdp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "16871" }, { "name": "C", "bytes": "21411560" }, { "name": "C++", "bytes": "2210928" }, { "name": "CMake", "bytes": "179409" }, { "name": "CSS", "bytes": "847" }, { "name": "Emacs Lisp", "bytes": "111146" }, { "name": "Go", "bytes": "13884" }, { "name": "HTML", "bytes": "612" }, { "name": "Lua", "bytes": "79974" }, { "name": "M4", "bytes": "257" }, { "name": "Makefile", "bytes": "120923" }, { "name": "Objective-C", "bytes": "50546" }, { "name": "Python", "bytes": "3767934" }, { "name": "Ruby", "bytes": "8015" }, { "name": "Shell", "bytes": "106805" } ], "symlink_target": "" }
VERSION='1.00'
import os,sys,time
import optparse
from RecoLuminosity.LumiDB import pileupParser
from RecoLuminosity.LumiDB import selectionParser
from RecoLuminosity.LumiDB import csvLumibyLSParser
from math import exp
from math import sqrt

def parseInputFile(inputfilename):
    '''
    Parse a pileup JSON file with pileupParser.

    output ({run:[ls:[inlumi, meanint]]})
    '''
    # NOTE(review): the file handle is read once and never closed.
    selectf=open(inputfilename,'r')
    inputfilecontent=selectf.read()
    p=pileupParser.pileupParser(inputfilecontent)

#    p=inputFilesetParser.inputFilesetParser(inputfilename)

    runlsbyfile=p.runsandls()
    return runlsbyfile

##############################
## ######################## ##
## ## ################## ## ##
## ## ## Main Program ## ## ##
## ## ################## ## ##
## ######################## ##
##############################

if __name__ == '__main__':

    # Rescale per-LS pileup luminosities by the ratio of HLT-recorded to
    # pileup-file luminosity, and emit a new pileup JSON file.
    parser = optparse.OptionParser ("Usage: %prog [--options]",
                                    description = "Script to rescale pileup distributions using inputs derived by calculating luminosity for a given set of HLT paths. Input format must be -lumibyls-")
#
#    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "Pileup Lumi Calculation",formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    CalculationModeChoices = ['truth', 'observed']

    #
    # parse arguments
    #
    #
    # basic arguments
    #
    #parser.add_argument('action',choices=allowedActions,
    #                    help='command actions')
    parser.add_option('-o',dest='outputfile',action='store',
                      default='PileupRecalcJSON.txt',
                      help='output pileup JSON file')
    parser.add_option('-i',dest='inputfile',action='store',
                      help='Input Run/LS/lumis file for your trigger selection (required)')
    parser.add_option('--inputLumiJSON',dest='inputLumiJSON',action='store',
                      help='Input Lumi/Pileup file in JSON format (required)')
    parser.add_option('--verbose',dest='verbose',action='store_true',help='verbose mode for printing' )

    # parse arguments
    try:
        (options, args) = parser.parse_args()
    except Exception , e:
        print e
#    if not args:
#        parser.print_usage()
#        sys.exit()
#    if len (args) != 1:
#        parser.print_usage()
#        raise RuntimeError, "Exactly one output file must be given"
#    output = args[0]

#    options=parser.parse_args()

    if options.verbose:
        print 'General configuration'
        print '\toutputfile: ',options.outputfile
        print '\tinput selection file: ',options.inputfile

    # Per-LS HLT luminosity for the selected trigger paths (lumibyls CSV).
    #inpf = open (options.inputfile, 'r')
    #inputfilecontent = inpf.read()
    inputRange = csvLumibyLSParser.csvLumibyLSParser (options.inputfile).runsandls()
    #print 'number of runs processed %d' % csvLumibyLSParser.csvLumibyLSParser (options.inputfile).numruns()

    #inputRange=inputFilesetParser.inputFilesetParser(options.inputfile)

    inputPileupRange=parseInputFile(options.inputLumiJSON)

    # now, we have to find the information for the input runs and LumiSections
    # in the Lumi/Pileup list. First, loop over inputs
    # The output JSON is assembled by string concatenation into OUTPUTLINE.

    OUTPUTLINE = ""
    OUTPUTLINE+='{'

    for (run, lslist) in sorted (inputRange.iteritems() ):
        # now, look for matching run, then match lumi sections
        #print "searching for run %d" % (run)
        if run in inputPileupRange.keys():
            OUTPUTLINE+= ('"%d":' % run )
            OUTPUTLINE+= ' ['

            LSPUlist = inputPileupRange[run]
            #print "LSPUlist", LSPUlist
            for LSnumber in lslist:
                if LSnumber in LSPUlist.keys():
                    PUlumiInfo = LSPUlist[LSnumber]
                    HLTlumiInfo = lslist[LSnumber]
                    #print "found LS %d" % (LSnumber)
                    #print HLTlumiInfo
                    scale = 0
                    if PUlumiInfo[0] > 0.:
                        scale=HLTlumiInfo[1]/PUlumiInfo[0]  # rescale to HLT recorded Lumi

                    # Scale factors noticeably above 1 indicate inconsistent
                    # inputs; warn and clamp.
                    if scale > 1.001:
                        print 'Run %d, LS %d, HLT Scale (%f), HLTL (%f), PUL (%f) larger than one - please check!' \
                            % (run, LSnumber, scale, HLTlumiInfo[1],PUlumiInfo[0])
                        scale=1.01  # HLT integrated values are wrong, punt

                    newIntLumi = scale*PUlumiInfo[0]
                    newRmsLumi = PUlumiInfo[1]
                    newInstLumi = PUlumiInfo[2]

                    if scale == 0:
                        newInstLumi = PUlumiInfo[2]  # keep non-zero value, with zero weight
                                                     # to avoid spike at zero interactions
                    #print PUlumiInfo[0],HLTlumiInfo[1]
                    LumiString = "[%d,%2.4e,%2.4e,%2.4e]," % (LSnumber, newIntLumi, newRmsLumi ,newInstLumi)
                    OUTPUTLINE += LumiString
                    #for study
                    #print '%d %d %f %f' % (run,LSnumber,PUlumiInfo[0],HLTlumiInfo[1])
                else:  # no match, use zero for int lumi
                    newInstLumi = 10.0  # keep non-zero value, with zero weight
                                        # to avoid spike at zero interactions
                    #print PUlumiInfo[0],HLTlumiInfo[1]
                    LumiString = "[%d,0.0,0.0,%2.4e]," % (LSnumber, newInstLumi)
                    OUTPUTLINE += LumiString

            # Drop the trailing comma of the last LS entry and close the run.
            lastindex=len(OUTPUTLINE)-1
            trunc = OUTPUTLINE[0:lastindex]
            OUTPUTLINE = trunc
            OUTPUTLINE += '], '

        else:  # trouble
            print "Run %d not found in Lumi/Pileup input file. Check your files!" % (run)

#            print run
#            print lslist

#        histFile = ROOT.TFile.Open (output, 'recreate')

#        if not histFile:
#            raise RuntimeError, \
#                 "Could not open '%s' as an output root file" % output
#        pileupHist.Write()
        #for hist in histList:
        #    hist.Write()
#        histFile.Close()
#  sys.exit()

    # Drop the trailing ", " of the last run entry and close the JSON object.
    lastindex=len(OUTPUTLINE)-2
    trunc = OUTPUTLINE[0:lastindex]
    OUTPUTLINE = trunc
    OUTPUTLINE += ' }'

    outputfile = open(options.outputfile,'w')
    if not outputfile:
        raise RuntimeError, \
              "Could not open '%s' as an output JSON file" % output

    outputfile.write(OUTPUTLINE)
    outputfile.close()

    sys.exit()
{ "content_hash": "365ebe72f8444d03f31e093e34ed367a", "timestamp": "", "source": "github", "line_count": 176, "max_line_length": 201, "avg_line_length": 37.01136363636363, "alnum_prop": 0.553269880257906, "repo_name": "iamjakob/lumiCalc", "id": "f6ac3951d98daff181dff50263a01a609de42180", "size": "6536", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "LumiDB/scripts/pileupReCalc_HLTpaths.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1179775" }, { "name": "Shell", "bytes": "6905" } ], "symlink_target": "" }
import sys
import os

# Workload description and destination for simulator output.
workload_dir = "/Users/Anderson/GoogleDrive/NOCulator/hring/src/bin/workload_list/"
workload = "homo_8x8"
out_dir = "./results/homo/8x8/design/"

# Make sure the results directory exists before the first run.
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Launch simulation runs 1..30, one output file per run index.
run_id = 1
while run_id <= 30:
    result_name = "sim_" + str(run_id) + ".out"
    cmd = ("mono ./sim.exe -config ./config_qos.txt -output "
           + out_dir + result_name
           + " -workload " + workload_dir + workload
           + " " + str(run_id))
    os.system(cmd)
    run_id += 1
{ "content_hash": "168b8e3d8758211b94c24693a232a460", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 153, "avg_line_length": 33.333333333333336, "alnum_prop": 0.65, "repo_name": "anderson1008/NOCulator", "id": "7152ca8824de2d1950b3c066dae22d5cd6d30e69", "size": "519", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hring/src/Script/NAS/sim_act_8x8.py", "mode": "33261", "license": "mit", "language": [ { "name": "C#", "bytes": "4349444" }, { "name": "Coq", "bytes": "57347" }, { "name": "Makefile", "bytes": "19889" }, { "name": "Python", "bytes": "299164" }, { "name": "Shell", "bytes": "18976" }, { "name": "Stata", "bytes": "411" }, { "name": "SystemVerilog", "bytes": "10102" }, { "name": "Tcl", "bytes": "87988" }, { "name": "Verilog", "bytes": "1629040" } ], "symlink_target": "" }
""" Django settings for project project. Generated by 'django-admin startproject' using Django 1.10. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'm3@vm^t4jw3*i827_(@-lf_t0zlw1&r^@uf&@^8ak3)_sk4dpa' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'core', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.jinja2.Jinja2', 'DIRS': ['jinja2'], 'APP_DIRS': True, 'OPTIONS': { 'variable_start_string': '[{', 'variable_end_string': '}]', }, }, { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 
'project.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' ANGULAR_URL = '/ng/' ANGULAR_ROOT = os.path.join(BASE_DIR, 'ngApp/')
{ "content_hash": "dc9ef0c5bc5e6d6900b8d66fe965d80b", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 91, "avg_line_length": 25.191176470588236, "alnum_prop": 0.6652072387624052, "repo_name": "BOOLRon/lovelypic", "id": "965ef84ab06e70260f27983099c4bbef8c73065f", "size": "3426", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "project/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "102402" }, { "name": "HTML", "bytes": "7992" }, { "name": "JavaScript", "bytes": "32737" }, { "name": "Python", "bytes": "32220" }, { "name": "TypeScript", "bytes": "3782" } ], "symlink_target": "" }
import xyz


class Ncurses(xyz.Package):
    """Build and install rules for the ncurses package."""

    pkg_name = 'ncurses'

    def configure(self):
        # Shared-library build, without the C++ binding.
        opts = ('--with-shared', '--without-cxx-binding')
        self.host_lib_configure(*opts, enable_shared=True)

    def install(self):
        """Run the base install, then prune unwanted output trees."""
        super().install()
        # Remove man pages, terminfo/tabset data and the installed
        # binaries from the packaged result, in this order.
        for doomed in (('{prefix_dir}', 'man'),
                       ('{prefix_dir}', 'share', 'terminfo'),
                       ('{prefix_dir}', 'share', 'tabset'),
                       ('{eprefix_dir}', 'bin')):
            self.rmtree(*doomed)


rules = Ncurses
{ "content_hash": "32ffbf8d84ecfebdff4618e1c9466ba5", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 93, "avg_line_length": 26.58823529411765, "alnum_prop": 0.5951327433628318, "repo_name": "BreakawayConsulting/xyz", "id": "eaceed119b871cbddc5fb59bf59e44041ffddbe7", "size": "452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rules/ncurses.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "58183" } ], "symlink_target": "" }
from collections import OrderedDict import functools import re from typing import ( Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, ) from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.api_core.client_options import ClientOptions from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore import pkg_resources try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import error_details_pb2 # type: ignore from google.cloud.bigquery_migration_v2alpha.services.migration_service import pagers from google.cloud.bigquery_migration_v2alpha.types import ( migration_entities, migration_error_details, migration_metrics, migration_service, ) from .client import MigrationServiceClient from .transports.base import DEFAULT_CLIENT_INFO, MigrationServiceTransport from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport class MigrationServiceAsyncClient: """Service to handle EDW migrations.""" _client: MigrationServiceClient DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT migration_subtask_path = staticmethod(MigrationServiceClient.migration_subtask_path) parse_migration_subtask_path = staticmethod( MigrationServiceClient.parse_migration_subtask_path ) migration_workflow_path = staticmethod( MigrationServiceClient.migration_workflow_path ) parse_migration_workflow_path = staticmethod( MigrationServiceClient.parse_migration_workflow_path ) common_billing_account_path = staticmethod( MigrationServiceClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( 
MigrationServiceClient.parse_common_billing_account_path ) common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) parse_common_folder_path = staticmethod( MigrationServiceClient.parse_common_folder_path ) common_organization_path = staticmethod( MigrationServiceClient.common_organization_path ) parse_common_organization_path = staticmethod( MigrationServiceClient.parse_common_organization_path ) common_project_path = staticmethod(MigrationServiceClient.common_project_path) parse_common_project_path = staticmethod( MigrationServiceClient.parse_common_project_path ) common_location_path = staticmethod(MigrationServiceClient.common_location_path) parse_common_location_path = staticmethod( MigrationServiceClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: MigrationServiceAsyncClient: The constructed client. """ return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: MigrationServiceAsyncClient: The constructed client. 
""" return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @classmethod def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[ClientOptions] = None ): """Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the client cert source is None. (2) if `client_options.client_cert_source` is provided, use the provided one; if the default client cert source exists, use the default one; otherwise the client cert source is None. The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the default mTLS endpoint; if the environment variabel is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. More details can be found at https://google.aip.dev/auth/4114. Args: client_options (google.api_core.client_options.ClientOptions): Custom options for the client. Only the `api_endpoint` and `client_cert_source` properties may be used in this method. Returns: Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ return MigrationServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> MigrationServiceTransport: """Returns the transport used by the client instance. Returns: MigrationServiceTransport: The transport used by the client instance. 
""" return self._client.transport get_transport_class = functools.partial( type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient) ) def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the migration service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.MigrationServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. 
""" self._client = MigrationServiceClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def create_migration_workflow( self, request: Optional[ Union[migration_service.CreateMigrationWorkflowRequest, dict] ] = None, *, parent: Optional[str] = None, migration_workflow: Optional[migration_entities.MigrationWorkflow] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> migration_entities.MigrationWorkflow: r"""Creates a migration workflow. .. code-block:: python # This snippet has been automatically generated and should be regarded as a # code template only. # It will require modifications to work: # - It may require correct/in-range values for request initialization. # - It may require specifying regional endpoints when creating the service # client as shown in: # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import bigquery_migration_v2alpha async def sample_create_migration_workflow(): # Create a client client = bigquery_migration_v2alpha.MigrationServiceAsyncClient() # Initialize request argument(s) request = bigquery_migration_v2alpha.CreateMigrationWorkflowRequest( parent="parent_value", ) # Make the request response = await client.create_migration_workflow(request=request) # Handle the response print(response) Args: request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.CreateMigrationWorkflowRequest, dict]]): The request object. Request to create a migration workflow resource. parent (:class:`str`): Required. The name of the project to which this migration workflow belongs. Example: ``projects/foo/locations/bar`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. migration_workflow (:class:`google.cloud.bigquery_migration_v2alpha.types.MigrationWorkflow`): Required. 
                The migration workflow to create.

                This corresponds to the ``migration_workflow`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.cloud.bigquery_migration_v2alpha.types.MigrationWorkflow:
                A migration workflow which specifies what
                needs to be done for an EDW migration.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, migration_workflow])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.CreateMigrationWorkflowRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if migration_workflow is not None:
            request.migration_workflow = migration_workflow

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): no default retry is configured for this mutating RPC
        # (only a 60 s timeout) — it is retried only if the caller passes an
        # explicit `retry`, unlike the read-only RPCs below.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_migration_workflow,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_migration_workflow(
        self,
        request: Optional[
            Union[migration_service.GetMigrationWorkflowRequest, dict]
        ] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> migration_entities.MigrationWorkflow:
        r"""Gets a previously created migration workflow.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_get_migration_workflow():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.GetMigrationWorkflowRequest(
                    name="name_value",
                )

                # Make the request
                response = await client.get_migration_workflow(request=request)

                # Handle the response
                print(response)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.GetMigrationWorkflowRequest, dict]]):
                The request object. A request to get a previously
                created migration workflow.
            name (:class:`str`):
                Required. The unique identifier for the migration
                workflow. Example:
                ``projects/123/locations/us/workflows/1234``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.cloud.bigquery_migration_v2alpha.types.MigrationWorkflow:
                A migration workflow which specifies what
                needs to be done for an EDW migration.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.GetMigrationWorkflowRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # Read-only RPC: retried on transient UNAVAILABLE errors by default.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_migration_workflow,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=120.0,
            ),
            default_timeout=120.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_migration_workflows(
        self,
        request: Optional[
            Union[migration_service.ListMigrationWorkflowsRequest, dict]
        ] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListMigrationWorkflowsAsyncPager:
        r"""Lists previously created migration workflow.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_list_migration_workflows():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.ListMigrationWorkflowsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_migration_workflows(request=request)

                # Handle the response
                async for response in page_result:
                    print(response)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.ListMigrationWorkflowsRequest, dict]]):
                The request object. A request to list previously created
                migration workflows.
            parent (:class:`str`):
                Required. The project and location of the migration
                workflows to list. Example: ``projects/123/locations/us``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.cloud.bigquery_migration_v2alpha.services.migration_service.pagers.ListMigrationWorkflowsAsyncPager:
                Response object for a ListMigrationWorkflows
                call.
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.ListMigrationWorkflowsRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_migration_workflows,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=120.0,
            ),
            default_timeout=120.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListMigrationWorkflowsAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def delete_migration_workflow(
        self,
        request: Optional[
            Union[migration_service.DeleteMigrationWorkflowRequest, dict]
        ] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes a migration workflow by name.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_delete_migration_workflow():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.DeleteMigrationWorkflowRequest(
                    name="name_value",
                )

                # Make the request
                await client.delete_migration_workflow(request=request)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.DeleteMigrationWorkflowRequest, dict]]):
                The request object. A request to delete a previously
                created migration workflow.
            name (:class:`str`):
                Required. The unique identifier for the migration
                workflow. Example:
                ``projects/123/locations/us/workflows/1234``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.DeleteMigrationWorkflowRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): like `create_migration_workflow`, this mutating RPC has
        # no default retry — only a 60 s timeout.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_migration_workflow,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def start_migration_workflow(
        self,
        request: Optional[
            Union[migration_service.StartMigrationWorkflowRequest, dict]
        ] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts a previously created migration workflow. I.e.,
        the state transitions from DRAFT to RUNNING. This is a
        no-op if the state is already RUNNING. An error will be
        signaled if the state is anything other than DRAFT or
        RUNNING.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_start_migration_workflow():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.StartMigrationWorkflowRequest(
                    name="name_value",
                )

                # Make the request
                await client.start_migration_workflow(request=request)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.StartMigrationWorkflowRequest, dict]]):
                The request object. A request to start a previously
                created migration workflow.
            name (:class:`str`):
                Required. The unique identifier for the migration
                workflow. Example:
                ``projects/123/locations/us/workflows/1234``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.StartMigrationWorkflowRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.start_migration_workflow,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=120.0,
            ),
            default_timeout=120.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def get_migration_subtask(
        self,
        request: Optional[
            Union[migration_service.GetMigrationSubtaskRequest, dict]
        ] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> migration_entities.MigrationSubtask:
        r"""Gets a previously created migration subtask.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_get_migration_subtask():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.GetMigrationSubtaskRequest(
                    name="name_value",
                )

                # Make the request
                response = await client.get_migration_subtask(request=request)

                # Handle the response
                print(response)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.GetMigrationSubtaskRequest, dict]]):
                The request object. A request to get a previously
                created migration subtasks.
            name (:class:`str`):
                Required. The unique identifier for the migration
                subtask. Example:
                ``projects/123/locations/us/workflows/1234/subtasks/543``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.cloud.bigquery_migration_v2alpha.types.MigrationSubtask:
                A subtask for a migration which
                carries details about the configuration
                of the subtask. The content of the
                details should not matter to the end
                user, but is a contract between the
                subtask creator and subtask worker.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.GetMigrationSubtaskRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_migration_subtask,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=120.0,
            ),
            default_timeout=120.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_migration_subtasks(
        self,
        request: Optional[
            Union[migration_service.ListMigrationSubtasksRequest, dict]
        ] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListMigrationSubtasksAsyncPager:
        r"""Lists previously created migration subtasks.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import bigquery_migration_v2alpha

            async def sample_list_migration_subtasks():
                # Create a client
                client = bigquery_migration_v2alpha.MigrationServiceAsyncClient()

                # Initialize request argument(s)
                request = bigquery_migration_v2alpha.ListMigrationSubtasksRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_migration_subtasks(request=request)

                # Handle the response
                async for response in page_result:
                    print(response)

        Args:
            request (Optional[Union[google.cloud.bigquery_migration_v2alpha.types.ListMigrationSubtasksRequest, dict]]):
                The request object. A request to list previously created
                migration subtasks.
            parent (:class:`str`):
                Required. The migration task of the subtasks to list.
                Example: ``projects/123/locations/us/workflows/1234``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.cloud.bigquery_migration_v2alpha.services.migration_service.pagers.ListMigrationSubtasksAsyncPager:
                Response object for a ListMigrationSubtasks
                call.
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = migration_service.ListMigrationSubtasksRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_migration_subtasks,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=120.0,
            ),
            default_timeout=120.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListMigrationSubtasksAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def __aenter__(self):
        """Return the client itself for use as an async context manager."""
        return self

    async def __aexit__(self, exc_type, exc, tb):
        """Close the underlying transport when the context exits."""
        await self.transport.close()


# DEFAULT_CLIENT_INFO is passed as the `client_info` of every wrapped RPC
# above; fall back to a version-less ClientInfo when the distribution
# metadata is unavailable (e.g. a source checkout).
# NOTE(review): pkg_resources is deprecated in modern setuptools —
# presumably importlib.metadata is the replacement here, but confirm against
# the GAPIC generator before hand-editing generated code.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-bigquery-migration",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("MigrationServiceAsyncClient",)
{ "content_hash": "7a2a5a98ca4c8218cc8ebf40bdd74e76", "timestamp": "", "source": "github", "line_count": 1033, "max_line_length": 144, "avg_line_length": 40.43756050338819, "alnum_prop": 0.607057358996457, "repo_name": "googleapis/python-bigquery-migration", "id": "88a4858ddb8f190b60441b16158a18cb4ad8de05", "size": "42372", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "google/cloud/bigquery_migration_v2alpha/services/migration_service/async_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "715104" }, { "name": "Shell", "bytes": "30696" } ], "symlink_target": "" }
from typing import Dict

from pandas import DataFrame, concat

from lib.data_source import DataSource
from lib.utils import table_rename
from lib.cast import safe_int_cast, numeric_code_as_string
from lib.time import datetime_isoformat

# Shared mapping from the source's numeric sex codes to our canonical labels.
_SEX_ADAPTER = {"0": "male", "1": "female"}


class CataloniaMunicipalitiesDataSource(DataSource):
    """Parses per-municipality confirmed-case counts published by Catalonia."""

    def parse_dataframes(
        self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Return daily new_confirmed rows keyed by municipality plus an
        aggregated ``ES_CT`` region-level total.
        """
        data = table_rename(
            dataframes[0],
            {
                "TipusCasData": "date",
                # "ComarcaCodi": "comarca_code",
                # "ComarcaDescripcio": "comarca_name",
                "MunicipiCodi": "subregion2_code",
                "MunicipiDescripcio": "subregion2_name",
                "SexeCodi": "sex",
                # "SexeDescripcio": "sex",
                "TipusCasDescripcio": "_case_type",
                "NumCasos": "new_confirmed",
            },
            drop=True,
        )

        # Remove "suspect" cases
        data = data[data["_case_type"] != "Sospitós"].drop(columns=["_case_type"])

        # Use placeholder code for unknown values
        data.loc[data["subregion2_code"].isna(), "subregion2_code"] = "00000"

        # Region codes need cleaning up (zero-padding to 5 digits) to match INE codes
        data["subregion2_code"] = data["subregion2_code"].apply(
            lambda x: numeric_code_as_string(x, 5)
        )

        # Derive key from subregion code
        data["key"] = "ES_CT_" + data["subregion2_code"]

        # Parse sex, date and numeric values
        data["sex"] = data["sex"].apply(lambda x: _SEX_ADAPTER.get(x, "sex_unknown"))
        data["date"] = data["date"].apply(lambda x: datetime_isoformat(x, "%d/%m/%Y"))
        data["new_confirmed"] = data["new_confirmed"].apply(safe_int_cast)

        # Aggregate manually since some municipalities are clumped together if they are too small.
        # NOTE(review): the groupby-sum also folds non-numeric columns; presumably
        # pandas drops them as nuisance columns here — confirm on pandas upgrades.
        ccaa = data.drop(columns=["subregion2_code"]).groupby(["date", "sex"]).sum().reset_index()
        ccaa["key"] = "ES_CT"

        # Remove unnecessary data (rows with the unknown-municipality placeholder)
        data = data[data["key"] != "ES_CT_00000"]

        return concat([ccaa, data])


class CataloniaHealthDeptDataSource(DataSource):
    """Parses region-level confirmed-case counts by age bucket and sex."""

    def parse_dataframes(
        self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Return daily new_confirmed rows for the ``ES_CT`` region, stratified
        by age range and sex.
        """
        data = table_rename(
            dataframes[0],
            {
                "TipusCasData": "date",
                "SexeCodi": "sex",
                # "SexeDescripcio": "sex",
                "EdatRang": "age",
                "TipusCasDescripcio": "_case_type",
                "NumCasos": "new_confirmed",
            },
            drop=True,
        )

        # Remove "suspect" cases
        data = data[data["_case_type"] != "Sospitós"].drop(columns=["_case_type"])

        # Derive key from subregion code
        data["key"] = "ES_CT"

        # Parse age, sex, date and numeric values.
        # FIX: use a raw string for the regex — "90\+" is an invalid escape
        # sequence in a plain literal (DeprecationWarning, SyntaxWarning on
        # 3.12+); regex=True is made explicit to pin pandas' behavior.
        data["age"] = data["age"].str.replace(r"90\+", "90-", regex=True)
        data["sex"] = data["sex"].apply(lambda x: _SEX_ADAPTER.get(x, "sex_unknown"))
        data["date"] = data["date"].apply(lambda x: datetime_isoformat(x, "%d/%m/%Y"))
        data["new_confirmed"] = data["new_confirmed"].apply(safe_int_cast)

        return data
{ "content_hash": "ae621a30366c3d888d8e0b90d3a72693", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 98, "avg_line_length": 36.24731182795699, "alnum_prop": 0.5585879560961139, "repo_name": "GoogleCloudPlatform/covid-19-open-data", "id": "9d2059c1ec0b535c75d5dbba961c1359797f7723", "size": "3949", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/pipelines/epidemiology/es_ct_authority.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1839" }, { "name": "Python", "bytes": "901210" }, { "name": "Shell", "bytes": "10370" } ], "symlink_target": "" }
"""Referee configuration for this CheckiO mission.

``CheckiOReferee`` checks user code against ``TESTS``. Relevant knobs:

    tests -- dict of test cases (see tests.py for the structure).
    cover_code -- per-runtime wrapper applied before calling the user
        function; ``cover_codes.unwrap_args`` unpacks a list-style test
        input into positional arguments, so an input of ``[2, 2]`` calls
        ``chase(2, 2)`` instead of ``chase([2, 2])``.
    checker -- replaces the default ``==`` comparison;
        ``checkers.float_comparison(digits)`` compares floats to the given
        number of significant digits after the comma.
    add_allowed_modules / remove_allowed_modules / add_close_builtins --
        adjust which modules and builtins user code may touch (unused here).
"""
from checkio import api
from checkio.referees import checkers, cover_codes
from checkio.referees.io import CheckiOReferee
from checkio.signals import ON_CONNECT

from tests import TESTS

# Unwrap list-style test inputs into positional args on both runtimes.
_COVER_CODE = {
    'python-27': cover_codes.unwrap_args,  # or None
    'python-3': cover_codes.unwrap_args,
}

_referee = CheckiOReferee(
    tests=TESTS,
    cover_code=_COVER_CODE,
    function_name="chase",
    # Floating-point answers are accepted to 8 significant digits.
    checker=checkers.float_comparison(8),
)

api.add_listener(ON_CONNECT, _referee.on_ready)
{ "content_hash": "0832ca933bcf8bdb22eda84dfa083636", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 102, "avg_line_length": 46.659574468085104, "alnum_prop": 0.6999544003647971, "repo_name": "Bryukh-Checkio-Tasks/checkio-mission-achilles-tortoise", "id": "3b6b9b2dcaaa71bb8746fc14ee98e29d0d4c1341", "size": "2193", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "verification/referee.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3722" }, { "name": "HTML", "bytes": "11679" }, { "name": "JavaScript", "bytes": "4854" }, { "name": "Python", "bytes": "5331" } ], "symlink_target": "" }
import json

import requests
from django.core.management.base import BaseCommand

from census.models import APISetting
from census.models import CensusInfo


class Command(BaseCommand):
    """Management command that fetches housing variables from the 2010
    Census SF1 API for every APISetting that has not been imported yet,
    stores one CensusInfo row per ZCTA, and marks the setting as used.
    """

    help = 'Imports data from API if not done before'

    def handle(self, *args, **options):
        """Run the import for every APISetting with api_used != 'true'."""
        calls = APISetting.objects.exclude(api_used='true')
        for call in calls:
            # Build the API URL from the settings on this APISetting row.
            # FIX: the original built this via a tuple + ''.join(map(str, ...))
            # with redundant str() wrapping; plain concatenation is clearer
            # and produces the identical URL.
            variables = call.housingvariable_set.values_list(
                'housing_variable', flat=True
            )
            api_call = (
                'http://api.census.gov/data/2010/sf1?key=' + str(call.api_key)
                + '&get=' + ','.join(variables)
                + '&for=zip+code+tabulation+area:*'
                + '&in=state:' + call.state_selected
            )

            # First row of the response is the header; the rest are data rows.
            data_received = requests.get(api_call).json()
            keys = data_received[0]

            # FIX: the original reused one mutable dict across rows and filled
            # it with a range(len(...)) index loop; dict(zip(...)) builds a
            # fresh, identical mapping per row.
            for row in data_received[1:]:
                census_parsed = json.dumps(dict(zip(keys, row)))
                CensusInfo(census_info=census_parsed, api_view_id=call.id).save()

            # Mark this APISetting as imported so it is skipped next run.
            call.api_used = 'true'
            call.save()
{ "content_hash": "ea8cb3434322b7fa4f9f178554d4f36e", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 111, "avg_line_length": 37.38095238095238, "alnum_prop": 0.5929936305732484, "repo_name": "escobar022/cens_django", "id": "fe454db88b3553f96d386c0d60ea42bbd047dd6e", "size": "1570", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "census/management/commands/censuscall.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1237" }, { "name": "HTML", "bytes": "7036" }, { "name": "JavaScript", "bytes": "2182" }, { "name": "Python", "bytes": "14384" } ], "symlink_target": "" }
"""mysite URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.views.generic.base import TemplateView

from myapp import views as myappviews
from consultantform import views as myviews

urlpatterns = [
    # Admin and authentication
    url(r'^admin/', admin.site.urls),  # fwdslash
    url(r'^home/$', myappviews.home, name="home"),
    url(r'^$', myappviews.auth_view, name="auth_view"),  # addedslash
    url(r'^login/$', myappviews.auth_view, name="auth_view"),
    url(r'^logout/$', myappviews.getout, name="getout"),
    url(r'^invalid/$', myappviews.invalid_login, name="invalid_login"),
    url(r'^clients/$', myappviews.clients, name="clients"),

    # Form deletion/reset endpoints ("ram*" views)
    url(r'^ram/(?P<consultantform_id>\d+)$', myviews.ram, name="ram"),
    url(r'^ram1/(?P<consultantform_id>\d+)$', myviews.ram1, name="ram1"),
    url(r'^ram2/(?P<consultantform_id>\d+)$', myviews.ram2, name="ram2"),
    url(r'^ram3/(?P<consultantform_id>\d+)$', myviews.ram3, name="ram3"),
    url(r'^ramm/(?P<checklistform_id>\d+)$', myviews.ramm, name="ramm"),
    url(r'^rammm/(?P<projectform_id>\d+)$', myviews.rammm, name="rammm"),

    # Listing pages
    url(r'^consultantformss/(?P<a_id>\d+)/$', myviews.consultantformss, name='consultantformss'),
    url(r'^projects/(?P<a_id>\d+)$', myviews.projects, name='projects'),
    url(r'^finalformss/(?P<a_id>\d+)/$', myviews.finalformss, name='finalformss'),

    # Form detail pages
    # FIX: the original declared this exact pattern twice (same regex, view,
    # and name); Django always matches the first, so the duplicate was dead
    # and has been removed.
    url(r'^consultantformss/(?P<a_id>\d+)/get/(?P<consultantform_id>\d+)/$', myviews.consultantform, name='consultantform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getbr/(?P<consultantform_id>\d+)/$', myviews.branchform, name='branchform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getsu/(?P<consultantform_id>\d+)/$', myviews.subsidiaryform, name='subsidiaryform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getre/(?P<consultantform_id>\d+)/$', myviews.relatedform, name='relatedform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpr/(?P<consultantform_id>\d+)/$', myviews.productform, name='productform'),
    url(r'^consultantformss/(?P<a_id>\d+)/gets/(?P<checklistform_id>\d+)/$', myviews.checklistform, name='checklistform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getp/(?P<projectform_id>\d+)/$', myviews.projectform, name='projectform'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<serviceform_id>\d+)/$', myviews.serviceforms, name='serviceforms'),
    url(r'^consultantformss/(?P<a_id>\d+)/gett/(?P<form_id>\d+)/$', myviews.finalform, name='finalform'),

    # Per-project sub-form detail pages
    url(r'^consultantformss/(?P<a_id>\d+)/getd/(?P<project_id>\d+)/(?P<dueform_id>\d+)/$', myviews.sam, name='sam'),
    url(r'^consultantformss/(?P<a_id>\d+)/getsc/(?P<project_id>\d+)/(?P<scriptform_id>\d+)/$', myviews.samm, name='samm'),
    url(r'^consultantformss/(?P<a_id>\d+)/getst/(?P<project_id>\d+)/(?P<strategyform_id>\d+)/$', myviews.sammm, name='sammm'),
    url(r'^consultantformss/(?P<a_id>\d+)/getps/(?P<project_id>\d+)/(?P<psform_id>\d+)/$', myviews.dam, name='dam'),
    url(r'^consultantformss/(?P<a_id>\d+)/getdi/(?P<project_id>\d+)/(?P<digiform_id>\d+)/$', myviews.damm, name='damm'),
    url(r'^consultantformss/(?P<a_id>\d+)/getm/(?P<project_id>\d+)/(?P<miomform_id>\d+)/$', myviews.dammm, name='dammm'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpd/(?P<project_id>\d+)/(?P<dueform_id>\d+)/$', myviews.samp, name='samp'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpsc/(?P<project_id>\d+)/(?P<scriptform_id>\d+)/$', myviews.sammp, name='sammp'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpst/(?P<project_id>\d+)/(?P<strategyform_id>\d+)/$', myviews.sammmp, name='sammmp'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpps/(?P<project_id>\d+)/(?P<psform_id>\d+)/$', myviews.damp, name='damp'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpdi/(?P<project_id>\d+)/(?P<digiform_id>\d+)/$', myviews.dammp, name='dammp'),
    url(r'^consultantformss/(?P<a_id>\d+)/getpm/(?P<project_id>\d+)/(?P<miomform_id>\d+)/$', myviews.dammmp, name='dammmp'),

    # Form creation pages
    url(r'^consultantformss/(?P<a_id>\d+)/create/$', myviews.create, name='create'),
    url(r'^consultantformss/(?P<a_id>\d+)/create1/$', myviews.create1, name='create1'),
    url(r'^consultantformss/(?P<a_id>\d+)/create2/$', myviews.create2, name='create2'),
    url(r'^consultantformss/(?P<a_id>\d+)/create3/$', myviews.create3, name='create3'),
    url(r'^consultantformss/(?P<a_id>\d+)/checklist/$', myviews.checklist, name='checklist'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/created$', myviews.created, name='created'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createsc$', myviews.createsc, name='createsc'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createst$', myviews.createst, name='createst'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createps$', myviews.createps, name='createps'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createdigi$', myviews.createdigi, name='createdigi'),
    url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createmiom$', myviews.createmiom, name='createmiom'),

    # Static error pages
    # NOTE(review): the 500 route renders "404.html" — looks like a copy/paste
    # slip; confirm whether a dedicated 500 template was intended.
    url(r'^500/$', TemplateView.as_view(template_name="404.html")),
    url(r'^404/$', TemplateView.as_view(template_name="404.html")),
]

# Serve user-uploaded media files in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
{ "content_hash": "4d1451b731b1ef0f5a1988b5dc0a29fe", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 127, "avg_line_length": 66.87912087912088, "alnum_prop": 0.6628327308577062, "repo_name": "rajeshgupta14/pathend", "id": "2faf59679af175d36c0529fd1ae4df2470a26189", "size": "6086", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main/urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "26412" }, { "name": "HTML", "bytes": "86059" }, { "name": "JavaScript", "bytes": "284160" }, { "name": "Python", "bytes": "183208" } ], "symlink_target": "" }
# Stub implementations of ``os.path`` functions: each returns a fixed,
# type-representative dummy value (empty string, zero, False) instead of
# doing real path manipulation.

def abspath(path):
    """Return a normalized absolutized version of the pathname *path*.  On most
    platforms, this is equivalent to ``normpath(join(os.getcwd(), path))``."""
    return ''

altsep = None

def basename(path):
    """Return the base name of pathname *path*.  This is the second half of the
    pair returned by ``split(path)``.  Note that the result of this function is
    different from the Unix :program:`basename` program; where
    :program:`basename` for ``'/foo/bar/'`` returns ``'bar'``, the
    :func:`basename` function returns an empty string (``''``)."""
    return ''

def commonprefix(list):
    """Return the longest path prefix (taken character-by-character) that is a
    prefix of all paths in *list*.  If *list* is empty, return the empty string
    (``''``).  Note that this may return invalid paths because it works a
    character at a time."""
    # NOTE: the parameter shadows the ``list`` builtin, but it is kept to
    # preserve the public (keyword-callable) signature.
    return ''

curdir = '.'
defpath = ':/bin:/usr/bin'
devnull = '/dev/null'

def dirname(path):
    """Return the directory name of pathname *path*.  This is the first half of
    the pair returned by ``split(path)``."""
    return ''

def exists(path):
    """Return ``True`` if *path* refers to an existing path.  Returns ``False``
    for broken symbolic links.  On some platforms, this function may return
    ``False`` if permission is not granted to execute :func:`os.stat` on the
    requested file, even if the *path* physically exists."""
    return False

def expanduser(path):
    """On Unix and Windows, return the argument with an initial component of
    ``~`` or ``~user`` replaced by that *user*'s home directory."""
    return ''

def expandvars(path):
    """Return the argument with environment variables expanded.  Substrings of
    the form ``$name`` or ``${name}`` are replaced by the value of environment
    variable *name*.  Malformed variable names and references to non-existing
    variables are left unchanged."""
    return ''

extsep = '.'

def getatime(path):
    """Return the time of last access of *path*.  The return value is a number
    giving the number of seconds since the epoch (see the :mod:`time` module).
    Raise :exc:`os.error` if the file does not exist or is inaccessible."""
    return 0.0

def getctime(path):
    """Return the system's ctime which, on some systems (like Unix) is the time
    of the last change, and, on others (like Windows), is the creation time for
    *path*.  The return value is a number giving the number of seconds since
    the epoch (see the :mod:`time` module).  Raise :exc:`os.error` if the file
    does not exist or is inaccessible."""
    return 0.0

def getmtime(path):
    """Return the time of last modification of *path*.  The return value is a
    number giving the number of seconds since the epoch (see the :mod:`time`
    module).  Raise :exc:`os.error` if the file does not exist or is
    inaccessible."""
    return 0.0

def getsize(path):
    """Return the size, in bytes, of *path*.  Raise :exc:`os.error` if the file
    does not exist or is inaccessible."""
    return 0

def isabs(path):
    """Return ``True`` if *path* is an absolute pathname.  On Unix, that means
    it begins with a slash, on Windows that it begins with a (back)slash after
    chopping off a potential drive letter."""
    return False

def isdir(path):
    """Return ``True`` if *path* is an existing directory.  This follows
    symbolic links, so both :func:`islink` and :func:`isdir` can be true for
    the same path."""
    return False

def isfile(path):
    """Return ``True`` if *path* is an existing regular file.  This follows
    symbolic links, so both :func:`islink` and :func:`isfile` can be true for
    the same path."""
    return False

def islink(path):
    """Return ``True`` if *path* refers to a directory entry that is a symbolic
    link.  Always ``False`` if symbolic links are not supported."""
    return False

def ismount(path):
    """Return ``True`` if pathname *path* is a :dfn:`mount point`: a point in a
    file system where a different file system has been mounted.  The function
    checks whether *path*'s parent, :file:`path/..`, is on a different device
    than *path*, or whether :file:`path/..` and *path* point to the same i-node
    on the same device --- this should detect mount points for all Unix and
    POSIX variants."""
    return False

def join(*paths):
    """Join one or more path components intelligently.  If any component is an
    absolute path, all previous components (on Windows, including the previous
    drive letter, if there was one) are thrown away, and joining continues.
    The return value is the concatenation of *path1*, and optionally *path2*,
    etc., with exactly one directory separator (``os.sep``) following each
    non-empty part except the last.  (This means that an empty last part will
    result in a path that ends with a separator.)  Note that on Windows, since
    there is a current directory for each drive,
    ``os.path.join("c:", "foo")`` represents a path relative to the current
    directory on drive :file:`C:` (:file:`c:foo`), not :file:`c:\\foo`."""
    return ''

def lexists(path):
    """Return ``True`` if *path* refers to an existing path.  Returns ``True``
    for broken symbolic links.  Equivalent to :func:`exists` on platforms
    lacking :func:`os.lstat`."""
    return False

def normcase(path):
    """Normalize the case of a pathname.  On Unix and Mac OS X, this returns
    the path unchanged; on case-insensitive filesystems, it converts the path
    to lowercase.  On Windows, it also converts forward slashes to backward
    slashes."""
    # Fixed: previously returned ``0`` — normcase produces a path *string*,
    # consistent with the other string-returning stubs in this module.
    return ''

def normpath(path):
    """Normalize a pathname.  This collapses redundant separators and up-level
    references so that ``A//B``, ``A/B/``, ``A/./B`` and ``A/foo/../B`` all
    become ``A/B``."""
    return '.'

pardir = '..'
pathsep = ':'

def realpath(path):
    """Canonical form of *path* with any symbolic links resolved.

    Stub: always returns the empty string."""
    return ''

def relpath(path, start=''):
    """Filepath of *path* relative to the current directory or to *start*.

    Stub: always returns the empty string."""
    return ''

def samefile(path1, path2):
    """Whether *path1* and *path2* name the same file or directory
    (by device and i-node number); may raise if :func:`os.stat` fails.

    Stub: always returns ``True``."""
    return True

def sameopenfile(fp1, fp2):
    """Whether file descriptors *fp1* and *fp2* refer to the same file.

    Stub: always returns ``True``."""
    return True

def samestat(stat1, stat2):
    """Whether stat tuples *stat1* and *stat2* (from :func:`fstat`,
    :func:`lstat`, or :func:`stat`) describe the same file; this is the
    comparison underlying :func:`samefile` and :func:`sameopenfile`.

    Stub: always returns ``True``."""
    return True

sep = '/'

def split(path):
    """Split *path* into ``(head, tail)``: *tail* is the final component
    (never containing a slash, empty if *path* ends in one) and *head* is
    everything before it, with trailing slashes stripped unless *head* is
    the root.  ``join(head, tail)`` locates the same path.

    Stub: always returns ``('', '')``."""
    return ('', '')

def splitdrive(path):
    """Split *path* into ``(drive, tail)``; *drive* is a drive
    specification or ``''`` on systems without them, and
    ``drive + tail == path``.

    Stub: always returns ``('', '')``."""
    return ('', '')

def splitext(path):
    """Split *path* into ``(root, ext)`` with ``root + ext == path``;
    *ext* is empty or starts with a period and holds at most one.
    Leading periods on the basename are ignored, so
    ``splitext('.cshrc')`` gives ``('.cshrc', '')``.

    Stub: always returns ``('', '')``."""
    return ('', '')

supports_unicode_filenames = False

def walk(path, visit, arg):
    """Invoke ``visit(arg, dirname, names)`` for every directory under
    *path* (including *path* itself if it is a directory), where *names*
    comes from ``os.listdir(dirname)``.  *visit* may edit *names* in
    place to prune the traversal.

    Stub: always returns ``None``."""
    return None
{ "content_hash": "2eeda537f86aa3901958b531726cb8a1", "timestamp": "", "source": "github", "line_count": 208, "max_line_length": 89, "avg_line_length": 44.17307692307692, "alnum_prop": 0.6579233783195473, "repo_name": "kmod/icbd", "id": "09ba102e3b7fdb80a3b36a1b5530d816dfcf1191", "size": "9219", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "stdlib/type_mocks/os/path.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "33042" }, { "name": "C++", "bytes": "35981" }, { "name": "CSS", "bytes": "8888" }, { "name": "JavaScript", "bytes": "3602" }, { "name": "Makefile", "bytes": "48655" }, { "name": "Objective-C", "bytes": "88" }, { "name": "Python", "bytes": "10340340" }, { "name": "Shell", "bytes": "18865" } ], "symlink_target": "" }
"""Tests that non-HD wallets that are upgraded are able to receive via MWEB""" import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.ltc_util import create_non_hd_wallet, setup_mweb_chain from test_framework.util import assert_equal class MWEBWalletUpgradeTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [['-whitelist=noban@127.0.0.1'],[]] # immediate tx relay def skip_test_if_missing_module(self): self.skip_if_no_previous_releases() self.skip_if_no_wallet() def run_test(self): node0 = self.nodes[0] node1 = self.nodes[1] # # Mine until MWEB is activated # self.log.info("Setting up MWEB chain") setup_mweb_chain(node0) self.sync_all() # # Create a non-HD wallet using an older litecoin core version # self.log.info("Creating non-hd wallet") nonhd_wallet_dat = create_non_hd_wallet(self.chain, self.options) # # Replace node1's wallet with the non-HD wallet.dat # self.log.info("Replacing wallet with non-hd wallet.dat") node1.get_wallet_rpc(self.default_wallet_name).unloadwallet() upgrade_wallet_dir = os.path.join(node1.datadir, "regtest", "wallets", self.default_wallet_name) shutil.rmtree(upgrade_wallet_dir) os.mkdir(upgrade_wallet_dir) shutil.copy(nonhd_wallet_dat, upgrade_wallet_dir) node1.loadwallet(self.default_wallet_name) # # Upgrade node1's non-HD wallet to the latest version # self.log.info("Upgrading wallet") node1.upgradewallet() # # Send to MWEB address of upgraded wallet (node1) # self.log.info("Send to upgraded wallet's mweb address") mweb_addr = node1.getnewaddress(address_type='mweb') tx1_id = node0.sendtoaddress(mweb_addr, 25) self.sync_mempools() # # Verify transaction is received by upgraded wallet (node1) # self.log.info("Verify upgraded wallet lists the transaction") tx1 = node1.gettransaction(txid=tx1_id) assert_equal(tx1['confirmations'], 0) assert_equal(tx1['amount'], 25) assert_equal(tx1['details'][0]['address'], mweb_addr) 
node0.generate(1) self.sync_all() # # Verify that MWEB coins can be spent by upgraded wallet (node1) # self.log.info("Spending MWEB coins") mining_mweb_addr = node0.getnewaddress(address_type='mweb') tx2_id = node1.sendtoaddress(mining_mweb_addr, 10) self.sync_mempools() # # Mine 1 block and verify transaction confirms # self.log.info("Mining block to verify it confirms") node0.generate(1) tx2 = node0.gettransaction(txid=tx2_id) assert_equal(tx2['confirmations'], 1) assert_equal(tx2['amount'], 10) assert_equal(tx2['details'][0]['address'], mining_mweb_addr) if __name__ == '__main__': MWEBWalletUpgradeTest().main()
{ "content_hash": "8c8407a86f61c30f7a051157b030423f", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 104, "avg_line_length": 34.365591397849464, "alnum_prop": 0.619837296620776, "repo_name": "litecoin-project/litecoin", "id": "2efdadd4cde577e82d86762b9ec91bf750095489", "size": "3406", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/functional/mweb_wallet_upgrade.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "898000" }, { "name": "C", "bytes": "1594708" }, { "name": "C++", "bytes": "8860047" }, { "name": "CMake", "bytes": "29310" }, { "name": "HTML", "bytes": "21833" }, { "name": "Java", "bytes": "30291" }, { "name": "M4", "bytes": "226003" }, { "name": "Makefile", "bytes": "123607" }, { "name": "Objective-C++", "bytes": "5489" }, { "name": "Python", "bytes": "2267056" }, { "name": "QMake", "bytes": "798" }, { "name": "Sage", "bytes": "31382" }, { "name": "Scheme", "bytes": "7554" }, { "name": "Shell", "bytes": "150309" } ], "symlink_target": "" }
from openerp.tests.common import TransactionCase


class TestUom(TransactionCase):
    """Tests for unit of measure conversion"""

    def setUp(self):
        super(TestUom, self).setUp()
        self.product = self.registry('product.product')
        self.uom = self.registry('product.uom')
        self.imd = self.registry('ir.model.data')

    def _ref(self, xml_id):
        # Resolve an XML id from the ``product`` module to its database id.
        return self.imd.get_object_reference(
            self.cr, self.uid, 'product', xml_id)[1]

    def test_10_conversion(self):
        """Quantity and price conversions between grams and tonnes."""
        cr, uid = self.cr, self.uid
        gram = self._ref('product_uom_gram')
        tonne = self._ref('product_uom_ton')

        converted_qty = self.uom._compute_qty(cr, uid, gram, 1020000, tonne)
        self.assertEquals(
            converted_qty, 1.02, "Converted quantity does not correspond.")

        converted_price = self.uom._compute_price(cr, uid, gram, 2, tonne)
        self.assertEquals(
            converted_price, 2000000.0,
            "Converted price does not correspond.")

    def test_20_rounding(self):
        """Conversion into a coarser unit honours that unit's rounding."""
        cr, uid = self.cr, self.uid
        unit = self._ref('product_uom_unit')
        categ_unit = self._ref('product_uom_categ_unit')

        # A "Score" is 20 units; rounding of 1.0 forces whole scores.
        score_vals = {
            'name': 'Score',
            'factor_inv': 20,
            'uom_type': 'bigger',
            'rounding': 1.0,
            'category_id': categ_unit,
        }
        score = self.uom.create(cr, uid, score_vals)

        converted_qty = self.uom._compute_qty(cr, uid, unit, 2, score)
        self.assertEquals(
            converted_qty, 1, "Converted quantity should be rounded up.")
{ "content_hash": "20b5c8c90fe44accce9013c0ef20fc8b", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 102, "avg_line_length": 41.7027027027027, "alnum_prop": 0.6079066753078418, "repo_name": "diogocs1/comps", "id": "3d3ba04b689a735b401e71c57b265caa44ce2dac", "size": "1543", "binary": false, "copies": "126", "ref": "refs/heads/master", "path": "web/addons/product/tests/test_uom.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "701" }, { "name": "CSS", "bytes": "856533" }, { "name": "HTML", "bytes": "299671" }, { "name": "Java", "bytes": "620166" }, { "name": "JavaScript", "bytes": "5844302" }, { "name": "Makefile", "bytes": "21002" }, { "name": "PHP", "bytes": "14259" }, { "name": "Python", "bytes": "10647376" }, { "name": "Ruby", "bytes": "220" }, { "name": "Shell", "bytes": "17746" }, { "name": "XSLT", "bytes": "120278" } ], "symlink_target": "" }
"""FMOD python bindings.""" from .fmodex import (get_disk_busy, get_memory_stats, initialize_debugging, initialize_memory, set_disk_busy) # Avoid recursive import hell from . import (channel, channel_group, dsp, dsp_connection, geometry, globalvars, reverb, sound, sound_group, system) # import reverb presets from .reverb_presets import REVERB_PRESET, set_reverb_preset # import structures to be used with Resonance Audio plugin from .roomproperties import MaterialNames, RoomProperties from .utils import FmodError __version__ = "0.7.0" c = {} c["DSP"] = dsp.DSP c["DSP_Connection"] = dsp_connection.DSPConnection c["Geometry"] = geometry.Geometry c["Channel"] = channel.Channel c["ChannelGroup"] = channel_group.ChannelGroup c["Reverb3D"] = reverb.Reverb3D c["Sound"] = sound.Sound c["SoundGroup"] = sound_group.SoundGroup c["System"] = system.System globalvars.class_list = c from . import constants System = c["System"]
{ "content_hash": "863d71c8d4de7c14c2b63e794d978b6a", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 75, "avg_line_length": 29.363636363636363, "alnum_prop": 0.7192982456140351, "repo_name": "tyrylu/pyfmodex", "id": "4eb62ab6497ad1e893cd06da8d36a8ec79592c8e", "size": "969", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyfmodex/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "475156" } ], "symlink_target": "" }
from django.views.generic import TemplateView

from gn_django import utils
from .models import ExampleModel
from gn_django.url import utils as url_utils
from gn_django.video import youtube


class Home(TemplateView):
    """Landing page rendered from ``home.j2``."""

    template_name = "home.j2"

    def get_context_data(self):
        context = {}
        context['msg'] = utils.super_helper()
        return context


class Include(TemplateView):
    """Demonstrates template inclusion with several kinds of context values."""

    template_name = "include.j2"

    def get_context_data(self):
        example = ExampleModel('Hello World!')
        context = {
            'string': 'This is a message',
            'obj': example,
            'parent_context': 'Parent context',
        }
        return context


class Youtube(TemplateView):
    """Exercises the youtube helpers against a single video URL."""

    template_name = "youtube.j2"

    def get_context_data(self):
        video_url = 'https://www.youtube.com/watch?v=7pV5r4ePww8'
        video_id = youtube.get_id(video_url)
        default_thumb = youtube.get_thumb(video_url)
        alternate_thumb = youtube.get_thumb(video_url, 2)
        return {
            'url': video_url,
            'id': video_id,
            'thumb': default_thumb,
            'alt_thumb': alternate_thumb,
        }


class URLProtocol(TemplateView):
    """Shows protocol stripping for both http and https URLs."""

    template_name = 'url-protocol.j2'

    def get_context_data(self):
        insecure = 'http://www.eurogamer.net'
        secure = 'https://www.usgamer.net'
        return {
            'http': insecure,
            'http_stripped': url_utils.strip_protocol(insecure),
            'https': secure,
            'https_stripped': url_utils.strip_protocol(secure),
        }


class StaticLinkExtension(TemplateView):
    """Renders the static-link Jinja extension demo template."""

    template_name = 'static-link-extension.j2'
{ "content_hash": "21dacee840ed246391a9701ec41cd867", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 62, "avg_line_length": 26.71698113207547, "alnum_prop": 0.605225988700565, "repo_name": "gamernetwork/gn-django", "id": "f439a74fae51779bfee8b21dfcbdae29ab9e04c1", "size": "1416", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "examples/simple/main/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "317" }, { "name": "Dockerfile", "bytes": "361" }, { "name": "HTML", "bytes": "454" }, { "name": "JavaScript", "bytes": "1804" }, { "name": "Python", "bytes": "153563" }, { "name": "Shell", "bytes": "5024" } ], "symlink_target": "" }
from enum import Enum from . import general # Export public API __all__ = ( 'AllenRelation', 'Interval', ) class AllenRelation(Enum): """ The relations of Allen's interval algebra. https://en.wikipedia.org/wiki/Allen%27s_interval_algebra """ # Inverse relations have constants that are negatives of each other # Sorted order before = -6 # 0: a1 a2 b1 b2 abut_before = -5 # 1: a1 a2=b1 b2 overlap_before = -4 # 2: a1 b1 a2 b2 outside_end = -3 # 3: a1 b1 a2=b2 outside = -2 # 4: a1 b1 b2 a2 # 5: a1=a2=b1 b2 inside_begin = -1 # 6: a1=b1 a2 b2 equal = 0 # 7: a1=b1 a2=b2 outside_begin = 1 # 8: a1=b1 b2 a2 inside = 2 # 9: b1 a1 a2 b2 inside_end = 3 # 10: b1 a1 a2=b2 overlap_after = 4 # 11: b1 a1 b2 a2 # 12: b1 b2=a1=a2 abut_after = 5 # 13: b1 b2=a1 a2 after = 6 # 14: b1 b2 a1 a2 def is_inverse(self, other): return self.value == -other.value def inverse(self): return AllenRelation(-self.value) def _union(itvl1, itvl2): if itvl1.is_empty(): return (itvl2,) elif itvl2.is_empty(): return (itvl1,) elif itvl1.hi < itvl2.lo: return (itvl1, itvl2) elif itvl2.hi < itvl1.lo: return (itvl2, itvl1) elif itvl1.hi == itvl2.lo: if itvl1.is_hi_open and itvl2.is_lo_open: return (itvl1, itvl2) else: return (Interval(itvl1.lo, itvl2.hi, itvl1.is_lo_open, itvl2.is_hi_open),) elif itvl2.hi == itvl1.lo: if itvl2.is_hi_open and itvl1.is_lo_open: return (itvl2, itvl1) else: return (Interval(itvl2.lo, itvl1.hi, itvl2.is_lo_open, itvl1.is_hi_open),) else: lo, lo_open = min((itvl1.lo, itvl1.is_lo_open), (itvl2.lo, itvl2.is_lo_open)) hi, hi_open = max((itvl1.hi, not itvl1.is_hi_open), (itvl2.hi, not itvl2.is_hi_open)) hi_open = not hi_open return (Interval(lo, hi, lo_open, hi_open),) def _intersection_bounds(itvl1, itvl2): lo, lo_open = max((itvl1.lo, itvl1.is_lo_open), (itvl2.lo, itvl2.is_lo_open)) hi, hi_open = min((itvl1.hi, not itvl1.is_hi_open), (itvl2.hi, not itvl2.is_hi_open)) hi_open = not hi_open return (lo, hi, lo_open, hi_open) def _intersection(itvl1, itvl2): lo, hi, lo_open, hi_open = 
_intersection_bounds(itvl1, itvl2) # Empty if lo > hi: return Interval(0, lo_open=True) # Either empty or a point depending on if both are closed elif lo == hi: return Interval(lo, lo_open=(lo_open or hi_open)) # Overlapping else: return Interval(lo, hi, lo_open, hi_open) class Interval: """Interval for any orderable type""" __slots__ = ('_lo', '_hi', '_lopen', '_hopen', '_length', '_key') def __init__( self, lo, hi=None, lo_open=False, hi_open=False, length=None, ): """ Create an interval with the given bounds. The bounds must be orderable. lo: Lower bound. hi: Upper bound. Omit to create a point. lo_open: Whether the interval *excludes* the lower bound. hi_open: Whether the interval *excludes* the upper bound. length: Length of the interval. Useful when the bounds do not support subtraction. A point interval is one where the bounds are equal and closed, e.g. "•(8, 8)•". An empty interval is one where the bounds are equal and open, e.g. "◦(8, 8)◦". If the bounds support subtraction, then the length of the interval will be computed automatically. Otherwise, you can supply your own length. Similarly, an interval can be constructed from `lo` and `length` if the two can be added together to set `hi`. 
Examples: >>> Interval(8) # point interval >>> Interval(8, lo_open=True) # empty interval >>> Interval(0.0, float('inf'), hi_open=True) # non-negative reals: •(0,∞)◦ >>> Interval('a', 'b', length=1) >>> 'b' in Interval('a', 'c') # -> True >>> Interval('b', 'c') in Interval('a', 'd') # -> True >>> Interval('b', 'c').allen_relation(Interval('a', 'd')) # -> <AllenRelation.inside: 2> """ # Default hi to lo or compute it based on the length if hi is None: if length is None: hi = lo else: hi = lo + length # Check that lo <= hi if lo > hi: raise ValueError( 'Bad interval bounds: (lo: {!r}) </= (hi: {!r})' .format(lo, hi)) # Compute the length if not provided and if possible if length is None: try: length = hi - lo except TypeError: pass # Make sure point / empty intervals are sensible: both bounds # must be open (empty) or both must be closed (point), the # length should be zero if lo == hi: lo_open = lo_open or hi_open hi_open = lo_open if length is None: length = 0 # Setup the interval from the sane values self._lo = lo self._hi = hi self._lopen = lo_open self._hopen = hi_open self._length = length self._key = None @property def lo(self): return self._lo @property def hi(self): return self._hi @property def is_lo_open(self): return self._lopen @property def is_hi_open(self): return self._hopen def is_empty(self): return (self._lo == self._hi and self._lopen and self._hopen) def is_point(self): return (self._lo == self._hi and not (self._lopen or self._hopen)) def length(self): return self._length def key(self): if self._key is None: self._key = (self.lo, self.is_lo_open, self.hi, not self.is_hi_open) return self._key def __eq__(self, other): return self is other or ( type(other) is Interval and (self.key() == other.key() or self.is_empty() and other.is_empty())) def __hash__(self): if self.is_empty(): return 0 else: return hash(self.key()) def __lt__(self, other): if other.is_empty(): return False if self.is_empty(): return True return self.key() < other.key() def 
__le__(self, other): if self.is_empty(): return True if other.is_empty(): return False return self.key() <= other.key() def __gt__(self, other): if self.is_empty(): return False if other.is_empty(): return True return self.key() > other.key() def __ge__(self, other): if other.is_empty(): return True if self.is_empty(): return False return self.key() >= other.key() def __repr__(self): return 'Interval({!r}, {!r}, {!r}, {!r}, {!r})'.format( self.lo, self.hi, self.is_lo_open, self.is_hi_open, self.length(), ) def __str__(self): return '{}({}, {}){}'.format( '◦' if self.is_lo_open else '•', self.lo, self.hi, '◦' if self.is_hi_open else '•', ) # Python set API def __contains__(self, item): if isinstance(item, Interval): return item.issubset(self) return (self.lo < item < self.hi or (not self.is_lo_open and self.lo == item) or (not self.is_hi_open and self.hi == item)) def issubset(self, other): if self.is_empty(): return True if self.lo < other.lo or self.hi > other.hi: return False return ((self.lo > other.lo or self.is_lo_open >= other.is_lo_open) and (self.hi < other.hi or self.is_hi_open >= other.is_hi_open)) def union(self, *others): itvls = [self] itvls.extend(others) itvls.sort(key=lambda i: (i.lo, i.hi)) unioned = [itvls[0]] for itvl in itvls[1:]: unioned[-1:] = _union(unioned[-1], itvl) if len(unioned) > 1: return CompoundInterval(*unioned) else: return unioned[0] def intersects(self, other): lo, hi, lo_open, hi_open = _intersection_bounds(self, other) # Empty if lo > hi: return False # Either empty or a point depending on if both are closed elif lo == hi: return not (lo_open or hi_open) # Overlapping else: return True def intersection(self, *others): itvl = self for other in others: itvl = _intersection(itvl, other) if itvl.is_empty(): break return itvl def difference(self, *others): # TODO raise NotImplementedError() def symmetric_difference(self, other): # TODO raise NotImplementedError() # Allen's interval algebra def allen_relation(self, other): # Order 
the lo bound wrt the other bounds. There are 5 # possibilities, so convert to a base 5 number. cmp_lolo = general.cmp(self.lo, other.lo) cmp_lohi = general.cmp(self.lo, other.hi) lo_num = cmp_lolo + cmp_lohi + 2 # Order the hi bound wrt the other bounds cmp_hilo = general.cmp(self.hi, other.lo) cmp_hihi = general.cmp(self.hi, other.hi) hi_num = cmp_hilo + cmp_hihi + 2 # The hi number must be at least the lo number. This limits the # possibilities to [5, 4, 3, 2, 1]. The cumulative sums of this # are the lo bases. Use these facts to calculate the number # corresponding to the Allen relation. lo_bases = [0, 5, 9, 12, 14] allen_num = lo_bases[lo_num] + hi_num - lo_num # Correct for if both lo and hi equal an endpoint of the other # interval. (Allen's algebra doesn't distinguish these cases.) # These are (lo=1, hi=1) -> 5 and (lo=3, hi=3) -> 12. Since 12 # is "abut after", make 5 be "abut before" for symmetry. This # breaks the sorted order but it maintains inverses. if allen_num > 12: allen_num -= 2 elif allen_num > 5: allen_num -= 1 elif allen_num == 5: allen_num = 1 # The Allen number is now in [0:12]. Convert it to an # enumeration number and return the relation. return AllenRelation(allen_num - 6) class CompoundInterval: def __init__(self, *intervals): if not intervals: raise ValueError('No intervals passed to constructor.') _intervals = [] for itvl in intervals: if not isinstance(itvl, Interval): raise TypeError('Not an Interval: {!r}'.format(itvl)) _intervals.append(itvl) self._intervals = sorted(_intervals, key=lambda i: (i.lo, i.hi)) self._hi = max(i.hi for i in self._intervals) @property def lo(self): return self._intervals[0].lo @property def hi(self): return self._hi def is_empty(self): return all(i.is_empty() for i in self._intervals) def length(self): """ Return the sum of the known lengths of the intervals in this compound interval or `None` if all of the lengths are unknown. 
""" lengths = [i.length() for i in self._intervals] lengths = [l for l in lengths if l is not None] if lengths: return sum(lengths) else: return None def __contains__(self, item): return any(i.__contains__(item) for i in self._intervals) def __len__(self): return len(self._intervals) def __iter__(self): return iter(self._intervals) def __repr__(self): return 'CompoundInterval({!r})'.format(self._intervals)
{ "content_hash": "bbf1578e8cea28133fa1c635ca1bea02", "timestamp": "", "source": "github", "line_count": 402, "max_line_length": 97, "avg_line_length": 31.16417910447761, "alnum_prop": 0.5254629629629629, "repo_name": "afbarnard/esal", "id": "0155a596647ef010e97640179379e6f821e2b2d3", "size": "12728", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "esal/interval.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "166716" } ], "symlink_target": "" }
import Gaffer
import GafferImage

# Register UI/help metadata for the ImageContextVariables node: a
# node-level description plus per-plug documentation for "variables".
Gaffer.Metadata.registerNode(

	GafferImage.ImageContextVariables,

	"description",
	"""
	Defines variables which can be referenced by upstream expressions.
	""",

	plugs = {

		"variables" : [

			"description",
			"""
			The variables to be defined - arbitrary numbers of variables can be added here.
			""",

		],

	}

)
{ "content_hash": "39a51932e337ab921144b381b88c84af", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 67, "avg_line_length": 13.37037037037037, "alnum_prop": 0.6759002770083102, "repo_name": "goddardl/gaffer", "id": "739c67fe5b9c0ce15f67a95c3725b9faec194c5d", "size": "2164", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/GafferImageUI/ImageContextVariablesUI.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "2228" }, { "name": "C++", "bytes": "4178625" }, { "name": "GLSL", "bytes": "6250" }, { "name": "Python", "bytes": "4152621" }, { "name": "Shell", "bytes": "8787" }, { "name": "Slash", "bytes": "36371" } ], "symlink_target": "" }
import copy
import datetime
import httplib

import eventlet
from oslo.config import cfg

from neutron import context as ctx
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import log
from neutron.openstack.common import timeutils
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch import plugin
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.common import constants as pconst
from neutron.plugins.ml2 import driver_api as api

# Device owner assigned to ports that represent external (non-Neutron) hosts.
EXTERNAL_PORT_OWNER = 'neutron:external_port'
LOG = log.getLogger(__name__)

# time in seconds to maintain existence of vswitch response
CACHE_VSWITCH_TIME = 60


class BigSwitchMechanismDriver(plugin.NeutronRestProxyV2Base,
                               api.MechanismDriver):

    """Mechanism Driver for Big Switch Networks Controller.

    This driver relays the network create, update, delete
    operations to the Big Switch Controller.
    """

    def initialize(self):
        """Register config options and set up the backend server pool."""
        LOG.debug(_('Initializing driver'))

        # register plugin config opts
        pl_config.register_config()
        self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)

        # backend doesn't support bulk operations yet
        self.native_bulk_support = False

        # init network ctrl connections
        self.servers = servermanager.ServerPool()
        self.servers.get_topo_function = self._get_all_data
        self.servers.get_topo_function_args = {'get_ports': True,
                                               'get_floating_ips': False,
                                               'get_routers': False}
        self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers)
        # Track hosts running IVS to avoid excessive calls to the backend
        self.ivs_host_cache = {}

        LOG.debug(_("Initialization done"))

    def create_network_postcommit(self, context):
        # create network on the network controller
        self._send_create_network(context.current)

    def update_network_postcommit(self, context):
        # update network on the network controller
        self._send_update_network(context.current)

    def delete_network_postcommit(self, context):
        # delete network on the network controller
        self._send_delete_network(context.current)

    def create_port_postcommit(self, context):
        # create port on the network controller; ports with no host ID are
        # skipped (see _prepare_port_for_controller).
        port = self._prepare_port_for_controller(context)
        if port:
            self.async_port_create(port["network"]["tenant_id"],
                                   port["network"]["id"], port)

    def update_port_postcommit(self, context):
        """Relay a port update; on a missing-network 404 from the backend,
        optionally trigger a full topology resynchronization."""
        port = self._prepare_port_for_controller(context)
        if port:
            try:
                self.async_port_create(port["network"]["tenant_id"],
                                       port["network"]["id"], port)
            except servermanager.RemoteRestError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
                            e.status == httplib.NOT_FOUND and
                            servermanager.NXNETWORK in e.reason):
                        ctxt.reraise = False
                        # Typo fix: was "Iconsistency".
                        LOG.error(_("Inconsistency with backend controller "
                                    "triggering full synchronization."))
                        topoargs = self.servers.get_topo_function_args
                        self._send_all_data(
                            send_ports=topoargs['get_ports'],
                            send_floating_ips=topoargs['get_floating_ips'],
                            send_routers=topoargs['get_routers'],
                            triggered_by_tenant=port["network"]["tenant_id"]
                        )

    def delete_port_postcommit(self, context):
        # delete port on the network controller
        port = context.current
        net = context.network.current
        self.servers.rest_delete_port(net["tenant_id"], net["id"], port['id'])

    def _prepare_port_for_controller(self, context):
        """Return a controller-ready copy of the current port, or False when
        the port carries no host ID (the controller ignores unbound ports)."""
        # make a copy so the context isn't changed for other drivers
        port = copy.deepcopy(context.current)
        net = context.network.current
        port['network'] = net
        port['bound_segment'] = context.bound_segment
        actx = ctx.get_admin_context()
        prepped_port = self._extend_port_dict_binding(actx, port)
        prepped_port = self._map_state_and_status(prepped_port)
        if (portbindings.HOST_ID not in prepped_port or
                prepped_port[portbindings.HOST_ID] == ''):
            LOG.warning(_("Ignoring port notification to controller because "
                          "of missing host ID."))
            # in ML2, controller doesn't care about ports without
            # the host_id set
            return False
        return prepped_port

    def bind_port(self, context):
        """Marks ports as bound.

        Binds external ports and IVS ports.
        Fabric configuration will occur on the subsequent port update.
        Currently only vlan segments are supported.
        """
        if context.current['device_owner'] == EXTERNAL_PORT_OWNER:
            # TODO(kevinbenton): check controller to see if the port exists
            # so this driver can be run in parallel with others that add
            # support for external port bindings
            for segment in context.network.network_segments:
                if segment[api.NETWORK_TYPE] == pconst.TYPE_VLAN:
                    context.set_binding(
                        segment[api.ID], portbindings.VIF_TYPE_BRIDGE,
                        {portbindings.CAP_PORT_FILTER: False,
                         portbindings.OVS_HYBRID_PLUG: False})
                    return

        # IVS hosts will have a vswitch with the same name as the hostname
        if self.does_vswitch_exist(context.host):
            for segment in context.network.network_segments:
                if segment[api.NETWORK_TYPE] == pconst.TYPE_VLAN:
                    context.set_binding(
                        segment[api.ID], portbindings.VIF_TYPE_IVS,
                        {portbindings.CAP_PORT_FILTER: True,
                         portbindings.OVS_HYBRID_PLUG: False})

    def does_vswitch_exist(self, host):
        """Check if Indigo vswitch exists with the given hostname.

        Returns True if switch exists on backend.
        Returns False if switch does not exist.
        Returns None if backend could not be reached.
        Caches response from backend.
        """
        try:
            return self._get_cached_vswitch_existence(host)
        except ValueError:
            # cache was empty for that switch or expired
            pass

        try:
            self.servers.rest_get_switch(host)
            exists = True
        except servermanager.RemoteRestError as e:
            # Use the named constant for consistency with
            # update_port_postcommit (was a bare literal 404).
            if e.status == httplib.NOT_FOUND:
                exists = False
            else:
                # Another error, return without caching to try again on
                # next binding
                return
        self.ivs_host_cache[host] = {
            'timestamp': datetime.datetime.now(),
            'exists': exists
        }
        return exists

    def _get_cached_vswitch_existence(self, host):
        """Returns cached existence. Old and non-cached raise ValueError."""
        entry = self.ivs_host_cache.get(host)
        if not entry:
            raise ValueError(_('No cache entry for host %s') % host)
        diff = timeutils.delta_seconds(entry['timestamp'],
                                       datetime.datetime.now())
        if diff > CACHE_VSWITCH_TIME:
            # Entry is stale: evict it and force a backend query.
            self.ivs_host_cache.pop(host)
            raise ValueError(_('Expired cache entry for host %s') % host)
        return entry['exists']
{ "content_hash": "565c322f81068f78ce19771c6c4715f5", "timestamp": "", "source": "github", "line_count": 191, "max_line_length": 78, "avg_line_length": 41.69109947643979, "alnum_prop": 0.5970111766922014, "repo_name": "jumpstarter-io/neutron", "id": "dc8c12c33974e233d3e9d84386539cf4bf474ec8", "size": "8736", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "neutron/plugins/ml2/drivers/mech_bigswitch/driver.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
import inspect import importlib from os import path, walk __all__ = ["ALL_AGENTS"] # # All Agents TABLE ALL_AGENTS = {} MODULES = {} for root, dirs, files in walk("agents_dir"): for file_ in files: name, ext = path.splitext(file_) if ext == ".py" and name != "agents": MODULES[name] = importlib.import_module( "agents_dir.{0}".format(name)) clsmembers = inspect.getmembers(MODULES[name], inspect.isclass) for class_name, class_ in clsmembers: pkg, mod = path.splitext(class_.__module__) if mod == ".{0}".format(name): ALL_AGENTS[name] = class_
{ "content_hash": "710dc60f7d0efe7edd1eff992fc4a92d", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 75, "avg_line_length": 29.217391304347824, "alnum_prop": 0.5565476190476191, "repo_name": "valefranz/AI-Project-VacuumEnvironment", "id": "5862fe8d15b2c49bc075411d84b292b0cd251b4e", "size": "695", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "agents_list.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "432094" } ], "symlink_target": "" }
""" Check sum """ # Standard library modules. from collections import namedtuple import hashlib # Third party modules. # Local modules. # Globals and constants variables. CHECKSUM_ALGORITHM_SHA1 = 'SHA-1' CHECKSUM_ALGORITHM_SUM32 = 'SUM32' class Checksum(namedtuple('Checksum', ['value', 'algorithm'])): def __new__(cls, value, algorithm): if algorithm not in _CHECKSUM_ALGORITHMS: raise ValueError('Unknown algorithm: %s' % algorithm) value = value.upper() return cls.__bases__[0].__new__(cls, value, algorithm) def calculate_checksum_sha1(buffer): sha1 = hashlib.sha1() sha1.update(buffer) value = sha1.hexdigest() return Checksum(value, CHECKSUM_ALGORITHM_SHA1) def calculate_checksum_sum32(buffer): try: sumbytes = sum(buffer) except: # Python 2 sumbytes = sum(map(ord, buffer)) value = hex(sumbytes)[2:34].zfill(32) return Checksum(value, CHECKSUM_ALGORITHM_SUM32) _CHECKSUM_ALGORITHMS = {CHECKSUM_ALGORITHM_SHA1: calculate_checksum_sha1, CHECKSUM_ALGORITHM_SUM32: calculate_checksum_sum32} def calculate_checksum(algorithm, buffer): try: method = _CHECKSUM_ALGORITHMS[algorithm.upper()] except KeyError: raise ValueError('Unknown checksum algorithm: %s' % algorithm) else: return method(buffer)
{ "content_hash": "fa98b0648ee30d7a76ab52b084bf2eb5", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 75, "avg_line_length": 27.755102040816325, "alnum_prop": 0.6720588235294118, "repo_name": "pyhmsa/pyhmsa", "id": "e5e3e9832d184a6f1bd17a2cc6b6e06f2bbb54d2", "size": "1360", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyhmsa/type/checksum.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "569193" } ], "symlink_target": "" }
from .request import make_request from flask_paginate import Pagination PER_PAGE = 9 def paginate(arr, page, offset): if page < 0: page = 1 start = (page - 1) * offset end = start + offset if len(arr) < end: end = len(arr) - 1 return arr[start:end] def paginate_data(req_type, order_by, page): order_by = 'asc' if order_by is None else order_by data = make_request({ 'type': req_type, 'by_date': order_by }) matches = paginate(data, page, PER_PAGE) pagination = Pagination(page=page, total=len(data), per_page=PER_PAGE, record_name='matches', css_framework='bootstrap3') return { 'matches': matches, 'pagination': pagination }
{ "content_hash": "8a2485160c0414541d9a6d6b4e996069", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 55, "avg_line_length": 23.444444444444443, "alnum_prop": 0.5260663507109005, "repo_name": "alexraileanu/worldcup", "id": "2460de0c970e9bfefb5e8156da178f86d432a01d", "size": "844", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "website/utils/paginate.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "58" }, { "name": "HTML", "bytes": "10255" }, { "name": "JavaScript", "bytes": "1454" }, { "name": "Python", "bytes": "8092" } ], "symlink_target": "" }
from functools import partial import inspect class KVNest(str): """A subclass of str that allows namespacing keys for use in redis as well as provide partial methods into redis.""" def __new__(cls,*args,**kwargs): """Override's str's __new__ so we can store the redis connection if one is provided""" if 'connection' in kwargs: connection = kwargs['connection'] del kwargs['connection'] obj = super(KVNest,cls).__new__(cls,*args,**kwargs) if 'connection' in locals(): obj._redis = connection return obj def __getitem__(self,key): """Concatenates the key and returns a new KVNest instead of indexing into the string""" return KVNest(':'.join((self,key))) def __getattribute__(self,name): """Transparently returns partially applied versions of redis methods.""" try: return super(KVNest,self).__getattribute__(name) except AttributeError, e: return partial(getattr(self._redis,name),str(self)) # return super(KVNest,self).__getattribute__(name)
{ "content_hash": "f66ae9de089b62eab7a1bce9dc65a258", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 117, "avg_line_length": 34.206896551724135, "alnum_prop": 0.7006048387096774, "repo_name": "anateus/kvnest", "id": "4bbad6e9561d14be18c4e0a0e7f849fca29f5d17", "size": "992", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/kvnest.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1285" } ], "symlink_target": "" }
import sys
import os
import re
import time
import threading

from Xlib import X, XK, display, error
from Xlib.ext import record
from Xlib.protocol import rq
from Xlib.protocol.event import KeyPress
from Xlib.protocol.event import KeyRelease

#######################################################################
########################START CLASS DEF################################
#######################################################################

class HookManager(threading.Thread):
    """This is the main class. Instantiate it, and you can hand it KeyDown
    and KeyUp (functions in your own code) which execute to parse the
    pyxhookkeyevent class that is returned.

    This simply takes these two values for now:
    KeyDown = The function to execute when a key is pressed, if it
    returns anything. It hands the function an argument that is the
    pyxhookkeyevent class.
    KeyUp = The function to execute when a key is released, if it
    returns anything. It hands the function an argument that is the
    pyxhookkeyevent class.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.finished = threading.Event()

        # Give these some initial values
        self.mouse_position_x = 0
        self.mouse_position_y = 0
        # Modifier state; "shift" is used as a counter (False == 0) of
        # currently-held shift-producing keys, "caps" as a boolean.
        self.ison = {"shift":False, "caps":False}

        # Compile our regex statements.
        self.isshift = re.compile('^Shift')
        self.iscaps = re.compile('^Caps_Lock')
        self.shiftablechar = re.compile('^[a-z0-9]$|^minus$|^equal$|^bracketleft$|^bracketright$|^semicolon$|^backslash$|^apostrophe$|^comma$|^period$|^slash$|^grave$')
        self.logrelease = re.compile('.*')
        self.isspace = re.compile('^space$')

        # Assign default function actions (do nothing).
        self.KeyDown = lambda x: True
        self.KeyUp = lambda x: True
        self.MouseAllButtonsDown = lambda x: True
        self.MouseAllButtonsUp = lambda x: True
        self.MouseMovement = lambda x: True

        # Event types requested from the RECORD extension.
        self.contextEventMask = [X.KeyPress,X.MotionNotify]

        # Hook to our display.  Two connections: one for queries/grabs
        # (local_dpy) and one dedicated to the RECORD context (record_dpy).
        self.local_dpy = display.Display()
        self.record_dpy = display.Display()

    def run(self):
        """Thread body: create a RECORD context and pump events into
        processevents until the context is disabled by cancel()."""
        # Check if the extension is present
        if not self.record_dpy.has_extension("RECORD"):
            print("RECORD extension not found")
            sys.exit(1)

        r = self.record_dpy.record_get_version(0, 0)
        print("RECORD extension version %d.%d" % (r.major_version, r.minor_version))

        # Create a recording context; we only want key and mouse events
        self.ctx = self.record_dpy.record_create_context(
                0,
                [record.AllClients],
                [{
                        'core_requests': (0, 0),
                        'core_replies': (0, 0),
                        'ext_requests': (0, 0, 0, 0),
                        'ext_replies': (0, 0, 0, 0),
                        'delivered_events': (0, 0),
                        'device_events': tuple(self.contextEventMask), #(X.KeyPress, X.ButtonPress),
                        'errors': (0, 0),
                        'client_started': False,
                        'client_died': False,
                }])

        # Enable the context; this only returns after a call to record_disable_context,
        # while calling the callback function in the meantime
        self.record_dpy.record_enable_context(self.ctx, self.processevents)
        # Finally free the context
        self.record_dpy.record_free_context(self.ctx)

    def cancel(self):
        """Stop the recording loop started by run()."""
        self.finished.set()
        self.local_dpy.record_disable_context(self.ctx)
        self.local_dpy.flush()

    def printevent(self, event):
        # Default debugging handler: just dump the event object.
        print(event)

    def HookKeyboard(self):
        pass
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.KeyPress
        #self.contextEventMask[0] = X.KeyPress

    def HookMouse(self):
        pass
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.MotionNotify
        # need mouse motion to track pointer position, since ButtonPress events
        # don't carry that info.
        #self.contextEventMask[1] = X.MotionNotify

    def processevents(self, reply):
        """RECORD callback: dispatch key events to KeyDown/KeyUp and
        re-send them to the focused window when the handler returns truthy."""
        # If grab is not hooked
        if not getattr(self, '_grab_is_hooked', False):
            # Set grab is hooked
            self._grab_is_hooked = True

            # Grab keys (lazy: done on the first callback so it runs on
            # this thread's first event, not at construction time)
            self._grab_keys()

        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            print("* received swapped protocol data, cowardly ignored")
            return
        if not len(reply.data) or ord(str(reply.data[0])) < 2:
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(data, self.record_dpy.display, None, None)

            # Whether propagate the event
            propagate = True

            # If is KeyPress event
            if event.type == X.KeyPress:
                # Get event object
                hookevent = self.keypressevent(event)

                # Call event handler
                propagate = self.KeyDown(hookevent)

            # If is KeyRelease event
            elif event.type == X.KeyRelease:
                # Get event object
                hookevent = self.keyreleaseevent(event)

                # Call event handler
                propagate = self.KeyUp(hookevent)

            # If is not KeyPress or KeyRelease event
            else:
                # Ignore
                # NOTE(review): returning here discards any key events still
                # queued in `data` after a non-key event — confirm intended.
                return

            # If need propagate the event
            if propagate:
                # Get focus window
                window = self.local_dpy.get_input_focus()._data['focus']

                # If is KeyPress event
                if event.type == X.KeyPress:
                    # Get event class
                    event_class = KeyPress

                # If is KeyRelease event
                elif event.type == X.KeyRelease:
                    # Get event class
                    event_class = KeyRelease
                else:
                    return

                # Create event object
                new_event = event_class(
                    detail=event.detail,
                    time=event.time,
                    root=event.root,
                    window=window,
                    child=X.NONE,
                    root_x=event.root_x,
                    root_y=event.root_y,
                    event_x=event.event_x,
                    event_y=event.event_y,
                    state=event.state,
                    same_screen=event.same_screen,
                )

                # Send event
                self.local_dpy.send_event(window, new_event, propagate=True)

                # Flush
                self.local_dpy.flush()

            # Return
            return

            # NOTE(review): everything below is unreachable dead code — the
            # unconditional `return` above exits on the first parsed event.
            # Kept as-is; consider deleting or restoring the mouse handling.
            if event.type == X.KeyPress:
                hookevent = self.keypressevent(event)
                self.KeyDown(hookevent)
            elif event.type == X.KeyRelease:
                hookevent = self.keyreleaseevent(event)
                self.KeyUp(hookevent)
            elif event.type == X.ButtonPress:
                hookevent = self.buttonpressevent(event)
                self.MouseAllButtonsDown(hookevent)
            elif event.type == X.ButtonRelease:
                hookevent = self.buttonreleaseevent(event)
                self.MouseAllButtonsUp(hookevent)
            elif event.type == X.MotionNotify:
                # use mouse moves to record mouse position, since press and release events
                # do not give mouse position info (event.root_x and event.root_y have
                # bogus info).
                hookevent = self.mousemoveevent(event)
                self.MouseMovement(hookevent)
        #print "processing events...", event.type

    def _grab_keys(self):
        """Grab every keycode that has an XK_* keysym so the server routes
        those key events to us."""
        # Select event types
        self.local_dpy.screen().root.xrandr_select_input(
            X.KeyPressMask | X.KeyReleaseMask
        )

        # Ungrab all keys
        self.local_dpy.screen().root.ungrab_key(X.AnyKey, X.AnyModifier)

        # Grabbed key code set (several keysyms can map to one keycode)
        grabbed_keycode_set = set()

        # For each key definition in `XK` module
        for key_name, keysym in vars(XK).items():
            # If the key name starts with `XK_`
            if key_name.startswith('XK_'):
                # Convert keysym to key code
                keycode = self.local_dpy.keysym_to_keycode(keysym)

                # If the key code is not in the grabbed key code set
                if keycode not in grabbed_keycode_set:
                    # Add the key code to the grabbed key code set
                    grabbed_keycode_set.add(keycode)

                    # Ungrab the key
                    self.local_dpy.screen().root.ungrab_key(keycode, 0)

                    # Grab the key
                    self.local_dpy.screen().root.grab_key(
                        keycode, 0, False, X.GrabModeAsync, X.GrabModeAsync
                    )

    def keypressevent(self, event):
        """Translate a raw X KeyPress into a pyxhookkeyevent, tracking
        shift/caps state for shiftable characters."""
        matchto = self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))
        if self.shiftablechar.match(self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))):
            ## This is a character that can be typed.
            if self.ison["shift"] == False:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
                return self.makekeyhookevent(keysym, event)
            else:
                # Shift (or caps) held: use the shifted keysym column.
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
                return self.makekeyhookevent(keysym, event)
        else:
            ## Not a typable character.
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            if self.isshift.match(matchto):
                self.ison["shift"] = self.ison["shift"] + 1
            elif self.iscaps.match(matchto):
                # NOTE(review): these two `if`s run back to back — when caps
                # was off, the first block turns it on and the second block
                # immediately turns it back off, a net no-op.  The second
                # `if` looks like it was meant to be an `elif`/`else`.
                if self.ison["caps"] == False:
                    self.ison["shift"] = self.ison["shift"] + 1
                    self.ison["caps"] = True
                if self.ison["caps"] == True:
                    self.ison["shift"] = self.ison["shift"] - 1
                    self.ison["caps"] = False
            return self.makekeyhookevent(keysym, event)

    def keyreleaseevent(self, event):
        """Translate a raw X KeyRelease into a pyxhookkeyevent, decrementing
        the shift counter when a shift key is released."""
        if self.shiftablechar.match(self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))):
            if self.ison["shift"] == False:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
        else:
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
        matchto = self.lookup_keysym(keysym)
        if self.isshift.match(matchto):
            self.ison["shift"] = self.ison["shift"] - 1
        return self.makekeyhookevent(keysym, event)

    def buttonpressevent(self, event):
        #self.clickx = self.rootx
        #self.clicky = self.rooty
        return self.makemousehookevent(event)

    def buttonreleaseevent(self, event):
        #if (self.clickx == self.rootx) and (self.clicky == self.rooty):
            ##print "ButtonClick " + str(event.detail) + " x=" + str(self.rootx) + " y=" + str(self.rooty)
            #if (event.detail == 1) or (event.detail == 2) or (event.detail == 3):
                #self.captureclick()
            #else:
                #pass
        return self.makemousehookevent(event)
        # sys.stdout.write("ButtonDown " + str(event.detail) + " x=" + str(self.clickx) + " y=" + str(self.clicky) + "\n")
        # sys.stdout.write("ButtonUp " + str(event.detail) + " x=" + str(self.rootx) + " y=" + str(self.rooty) + "\n")
        #sys.stdout.flush()

    def mousemoveevent(self, event):
        # Record the pointer position; press/release events don't carry it.
        self.mouse_position_x = event.root_x
        self.mouse_position_y = event.root_y
        return self.makemousehookevent(event)

    # need the following because XK.keysym_to_string() only does printable chars
    # rather than being the correct inverse of XK.string_to_keysym()
    def lookup_keysym(self, keysym):
        for name in dir(XK):
            if name.startswith("XK_") and getattr(XK, name) == keysym:
                # NOTE(review): lstrip("XK_") strips a *character set*, not
                # a prefix — names such as "XK_KP_1" come back as "P_1"
                # because the leading "K" and "_" are stripped too.
                # name[3:] would remove only the prefix.
                return name.lstrip("XK_")
        # Unknown keysym: return its numeric value in brackets.
        return "[%d]" % keysym

    def asciivalue(self, keysym):
        # Ascii value of the keysym's name, or 0 when outside 0-255.
        asciinum = XK.string_to_keysym(self.lookup_keysym(keysym))
        if asciinum < 256:
            return asciinum
        else:
            return 0

    def makekeyhookevent(self, keysym, event):
        """Build the pyxhookkeyevent handed to KeyDown/KeyUp callbacks."""
        storewm = self.xwindowinfo()
        if event.type == X.KeyPress:
            MessageName = "key down"
        elif event.type == X.KeyRelease:
            MessageName = "key up"
        # NOTE(review): for any other event type MessageName is unbound and
        # the next line raises NameError — confirm only key events reach here.
        return pyxhookkeyevent(storewm["handle"], storewm["name"], storewm["class"], self.lookup_keysym(keysym), self.asciivalue(keysym), False, event.detail, MessageName)

    def makemousehookevent(self, event):
        """Build the pyxhookmouseevent handed to mouse callbacks."""
        storewm = self.xwindowinfo()
        if event.detail == 1:
            MessageName = "mouse left "
        elif event.detail == 3:
            MessageName = "mouse right "
        elif event.detail == 2:
            MessageName = "mouse middle "
        elif event.detail == 5:
            MessageName = "mouse wheel down "
        elif event.detail == 4:
            MessageName = "mouse wheel up "
        else:
            MessageName = "mouse " + str(event.detail) + " "

        if event.type == X.ButtonPress:
            MessageName = MessageName + "down"
        elif event.type == X.ButtonRelease:
            MessageName = MessageName + "up"
        else:
            MessageName = "mouse moved"
        return pyxhookmouseevent(storewm["handle"], storewm["name"], storewm["class"], (self.mouse_position_x, self.mouse_position_y), MessageName)

    def xwindowinfo(self):
        """Return {"name", "class", "handle"} for the focused window,
        falling back to its parent and finally to all-None on any error."""
        try:
            windowvar = self.local_dpy.get_input_focus().focus
            wmname = windowvar.get_wm_name()
            wmclass = windowvar.get_wm_class()
            wmhandle = str(windowvar)[20:30]
        except:
            ## This is to keep things running smoothly. It almost never happens, but still...
            return {"name":None, "class":None, "handle":None}
        if (wmname == None) and (wmclass == None):
            try:
                windowvar = windowvar.query_tree().parent
                wmname = windowvar.get_wm_name()
                wmclass = windowvar.get_wm_class()
                wmhandle = str(windowvar)[20:30]
            except:
                ## This is to keep things running smoothly. It almost never happens, but still...
                return {"name":None, "class":None, "handle":None}
        if wmclass == None:
            return {"name":wmname, "class":wmclass, "handle":wmhandle}
        else:
            return {"name":wmname, "class":wmclass[0], "handle":wmhandle}


class pyxhookkeyevent:
    """This is the class that is returned with each key event.f
    It simply creates the variables below in the class.

    Window = The handle of the window.
    WindowName = The name of the window.
    WindowProcName = The backend process for the window.
    Key = The key pressed, shifted to the correct caps value.
    Ascii = An ascii representation of the key. It returns 0 if the ascii value is not between 31 and 256.
    KeyID = This is just False for now. Under windows, it is the Virtual Key Code, but that's a windows-only thing.
    ScanCode = Please don't use this. It differs for pretty much every type of keyboard. X11 abstracts this information anyway.
    MessageName = "key down", "key up".
    """

    def __init__(self, Window, WindowName, WindowProcName, Key, Ascii, KeyID, ScanCode, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Key = Key
        self.Ascii = Ascii
        self.KeyID = KeyID
        self.ScanCode = ScanCode
        self.MessageName = MessageName

    def __str__(self):
        return "Window Handle: " + str(self.Window) + "\nWindow Name: " + str(self.WindowName) + "\nWindow's Process Name: " + str(self.WindowProcName) + "\nKey Pressed: " + str(self.Key) + "\nAscii Value: " + str(self.Ascii) + "\nKeyID: " + str(self.KeyID) + "\nScanCode: " + str(self.ScanCode) + "\nMessageName: " + str(self.MessageName) + "\n"


class pyxhookmouseevent:
    """This is the class that is returned with each key event.f
    It simply creates the variables below in the class.

    Window = The handle of the window.
    WindowName = The name of the window.
    WindowProcName = The backend process for the window.
    Position = 2-tuple (x,y) coordinates of the mouse click
    MessageName = "mouse left|right|middle down", "mouse left|right|middle up".
    """

    def __init__(self, Window, WindowName, WindowProcName, Position, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Position = Position
        self.MessageName = MessageName

    def __str__(self):
        return "Window Handle: " + str(self.Window) + "\nWindow Name: " + str(self.WindowName) + "\nWindow's Process Name: " + str(self.WindowProcName) + "\nPosition: " + str(self.Position) + "\nMessageName: " + str(self.MessageName) + "\n"

#######################################################################
#########################END CLASS DEF#################################
#######################################################################

if __name__ == '__main__':
    # Smoke test: print every event for ten seconds, then shut down.
    hm = HookManager()
    hm.HookKeyboard()
    hm.HookMouse()
    hm.KeyDown = hm.printevent
    hm.KeyUp = hm.printevent
    hm.MouseAllButtonsDown = hm.printevent
    hm.MouseAllButtonsUp = hm.printevent
    hm.MouseMovement = hm.printevent
    hm.start()
    time.sleep(10)
    hm.cancel()
{ "content_hash": "3d7e7f6d335202d936c05d871d1fe5b5", "timestamp": "", "source": "github", "line_count": 439, "max_line_length": 346, "avg_line_length": 41.489749430523915, "alnum_prop": 0.5561106840891622, "repo_name": "AoiKuiyuyou/AoikHotkey", "id": "9ec65e0a63f716e3177927b5b7abf453bd38a738", "size": "19685", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/aoikhotkeydep/pyxHook.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2226031" }, { "name": "Shell", "bytes": "5986" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
{ "content_hash": "2ba6c71f122c78669190cd3524395ca8", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 170, "avg_line_length": 38.57142857142857, "alnum_prop": 0.7111111111111111, "repo_name": "antoinecarme/pyaf", "id": "040690de2bffb7a1dae352ec784af887814a6cd1", "size": "270", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_Difference/trend_ConstantTrend/cycle_7/ar_12/test_artificial_128_Difference_ConstantTrend_7_12_20.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
import bst def size(node): if node is None: return 0 else: return node.size def update_size(node): node.size = size(node.left) + size(node.right) + 1 class BST(bst.BST): """ Simple binary search tree implementation, augmented with subtree sizes. This BST supports insert, find, and delete-min operations. Each tree contains some (possibly 0) BSTnode objects, representing nodes, and a pointer to the root. """ def __init__(self): self.root = None def insert(self, t): """Insert key t into this BST, modifying it in-place.""" node = bst.BST.insert(self, t) while node is not None: update_size(node) node = node.parent def delete_min(self): """Delete the minimum key (and return the old node containing it).""" deleted, parent = bst.BST.delete_min(self) node = parent while node is not None: update_size(node) node = node.parent return deleted, parent def test(args=None): bst.test(args, BSTtype=BST) if __name__ == '__main__': test()
{ "content_hash": "3cef5c4daf6fd1f38a97396632121e72", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 77, "avg_line_length": 26.404761904761905, "alnum_prop": 0.6095581605049594, "repo_name": "luoshao23/ML_algorithm", "id": "7f40f1799a8b87cd64aa031801855967db2e6f0b", "size": "1109", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "BSTree/bstsize.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "4514" }, { "name": "C++", "bytes": "3203" }, { "name": "Jupyter Notebook", "bytes": "1542804" }, { "name": "MATLAB", "bytes": "119208" }, { "name": "Makefile", "bytes": "85" }, { "name": "Python", "bytes": "370897" } ], "symlink_target": "" }
class Queue: def __init__(self): self._items = [] self._index = -1 def enqueue(self, element): if element is None: raise TypeError("Element cannot be of type None") self._items.append(element) def dequeue(self): if self.is_empty(): raise EmptyQueue("Empty queue cannot be dequeued") return self._items.pop(0) def peek(self): if self.is_empty(): raise EmptyQueue("Empty queue cannot be peeked into") return self._items[0] def is_empty(self): return len(self._items) == 0 def __len__(self): return len(self._items) def __iter__(self): return self def __next__(self): self._index = self._index + 1 if self._index >= len(self._items): raise StopIteration return self._items[self._index] class EmptyQueue(Exception): """ A wrapper exception to hide implementation details """ pass
{ "content_hash": "b2c6a4f8f9ee2eaaec29b8eb198c0b07", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 65, "avg_line_length": 23, "alnum_prop": 0.5621840242669363, "repo_name": "vilisimo/ads", "id": "421a5724c2c24e15f0c171e9b0904e441e275878", "size": "989", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/structures/queue.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "80324" }, { "name": "Python", "bytes": "183126" } ], "symlink_target": "" }
from mrjob.job import MRJob from mrjob.step import MRStep # Task, list all the bigrams in the shakespeare.txt # file, and the number of occurences # This could be done without overriding "steps" class MRmulti(MRJob): def mapper_get_bigrams(self, key, line): n = 0 for word in line.split(): if n > 0: yield "{} {}".format(prev_word, word), 1 prev_word = word n += 1 def sum_bigrams(self, bigram, values): yield sum(values), bigram def steps(self): return [MRStep(mapper=self.mapper_get_bigrams, reducer=self.sum_bigrams)] if __name__ == '__main__': MRmulti.run() # Run like this # python ex7_3.py shakespeare.txt | sort -n
{ "content_hash": "deddaea227f5874268f69786447ee8c2", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 56, "avg_line_length": 25.133333333333333, "alnum_prop": 0.5888594164456233, "repo_name": "North-Guard/BigToolsComplicatedData", "id": "789287c2fd609dbedb5f80fb2eef1dc6c4aa1b40", "size": "754", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Week7/ex7_3.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42470" }, { "name": "Shell", "bytes": "194" }, { "name": "TeX", "bytes": "6703" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
                      super, zip)

# standard lib imports
try:
    from collections.abc import Sequence
except ImportError:
    # try older import location
    from collections import Sequence

# third-party lib imports
from past.builtins import basestring

# local imports
from .._ParamsBase import ParamsBase


class ShareItemParams(ParamsBase):
    """Holds parameter values for the "share" content item request."""

    @property
    def groups(self):
        # Group IDs the item is shared with, stored as a comma-separated
        # value in the underlying request parameters ("nullable": may be
        # unset -- see ParamsBase._get_nullable_csv_list for exact semantics.
        return self._get_nullable_csv_list("groups")

    @groups.setter
    def groups(self, value):
        # Store as a comma-separated list; each entry is whitespace-stripped
        # by the supplied normaliser before being written.
        self._set_nullable_csv_list("groups", value, lambda v: v.strip())

    @property
    def confirm_item_control(self):
        # Raw "confirmItemControl" value from the parameter dict; returns
        # None when the property has not been set.
        return self._props.get("confirmItemControl")

    @confirm_item_control.setter
    def confirm_item_control(self, value):
        # Delegates to the nullable-bool setter; presumably None clears the
        # parameter -- see ParamsBase._set_nullable_bool to confirm.
        self._set_nullable_bool("confirmItemControl", value)
{ "content_hash": "ff5daef603110a04282b0b3a60550520", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 117, "avg_line_length": 30.742857142857144, "alnum_prop": 0.6933085501858736, "repo_name": "DavidWhittingham/agsadmin", "id": "0773e04534f82f7ad0f061b4df768f771a685563", "size": "1076", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "agsadmin/sharing_admin/content/ShareItemParams.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "PowerShell", "bytes": "442" }, { "name": "Python", "bytes": "173794" } ], "symlink_target": "" }
""" Take an output file from the Cannon, and produce a scatter plot of coloured points, with label values on the two axes and the colour of the points representing the SNR needed to attain some required level of accuracy in a third label. """ import argparse import gzip import json import os import re import sys import numpy as np from fourgp_degrade import SNRConverter from lib.label_information import LabelInformation from lib.plot_settings import snr_defined_at_wavelength from lib.pyxplot_driver import PyxplotDriver # Read input parameters parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--label', required=True, action="append", dest='labels', help="Labels we should plot on the two axes of the scatter plot.") parser.add_argument('--colour-by-label', required=True, dest='colour_by_label', help="Label we should use to colour code points by the SNR needed to achieve some " "nominal accuracy in that label.") parser.add_argument('--target-accuracy', required=True, dest='target_accuracy', type=float, help="The target accuracy in the label we are colour-coding.") parser.add_argument('--colour-range-min', required=True, dest='colour_range_min', type=float, help="The range of SNR/pixel values to use in colouring points.") parser.add_argument('--colour-range-max', required=True, dest='colour_range_max', type=float, help="The range of SNR/pixel values to use in colouring points.") parser.add_argument('--cannon-output', required=True, default="", dest='cannon', help="Filename of the JSON file containing the label values estimated by the Cannon, without " "the <.summary.json.gz> suffix.") parser.add_argument('--output', default="/tmp/scatter_plot_snr_required", dest='output_stub', help="Data file to write output to.") parser.add_argument('--accuracy-unit', default="apples", dest='accuracy_unit', help="Unit to put after target accuracy we're aiming to achieve in label") args = parser.parse_args() # Check that we have one label to plot on each axis 
label, and a title to show on each axis assert len(args.labels) == 2, "A scatter plot needs two labels to plot -- one on each axis." # Label information label_metadata = LabelInformation().label_metadata # Labels are supplied with ranges listed in {}. We extract the names to pass to label_tabulator. label_list = [] for item in args.labels + [args.colour_by_label]: test = re.match("(.*){(.*:.*)}", item) assert test is not None, "Label names should take the form <name{:2}>, with range to plot in {}." label_list.append({ "name": test.group(1), "latex": label_metadata[test.group(1)]["latex"], "range": test.group(2), "min": test.group(2).split(":")[0], "max": test.group(2).split(":")[1] }) # Read Cannon output if not os.path.exists(args.cannon + ".full.json.gz"): print("scatter_plot_snr_required.py could not proceed: Cannon run <{}> not found". \ format(args.cannon + ".full.json.gz")) sys.exit() cannon_output = json.loads(gzip.open(args.cannon + ".full.json.gz", "rt").read()) # Check that labels exist for label in label_list: if label["name"] not in cannon_output["labels"]: print("scatter_plot_snr_required.py could not proceed: Label <{}> not present in <{}>".format(label["name"], args.cannon)) sys.exit() # Create a sorted list of all the SNR values we've got snr_values = [item['spectrum_metadata']['SNR'] for item in cannon_output['spectra']] snr_values = sorted(set(snr_values)) # Create a sorted list of all the stars we've got star_names = [item['Starname'] for item in cannon_output['spectra']] star_names = sorted(set(star_names)) # Work out multiplication factor to convert SNR/pixel to SNR/A snr_converter = SNRConverter(raster=np.array(cannon_output['wavelength_raster']), snr_at_wavelength=snr_defined_at_wavelength) # Loop over stars, calculating offsets for the label we're colour-coding offsets = {} # offsets[star_name][SNR] = dictionary of label names and absolute offsets label_values = {} # label_values[star_name] = list of label values on x and y axes for star in 
cannon_output['spectra']: object_name = star['Starname'] if object_name not in star_names: continue if object_name not in offsets: offsets[object_name] = {} try: offsets[object_name][star['spectrum_metadata']['SNR']] = \ abs(star['cannon_output'][label_list[-1]['name']] - star['spectrum_metadata'][label_list[-1]['name']]) if object_name not in label_values: label_values[object_name] = [star['spectrum_metadata'][item['name']] for item in label_list] except KeyError: # If this star has missing data for one of the labels being measured, discard it star_names.remove(object_name) offsets.pop(object_name, None) # Delete if exists label_values.pop(object_name, None) # Loop over stars, calculating the SNR needed for each one output = [] for star_name in star_names: if star_name in offsets: snr_required_per_a = 1e6 previous_offset = 1e6 previous_snr = 0 for snr in snr_values: new_offset = offsets[star_name][snr] if (new_offset <= args.target_accuracy) and (previous_offset > args.target_accuracy): weight_a = abs(new_offset - args.target_accuracy) weight_b = abs(previous_offset - args.target_accuracy) snr_required_per_pixel = (previous_snr * weight_a + snr * weight_b) / (weight_a + weight_b) snr_required_per_a = snr_converter.per_pixel(snr_required_per_pixel).per_a() previous_offset = new_offset previous_snr = snr output.append(label_values[star_name][:-1] + [snr_required_per_a]) # Make sure that output directory exists os.system("mkdir -p {}".format(args.output_stub)) # Write values to data files # Each line is in the format <x y SNR/A> filename = "{}/snr_required.dat".format(args.output_stub) with open(filename, "w") as f: for line in output: f.write("%16s %16s %16s\n" % tuple(line)) # Create pyxplot script to produce this plot plotter = PyxplotDriver() plotter.make_plot(output_filename=filename, data_files=[filename], caption=re.sub("_", r"\_", cannon_output['description']), pyxplot_script=""" col_scale_z(z) = min(max( (z-({colour_range_min_per_pixel})) / \ 
(({colour_range_max_per_pixel})-({colour_range_min_per_pixel})) ,0),1) col_scale(z) = (z>{colour_range_max_per_pixel}) ? colors.black : hsb(0.75 * col_scale_z(z), 1, 1) set numerics errors quiet set fontsize 1.1 set nokey set multiplot set xlabel "Input {x_label}" set xrange [{x_range}] set ylabel "Input {y_label}" set yrange [{y_range}] set axis y left ; unset xtics ; unset mxtics plot "{data_filename}" title "SNR needed to achieve accuracy of {target_accuracy} {unit} in {colour_by_label}" \ with dots colour col_scale($3) ps 8 unset label set noxlabel set xrange [0:1] set noxtics ; set nomxtics set axis y y2 right set ylabel "SNR/\AA (at 6000\AA) needed to achieve accuracy of {target_accuracy} {unit} in {colour_by_label}" set yrange [{colour_range_min_per_pixel}:{colour_range_max_per_pixel}] set y2label "SNR/pixel (at 6000\AA)" set y2range [{colour_range_min_per_a}:{colour_range_max_per_a}] set c1range [{colour_range_min_per_pixel}:{colour_range_max_per_pixel}] norenormalise set width {colour_bar_width} set size ratio 1 / 0.05 set colormap col_scale(c1) set nocolkey set sample grid 2x200 set origin {colour_bar_x_pos}, 0 plot y with colourmap """.format(colour_range_min_per_pixel=args.colour_range_min, colour_range_max_per_pixel=args.colour_range_max, colour_range_min_per_a=snr_converter.per_pixel(args.colour_range_min).per_a(), colour_range_max_per_a=snr_converter.per_pixel(args.colour_range_max).per_a(), x_label=label_list[0]["latex"], x_range=label_list[0]["range"], y_label=label_list[1]["latex"], y_range=label_list[1]["range"], data_filename=filename, target_accuracy=args.target_accuracy, unit=args.accuracy_unit, colour_by_label=label_list[2]["latex"], colour_bar_width=plotter.width * plotter.aspect * 0.05, colour_bar_x_pos=plotter.width + 1 )) # Clean up plotter del plotter
{ "content_hash": "2ee0035eeb9238320bc5b763670b11dd", "timestamp": "", "source": "github", "line_count": 197, "max_line_length": 117, "avg_line_length": 45.85279187817259, "alnum_prop": 0.6321266467397321, "repo_name": "dcf21/4most-4gp-scripts", "id": "5429fb56608c9c26ae0c0309fef89957b2ad47c1", "size": "9548", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/scripts/visualisation/cannon_performance/scatter_plot_snr_required.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "660733" }, { "name": "Shell", "bytes": "276964" } ], "symlink_target": "" }
import re import unicodedata def load_yaml(filename): """ Reads any yaml file and returns it as object. """ import yaml stream = file(filename) return yaml.load(stream) debug=False def compare_rules(x,y): """ Compares parser rules and sees which has more tokens or conditions (prev, next) """ diff = len(y['tokens'])-len(x['tokens']) if diff != 0: return diff (x_conds, y_conds) = (0,0) for cond in ('prev','next'): if cond in x: x_conds +=len(cond) if cond in y: y_conds +=len(cond) for cond in ('prev_class', 'next_class'): if cond in x: x_conds += 1 if cond in y: y_conds += 1 return y_conds - x_conds # see if one has more <classes> def rules_from_yaml_data(rules_raw): """ Returns sorted and usable parser rules from yaml file. rules_raw is a dictionary (loaded from yaml): key: <previous token class> token token <next token class> value: production previous class and next class of token are optional. The production allows \N{unicode char name} strings. Output of return is list of rules containing: prev_class: previous token class [optional, will look behind prev] prev: previous tokens [optional] tokens: list of tokens next: next token class [optional] next_class: next token class [optional, will look ahead of next] production: string production Example: (<wb> a b c) d (e f <wb>): \N{LATIN SMALL LETTER D} """ import unicodedata def unescape_unicode_charnames(s): """ Takes \N{charname} in production rule and turns to Unicode string. """ def get_unicode_char(matchobj): """ Returns Unicode character of \N{character name} """ s = matchobj.group(0) m = re.match(r'\\N{(.+)}',s) char = unicodedata.lookup(m.group(1)) return char return re.sub(r'\\N{.+?}',get_unicode_char,s) # load and prepare rules rules = [] # clean list of rule # print "rules_r is"+str(rules_raw) for key in rules_raw: if debug: print "key is "+key+" = "+rules_raw[key] rule = {} #1 #2 #3 s ='(?:' s +='\(' s +='(?:\s?<(.+?)> )?' # group 1, prev class (in brackets indicating cluster) s +='(.+?)\s?' 
# group 2, prev tokens (to be split) s +='\) ' s +='|' # either a cluster or a particular previous class (Could add additional support, e.g. class or paretic. s +='<(.+?)> ' # group 3, prev class (not in cluster) s +=')?' s += '(.+?)' # group 4, tokens s += '(?:' # cluster for following tokens, clusters s += ' \(' s += '\s?(.+?)' # group 5, next tokens s += '(?: <(.+?)>)?' # group 6, next class s += '\s?\)' s += '|' s += ' <(.+?)>' # group 7, follo s += ')?$' m = re.match (s, key, re.S) assert (m is not None) if m.group(1): rule['prev_class'] = m.group(1) if m.group(2): rule['prev_tokens'] = m.group(2).split(' ') if m.group(3): rule['prev_class'] = m.group(3) if m.group(4)==' ': rule['tokens'] = [' '] #ugh--missed this one. else: rule['tokens'] = m.group(4).split(' ') if m.group(5): rule['next_tokens'] = m.group(5).split(' ') if m.group(6): rule['next_class'] = m.group(6) if m.group(7): rule['next_class'] = m.group(7) rule['production'] = unescape_unicode_charnames(rules_raw[key]) if debug:print rule if debug:print '----' rules.append(rule) return rules def unescape_unicode_charnames(s): """ Takes \N{charname} in production rule and turns to Unicode string. """ def get_unicode_char(matchobj): # """ # Returns Unicode character of \N{character name} # """ # if debug: print "Trying "+matchobj+"in get_unicode_char" s = matchobj.group(0) m = re.match(r'\\N{(.+)}',s) char = unicodedata.lookup(m.group(1)) return char return re.sub(r'\\N{.+?}',get_unicode_char,s) def onmatch_rules_from_yaml_data(rules_raw): ''' Some quick code to generate the onmatch rules. 
It only relies on classes ''' ''' returns a tuple ( (left_classes,right_classes) , prod) rules are classes ''' onmatch_rules = [] # clean list of rule # print "rules_r is"+str(rules_raw) debug=True match_rules = [] for key in rules_raw: assert len(key)==1 rule = key.keys()[0] prod_orig = key[rule] prod = unescape_unicode_charnames(prod_orig) # print rule m= re.match('([^+]+)\+([^+]+)$',rule) assert m l = m.group(1) #left r =m.group(2) #right cl_l=re.findall('(?<=<)[^<]+(?=>)',l) cl_r=re.findall('(?<=<)[^<]+(?=>)',r) onmatch_rules.append(( (cl_l, cl_r) , prod )) return(onmatch_rules) debug=False class Parser: error_on_last = False last_string = '' error_string = '' def __init__(self, yaml_file='', data=None): if yaml_file != '': data = load_yaml(yaml_file) else: assert data is not None self.rules = rules_from_yaml_data(data['rules']) # specifically YAML here rules = self.rules rules.sort(cmp=compare_rules) self.tokens = data['tokens'] self.token_match_re = self.generate_token_match_re() if 'onmatch' in data: self.onmatch_rules = onmatch_rules_from_yaml_data(data['onmatch']) else: self.onmatch_rules = None def generate_token_match_string(self): tokens = self.tokens.keys() sorted_tokens = sorted(tokens, key=len, reverse=True) escaped_tokens = map(re.escape, sorted_tokens) tokens_re_string = '|'.join(escaped_tokens)+'|.' # grab unknowns return tokens_re_string def generate_token_match_re(self): ''' Create regular expression from Parser.tokens sorted by length Adds final "." in case nothing found ''' tokens = self.tokens.keys() sorted_tokens = sorted(tokens, key=len, reverse=True) escaped_tokens = map(re.escape, sorted_tokens) tokens_re_string = '|'.join(escaped_tokens)+'|.' 
# grab unknowns return re.compile(tokens_re_string, re.S) def tokenize(self,input): return self.token_match_re.findall(input) def parse(self,input,on_error='',on_error_additional='',return_all_matches=False, debug=False): #reset error-catching variables self.last_string = input self.error_string = '' self.error_on_last = False self.parse_details = [] output = '' tkns = self.token_match_re.findall(input) t_i = 0 # t_i counter for token position in list while t_i<len(tkns): # while in range of tokens in string matched = False for rule_id,rule in enumerate(self.rules): try: r_tkns = [] if ('prev_tokens' in rule): i_start = t_i-len(rule['prev_tokens']) r_tkns += rule['prev_tokens'] #'print problem in '+str(rule) else: i_start = t_i r_tkns +=rule['tokens'] if 'next_tokens' in rule: r_tkns += rule['next_tokens'] if all(r_tkns[i] == tkns[i_start+i] for i in range(len(r_tkns)) ): if 'prev_class' in rule: # if rule has a prev class if i_start==0: # if at start of string, allow for word break prev_token = ' ' else: prev_token = tkns[i_start-1] if not(rule['prev_class'] in self.tokens[prev_token]): continue if 'next_class' in rule: if i_start+len(r_tkns)==len(tkns): # if end of string next_token = ' ' else: next_token = tkns[i_start+len(r_tkns)] if not next_token in self.tokens: next_token = ' ' # in case it's missing (SHOULD THIS BE ADDED ABOVE?) if not(rule['next_class'] in self.tokens[next_token]): continue # We did it! if debug==True: print "matched "+str(rule) matched = True output += rule['production'] self.parse_details.append({'tokens':rule['tokens'], 'start':t_i, 'rule':rule, 'rule_id':rule_id}) t_i += len(rule['tokens']) break except IndexError: continue if matched==False: import unicodedata curr_error = 'no match at token # '+str(t_i)+': '+tkns[t_i]+" " try: for c in tkns[t_i]: curr_error += unicodedata.name(unichr(ord(c)))+" " except TypeError: curr_error += "TYPE ERROR HERE!!!!" 
curr_error += ' [' + on_error_additional + ']' if on_error=='print': print curr_error self.error_on_last = True self.error_string +=curr_error+"\n" self.parse_details.append({'tokens':tkns[t_i], 'start':t_i, 'rule':None}) # save error prev_token=' ' # reset t_i += 1 return output def match_all_at(self,tkns,t_i): """ Finds all matches at a particular token_index (t_i) Returns {rule_id: id for rule, start: index of match (from token array), tokens: tokens matched,? production: production ? } """ matches = '' output = [] for rule_id,rule in enumerate(self.rules): try: r_tkns = [] # array of all tokens to match (including rule's [prev_tokens]&[next_tokens] if ('prev_tokens' in rule): i_start = t_i-len(rule['prev_tokens']) r_tkns += rule['prev_tokens'] else: i_start = t_i r_tkns +=rule['tokens'] if 'next_tokens' in rule: r_tkns += rule['next_tokens'] if all(r_tkns[i] == tkns[i_start+i] for i in range(len(r_tkns)) ): if 'prev_class' in rule: # if rule has a prev class if i_start==0: # if at start of string, allow for word break prev_token = 'b' else: prev_token = tkns[i_start-1] if not(rule['prev_class'] in self.tokens[prev_token]): continue if 'next_class' in rule: if i_start+len(r_tkns)==len(tkns): # if end of string next_token = 'b' else: next_token = tkns[i_start+len(r_tkns)] if not next_token in self.tokens: next_token = 'b' # in case it's missing (SHOULD THIS BE ADDED ABOVE?) if not(rule['next_class'] in self.tokens[next_token]): continue # We did it! matched = True output.append( {'rule_id':rule_id, 'tokens':rule['tokens'], 'start': t_i, 'rule':rule} ) except IndexError: continue return output if __name__ == '__main__': import pdb p = Parser('settings/urdu-meter.yaml') import pprint pprint.pprint(p.rules) print p.tokenize(' dal-daaz')
{ "content_hash": "5af1829c0667b9d14a561a00b1b07dc3", "timestamp": "", "source": "github", "line_count": 327, "max_line_length": 121, "avg_line_length": 38.26299694189603, "alnum_prop": 0.4824968030690537, "repo_name": "seanpue/graphparser", "id": "9b4a30e787042467e6d5f0efbca0afb0d5a4ed58", "size": "12512", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "oldparser.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "4825" }, { "name": "Python", "bytes": "39594" } ], "symlink_target": "" }
import boto3.session

from tests import unittest


class TestSession(unittest.TestCase):
    """Functional tests for boto3.session.Session."""

    def setUp(self):
        # Fresh session per test; region value is arbitrary but valid.
        self.session = boto3.session.Session(region_name='us-west-2')

    def test_events_attribute(self):
        # Create some function to register.
        def my_handler(my_list, **kwargs):
            return my_list.append('my_handler called')

        # Register the handler to the event.
        self.session.events.register('myevent', my_handler)

        initial_list = []

        # Emit the event.
        self.session.events.emit('myevent', my_list=initial_list)

        # Ensure that the registered handler was called.
        assert initial_list == ['my_handler called']

    def test_can_access_region_property(self):
        # region_name should round-trip the value given at construction.
        session = boto3.session.Session(region_name='us-west-1')
        assert session.region_name == 'us-west-1'

    def test_get_available_partitions(self):
        # Expect a non-empty list of partition names.
        partitions = self.session.get_available_partitions()
        assert isinstance(partitions, list)
        assert partitions

    def test_get_available_regions(self):
        # Expect a non-empty list of region names for the given service.
        regions = self.session.get_available_regions('s3')
        assert isinstance(regions, list)
        assert regions
{ "content_hash": "4d52170b79b36a9051bc3ad2273a012e", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 69, "avg_line_length": 34.285714285714285, "alnum_prop": 0.6575, "repo_name": "boto/boto3", "id": "65c28b174305b4d3b4039da7e3f2e57252d70c58", "size": "1762", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/functional/test_session.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "674057" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function, unicode_literals)

from __future__ import unicode_literals, absolute_import

import os
import os.path

from django.utils import six

from whoosh import filedb
from whoosh.qparser import MultifieldParser
from whoosh.writing import AsyncWriter

from .. import base
from .. import utils
from .. import exceptions
from .. import connections

from . import fields
from . import result


class Whoosh(base.SearchBackend):
    """Search backend implemented on top of the Whoosh library."""

    vendor = "whoosh"

    def __init__(self, storage='whoosh.filedb.filestore.FileStorage', *args, **kwargs):
        # *storage* is a dotted path to a whoosh storage class; any extra
        # args/kwargs are forwarded to that class's constructor.
        self.manager = connections.ConnectionManager()
        storage_cls = utils.load_class(storage)
        self._storage = storage_cls(*args, **kwargs)
        self._storage.create()

    def exists_index(self, index):
        # Return True if an index with this name exists in storage.
        index = base._resolve_index(index)
        return self._storage.index_exists(index.get_name())

    def create_index(self, index, overwrite=False):
        # Create the index; refuse to clobber an existing one unless
        # overwrite=True is given.
        index = base._resolve_index(index)

        if not overwrite and self.exists_index(index):
            raise exceptions.IndexAlreadyExists(
                "index {0} already exists".format(index.get_name()))

        return self._storage.create_index(indexname=index.get_name(),
                                          schema=index.get_schema())

    def delete_index(self, index):
        # use create_index with overwrite=True for clearing the index;
        # Whoosh does not support deleting an index outright.
        self.create_index(index, overwrite=True)

    def delete_all_indexes(self):
        # Wipe the whole storage area, removing every index.
        self._storage.clean()

    def update(self, index, document, **options):
        # Insert-or-replace a single document via an async writer.
        index = base._resolve_index(index)
        ix = self._storage.open_index(indexname=index.get_name())

        writer = AsyncWriter(ix)

        adapted_document = index.adapt_document(document)
        writer.update_document(**adapted_document)
        writer.commit()

    def update_bulk(self, index, documents):
        # Insert-or-replace many documents under a single commit.
        index = base._resolve_index(index)
        ix = self._storage.open_index(indexname=index.get_name())

        writer = AsyncWriter(ix)

        adapted_documents = (index.adapt_document(doc)
                             for doc in documents)
        for doc in adapted_documents:
            writer.update_document(**doc)

        writer.commit()

    def search(self, query_string, index, parser=None, **kwargs):
        # Parse *query_string* (with a default multi-field parser over the
        # index's searchable fields, unless one is supplied) and search.
        index = base._resolve_index(index)

        if parser is None:
            parser = MultifieldParser(fieldnames=index.get_searchable_fieldnames(),
                                      schema=index.get_schema())

        query = parser.parse(query_string)
        return self._search(query, index, **kwargs)

    def _search(self, query, index, method="search", **kwargs):
        # Run the given searcher method (e.g. "search") and wrap the raw
        # whoosh result in the project's SearchResult type.
        ix = self._storage.open_index(indexname=index.get_name())

        with ix.searcher() as searcher:
            raw_result = getattr(searcher, method)(query, **kwargs)
            return result.SearchResult(raw_result)
{ "content_hash": "6e41a4a3b331143c2e16758a0a1251ff", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 83, "avg_line_length": 33.01123595505618, "alnum_prop": 0.6310415248468346, "repo_name": "niwinz/needlestack", "id": "d25e8c2f08f11ee0a855f55706d35a6a174c601d", "size": "2963", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "needlestack/whoosh/base.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "47011" } ], "symlink_target": "" }
"""Create Common Workflow Language (CWL) runnable files and tools from a world object. """ import copy import json import operator import os import toolz as tz import yaml from bcbio import utils def from_world(world, run_info_file): base = utils.splitext_plus(os.path.basename(run_info_file))[0] out_dir = utils.safe_makedir("%s-workflow" % (base)) out_file = os.path.join(out_dir, "%s-main.cwl" % (base)) samples = [xs[0] for xs in world] # unpack world data objects analyses = list(set([x["analysis"] for x in samples])) assert len(analyses) == 1, "Currently support writing CWL for a single analysis type" if analyses[0].startswith("variant"): prep_variant_cwl(samples, out_dir, out_file) else: raise NotImplementedError("Unsupported CWL analysis type: %s" % analyses[0]) def _standard_bcbio_cwl(samples, inputs): """Retrieve CWL inputs shared amongst different workflows. ToDo: calculate system requirements (cores/memory) from input configuration. """ return {"class": "Workflow", "hints": [{"class": "DockerRequirement", "dockerImport": "bcbio/bcbio", "dockerImageId": "bcbio/bcbio"}], "requirements": [{"class": "EnvVarRequirement", "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]}, {"class": "ScatterFeatureRequirement"}], "inputs": inputs, "outputs": [], "steps": []} def _write_tool(step_dir, name, inputs, outputs): out_file = os.path.join(step_dir, "%s.cwl" % name) out = {"class": "CommandLineTool", "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"], "inputs": [], "outputs": []} for i, inp in enumerate(inputs): assert inp["type"]["type"] == "array", inp inp_tool = copy.deepcopy(inp) inp_tool["inputBinding"] = {"prefix": "%s=" % inp["id"].replace("#", ""), "separate": False, "itemSeparator": ";;", "position": i} inp_tool["type"] = inp["type"]["items"] out["inputs"].append(inp_tool) # XXX Need to generalize outputs, just a hack for now to test align_prep for outp in outputs: outp_tool = copy.deepcopy(outp) outp_tool["type"] = {"type": "array", "items": "File"} 
outp_tool["default"] = None outp_tool["outputBinding"] = {"glob": "align_prep/*.gz", "secondaryFiles": [".gbi"]} out["outputs"].append(outp_tool) with open(out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return os.path.join("steps", os.path.basename(out_file)) def _step_template(name, step_dir, inputs, outputs, source=""): """Templating function for writing a step to avoid repeating namespaces. """ outputs = [x for x in inputs if x["id"].endswith(tuple(outputs))] step_file = _write_tool(step_dir, name, inputs, outputs) inputs = [{"id": "#%s.%s" % (name, inp["id"].replace("#", "")), "source": "#" + ("%s." % source if source else "") + inp["id"].replace("#", "")} for inp in inputs] return {"run": {"import": step_file}, "id": "#%s" % name, "scatterMethod": "dotproduct", "scatter": [x["id"] for x in inputs], "inputs": inputs, "outputs": [{"id": "#%s.%s" % (name, output["id"].replace("#", ""))} for output in outputs]} def prep_variant_cwl(samples, out_dir, out_file): """Output a CWL decription for running a variant calling workflow. """ step_dir = utils.safe_makedir(os.path.join(out_dir, "steps")) sample_json, variables = _flatten_samples(samples, out_file) out = _standard_bcbio_cwl(samples, variables) s1 = _step_template("prep_align_inputs", step_dir, variables, ["files"]) out["steps"] = [s1] with open(out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file, sample_json def _flatten_samples(samples, base_file): """Create a flattened JSON representation of data from the bcbio world map. 
""" out_file = "%s-samples.json" % utils.splitext_plus(base_file)[0] flat_data = [] for data in samples: cur_flat = {} for key_path in [["description"], ["rgnames"], ["config", "algorithm"], ["metadata"], ["genome_build"], ["files"], ["reference"], ["genome_resources"], ["vrn_file"]]: cur_key = "__".join(key_path) for flat_key, flat_val in _to_cwldata(cur_key, tz.get_in(key_path, data)): cur_flat[flat_key] = flat_val flat_data.append(cur_flat) out = {} for key in sorted(list(set(reduce(operator.add, [d.keys() for d in flat_data])))): out[key] = [] for cur_flat in flat_data: out[key].append(cur_flat.get(key)) with open(out_file, "w") as out_handle: json.dump(out, out_handle, sort_keys=True, indent=4, separators=(',', ': ')) return out_file, _samplejson_to_inputs(out) def _add_avro_type(inp, val): inp["type"] = _get_avro_type(val) return inp def _get_avro_type(val): """Infer avro type for the current input. """ if isinstance(val, dict): assert val.get("class") == "File" return "File" elif isinstance(val, (tuple, list)): types = [] for ctype in [_get_avro_type(v) for v in val]: if isinstance(ctype, dict): nested_types = [x["items"] for x in types if isinstance(x, dict)] if ctype["items"] not in nested_types: types.append(ctype) elif ctype not in types: types.append(ctype) return {"type": "array", "items": (types[0] if len(types) == 1 else types)} elif val is None: return "null" else: return "string" def _samplejson_to_inputs(svals): """Convert sample output into inputs for CWL configuration files, with types. """ out = [] for key, val in svals.items(): out.append(_add_avro_type({"id": "#%s" % key}, val)) return out def _to_cwldata(key, val): """Convert nested dictionary into CWL data, flatening and marking up files. Moves file objects to the top level, enabling insertion in CWL inputs/outputs. 
""" out = [] if isinstance(val, dict): remain_val = {} for nkey, nval in val.items(): cur_nkey = "%s__%s" % (key, nkey) cwl_nval = _item_to_cwldata(nval) if isinstance(cwl_nval, dict): out.extend(_to_cwldata(cur_nkey, nval)) elif cwl_nval == nval: remain_val[nkey] = nval else: out.append((cur_nkey, cwl_nval)) if remain_val: out.append((key, json.dumps(remain_val, separators=(',', ':')))) else: out.append((key, _item_to_cwldata(val))) return out def _item_to_cwldata(x): """"Markup an item with CWL specific metadata. """ if isinstance(x, (list, tuple)): return [_item_to_cwldata(subx) for subx in x] elif x and isinstance(x, basestring) and (os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x): return {"class": "File", "path": x} else: return x
{ "content_hash": "e1f209dfad0d867c7f7683e04f487e26", "timestamp": "", "source": "github", "line_count": 179, "max_line_length": 111, "avg_line_length": 41.564245810055866, "alnum_prop": 0.5701612903225807, "repo_name": "lpantano/bcbio-nextgen", "id": "f457423a3c35adf6b91a31f8375459381a6089d4", "size": "7440", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bcbio/cwl/create.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1553199" }, { "name": "Ruby", "bytes": "624" }, { "name": "Shell", "bytes": "14377" } ], "symlink_target": "" }
# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """``tornado.web`` provides a simple web framework with asynchronous features that allow it to scale to large numbers of open connections, making it ideal for `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_. Here is a simple "Hello, world" example app: .. testcode:: import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") if __name__ == "__main__": application = tornado.web.Application([ (r"/", MainHandler), ]) application.listen(8888) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: See the :doc:`guide` for additional information. Thread-safety notes ------------------- In general, methods on `RequestHandler` and elsewhere in Tornado are not thread-safe. In particular, methods such as `~RequestHandler.write()`, `~RequestHandler.finish()`, and `~RequestHandler.flush()` must only be called from the main thread. If you use multiple threads it is important to use `.IOLoop.add_callback` to transfer control back to the main thread before finishing the request, or to limit your use of other threads to `.IOLoop.run_in_executor` and ensure that your callbacks running in the executor do not refer to Tornado objects. 
""" from __future__ import absolute_import, division, print_function import base64 import binascii import datetime import email.utils import functools import gzip import hashlib import hmac import mimetypes import numbers import os.path import re import stat import sys import threading import time import tornado import traceback import types import warnings from inspect import isclass from io import BytesIO from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado import escape from tornado import gen from tornado import httputil from tornado import iostream from tornado import locale from tornado.log import access_log, app_log, gen_log from tornado import stack_context from tornado import template from tornado.escape import utf8, _unicode from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches, ReversibleRouter, Rule, ReversibleRuleRouter, URLSpec) from tornado.util import (ObjectDict, raise_exc_info, unicode_type, _websocket_mask, PY3) url = URLSpec if PY3: import http.cookies as Cookie import urllib.parse as urlparse from urllib.parse import urlencode else: import Cookie import urlparse from urllib import urlencode try: import typing # noqa # The following types are accepted by RequestHandler.set_header # and related methods. _HeaderTypes = typing.Union[bytes, unicode_type, numbers.Integral, datetime.datetime] except ImportError: pass MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1 """The oldest signed value version supported by this version of Tornado. Signed values older than this version cannot be decoded. .. versionadded:: 3.2.1 """ MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2 """The newest signed value version supported by this version of Tornado. Signed values newer than this version cannot be decoded. .. versionadded:: 3.2.1 """ DEFAULT_SIGNED_VALUE_VERSION = 2 """The signed value version produced by `.RequestHandler.create_signed_value`. May be overridden by passing a ``version`` keyword argument. .. 
versionadded:: 3.2.1
"""

DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.

May be overridden by passing a ``min_version`` keyword argument.

.. versionadded:: 3.2.1
"""


class RequestHandler(object):
    """Base class for HTTP request handlers.

    Subclasses must define at least one of the methods defined in the
    "Entry points" section below.
    """
    SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
                         "OPTIONS")

    # Class-wide cache of template loaders keyed by template directory;
    # guarded by _template_loader_lock (see render_string).
    _template_loaders = {}  # type: typing.Dict[str, template.BaseLoader]
    _template_loader_lock = threading.Lock()
    # Matches C0 control characters other than \x09-\x0d (tab/newline/etc.);
    # used by _get_arguments to scrub decoded request arguments.
    _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")

    def __init__(self, application, request, **kwargs):
        # ``kwargs`` comes from the optional third element of a URLSpec and
        # is forwarded verbatim to initialize() at the end of this method.
        super(RequestHandler, self).__init__()

        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = None  # will be set in _execute
        self._prepared_future = None
        self._headers = None  # type: httputil.HTTPHeaders
        self.path_args = None
        self.path_kwargs = None
        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
                             application.ui_methods.items())
        # UIModules are available as both `modules` and `_tt_modules` in the
        # template namespace.  Historically only `modules` was available
        # but could be clobbered by user additions to the namespace.
        # The template {% module %} directive looks in `_tt_modules` to avoid
        # possible conflicts.
        self.ui["_tt_modules"] = _UIModuleNamespace(self,
                                                    application.ui_modules)
        self.ui["modules"] = self.ui["_tt_modules"]
        self.clear()
        self.request.connection.set_close_callback(self.on_connection_close)
        self.initialize(**kwargs)

    def initialize(self):
        """Hook for subclass initialization. Called for each request.

        A dictionary passed as the third argument of a url spec
        will be supplied as keyword arguments to initialize().

        Example::

            class ProfileHandler(RequestHandler):
                def initialize(self, database):
                    self.database = database

                def get(self, username):
                    ...

            app = Application([
                (r'/user/(.*)', ProfileHandler, dict(database=database)),
                ])
        """
        pass

    @property
    def settings(self):
        """An alias for `self.application.settings <Application.settings>`."""
        return self.application.settings

    # Default HTTP verb entry points: each raises 405 (Method Not Allowed).
    # Subclasses override only the verbs they actually support.
    def head(self, *args, **kwargs):
        raise HTTPError(405)

    def get(self, *args, **kwargs):
        raise HTTPError(405)

    def post(self, *args, **kwargs):
        raise HTTPError(405)

    def delete(self, *args, **kwargs):
        raise HTTPError(405)

    def patch(self, *args, **kwargs):
        raise HTTPError(405)

    def put(self, *args, **kwargs):
        raise HTTPError(405)

    def options(self, *args, **kwargs):
        raise HTTPError(405)

    def prepare(self):
        """Called at the beginning of a request before  `get`/`post`/etc.

        Override this method to perform common initialization regardless
        of the request method.

        Asynchronous support: Decorate this method with `.gen.coroutine`
        or use ``async def`` to make it asynchronous (the
        `asynchronous` decorator cannot be used on `prepare`).
        If this method returns a `.Future` execution will not proceed
        until the `.Future` is done.

        .. versionadded:: 3.1
           Asynchronous support.
        """
        pass

    def on_finish(self):
        """Called after the end of a request.

        Override this method to perform cleanup, logging, etc.
        This method is a counterpart to `prepare`.  ``on_finish`` may
        not produce any output, as it is called after the response
        has been sent to the client.
        """
        pass

    def on_connection_close(self):
        """Called in async handlers if the client closed the connection.

        Override this to clean up resources associated with
        long-lived connections.  Note that this method is called only if
        the connection was closed during asynchronous processing; if you
        need to do cleanup after every request override `on_finish`
        instead.

        Proxies may keep a connection open for a time (perhaps
        indefinitely) after the client has gone away, so this method
        may not be called promptly after the end user closes their
        connection.
""" if _has_stream_request_body(self.__class__): if not self.request.body.done(): self.request.body.set_exception(iostream.StreamClosedError()) self.request.body.exception() def clear(self): """Resets all headers and content for this response.""" self._headers = httputil.HTTPHeaders({ "Server": "TornadoServer/%s" % tornado.version, "Content-Type": "text/html; charset=UTF-8", "Date": httputil.format_timestamp(time.time()), }) self.set_default_headers() self._write_buffer = [] self._status_code = 200 self._reason = httputil.responses[200] def set_default_headers(self): """Override this to set HTTP headers at the beginning of the request. For example, this is the place to set a custom ``Server`` header. Note that setting such headers in the normal flow of request processing may not do what you want, since headers may be reset during error handling. """ pass def set_status(self, status_code, reason=None): """Sets the status code for our response. :arg int status_code: Response status code. :arg str reason: Human-readable reason phrase describing the status code. If ``None``, it will be filled in from `http.client.responses` or "Unknown". .. versionchanged:: 5.0 No longer validates that the response code is in `http.client.responses`. """ self._status_code = status_code if reason is not None: self._reason = escape.native_str(reason) else: self._reason = httputil.responses.get(status_code, "Unknown") def get_status(self): """Returns the status code for our response.""" return self._status_code def set_header(self, name, value): # type: (str, _HeaderTypes) -> None """Sets the given response header name and value. If a datetime is given, we automatically format it according to the HTTP specification. If the value is not a string, we convert it to a string. All header values are then encoded as UTF-8. """ self._headers[name] = self._convert_header_value(value) def add_header(self, name, value): # type: (str, _HeaderTypes) -> None """Adds the given response header and value. 
Unlike `set_header`, `add_header` may be called multiple times to return multiple values for the same header. """ self._headers.add(name, self._convert_header_value(value)) def clear_header(self, name): """Clears an outgoing header, undoing a previous `set_header` call. Note that this method does not apply to multi-valued headers set by `add_header`. """ if name in self._headers: del self._headers[name] _INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]") def _convert_header_value(self, value): # type: (_HeaderTypes) -> str # Convert the input value to a str. This type check is a bit # subtle: The bytes case only executes on python 3, and the # unicode case only executes on python 2, because the other # cases are covered by the first match for str. if isinstance(value, str): retval = value elif isinstance(value, bytes): # py3 # Non-ascii characters in headers are not well supported, # but if you pass bytes, use latin1 so they pass through as-is. retval = value.decode('latin1') elif isinstance(value, unicode_type): # py2 # TODO: This is inconsistent with the use of latin1 above, # but it's been that way for a long time. Should it change? retval = escape.utf8(value) elif isinstance(value, numbers.Integral): # return immediately since we know the converted value will be safe return str(value) elif isinstance(value, datetime.datetime): return httputil.format_timestamp(value) else: raise TypeError("Unsupported header value %r" % value) # If \n is allowed into the header, it is possible to inject # additional headers or split the request. if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval): raise ValueError("Unsafe header value %r", retval) return retval _ARG_DEFAULT = object() def get_argument(self, name, default=_ARG_DEFAULT, strip=True): """Returns the value of the argument with the given name. If default is not provided, the argument is considered to be required, and we raise a `MissingArgumentError` if it is missing. 
        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.
        """
        return self._get_argument(name, default, self.request.arguments, strip)

    def get_arguments(self, name, strip=True):
        """Returns a list of the arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.
        """
        # Make sure `get_arguments` isn't accidentally being called with a
        # positional argument that's assumed to be a default (like in
        # `get_argument`.)
        assert isinstance(strip, bool)

        return self._get_arguments(name, self.request.arguments, strip)

    def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request body.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default, self.request.body_arguments,
                                  strip)

    def get_body_arguments(self, name, strip=True):
        """Returns a list of the body arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.body_arguments, strip)

    def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request query string.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default,
                                  self.request.query_arguments, strip)

    def get_query_arguments(self, name, strip=True):
        """Returns a list of the query arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.query_arguments, strip)

    def _get_argument(self, name, default, source, strip=True):
        # Shared implementation for get_argument/get_body_argument/
        # get_query_argument: last value wins when repeated; the
        # _ARG_DEFAULT sentinel means "required".
        args = self._get_arguments(name, source, strip=strip)
        if not args:
            if default is self._ARG_DEFAULT:
                raise MissingArgumentError(name)
            return default
        return args[-1]

    def _get_arguments(self, name, source, strip=True):
        values = []
        for v in source.get(name, []):
            v = self.decode_argument(v, name=name)
            if isinstance(v, unicode_type):
                # Get rid of any weird control chars (unless decoding gave
                # us bytes, in which case leave it alone)
                v = RequestHandler._remove_control_chars_regex.sub(" ", v)
            if strip:
                v = v.strip()
            values.append(v)
        return values

    def decode_argument(self, value, name=None):
        """Decodes an argument from the request.

        The argument has been percent-decoded and is now a byte string.
        By default, this method decodes the argument as utf-8 and returns
        a unicode string, but this may be overridden in subclasses.

        This method is used as a filter for both `get_argument()` and for
        values extracted from the url and passed to `get()`/`post()`/etc.

        The name of the argument is provided if known, but may be None
        (e.g. for unnamed groups in the url regex).
        """
        try:
            return _unicode(value)
        except UnicodeDecodeError:
            # Truncate the raw value to 40 bytes so a huge bad argument
            # doesn't bloat the error message.
            raise HTTPError(400, "Invalid unicode in %s: %r" %
                            (name or "url", value[:40]))

    @property
    def cookies(self):
        """An alias for
        `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
        return self.request.cookies

    def get_cookie(self, name, default=None):
        """Returns the value of the request cookie with the given name.

        If the named cookie is not present, returns ``default``.

        This method only returns cookies that were present in the request.
        It does not see the outgoing cookies set by `set_cookie` in this
        handler.
        """
        if self.request.cookies is not None and name in self.request.cookies:
            return self.request.cookies[name].value
        return default

    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                   expires_days=None, **kwargs):
        """Sets an outgoing cookie name/value with the given options.

        Newly-set cookies are not immediately visible via `get_cookie`;
        they are not present until the next request.

        expires may be a numeric timestamp as returned by `time.time`,
        a time tuple as returned by `time.gmtime`, or a
        `datetime.datetime` object.

        Additional keyword arguments are set on the cookies.Morsel
        directly.
        See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
        for available attributes.
        """
        # The cookie library only accepts type str, in both python 2 and 3
        name = escape.native_str(name)
        value = escape.native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        # Outgoing cookies accumulate in _new_cookie until flush() emits
        # Set-Cookie headers, so a cookie can be overwritten before sending.
        if not hasattr(self, "_new_cookie"):
            self._new_cookie = Cookie.SimpleCookie()
        if name in self._new_cookie:
            del self._new_cookie[name]
        self._new_cookie[name] = value
        morsel = self._new_cookie[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            morsel["expires"] = httputil.format_timestamp(expires)
        if path:
            morsel["path"] = path
        for k, v in kwargs.items():
            if k == 'max_age':
                k = 'max-age'

            # skip falsy values for httponly and secure flags because
            # SimpleCookie sets them regardless
            if k in ['httponly', 'secure'] and not v:
                continue

            morsel[k] = v

    def clear_cookie(self, name, path="/", domain=None):
        """Deletes the cookie with the given name.
        Due to limitations of the cookie protocol, you must pass the same
        path and domain to clear a cookie as were used when that cookie
        was set (but there is no way to find out on the server side
        which values were used for a given cookie).

        Similar to `set_cookie`, the effect of this method will not be
        seen until the following request.
        """
        # Cookies cannot be deleted directly; set an expiration date one
        # year in the past and let the browser discard the cookie.
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        self.set_cookie(name, value="", path=path, expires=expires,
                        domain=domain)

    def clear_all_cookies(self, path="/", domain=None):
        """Deletes all the cookies the user sent with this request.

        See `clear_cookie` for more information on the path and domain
        parameters.

        Similar to `set_cookie`, the effect of this method will not be
        seen until the following request.

        .. versionchanged:: 3.2

           Added the ``path`` and ``domain`` parameters.
        """
        for name in self.request.cookies:
            self.clear_cookie(name, path=path, domain=domain)

    def set_secure_cookie(self, name, value, expires_days=30, version=None,
                          **kwargs):
        """Signs and timestamps a cookie so it cannot be forged.

        You must specify the ``cookie_secret`` setting in your Application
        to use this method. It should be a long, random sequence of bytes
        to be used as the HMAC secret for the signature.

        To read a cookie set with this method, use `get_secure_cookie()`.

        Note that the ``expires_days`` parameter sets the lifetime of the
        cookie in the browser, but is independent of the ``max_age_days``
        parameter to `get_secure_cookie`.

        Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies)

        Similar to `set_cookie`, the effect of this method will not be
        seen until the following request.

        .. versionchanged:: 3.2.1

           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.set_cookie(name, self.create_signed_value(name, value,
                                                       version=version),
                        expires_days=expires_days, **kwargs)

    def create_signed_value(self, name, value, version=None):
        """Signs and timestamps a string so it cannot be forged.

        Normally used via set_secure_cookie, but provided as a separate
        method for non-cookie uses.  To decode a value not stored
        as a cookie use the optional value argument to get_secure_cookie.

        .. versionchanged:: 3.2.1

           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        secret = self.application.settings["cookie_secret"]
        key_version = None
        if isinstance(secret, dict):
            # A dict of secrets enables key rotation; the "key_version"
            # setting selects which secret signs new values.
            if self.application.settings.get("key_version") is None:
                raise Exception("key_version setting must be used for secret_key dicts")
            key_version = self.application.settings["key_version"]

        return create_signed_value(secret, name, value, version=version,
                                   key_version=key_version)

    def get_secure_cookie(self, name, value=None, max_age_days=31,
                          min_version=None):
        """Returns the given signed cookie if it validates, or None.

        The decoded cookie value is returned as a byte string (unlike
        `get_cookie`).

        Similar to `get_cookie`, this method only returns cookies that
        were present in the request. It does not see outgoing cookies set by
        `set_secure_cookie` in this handler.

        .. versionchanged:: 3.2.1

           Added the ``min_version`` argument.  Introduced cookie version 2;
           both versions 1 and 2 are accepted by default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return decode_signed_value(self.application.settings["cookie_secret"],
                                   name, value, max_age_days=max_age_days,
                                   min_version=min_version)

    def get_secure_cookie_key_version(self, name, value=None):
        """Returns the signing key version of the secure cookie.

        The version is returned as int.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return get_signature_key_version(value)

    def redirect(self, url, permanent=False, status=None):
        """Sends a redirect to the given (optionally relative) URL.

        If the ``status`` argument is specified, that value is used as the
        HTTP status code; otherwise either 301 (permanent) or 302
        (temporary) is chosen based on the ``permanent`` argument.
        The default is 302 (temporary).
        """
        if self._headers_written:
            raise Exception("Cannot redirect after headers have been written")
        if status is None:
            status = 301 if permanent else 302
        else:
            # An explicit status must be a redirect (3xx) code.
            assert isinstance(status, int) and 300 <= status <= 399
        self.set_status(status)
        self.set_header("Location", utf8(url))
        self.finish()

    def write(self, chunk):
        """Writes the given chunk to the output buffer.

        To write the output to the network, use the flush() method below.

        If the given chunk is a dictionary, we write it as JSON and set
        the Content-Type of the response to be ``application/json``.
        (if you want to send JSON as a different ``Content-Type``, call
        set_header *after* calling write()).

        Note that lists are not converted to JSON because of a potential
        cross-site security vulnerability.  All JSON output should be
        wrapped in a dictionary.  More details at
        http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
        https://github.com/facebook/tornado/issues/1009
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish()")
        if not isinstance(chunk, (bytes, unicode_type, dict)):
            message = "write() only accepts bytes, unicode, and dict objects"
            if isinstance(chunk, list):
                message += ". Lists not accepted for security reasons; see " + \
                    "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
            raise TypeError(message)
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)

    def render(self, template_name, **kwargs):
        """Renders the template with the given arguments as the response.

        ``render()`` calls ``finish()``, so no other output methods can be
        called after it.

        Returns a `.Future` with the same semantics as the one returned by
        `finish`. Awaiting this `.Future` is optional.

        .. versionchanged:: 5.1

           Now returns a `.Future` instead of ``None``.
        """
        if self._finished:
            raise RuntimeError("Cannot render() after finish()")
        html = self.render_string(template_name, **kwargs)

        # Insert the additional JS and CSS added by the modules on the page
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        for module in getattr(self, "_active_modules", {}).values():
            embed_part = module.embedded_javascript()
            if embed_part:
                js_embed.append(utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    js_files.append(file_part)
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part:
                css_embed.append(utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    css_files.append(file_part)
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part:
                html_heads.append(utf8(head_part))
            body_part = module.html_body()
            if body_part:
                html_bodies.append(utf8(body_part))

        # JS is spliced in just before the *last* </body> (rindex); CSS and
        # head content go before the first </head> (index).
        if js_files:
            # Maintain order of JavaScript files given by modules
            js = self.render_linked_js(js_files)
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
        if js_embed:
            js = self.render_embed_js(js_embed)
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + js + b'\n' + html[sloc:]
        if css_files:
            css = self.render_linked_css(css_files)
            hloc = html.index(b'</head>')
            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
        if css_embed:
            css = self.render_embed_css(css_embed)
            hloc = html.index(b'</head>')
            html = html[:hloc] + css + b'\n' + html[hloc:]
        if html_heads:
            hloc = html.index(b'</head>')
            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
        if html_bodies:
            hloc = html.index(b'</body>')
            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
        return self.finish(html)

    def render_linked_js(self, js_files):
        """Default method used to render the final js links for the
        rendered webpage.

        Override this method in a sub-classed controller to change the output.
        """
        paths = []
        unique_paths = set()

        for path in js_files:
            if not is_absolute(path):
                path = self.static_url(path)
            if path not in unique_paths:
                paths.append(path)
                unique_paths.add(path)

        return ''.join('<script src="' + escape.xhtml_escape(p) +
                       '" type="text/javascript"></script>'
                       for p in paths)

    def render_embed_js(self, js_embed):
        """Default method used to render the final embedded js for the
        rendered webpage.

        Override this method in a sub-classed controller to change the output.
        """
        return b'<script type="text/javascript">\n//<![CDATA[\n' + \
            b'\n'.join(js_embed) + b'\n//]]>\n</script>'

    def render_linked_css(self, css_files):
        """Default method used to render the final css links for the
        rendered webpage.

        Override this method in a sub-classed controller to change the output.
        """
        paths = []
        unique_paths = set()

        for path in css_files:
            if not is_absolute(path):
                path = self.static_url(path)
            if path not in unique_paths:
                paths.append(path)
                unique_paths.add(path)

        return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
                       'type="text/css" rel="stylesheet"/>'
                       for p in paths)

    def render_embed_css(self, css_embed):
        """Default method used to render the final embedded css for the
        rendered webpage.
        Override this method in a sub-classed controller to change the output.
        """
        return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
            b'\n</style>'

    def render_string(self, template_name, **kwargs):
        """Generate the given template with the given arguments.

        We return the generated byte string (in utf8). To generate and
        write a template as a response, use render() above.
        """
        # If no template_path is specified, use the path of the calling file
        template_path = self.get_template_path()
        if not template_path:
            # Walk up the stack past this module's frames to find the
            # caller's file, and use its directory as the template path.
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file:
                frame = frame.f_back
            template_path = os.path.dirname(frame.f_code.co_filename)
        # Loaders are cached class-wide per directory (_template_loaders);
        # the lock serializes cache population across threads.
        with RequestHandler._template_loader_lock:
            if template_path not in RequestHandler._template_loaders:
                loader = self.create_template_loader(template_path)
                RequestHandler._template_loaders[template_path] = loader
            else:
                loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        namespace = self.get_template_namespace()
        namespace.update(kwargs)
        return t.generate(**namespace)

    def get_template_namespace(self):
        """Returns a dictionary to be used as the default template namespace.

        May be overridden by subclasses to add or modify values.

        The results of this method will be combined with additional
        defaults in the `tornado.template` module and keyword arguments
        to `render` or `render_string`.
        """
        namespace = dict(
            handler=self,
            request=self.request,
            current_user=self.current_user,
            locale=self.locale,
            _=self.locale.translate,
            pgettext=self.locale.pgettext,
            static_url=self.static_url,
            xsrf_form_html=self.xsrf_form_html,
            reverse_url=self.reverse_url
        )
        namespace.update(self.ui)
        return namespace

    def create_template_loader(self, template_path):
        """Returns a new template loader for the given path.

        May be overridden by subclasses.  By default returns a
        directory-based loader on the given path, using the
        ``autoescape`` and ``template_whitespace`` application
        settings.  If a ``template_loader`` application setting is
        supplied, uses that instead.
        """
        settings = self.application.settings
        if "template_loader" in settings:
            return settings["template_loader"]
        kwargs = {}
        if "autoescape" in settings:
            # autoescape=None means "no escaping", so we have to be sure
            # to only pass this kwarg if the user asked for it.
            kwargs["autoescape"] = settings["autoescape"]
        if "template_whitespace" in settings:
            kwargs["whitespace"] = settings["template_whitespace"]
        return template.Loader(template_path, **kwargs)

    def flush(self, include_footers=False, callback=None):
        """Flushes the current output buffer to the network.

        The ``callback`` argument, if given, can be used for flow control:
        it will be run when all flushed data has been written to the socket.
        Note that only one flush callback can be outstanding at a time;
        if another flush occurs before the previous flush's callback
        has been run, the previous callback will be discarded.

        .. versionchanged:: 4.0
           Now returns a `.Future` if no callback is given.

        .. deprecated:: 5.1

           The ``callback`` argument is deprecated and will be removed in
           Tornado 6.0.
        """
        chunk = b"".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            self._headers_written = True
            # Output transforms (e.g. gzip, chunked) get first crack at the
            # status line, headers, and first chunk before anything is sent.
            for transform in self._transforms:
                self._status_code, self._headers, chunk = \
                    transform.transform_first_chunk(
                        self._status_code, self._headers,
                        chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method == "HEAD":
                chunk = None

            # Finalize the cookie headers (which have been stored in a side
            # object so an outgoing cookie could be overwritten before it
            # is sent).
            if hasattr(self, "_new_cookie"):
                for cookie in self._new_cookie.values():
                    self.add_header("Set-Cookie", cookie.OutputString(None))

            start_line = httputil.ResponseStartLine('',
                                                    self._status_code,
                                                    self._reason)
            return self.request.connection.write_headers(
                start_line, self._headers, chunk, callback=callback)
        else:
            # Headers already sent: only body transforms apply from now on.
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method != "HEAD":
                return self.request.connection.write(chunk, callback=callback)
            else:
                # Nothing to write; return an already-resolved Future so
                # callers can await flush() unconditionally.
                future = Future()
                future.set_result(None)
                return future

    def finish(self, chunk=None):
        """Finishes this response, ending the HTTP request.

        Passing a ``chunk`` to ``finish()`` is equivalent to passing that
        chunk to ``write()`` and then calling ``finish()`` with no arguments.

        Returns a `.Future` which may optionally be awaited to track the sending
        of the response to the client. This `.Future` resolves when all the response
        data has been sent, and raises an error if the connection is closed before all
        data can be sent.

        .. versionchanged:: 5.1

           Now returns a `.Future` instead of ``None``.
        """
        if self._finished:
            raise RuntimeError("finish() called twice")

        if chunk is not None:
            self.write(chunk)

        # Automatically support ETags and add the Content-Length header if
        # we have not flushed any content yet.
        if not self._headers_written:
            if (self._status_code == 200 and
                self.request.method in ("GET", "HEAD") and
                    "Etag" not in self._headers):
                self.set_etag_header()
                if self.check_etag_header():
                    # Client already has this entity: discard the body and
                    # answer 304 Not Modified.
                    self._write_buffer = []
                    self.set_status(304)
            if (self._status_code in (204, 304) or
                    (self._status_code >= 100 and self._status_code < 200)):
                # 1xx, 204 and 304 responses must not carry a body.
                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
                self._clear_headers_for_304()
            elif "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)

        if hasattr(self.request, "connection"):
            # Now that the request is finished, clear the callback we
            # set on the HTTPConnection (which would otherwise prevent the
            # garbage collection of the RequestHandler when there
            # are keepalive connections)
            self.request.connection.set_close_callback(None)

        future = self.flush(include_footers=True)
        self.request.connection.finish()
        self._log()
        self._finished = True
        self.on_finish()
        self._break_cycles()
        return future

    def detach(self):
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all
        further HTTP processing. Intended for implementing protocols
        like websockets that tunnel over an HTTP handshake.

        This method is only supported when HTTP/1.1 is used.

        .. versionadded:: 5.1
        """
        self._finished = True
        return self.request.connection.detach()

    def _break_cycles(self):
        # Break up a reference cycle between this handler and the
        # _ui_module closures to allow for faster GC on CPython.
        self.ui = None

    def send_error(self, status_code=500, **kwargs):
        """Sends the given HTTP error code to the browser.

        If `flush()` has already been called, it is not possible to send
        an error, so this method will simply terminate the response.
        If output has been written but not yet flushed, it will be discarded
        and replaced with the error page.

        Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`. """ if self._headers_written: gen_log.error("Cannot send error response after headers written") if not self._finished: # If we get an error between writing headers and finishing, # we are unlikely to be able to finish due to a # Content-Length mismatch. Try anyway to release the # socket. try: self.finish() except Exception: gen_log.error("Failed to flush partial response", exc_info=True) return self.clear() reason = kwargs.get('reason') if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, HTTPError) and exception.reason: reason = exception.reason self.set_status(status_code, reason=reason) try: self.write_error(status_code, **kwargs) except Exception: app_log.error("Uncaught exception in write_error", exc_info=True) if not self._finished: self.finish() def write_error(self, status_code, **kwargs): """Override to implement custom error pages. ``write_error`` may call `write`, `render`, `set_header`, etc to produce output as usual. If this error was caused by an uncaught exception (including HTTPError), an ``exc_info`` triple will be available as ``kwargs["exc_info"]``. Note that this exception may not be the "current" exception for purposes of methods like ``sys.exc_info()`` or ``traceback.format_exc``. """ if self.settings.get("serve_traceback") and "exc_info" in kwargs: # in debug mode, try to send a traceback self.set_header('Content-Type', 'text/plain') for line in traceback.format_exception(*kwargs["exc_info"]): self.write(line) self.finish() else: self.finish("<html><title>%(code)d: %(message)s</title>" "<body>%(code)d: %(message)s</body></html>" % { "code": status_code, "message": self._reason, }) @property def locale(self): """The locale for the current session. 
        Determined by either `get_user_locale`, which you can override to
        set the locale based on, e.g., a user preference stored in a
        database, or `get_browser_locale`, which uses the ``Accept-Language``
        header.

        .. versionchanged: 4.1
           Added a property setter.
        """
        if not hasattr(self, "_locale"):
            # User preference takes precedence over the browser header.
            self._locale = self.get_user_locale()
            if not self._locale:
                self._locale = self.get_browser_locale()
                assert self._locale
        return self._locale

    @locale.setter
    def locale(self, value):
        self._locale = value

    def get_user_locale(self):
        """Override to determine the locale from the authenticated user.

        If None is returned, we fall back to `get_browser_locale()`.

        This method should return a `tornado.locale.Locale` object,
        most likely obtained via a call like ``tornado.locale.get("en")``
        """
        return None

    def get_browser_locale(self, default="en_US"):
        """Determines the user's locale from ``Accept-Language`` header.

        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
        """
        if "Accept-Language" in self.request.headers:
            languages = self.request.headers["Accept-Language"].split(",")
            locales = []
            for language in languages:
                parts = language.strip().split(";")
                # Parse quality factors ("en-us;q=0.8"); entries without an
                # explicit q= default to 1.0, unparsable ones to 0.0.
                if len(parts) > 1 and parts[1].startswith("q="):
                    try:
                        score = float(parts[1][2:])
                    except (ValueError, TypeError):
                        score = 0.0
                else:
                    score = 1.0
                locales.append((parts[0], score))
            if locales:
                # Highest quality first.
                locales.sort(key=lambda pair: pair[1], reverse=True)
                codes = [l[0] for l in locales]
                return locale.get(*codes)
        return locale.get(default)

    @property
    def current_user(self):
        """The authenticated user for this request.

        This is set in one of two ways:

        * A subclass may override `get_current_user()`, which will be called
          automatically the first time ``self.current_user`` is accessed.
          `get_current_user()` will only be called once per request,
          and is cached for future access::

              def get_current_user(self):
                  user_cookie = self.get_secure_cookie("user")
                  if user_cookie:
                      return json.loads(user_cookie)
                  return None

        * It may be set as a normal variable, typically from an overridden
          `prepare()`::

              @gen.coroutine
              def prepare(self):
                  user_id_cookie = self.get_secure_cookie("user_id")
                  if user_id_cookie:
                      self.current_user = yield load_user(user_id_cookie)

        Note that `prepare()` may be a coroutine while `get_current_user()`
        may not, so the latter form is necessary if loading the user requires
        asynchronous operations.

        The user object may be any type of the application's choosing.
        """
        if not hasattr(self, "_current_user"):
            # Cache on first access so get_current_user() runs at most once
            # per request.
            self._current_user = self.get_current_user()
        return self._current_user

    @current_user.setter
    def current_user(self, value):
        self._current_user = value

    def get_current_user(self):
        """Override to determine the current user from, e.g., a cookie.

        This method may not be a coroutine.
        """
        return None

    def get_login_url(self):
        """Override to customize the login URL based on the request.

        By default, we use the ``login_url`` application setting.
        """
        self.require_setting("login_url", "@tornado.web.authenticated")
        return self.application.settings["login_url"]

    def get_template_path(self):
        """Override to customize template path for each handler.

        By default, we use the ``template_path`` application setting.
        Return None to load templates relative to the calling file.
        """
        return self.application.settings.get("template_path")

    @property
    def xsrf_token(self):
        """The XSRF-prevention token for the current user/session.

        To prevent cross-site request forgery, we set an '_xsrf' cookie
        and include the same '_xsrf' value as an argument with all POST
        requests. If the two do not match, we reject the form submission
        as a potential forgery.

        See http://en.wikipedia.org/wiki/Cross-site_request_forgery

        This property is of type `bytes`, but it contains only ASCII
        characters.
If a character string is required, there is no need to base64-encode it; just decode the byte string as UTF-8. .. versionchanged:: 3.2.2 The xsrf token will now be have a random mask applied in every request, which makes it safe to include the token in pages that are compressed. See http://breachattack.com for more information on the issue fixed by this change. Old (version 1) cookies will be converted to version 2 when this method is called unless the ``xsrf_cookie_version`` `Application` setting is set to 1. .. versionchanged:: 4.3 The ``xsrf_cookie_kwargs`` `Application` setting may be used to supply additional cookie options (which will be passed directly to `set_cookie`). For example, ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)`` will set the ``secure`` and ``httponly`` flags on the ``_xsrf`` cookie. """ if not hasattr(self, "_xsrf_token"): version, token, timestamp = self._get_raw_xsrf_token() output_version = self.settings.get("xsrf_cookie_version", 2) cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {}) if output_version == 1: self._xsrf_token = binascii.b2a_hex(token) elif output_version == 2: mask = os.urandom(4) self._xsrf_token = b"|".join([ b"2", binascii.b2a_hex(mask), binascii.b2a_hex(_websocket_mask(mask, token)), utf8(str(int(timestamp)))]) else: raise ValueError("unknown xsrf cookie version %d", output_version) if version is None: expires_days = 30 if self.current_user else None self.set_cookie("_xsrf", self._xsrf_token, expires_days=expires_days, **cookie_kwargs) return self._xsrf_token def _get_raw_xsrf_token(self): """Read or generate the xsrf token in its raw form. The raw_xsrf_token is a tuple containing: * version: the version of the cookie from which this token was read, or None if we generated a new token in this request. * token: the raw token data; random (non-ascii) bytes. 
        * timestamp: the time this token was generated (will not
          be accurate for version 1 cookies)
        """
        if not hasattr(self, '_raw_xsrf_token'):
            cookie = self.get_cookie("_xsrf")
            if cookie:
                version, token, timestamp = self._decode_xsrf_token(cookie)
            else:
                version, token, timestamp = None, None, None
            if token is None:
                # No usable cookie: mint a new random token for this request.
                version = None
                token = os.urandom(16)
                timestamp = time.time()
            self._raw_xsrf_token = (version, token, timestamp)
        return self._raw_xsrf_token

    def _decode_xsrf_token(self, cookie):
        """Convert a cookie string into the tuple form returned by
        _get_raw_xsrf_token.
        """

        try:
            m = _signed_value_version_re.match(utf8(cookie))

            if m:
                version = int(m.group(1))
                if version == 2:
                    _, mask, masked_token, timestamp = cookie.split("|")

                    # Unmask the token with the per-cookie random mask.
                    mask = binascii.a2b_hex(utf8(mask))
                    token = _websocket_mask(
                        mask, binascii.a2b_hex(utf8(masked_token)))
                    timestamp = int(timestamp)
                    return version, token, timestamp
                else:
                    # Treat unknown versions as not present instead of failing.
                    raise Exception("Unknown xsrf cookie version")
            else:
                version = 1
                try:
                    token = binascii.a2b_hex(utf8(cookie))
                except (binascii.Error, TypeError):
                    token = utf8(cookie)
                # We don't have a usable timestamp in older versions.
                timestamp = int(time.time())
                return (version, token, timestamp)
        except Exception:
            # Catch exceptions and return nothing instead of failing.
            gen_log.debug("Uncaught exception in _decode_xsrf_token",
                          exc_info=True)
            return None, None, None

    def check_xsrf_cookie(self):
        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

        To prevent cross-site request forgery, we set an ``_xsrf``
        cookie and include the same value as a non-cookie
        field with all ``POST`` requests. If the two do not match, we
        reject the form submission as a potential forgery.

        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
        (the latter is accepted for compatibility with Django).
        See http://en.wikipedia.org/wiki/Cross-site_request_forgery

        Prior to release 1.1.1, this check was ignored if the HTTP header
        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
        has been shown to be insecure and has been removed.  For more
        information please see
        http://www.djangoproject.com/weblog/2011/feb/08/security/
        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails

        .. versionchanged:: 3.2.2
           Added support for cookie version 2.  Both versions 1 and 2 are
           supported.
        """
        # Accept the token from a form field or from either of the
        # X-Xsrftoken / X-Csrftoken headers.
        token = (self.get_argument("_xsrf", None) or
                 self.request.headers.get("X-Xsrftoken") or
                 self.request.headers.get("X-Csrftoken"))
        if not token:
            raise HTTPError(403, "'_xsrf' argument missing from POST")
        _, token, _ = self._decode_xsrf_token(token)
        _, expected_token, _ = self._get_raw_xsrf_token()
        if not token:
            raise HTTPError(403, "'_xsrf' argument has invalid format")
        # Constant-time comparison so the token cannot be probed via timing.
        if not _time_independent_equals(utf8(token), utf8(expected_token)):
            raise HTTPError(403, "XSRF cookie does not match POST argument")

    def xsrf_form_html(self):
        """An HTML ``<input/>`` element to be included with all POST forms.

        It defines the ``_xsrf`` input value, which we check on all POST
        requests to prevent cross-site request forgery. If you have set
        the ``xsrf_cookies`` application setting, you must include this
        HTML within all of your HTML forms.

        In a template, this method should be called with ``{% module
        xsrf_form_html() %}``

        See `check_xsrf_cookie()` above for more information.
        """
        return '<input type="hidden" name="_xsrf" value="' + \
            escape.xhtml_escape(self.xsrf_token) + '"/>'

    def static_url(self, path, include_host=None, **kwargs):
        """Returns a static URL for the given relative static file path.

        This method requires you set the ``static_path`` setting in your
        application (which specifies the root directory of your static
        files).

        This method returns a versioned url (by default appending
        ``?v=<signature>``), which allows the static files to be
        cached indefinitely.
        This can be disabled by passing ``include_version=False`` (in the
        default implementation; other static file implementations are not
        required to support this, but they may support other options).

        By default this method returns URLs relative to the current host, but
        if ``include_host`` is true the URL returned will be absolute.  If
        this handler has an ``include_host`` attribute, that value will be
        used as the default for all `static_url` calls that do not pass
        ``include_host`` as a keyword argument.

        """
        self.require_setting("static_path", "static_url")
        # The configured static handler class controls URL generation.
        get_url = self.settings.get("static_handler_class",
                                    StaticFileHandler).make_static_url

        if include_host is None:
            include_host = getattr(self, "include_host", False)

        if include_host:
            base = self.request.protocol + "://" + self.request.host
        else:
            base = ""

        return base + get_url(self.settings, path, **kwargs)

    def require_setting(self, name, feature="this feature"):
        """Raises an exception if the given app setting is not defined."""
        if not self.application.settings.get(name):
            raise Exception("You must define the '%s' setting in your "
                            "application to use %s" % (name, feature))

    def reverse_url(self, name, *args):
        """Alias for `Application.reverse_url`."""
        return self.application.reverse_url(name, *args)

    def compute_etag(self):
        """Computes the etag header to be used for this request.

        By default uses a hash of the content written so far.

        May be overridden to provide custom etag implementations,
        or may return None to disable tornado's default etag support.
        """
        hasher = hashlib.sha1()
        for part in self._write_buffer:
            hasher.update(part)
        return '"%s"' % hasher.hexdigest()

    def set_etag_header(self):
        """Sets the response's Etag header using ``self.compute_etag()``.

        Note: no header will be set if ``compute_etag()`` returns ``None``.

        This method is called automatically when the request is finished.
""" etag = self.compute_etag() if etag is not None: self.set_header("Etag", etag) def check_etag_header(self): """Checks the ``Etag`` header against requests's ``If-None-Match``. Returns ``True`` if the request's Etag matches and a 304 should be returned. For example:: self.set_etag_header() if self.check_etag_header(): self.set_status(304) return This method is called automatically when the request is finished, but may be called earlier for applications that override `compute_etag` and want to do an early check for ``If-None-Match`` before completing the request. The ``Etag`` header should be set (perhaps with `set_etag_header`) before calling this method. """ computed_etag = utf8(self._headers.get("Etag", "")) # Find all weak and strong etag values from If-None-Match header # because RFC 7232 allows multiple etag values in a single header. etags = re.findall( br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", "")) ) if not computed_etag or not etags: return False match = False if etags[0] == b'*': match = True else: # Use a weak comparison when comparing entity-tags. 
            def val(x):
                # Strip the weak-validator prefix so W/"x" compares equal
                # to "x".
                return x[2:] if x.startswith(b'W/') else x

            for etag in etags:
                if val(etag) == val(computed_etag):
                    match = True
                    break
        return match

    def _stack_context_handle_exception(self, type, value, traceback):
        try:
            # For historical reasons _handle_request_exception only takes
            # the exception value instead of the full triple,
            # so re-raise the exception to ensure that it's in
            # sys.exc_info()
            raise_exc_info((type, value, traceback))
        except Exception:
            self._handle_request_exception(value)
        return True

    @gen.coroutine
    def _execute(self, transforms, *args, **kwargs):
        """Executes this request with the given output transforms."""
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.items())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                    self.application.settings.get("xsrf_cookies"):
                self.check_xsrf_cookie()

            result = self.prepare()
            if result is not None:
                result = yield result
            if self._prepared_future is not None:
                # Tell the Application we've finished with prepare()
                # and are ready for the body to arrive.
                future_set_result_unless_cancelled(self._prepared_future, None)
            if self._finished:
                return

            if _has_stream_request_body(self.__class__):
                # In streaming mode request.body is a Future that signals
                # the body has been completely received.  The Future has no
                # result; the data has been passed to self.data_received
                # instead.
                try:
                    yield self.request.body
                except iostream.StreamClosedError:
                    return

            # Dispatch to the HTTP verb method (get/post/...).
            method = getattr(self, self.request.method.lower())
            result = method(*self.path_args, **self.path_kwargs)
            if result is not None:
                result = yield result
            if self._auto_finish and not self._finished:
                self.finish()
        except Exception as e:
            try:
                self._handle_request_exception(e)
            except Exception:
                app_log.error("Exception in exception handler", exc_info=True)
            finally:
                # Unset result to avoid circular references
                result = None
            if (self._prepared_future is not None and
                    not self._prepared_future.done()):
                # In case we failed before setting _prepared_future, do it
                # now (to unblock the HTTP server).  Note that this is not
                # in a finally block to avoid GC issues prior to Python 3.4.
                self._prepared_future.set_result(None)

    def data_received(self, chunk):
        """Implement this method to handle streamed request data.

        Requires the `.stream_request_body` decorator.
        """
        raise NotImplementedError()

    def _log(self):
        """Logs the current request.

        Sort of deprecated since this functionality was moved to the
        Application, but left in place for the benefit of existing apps
        that have overridden this method.
        """
        self.application.log_request(self)

    def _request_summary(self):
        # e.g. "GET /path (127.0.0.1)"
        return "%s %s (%s)" % (self.request.method, self.request.uri,
                               self.request.remote_ip)

    def _handle_request_exception(self, e):
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            # Any non-HTTPError becomes a 500 Internal Server Error.
            self.send_error(500, exc_info=sys.exc_info())

    def log_exception(self, typ, value, tb):
        """Override to customize logging of uncaught exceptions.

        By default logs instances of `HTTPError` as warnings without
        stack traces (on the ``tornado.general`` logger), and all
        other exceptions as errors with stack traces (on the
        ``tornado.application`` logger).

        .. versionadded:: 3.1
        """
        if isinstance(value, HTTPError):
            if value.log_message:
                format = "%d %s: " + value.log_message
                args = ([value.status_code, self._request_summary()] +
                        list(value.args))
                gen_log.warning(format, *args)
        else:
            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                          self.request, exc_info=(typ, value, tb))

    def _ui_module(self, name, module):
        # Return a render callable bound to this handler; the UIModule
        # instance is created lazily and cached per request.
        def render(*args, **kwargs):
            if not hasattr(self, "_active_modules"):
                self._active_modules = {}
            if name not in self._active_modules:
                self._active_modules[name] = module(self)
            rendered = self._active_modules[name].render(*args, **kwargs)
            return rendered
        return render

    def _ui_method(self, method):
        return lambda *args, **kwargs: method(self, *args, **kwargs)

    def _clear_headers_for_304(self):
        # 304 responses should not contain entity headers (defined in
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
        # not explicitly allowed by
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        headers = ["Allow", "Content-Encoding", "Content-Language",
                   "Content-Length", "Content-MD5", "Content-Range",
                   "Content-Type", "Last-Modified"]
        for h in headers:
            self.clear_header(h)


def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.

    This decorator is for callback-style asynchronous methods; for
    coroutines, use the ``@gen.coroutine`` decorator without
    ``@asynchronous``.
    (It is legal for legacy reasons to use the two decorators together
    provided ``@asynchronous`` is first, but ``@asynchronous`` will be
    ignored in this case)

    This decorator should only be applied to the :ref:`HTTP verb
    methods <verbs>`; its behavior is undefined for any other method.
    This decorator does not *make* a method asynchronous; it tells
    the framework that the method *is* asynchronous.  For this decorator
    to be useful the method must (at least sometimes) do something
    asynchronous.

    If this decorator is given, the response is not finished when the
    method returns. It is up to the request handler to call
    `self.finish() <RequestHandler.finish>` to finish the HTTP
    request. Without this decorator, the request is automatically
    finished when the ``get()`` or ``post()`` method returns. Example:

    .. testcode::

       class MyRequestHandler(RequestHandler):
           @asynchronous
           def get(self):
               http = httpclient.AsyncHTTPClient()
               http.fetch("http://friendfeed.com/", self._on_download)

           def _on_download(self, response):
               self.write("Downloaded!")
               self.finish()

    .. testoutput::
       :hide:

    .. versionchanged:: 3.1
       The ability to use ``@gen.coroutine`` without ``@asynchronous``.

    .. versionchanged:: 4.3
       Returning anything but ``None`` or a yieldable object from a method
       decorated with ``@asynchronous`` is an error. Such return values
       were previously ignored silently.

    .. deprecated:: 5.1

       This decorator is deprecated and will be removed in Tornado 6.0.
       Use coroutines instead.
    """
    warnings.warn("@asynchronous is deprecated, use coroutines instead",
                  DeprecationWarning)
    # Delay the IOLoop import because it's not available on app engine.
    from tornado.ioloop import IOLoop

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Disable automatic finish(); the handler is responsible for it.
        self._auto_finish = False
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception, delay_warning=True):
            result = method(self, *args, **kwargs)
            if result is not None:
                result = gen.convert_yielded(result)

                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves.  Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
                def future_complete(f):
                    f.result()
                    if not self._finished:
                        self.finish()
                IOLoop.current().add_future(result, future_complete)
                # Once we have done this, hide the Future from our
                # caller (i.e. RequestHandler._when_complete), which
                # would otherwise set up its own callback and
                # exception handler (resulting in exceptions being
                # logged twice).
                return None
            return result
    return wrapper


def stream_request_body(cls):
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have been
      read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``, which
      will be called zero or more times as data is available.  Note that
      if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``, in which case the next method will not be called
      until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called after
      the entire body has been read.

    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
    for example usage.
""" # noqa: E501 if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) cls._stream_request_body = True return cls def _has_stream_request_body(cls): if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) return getattr(cls, '_stream_request_body', False) def removeslash(method): """Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/*'`` in conjunction with using the decorator. """ @functools.wraps(method) def wrapper(self, *args, **kwargs): if self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path.rstrip("/") if uri: # don't try to redirect '/' to '' if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) return else: raise HTTPError(404) return method(self, *args, **kwargs) return wrapper def addslash(method): """Use this decorator to add a missing trailing slash to the request path. For example, a request to ``/foo`` would redirect to ``/foo/`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/?'`` in conjunction with using the decorator. """ @functools.wraps(method) def wrapper(self, *args, **kwargs): if not self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path + "/" if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) return raise HTTPError(404) return method(self, *args, **kwargs) return wrapper class _ApplicationRouter(ReversibleRuleRouter): """Routing implementation used internally by `Application`. Provides a binding between `Application` and `RequestHandler`. 
    This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
        * it allows to use `RequestHandler` subclasses as `~.routing.Rule` target and
        * it allows to use a list/tuple of rules as `~.routing.Rule` target.
        ``process_rule`` implementation will substitute this list with an appropriate
        `_ApplicationRouter` instance.
    """

    def __init__(self, application, rules=None):
        assert isinstance(application, Application)
        self.application = application
        super(_ApplicationRouter, self).__init__(rules)

    def process_rule(self, rule):
        rule = super(_ApplicationRouter, self).process_rule(rule)

        if isinstance(rule.target, (list, tuple)):
            # A list/tuple of rules becomes a nested router.
            rule.target = _ApplicationRouter(self.application, rule.target)

        return rule

    def get_target_delegate(self, target, request, **target_params):
        if isclass(target) and issubclass(target, RequestHandler):
            # RequestHandler subclasses are dispatched through the
            # Application so handler kwargs and path args are applied.
            return self.application.get_handler_delegate(request, target, **target_params)

        return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)


class Application(ReversibleRouter):
    """A collection of request handlers that make up a web application.

    Instances of this class are callable and can be passed directly to
    HTTPServer to serve the application::

        application = web.Application([
            (r"/", MainPageHandler),
        ])
        http_server = httpserver.HTTPServer(application)
        http_server.listen(8080)
        ioloop.IOLoop.current().start()

    The constructor for this class takes in a list of
    `~.routing.Rule` objects or tuples of values corresponding to the
    arguments of `~.routing.Rule` constructor: ``(matcher, target,
    [target_kwargs], [name])``, the values in square brackets being
    optional. The default matcher is `~.routing.PathMatches`, so
    ``(regexp, target)`` tuples can also be used instead of
    ``(PathMatches(regexp), target)``.
    A common routing target is a `RequestHandler` subclass, but you can also
    use lists of rules as a target, which create a nested routing
    configuration::

        application = web.Application([
            (HostMatches("example.com"), [
                (r"/", MainPageHandler),
                (r"/feed", FeedHandler),
            ]),
        ])

    In addition to this you can use nested `~.routing.Router` instances,
    `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
    (see `~.routing` module docs for more information).

    When we receive requests, we iterate over the list in order and
    instantiate an instance of the first request class whose regexp
    matches the request path. The request class can be specified as
    either a class object or a (fully-qualified) name.

    A dictionary may be passed as the third element (``target_kwargs``)
    of the tuple, which will be used as keyword arguments to the handler's
    constructor and `~RequestHandler.initialize` method. This pattern
    is used for the `StaticFileHandler` in this example (note that a
    `StaticFileHandler` can be installed automatically with the
    static_path setting described below)::

        application = web.Application([
            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    We support virtual hosts with the `add_handlers` method, which takes in
    a host regular expression as the first argument::

        application.add_handlers(r"www\.myhost\.com", [
            (r"/article/([0-9]+)", ArticleHandler),
        ])

    If there's no match for the current request's host, then ``default_host``
    parameter value is matched against host regular expressions.

    .. warning::

       Applications that do not use TLS may be vulnerable to :ref:`DNS
       rebinding <dnsrebinding>` attacks. This attack is especially
       relevant to applications that only listen on ``127.0.0.1`` or
       other private networks. Appropriate host patterns must be used
       (instead of the default of ``r'.*'``) to prevent this risk. The
       ``default_host`` argument must not be used in applications that
       may be vulnerable to DNS rebinding.
    You can serve static files by sending the ``static_path`` setting
    as a keyword argument. We will serve those files from the
    ``/static/`` URI (this is configurable with the
    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    and ``/robots.txt`` from the same directory.  A custom subclass of
    `StaticFileHandler` can be specified with the
    ``static_handler_class`` setting.

    .. versionchanged:: 4.5
       Integration with the new `tornado.routing` module.

    """
    def __init__(self, handlers=None, default_host=None, transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.default_host = default_host
        self.settings = settings
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            # Static routes are inserted at the front so they take
            # precedence over user-supplied handlers.
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))

        if self.settings.get('debug'):
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)

        self.wildcard_router = _ApplicationRouter(self, handlers)
        self.default_router = _ApplicationRouter(self, [
            Rule(AnyMatches(), self.wildcard_router)
        ])

        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from tornado import autoreload
            autoreload.start()

    def listen(self, port, address="", **kwargs):
        """Starts an HTTP server for this application on the given port.

        This is a convenience alias for creating an `.HTTPServer`
        object and calling its listen method.  Keyword arguments not
        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
        `.HTTPServer` constructor.  For advanced uses
        (e.g. multi-process mode), do not use this method; create an
        `.HTTPServer` and call its
        `.TCPServer.bind`/`.TCPServer.start` methods directly.

        Note that after calling this method you still need to call
        ``IOLoop.current().start()`` to start the server.

        Returns the `.HTTPServer` object.

        .. versionchanged:: 4.3
           Now returns the `.HTTPServer` object.
        """
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)
        return server

    def add_handlers(self, host_pattern, host_handlers):
        """Appends the given handlers to our handler list.

        Host patterns are processed sequentially in the order they were
        added. All matching patterns will be considered.
""" host_matcher = HostMatches(host_pattern) rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers)) self.default_router.rules.insert(-1, rule) if self.default_host is not None: self.wildcard_router.add_rules([( DefaultHostMatches(self, host_matcher.host_pattern), host_handlers )]) def add_transform(self, transform_class): self.transforms.append(transform_class) def _load_ui_methods(self, methods): if isinstance(methods, types.ModuleType): self._load_ui_methods(dict((n, getattr(methods, n)) for n in dir(methods))) elif isinstance(methods, list): for m in methods: self._load_ui_methods(m) else: for name, fn in methods.items(): if not name.startswith("_") and hasattr(fn, "__call__") \ and name[0].lower() == name[0]: self.ui_methods[name] = fn def _load_ui_modules(self, modules): if isinstance(modules, types.ModuleType): self._load_ui_modules(dict((n, getattr(modules, n)) for n in dir(modules))) elif isinstance(modules, list): for m in modules: self._load_ui_modules(m) else: assert isinstance(modules, dict) for name, cls in modules.items(): try: if issubclass(cls, UIModule): self.ui_modules[name] = cls except TypeError: pass def __call__(self, request): # Legacy HTTPServer interface dispatcher = self.find_handler(request) return dispatcher.execute() def find_handler(self, request, **kwargs): route = self.default_router.find_handler(request) if route is not None: return route if self.settings.get('default_handler_class'): return self.get_handler_delegate( request, self.settings['default_handler_class'], self.settings.get('default_handler_args', {})) return self.get_handler_delegate( request, ErrorHandler, {'status_code': 404}) def get_handler_delegate(self, request, target_class, target_kwargs=None, path_args=None, path_kwargs=None): """Returns `~.httputil.HTTPMessageDelegate` that can serve a request for application and `RequestHandler` subclass. :arg httputil.HTTPServerRequest request: current HTTP request. 
:arg RequestHandler target_class: a `RequestHandler` class. :arg dict target_kwargs: keyword arguments for ``target_class`` constructor. :arg list path_args: positional arguments for ``target_class`` HTTP method that will be executed while handling a request (``get``, ``post`` or any other). :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method. """ return _HandlerDelegate( self, request, target_class, target_kwargs, path_args, path_kwargs) def reverse_url(self, name, *args): """Returns a URL path for handler named ``name`` The handler must be added to the application as a named `URLSpec`. Args will be substituted for capturing groups in the `URLSpec` regex. They will be converted to strings if necessary, encoded as utf8, and url-escaped. """ reversed_url = self.default_router.reverse_url(name, *args) if reversed_url is not None: return reversed_url raise KeyError("%s not found in named urls" % name) def log_request(self, handler): """Writes a completed HTTP request to the logs. By default writes to the python root logger. To change this behavior either subclass Application and override this method, or pass a function in the application settings dictionary as ``log_function``. 
""" if "log_function" in self.settings: self.settings["log_function"](handler) return if handler.get_status() < 400: log_method = access_log.info elif handler.get_status() < 500: log_method = access_log.warning else: log_method = access_log.error request_time = 1000.0 * handler.request.request_time() log_method("%d %s %.2fms", handler.get_status(), handler._request_summary(), request_time) class _HandlerDelegate(httputil.HTTPMessageDelegate): def __init__(self, application, request, handler_class, handler_kwargs, path_args, path_kwargs): self.application = application self.connection = request.connection self.request = request self.handler_class = handler_class self.handler_kwargs = handler_kwargs or {} self.path_args = path_args or [] self.path_kwargs = path_kwargs or {} self.chunks = [] self.stream_request_body = _has_stream_request_body(self.handler_class) def headers_received(self, start_line, headers): if self.stream_request_body: self.request.body = Future() return self.execute() def data_received(self, data): if self.stream_request_body: return self.handler.data_received(data) else: self.chunks.append(data) def finish(self): if self.stream_request_body: future_set_result_unless_cancelled(self.request.body, None) else: self.request.body = b''.join(self.chunks) self.request._parse_body() self.execute() def on_connection_close(self): if self.stream_request_body: self.handler.on_connection_close() else: self.chunks = None def execute(self): # If template cache is disabled (usually in the debug mode), # re-compile templates and reload static files on every # request so you don't need to restart to see changes if not self.application.settings.get("compiled_template_cache", True): with RequestHandler._template_loader_lock: for loader in RequestHandler._template_loaders.values(): loader.reset() if not self.application.settings.get('static_hash_cache', True): StaticFileHandler.reset() self.handler = self.handler_class(self.application, self.request, 
**self.handler_kwargs) transforms = [t(self.request) for t in self.application.transforms] if self.stream_request_body: self.handler._prepared_future = Future() # Note that if an exception escapes handler._execute it will be # trapped in the Future it returns (which we are ignoring here, # leaving it to be logged when the Future is GC'd). # However, that shouldn't happen because _execute has a blanket # except handler, and we cannot easily access the IOLoop here to # call add_future (because of the requirement to remain compatible # with WSGI) self.handler._execute(transforms, *self.path_args, **self.path_kwargs) # If we are streaming the request body, then execute() is finished # when the handler has prepared to receive the body. If not, # it doesn't matter when execute() finishes (so we return None) return self.handler._prepared_future class HTTPError(Exception): """An exception that will turn into an HTTP error response. Raising an `HTTPError` is a convenient alternative to calling `RequestHandler.send_error` since it automatically ends the current function. To customize the response sent with an `HTTPError`, override `RequestHandler.write_error`. :arg int status_code: HTTP status code. Must be listed in `httplib.responses <http.client.responses>` unless the ``reason`` keyword argument is given. :arg str log_message: Message to be written to the log for this error (will not be shown to the user unless the `Application` is in debug mode). May contain ``%s``-style placeholders, which will be filled in with remaining positional parameters. :arg str reason: Keyword-only argument. The HTTP "reason" phrase to pass in the status line along with ``status_code``. Normally determined automatically from ``status_code``, but can be used to use a non-standard numeric code. 
    """
    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)
        if log_message and not args:
            # With no format args, escape literal '%' so the %-formatting
            # applied in __str__ cannot raise on a stray placeholder.
            self.log_message = log_message.replace('%', '%%')

    def __str__(self):
        # Fall back to the standard reason phrase for the status code
        # (or 'Unknown' for non-standard codes) when no reason was given.
        message = "HTTP %d: %s" % (
            self.status_code,
            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
        if self.log_message:
            return message + " (" + (self.log_message % self.args) + ")"
        else:
            return message


class Finish(Exception):
    """An exception that ends the request without producing an error response.

    When `Finish` is raised in a `RequestHandler`, the request will end
    (calling `RequestHandler.finish` if it hasn't already been called),
    but the error-handling methods (including `RequestHandler.write_error`)
    will not be called.

    If `Finish()` was created with no arguments, the pending response
    will be sent as-is. If `Finish()` was given an argument, that
    argument will be passed to `RequestHandler.finish()`.

    This can be a more convenient way to implement custom error pages
    than overriding ``write_error`` (especially in library code)::

        if self.current_user is None:
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            raise Finish()

    .. versionchanged:: 4.3
       Arguments passed to ``Finish()`` will be passed on to
       `RequestHandler.finish`.
    """
    pass


class MissingArgumentError(HTTPError):
    """Exception raised by `RequestHandler.get_argument`.

    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    code will be used instead of 500 (and a stack trace will not be logged).

    .. versionadded:: 3.1
    """
    def __init__(self, arg_name):
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
        self.arg_name = arg_name


class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""
    def initialize(self, status_code):
        self.set_status(status_code)

    def prepare(self):
        raise HTTPError(self._status_code)

    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token.  This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass


class RedirectHandler(RequestHandler):
    """Redirects the client to the given URL for all GET requests.

    You should provide the keyword argument ``url`` to the handler, e.g.::

        application = web.Application([
            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
        ])

    `RedirectHandler` supports regular expression substitutions. E.g., to
    swap the first and second parts of a path while preserving the remainder::

        application = web.Application([
            (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
        ])

    The final URL is formatted with `str.format` and the substrings that
    match the capturing groups. In the above example, a request to
    "/a/b/c" would be formatted like::

        str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"

    Use Python's :ref:`format string syntax <formatstrings>` to customize how
    values are substituted.

    .. versionchanged:: 4.5
       Added support for substitutions into the destination URL.

    .. versionchanged:: 5.0
       If any query arguments are present, they will be copied to the
       destination URL.
""" def initialize(self, url, permanent=True): self._url = url self._permanent = permanent def get(self, *args): to_url = self._url.format(*args) if self.request.query_arguments: to_url = httputil.url_concat( to_url, list(httputil.qs_to_qsl(self.request.query_arguments))) self.redirect(to_url, permanent=self._permanent) class StaticFileHandler(RequestHandler): """A simple handler that can serve static content from a directory. A `StaticFileHandler` is configured automatically if you pass the ``static_path`` keyword argument to `Application`. This handler can be customized with the ``static_url_prefix``, ``static_handler_class``, and ``static_handler_args`` settings. To map an additional path to this handler for a static data directory you would add a line to your application like:: application = web.Application([ (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}), ]) The handler constructor requires a ``path`` argument, which specifies the local root directory of the content to be served. Note that a capture group in the regex is required to parse the value for the ``path`` argument to the get() method (different than the constructor argument above); see `URLSpec` for details. To serve a file like ``index.html`` automatically when a directory is requested, set ``static_handler_args=dict(default_filename="index.html")`` in your application settings, or add ``default_filename`` as an initializer argument for your ``StaticFileHandler``. To maximize the effectiveness of browser caching, this class supports versioned urls (by default using the argument ``?v=``). If a version is given, we instruct the browser to cache this file indefinitely. `make_static_url` (also available as `RequestHandler.static_url`) can be used to construct a versioned url. This handler is intended primarily for use in development and light-duty file serving; for heavy traffic it will be more efficient to use a dedicated static file server (such as nginx or Apache). 
We support the HTTP ``Accept-Ranges`` mechanism to return partial content (because some browsers require this functionality to be present to seek in HTML5 audio or video). **Subclassing notes** This class is designed to be extensible by subclassing, but because of the way static urls are generated with class methods rather than instance methods, the inheritance patterns are somewhat unusual. Be sure to use the ``@classmethod`` decorator when overriding a class method. Instance methods may use the attributes ``self.path`` ``self.absolute_path``, and ``self.modified``. Subclasses should only override methods discussed in this section; overriding other methods is error-prone. Overriding ``StaticFileHandler.get`` is particularly problematic due to the tight coupling with ``compute_etag`` and other methods. To change the way static urls are generated (e.g. to match the behavior of another server or CDN), override `make_static_url`, `parse_url_path`, `get_cache_time`, and/or `get_version`. To replace all interaction with the filesystem (e.g. to serve static content from a database), override `get_content`, `get_content_size`, `get_modified_time`, `get_absolute_path`, and `validate_absolute_path`. .. versionchanged:: 3.1 Many of the methods for subclasses were added in Tornado 3.1. """ CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years _static_hashes = {} # type: typing.Dict _lock = threading.Lock() # protects _static_hashes def initialize(self, path, default_filename=None): self.root = path self.default_filename = default_filename @classmethod def reset(cls): with cls._lock: cls._static_hashes = {} def head(self, path): return self.get(path, include_body=False) @gen.coroutine def get(self, path, include_body=True): # Set up our path instance variables. 
self.path = self.parse_url_path(path) del path # make sure we don't refer to path instead of self.path again absolute_path = self.get_absolute_path(self.root, self.path) self.absolute_path = self.validate_absolute_path( self.root, absolute_path) if self.absolute_path is None: return self.modified = self.get_modified_time() self.set_headers() if self.should_return_304(): self.set_status(304) return request_range = None range_header = self.request.headers.get("Range") if range_header: # As per RFC 2616 14.16, if an invalid Range header is specified, # the request will be treated as if the header didn't exist. request_range = httputil._parse_request_range(range_header) size = self.get_content_size() if request_range: start, end = request_range if (start is not None and start >= size) or end == 0: # As per RFC 2616 14.35.1, a range is not satisfiable only: if # the first requested byte is equal to or greater than the # content, or when a suffix with length 0 is specified self.set_status(416) # Range Not Satisfiable self.set_header("Content-Type", "text/plain") self.set_header("Content-Range", "bytes */%s" % (size, )) return if start is not None and start < 0: start += size if end is not None and end > size: # Clients sometimes blindly use a large range to limit their # download size; cap the endpoint at the actual file size. end = size # Note: only return HTTP 206 if less than the entire range has been # requested. Not only is this semantically correct, but Chrome # refuses to play audio if it gets an HTTP 206 in response to # ``Range: bytes=0-``. 
if size != (end or size) - (start or 0): self.set_status(206) # Partial Content self.set_header("Content-Range", httputil._get_content_range(start, end, size)) else: start = end = None if start is not None and end is not None: content_length = end - start elif end is not None: content_length = end elif start is not None: content_length = size - start else: content_length = size self.set_header("Content-Length", content_length) if include_body: content = self.get_content(self.absolute_path, start, end) if isinstance(content, bytes): content = [content] for chunk in content: try: self.write(chunk) yield self.flush() except iostream.StreamClosedError: return else: assert self.request.method == "HEAD" def compute_etag(self): """Sets the ``Etag`` header based on static url version. This allows efficient ``If-None-Match`` checks against cached versions, and sends the correct ``Etag`` for a partial response (i.e. the same ``Etag`` as the full file). .. versionadded:: 3.1 """ version_hash = self._get_cached_version(self.absolute_path) if not version_hash: return None return '"%s"' % (version_hash, ) def set_headers(self): """Sets the content and caching headers on the response. .. versionadded:: 3.1 """ self.set_header("Accept-Ranges", "bytes") self.set_etag_header() if self.modified is not None: self.set_header("Last-Modified", self.modified) content_type = self.get_content_type() if content_type: self.set_header("Content-Type", content_type) cache_time = self.get_cache_time(self.path, self.modified, content_type) if cache_time > 0: self.set_header("Expires", datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time)) self.set_header("Cache-Control", "max-age=" + str(cache_time)) self.set_extra_headers(self.path) def should_return_304(self): """Returns True if the headers indicate that we should return 304. .. 
versionadded:: 3.1 """ # If client sent If-None-Match, use it, ignore If-Modified-Since if self.request.headers.get('If-None-Match'): return self.check_etag_header() # Check the If-Modified-Since, and don't send the result if the # content has not been modified ims_value = self.request.headers.get("If-Modified-Since") if ims_value is not None: date_tuple = email.utils.parsedate(ims_value) if date_tuple is not None: if_since = datetime.datetime(*date_tuple[:6]) if if_since >= self.modified: return True return False @classmethod def get_absolute_path(cls, root, path): """Returns the absolute location of ``path`` relative to ``root``. ``root`` is the path configured for this `StaticFileHandler` (in most cases the ``static_path`` `Application` setting). This class method may be overridden in subclasses. By default it returns a filesystem path, but other strings may be used as long as they are unique and understood by the subclass's overridden `get_content`. .. versionadded:: 3.1 """ abspath = os.path.abspath(os.path.join(root, path)) return abspath def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. ``root`` is the configured path for the `StaticFileHandler`, and ``path`` is the result of `get_absolute_path` This is an instance method called during request processing, so it may raise `HTTPError` or use methods like `RequestHandler.redirect` (return None after redirecting to halt further processing). This is where 404 errors for missing files are generated. This method may modify the path before returning it, but note that any such modifications will not be understood by `make_static_url`. In instance methods, this method's result is available as ``self.absolute_path``. .. versionadded:: 3.1 """ # os.path.abspath strips a trailing /. # We must add it back to `root` so that we only match files # in a directory named `root` instead of files starting with # that prefix. 
root = os.path.abspath(root) if not root.endswith(os.path.sep): # abspath always removes a trailing slash, except when # root is '/'. This is an unusual case, but several projects # have independently discovered this technique to disable # Tornado's path validation and (hopefully) do their own, # so we need to support it. root += os.path.sep # The trailing slash also needs to be temporarily added back # the requested path so a request to root/ will match. if not (absolute_path + os.path.sep).startswith(root): raise HTTPError(403, "%s is not in root static directory", self.path) if (os.path.isdir(absolute_path) and self.default_filename is not None): # need to look at the request.path here for when path is empty # but there is some prefix to the path that was already # trimmed by the routing if not self.request.path.endswith("/"): self.redirect(self.request.path + "/", permanent=True) return absolute_path = os.path.join(absolute_path, self.default_filename) if not os.path.exists(absolute_path): raise HTTPError(404) if not os.path.isfile(absolute_path): raise HTTPError(403, "%s is not a file", self.path) return absolute_path @classmethod def get_content(cls, abspath, start=None, end=None): """Retrieve the content of the requested resource which is located at the given absolute path. This class method may be overridden by subclasses. Note that its signature is different from other overridable class methods (no ``settings`` argument); this is deliberate to ensure that ``abspath`` is able to stand on its own as a cache key. This method should either return a byte string or an iterator of byte strings. The latter is preferred for large files as it helps reduce memory fragmentation. .. 
versionadded:: 3.1 """ with open(abspath, "rb") as file: if start is not None: file.seek(start) if end is not None: remaining = end - (start or 0) else: remaining = None while True: chunk_size = 64 * 1024 if remaining is not None and remaining < chunk_size: chunk_size = remaining chunk = file.read(chunk_size) if chunk: if remaining is not None: remaining -= len(chunk) yield chunk else: if remaining is not None: assert remaining == 0 return @classmethod def get_content_version(cls, abspath): """Returns a version string for the resource at the given path. This class method may be overridden by subclasses. The default implementation is a hash of the file's contents. .. versionadded:: 3.1 """ data = cls.get_content(abspath) hasher = hashlib.md5() if isinstance(data, bytes): hasher.update(data) else: for chunk in data: hasher.update(chunk) return hasher.hexdigest() def _stat(self): if not hasattr(self, '_stat_result'): self._stat_result = os.stat(self.absolute_path) return self._stat_result def get_content_size(self): """Retrieve the total size of the resource at the given path. This method may be overridden by subclasses. .. versionadded:: 3.1 .. versionchanged:: 4.0 This method is now always called, instead of only when partial results are requested. """ stat_result = self._stat() return stat_result[stat.ST_SIZE] def get_modified_time(self): """Returns the time that ``self.absolute_path`` was last modified. May be overridden in subclasses. Should return a `~datetime.datetime` object or None. .. versionadded:: 3.1 """ stat_result = self._stat() modified = datetime.datetime.utcfromtimestamp( stat_result[stat.ST_MTIME]) return modified def get_content_type(self): """Returns the ``Content-Type`` header to be used for this request. .. 
versionadded:: 3.1 """ mime_type, encoding = mimetypes.guess_type(self.absolute_path) # per RFC 6713, use the appropriate type for a gzip compressed file if encoding == "gzip": return "application/gzip" # As of 2015-07-21 there is no bzip2 encoding defined at # http://www.iana.org/assignments/media-types/media-types.xhtml # So for that (and any other encoding), use octet-stream. elif encoding is not None: return "application/octet-stream" elif mime_type is not None: return mime_type # if mime_type not detected, use application/octet-stream else: return "application/octet-stream" def set_extra_headers(self, path): """For subclass to add extra headers to the response""" pass def get_cache_time(self, path, modified, mime_type): """Override to customize cache control behavior. Return a positive number of seconds to make the result cacheable for that amount of time or 0 to mark resource as cacheable for an unspecified amount of time (subject to browser heuristics). By default returns cache expiry of 10 years for resources requested with ``v`` argument. """ return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0 @classmethod def make_static_url(cls, settings, path, include_version=True): """Constructs a versioned url for the given path. This method may be overridden in subclasses (but note that it is a class method rather than an instance method). Subclasses are only required to implement the signature ``make_static_url(cls, settings, path)``; other keyword arguments may be passed through `~RequestHandler.static_url` but are not standard. ``settings`` is the `Application.settings` dictionary. ``path`` is the static path being requested. The url returned should be relative to the current host. ``include_version`` determines whether the generated URL should include the query string containing the version hash of the file corresponding to the given ``path``. 
        """
        url = settings.get('static_url_prefix', '/static/') + path
        if not include_version:
            return url

        version_hash = cls.get_version(settings, path)
        if not version_hash:
            # No version could be computed (e.g. the file was unreadable);
            # return the unversioned URL rather than failing.
            return url

        return '%s?v=%s' % (url, version_hash)

    def parse_url_path(self, url_path):
        """Converts a static URL path into a filesystem path.

        ``url_path`` is the path component of the URL with
        ``static_url_prefix`` removed. The return value should be
        filesystem path relative to ``static_path``.

        This is the inverse of `make_static_url`.
        """
        if os.path.sep != "/":
            # On platforms with a non-"/" separator (Windows), translate
            # URL separators to the native filesystem separator.
            url_path = url_path.replace("/", os.path.sep)
        return url_path

    @classmethod
    def get_version(cls, settings, path):
        """Generate the version string to be used in static URLs.

        ``settings`` is the `Application.settings` dictionary and ``path``
        is the relative location of the requested asset on the filesystem.
        The returned value should be a string, or ``None`` if no version
        could be determined.

        .. versionchanged:: 3.1
           This method was previously recommended for subclasses to override;
           `get_content_version` is now preferred as it allows the base
           class to handle caching of the result.
        """
        abs_path = cls.get_absolute_path(settings['static_path'], path)
        return cls._get_cached_version(abs_path)

    @classmethod
    def _get_cached_version(cls, abs_path):
        # Return the (possibly cached) content version for ``abs_path``,
        # or None if it could not be computed.
        with cls._lock:
            hashes = cls._static_hashes
            if abs_path not in hashes:
                try:
                    hashes[abs_path] = cls.get_content_version(abs_path)
                except Exception:
                    # Cache the failure as None so an unreadable file is
                    # only logged once (until reset() clears the cache).
                    gen_log.error("Could not open static file %r", abs_path)
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
                return hsh
        return None


class FallbackHandler(RequestHandler):
    """A `RequestHandler` that wraps another HTTP server callback.

    The fallback is a callable object that accepts an
    `~.httputil.HTTPServerRequest`, such as an `Application` or
    `tornado.wsgi.WSGIContainer`.  This is most useful to use both
    Tornado ``RequestHandlers`` and WSGI in the same server.
Typical usage:: wsgi_app = tornado.wsgi.WSGIContainer( django.core.handlers.wsgi.WSGIHandler()) application = tornado.web.Application([ (r"/foo", FooHandler), (r".*", FallbackHandler, dict(fallback=wsgi_app), ]) """ def initialize(self, fallback): self.fallback = fallback def prepare(self): self.fallback(self.request) self._finished = True self.on_finish() class OutputTransform(object): """A transform modifies the result of an HTTP request (e.g., GZip encoding) Applications are not expected to create their own OutputTransforms or interact with them directly; the framework chooses which transforms (if any) to apply. """ def __init__(self, request): pass def transform_first_chunk(self, status_code, headers, chunk, finishing): # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 return status_code, headers, chunk def transform_chunk(self, chunk, finishing): return chunk class GZipContentEncoding(OutputTransform): """Applies the gzip content encoding to the response. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 .. versionchanged:: 4.0 Now compresses all mime types beginning with ``text/``, instead of just a whitelist. (the whitelist is still used for certain non-text mime types). """ # Whitelist of compressible mime types (in addition to any types # beginning with "text/"). CONTENT_TYPES = set(["application/javascript", "application/x-javascript", "application/xml", "application/atom+xml", "application/json", "application/xhtml+xml", "image/svg+xml"]) # Python's GzipFile defaults to level 9, while most other gzip # tools (including gzip itself) default to 6, which is probably a # better CPU/size tradeoff. GZIP_LEVEL = 6 # Responses that are too short are unlikely to benefit from gzipping # after considering the "Content-Encoding: gzip" header and the header # inside the gzip encoding. # Note that responses written in multiple chunks will be compressed # regardless of size. 
MIN_LENGTH = 1024 def __init__(self, request): self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "") def _compressible_type(self, ctype): return ctype.startswith('text/') or ctype in self.CONTENT_TYPES def transform_first_chunk(self, status_code, headers, chunk, finishing): # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 # TODO: can/should this type be inherited from the superclass? if 'Vary' in headers: headers['Vary'] += ', Accept-Encoding' else: headers['Vary'] = 'Accept-Encoding' if self._gzipping: ctype = _unicode(headers.get("Content-Type", "")).split(";")[0] self._gzipping = self._compressible_type(ctype) and \ (not finishing or len(chunk) >= self.MIN_LENGTH) and \ ("Content-Encoding" not in headers) if self._gzipping: headers["Content-Encoding"] = "gzip" self._gzip_value = BytesIO() self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value, compresslevel=self.GZIP_LEVEL) chunk = self.transform_chunk(chunk, finishing) if "Content-Length" in headers: # The original content length is no longer correct. # If this is the last (and only) chunk, we can set the new # content-length; otherwise we remove it and fall back to # chunked encoding. if finishing: headers["Content-Length"] = str(len(chunk)) else: del headers["Content-Length"] return status_code, headers, chunk def transform_chunk(self, chunk, finishing): if self._gzipping: self._gzip_file.write(chunk) if finishing: self._gzip_file.close() else: self._gzip_file.flush() chunk = self._gzip_value.getvalue() self._gzip_value.truncate(0) self._gzip_value.seek(0) return chunk def authenticated(method): """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. 
If not, it will add a `next` parameter so the login page knows where to send you once you're logged in. """ @functools.wraps(method) def wrapper(self, *args, **kwargs): if not self.current_user: if self.request.method in ("GET", "HEAD"): url = self.get_login_url() if "?" not in url: if urlparse.urlsplit(url).scheme: # if login url is absolute, make next absolute too next_url = self.request.full_url() else: next_url = self.request.uri url += "?" + urlencode(dict(next=next_url)) self.redirect(url) return raise HTTPError(403) return method(self, *args, **kwargs) return wrapper class UIModule(object): """A re-usable, modular UI unit on a page. UI modules often execute additional queries, and they can include additional CSS and JavaScript that will be included in the output page, which is automatically inserted on page render. Subclasses of UIModule must override the `render` method. """ def __init__(self, handler): self.handler = handler self.request = handler.request self.ui = handler.ui self.locale = handler.locale @property def current_user(self): return self.handler.current_user def render(self, *args, **kwargs): """Override in subclasses to return this module's output.""" raise NotImplementedError() def embedded_javascript(self): """Override to return a JavaScript string to be embedded in the page.""" return None def javascript_files(self): """Override to return a list of JavaScript files needed by this module. If the return values are relative paths, they will be passed to `RequestHandler.static_url`; otherwise they will be used as-is. """ return None def embedded_css(self): """Override to return a CSS string that will be embedded in the page.""" return None def css_files(self): """Override to returns a list of CSS files required by this module. If the return values are relative paths, they will be passed to `RequestHandler.static_url`; otherwise they will be used as-is. 
        """
        return None

    def html_head(self):
        """Override to return an HTML string that will be put in the <head/>
        element.
        """
        return None

    def html_body(self):
        """Override to return an HTML string that will be put at the end of
        the <body/> element.
        """
        return None

    def render_string(self, path, **kwargs):
        """Renders a template and returns it as a string."""
        return self.handler.render_string(path, **kwargs)


class _linkify(UIModule):
    """Built-in UI module rendering ``text`` with URLs converted to links
    via `escape.linkify`.
    """
    def render(self, text, **kwargs):
        return escape.linkify(text, **kwargs)


class _xsrf_form_html(UIModule):
    """Built-in UI module emitting the handler's hidden XSRF form field."""
    def render(self):
        return self.handler.xsrf_form_html()


class TemplateModule(UIModule):
    """UIModule that simply renders the given template.

    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    but the module version gets its own namespace (with kwargs passed to
    Template()) instead of inheriting the outer template's namespace.

    Templates rendered through this module also get access to UIModule's
    automatic javascript/css features.  Simply call set_resources
    inside the template and give it keyword arguments corresponding to
    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    Note that these resources are output once per template file, not once
    per instantiation of the template, so they must not depend on any
    arguments to the template.
""" def __init__(self, handler): super(TemplateModule, self).__init__(handler) # keep resources in both a list and a dict to preserve order self._resource_list = [] self._resource_dict = {} def render(self, path, **kwargs): def set_resources(**kwargs): if path not in self._resource_dict: self._resource_list.append(kwargs) self._resource_dict[path] = kwargs else: if self._resource_dict[path] != kwargs: raise ValueError("set_resources called with different " "resources for the same template") return "" return self.render_string(path, set_resources=set_resources, **kwargs) def _get_resources(self, key): return (r[key] for r in self._resource_list if key in r) def embedded_javascript(self): return "\n".join(self._get_resources("embedded_javascript")) def javascript_files(self): result = [] for f in self._get_resources("javascript_files"): if isinstance(f, (unicode_type, bytes)): result.append(f) else: result.extend(f) return result def embedded_css(self): return "\n".join(self._get_resources("embedded_css")) def css_files(self): result = [] for f in self._get_resources("css_files"): if isinstance(f, (unicode_type, bytes)): result.append(f) else: result.extend(f) return result def html_head(self): return "".join(self._get_resources("html_head")) def html_body(self): return "".join(self._get_resources("html_body")) class _UIModuleNamespace(object): """Lazy namespace which creates UIModule proxies bound to a handler.""" def __init__(self, handler, ui_modules): self.handler = handler self.ui_modules = ui_modules def __getitem__(self, key): return self.handler._ui_module(key, self.ui_modules[key]) def __getattr__(self, key): try: return self[key] except KeyError as e: raise AttributeError(str(e)) if hasattr(hmac, 'compare_digest'): # python 3.3 _time_independent_equals = hmac.compare_digest else: def _time_independent_equals(a, b): if len(a) != len(b): return False result = 0 if isinstance(a[0], int): # python3 byte strings for x, y in zip(a, b): result |= x ^ y else: # 
python2 for x, y in zip(a, b): result |= ord(x) ^ ord(y) return result == 0 def create_signed_value(secret, name, value, version=None, clock=None, key_version=None): if version is None: version = DEFAULT_SIGNED_VALUE_VERSION if clock is None: clock = time.time timestamp = utf8(str(int(clock()))) value = base64.b64encode(utf8(value)) if version == 1: signature = _create_signature_v1(secret, name, value, timestamp) value = b"|".join([value, timestamp, signature]) return value elif version == 2: # The v2 format consists of a version number and a series of # length-prefixed fields "%d:%s", the last of which is a # signature, all separated by pipes. All numbers are in # decimal format with no leading zeros. The signature is an # HMAC-SHA256 of the whole string up to that point, including # the final pipe. # # The fields are: # - format version (i.e. 2; no length prefix) # - key version (integer, default is 0) # - timestamp (integer seconds since epoch) # - name (not encoded; assumed to be ~alphanumeric) # - value (base64-encoded) # - signature (hex-encoded; no length prefix) def format_field(s): return utf8("%d:" % len(s)) + utf8(s) to_sign = b"|".join([ b"2", format_field(str(key_version or 0)), format_field(timestamp), format_field(name), format_field(value), b'']) if isinstance(secret, dict): assert key_version is not None, 'Key version must be set when sign key dict is used' assert version >= 2, 'Version must be at least 2 for key version support' secret = secret[key_version] signature = _create_signature_v2(secret, to_sign) return to_sign + signature else: raise ValueError("Unsupported version %d" % version) # A leading version number in decimal # with no leading zeros, followed by a pipe. _signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$") def _get_version(value): # Figures out what version value is. Version 1 did not include an # explicit version field and started with arbitrary base64 data, # which makes this tricky. 
m = _signed_value_version_re.match(value) if m is None: version = 1 else: try: version = int(m.group(1)) if version > 999: # Certain payloads from the version-less v1 format may # be parsed as valid integers. Due to base64 padding # restrictions, this can only happen for numbers whose # length is a multiple of 4, so we can treat all # numbers up to 999 as versions, and for the rest we # fall back to v1 format. version = 1 except ValueError: version = 1 return version def decode_signed_value(secret, name, value, max_age_days=31, clock=None, min_version=None): if clock is None: clock = time.time if min_version is None: min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION if min_version > 2: raise ValueError("Unsupported min_version %d" % min_version) if not value: return None value = utf8(value) version = _get_version(value) if version < min_version: return None if version == 1: return _decode_signed_value_v1(secret, name, value, max_age_days, clock) elif version == 2: return _decode_signed_value_v2(secret, name, value, max_age_days, clock) else: return None def _decode_signed_value_v1(secret, name, value, max_age_days, clock): parts = utf8(value).split(b"|") if len(parts) != 3: return None signature = _create_signature_v1(secret, name, parts[0], parts[1]) if not _time_independent_equals(parts[2], signature): gen_log.warning("Invalid cookie signature %r", value) return None timestamp = int(parts[1]) if timestamp < clock() - max_age_days * 86400: gen_log.warning("Expired cookie %r", value) return None if timestamp > clock() + 31 * 86400: # _cookie_signature does not hash a delimiter between the # parts of the cookie, so an attacker could transfer trailing # digits from the payload to the timestamp without altering the # signature. For backwards compatibility, sanity-check timestamp # here instead of modifying _cookie_signature. 
gen_log.warning("Cookie timestamp in future; possible tampering %r", value) return None if parts[1].startswith(b"0"): gen_log.warning("Tampered cookie %r", value) return None try: return base64.b64decode(parts[0]) except Exception: return None def _decode_fields_v2(value): def _consume_field(s): length, _, rest = s.partition(b':') n = int(length) field_value = rest[:n] # In python 3, indexing bytes returns small integers; we must # use a slice to get a byte string as in python 2. if rest[n:n + 1] != b'|': raise ValueError("malformed v2 signed value field") rest = rest[n + 1:] return field_value, rest rest = value[2:] # remove version number key_version, rest = _consume_field(rest) timestamp, rest = _consume_field(rest) name_field, rest = _consume_field(rest) value_field, passed_sig = _consume_field(rest) return int(key_version), timestamp, name_field, value_field, passed_sig def _decode_signed_value_v2(secret, name, value, max_age_days, clock): try: key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value) except ValueError: return None signed_string = value[:-len(passed_sig)] if isinstance(secret, dict): try: secret = secret[key_version] except KeyError: return None expected_sig = _create_signature_v2(secret, signed_string) if not _time_independent_equals(passed_sig, expected_sig): return None if name_field != utf8(name): return None timestamp = int(timestamp) if timestamp < clock() - max_age_days * 86400: # The signature has expired. 
return None try: return base64.b64decode(value_field) except Exception: return None def get_signature_key_version(value): value = utf8(value) version = _get_version(value) if version < 2: return None try: key_version, _, _, _, _ = _decode_fields_v2(value) except ValueError: return None return key_version def _create_signature_v1(secret, *parts): hash = hmac.new(utf8(secret), digestmod=hashlib.sha1) for part in parts: hash.update(utf8(part)) return utf8(hash.hexdigest()) def _create_signature_v2(secret, s): hash = hmac.new(utf8(secret), digestmod=hashlib.sha256) hash.update(utf8(s)) return utf8(hash.hexdigest()) def is_absolute(path): return any(path.startswith(x) for x in ["/", "http:", "https:"])
{ "content_hash": "1d23f31036bef4ff16d115bc2830e02b", "timestamp": "", "source": "github", "line_count": 3394, "max_line_length": 119, "avg_line_length": 39.35209192692987, "alnum_prop": 0.6040835273770038, "repo_name": "hhru/tornado", "id": "6760b0b9a166c79ccbbbf9abe1c0c7994bdd4384", "size": "133561", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tornado/web.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1664" }, { "name": "HTML", "bytes": "25" }, { "name": "Python", "bytes": "1625160" }, { "name": "Ruby", "bytes": "1428" }, { "name": "Shell", "bytes": "4070" } ], "symlink_target": "" }
from PyQt5 import QtCore, QtGui, QtWidgets
import sys


class window(QtWidgets.QMainWindow):
    """Main window displaying a filesystem tree view.

    A ``QFileSystemModel`` is attached to a ``QTreeView`` which is set as
    the central widget; the window shows itself immediately on construction.

    :param root_path: directory the filesystem model watches for changes.
        Defaults to the historical hard-coded path so existing callers
        (``window()``) behave exactly as before.
    """

    # Previous hard-coded watch directory, kept as the default so the
    # parameterization is backward compatible.
    DEFAULT_ROOT_PATH = '/home/urban/Muzyka'

    def __init__(self, root_path=DEFAULT_ROOT_PATH):
        super().__init__()
        # Model mirroring the local filesystem. setRootPath() only tells
        # the model which directory to monitor for changes; it does not
        # restrict what the view displays.
        self.dirModel = QtWidgets.QFileSystemModel(self)
        self.dirModel.setRootPath(root_path)
        self.folderTreeView = QtWidgets.QTreeView()
        self.folderTreeView.setModel(self.dirModel)
        # NOTE(review): without folderTreeView.setRootIndex(...) the tree
        # shows the entire filesystem, not just root_path — confirm this
        # is the intended behavior before "fixing" it.
        self.setCentralWidget(self.folderTreeView)
        self.show()
{ "content_hash": "b39743a5947d7fb47883f94e8bf71600", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 56, "avg_line_length": 30.857142857142858, "alnum_prop": 0.6805555555555556, "repo_name": "urbansan/mediaFolderGrabber", "id": "ac9b5b2264891cf1928562212b2e56ad77430cbc", "size": "432", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mainWindow.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3817" } ], "symlink_target": "" }
# Exercise 1: basic printing — mixed quoting styles and Unicode text.
# Every entry below is written to stdout on its own line, verbatim.
_messages = (
    "Hello world",
    "Hello Again",
    "I like type this.",
    "This is fun",
    'Yay! Printing',
    "I'd much rather you 'not'.",
    'I "said" do not touch this',
    '妳好,我是Python!',
    '這是我的練習題',
    '下面要加上#的註釋',
)
for _message in _messages:
    print(_message)

# A '#' starts a comment: the interpreter ignores the three lines below.
# This is pound
# This is hash
# This is mesh
{ "content_hash": "28bafe9bc201abfa681f8b94e9bdaaf9", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 35, "avg_line_length": 22.23076923076923, "alnum_prop": 0.6782006920415224, "repo_name": "wangzongyuan/LPTHW", "id": "60946a90992556421c4aa0883078cef955f483ce", "size": "347", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ex1.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "21622" } ], "symlink_target": "" }
'''Simple decorator for dealing with typed query parameters.'''
from __future__ import (absolute_import, division, print_function,
                        with_statement)

from schematics.exceptions import ConversionError, ValidationError

import supercell.api as s


class QueryParams(s.Middleware):
    """Simple middleware for ensuring types in query parameters.

    A simple example::

        @QueryParams((
            ('limit', IntType()),
            ('q', StringType())
            )
        )
        @s.async
        def get(self, *args, **kwargs):
            limit = kwargs.get('limit', 0)
            q = kwargs.get('q', None)
            ...

    If a param is required, simply set the `required` property for the
    schematics type definition::

        @QueryParams((
            ('limit', IntType(required=True)),
            ('q', StringType())
            )
        )
        ...

    If the parameter is missing, a HTTP 400 error is raised.

    By default the dictionary containing the typed query parameters is
    added to the `kwargs` of the method with the key *query*. In order to
    change that, simply change the key in the definition::

        @QueryParams((
            ...
            ),
            kwargs_name='myquery'
        )
        ...
    """

    def __init__(self, params, kwargs_name='query'):
        # params: iterable of (name, schematics-type) pairs to parse from
        # the request's query string.
        # kwargs_name: key under which the parsed dict is injected into
        # the handler method's kwargs.
        super(QueryParams, self).__init__()
        self.params = params
        self.kwargs_name = kwargs_name

    @s.coroutine
    def before(self, handler, args, kwargs):
        # Runs before the handler method: parse and type-convert each
        # declared query parameter into the dict bound to kwargs_name.
        kwargs[self.kwargs_name] = q = {}
        for (name, typedef) in self.params:
            # NOTE(review): truthiness check means a present-but-falsy raw
            # value (e.g. an empty string) is treated as absent — confirm
            # this is intended for required parameters.
            if handler.get_argument(name, None):
                try:
                    # Schematics types are callable; presumably this
                    # coerces the raw string to the declared type — verify
                    # against the schematics version in use.
                    parsed = typedef(handler.get_argument(name))
                    q[name] = parsed
                except (ConversionError, ValidationError) as e:
                    # Conversion failure surfaces as an HTTP error with
                    # the schematics messages attached for the client.
                    validation_errors = {name: e.messages}
                    raise s.Error(additional=validation_errors)
            elif typedef.required and not handler.get_argument(name, None):
                # Required parameter absent (or falsy, see note above).
                raise s.Error(additional={'msg': 'Missing required argument "%s"' % name})

    @s.coroutine
    def after(self, handler, args, kwargs, result):
        # No post-processing needed; present to satisfy the Middleware
        # interface.
        pass
{ "content_hash": "199a6cea883cdee97619681fb92a3bed", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 76, "avg_line_length": 31.45205479452055, "alnum_prop": 0.5405052264808362, "repo_name": "tobigue/supercell", "id": "c45029e1a82b9dfa752b5347ff81dd71e34f145e", "size": "2944", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "supercell/queryparam.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "709" }, { "name": "Python", "bytes": "146037" }, { "name": "Shell", "bytes": "5503" } ], "symlink_target": "" }
import arcpy
import os
import sys
import traceback
import TestUtilities


def RunTest():
    # Smoke test for the CreateCIBMosaicDataset geoprocessing tool: run the
    # tool against the test geodatabase and verify the mosaic dataset exists.
    # Exits the process with -1 on any failure so the test harness sees a
    # non-zero status.
    try:
        arcpy.AddMessage("Starting Test: CreateCIBMosaicDataset")

        toolbox = TestUtilities.toolbox
        arcpy.ImportToolbox(toolbox, "DefenseImagery")
        arcpy.env.overwriteOutput = True

        # Set environment settings
        print("Running from: " + str(TestUtilities.currentPath))
        print("Geodatabase path: " + str(TestUtilities.geodatabasePath))

        arcpy.env.overwriteOutput = True

        webMercator = arcpy.SpatialReference(r"WGS 1984 Web Mercator (Auxiliary Sphere)")
        inputName = "Imagery_Test"
        inputMosaicDatasetFullPath = os.path.join(TestUtilities.inputGDB, inputName)

        # Remove any mosaic dataset left over from a previous run so the
        # tool starts from a clean state.
        if arcpy.Exists(inputMosaicDatasetFullPath):
            print("deleting: " + inputMosaicDatasetFullPath)
            arcpy.Delete_management(inputMosaicDatasetFullPath)

        ########################################################
        # Execute:
        arcpy.CreateCIBMosaicDataset_DefenseImagery(TestUtilities.inputGDB, inputName, webMercator)
        ########################################################

        # Verify results: a row count > 0 means the dataset was created
        # and populated.
        inputFeatureCount = int(arcpy.GetCount_management(inputMosaicDatasetFullPath).getOutput(0))
        print("Input FeatureClass: " + str(inputMosaicDatasetFullPath))
        print("Input Feature Count: " + str(inputFeatureCount))

        if inputFeatureCount > 0 :
            print("Mosaic Dataset has already been created and populated")

        print("Test Successful")

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)

        # return a system error code
        sys.exit(-1)

    except Exception as e:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # return a system error code
        sys.exit(-1)


# Run immediately on import/execution (this module is a standalone test
# script, not a library).
RunTest()
{ "content_hash": "afc8488627f124e3bbb9752b1a2ac15e", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 105, "avg_line_length": 33.5921052631579, "alnum_prop": 0.5808852330591461, "repo_name": "JudTown17/solutions-geoprocessing-toolbox", "id": "83e04574ae16ff7bdafc5a99696d555f1dad4920", "size": "3436", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "data_management/test/test_imagery_tools/TestCreateCIBMosaicDataset.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "964652" } ], "symlink_target": "" }
from capabilities.capability import capability
import subprocess  # used for bash commands (when required), unix-only
from pipes import quote  # used to sanitize bash input when complex commands are required, unix-only
# NOTE(review): `pipes.quote` is a deprecated alias of `shlex.quote` and the
# `pipes` module is removed in Python 3.13 — consider switching imports.


class tasks(capability):
    # Capability that extracts a project's creation date from its Mercurial
    # history. Methods are defined without `self`; presumably the capability
    # framework invokes them as plain functions via the class — TODO confirm.

    # goes to the folder, gets the first commit, and filters out the date
    # sometimes the project doesn't have a first commit, so it returns the zeroth commit
    def analyze(projectPath):
        # shell=True is mitigated by quote()-ing projectPath, but the hg
        # command string itself is fixed, so no other untrusted input flows in.
        try:
            catch = subprocess.check_output(
                'cd %s && hg log -r 1 --template "{date|shortdate}"' \
                % quote(projectPath),
                shell=True, stderr=subprocess.DEVNULL)
        except(subprocess.CalledProcessError):
            # Revision 1 does not exist (single-commit repo): fall back to
            # revision 0. A failure here propagates to the caller.
            return subprocess.check_output(
                'cd %s && hg log -r 0 --template "{date|shortdate}"' \
                % quote(projectPath),
                shell=True).decode('utf-8')
        else:
            return catch.decode('utf-8')

    def updateDb(dbConn, py_name, value):
        # Persist the extracted date for the named project.
        cur = dbConn.cursor()  # cursor to make changes
        cur.execute(
            "UPDATE project.metadata SET createdDate = %s WHERE name = %s;",
            (value, py_name)
        )
        dbConn.commit()  # save changes to db

    def getColumns():
        # Column name and SQL type this capability contributes to the
        # metadata table.
        return ['createdDate','date']
# end of tasks
{ "content_hash": "16b7afd128e8163bb5fda8f8fa25e4e0", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 104, "avg_line_length": 44.714285714285715, "alnum_prop": 0.6509584664536742, "repo_name": "Nateowami/flex-languagedepot-metadata", "id": "acccf71dbbfc8fa9dcc34ab765525edde9c18f2d", "size": "1252", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/capabilities/createdDate.py", "mode": "33188", "license": "mit", "language": [ { "name": "Lasso", "bytes": "156547" }, { "name": "Python", "bytes": "36326" }, { "name": "Shell", "bytes": "3003" } ], "symlink_target": "" }
import sure
from django.test import TestCase

from tasks.const import STATUS_SUCCESS, STATUS_FAILED

from ..jslint import jslint_violation
from .base import get_content


class JSlintViolationCase(TestCase):
    """Tests for the jslint violation parser."""

    def _parse(self, raw):
        # Feed raw jslint output through the parser under test.
        return jslint_violation({'raw': raw})

    def test_success(self):
        """An empty report is a success with zero violations."""
        outcome = self._parse('')
        outcome['status'].should.be.equal(STATUS_SUCCESS)
        outcome['plot']['count'].should.be.equal(0)

    def test_fail_on_real(self):
        """Captured real jslint output fails with 131 violations."""
        outcome = self._parse(get_content('jslint.out'))
        outcome['status'].should.be.equal(STATUS_FAILED)
        outcome['plot']['count'].should.be.equal(131)
{ "content_hash": "edd72293c5f27c8cc23c48596a67b3a3", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 56, "avg_line_length": 30.76, "alnum_prop": 0.6254876462938882, "repo_name": "nvbn/coviolations_web", "id": "7a91e464db6093152eb678925a42e98242173602", "size": "769", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "violations/tests/test_jslint.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6025" }, { "name": "CoffeeScript", "bytes": "30912" }, { "name": "Puppet", "bytes": "729" }, { "name": "Python", "bytes": "330675" }, { "name": "Shell", "bytes": "383" } ], "symlink_target": "" }
import logging
import json

from unittest import mock

from django.test import TestCase, tag
from django.contrib.auth.models import User
from django.urls import reverse
from django.conf import settings

from rest_framework import status

from uploadedfiles.models import UploadedFile
from users.serializers import SwiftManager


class UserViewTests(TestCase):
    """
    Generic user view tests' setup and tearDown
    """

    def setUp(self):
        # avoid cluttered console output (for instance logging all the http requests)
        logging.disable(logging.WARNING)

        # Collection+JSON is the content type the API speaks.
        self.content_type = 'application/vnd.collection+json'
        self.username = 'cube'
        self.password = 'cubepass'
        self.email = 'dev@babymri.org'

    def tearDown(self):
        # re-enable logging
        logging.disable(logging.NOTSET)


class UserCreateViewTests(UserViewTests):
    """
    Test the user-create view
    """

    def setUp(self):
        super(UserCreateViewTests, self).setUp()
        self.create_url = reverse("user-create")
        # Collection+JSON "template" payload for creating the test user.
        self.post = json.dumps(
            {"template": {"data": [{"name": "username", "value": self.username},
                                   {"name": "password", "value": self.password},
                                   {"name": "email", "value": self.email}]}})

    def test_user_create_success(self):
        # Mock out Swift storage so no real object store is needed; verify
        # the view uploads a welcome file for the new user.
        with mock.patch.object(SwiftManager, 'upload_obj',
                               return_value=None) as upload_obj_mock:
            response = self.client.post(self.create_url, data=self.post,
                                        content_type=self.content_type)
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.assertEqual(response.data["username"], self.username)
            self.assertEqual(response.data["email"], self.email)

            welcome_file_path = '%s/uploads/welcome.txt' % self.username
            upload_obj_mock.assert_called_with(welcome_file_path, mock.ANY,
                                               content_type='text/plain')

    @tag('integration')
    def test_integration_user_create_success(self):
        # Integration variant: exercises the real Swift backend, so the
        # welcome file is created for real and must be cleaned up below.
        response = self.client.post(self.create_url, data=self.post,
                                    content_type=self.content_type)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data["username"], self.username)
        self.assertEqual(response.data["email"], self.email)

        user = User.objects.get(username=self.username)
        welcome_file_path = '%s/uploads/welcome.txt' % self.username
        welcome_file = UploadedFile.objects.get(owner=user)
        self.assertEqual(welcome_file.fname.name, welcome_file_path)

        # delete welcome file
        swift_manager = SwiftManager(settings.SWIFT_CONTAINER_NAME,
                                     settings.SWIFT_CONNECTION_PARAMS)
        swift_manager.delete_obj(welcome_file_path)

    def test_user_create_failure_already_exists(self):
        # Creating a user whose username is already taken must 400.
        User.objects.create_user(username=self.username, email=self.email,
                                 password=self.password)
        response = self.client.post(self.create_url, data=self.post,
                                    content_type=self.content_type)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_create_failure_bad_password(self):
        # Password "small" presumably fails the configured password
        # validators (too short) — verify against the project settings.
        post = json.dumps(
            {"template": {"data": [{"name": "username", "value": "new_user"},
                                   {"name": "email", "value": self.email},
                                   {"name": "password", "value": "small"}]}})
        response = self.client.post(self.create_url, data=post,
                                    content_type=self.content_type)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)


class UserDetailViewTests(UserViewTests):
    """
    Test the user-detail view
    """

    def setUp(self):
        super(UserDetailViewTests, self).setUp()
        user = User.objects.create_user(username=self.username, email=self.email,
                                        password=self.password)
        self.read_update_url = reverse("user-detail", kwargs={"pk": user.id})
        # Collection+JSON "template" payload used by the PUT (update) tests.
        self.put = json.dumps({
            "template": {"data": [{"name": "password", "value": "updated_pass"},
                                  {"name": "email", "value": "dev1@babymri.org"}]}})

    def test_user_detail_success(self):
        self.client.login(username=self.username, password=self.password)
        response = self.client.get(self.read_update_url)
        self.assertContains(response, self.username)
        self.assertContains(response, self.email)

    def test_user_detail_failure_unauthenticated(self):
        # No login: the detail view requires authentication.
        response = self.client.get(self.read_update_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_user_update_success(self):
        self.client.login(username=self.username, password=self.password)
        response = self.client.put(self.read_update_url, data=self.put,
                                   content_type=self.content_type)
        self.assertContains(response, self.username)
        self.assertContains(response, "dev1@babymri.org")

    def test_user_update_failure_unauthenticated(self):
        response = self.client.put(self.read_update_url, data=self.put,
                                   content_type=self.content_type)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_user_update_failure_access_denied(self):
        # A different authenticated user must not be able to update
        # someone else's account.
        User.objects.create_user(username="other_username", email="dev2@babymri.org",
                                 password="other_password")
        self.client.login(username="other_username", password="other_password")
        response = self.client.put(self.read_update_url, data=self.put,
                                   content_type=self.content_type)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
{ "content_hash": "6b94b42fad8a6b875e32cf9aabbf9e3e", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 85, "avg_line_length": 43.792857142857144, "alnum_prop": 0.6059370412656989, "repo_name": "FNNDSC/ChRIS_ultron_backEnd", "id": "57338cca7d6e015945d1c6eec3fc2f52aaea7263", "size": "6132", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chris_backend/users/tests/test_views.py", "mode": "33261", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "3051" }, { "name": "HTML", "bytes": "2839" }, { "name": "JavaScript", "bytes": "262" }, { "name": "Python", "bytes": "978019" }, { "name": "Shell", "bytes": "74679" } ], "symlink_target": "" }
import random from rally.common import logging from rally import consts from rally import exceptions from rally.plugins.openstack import scenario from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils from rally.plugins.openstack.scenarios.glance import utils as glance_utils from rally.plugins.openstack.scenarios.nova import utils as nova_utils from rally.task import atomic from rally.task import types from rally.task import validation LOG = logging.getLogger(__name__) class CinderVolumes(cinder_utils.CinderScenario, nova_utils.NovaScenario, glance_utils.GlanceScenario): """Benchmark scenarios for Cinder Volumes.""" @types.convert(image={"type": "glance_image"}) @validation.image_exists("image", nullable=True) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_list_volume(self, size, detailed=True, image=None, **kwargs): """Create a volume and list all volumes. Measure the "cinder volume-list" command performance. If you have only 1 user in your context, you will add 1 volume on every iteration. So you will have more and more volumes and will be able to measure the performance of the "cinder volume-list" command depending on the number of images owned by users. :param size: volume size (integer, in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. 
:param detailed: determines whether the volume listing should contain detailed information about all of them :param image: image to be used to create volume :param kwargs: optional args to create a volume """ if image: kwargs["imageRef"] = image self._create_volume(size, **kwargs) self._list_volumes(detailed) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def list_volumes(self, detailed=True): """List all volumes. This simple scenario tests the cinder list command by listing all the volumes. :param detailed: True if detailed information about volumes should be listed """ self._list_volumes(detailed) @types.convert(image={"type": "glance_image"}) @validation.image_exists("image", nullable=True) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_update_volume(self, size, image=None, create_volume_kwargs=None, update_volume_kwargs=None): """Create a volume and update its name and description. :param size: volume size (integer, in GB) :param image: image to be used to create volume :param create_volume_kwargs: dict, to be used to create volume :param update_volume_kwargs: dict, to be used to update volume """ create_volume_kwargs = create_volume_kwargs or {} update_volume_kwargs = update_volume_kwargs or {} if image: create_volume_kwargs["imageRef"] = image volume = self._create_volume(size, **create_volume_kwargs) self._update_volume(volume, **update_volume_kwargs) @types.convert(image={"type": "glance_image"}) @validation.image_exists("image", nullable=True) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_delete_volume(self, size, image=None, min_sleep=0, max_sleep=0, **kwargs): """Create and then delete a volume. 
Good for testing a maximal bandwidth of cloud. Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between volume creation and deletion (of random duration from [min_sleep, max_sleep]). :param size: volume size (integer, in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. :param image: image to be used to create volume :param min_sleep: minimum sleep time between volume creation and deletion (in seconds) :param max_sleep: maximum sleep time between volume creation and deletion (in seconds) :param kwargs: optional args to create a volume """ if image: kwargs["imageRef"] = image volume = self._create_volume(size, **kwargs) self.sleep_between(min_sleep, max_sleep) self._delete_volume(volume) @types.convert(image={"type": "glance_image"}) @validation.image_exists("image", nullable=True) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_volume(self, size, image=None, **kwargs): """Create a volume. Good test to check how influence amount of active volumes on performance of creating new. :param size: volume size (integer, in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. :param image: image to be used to create volume :param kwargs: optional args to create a volume """ if image: kwargs["imageRef"] = image self._create_volume(size, **kwargs) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @validation.required_contexts("volumes") @scenario.configure(context={"cleanup": ["cinder"]}) def modify_volume_metadata(self, sets=10, set_size=3, deletes=5, delete_size=3): """Modify a volume's metadata. This requires a volume to be created with the volumes context. 
Additionally, ``sets * set_size`` must be greater than or equal to ``deletes * delete_size``. :param sets: how many set_metadata operations to perform :param set_size: number of metadata keys to set in each set_metadata operation :param deletes: how many delete_metadata operations to perform :param delete_size: number of metadata keys to delete in each delete_metadata operation """ if sets * set_size < deletes * delete_size: raise exceptions.InvalidArgumentsException( "Not enough metadata keys will be created: " "Setting %(num_keys)s keys, but deleting %(num_deletes)s" % {"num_keys": sets * set_size, "num_deletes": deletes * delete_size}) volume = random.choice(self.context["tenant"]["volumes"]) keys = self._set_metadata(volume["id"], sets, set_size) self._delete_metadata(volume["id"], keys, deletes, delete_size) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_extend_volume(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs): """Create and extend a volume and then delete it. :param size: volume size (in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. :param new_size: volume new size (in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. to extend. 
Notice: should be bigger volume size :param min_sleep: minimum sleep time between volume extension and deletion (in seconds) :param max_sleep: maximum sleep time between volume extension and deletion (in seconds) :param kwargs: optional args to extend the volume """ volume = self._create_volume(size, **kwargs) self._extend_volume(volume, new_size) self.sleep_between(min_sleep, max_sleep) self._delete_volume(volume) @validation.required_services(consts.Service.CINDER) @validation.required_contexts("volumes") @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_from_volume_and_delete_volume(self, size, min_sleep=0, max_sleep=0, **kwargs): """Create volume from volume and then delete it. Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between volume creation and deletion (of random duration from [min_sleep, max_sleep]). :param size: volume size (in GB), or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. Should be equal or bigger source volume size :param min_sleep: minimum sleep time between volume creation and deletion (in seconds) :param max_sleep: maximum sleep time between volume creation and deletion (in seconds) :param kwargs: optional args to create a volume """ source_vol = random.choice(self.context["tenant"]["volumes"]) volume = self._create_volume(size, source_volid=source_vol["id"], **kwargs) self.sleep_between(min_sleep, max_sleep) self._delete_volume(volume) @validation.required_services(consts.Service.CINDER) @validation.required_contexts("volumes") @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_delete_snapshot(self, force=False, min_sleep=0, max_sleep=0, **kwargs): """Create and then delete a volume-snapshot. 
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between snapshot creation and deletion (of random duration from [min_sleep, max_sleep]). :param force: when set to True, allows snapshot of a volume when the volume is attached to an instance :param min_sleep: minimum sleep time between snapshot creation and deletion (in seconds) :param max_sleep: maximum sleep time between snapshot creation and deletion (in seconds) :param kwargs: optional args to create a snapshot """ volume = random.choice(self.context["tenant"]["volumes"]) snapshot = self._create_snapshot(volume["id"], force=force, **kwargs) self.sleep_between(min_sleep, max_sleep) self._delete_snapshot(snapshot) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.image_valid_on_flavor("flavor", "image") @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder", "nova"]}) @logging.log_deprecated_args( "Use 'create_vm_params' for additional instance parameters.", "0.2.0", ["kwargs"], once=True) def create_and_attach_volume(self, size, image, flavor, create_volume_params=None, create_vm_params=None, **kwargs): """Create a VM and attach a volume to it. Simple test to create a VM and attach a volume, then detach the volume and delete volume/VM. :param size: volume size (integer, in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. 
:param image: Glance image name to use for the VM :param flavor: VM flavor name :param create_volume_params: optional arguments for volume creation :param create_vm_params: optional arguments for VM creation :param kwargs: (deprecated) optional arguments for VM creation """ create_volume_params = create_volume_params or {} if kwargs and create_vm_params: raise ValueError("You can not set both 'kwargs'" "and 'create_vm_params' attributes." "Please use 'create_vm_params'.") create_vm_params = create_vm_params or kwargs or {} server = self._boot_server(image, flavor, **create_vm_params) volume = self._create_volume(size, **create_volume_params) self._attach_volume(server, volume) self._detach_volume(server, volume) self._delete_volume(volume) self._delete_server(server) @validation.volume_type_exists("volume_type") @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder", "nova"]}) def create_snapshot_and_attach_volume(self, volume_type=False, size=None, **kwargs): """Create volume, snapshot and attach/detach volume. This scenario is based on the standalone qaStressTest.py (https://github.com/WaltHP/cinder-stress). :param volume_type: Whether or not to specify volume type when creating volumes. :param size: Volume size - dictionary, contains two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. default values: {"min": 1, "max": 5} :param kwargs: Optional parameters used during volume snapshot creation. 
""" if size is None: size = {"min": 1, "max": 5} selected_type = None volume_types = [None] if volume_type: volume_types_list = self.clients("cinder").volume_types.list() for s in volume_types_list: volume_types.append(s.name) selected_type = random.choice(volume_types) volume = self._create_volume(size, volume_type=selected_type) snapshot = self._create_snapshot(volume.id, False, **kwargs) server = self.get_random_server() self._attach_volume(server, volume) self._detach_volume(server, volume) self._delete_snapshot(snapshot) self._delete_volume(volume) @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder", "nova"]}) def create_nested_snapshots_and_attach_volume(self, size=None, nested_level=1, **kwargs): """Create a volume from snapshot and attach/detach the volume This scenario create volume, create it's snapshot, attach volume, then create new volume from existing snapshot and so on, with defined nested level, after all detach and delete them. volume->snapshot->volume->snapshot->volume ... :param size: Volume size - dictionary, contains two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. default values: {"min": 1, "max": 5} :param nested_level: amount of nested levels :param kwargs: Optional parameters used during volume snapshot creation. """ if size is None: size = {"min": 1, "max": 5} # NOTE: Volume size cannot be smaller than the snapshot size, so # volume with specified size should be created to avoid # size mismatching between volume and snapshot due random # size in _create_volume method. 
size = random.randint(size["min"], size["max"]) source_vol = self._create_volume(size) nes_objs = [(self.get_random_server(), source_vol, self._create_snapshot(source_vol.id, False, **kwargs))] self._attach_volume(nes_objs[0][0], nes_objs[0][1]) snapshot = nes_objs[0][2] for i in range(nested_level - 1): volume = self._create_volume(size, snapshot_id=snapshot.id) snapshot = self._create_snapshot(volume.id, False, **kwargs) server = self.get_random_server() self._attach_volume(server, volume) nes_objs.append((server, volume, snapshot)) nes_objs.reverse() for server, volume, snapshot in nes_objs: self._detach_volume(server, volume) self._delete_snapshot(snapshot) self._delete_volume(volume) @validation.required_services(consts.Service.CINDER) @validation.required_contexts("volumes") @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_list_snapshots(self, force=False, detailed=True, **kwargs): """Create and then list a volume-snapshot. :param force: when set to True, allows snapshot of a volume when the volume is attached to an instance :param detailed: True if detailed information about snapshots should be listed :param kwargs: optional args to create a snapshot """ volume = random.choice(self.context["tenant"]["volumes"]) self._create_snapshot(volume["id"], force=force, **kwargs) self._list_snapshots(detailed) @types.convert(image={"type": "glance_image"}) @validation.required_services(consts.Service.CINDER, consts.Service.GLANCE) @validation.required_openstack(users=True) @validation.required_parameters("size") @scenario.configure(context={"cleanup": ["cinder", "glance"]}) def create_and_upload_volume_to_image(self, size, image=None, force=False, container_format="bare", disk_format="raw", do_delete=True, **kwargs): """Create and upload a volume to image. 
:param size: volume size (integers, in GB), or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. :param image: image to be used to create volume. :param force: when set to True volume that is attached to an instance could be uploaded to image :param container_format: image container format :param disk_format: disk format for image :param do_delete: deletes image and volume after uploading if True :param kwargs: optional args to create a volume """ if image: kwargs["imageRef"] = image volume = self._create_volume(size, **kwargs) image = self._upload_volume_to_image(volume, force, container_format, disk_format) if do_delete: self._delete_volume(volume) self._delete_image(image) @validation.required_cinder_services("cinder-backup") @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_volume_backup(self, size, do_delete=True, create_volume_kwargs=None, create_backup_kwargs=None): """Create a volume backup. :param size: volume size in GB :param do_delete: if True, a volume and a volume backup will be deleted after creation. :param create_volume_kwargs: optional args to create a volume :param create_backup_kwargs: optional args to create a volume backup """ create_volume_kwargs = create_volume_kwargs or {} create_backup_kwargs = create_backup_kwargs or {} volume = self._create_volume(size, **create_volume_kwargs) backup = self._create_backup(volume.id, **create_backup_kwargs) if do_delete: self._delete_volume(volume) self._delete_backup(backup) @validation.required_cinder_services("cinder-backup") @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_restore_volume_backup(self, size, do_delete=True, create_volume_kwargs=None, create_backup_kwargs=None): """Restore volume backup. 
:param size: volume size in GB :param do_delete: if True, the volume and the volume backup will be deleted after creation. :param create_volume_kwargs: optional args to create a volume :param create_backup_kwargs: optional args to create a volume backup """ create_volume_kwargs = create_volume_kwargs or {} create_backup_kwargs = create_backup_kwargs or {} volume = self._create_volume(size, **create_volume_kwargs) backup = self._create_backup(volume.id, **create_backup_kwargs) self._restore_backup(backup.id) if do_delete: self._delete_volume(volume) self._delete_backup(backup) @validation.required_cinder_services("cinder-backup") @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_and_list_volume_backups(self, size, detailed=True, do_delete=True, create_volume_kwargs=None, create_backup_kwargs=None): """Create and then list a volume backup. :param size: volume size in GB :param detailed: True if detailed information about backup should be listed :param do_delete: if True, a volume backup will be deleted :param create_volume_kwargs: optional args to create a volume :param create_backup_kwargs: optional args to create a volume backup """ create_volume_kwargs = create_volume_kwargs or {} create_backup_kwargs = create_backup_kwargs or {} volume = self._create_volume(size, **create_volume_kwargs) backup = self._create_backup(volume.id, **create_backup_kwargs) self._list_backups(detailed) if do_delete: self._delete_volume(volume) self._delete_backup(backup) @types.convert(image={"type": "glance_image"}) @validation.image_exists("image", nullable=True) @validation.required_services(consts.Service.CINDER) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["cinder"]}) def create_volume_and_clone(self, size, image=None, nested_level=1, **kwargs): """Create a volume, then clone it to another volume. 
This creates a volume, then clone it to anothor volume, and then clone the new volume to next volume... 1. create source volume (from image) 2. clone source volume to volume1 3. clone volume1 to volume2 4. clone volume2 to volume3 5. ... :param size: volume size (integer, in GB) or dictionary, must contain two values: min - minimum size volumes will be created as; max - maximum size volumes will be created as. :param image: image to be used to create initial volume :param nested_level: amount of nested levels :param kwargs: optional args to create volumes """ if image: kwargs["imageRef"] = image source_vol = self._create_volume(size, **kwargs) kwargs.pop("imageRef", None) for i in range(nested_level): with atomic.ActionTimer(self, "cinder.clone_volume"): source_vol = self._create_volume(source_vol.size, source_volid=source_vol.id, atomic_action=False, **kwargs)
{ "content_hash": "2eb4bedc8d3a77062ff9423d4f3ea94d", "timestamp": "", "source": "github", "line_count": 569, "max_line_length": 79, "avg_line_length": 45.63444639718805, "alnum_prop": 0.6007471308634368, "repo_name": "eayunstack/rally", "id": "8c5cec5df1132aa2a84f261b1b840686d0cee737", "size": "26610", "binary": false, "copies": "1", "ref": "refs/heads/product", "path": "rally/plugins/openstack/scenarios/cinder/volumes.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "36716" }, { "name": "Mako", "bytes": "17389" }, { "name": "Python", "bytes": "2988245" }, { "name": "Shell", "bytes": "41128" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r''' --- module: bigip_profile_udp short_description: Manage UDP profiles on a BIG-IP description: - Manage UDP profiles on a BIG-IP system. There are many UDP profiles, each with their own adjustments to the standard C(udp) profile. Users of this module should be aware that many of the available options have no module default. Instead, the default is assigned by the BIG-IP system itself which, in most cases, is acceptable. version_added: "1.0.0" options: name: description: - Specifies the name of the profile. type: str required: True parent: description: - Specifies the profile from which this profile inherits settings. - When creating a new profile, if this parameter is not specified, the default is the system-supplied C(udp) profile. type: str idle_timeout: description: - Specifies the length of time a connection is idle (has no traffic) before the connection is eligible for deletion. - When creating a new profile, if this parameter is not specified, the remote device will choose a default value appropriate for the profile, based on its C(parent) profile. - When a number is specified, indicates the number of seconds the UDP connection can remain idle before the system deletes it. - When C(indefinite), specifies UDP connections can remain idle indefinitely. - When C(0) or C(immediate), specifies you do not want the UDP connection to remain idle, and it is therefore immediately eligible for deletion. type: str datagram_load_balancing: description: - When C(yes), specifies the system load balances UDP traffic packet-by-packet. type: bool partition: description: - Device partition to manage resources on. type: str default: Common state: description: - When C(present), ensures the profile exists. - When C(absent), ensures the profile is removed. 
type: str choices: - present - absent default: present extends_documentation_fragment: f5networks.f5_modules.f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a TCP profile bigip_profile_tcp: name: foo parent: udp idle_timeout: 300 datagram_load_balancing: no state: present provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost ''' RETURN = r''' parent: description: The new parent of the resource. returned: changed type: str sample: udp idle_timeout: description: The new idle timeout of the resource. returned: changed type: int sample: 100 datagram_load_balancing: description: The new datagram load balancing setting of the resource. returned: changed type: bool sample: True ''' from datetime import datetime from ansible.module_utils.basic import ( AnsibleModule, env_fallback ) from ..module_utils.bigip import F5RestClient from ..module_utils.common import ( F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name ) from ..module_utils.icontrol import tmos_version from ..module_utils.teem import send_teem class Parameters(AnsibleF5Parameters): api_map = { 'datagramLoadBalancing': 'datagram_load_balancing', 'idleTimeout': 'idle_timeout', 'defaultsFrom': 'parent', } api_attributes = [ 'datagramLoadBalancing', 'idleTimeout', 'defaultsFrom', ] returnables = [ 'datagram_load_balancing', 'idle_timeout', 'parent', ] updatables = [ 'datagram_load_balancing', 'idle_timeout', 'parent', ] @property def idle_timeout(self): if self._values['idle_timeout'] is None: return None if self._values['idle_timeout'] == 'indefinite': return self._values['idle_timeout'] if self._values['idle_timeout'] in ['0', 'immediate']: return 'immediate' return int(self._values['idle_timeout']) class ApiParameters(Parameters): @property def datagram_load_balancing(self): if self._values['datagram_load_balancing'] is None: return None if self._values['datagram_load_balancing'] == 'enabled': return True return False class 
ModuleParameters(Parameters): @property def parent(self): if self._values['parent'] is None: return None result = fq_name(self.partition, self._values['parent']) return result class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): @property def datagram_load_balancing(self): if self._values['datagram_load_balancing'] is None: return None if self._values['datagram_load_balancing']: return 'enabled' return 'disabled' class ReportableChanges(Changes): @property def datagram_load_balancing(self): if self._values['datagram_load_balancing'] is None: return None if self._values['datagram_load_balancing'] == 'enabled': return True return False class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = 
change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): start = datetime.now().isoformat() version = tmos_version(self.client) changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) send_teem(start, self.module, version) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if resp.status == 404 or 'code' in response and response['code'] == 404: return False if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]: return True errors = [401, 403, 409, 500, 501, 502, 503, 504] if resp.status in errors or 'code' in response and response['code'] in errors: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed 
to delete the resource.") return True def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403, 404]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response['selfLink'] def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 404]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) response = self.client.api.delete(uri) if response.status == 200: return True raise F5ModuleError(response.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise 
F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), parent=dict(), idle_timeout=dict(), datagram_load_balancing=dict(type='bool'), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
{ "content_hash": "47de8ea99c8a813c6239849117b4c56f", "timestamp": "", "source": "github", "line_count": 457, "max_line_length": 94, "avg_line_length": 30.317286652078774, "alnum_prop": 0.592638036809816, "repo_name": "F5Networks/f5-ansible-modules", "id": "f9388364e19946671c96440919c07edcc4868fb8", "size": "14032", "binary": false, "copies": "1", "ref": "refs/heads/doc-update", "path": "ansible_collections/f5networks/f5_modules/plugins/modules/bigip_profile_udp.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1931" }, { "name": "Python", "bytes": "345682" } ], "symlink_target": "" }
# Exception hierarchy for angr. Two roots: AngrError for analysis/project
# errors and SimError for simulation-time errors.

class AngrError(Exception):
    pass

class AngrValueError(AngrError, ValueError):
    pass

class AngrLifterError(AngrError):
    pass

class AngrExitError(AngrError):
    pass

class AngrPathError(AngrError):
    pass

class AngrVaultError(AngrError):
    pass

class PathUnreachableError(AngrPathError):
    pass

class SimulationManagerError(AngrError):
    pass

class AngrInvalidArgumentError(AngrError):
    pass

class AngrSurveyorError(AngrError):
    pass

class AngrAnalysisError(AngrError):
    pass

class AngrBladeError(AngrError):
    pass

class AngrBladeSimProcError(AngrBladeError):
    pass

class AngrAnnotatedCFGError(AngrError):
    pass

class AngrBackwardSlicingError(AngrError):
    pass

class AngrGirlScoutError(AngrError):
    pass

class AngrCallableError(AngrSurveyorError):
    pass

class AngrCallableMultistateError(AngrCallableError):
    pass

class AngrSyscallError(AngrError):
    pass

class AngrSimOSError(AngrError):
    pass

class AngrAssemblyError(AngrError):
    pass

# Congruency check failure
class AngrIncongruencyError(AngrAnalysisError):
    pass

#
# ForwardAnalysis errors
#

class AngrForwardAnalysisError(AngrError):
    pass

class AngrSkipJobNotice(AngrForwardAnalysisError):
    pass

class AngrDelayJobNotice(AngrForwardAnalysisError):
    pass

class AngrJobMergingFailureNotice(AngrForwardAnalysisError):
    pass

class AngrJobWideningFailureNotice(AngrForwardAnalysisError):
    pass

#
# CFG errors
#

class AngrCFGError(AngrError):
    pass

#
# VFG Errors and notices
#

class AngrVFGError(AngrError):
    pass

class AngrVFGRestartAnalysisNotice(AngrVFGError):
    pass

#
# Data graph errors
#

class AngrDataGraphError(AngrAnalysisError):
    # TODO: deprecated
    pass

class AngrDDGError(AngrAnalysisError):
    pass

#
# Loop analysis
#

class AngrLoopAnalysisError(AngrAnalysisError):
    pass

#
# Exploration techniques
#

class AngrExplorationTechniqueError(AngrError):
    pass

class AngrExplorerError(AngrExplorationTechniqueError):
    pass

class AngrDirectorError(AngrExplorationTechniqueError):
    pass

class AngrTracerError(AngrExplorationTechniqueError):
    pass

#
# VariableRecovery errors
#

class AngrVariableRecoveryError(AngrAnalysisError):
    pass

#
# Tracer
#

class TracerEnvironmentError(AngrError):
    pass

#
# Simulation errors
#

class SimError(Exception):
    # Location of the error, filled in by record_state().
    bbl_addr = None
    stmt_idx = None
    ins_addr = None
    executed_instruction_count = None
    guard = None

    def record_state(self, state):
        """Snapshot location info from a SimState onto this exception.

        Returns self so it can be chained into a raise expression.
        """
        self.bbl_addr = state.scratch.bbl_addr
        self.stmt_idx = state.scratch.stmt_idx
        self.ins_addr = state.scratch.ins_addr
        self.executed_instruction_count = state.history.recent_instruction_count
        self.guard = state.scratch.guard
        return self

#
# State-related errors
#

class SimStateError(SimError):
    pass

class SimMergeError(SimStateError):
    pass

class SimMemoryError(SimStateError):
    pass

class SimMemoryMissingError(SimMemoryError):
    pass

class SimAbstractMemoryError(SimMemoryError):
    pass

class SimRegionMapError(SimMemoryError):
    pass

class SimMemoryLimitError(SimMemoryError):
    pass

class SimMemoryAddressError(SimMemoryError):
    pass

class SimFastMemoryError(SimMemoryError):
    pass

class SimEventError(SimStateError):
    pass

class SimPosixError(SimStateError):
    pass

class SimFilesystemError(SimError):
    pass

class SimSymbolicFilesystemError(SimFilesystemError):
    pass

class SimFileError(SimMemoryError, SimFilesystemError):
    pass

class SimHeapError(SimStateError):
    pass

#
# Error class during VEX parsing
#

class SimUnsupportedError(SimError):
    pass

#
# Solver-related errors
#

class SimSolverError(SimError):
    pass

class SimSolverModeError(SimSolverError):
    pass

class SimSolverOptionError(SimSolverError):
    pass

class SimValueError(SimSolverError):
    pass

class SimUnsatError(SimValueError):
    pass

#
# SimIROp errors
#

class SimOperationError(SimError):
    pass

class UnsupportedIROpError(SimOperationError, SimUnsupportedError):
    pass

#
# SimIRExpr errors
#

class SimExpressionError(SimError):
    pass

class UnsupportedIRExprError(SimExpressionError, SimUnsupportedError):
    pass

class SimCCallError(SimExpressionError):
    pass

class UnsupportedCCallError(SimCCallError, SimUnsupportedError):
    pass

class SimUninitializedAccessError(SimExpressionError):
    def __init__(self, expr_type, expr):
        SimExpressionError.__init__(self)
        self.expr_type = expr_type
        self.expr = expr

    def __repr__(self):
        return "SimUninitializedAccessError (expr %s is used as %s)" % (self.expr, self.expr_type)

    def __reduce__(self):
        # Support pickling of the custom constructor arguments.
        return (SimUninitializedAccessError, (self.expr_type, self.expr))

#
# SimIRStmt errors
#

class SimStatementError(SimError):
    pass

class UnsupportedIRStmtError(SimStatementError, SimUnsupportedError):
    pass

class UnsupportedDirtyError(UnsupportedIRStmtError, SimUnsupportedError):
    pass

#
# Engine-related errors
#

class SimEngineError(SimError):
    pass

class SimIRSBError(SimEngineError):
    pass

class SimTranslationError(SimEngineError):
    pass

class SimProcedureError(SimEngineError):
    pass

class SimProcedureArgumentError(SimProcedureError):
    pass

class SimShadowStackError(SimProcedureError):
    pass

class SimFastPathError(SimEngineError):
    pass

class SimIRSBNoDecodeError(SimIRSBError):
    pass

class AngrUnsupportedSyscallError(AngrSyscallError, SimProcedureError, SimUnsupportedError):
    pass

# Backwards-compatible alias.
UnsupportedSyscallError = AngrUnsupportedSyscallError

class SimReliftException(SimEngineError):
    def __init__(self, state):
        super(SimReliftException, self).__init__()
        self.state = state

#
# SimSlicer errors
#

class SimSlicerError(SimError):
    pass

#
# SimAction errors
#

class SimActionError(SimError):
    pass

#
# SimCC errors
#

class SimCCError(SimError):
    pass

#
# UCManager errors
#

class SimUCManagerError(SimError):
    pass

class SimUCManagerAllocationError(SimUCManagerError):
    pass

#
# SimUnicorn errors
#

class SimUnicornUnsupport(SimError):
    pass

class SimUnicornError(SimError):
    pass

class SimUnicornSymbolic(SimError):
    pass

#
# Call-stack Errors
#

class SimEmptyCallStackError(SimError):
    pass

#
# SimStateOptions Errors
#

class SimStateOptionsError(SimError):
    pass

#
# Errors that may be handled by exception handling
#

class SimException(SimError):
    pass

class SimSegfaultException(SimException, SimMemoryError):
    def __init__(self, addr, reason, original_addr=None):
        self.addr = addr
        self.reason = reason
        self.original_addr = original_addr
        # BUG FIX: previously called super(SimSegfaultError, self), naming the
        # backwards-compat alias that is only bound *after* this class body --
        # the super() call must name this class itself.
        super(SimSegfaultException, self).__init__('%#x (%s)' % (addr, reason))

    def __repr__(self):
        return 'SimSegfaultException(%#x (%s%s)' % (
            self.addr,
            self.reason,
            (', original %s' % self.original_addr.__repr__(max_depth=3)) if self.original_addr is not None else ''
        )

    def __reduce__(self):
        # Support pickling of the custom constructor arguments.
        return (SimSegfaultException, (self.addr, self.reason, self.original_addr))

# Backwards-compatible alias.
SimSegfaultError = SimSegfaultException

class SimZeroDivisionException(SimException, SimOperationError):
    pass

class AngrNoPluginError(AngrError):
    pass

#
# Concrete Targets Execution errors
#

class SimConcreteMemoryError(AngrError):
    pass

class SimConcreteRegisterError(AngrError):
    pass

class SimConcreteBreakpointError(AngrError):
    pass
{ "content_hash": "68121cea94d08b5774e1e5e3e87f02d4", "timestamp": "", "source": "github", "line_count": 443, "max_line_length": 114, "avg_line_length": 17.002257336343114, "alnum_prop": 0.7404407859798194, "repo_name": "schieb/angr", "id": "cf8c2c3d1dc06296be1b808cae46b1d261199d82", "size": "7532", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "angr/errors.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "6375" }, { "name": "C++", "bytes": "39522" }, { "name": "Dockerfile", "bytes": "493" }, { "name": "Makefile", "bytes": "739" }, { "name": "Python", "bytes": "4987778" } ], "symlink_target": "" }
import math class ClickerState: ''' simple class to keep track of the game state ''' def __init__(self): self._current_amount_of_cookies = 0.0 self._current_time = 0.0 self._current_cps = 2.0 def get_cps(self): ''' get current CPS, returns it as a float ''' return self._current_cps def get_time(self): ''' get current time, returns it as a float ''' return self._current_time def time_until(self, cookies): ''' return time until you have the given number of cookies (could be 0 if you already have enough cookies); ''' if self._current_amount_of_cookies >= cookies: return 0.0 else: # consider only the given number of cookies (without current resource) return (cookies - self._current_amount_of_cookies) / self._current_cps def wait(self, time): ''' wait for given amount of time and update state, does nothing if time <= 0 ''' if time <= 0: pass else: self._current_time += time self._current_amount_of_cookies += time * self._current_cps def buy_item(self, cost, additional_cps): ''' buy an item and update state, does nothing if you cannot afford the item ''' if cost > self._current_amount_of_cookies: pass else: self._current_amount_of_cookies -= cost self._current_cps += additional_cps def read_file(filename): ''' read and checks file for consistency, returns its content as a list ''' with open(filename, 'r') as datafile: content = datafile.readlines() content = [line.strip('\n') for line in content] # there shall be (test cases) * number of lines if len(content) == int(content[0]): print 'Input file is incomplete/corrupted.' 
return content def feeding_in(content): ''' parsing through the raw list, returns its content as a list with item_cost, item_cps, X ''' # stores and removes # of test cases test_cases = int(content.pop(0)) # omitts empty spaces and converts strings to integers content = [map(float, line.split(' ')) for line in content] given_input = [] for i in range(0, test_cases): given_input.append(content[i]) return given_input def simulate_clicker(given_input): ''' function to run a Cookie Clicker game for the given duration with buying Farm; returns minimized time to achieve given value X ''' with open('B-large-practice.out', 'w') as datafile: for row in xrange(len(given_input)): item_cost, item_cps, X = given_input[row] # initialize variables, n is for arbitrary value to hopefully avoid of local minima fastest_to_X = float('+inf') new_click = ClickerState() time_to_X_list = [] n = 0 while True: wait_time = new_click.time_until(item_cost) time_to_X = X / new_click.get_cps() + new_click.get_time() time_to_X_list.append(time_to_X) n += 1 if n < X * 5: new_click.wait(wait_time) new_click.buy_item(item_cost, item_cps) fastest_to_X = time_to_X else: break print 'Case #{0}: {1}'.format(row + 1, min(time_to_X_list)) datafile.write('Case #{0}: {1}\n'.format(row + 1, min(time_to_X_list))) # let's run it! #read = read_file('B-large-practice.in') #print simulate_clicker(feeding_in(read)) # Judged response for input B-small: Correct! # Judged response for input B-large: Correct! 
def clicker_alpha(given_input):
    """Faster math variant of simulate_clicker(); same output format.

    Writes 'Case #i: t' lines to the output file, where t is the minimized
    time to accumulate X cookies when buying Farms.  Instead of simulating
    purchase by purchase it computes the optimal number of items directly;
    derivation adapted from:
    https://github.com/KirarinSnow/Google-Code-Jam/blob/master/Qualification%20Round%202014/B.js
    """
    with open('B-large-practice.out', 'w') as datafile:
        for row in xrange(len(given_input)):
            item_cost, item_cps, X = given_input[row]
            # optimal number of items to buy (never fewer than 1); the
            # literal 2 is the base CPS of a fresh game (see ClickerState)
            optimum = max(1, math.ceil(X / item_cost - 2 / item_cps))
            # time component after the final purchase (see linked derivation)
            fastest_to_X = (X - item_cost) / (2 + (optimum - 1) * item_cps)
            # add the time spent saving up for each successive purchase,
            # at the rate in effect before that purchase
            for j in range(int(optimum)):
                fastest_to_X += item_cost / (2 + j* item_cps);
            print 'Case #{0}: {1}'.format(row + 1, fastest_to_X)
            datafile.write('Case #{0}: {1}\n'.format(row + 1, fastest_to_X))

# let's run it!
#read = read_file('B-large-practice.in')
#print clicker_alpha(feeding_in(read))

# let's run it!
#print simulate_clicker(30.0, 1.0, 2.0)
#print simulate_clicker(30.0, 2.0, 100.0)
#print simulate_clicker(30.5, 3.14159, 1999.19990)
#print simulate_clicker(500.0, 4.0, 2000.0)
{ "content_hash": "4d9d9917c6096171e9be06b207794feb", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 96, "avg_line_length": 33.72666666666667, "alnum_prop": 0.5680964617513342, "repo_name": "chubbypanda/google-code-jam", "id": "cdfece76098a79b46f0a99f0aaf8c3ad36efbcc8", "size": "5290", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "2014/codeJam_qualification_b.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7796" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_dlp_sensor short_description: Configure DLP sensors in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify dlp feature and sensor category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. 
type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 state: description: - Indicates whether to create or remove the object. This attribute was present already in previous version in a deeper level. It has been moved out to this outer level. type: str required: false choices: - present - absent version_added: 2.9 dlp_sensor: description: - Configure DLP sensors. default: null type: dict suboptions: state: description: - B(Deprecated) - Starting with Ansible 2.9 we recommend using the top-level 'state' parameter. - HORIZONTALLINE - Indicates whether to create or remove the object. type: str required: false choices: - present - absent comment: description: - Comment. type: str dlp_log: description: - Enable/disable DLP logging. type: str choices: - enable - disable extended_log: description: - Enable/disable extended logging for data leak prevention. type: str choices: - enable - disable filter: description: - Set up DLP filters for this sensor. type: list suboptions: action: description: - Action to take with content that this DLP sensor matches. type: str choices: - allow - log-only - block - quarantine-ip archive: description: - Enable/disable DLP archiving. type: str choices: - disable - enable company_identifier: description: - Enter a company identifier watermark to match. Only watermarks that your company has placed on the files are matched. type: str expiry: description: - Quarantine duration in days, hours, minutes format (dddhhmm). type: str file_size: description: - Match files this size or larger (0 - 4294967295 kbytes). type: int file_type: description: - Select the number of a DLP file pattern table to match. Source dlp.filepattern.id. type: int filter_by: description: - Select the type of content to match. 
type: str choices: - credit-card - ssn - regexp - file-type - file-size - fingerprint - watermark - encrypted fp_sensitivity: description: - Select a DLP file pattern sensitivity to match. type: list suboptions: name: description: - Select a DLP sensitivity. Source dlp.fp-sensitivity.name. required: true type: str id: description: - ID. required: true type: int match_percentage: description: - Percentage of fingerprints in the fingerprint databases designated with the selected fp-sensitivity to match. type: int name: description: - Filter name. type: str proto: description: - Check messages or files over one or more of these protocols. type: str choices: - smtp - pop3 - imap - http-get - http-post - ftp - nntp - mapi - mm1 - mm3 - mm4 - mm7 regexp: description: - Enter a regular expression to match (max. 255 characters). type: str severity: description: - Select the severity or threat level that matches this filter. type: str choices: - info - low - medium - high - critical type: description: - Select whether to check the content of messages (an email message) or files (downloaded files or email attachments). type: str choices: - file - message flow_based: description: - Enable/disable flow-based DLP. type: str choices: - enable - disable full_archive_proto: description: - Protocols to always content archive. type: str choices: - smtp - pop3 - imap - http-get - http-post - ftp - nntp - mapi - mm1 - mm3 - mm4 - mm7 nac_quar_log: description: - Enable/disable NAC quarantine logging. type: str choices: - enable - disable name: description: - Name of the DLP sensor. required: true type: str options: description: - Configure DLP options. type: str replacemsg_group: description: - Replacement message group used by this DLP sensor. Source system.replacemsg-group.name. type: str summary_proto: description: - Protocols to always log summary. 
type: str choices: - smtp - pop3 - imap - http-get - http-post - ftp - nntp - mapi - mm1 - mm3 - mm4 - mm7 ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure DLP sensors. fortios_dlp_sensor: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" dlp_sensor: comment: "Comment." dlp_log: "enable" extended_log: "enable" filter: - action: "allow" archive: "disable" company_identifier: "myId_9" expiry: "<your_own_value>" file_size: "11" file_type: "12 (source dlp.filepattern.id)" filter_by: "credit-card" fp_sensitivity: - name: "default_name_15 (source dlp.fp-sensitivity.name)" id: "16" match_percentage: "17" name: "default_name_18" proto: "smtp" regexp: "<your_own_value>" severity: "info" type: "file" flow_based: "enable" full_archive_proto: "smtp" nac_quar_log: "enable" name: "default_name_26" options: "<your_own_value>" replacemsg_group: "<your_own_value> (source system.replacemsg-group.name)" summary_proto: "smtp" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" 
status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_dlp_sensor_data(json): option_list = ['comment', 'dlp_log', 'extended_log', 'filter', 'flow_based', 'full_archive_proto', 'nac_quar_log', 'name', 'options', 'replacemsg_group', 'summary_proto'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def dlp_sensor(data, fos): vdom = data['vdom'] if 'state' in data and data['state']: state = data['state'] elif 'state' in data['dlp_sensor'] and data['dlp_sensor']: state = data['dlp_sensor']['state'] else: state = True dlp_sensor_data = data['dlp_sensor'] filtered_data = underscore_to_hyphen(filter_dlp_sensor_data(dlp_sensor_data)) if state == "present": return fos.set('dlp', 'sensor', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('dlp', 'sensor', mkey=filtered_data['name'], vdom=vdom) def is_successful_status(status): 
return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_dlp(data, fos): if data['dlp_sensor']: resp = dlp_sensor(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "dlp_sensor": { "required": False, "type": "dict", "default": None, "options": { "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "comment": {"required": False, "type": "str"}, "dlp_log": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "extended_log": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "filter": {"required": False, "type": "list", "options": { "action": {"required": False, "type": "str", "choices": ["allow", "log-only", "block", "quarantine-ip"]}, "archive": {"required": False, "type": "str", "choices": ["disable", "enable"]}, "company_identifier": {"required": False, "type": "str"}, "expiry": {"required": False, "type": "str"}, "file_size": {"required": False, "type": "int"}, "file_type": {"required": False, "type": "int"}, "filter_by": {"required": False, "type": "str", "choices": ["credit-card", "ssn", "regexp", "file-type", "file-size", "fingerprint", "watermark", "encrypted"]}, "fp_sensitivity": {"required": False, "type": "list", "options": { "name": {"required": True, "type": "str"} }}, "id": {"required": True, "type": "int"}, "match_percentage": {"required": False, "type": "int"}, "name": {"required": False, "type": "str"}, "proto": {"required": False, 
"type": "str", "choices": ["smtp", "pop3", "imap", "http-get", "http-post", "ftp", "nntp", "mapi", "mm1", "mm3", "mm4", "mm7"]}, "regexp": {"required": False, "type": "str"}, "severity": {"required": False, "type": "str", "choices": ["info", "low", "medium", "high", "critical"]}, "type": {"required": False, "type": "str", "choices": ["file", "message"]} }}, "flow_based": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "full_archive_proto": {"required": False, "type": "str", "choices": ["smtp", "pop3", "imap", "http-get", "http-post", "ftp", "nntp", "mapi", "mm1", "mm3", "mm4", "mm7"]}, "nac_quar_log": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "name": {"required": True, "type": "str"}, "options": {"required": False, "type": "str"}, "replacemsg_group": {"required": False, "type": "str"}, "summary_proto": {"required": False, "type": "str", "choices": ["smtp", "pop3", "imap", "http-get", "http-post", "ftp", "nntp", "mapi", "mm1", "mm3", "mm4", "mm7"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_dlp(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_dlp(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': 
main()
{ "content_hash": "257cd2656dc91f4f7dc15202fecad708", "timestamp": "", "source": "github", "line_count": 601, "max_line_length": 147, "avg_line_length": 36.63394342762063, "alnum_prop": 0.44483807966571287, "repo_name": "thaim/ansible", "id": "2273abb8c1cf76cd80f08f590ddeac72273e33bc", "size": "22035", "binary": false, "copies": "13", "ref": "refs/heads/fix-broken-link", "path": "lib/ansible/modules/network/fortios/fortios_dlp_sensor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
import pytest
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.conf import settings
from wagtail.wagtailcore.models import Site
from molo.core.models import (
    SiteLanguageRelation, Main, Languages, SiteSettings)
from molo.core.tests.base import MoloTestCaseMixin
from molo.surveys.models import (
    MoloSurveyPage, MoloSurveyFormField, SurveysIndexPage)
from gem.models import GemSettings

# NOTE(review): RequestFactory is imported but never used in this module.


@pytest.mark.django_db
class TestModels(TestCase, MoloTestCaseMixin):
    """Homepage rendering tests driven by GemSettings site settings
    (partner credit and the profiles join banner)."""

    def setUp(self):
        # Build the standard molo site tree, two site languages (en/fr),
        # and enable tag navigation so '/' renders the tag-nav homepage.
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.french = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='fr',
            is_active=True)
        self.survey_index = SurveysIndexPage.objects.first()
        self.yourmind = self.mk_section(
            self.section_index, title='Your mind')
        self.yourmind_sub = self.mk_section(
            self.yourmind, title='Your mind subsection')
        self.site_settings = SiteSettings.for_site(self.main.get_site())
        self.site_settings.enable_tag_navigation = True
        self.site_settings.save()

    def test_partner_credit(self):
        """Partner credit text/link appears on '/' only after it is enabled
        and configured in GemSettings."""
        # Before configuration: neither the text nor the link is rendered.
        response = self.client.get('/')
        self.assertNotContains(response, 'Thank You')
        self.assertNotContains(response, 'https://www.google.co.za/')
        default_site = Site.objects.get(is_default_site=True)
        setting = GemSettings.for_site(default_site)
        setting.show_partner_credit = True
        setting.partner_credit_description = "Thank You"
        setting.partner_credit_link = "https://www.google.co.za/"
        setting.save()
        # After configuration: both are rendered.
        response = self.client.get('/')
        self.assertContains(response, 'Thank You')
        self.assertContains(response, 'https://www.google.co.za/')

    def test_show_join_banner(self):
        """The profiles join banner appears on '/' only once
        GemSettings.show_join_banner is enabled (rendered with the
        springster template set)."""
        # NOTE(review): abspath and dirname are imported but unused here.
        from os.path import abspath, dirname, join
        # Override TEMPLATES so the springster templates (which contain the
        # join banner markup) are used for this test.
        temp_dir = [
            {
                'DIRS': [join(
                    settings.PROJECT_ROOT, 'gem', 'templates', 'springster'),
                ],
                'BACKEND':
                    'django.template.backends.django.DjangoTemplates',
                'OPTIONS': {
                    'context_processors': [
                        'django.template.context_processors.debug',
                        'django.template.context_processors.request',
                        'django.contrib.auth.context_processors.auth',
                        'django.contrib.messages.context_processors.messages',
                        'molo.core.context_processors.locale',
                        'wagtail.contrib.settings.context_processors.settings',
                        'gem.context_processors.default_forms',
                        'gem.processors.compress_settings',
                    ],
                    "loaders": [
                        "django.template.loaders.filesystem.Loader",
                        "mote.loaders.app_directories.Loader",
                        "django.template.loaders.app_directories.Loader",
                    ]
                },
            }]
        with self.settings(TEMPLATES=temp_dir):
            # A survey with one form field is created so the homepage has
            # survey content to render alongside the banner area.
            molo_survey_page = MoloSurveyPage(
                title='survey title',
                slug='survey-slug',
                intro='Introduction to Test Survey ...',
                thank_you_text='Thank you for taking the Test Survey',
            )
            self.survey_index.add_child(instance=molo_survey_page)
            MoloSurveyFormField.objects.create(
                page=molo_survey_page,
                sort_order=1,
                label='Your favourite animal',
                field_type='singleline',
                required=True
            )
            # Log out so the page is viewed as an anonymous visitor.
            response = self.client.get('%s?next=%s' % (
                reverse('molo.profiles:auth_logout'),
                reverse('molo.profiles:user_register')))
            response = self.client.get('/')
            self.assertNotContains(response, 'profiles-join-banner')
            setting = GemSettings.for_site(self.main.get_site())
            setting.show_join_banner = True
            setting.save()
            response = self.client.get('/')
            self.assertContains(response, 'profiles-join-banner')
{ "content_hash": "36be245fa5ef4ec84b135a347ad513f8", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 79, "avg_line_length": 42.175925925925924, "alnum_prop": 0.5835345773874863, "repo_name": "Mitso/springstertestapp", "id": "bf3ae6e7039172d98855eb7cacd45b7bcbb41c0c", "size": "4570", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gem/tests/test_models.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "128710" }, { "name": "HTML", "bytes": "138391" }, { "name": "JavaScript", "bytes": "9716" }, { "name": "Python", "bytes": "183268" }, { "name": "Shell", "bytes": "563" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add Wikipedia eligibility-tracking fields to the Editor model.

    Adds editable=False bookkeeping fields recording, as of the user's last
    login, their edit counts and whether each terms-of-use criterion was
    met, plus an admin-settable override (ignore_wp_blocks).  Also widens
    the help text of the existing wp_valid flag.
    """

    dependencies = [("users", "0051_userprofile_proxy_notification_sent")]

    operations = [
        # Admin-settable override of the "not currently blocked" criterion.
        migrations.AddField(
            model_name="editor",
            name="ignore_wp_blocks",
            field=models.BooleanField(
                default=False,
                help_text="Ignore the 'not currently blocked' criterion for access?",
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_account_old_enough",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the account age criterion in the terms of use?",
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_bundle_eligible",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the criteria for access to the library card bundle?",
            ),
        ),
        # Edit-count snapshots; nullable with default=0 / default=None so
        # existing Editor rows migrate without data backfill.
        migrations.AddField(
            model_name="editor",
            name="wp_editcount_prev",
            field=models.IntegerField(
                blank=True,
                default=0,
                editable=False,
                help_text="Previous Wikipedia edit count",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_editcount_prev_updated",
            field=models.DateTimeField(
                blank=True,
                default=None,
                editable=False,
                help_text="When the previous editcount was last updated from Wikipedia",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_editcount_recent",
            field=models.IntegerField(
                blank=True,
                default=0,
                editable=False,
                help_text="Recent Wikipedia edit count",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_editcount_updated",
            field=models.DateTimeField(
                blank=True,
                default=None,
                editable=False,
                help_text="When the editcount was updated from Wikipedia",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_enough_edits",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the total editcount criterion in the terms of use?",
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_enough_recent_edits",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the recent editcount criterion in the terms of use?",
            ),
        ),
        migrations.AddField(
            model_name="editor",
            name="wp_not_blocked",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the 'not currently blocked' criterion in the terms of use?",
            ),
        ),
        # Existing field: only the help_text changes here.
        migrations.AlterField(
            model_name="editor",
            name="wp_valid",
            field=models.BooleanField(
                default=False,
                editable=False,
                help_text="At their last login, did this user meet the criteria in the terms of use?",
            ),
        ),
    ]
{ "content_hash": "c0d30e15025fd17771428729be8c5222", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 127, "avg_line_length": 34.17094017094017, "alnum_prop": 0.5027513756878439, "repo_name": "WikipediaLibrary/TWLight", "id": "491b1b3b177a539a44201308c4603bc0b7bfdc66", "size": "4072", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TWLight/users/migrations/0052_auto_20200312_1628.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "46216" }, { "name": "Dockerfile", "bytes": "2207" }, { "name": "HTML", "bytes": "318838" }, { "name": "JavaScript", "bytes": "1413" }, { "name": "Perl", "bytes": "4206" }, { "name": "Python", "bytes": "1544162" }, { "name": "Shell", "bytes": "87697" } ], "symlink_target": "" }
# Real-estate data pull: looks up a property on Zillow, then gathers deep
# comparables into a Property object.  Python 2 code (urllib2, print
# statements, dict.keys() indexing).
from pprint import pprint
import urllib
import urllib2
import xml.etree.cElementTree as ElementTree
import xmldict
import ApplicationHelper
import plot
import numpy as np

#Zillow API Functions & URLs
# NOTE(review): hard-coded API credential checked into source -- should be
# moved to configuration/environment before sharing this code.
zillow_zws_id = 'X1-ZWz1b9iilf4xe3_90hcq'
# Map of Zillow web-service operation name -> URL template; '{0}' is filled
# with the url-encoded query string by APIHelper.call().
zillow_api = {'CalculateAdjustableMortgage': 'http://www.zillow.com/webservice/CalculateAdjustableMortgage.htm?{0}',
              'CalculateAffordability': 'http://www.zillow.com/webservice/mortgage/CalculateAffordability.htm?{0}',
              'CalculateBiWeeklyPayment': 'http://www.zillow.com/webservice/CalculateBiWeeklyPayment.htm?{0}',
              'CalculateDiscountPoints': 'http://www.zillow.com/webservice/CalculateDiscountPoints.htm?{0}',
              'CalculateFixedVsAdjustableRate': 'http://www.zillow.com/webservice/CalculateFixedVsAdjustableRate.htm?{0}',
              'CalculateHELOC': 'http://www.zillow.com/webservice/CalculateHELOC.htm?{0}',
              'CalculateInterstOnlyVsTraditional': 'http://www.zillow.com/webservice/CalculateInterstOnlyVsTraditional.htm?{0}',
              'CalculateMonthlyPaymentsAdvanced': 'http://www.zillow.com/webservice/CalculateMonthlyPaymentsAdvanced.htm?{0}',
              'CalculateMortgageTerms': 'http://www.zillow.com/webservice/CalculateMortgageTerms.htm?{0}',
              'CalculateNoCostVsTraditional': 'http://www.zillow.com/webservice/CalculateNoCostVsTraditional.htm?{0}',
              'CalculateRefinance': 'http://www.zillow.com/webservice/CalculateRefinance.htm?{0}',
              'CalculateTaxSavings': 'http://www.zillow.com/webservice/CalculateTaxSavings.htm?{0}',
              'GetChart': 'http://www.zillow.com/webservice/GetChart.htm?{0}',
              'GetComps': 'http://www.zillow.com/webservice/GetComps.htm?{0}',
              'GetDeepComps': 'http://www.zillow.com/webservice/GetDeepComps.htm?{0}',
              'GetDeepSearchResults': 'http://www.zillow.com/webservice/GetDeepSearchResults.htm?{0}',
              'GetDemographics': 'http://www.zillow.com/webservice/GetDemographics.htm?{0}',
              'GetMonthlyPayments': 'http://www.zillow.com/webservice/GetMonthlyPayments.htm?{0}',
              'GetRateSummary': 'http://www.zillow.com/webservice/GetRateSummary.htm?{0}',
              'GetRegionChart': 'http://www.zillow.com/webservice/GetRegionChart.htm?{0}',
              'GetRegionChildren': 'http://www.zillow.com/webservice/GetRegionChildren.htm?{0}',
              'GetSearchResults': 'http://www.zillow.com/webservice/GetSearchResults.htm?{0}',
              'GetUpdatedPropertyDetails': 'http://www.zillow.com/webservice/GetUpdatedPropertyDetails.htm?{0}',
              'GetZestimate': 'http://www.zillow.com/webservice/GetZestimate.htm?{0}'}

#Trulia API Functions & URLs
# Map of Trulia web-service function name -> URL template (same '{0}'
# convention as zillow_api above).
trulia_api = {'getCitiesInState': 'http://api.trulia.com/webservices.php?library=LocationInfo&function=getCitiesInState&{0}',
              'getCityStats': 'http://api.trulia.com/webservices.php?library=TruliaStats&function=getCityStats&{0}',
              'getCountiesInState': 'http://api.trulia.com/webservices.php?library=LocationInfo&function=getCountiesInState&{0}',
              'getCountyStats': 'http://api.trulia.com/webservices.php?library=TruliaStats&function=getCountyStats&{0}',
              'getNeighborhoodStats': 'http://api.trulia.com/webservices.php?library=TruliaStats&function=getNeighborhoodStats&{0}',
              'getNeighborhoodsInCity': 'http://api.trulia.com/webservices.php?library=LocationInfo&function=getNeighborhoodsInCity&{0}',
              'getStateStats': 'http://api.trulia.com/webservices.php?library=TruliaStats&function=getStateStats&{0}',
              'getStates': 'http://api.trulia.com/webservices.php?library=LocationInfo&function=getStates&{0}',
              'getZipCodeStats': 'http://api.trulia.com/webservices.php?library=TruliaStats&function=getZipCodeStats&{0}',
              'getZipCodesInState': 'http://api.trulia.com/webservices.php?library=LocationInfo&function=getZipCodesInState&{0}'}

#Default Arguments for testing
# NOTE: this module-level dict is mutated by main() (zpid is added).
arguments = {'address': '531 St Marlo Dr',
             'citystatezip': '31028',
             'unit-type':'dollar',
             'count':'25',
             'rentzestimate':'true',
             'zws-id': zillow_zws_id}


def main():
    """Look up the default property and populate a Property object with its
    zestimate and deep-comparables data pulled from the Zillow API."""
    print ApplicationHelper.logo
    #Members
    api_helper = APIHelper()
    #Execution
    print("{0}".format(ApplicationHelper.figlet(arguments['address'])))
    asset = Property(arguments['address'],arguments['citystatezip'])
    #Get Search Results
    search = api_helper.call(zillow_api['GetSearchResults'],arguments)
    asset.zillow_search = search
    # Python 2: dict.keys() returns a list, so [0] picks the (single) root
    # element of the parsed XML response.
    arguments['zpid'] = search[search.keys()[0]]['response']['results']['result']['zpid']
    asset.zpid = arguments['zpid']
    asset.zestimate = int(search[search.keys()[0]]['response']['results']['result']['zestimate']['amount']['#text'])
    #Get Zestimate
    zestimate_search_result = api_helper.call(zillow_api['GetZestimate'],arguments)
    asset.zillow_zestimate_search_result = zestimate_search_result
    #Get Deep Comps for ZPID
    deep_comps = api_helper.call(zillow_api['GetDeepComps'],arguments)
    #Populate Asset Data: per-comparable columns extracted from the response
    asset.comps = deep_comps[deep_comps.keys()[0]]['response']['properties']['comparables']['comp']
    asset.comps_sqft = [int(x['finishedSqFt']) for x in asset.comps]
    asset.comps_bedrooms = [int(x['bedrooms']) for x in asset.comps]
    asset.comps_rent_zestimates = [int(x['rentzestimate']['amount']['#text']) for x in asset.comps]
    asset.comps_built_years = [x['yearBuilt'] for x in asset.comps]
    asset.comps_last_sold_dates = [x['lastSoldDate'] for x in asset.comps]
    asset.comps_zestimates = [int(x['zestimate']['amount']['#text']) for x in asset.comps]


class XMLHelper(object):
    """Helper converting raw XML text into nested dictionaries."""

    def toDict(self,data):
        """Parse the XML string ``data`` and return it as a dict
        (via the third-party xmldict package)."""
        root = ElementTree.XML(data)
        result = xmldict.xml_to_dict(root)
        return result


class APIHelper(object):
    """Thin HTTP client for the URL-template API maps defined above."""

    def call(self,url,arguments):
        """Takes an API URL and a dictionary of arguments. Encodes arguments
        and returns HTTP response of URL + Encoded Arguments converted to a
        dictionary"""
        xml_helper = XMLHelper()
        arguments = urllib.urlencode(arguments)
        call = url.format(arguments)
        #print call
        response = urllib2.urlopen(url.format(arguments))
        data = xml_helper.toDict(response.read())
        return data


class Property(object):
    """Value object for one property; main() attaches zestimate/comps
    attributes to instances dynamically."""

    def __init__(self,address,zipcode):
        self.address = address
        self.zipcode = zipcode


if __name__ == '__main__':
    main()
{ "content_hash": "4135c21b719941d3d2673faf272c6c2d", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 152, "avg_line_length": 56.04761904761905, "alnum_prop": 0.7563296516567545, "repo_name": "tspires/personal", "id": "383b83cd576f596bf2e87e7d8db9f3b04ee4595c", "size": "5903", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "practice/dionysus/dionysus.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "304" }, { "name": "C++", "bytes": "73201" }, { "name": "Clojure", "bytes": "1627" }, { "name": "CoffeeScript", "bytes": "1692" }, { "name": "Elixir", "bytes": "1807" }, { "name": "Go", "bytes": "654" }, { "name": "Haskell", "bytes": "4500" }, { "name": "Java", "bytes": "5658" }, { "name": "JavaScript", "bytes": "4497" }, { "name": "Makefile", "bytes": "244" }, { "name": "OCaml", "bytes": "2055" }, { "name": "Objective-C", "bytes": "3825" }, { "name": "Perl6", "bytes": "2909" }, { "name": "Python", "bytes": "14038" }, { "name": "Ruby", "bytes": "3195" }, { "name": "Scala", "bytes": "2814" }, { "name": "Shell", "bytes": "417" } ], "symlink_target": "" }
"""Security_Label TQL Filter""" # standard library from enum import Enum # first-party from tcex.api.tc.v3.api_endpoints import ApiEndpoints from tcex.api.tc.v3.filter_abc import FilterABC from tcex.api.tc.v3.tql.tql import Tql from tcex.api.tc.v3.tql.tql_operator import TqlOperator from tcex.api.tc.v3.tql.tql_type import TqlType class SecurityLabelFilter(FilterABC): """Filter Object for SecurityLabels""" @property def _api_endpoint(self) -> str: """Return the API endpoint.""" return ApiEndpoints.SECURITY_LABELS.value def color(self, operator: Enum, color: str): """Filter Color based on **color** keyword. Args: operator: The operator enum for the filter. color: The color of the security label (in hex triplet format). """ self._tql.add_filter('color', operator, color, TqlType.STRING) def date_added(self, operator: Enum, date_added: str): """Filter Date Added based on **dateAdded** keyword. Args: operator: The operator enum for the filter. date_added: The date the security label was added to the system. """ date_added = self.utils.any_to_datetime(date_added).strftime('%Y-%m-%d %H:%M:%S') self._tql.add_filter('dateAdded', operator, date_added, TqlType.STRING) def description(self, operator: Enum, description: str): """Filter Description based on **description** keyword. Args: operator: The operator enum for the filter. description: The description of the security label. """ self._tql.add_filter('description', operator, description, TqlType.STRING) @property def has_group(self): """Return **GroupFilter** for further filtering.""" # first-party from tcex.api.tc.v3.groups.group_filter import GroupFilter groups = GroupFilter(Tql()) self._tql.add_filter('hasGroup', TqlOperator.EQ, groups, TqlType.SUB_QUERY) return groups def has_group_attribute(self, operator: Enum, has_group_attribute: int): """Filter Associated Group based on **hasGroupAttribute** keyword. Args: operator: The operator enum for the filter. 
has_group_attribute: A nested query for association to other groups. """ self._tql.add_filter('hasGroupAttribute', operator, has_group_attribute, TqlType.INTEGER) @property def has_indicator(self): """Return **IndicatorFilter** for further filtering.""" # first-party from tcex.api.tc.v3.indicators.indicator_filter import IndicatorFilter indicators = IndicatorFilter(Tql()) self._tql.add_filter('hasIndicator', TqlOperator.EQ, indicators, TqlType.SUB_QUERY) return indicators def has_indicator_attribute(self, operator: Enum, has_indicator_attribute: int): """Filter Associated Indicator based on **hasIndicatorAttribute** keyword. Args: operator: The operator enum for the filter. has_indicator_attribute: A nested query for association to other indicators. """ self._tql.add_filter( 'hasIndicatorAttribute', operator, has_indicator_attribute, TqlType.INTEGER ) @property def has_victim(self): """Return **VictimFilter** for further filtering.""" # first-party from tcex.api.tc.v3.victims.victim_filter import VictimFilter victims = VictimFilter(Tql()) self._tql.add_filter('hasVictim', TqlOperator.EQ, victims, TqlType.SUB_QUERY) return victims def has_victim_attribute(self, operator: Enum, has_victim_attribute: int): """Filter Associated Victim based on **hasVictimAttribute** keyword. Args: operator: The operator enum for the filter. has_victim_attribute: A nested query for association to other victims. """ self._tql.add_filter('hasVictimAttribute', operator, has_victim_attribute, TqlType.INTEGER) def id(self, operator: Enum, id: int): # pylint: disable=redefined-builtin """Filter ID based on **id** keyword. Args: operator: The operator enum for the filter. id: The ID of the security label. """ self._tql.add_filter('id', operator, id, TqlType.INTEGER) def name(self, operator: Enum, name: str): """Filter Name based on **name** keyword. Args: operator: The operator enum for the filter. name: The name of the security label. 
""" self._tql.add_filter('name', operator, name, TqlType.STRING) def owner(self, operator: Enum, owner: int): """Filter Owner ID based on **owner** keyword. Args: operator: The operator enum for the filter. owner: The owner ID of the security label. """ self._tql.add_filter('owner', operator, owner, TqlType.INTEGER) def owner_name(self, operator: Enum, owner_name: str): """Filter Owner Name based on **ownerName** keyword. Args: operator: The operator enum for the filter. owner_name: The owner name of the security label. """ self._tql.add_filter('ownerName', operator, owner_name, TqlType.STRING) def summary(self, operator: Enum, summary: str): """Filter Summary based on **summary** keyword. Args: operator: The operator enum for the filter. summary: The name of the security label. """ self._tql.add_filter('summary', operator, summary, TqlType.STRING) def victim_id(self, operator: Enum, victim_id: int): """Filter Victim ID based on **victimId** keyword. Args: operator: The operator enum for the filter. victim_id: The ID of the victim the security label is applied to. """ self._tql.add_filter('victimId', operator, victim_id, TqlType.INTEGER)
{ "content_hash": "55869a2d3c33700f24478912272ab913", "timestamp": "", "source": "github", "line_count": 160, "max_line_length": 99, "avg_line_length": 37.55625, "alnum_prop": 0.6343817606922949, "repo_name": "ThreatConnect-Inc/tcex", "id": "f97018f4da57340b507cd48654955f48a0cd79f9", "size": "6009", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tcex/api/tc/v3/security_labels/security_label_filter.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2735042" } ], "symlink_target": "" }
# Simple TensorFlow (TF1-era API) example: a constant matrix product and a
# polynomial y = x^2 + 2x + 1 evaluated through a placeholder-fed variable.
#
# Fix: the original used Python-2-only `print` statements; single-argument
# `print(...)` calls behave identically under both Python 2 and Python 3.

import tensorflow as tf
import numpy as np  # NOTE(review): unused in this script — confirm before removing

print("Simple TensorFlow example }:->")
print("")

# Task 1 data: [1x2] * [2x1] -> [1x1] product (3*2 + 3*2 = 12).
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
matrixProduct = tf.matmul(matrix1, matrix2)

# y = x^2 + 2x + 1
x = tf.placeholder(tf.float32, shape=[1])
formulaProduct = tf.Variable(x*x + x*2 + 1)

with tf.Session() as sess:
    # Task1: matrix multiplication
    matrixResult = sess.run(matrixProduct)
    print("Matrix product: " + str(matrixResult))

    # Task2: formula evaluation.  Re-running the initializer inside the loop
    # re-evaluates the variable's initial value against the new feed for x.
    # NOTE(review): tf.initialize_all_variables() is deprecated in favor of
    # tf.global_variables_initializer() in later TF1 releases — left as-is
    # for compatibility with the TF version this was written against.
    for i in range(5):
        sess.run(tf.initialize_all_variables(), feed_dict={x: [i]})
        formulaResult = sess.run(formulaProduct)
        print("y=x^2+2x+1, for x=" + str(i) + ": " + str(formulaResult))
    print("")
{ "content_hash": "092572436099cb609a02eca5ed87be1d", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 72, "avg_line_length": 27.884615384615383, "alnum_prop": 0.6386206896551724, "repo_name": "chupakabr/lottery-guesser", "id": "fe53bb879167e3a5ba1f1bf0bf7ac20c46f02437", "size": "725", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ai/study-tensorflow/simple-tensorflow-example.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "22938" }, { "name": "Python", "bytes": "4845" }, { "name": "Shell", "bytes": "1123" } ], "symlink_target": "" }
""" django-colortools ~~~~~~ `django-colortools <http://www.github.com/neubloc/django-colortools>` :copyright: 2010 by David Cramer, 2011 by Neubloc, LLC. """ __all__ = ('__version__', '__build__', '__docformat__', 'get_revision') __version__ = (0, 3, 3) __docformat__ = 'restructuredtext en' import os def _get_git_revision(path): revision_file = os.path.join(path, 'refs', 'heads', 'master') if not os.path.exists(revision_file): return None fh = open(revision_file, 'r') try: return fh.read().strip() finally: fh.close() def get_revision(): """ :returns: Revision number of this branch/checkout, if available. None if no revision number can be determined. """ package_dir = os.path.dirname(__file__) checkout_dir = os.path.normpath(os.path.join(package_dir, '..')) path = os.path.join(checkout_dir, '.git') if os.path.exists(path): return _get_git_revision(path) return None __build__ = get_revision() def get_version(): base = '.'.join(map(str, __version__)) if __build__: base = '%s (%s)' % (base, __build__) return base
{ "content_hash": "9c7b2896e14f7c433dc9909f3e2eb7fd", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 72, "avg_line_length": 25.681818181818183, "alnum_prop": 0.6053097345132743, "repo_name": "neubloc/django-colortools", "id": "29abca37d60c7eb8b4ce884a58b6b1d1f5417a40", "size": "1130", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "colortools/__init__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "21035" } ], "symlink_target": "" }
# Microbenchmark diff driver: builds the current checkout and a diff base,
# runs the selected benchmarks against both, and comments any significant
# performance differences on the pull request.
#
# NOTE(review): this script uses Python-2-only syntax (`print` statements,
# `except Exc, e`, integer `/`) and must be run under Python 2.

import sys
import json
import bm_json
import tabulate
import argparse
from scipy import stats
import subprocess
import multiprocessing
import collections
import pipes
import os
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr
import jobset
import itertools
import speedup
import random
import shutil
import errno

# Benchmark counters compared between the two builds.
_INTERESTING = (
  'cpu_time',
  'real_time',
  'locks_per_iteration',
  'allocs_per_iteration',
  'writes_per_iteration',
  'atm_cas_per_iteration',
  'atm_add_per_iteration',
)


def changed_ratio(n, o):
  """Relative change from old value `o` to new value `n`.

  Values at or below 1e-4 are clamped to zero to suppress noise; returns 0
  when both are zero and 100 when only the old value is zero.
  """
  if float(o) <= .0001: o = 0
  if float(n) <= .0001: n = 0
  if o == 0 and n == 0: return 0
  if o == 0: return 100
  return (float(n)-float(o))/float(o)


def median(ary):
  """Median of a list (mean of the two central elements for even length)."""
  ary = sorted(ary)
  n = len(ary)
  if n%2 == 0:
    # Bug fix: the original averaged ary[n/2] and ary[n/2+1] — the upper
    # middle pair rather than the two central elements, and an IndexError
    # for n == 2.  The central pair is ary[n/2-1] and ary[n/2].
    return (ary[n/2-1] + ary[n/2]) / 2.0
  else:
    return ary[n/2]


def min_change(pct):
  """Return a predicate true when |relative change| exceeds `pct` percent."""
  return lambda n, o: abs(changed_ratio(n,o)) > pct/100.0

_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
                              'bm_fullstack_streaming_ping_pong',
                              'bm_fullstack_streaming_pump',
                              'bm_closure',
                              'bm_cq',
                              'bm_call_create',
                              'bm_error',
                              'bm_chttp2_hpack',
                              'bm_chttp2_transport',
                              'bm_pollset',
                              'bm_metadata',
                              'bm_fullstack_trickle']

argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
argp.add_argument('-t', '--track',
                  choices=sorted(_INTERESTING),
                  nargs='+',
                  default=sorted(_INTERESTING),
                  help='Which metrics to track')
argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq'])
argp.add_argument('-d', '--diff_base', type=str)
argp.add_argument('-r', '--repetitions', type=int, default=1)
argp.add_argument('-l', '--loops', type=int, default=20)
argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()

# A diff base is mandatory: it is checked out and built below.
assert args.diff_base


def avg(lst):
  """Arithmetic mean of a non-empty list."""
  sum = 0.0
  n = 0.0
  for el in lst:
    sum += el
    n += 1
  return sum / n


def make_cmd(cfg):
  """Make command line that builds the selected benchmarks for config `cfg`."""
  return ['make'] + args.benchmarks + [
      'CONFIG=%s' % cfg, '-j', '%d' % args.jobs]


def build(dest):
  """Build opt + counters configs and stash the binaries in bm_diff_<dest>."""
  shutil.rmtree('bm_diff_%s' % dest, ignore_errors=True)
  subprocess.check_call(['git', 'submodule', 'update'])
  try:
    subprocess.check_call(make_cmd('opt'))
    subprocess.check_call(make_cmd('counters'))
  except subprocess.CalledProcessError, e:
    # Incremental build failed (e.g. stale objects after checkout):
    # clean and retry once from scratch.
    subprocess.check_call(['make', 'clean'])
    subprocess.check_call(make_cmd('opt'))
    subprocess.check_call(make_cmd('counters'))
  os.rename('bins', 'bm_diff_%s' % dest)


def collect1(bm, cfg, ver, idx):
  """JobSpec running benchmark `bm` (config `cfg`, build `ver`, loop `idx`)."""
  cmd = ['bm_diff_%s/%s/%s' % (ver, cfg, bm),
         '--benchmark_out=%s.%s.%s.%d.json' % (bm, cfg, ver, idx),
         '--benchmark_out_format=json',
         '--benchmark_repetitions=%d' % (args.repetitions)
         ]
  return jobset.JobSpec(cmd, shortname='%s %s %s %d/%d' % (bm, cfg, ver, idx+1, args.loops),
                        verbose_success=True, timeout_seconds=None)

# Build the current checkout first, then the diff base; HEAD is restored
# even if the old build fails.
build('new')

where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_base])
try:
  build('old')
finally:
  subprocess.check_call(['git', 'checkout', where_am_i])
  subprocess.check_call(['git', 'submodule', 'update'])

# Interleave new/old and opt/counters runs and shuffle them, so machine
# noise is not correlated with any particular build or config.
jobs = []
for loop in range(0, args.loops):
  jobs.extend(x for x in itertools.chain(
      (collect1(bm, 'opt', 'new', loop) for bm in args.benchmarks),
      (collect1(bm, 'counters', 'new', loop) for bm in args.benchmarks),
      (collect1(bm, 'opt', 'old', loop) for bm in args.benchmarks),
      (collect1(bm, 'counters', 'old', loop) for bm in args.benchmarks),
  ))
random.shuffle(jobs, random.SystemRandom().random)

jobset.run(jobs, maxjobs=args.jobs)


class Benchmark:
  """Accumulates per-field samples for one benchmark across both builds."""

  def __init__(self):
    # samples[True] holds the new build's values, samples[False] the old
    # build's, each keyed by tracked field name.
    self.samples = {
      True: collections.defaultdict(list),
      False: collections.defaultdict(list)
    }
    self.final = {}

  def add_sample(self, data, new):
    for f in args.track:
      if f in data:
        self.samples[new][f].append(float(data[f]))

  def process(self):
    """Compute speedups; return the field names with significant changes."""
    for f in sorted(args.track):
      new = self.samples[True][f]
      old = self.samples[False][f]
      if not new or not old: continue
      mdn_diff = abs(median(new) - median(old))
      print '%s: new=%r old=%r mdn_diff=%r' % (f, new, old, mdn_diff)
      # `s` comes from the speedup module — formatted below as a percentage.
      s = speedup.speedup(new, old)
      # Report only changes that are both relatively (>3) and absolutely
      # (median delta > 0.5) significant.
      if abs(s) > 3 and mdn_diff > 0.5:
        self.final[f] = '%+d%%' % s
    return self.final.keys()

  def skip(self):
    return not self.final

  def row(self, flds):
    return [self.final[f] if f in self.final else '' for f in flds]


def eintr_be_gone(fn):
  """Run fn until it doesn't stop because of EINTR"""
  while True:
    try:
      return fn()
    except IOError, e:
      if e.errno != errno.EINTR:
        raise


def read_json(filename):
  with open(filename) as f: return json.loads(f.read())


def finalize():
  """Aggregate all result files and comment the diff table on the PR."""
  benchmarks = collections.defaultdict(Benchmark)

  for bm in args.benchmarks:
    for loop in range(0, args.loops):
      js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
      js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
      js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
      js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))

      # Aggregate rows are skipped: only raw repetitions feed the stats.
      for row in bm_json.expand_json(js_new_ctr, js_new_opt):
        print row
        name = row['cpp_name']
        if name.endswith('_mean') or name.endswith('_stddev'): continue
        benchmarks[name].add_sample(row, True)
      for row in bm_json.expand_json(js_old_ctr, js_old_opt):
        print row
        name = row['cpp_name']
        if name.endswith('_mean') or name.endswith('_stddev'): continue
        benchmarks[name].add_sample(row, False)

  really_interesting = set()
  for name, bm in benchmarks.items():
    print name
    really_interesting.update(bm.process())
  fields = [f for f in args.track if f in really_interesting]

  headers = ['Benchmark'] + fields
  rows = []
  for name in sorted(benchmarks.keys()):
    if benchmarks[name].skip(): continue
    rows.append([name] + benchmarks[name].row(fields))
  if rows:
    text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
  else:
    text = 'No significant performance differences'
  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
  print text

eintr_be_gone(finalize)
{ "content_hash": "feb0eea834d90c1997e6c9a158438565", "timestamp": "", "source": "github", "line_count": 219, "max_line_length": 105, "avg_line_length": 30.80365296803653, "alnum_prop": 0.597687518529499, "repo_name": "philcleveland/grpc", "id": "09b62ae1c9067d9b40a933269af3f04d82c846ee", "size": "8300", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/profiling/microbenchmarks/bm_diff.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "23280" }, { "name": "C", "bytes": "6948606" }, { "name": "C#", "bytes": "1531689" }, { "name": "C++", "bytes": "2137976" }, { "name": "CMake", "bytes": "429850" }, { "name": "DTrace", "bytes": "147" }, { "name": "JavaScript", "bytes": "341201" }, { "name": "M4", "bytes": "43609" }, { "name": "Makefile", "bytes": "864350" }, { "name": "Objective-C", "bytes": "351578" }, { "name": "PHP", "bytes": "301699" }, { "name": "Protocol Buffer", "bytes": "126616" }, { "name": "PureBasic", "bytes": "147" }, { "name": "Python", "bytes": "1410849" }, { "name": "Ruby", "bytes": "697648" }, { "name": "Shell", "bytes": "59291" }, { "name": "Swift", "bytes": "5418" } ], "symlink_target": "" }
"""Pytest fixtures for Firebase-backed job tests."""
import pytest
from funcy import merge
from firebase import firebase as f

from django.conf import settings

from firestone import fb_url, get_job as fjob, merge_ids, patch_job as pj, put_job
# Aliased on purpose: the `apply_for_job` fixture below rebinds the
# module-level name, so an unaliased import would be shadowed by the
# fixture definition itself.
from firestone.models import apply_for_job as _model_apply_for_job


@pytest.fixture
def fire_app(test_firebase):
    """Admin-authenticated FirebaseApplication for the configured project.

    Declares `test_firebase` as a dependency for fixture ordering even
    though the URL itself is derived from `settings.FIREBASE`.
    """
    fauth = f.FirebaseAuthentication(settings.FIREBASE_SECRET, settings.MR_WOLF_EMAIL,
                                     admin=True, debug=True)
    return f.FirebaseApplication(fb_url(settings.FIREBASE), fauth)


@pytest.fixture
def available_experts(fire_app):
    """All users listed under /experts that are currently available."""
    exp_ids = fire_app.get('/experts', None)
    us = [fire_app.get('/users', k) for k in exp_ids.keys()]
    us = merge_ids(exp_ids, us)
    # /experts looks like e.g. {u'simplelogin:2': True, u'simplelogin:1': True}
    return filter(lambda u: u['available'], us)


@pytest.fixture
def get_job(fire_app):
    """Expose `firestone.get_job`; depends on `fire_app` for setup."""
    return fjob


@pytest.fixture
def patch_job():
    """Expose `firestone.patch_job`."""
    return pj


@pytest.fixture
def fresh_job(patch_job):
    """Callable that resets a job to a clean drafting state."""
    def fn(job_id):
        patch_job(job_id, {'applicants': [], 'status': 'drafting'})
    return fn


@pytest.fixture
def super_fresh_job(patch_job):
    """Callable that replaces a job document entirely, keeping only its owner."""
    # NOTE(review): `patch_job` is requested but unused here; kept for
    # fixture-ordering parity with `fresh_job` — confirm before removing.
    def fn(job_id):
        put_job(job_id, {'owner': job_id})
    return fn


@pytest.fixture
def apply_for_job():
    """The model-level `apply_for_job` helper.

    Bug fix: the original `return apply_for_job` returned this fixture
    function itself — the `def` statement rebinds the module-level name,
    shadowing the import from `firestone.models`.
    """
    return _model_apply_for_job


@pytest.fixture
def test_firebase():
    """Name of the Firebase project used for tests."""
    return 'jonny-test'


@pytest.fixture
def empty_job():
    """Minimal job document: owned, still drafting, no applicants."""
    return {
        "owner": "sample:1",
        "status": "drafting"
    }


@pytest.fixture
def applied_job(empty_job):
    """Job with two (placeholder) applicants."""
    return merge(empty_job, {
        "applicants": [{}, {}]
    })


@pytest.fixture
def full_job(empty_job):
    """Job with three applicants and a completed draft."""
    return merge(empty_job, {
        "applicants": [{}, {}, {}],
        "status": "drafted"
    })
{ "content_hash": "ad6b8037ee9ac79b4bbf41879e247332", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 110, "avg_line_length": 23.042857142857144, "alnum_prop": 0.6553006819590824, "repo_name": "popara/jonny-api", "id": "94cc84ce17f57ce636ded42651a6eb5d2dcfbfa3", "size": "1613", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fixures/firebase_fixutres.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "42262" }, { "name": "HTML", "bytes": "3899" }, { "name": "JavaScript", "bytes": "77703" }, { "name": "Python", "bytes": "83332" } ], "symlink_target": "" }
# Scrapy item definitions for the webcrawler project.
# See: http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class WebcrawlerItem(scrapy.Item):
    # Placeholder item: no fields are declared yet.
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
{ "content_hash": "5cec24e139ea0a7ebc5af5254b84162c", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 48, "avg_line_length": 19.571428571428573, "alnum_prop": 0.6861313868613139, "repo_name": "NeX-Studio/DFB-Data-Mining", "id": "8bd86d11ca9f044e6792ec22dbe0e82f64b5c4b0", "size": "289", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "webcrawler/items.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7963" } ], "symlink_target": "" }
import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pymetro' copyright = u'2009, Brian Luft' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'pymetrodoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pymetro.tex', u'pymetro Documentation', u'Brian Luft', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_use_modindex = True
{ "content_hash": "0f60c73797427296c9f7e6fc32bca207", "timestamp": "", "source": "github", "line_count": 181, "max_line_length": 80, "avg_line_length": 32.425414364640886, "alnum_prop": 0.7093201567558357, "repo_name": "unbracketed/pymetro", "id": "cacba6a3621453c3f08635f041aaf0e1bb8648c1", "size": "6287", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "12557" } ], "symlink_target": "" }