text
stringlengths
4
1.02M
meta
dict
import unittest

from webkitpy.common.net.statusserver import StatusServer
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase


class StatusServerTest(OutputCaptureTestCaseBase):
    # NOTE(review): despite its name, this test exercises update_status(),
    # not URL construction -- confirm whether a rename is wanted upstream.
    def test_url_for_issue(self):
        browser = MockBrowser()
        server = StatusServer(browser=browser, bot_id='123')
        server.update_status('queue name', 'the status')

        # Every value handed to update_status() (plus the bot id) must be
        # forwarded to the browser's POST parameters.
        expected_params = {
            'queue_name': 'queue name',
            'status': 'the status',
            'bot_id': '123',
        }
        for key, value in expected_params.items():
            self.assertEqual(value, browser.params[key])
{ "content_hash": "680a2af6fe806f234ccfac688a8fd562", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 74, "avg_line_length": 44.733333333333334, "alnum_prop": 0.7406855439642325, "repo_name": "leighpauls/k2cro4", "id": "1f0afd05bb7c0009e8a34fdbdba171787bc2a661", "size": "2201", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "third_party/WebKit/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "3062" }, { "name": "AppleScript", "bytes": "25392" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "68131038" }, { "name": "C", "bytes": "242794338" }, { "name": "C#", "bytes": "11024" }, { "name": "C++", "bytes": "353525184" }, { "name": "Common Lisp", "bytes": "3721" }, { "name": "D", "bytes": "1931" }, { "name": "Emacs Lisp", "bytes": "1639" }, { "name": "F#", "bytes": "4992" }, { "name": "FORTRAN", "bytes": "10404" }, { "name": "Java", "bytes": "3845159" }, { "name": "JavaScript", "bytes": "39146656" }, { "name": "Lua", "bytes": "13768" }, { "name": "Matlab", "bytes": "22373" }, { "name": "Objective-C", "bytes": "21887598" }, { "name": "PHP", "bytes": "2344144" }, { "name": "Perl", "bytes": "49033099" }, { "name": "Prolog", "bytes": "2926122" }, { "name": "Python", "bytes": "39863959" }, { "name": "R", "bytes": "262" }, { "name": "Racket", "bytes": "359" }, { "name": "Ruby", "bytes": "304063" }, { "name": "Scheme", "bytes": "14853" }, { "name": "Shell", "bytes": "9195117" }, { "name": "Tcl", "bytes": "1919771" }, { "name": "Verilog", "bytes": "3092" }, { "name": "Visual Basic", "bytes": "1430" }, { "name": "eC", "bytes": "5079" } ], "symlink_target": "" }
from crawlers import FDroidCrawler, PlayCrawler


class Crawler:
    """Combined crawler that fills a database from Google Play and F-Droid."""

    def __init__(self, db):
        self.db = db

    def crawl_gplay(self):
        """Crawl the Google Play website, inserting the top applications of
        every available category into the database.

        Returns the number of packages that were newly inserted."""
        inserted = 0
        play = PlayCrawler()
        categories = play.get_all_categories()
        total = len(categories)
        for position, category in enumerate(categories, start=1):
            print('Retrieving packages for category {} ({}/{})'
                  .format(category, position, total))
            for package in play.get_all_packages(category):
                if self.db.insert_package(package, category):
                    inserted += 1
        self.db.commit()
        return inserted

    def crawl_fdroid(self):
        """Crawl the F-Droid package index, inserting every available
        F-Droid application into the database.

        Returns the number of packages that were newly inserted."""
        inserted = 0
        fdroid = FDroidCrawler()
        for package, categories in fdroid.yield_packages_categories():
            if self.db.insert_package(package, categories):
                inserted += 1
        self.db.commit()
        return inserted
{ "content_hash": "8c9386da8f8447f89a1e9c64b67291c6", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 73, "avg_line_length": 32.525, "alnum_prop": 0.5872405841660261, "repo_name": "Open-App-Categorizer/PkgCat-Crawler", "id": "a4d959d679af872710aef35cff9acbea4d922ba6", "size": "1301", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "crawlers/crawler.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "9885" } ], "symlink_target": "" }
"""WebSocket client for asyncio.""" import asyncio import json import async_timeout from .client_exceptions import ClientError from .helpers import PY_352, call_later, create_future from .http import (WS_CLOSED_MESSAGE, WS_CLOSING_MESSAGE, WebSocketError, WSMessage, WSMsgType) class ClientWebSocketResponse: def __init__(self, reader, writer, protocol, response, timeout, autoclose, autoping, loop, *, receive_timeout=None, heartbeat=None, compress=0, client_notakeover=False): self._response = response self._conn = response.connection self._writer = writer self._reader = reader self._protocol = protocol self._closed = False self._closing = False self._close_code = None self._timeout = timeout self._receive_timeout = receive_timeout self._autoclose = autoclose self._autoping = autoping self._heartbeat = heartbeat self._heartbeat_cb = None if heartbeat is not None: self._pong_heartbeat = heartbeat/2.0 self._pong_response_cb = None self._loop = loop self._waiting = None self._exception = None self._compress = compress self._client_notakeover = client_notakeover self._reset_heartbeat() def _cancel_heartbeat(self): if self._pong_response_cb is not None: self._pong_response_cb.cancel() self._pong_response_cb = None if self._heartbeat_cb is not None: self._heartbeat_cb.cancel() self._heartbeat_cb = None def _reset_heartbeat(self): self._cancel_heartbeat() if self._heartbeat is not None: self._heartbeat_cb = call_later( self._send_heartbeat, self._heartbeat, self._loop) def _send_heartbeat(self): if self._heartbeat is not None and not self._closed: self.ping() if self._pong_response_cb is not None: self._pong_response_cb.cancel() self._pong_response_cb = call_later( self._pong_not_received, self._pong_heartbeat, self._loop) def _pong_not_received(self): if not self._closed: self._closed = True self._close_code = 1006 self._exception = asyncio.TimeoutError() self._response.close() @property def closed(self): return self._closed @property def close_code(self): return 
self._close_code @property def protocol(self): return self._protocol @property def compress(self): return self._compress @property def client_notakeover(self): return self._client_notakeover def get_extra_info(self, name, default=None): """extra info from connection transport""" try: return self._response.connection.transport.get_extra_info( name, default) except Exception: return default def exception(self): return self._exception def ping(self, message='b'): self._writer.ping(message) def pong(self, message='b'): self._writer.pong(message) def send_str(self, data): if not isinstance(data, str): raise TypeError('data argument must be str (%r)' % type(data)) return self._writer.send(data, binary=False) def send_bytes(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError('data argument must be byte-ish (%r)' % type(data)) return self._writer.send(data, binary=True) def send_json(self, data, *, dumps=json.dumps): return self.send_str(dumps(data)) @asyncio.coroutine def close(self, *, code=1000, message=b''): # we need to break `receive()` cycle first, # `close()` may be called from different task if self._waiting is not None and not self._closed: self._reader.feed_data(WS_CLOSING_MESSAGE, 0) yield from self._waiting if not self._closed: self._cancel_heartbeat() self._closed = True try: self._writer.close(code, message) except asyncio.CancelledError: self._close_code = 1006 self._response.close() raise except Exception as exc: self._close_code = 1006 self._exception = exc self._response.close() return True if self._closing: self._response.close() return True while True: try: with async_timeout.timeout(self._timeout, loop=self._loop): msg = yield from self._reader.read() except asyncio.CancelledError: self._close_code = 1006 self._response.close() raise except Exception as exc: self._close_code = 1006 self._exception = exc self._response.close() return True if msg.type == WSMsgType.CLOSE: self._close_code = msg.data 
self._response.close() return True else: return False @asyncio.coroutine def receive(self, timeout=None): while True: if self._waiting is not None: raise RuntimeError( 'Concurrent call to receive() is not allowed') if self._closed: return WS_CLOSED_MESSAGE elif self._closing: yield from self.close() return WS_CLOSED_MESSAGE try: self._waiting = create_future(self._loop) try: with async_timeout.timeout( timeout or self._receive_timeout, loop=self._loop): msg = yield from self._reader.read() self._reset_heartbeat() finally: waiter = self._waiting self._waiting = None waiter.set_result(True) except (asyncio.CancelledError, asyncio.TimeoutError): self._close_code = 1006 raise except ClientError: self._closed = True self._close_code = 1006 return WS_CLOSED_MESSAGE except WebSocketError as exc: self._close_code = exc.code yield from self.close(code=exc.code) return WSMessage(WSMsgType.ERROR, exc, None) except Exception as exc: self._exception = exc self._closing = True self._close_code = 1006 yield from self.close() return WSMessage(WSMsgType.ERROR, exc, None) if msg.type == WSMsgType.CLOSE: self._closing = True self._close_code = msg.data if not self._closed and self._autoclose: yield from self.close() elif msg.type == WSMsgType.CLOSING: self._closing = True elif msg.type == WSMsgType.PING and self._autoping: self.pong(msg.data) continue elif msg.type == WSMsgType.PONG and self._autoping: continue return msg @asyncio.coroutine def receive_str(self, *, timeout=None): msg = yield from self.receive(timeout) if msg.type != WSMsgType.TEXT: raise TypeError( "Received message {}:{!r} is not str".format(msg.type, msg.data)) return msg.data @asyncio.coroutine def receive_bytes(self, *, timeout=None): msg = yield from self.receive(timeout) if msg.type != WSMsgType.BINARY: raise TypeError( "Received message {}:{!r} is not bytes".format(msg.type, msg.data)) return msg.data @asyncio.coroutine def receive_json(self, *, loads=json.loads, timeout=None): data = yield from 
self.receive_str(timeout=timeout) return loads(data) def __aiter__(self): return self if not PY_352: # pragma: no cover __aiter__ = asyncio.coroutine(__aiter__) @asyncio.coroutine def __anext__(self): msg = yield from self.receive() if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): raise StopAsyncIteration # NOQA return msg
{ "content_hash": "b398091fd3e3deb549e52ac7e5fb7e5a", "timestamp": "", "source": "github", "line_count": 271, "max_line_length": 79, "avg_line_length": 33.18819188191882, "alnum_prop": 0.5284634200578163, "repo_name": "playpauseandstop/aiohttp", "id": "0b8bed63a2584d88e9c610ca9fd008af097e9ecb", "size": "8994", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aiohttp/client_ws.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "3186" }, { "name": "Python", "bytes": "1345545" } ], "symlink_target": "" }
# Public surface of the ``ydk.path`` package: re-export the native
# extension bindings (ydk.ext.path) together with the pure-Python session
# classes so callers can import everything from this single module.
from ydk.ext.path import Annotation
from ydk.ext.path import Capability
from ydk.ext.path import Codec
from ydk.ext.path import DataNode
from ydk.ext.path import Repository
from ydk.ext.path import RootSchemaNode
from ydk.ext.path import Rpc
from ydk.ext.path import SchemaNode
from ydk.ext.path import Statement

from .sessions import NetconfSession
from .sessions import RestconfSession

# Explicit export list (kept sorted alphabetically).
__all__ = [
    "Annotation",
    "Capability",
    "Codec",
    "DataNode",
    "NetconfSession",
    "Repository",
    "RestconfSession",
    "RootSchemaNode",
    "Rpc",
    "SchemaNode",
    "Statement"
]
{ "content_hash": "1a6b3464cff17890529c313e4566160d", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 39, "avg_line_length": 28.125, "alnum_prop": 0.6444444444444445, "repo_name": "CiscoDevNet/ydk-gen", "id": "18bb5abdb6ff53cfd8db5e8344c847f9042e9bef", "size": "1389", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sdk/python/core/ydk/path/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "21945" }, { "name": "C", "bytes": "15875" }, { "name": "C++", "bytes": "3529963" }, { "name": "CMake", "bytes": "120070" }, { "name": "CSS", "bytes": "134" }, { "name": "Dockerfile", "bytes": "770" }, { "name": "Go", "bytes": "566728" }, { "name": "Makefile", "bytes": "960022" }, { "name": "Python", "bytes": "1052712" }, { "name": "Ruby", "bytes": "4023" }, { "name": "Shell", "bytes": "153786" } ], "symlink_target": "" }
""" MMBT configuration """ from ...utils import logging logger = logging.get_logger(__name__) class MMBTConfig(object): """ This is the configuration class to store the configuration of a :class:`~transformers.MMBTModel`. It is used to instantiate a MMBT model according to the specified arguments, defining the model architecture. Args: config (:class:`~transformers.PreTrainedConfig`): Config of the underlying Transformer models. Its values are copied over to use a single config. num_labels (:obj:`int`, `optional`): Size of final Linear layer for classification. modal_hidden_size (:obj:`int`, `optional`, defaults to 2048): Embedding dimension of the non-text modality encoder. """ def __init__(self, config, num_labels=None, modal_hidden_size=2048): self.__dict__ = config.__dict__ self.modal_hidden_size = modal_hidden_size if num_labels: self.num_labels = num_labels
{ "content_hash": "0de72922d1e1304386411914c18c2b4d", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 115, "avg_line_length": 37.148148148148145, "alnum_prop": 0.6590229312063809, "repo_name": "huggingface/pytorch-transformers", "id": "bbb6c9d240e99e6e1267de72d12ee741109943a2", "size": "1654", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/transformers/models/mmbt/configuration_mmbt.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "194" }, { "name": "Jupyter Notebook", "bytes": "535623" }, { "name": "Python", "bytes": "897445" } ], "symlink_target": "" }
"""Script for updating the project settings for a chromium branch. To initialize a new chromium branch, run the following from the root of the repo (where MM is the milestone number and BBBB is the branch number): ``` infra/config/scripts/branch.py initialize --milestone MM --branch BBBB infra/config/main.star infra/config/dev.star ``` Usage: branch.py initialize --milestone XX --branch YYYY """ import argparse import json import os INFRA_CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', '..')) def parse_args(args=None, *, parser_type=None): parser_type = parser_type or argparse.ArgumentParser parser = parser_type( description='Update the project settings for a chromium branch') parser.set_defaults(func=None) parser.add_argument('--settings-json', help='Path to the settings.json file', default=os.path.join(INFRA_CONFIG_DIR, 'settings.json')) subparsers = parser.add_subparsers() init_parser = subparsers.add_parser( 'initialize', help='Initialize the settings for a branch') init_parser.set_defaults(func=initialize_cmd) init_parser.add_argument( '--milestone', required=True, help=('The milestone identifier ' '(e.g. 
the milestone number for standard release channel)')) init_parser.add_argument( '--branch', required=True, help='The branch name, must correspond to a ref in refs/branch-heads') set_type_parser = subparsers.add_parser( 'set-type', help='Change the branch type of the project') set_type_parser.set_defaults(func=set_type_cmd) set_type_parser.add_argument( '--type', required=True, choices=BRANCH_TYPES, action='append', help='The type of the branch to change the project config to') args = parser.parse_args(args) if args.func is None: parser.error('no sub-command specified') return args def initial_settings(milestone, branch): settings = dict( project=f'chromium-m{milestone}', project_title=f'Chromium M{milestone}', ref=f'refs/branch-heads/{branch}', chrome_project=f'chrome-m{milestone}', branch_types=['standard'], ) return json.dumps(settings, indent=4) + '\n' def initialize_cmd(args): settings = initial_settings(args.milestone, args.branch) with open(args.settings_json, 'w') as f: f.write(settings) BRANCH_TYPES = ( 'standard', 'desktop-extended-stable', 'cros-lts', 'fuchsia-lts', ) def set_type(settings_json, branch_types): for t in branch_types: assert t in BRANCH_TYPES, 'Unknown branch_type {!r}'.format(t) settings = json.loads(settings_json) settings.update(branch_types=branch_types) return json.dumps(settings, indent=4) + '\n' def set_type_cmd(args): with open(args.settings_json) as f: settings = f.read() settings = set_type(settings, args.type) with open(args.settings_json, 'w') as f: f.write(settings) def main(): args = parse_args() args.func(args) if __name__ == '__main__': main()
{ "content_hash": "995aebf7105ff2e26ccbdbfdbc30ab96", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 78, "avg_line_length": 27.405405405405407, "alnum_prop": 0.6738987508218277, "repo_name": "chromium/chromium", "id": "fac6016317fa7aa3dce25b736eb42ed2d3adabda", "size": "3206", "binary": false, "copies": "6", "ref": "refs/heads/main", "path": "infra/config/scripts/branch.py", "mode": "33261", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from django.views.generic.base import TemplateView

from users.mixins import AdminRequiredMixin


class FoodNewView(AdminRequiredMixin, TemplateView):
    """Render the "new food" page from the ``food/new.html`` template.

    Access is gated by :class:`users.mixins.AdminRequiredMixin`
    (presumably admin-only -- confirm against the mixin's implementation).
    """

    template_name = 'food/new.html'
{ "content_hash": "8428d5b20341540c1a18ff649402e05f", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 52, "avg_line_length": 26.857142857142858, "alnum_prop": 0.8085106382978723, "repo_name": "jupiny/MIDASChallenge2017", "id": "a88994b62c3ff635621c669b0ce3e9382ee223c4", "size": "188", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "midas_web_solution/food/views/new.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1597" }, { "name": "HTML", "bytes": "65927" }, { "name": "JavaScript", "bytes": "29024" }, { "name": "Makefile", "bytes": "110" }, { "name": "Python", "bytes": "31157" } ], "symlink_target": "" }
""" Given a binary tree, flatten it to a linked list in-place. For example, Given 1 / \ 2 5 / \ \ 3 4 6 The flattened tree should look like: 1 \ 2 \ 3 \ 4 \ 5 \ 6 If you notice carefully in the flattened tree, each node's right child points to the next node of a pre-order traversal. """ # Definition for a binary tree node # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: # @param root, a tree node # @return nothing, do it in place def flatten(self, root): self.flattenHelper(root) def flattenHelper(self, root): if root == None: return None # keep track of the subtrees roots left = root.left right = root.right current = root # Truncate the left subtree root.left = None # Flatten the left subtree current.right = self.flattenHelper(left) while current.right: current = current.right # Flatten the right subtree current.right = self.flattenHelper(right) return root
{ "content_hash": "29939a9a07a6f65d30ea89e1281dd206", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 120, "avg_line_length": 18.171875, "alnum_prop": 0.5812553740326741, "repo_name": "Ahmed--Mohsen/leetcode", "id": "cb304e8db5f2f479f996dc2348b0995efbfe0386", "size": "1163", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "flatten_tree_to_linked_list.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "317482" } ], "symlink_target": "" }
"""Elasticsearch task-log handler: writes logs locally, reads them back
from an Elasticsearch index populated by an external shipper."""

import logging
import sys
from collections import defaultdict
from datetime import datetime
from operator import attrgetter
from time import time
from typing import List, Optional, Tuple, Union
from urllib.parse import quote

# Using `from elasticsearch import *` would break elasticsearch mocking used in unit test.
import elasticsearch
import pendulum
from elasticsearch_dsl import Search

from airflow.configuration import conf
from airflow.models import TaskInstance
from airflow.utils import timezone
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.json_formatter import JSONFormatter
from airflow.utils.log.logging_mixin import ExternalLoggingMixin, LoggingMixin

# Elasticsearch hosted log type: list of (hostname, concatenated log text).
EsLogMsgType = List[Tuple[str, str]]


class ElasticsearchTaskHandler(FileTaskHandler, ExternalLoggingMixin, LoggingMixin):
    """
    ElasticsearchTaskHandler is a python log handler that reads logs from
    Elasticsearch. Note logs are not directly indexed into Elasticsearch.
    Instead, it flushes logs into local files. Additional software setup
    is required to index the log into Elasticsearch, such as using
    Filebeat and Logstash.

    To efficiently query and sort Elasticsearch results, we assume each
    log message has a field `log_id` consists of ti primary keys:
    `log_id = {dag_id}-{task_id}-{execution_date}-{try_number}`

    Log messages with specific log_id are sorted based on `offset`,
    which is a unique integer indicates log message's order.
    Timestamp here are unreliable because multiple log messages
    might have the same timestamp.
    """

    # Pagination constants for es_read(): only the first page is fetched.
    PAGE = 0
    MAX_LINE_PER_PAGE = 1000
    LOG_NAME = 'Elasticsearch'

    def __init__(
        self,
        base_log_folder: str,
        filename_template: str,
        log_id_template: str,
        end_of_log_mark: str,
        write_stdout: bool,
        json_format: bool,
        json_fields: str,
        host_field: str = "host",
        offset_field: str = "offset",
        host: str = "localhost:9200",
        frontend: str = "localhost:5601",
        # NOTE(review): this default is evaluated once at import time and is
        # a mutable dict shared across instances -- confirm that is intended.
        es_kwargs: Optional[dict] = conf.getsection("elasticsearch_configs"),
    ):
        """
        :param base_log_folder: base folder to store logs locally
        :param log_id_template: log id template
        :param host: Elasticsearch host name
        """
        es_kwargs = es_kwargs or {}
        super().__init__(base_log_folder, filename_template)
        self.closed = False

        self.client = elasticsearch.Elasticsearch([host], **es_kwargs)

        self.log_id_template = log_id_template
        self.frontend = frontend
        self.mark_end_on_close = True
        self.end_of_log_mark = end_of_log_mark
        self.write_stdout = write_stdout
        self.json_format = json_format
        # json_fields arrives as a comma-separated string.
        self.json_fields = [label.strip() for label in json_fields.split(",")]
        self.host_field = host_field
        self.offset_field = offset_field
        self.context_set = False

        # Set lazily in set_context(); declared here for type checkers.
        self.formatter: logging.Formatter
        self.handler: Union[logging.FileHandler, logging.StreamHandler]  # type: ignore[assignment]

    def _render_log_id(self, ti: TaskInstance, try_number: int) -> str:
        # Render self.log_id_template for this task try. Dates are sanitized
        # when json_format is on (they become part of an ES query string).
        dag_run = ti.dag_run
        if self.json_format:
            data_interval_start = self._clean_date(dag_run.data_interval_start)
            data_interval_end = self._clean_date(dag_run.data_interval_end)
            execution_date = self._clean_date(dag_run.execution_date)
        else:
            data_interval_start = dag_run.data_interval_start.isoformat()
            data_interval_end = dag_run.data_interval_end.isoformat()
            execution_date = dag_run.execution_date.isoformat()

        return self.log_id_template.format(
            dag_id=ti.dag_id,
            task_id=ti.task_id,
            run_id=ti.run_id,
            data_interval_start=data_interval_start,
            data_interval_end=data_interval_end,
            execution_date=execution_date,
            try_number=try_number,
        )

    @staticmethod
    def _clean_date(value: datetime) -> str:
        """
        Clean up a date value so that it is safe to query in elasticsearch
        by removing reserved characters.

        # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters

        :param value: the datetime to format (e.g. a dag run's execution date).
        """
        return value.strftime("%Y_%m_%dT%H_%M_%S_%f")

    def _group_logs_by_host(self, logs):
        # Bucket log documents by their host field (default_host when absent).
        grouped_logs = defaultdict(list)
        for log in logs:
            key = getattr(log, self.host_field, 'default_host')
            grouped_logs[key].append(log)

        # NOTE(review): sorts groups by each group's first `message`, not by
        # timestamp as the original comment claimed -- confirm intent.
        result = sorted(grouped_logs.items(), key=lambda kv: getattr(kv[1][0], 'message', '_'))

        return result

    def _read_grouped_logs(self):
        # Signals FileTaskHandler that _read() returns host-grouped tuples.
        return True

    def _read(
        self, ti: TaskInstance, try_number: int, metadata: Optional[dict] = None
    ) -> Tuple[EsLogMsgType, dict]:
        """
        Endpoint for streaming log.

        :param ti: task instance object
        :param try_number: try_number of the task instance
        :param metadata: log metadata, can be used for steaming log reading and
            auto-tailing.
        :return: a list of tuple with host and log documents, metadata.
        """
        if not metadata:
            metadata = {'offset': 0}
        if 'offset' not in metadata:
            metadata['offset'] = 0

        offset = metadata['offset']
        log_id = self._render_log_id(ti, try_number)

        logs = self.es_read(log_id, offset, metadata)
        logs_by_host = self._group_logs_by_host(logs)

        next_offset = offset if not logs else attrgetter(self.offset_field)(logs[-1])

        # Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
        # on the client. Sending as a string prevents this issue.
        # https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
        metadata['offset'] = str(next_offset)

        # end_of_log_mark may contain characters like '\n' which is needed to
        # have the log uploaded but will not be stored in elasticsearch.
        # Hosts whose last message is not the end-of-log mark are still loading.
        loading_hosts = [
            item[0] for item in logs_by_host if item[-1][-1].message != self.end_of_log_mark.strip()
        ]
        metadata['end_of_log'] = False if not logs else len(loading_hosts) == 0

        cur_ts = pendulum.now()
        # Assume end of log after not receiving new log for 5 min,
        # as executor heartbeat is 1 min and there might be some
        # delay before Elasticsearch makes the log available.
        if 'last_log_timestamp' in metadata:
            last_log_ts = timezone.parse(metadata['last_log_timestamp'])
            if (
                cur_ts.diff(last_log_ts).in_minutes() >= 5
                or 'max_offset' in metadata
                and int(offset) >= int(metadata['max_offset'])
            ):
                metadata['end_of_log'] = True

        # Refresh the staleness clock only when new data actually arrived.
        if int(offset) != int(next_offset) or 'last_log_timestamp' not in metadata:
            metadata['last_log_timestamp'] = str(cur_ts)

        # If we hit the end of the log, remove the actual end_of_log message
        # to prevent it from showing in the UI.
        def concat_logs(lines):
            log_range = (len(lines) - 1) if lines[-1].message == self.end_of_log_mark.strip() else len(lines)
            return '\n'.join(self._format_msg(lines[i]) for i in range(log_range))

        message = [(host, concat_logs(hosted_log)) for host, hosted_log in logs_by_host]

        return message, metadata

    def _format_msg(self, log_line):
        """Format ES Record to match settings.LOG_FORMAT when used with json_format"""
        # Using formatter._style.format makes it future proof i.e.
        # if we change the formatter style from '%' to '{' or '$', this will still work
        if self.json_format:
            try:
                return self.formatter._style.format(_ESJsonLogFmt(self.json_fields, **log_line.to_dict()))
            except Exception:
                pass

        # Just a safe-guard to preserve backwards-compatibility
        return log_line.message

    def es_read(self, log_id: str, offset: str, metadata: dict) -> list:
        """
        Returns the logs matching log_id in Elasticsearch and next offset.
        Returns '' if no log is found or there was an error.

        :param log_id: the log_id of the log to read.
        :type log_id: str
        :param offset: the offset start to read log from.
        :type offset: str
        :param metadata: log metadata, used for steaming log download.
        :type metadata: dict
        """
        # Offset is the unique key for sorting logs given log_id.
        search = Search(using=self.client).query('match_phrase', log_id=log_id).sort(self.offset_field)

        search = search.filter('range', **{self.offset_field: {'gt': int(offset)}})
        max_log_line = search.count()
        # On a download request, record the final offset once so the client
        # knows when the full log has been fetched.
        if 'download_logs' in metadata and metadata['download_logs'] and 'max_offset' not in metadata:
            try:
                if max_log_line > 0:
                    metadata['max_offset'] = attrgetter(self.offset_field)(
                        search[max_log_line - 1].execute()[-1]
                    )
                else:
                    metadata['max_offset'] = 0
            except Exception:
                self.log.exception('Could not get current log size with log_id: %s', log_id)

        logs = []
        if max_log_line != 0:
            try:
                logs = search[self.MAX_LINE_PER_PAGE * self.PAGE : self.MAX_LINE_PER_PAGE].execute()
            except Exception:
                self.log.exception('Could not read log with log_id: %s', log_id)

        return logs

    def emit(self, record):
        # Stamp each record with a nanosecond offset used as the sort key.
        if self.handler:
            record.offset = int(time() * (10 ** 9))
            self.handler.emit(record)

    def set_context(self, ti: TaskInstance) -> None:
        """
        Provide task_instance context to airflow task handler.

        :param ti: task instance object
        """
        self.mark_end_on_close = not ti.raw

        if self.json_format:
            self.formatter = JSONFormatter(
                fmt=self.formatter._fmt,
                json_fields=self.json_fields + [self.offset_field],
                extras={
                    'dag_id': str(ti.dag_id),
                    'task_id': str(ti.task_id),
                    'execution_date': self._clean_date(ti.execution_date),
                    'try_number': str(ti.try_number),
                    'log_id': self._render_log_id(ti, ti.try_number),
                },
            )

        if self.write_stdout:
            if self.context_set:
                # We don't want to re-set up the handler if this logger has
                # already been initialized
                return

            self.handler = logging.StreamHandler(stream=sys.__stdout__)
            self.handler.setLevel(self.level)
            self.handler.setFormatter(self.formatter)
        else:
            super().set_context(ti)
        self.context_set = True

    def close(self) -> None:
        # When application exit, system shuts down all handlers by
        # calling close method. Here we check if logger is already
        # closed to prevent uploading the log to remote storage multiple
        # times when `logging.shutdown` is called.
        if self.closed:
            return

        if not self.mark_end_on_close:
            self.closed = True
            return

        # Case which context of the handler was not set.
        if self.handler is None:
            self.closed = True
            return

        # Reopen the file stream, because FileHandler.close() would be called
        # first in logging.shutdown() and the stream in it would be set to None.
        if self.handler.stream is None or self.handler.stream.closed:  # type: ignore[attr-defined]
            self.handler.stream = self.handler._open()  # type: ignore[union-attr]

        # Mark the end of file using end of log mark,
        # so we know where to stop while auto-tailing.
        self.handler.stream.write(self.end_of_log_mark)

        if self.write_stdout:
            self.handler.close()
            sys.stdout = sys.__stdout__

        super().close()

        self.closed = True

    @property
    def log_name(self) -> str:
        """The log name"""
        return self.LOG_NAME

    def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str:
        """
        Creates an address for an external log collecting service.

        :param task_instance: task instance object
        :type: task_instance: TaskInstance
        :param try_number: task instance try_number to read logs from.
        :type try_number: Optional[int]
        :return: URL to the external log collection service
        :rtype: str
        """
        log_id = self._render_log_id(task_instance, try_number)
        # frontend is expected to contain a {log_id} placeholder; default
        # to https when no scheme was configured.
        scheme = '' if '://' in self.frontend else 'https://'
        return scheme + self.frontend.format(log_id=quote(log_id))

    @property
    def supports_external_link(self) -> bool:
        """Whether we can support external links"""
        return bool(self.frontend)


class _ESJsonLogFmt:
    """Helper class to read ES Logs and re-format it to match settings.LOG_FORMAT"""

    # A separate class is needed because 'self.formatter._style.format' uses '.__dict__'
    def __init__(self, json_fields: List, **kwargs):
        # Default every expected field to '' so %-formatting never KeyErrors.
        for field in json_fields:
            self.__setattr__(field, '')
        self.__dict__.update(kwargs)
{ "content_hash": "0cc133d695e514735c70f1c5e050e91c", "timestamp": "", "source": "github", "line_count": 355, "max_line_length": 128, "avg_line_length": 38.7830985915493, "alnum_prop": 0.6100377687391052, "repo_name": "mistercrunch/airflow", "id": "c2b041e03836383a272d0eaf5041bfb1d6f29887", "size": "14556", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "airflow/providers/elasticsearch/log/es_task_handler.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "36341" }, { "name": "HTML", "bytes": "99243" }, { "name": "JavaScript", "bytes": "891460" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "773270" }, { "name": "Shell", "bytes": "5659" } ], "symlink_target": "" }
from distutils.core import setup

# Read the long description from the README, dropping the trailing newline.
# Using a context manager closes the file handle deterministically -- the
# original `open('README.md').read()[:-1]` leaked the handle until GC.
with open('README.md') as readme_file:
    long_description = readme_file.read()[:-1]

setup(
    name='django-qrauth',
    version='0.1.2',
    author='aruseni',
    author_email='aruseni.magiku@gmail.com',
    packages=['qrauth'],
    url='https://github.com/aruseni/django-qrauth',
    license='BSD licence, see LICENCE.md',
    description=('Nice QR codes that allow the users to instantly'
                 ' sign in to the website on their mobile devices'),
    long_description=long_description,
    zip_safe=False,
    install_requires=['qrcode', 'redis', 'PIL'],
)
{ "content_hash": "ec7071f1e45105aa36a91268a7f67ad8", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 68, "avg_line_length": 33.375, "alnum_prop": 0.6441947565543071, "repo_name": "aruseni/django-qrauth", "id": "3af9fcb07ee7d0a810261fc9b615241a7575a69c", "size": "557", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "8984" } ], "symlink_target": "" }
import curses import numpy as np from npi.core import Program, IntegerArguments, NPIStep, StepOutput, StepInput, StepInOut __author__ = 'k_morishita' class Screen: data = None def __init__(self, height, width): self.height = height self.width = width self.init_screen() def init_screen(self): self.data = np.zeros([self.height, self.width], dtype=np.int8) def fill(self, ch): self.data.fill(ch) def as_float32(self): return self.data.astype(np.float32) def __setitem__(self, key, value): self.data[key] = value def __getitem__(self, item): return self.data[item] class Terminal: W_TOP = 1 W_LEFT = 1 LOG_WINDOW_HEIGHT = 10 LOG_WINDOW_WIDTH = 80 INFO_WINDOW_HEIGHT = 10 INFO_WINDOW_WIDTH = 40 main_window = None info_window = None log_window = None def __init__(self, stdscr, char_map=None): print(type(stdscr)) self.stdscr = stdscr self.char_map = char_map or dict((ch, chr(ch)) for ch in range(128)) self.log_list = [] def init_window(self, width, height): curses.curs_set(0) border_win = curses.newwin(height + 2, width + 2, self.W_TOP, self.W_LEFT) # h, w, y, x border_win.box() self.stdscr.refresh() border_win.refresh() self.main_window = curses.newwin(height, width, self.W_TOP + 1, self.W_LEFT + 1) self.main_window.refresh() self.main_window.timeout(1) self.info_window = curses.newwin(self.INFO_WINDOW_HEIGHT, self.INFO_WINDOW_WIDTH, self.W_TOP + 1, self.W_LEFT + width + 2) self.log_window = curses.newwin(self.LOG_WINDOW_HEIGHT, self.LOG_WINDOW_WIDTH, self.W_TOP + max(height, self.INFO_WINDOW_HEIGHT) + 5, self.W_LEFT) self.log_window.refresh() def wait_for_key(self): self.stdscr.getch() def update_main_screen(self, screen): for y in range(screen.height): line = "".join([self.char_map[ch] for ch in screen[y]]) self.ignore_error_add_str(self.main_window, y, 0, line) def update_main_window_attr(self, screen, y, x, attr): ch = screen[y, x] self.ignore_error_add_str(self.main_window, y, x, self.char_map[ch], attr) def refresh_main_window(self): self.main_window.refresh() def 
update_info_screen(self, info_list): self.info_window.clear() for i, info_str in enumerate(info_list): self.info_window.addstr(i, 2, info_str) self.info_window.refresh() def add_log(self, line): self.log_list.insert(0, str(line)[:self.LOG_WINDOW_WIDTH]) self.log_list = self.log_list[:self.LOG_WINDOW_HEIGHT-1] self.log_window.clear() for i, line in enumerate(self.log_list): line = str(line) + " " * (self.LOG_WINDOW_WIDTH - len(str(line))) self.log_window.addstr(i, 0, line) self.log_window.refresh() @staticmethod def ignore_error_add_str(win, y, x, s, attr=curses.A_NORMAL): """一番右下に書き込むと例外が飛んでくるけど、漢は黙って無視するのがお作法らしい?""" try: win.addstr(y, x, s, attr) except curses.error: pass def show_env_to_terminal(terminal, env): terminal.update_main_screen(env.screen) for i, p in enumerate(env.pointers): terminal.update_main_window_attr(env.screen, i, p, curses.A_REVERSE) terminal.refresh_main_window() class TerminalNPIRunner: def __init__(self, terminal: Terminal, model: NPIStep=None, recording=True, max_depth=10, max_step=1000): self.terminal = terminal self.model = model self.steps = 0 self.step_list = [] self.alpha = 0.5 self.verbose = True self.recording = recording self.max_depth = max_depth self.max_step = max_step def reset(self): self.steps = 0 self.step_list = [] self.model.reset() def display_env(self, env, force=False): if (self.verbose or force) and self.terminal: show_env_to_terminal(self.terminal, env) def display_information(self, program: Program, arguments: IntegerArguments, result: StepOutput, depth: int): if self.verbose and self.terminal: information = [ "Step %2d Depth: %2d" % (self.steps, depth), program.description_with_args(arguments), 'r=%.2f' % result.r, ] if result.program: information.append("-> %s" % result.program.description_with_args(result.arguments)) self.terminal.update_info_screen(information) self.wait() def npi_program_interface(self, env, program: Program, arguments: IntegerArguments, depth=0): if self.max_depth < depth or self.max_step < 
self.steps: raise StopIteration() self.model.enter_function() result = StepOutput(0, None, None) while result.r < self.alpha: self.steps += 1 if self.max_step < self.steps: raise StopIteration() env_observation = env.get_observation() result = self.model.step(env_observation, program, arguments.copy()) if self.recording: self.step_list.append(StepInOut(StepInput(env_observation, program, arguments.copy()), result)) self.display_information(program, arguments, result, depth) if program.output_to_env: program.do(env, arguments.copy()) self.display_env(env) else: if result.program: # modify original algorithm self.npi_program_interface(env, result.program, result.arguments, depth=depth+1) self.model.exit_function() def wait(self): self.terminal.wait_for_key()
{ "content_hash": "4f794b7c00214d439eb2d6fe9774f9c2", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 113, "avg_line_length": 34.23699421965318, "alnum_prop": 0.5912544318757387, "repo_name": "mokemokechicken/keras_npi", "id": "670fdbf28f17bee1844ee4a7f31eaecdcda2832e", "size": "6039", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/npi/terminal_core.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "39952" }, { "name": "Shell", "bytes": "1093" } ], "symlink_target": "" }
"""Support for Oncue sensors.""" from __future__ import annotations from aiooncue import OncueDevice, OncueSensor from homeassistant.helpers import device_registry as dr from homeassistant.helpers.entity import DeviceInfo, Entity, EntityDescription from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, ) from .const import DOMAIN class OncueEntity(CoordinatorEntity, Entity): """Representation of an Oncue entity.""" def __init__( self, coordinator: DataUpdateCoordinator, device_id: str, device: OncueDevice, sensor: OncueSensor, description: EntityDescription, ) -> None: """Initialize the sensor.""" super().__init__(coordinator) self.entity_description = description self._device_id = device_id self._attr_unique_id = f"{device_id}_{description.key}" self._attr_name = f"{device.name} {sensor.display_name}" mac_address_hex = hex(int(device.sensors["MacAddress"].value))[2:] self._attr_device_info = DeviceInfo( identifiers={(DOMAIN, device_id)}, connections={(dr.CONNECTION_NETWORK_MAC, mac_address_hex)}, name=device.name, hw_version=device.hardware_version, sw_version=device.sensors["FirmwareVersion"].display_value, model=device.sensors["GensetModelNumberSelect"].display_value, manufacturer="Kohler", ) @property def _oncue_value(self) -> str: """Return the sensor value.""" device: OncueDevice = self.coordinator.data[self._device_id] sensor: OncueSensor = device.sensors[self.entity_description.key] return sensor.value
{ "content_hash": "bc3a49f1dab4f9ed312ed744924585a9", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 78, "avg_line_length": 35.6530612244898, "alnum_prop": 0.6571265025758443, "repo_name": "GenericStudent/home-assistant", "id": "40ca21edf96aecb4e45fbaa7589fb3dd496c1c8d", "size": "1747", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/oncue/entity.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "3070" }, { "name": "Python", "bytes": "44491729" }, { "name": "Shell", "bytes": "5092" } ], "symlink_target": "" }
from __future__ import unicode_literals from datetime import datetime from dateutil import rrule import pytz import random from django.core.urlresolvers import reverse_lazy from django.shortcuts import redirect from django.utils.translation import ugettext_lazy as _ from django.views.generic import View from dash.orgs.views import OrgPermsMixin, OrgObjPermsMixin from smartmin.views import ( SmartCRUDL, SmartCreateView, SmartDeleteView, SmartFormView, SmartListView, SmartReadView, SmartUpdateView, SmartView) from tracpro.polls.models import Answer, PollRun, Response from .models import BaselineTerm from .forms import BaselineTermForm, SpoofDataForm, BaselineTermFilterForm from .charts import chart_baseline class BaselineTermCRUDL(SmartCRUDL): model = BaselineTerm actions = ('create', 'read', 'update', 'delete', 'list', 'data_spoof', 'clear_spoof') path = "indicators" class BaselineTermMixin(object): def get_queryset(self): indicators = BaselineTerm.objects.by_org(self.request.org) indicators = indicators.select_related( 'baseline_question', 'follow_up_question') return indicators class Create(OrgPermsMixin, SmartCreateView): form_class = BaselineTermForm success_url = 'id@baseline.baselineterm_read' def get_form_kwargs(self): kwargs = super(BaselineTermCRUDL.Create, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs class List(BaselineTermMixin, OrgPermsMixin, SmartListView): default_order = ('-start_date', '-end_date') fields = ('name', 'start_date', 'end_date', 'baseline_question', 'follow_up_question') link_fields = ('name') class Delete(BaselineTermMixin, OrgObjPermsMixin, SmartDeleteView): cancel_url = '@baseline.baselineterm_list' redirect_url = reverse_lazy('baseline.baselineterm_list') class Update(BaselineTermMixin, OrgObjPermsMixin, SmartUpdateView): form_class = BaselineTermForm delete_url = '' # Turn off the smartmin delete button for this view success_url = 'id@baseline.baselineterm_read' def get_form_kwargs(self): kwargs = 
super(BaselineTermCRUDL.Update, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs class Read(BaselineTermMixin, OrgObjPermsMixin, SmartReadView): def get_context_data(self, **kwargs): filter_form = BaselineTermFilterForm( org=self.request.org, baseline_term=self.object, data_regions=self.request.data_regions, data=self.request.GET) if filter_form.is_valid(): chart_data, summary_table = chart_baseline( self.object, filter_form, self.request.region, self.request.include_subregions) else: chart_data = None summary_table = None kwargs.setdefault('form', filter_form) kwargs.setdefault('chart_data', chart_data) kwargs.setdefault('summary_table', summary_table) return super(BaselineTermCRUDL.Read, self).get_context_data(**kwargs) class DataSpoof(OrgPermsMixin, SmartFormView): title = _("Baseline Term Data Spoof") template_name = 'baseline/baselineterm_data.html' form_class = SpoofDataForm cancel_url = '@baseline.baselineterm_list' success_url = '@baseline.baselineterm_list' def dispatch(self, request, *args, **kwargs): # Prevent Data Spoof for orgs with show_spoof_data turned off if not self.request.org.show_spoof_data: return redirect('baseline.baselineterm_list') return super(BaselineTermCRUDL.DataSpoof, self).dispatch(request, *args, **kwargs) def get_form_kwargs(self): kwargs = super(BaselineTermCRUDL.DataSpoof, self).get_form_kwargs() kwargs.setdefault('org', self.request.org) return kwargs def random_answer_calculate(self, min_value, max_value): random_value = min_value if min_value == max_value else random.randrange(min_value, max_value) return random_value def create_baseline(self, poll, date, contacts, baseline_question, baseline_minimum, baseline_maximum): baseline_datetime = datetime.combine(date, datetime.utcnow().time().replace(tzinfo=pytz.utc)) baseline_pollrun = PollRun.objects.create_spoofed( poll=poll, conducted_on=baseline_datetime) for contact in contacts: # Create a Response AKA FlowRun for each contact for Baseline response = 
Response.objects.create( pollrun=baseline_pollrun, contact=contact, created_on=baseline_datetime, updated_on=baseline_datetime, status=Response.STATUS_COMPLETE, is_active=True) random_answer = self.random_answer_calculate(baseline_minimum, baseline_maximum) # Create a randomized Answer for each contact for Baseline Answer.objects.create( response=response, question=baseline_question, value=random_answer, submitted_on=baseline_datetime, category='') def form_valid(self, form): baseline_question = self.form.cleaned_data['baseline_question'] follow_up_question = self.form.cleaned_data['follow_up_question'] contacts = self.form.cleaned_data['contacts'] baseline_minimum = self.form.cleaned_data['baseline_minimum'] baseline_maximum = self.form.cleaned_data['baseline_maximum'] follow_up_minimum = self.form.cleaned_data['follow_up_minimum'] follow_up_maximum = self.form.cleaned_data['follow_up_maximum'] start = self.form.cleaned_data['start_date'] end = self.form.cleaned_data['end_date'] # Create a single PollRun for the Baseline Poll for all contacts self.create_baseline(baseline_question.poll, start, contacts, baseline_question, baseline_minimum, baseline_maximum) # Create a PollRun for each date from start to end dates for the Follow Up Poll for loop_count, follow_up_date in enumerate(rrule.rrule(rrule.DAILY, dtstart=start, until=end)): follow_up_datetime = datetime.combine(follow_up_date, datetime.utcnow().time().replace(tzinfo=pytz.utc)) follow_up_pollrun = PollRun.objects.create_spoofed( poll=follow_up_question.poll, conducted_on=follow_up_datetime) for contact in contacts: # Create a Response AKA FlowRun for each contact for Follow Up response = Response.objects.create( pollrun=follow_up_pollrun, contact=contact, created_on=follow_up_datetime, updated_on=follow_up_datetime, status=Response.STATUS_COMPLETE, is_active=True) random_answer = self.random_answer_calculate(follow_up_minimum, follow_up_maximum) # Create a randomized Answer for each contact for Follow Up 
Answer.objects.create( response=response, question=follow_up_question, value=random_answer, submitted_on=follow_up_datetime, category='') loop_count += 1 return redirect(self.get_success_url()) class ClearSpoof(OrgPermsMixin, SmartView, View): def dispatch(self, request, *args, **kwargs): # Prevent Data Spoof for orgs with show_spoof_data turned off if not self.request.org.show_spoof_data: return redirect('baseline.baselineterm_list') return super(BaselineTermCRUDL.ClearSpoof, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): # Spoofed data has TYPE_SPOOFED. Filter only for current org. pollruns = PollRun.objects.filter(pollrun_type=PollRun.TYPE_SPOOFED, poll__org=self.request.org) # This will create a cascading delete to clear out all Spoofed Poll data # from PollRun, Answer and Response pollruns.delete() return redirect('baseline.baselineterm_list')
{ "content_hash": "e5449c221e611768e8b21f3a076f7eea", "timestamp": "", "source": "github", "line_count": 192, "max_line_length": 120, "avg_line_length": 46.036458333333336, "alnum_prop": 0.6169249915148772, "repo_name": "xkmato/tracpro", "id": "24d360f9591110f58f3bd7b00eeff087926052ec", "size": "8839", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tracpro/baseline/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "27726" }, { "name": "CoffeeScript", "bytes": "10296" }, { "name": "HTML", "bytes": "107840" }, { "name": "JavaScript", "bytes": "25237" }, { "name": "Makefile", "bytes": "1962" }, { "name": "Python", "bytes": "406848" }, { "name": "SaltStack", "bytes": "19566" }, { "name": "Scheme", "bytes": "29815" }, { "name": "Shell", "bytes": "205447" } ], "symlink_target": "" }
from setuptools import setup import os PROJECT_ROOT, _ = os.path.split(__file__) REVISION = '0.2.28' PROJECT_NAME = 'JenkinsAPI' PROJECT_AUTHORS = "Salim Fadhley, Aleksey Maksimov" # Please see readme.rst for a complete list of contributors PROJECT_EMAILS = 'salimfadhley@gmail.com, ctpeko3a@gmail.com' PROJECT_URL = "https://github.com/salimfadhley/jenkinsapi" SHORT_DESCRIPTION = 'A Python API for accessing resources on a Jenkins continuous-integration server.' try: DESCRIPTION = open(os.path.join(PROJECT_ROOT, "README.rst")).read() except IOError: DESCRIPTION = SHORT_DESCRIPTION GLOBAL_ENTRY_POINTS = { "console_scripts": ["jenkins_invoke=jenkinsapi.command_line.jenkins_invoke:main", "jenkinsapi_version=jenkinsapi.command_line.jenkinsapi_version:main"] } setup( name=PROJECT_NAME.lower(), version=REVISION, author=PROJECT_AUTHORS, author_email=PROJECT_EMAILS, packages=[ 'jenkinsapi', 'jenkinsapi.utils', 'jenkinsapi.command_line', 'jenkinsapi_tests'], zip_safe=True, include_package_data=False, install_requires=['requests>=2.3.0', 'pytz>=2014.4'], test_suite='nose.collector', tests_require=['mock', 'nose', 'coverage', 'unittest2'], entry_points=GLOBAL_ENTRY_POINTS, url=PROJECT_URL, description=SHORT_DESCRIPTION, long_description=DESCRIPTION, license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Topic :: Software Development :: Testing', ], )
{ "content_hash": "40eb29c4323eab0016d54c0bbb3919eb", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 102, "avg_line_length": 33.698113207547166, "alnum_prop": 0.6668533034714446, "repo_name": "JohnLZeller/jenkinsapi", "id": "20eda5ed76ec3c6a5f3b90fafd842f41c9e2b5f1", "size": "1786", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "1625" }, { "name": "Python", "bytes": "324650" }, { "name": "Shell", "bytes": "1076" } ], "symlink_target": "" }
from django.conf.urls import include, url from django.contrib import admin from newsapp import views,auth #from local module import views urlpatterns = [ url(r'^$', views.articles_list,name='articles_list'), url(r'^feeds/new', views.new_feed, name='feed_new'), url(r'^feeds/', views.feeds_list,name='feeds_list'), url(r'^login/', auth.login, name = 'login'), url(r'^logout/', auth.logout, name = 'logout'), url(r'^register/', auth.register, name = 'register'), url(r'^admin/', include(admin.site.urls)), ]
{ "content_hash": "d20ee071aeb928e32a61fc9ef5a60c6b", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 62, "avg_line_length": 44.5, "alnum_prop": 0.6647940074906367, "repo_name": "khansrk/newsappsrk", "id": "0a2a432c50130d04cb8b824261e1bfc333b6540d", "size": "534", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "news/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "7433" }, { "name": "Python", "bytes": "11306" } ], "symlink_target": "" }
"""An Application for launching a kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import atexit import os import sys import signal import zmq from zmq.eventloop import ioloop from zmq.eventloop.zmqstream import ZMQStream from IPython.core.ultratb import FormattedTB from IPython.core.application import ( BaseIPythonApplication, base_flags, base_aliases, catch_config_error ) from IPython.core.profiledir import ProfileDir from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from IPython.utils import io from ipython_genutils.path import filefind, ensure_dir_exists from traitlets import ( Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, ) from ipython_genutils.importstring import import_item from jupyter_core.paths import jupyter_runtime_dir from jupyter_client import write_connection_file from jupyter_client.connect import ConnectionFileMixin # local imports from .heartbeat import Heartbeat from .ipkernel import IPythonKernel from .parentpoller import ParentPollerUnix, ParentPollerWindows from jupyter_client.session import ( Session, session_flags, session_aliases, ) from .zmqshell import ZMQInteractiveShell #----------------------------------------------------------------------------- # Flags and Aliases #----------------------------------------------------------------------------- kernel_aliases = dict(base_aliases) kernel_aliases.update({ 'ip' : 'IPKernelApp.ip', 'hb' : 'IPKernelApp.hb_port', 'shell' : 'IPKernelApp.shell_port', 'iopub' : 'IPKernelApp.iopub_port', 'stdin' : 'IPKernelApp.stdin_port', 'control' : 'IPKernelApp.control_port', 'f' : 'IPKernelApp.connection_file', 'transport': 'IPKernelApp.transport', }) kernel_flags = dict(base_flags) kernel_flags.update({ 'no-stdout' : ( {'IPKernelApp' : {'no_stdout' : True}}, "redirect stdout to the null device"), 'no-stderr' : ( {'IPKernelApp' : {'no_stderr' : True}}, 
"redirect stderr to the null device"), 'pylab' : ( {'IPKernelApp' : {'pylab' : 'auto'}}, """Pre-load matplotlib and numpy for interactive use with the default matplotlib backend."""), }) # inherit flags&aliases for any IPython shell apps kernel_aliases.update(shell_aliases) kernel_flags.update(shell_flags) # inherit flags&aliases for Sessions kernel_aliases.update(session_aliases) kernel_flags.update(session_flags) _ctrl_c_message = """\ NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work. To exit, you will have to explicitly quit this process, by either sending "quit" from a client, or using Ctrl-\\ in UNIX-like environments. To read more about this, see https://github.com/ipython/ipython/issues/2049 """ #----------------------------------------------------------------------------- # Application class for starting an IPython Kernel #----------------------------------------------------------------------------- class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin): name='ipython-kernel' aliases = Dict(kernel_aliases) flags = Dict(kernel_flags) classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session] # the kernel class, as an importstring kernel_class = Type('ipykernel.ipkernel.IPythonKernel', config=True, klass='ipykernel.kernelbase.Kernel', help="""The Kernel subclass to be used. This should allow easy re-use of the IPKernelApp entry point to configure and launch kernels other than IPython's own. 
""") kernel = Any() poller = Any() # don't restrict this even though current pollers are all Threads heartbeat = Instance(Heartbeat, allow_none=True) ports = Dict() # connection info: connection_dir = Unicode() def _connection_dir_default(self): d = jupyter_runtime_dir() ensure_dir_exists(d, 0o700) return d @property def abs_connection_file(self): if os.path.basename(self.connection_file) == self.connection_file: return os.path.join(self.connection_dir, self.connection_file) else: return self.connection_file # streams, etc. no_stdout = Bool(False, config=True, help="redirect stdout to the null device") no_stderr = Bool(False, config=True, help="redirect stderr to the null device") outstream_class = DottedObjectName('ipykernel.iostream.OutStream', config=True, help="The importstring for the OutStream factory") displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook', config=True, help="The importstring for the DisplayHook factory") # polling parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0), config=True, help="""kill this process if its parent dies. On Windows, the argument specifies the HANDLE of the parent process, otherwise it is simply boolean. """) interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0), config=True, help="""ONLY USED ON WINDOWS Interrupt this process when the parent is signaled. 
""") def init_crash_handler(self): # Install minimal exception handling sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor', ostream=sys.__stdout__) def init_poller(self): if sys.platform == 'win32': if self.interrupt or self.parent_handle: self.poller = ParentPollerWindows(self.interrupt, self.parent_handle) elif self.parent_handle: self.poller = ParentPollerUnix() def _bind_socket(self, s, port): iface = '%s://%s' % (self.transport, self.ip) if self.transport == 'tcp': if port <= 0: port = s.bind_to_random_port(iface) else: s.bind("tcp://%s:%i" % (self.ip, port)) elif self.transport == 'ipc': if port <= 0: port = 1 path = "%s-%i" % (self.ip, port) while os.path.exists(path): port = port + 1 path = "%s-%i" % (self.ip, port) else: path = "%s-%i" % (self.ip, port) s.bind("ipc://%s" % path) return port def write_connection_file(self): """write connection info to JSON file""" cf = self.abs_connection_file self.log.debug("Writing connection file: %s", cf) write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport, shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port, iopub_port=self.iopub_port, control_port=self.control_port) def cleanup_connection_file(self): cf = self.abs_connection_file self.log.debug("Cleaning up connection file: %s", cf) try: os.remove(cf) except (IOError, OSError): pass self.cleanup_ipc_files() def init_connection_file(self): if not self.connection_file: self.connection_file = "kernel-%s.json"%os.getpid() try: self.connection_file = filefind(self.connection_file, ['.', self.connection_dir]) except IOError: self.log.debug("Connection file not found: %s", self.connection_file) # This means I own it, so I will clean it up: atexit.register(self.cleanup_connection_file) return try: self.load_connection_file() except Exception: self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) self.exit(1) def init_sockets(self): # Create a context, a session, and 
the kernel sockets. self.log.info("Starting the kernel at pid: %i", os.getpid()) context = zmq.Context.instance() # Uncomment this to try closing the context. # atexit.register(context.term) self.shell_socket = context.socket(zmq.ROUTER) self.shell_socket.linger = 1000 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) self.iopub_socket = context.socket(zmq.PUB) self.iopub_socket.linger = 1000 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port) self.stdin_socket = context.socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) self.control_socket = context.socket(zmq.ROUTER) self.control_socket.linger = 1000 self.control_port = self._bind_socket(self.control_socket, self.control_port) self.log.debug("control ROUTER Channel on port: %i" % self.control_port) def init_heartbeat(self): """start the heart beating""" # heartbeat doesn't share context, because it mustn't be blocked # by the GIL, which is accessed by libzmq when freeing zero-copy messages hb_ctx = zmq.Context() self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port)) self.hb_port = self.heartbeat.port self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port) self.heartbeat.start() def log_connection_info(self): """display connection info, and store ports""" basename = os.path.basename(self.connection_file) if basename == self.connection_file or \ os.path.dirname(self.connection_file) == self.connection_dir: # use shortname tail = basename else: tail = self.connection_file lines = [ "To connect another client to this kernel, use:", " --existing %s" % tail, ] # log connection info # info-level, so often not shown. 
# frontends should use the %connect_info magic # to see the connection info for line in lines: self.log.info(line) # also raw print to the terminal if no parent_handle (`ipython kernel`) if not self.parent_handle: io.rprint(_ctrl_c_message) for line in lines: io.rprint(line) self.ports = dict(shell=self.shell_port, iopub=self.iopub_port, stdin=self.stdin_port, hb=self.hb_port, control=self.control_port) def init_blackhole(self): """redirects stdout/stderr to devnull if necessary""" if self.no_stdout or self.no_stderr: blackhole = open(os.devnull, 'w') if self.no_stdout: sys.stdout = sys.__stdout__ = blackhole if self.no_stderr: sys.stderr = sys.__stderr__ = blackhole def init_io(self): """Redirect input streams and set a display hook.""" if self.outstream_class: outstream_factory = import_item(str(self.outstream_class)) sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout') sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr') if self.displayhook_class: displayhook_factory = import_item(str(self.displayhook_class)) sys.displayhook = displayhook_factory(self.session, self.iopub_socket) def init_signal(self): signal.signal(signal.SIGINT, signal.SIG_IGN) def init_kernel(self): """Create the Kernel object itself""" shell_stream = ZMQStream(self.shell_socket) control_stream = ZMQStream(self.control_socket) kernel_factory = self.kernel_class.instance kernel = kernel_factory(parent=self, session=self.session, shell_streams=[shell_stream, control_stream], iopub_socket=self.iopub_socket, stdin_socket=self.stdin_socket, log=self.log, profile_dir=self.profile_dir, user_ns=self.user_ns, ) kernel.record_ports(self.ports) self.kernel = kernel def init_gui_pylab(self): """Enable GUI event loop integration, taking pylab into account.""" # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab` # to ensure that any exception is printed straight to stderr. 
# Normally _showtraceback associates the reply with an execution, # which means frontends will never draw it, as this exception # is not associated with any execute request. shell = self.shell _showtraceback = shell._showtraceback try: # replace error-sending traceback with stderr def print_tb(etype, evalue, stb): print ("GUI event loop or pylab initialization failed", file=io.stderr) print (shell.InteractiveTB.stb2text(stb), file=io.stderr) shell._showtraceback = print_tb InteractiveShellApp.init_gui_pylab(self) finally: shell._showtraceback = _showtraceback def init_shell(self): self.shell = getattr(self.kernel, 'shell', None) if self.shell: self.shell.configurables.append(self) def init_extensions(self): super(IPKernelApp, self).init_extensions() # BEGIN HARDCODED WIDGETS HACK # Ensure ipywidgets extension is loaded if available extension_man = self.shell.extension_manager if 'ipywidgets' not in extension_man.loaded: try: extension_man.load_extension('ipywidgets') except ImportError as e: self.log.debug('ipywidgets package not installed. 
Widgets will not be available.') # END HARDCODED WIDGETS HACK @catch_config_error def initialize(self, argv=None): super(IPKernelApp, self).initialize(argv) self.init_blackhole() self.init_connection_file() self.init_poller() self.init_sockets() self.init_heartbeat() # writing/displaying connection info must be *after* init_sockets/heartbeat self.log_connection_info() self.write_connection_file() self.init_io() self.init_signal() self.init_kernel() # shell init steps self.init_path() self.init_shell() if self.shell: self.init_gui_pylab() self.init_extensions() self.init_code() # flush stdout/stderr, so that anything written to these streams during # initialization do not get associated with the first execution request sys.stdout.flush() sys.stderr.flush() def start(self): if self.poller is not None: self.poller.start() self.kernel.start() try: ioloop.IOLoop.instance().start() except KeyboardInterrupt: pass launch_new_instance = IPKernelApp.launch_instance def main(): """Run an IPKernel as an application""" app = IPKernelApp.instance() app.initialize() app.start() if __name__ == '__main__': main()
{ "content_hash": "8dcfaf96de46999623529dfc678dd467", "timestamp": "", "source": "github", "line_count": 403, "max_line_length": 101, "avg_line_length": 38.68982630272953, "alnum_prop": 0.6142252437147255, "repo_name": "bdh1011/wau", "id": "719bb560244345f16df96324268a31f4e607c17b", "size": "15592", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "venv/lib/python2.7/site-packages/ipykernel/kernelapp.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1176" }, { "name": "C", "bytes": "5022853" }, { "name": "C++", "bytes": "43676" }, { "name": "CSS", "bytes": "10359" }, { "name": "D", "bytes": "1841" }, { "name": "FORTRAN", "bytes": "3707" }, { "name": "GAP", "bytes": "14120" }, { "name": "Groff", "bytes": "7236" }, { "name": "HTML", "bytes": "1709320" }, { "name": "JavaScript", "bytes": "1200059" }, { "name": "Jupyter Notebook", "bytes": "310219" }, { "name": "Lua", "bytes": "11887" }, { "name": "Makefile", "bytes": "112163" }, { "name": "Mako", "bytes": "412" }, { "name": "Objective-C", "bytes": "1291" }, { "name": "Perl", "bytes": "171375" }, { "name": "Python", "bytes": "49407229" }, { "name": "Ruby", "bytes": "58403" }, { "name": "Shell", "bytes": "47672" }, { "name": "Smarty", "bytes": "22599" }, { "name": "Tcl", "bytes": "426334" }, { "name": "XSLT", "bytes": "153073" } ], "symlink_target": "" }
from __future__ import unicode_literals import frappe from frappe.translate import get_lang_dict # migrate language from name to code def execute(): return
{ "content_hash": "9e9504a02b4dcddf1c5df191f87a8f86", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 42, "avg_line_length": 19.875, "alnum_prop": 0.779874213836478, "repo_name": "adityahase/frappe", "id": "d685fd7d0e9a0aef3aeab11c4859f7a90ec04ddb", "size": "159", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "frappe/patches/v6_24/set_language_as_code.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "288806" }, { "name": "HTML", "bytes": "209164" }, { "name": "JavaScript", "bytes": "2350450" }, { "name": "Less", "bytes": "160693" }, { "name": "Makefile", "bytes": "99" }, { "name": "Python", "bytes": "3035663" }, { "name": "SCSS", "bytes": "45340" }, { "name": "Shell", "bytes": "517" }, { "name": "Vue", "bytes": "73943" } ], "symlink_target": "" }
__author__ = 'nlivni' import django from django.conf.urls import patterns, url from models import Story from panews.views import StoryDetailView, StoryListView, StoryTemplateCreate, StoryDelete, StoryUpdate, CategoryListView, StoryCustom, StorySuccessView django.setup() urlpatterns = patterns('', # url(r'^$', 'panews.views.home', name='home'), # homepage / url(r'^$', StoryListView.as_view( model=Story, template_name="home.html", context_object_name="story_list" ) , name='home'), # create template /story/template/custom/ url(r'^story/template/create/$', StoryTemplateCreate.as_view(), name='template_create'), # list of stories by category /story/<category_slug>/ url(r'^story/category/(?P<category_slug>[-_\w]+)/$', CategoryListView.as_view(), name='category_list'), # view completed story /story/<story_id>/ url(r'^story/(?P<slug>[-_\w]+)/$', StoryDetailView.as_view(), name='story_detail' ), # generic list of all stories /story/ url(r'^story/$', StoryListView.as_view( model=Story ) , name='story_list'), # custom story from story /story/<story_slug>/custom/ url(r'^story/(?P<slug>[-_\w]+)/custom', StoryCustom.as_view(), name='story_custom'), # URLS THAT REQUIRE LOGGING IN (CRUD OF AUTHOR'S OWN CONTENT) # update story @login /story/<story_id>/update/ # update story template @login /story/<story_id>/template/update/ url(r'^story/(?P<edit_slug>[-_\w]+)/update$', StoryUpdate.as_view(), name='story_update'), url(r'^story/(?P<edit_slug>[-_\w]+)/success', StorySuccessView.as_view(), name='story_success'), # delete story template @login /story/<story_id>/template/delete/ url(r'^story/(?P<slug>[-_\w]+)/delete/$', StoryDelete.as_view(), name='story_delete'), )
{ "content_hash": "2fecacf1484169c37294b9116ddaa346", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 151, "avg_line_length": 34.48275862068966, "alnum_prop": 0.5905, "repo_name": "nlivni/passiveaggressivenews_project", "id": "f63a4cc5603b10e881c137421844151171a401de", "size": "2000", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "passiveaggressivenews/panews/urls.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "283409" }, { "name": "JavaScript", "bytes": "16207" }, { "name": "PHP", "bytes": "2199" }, { "name": "Python", "bytes": "20879" } ], "symlink_target": "" }
"""Define tests for the Elexa Guardian config flow.""" from aioguardian.errors import GuardianError from homeassistant import data_entry_flow from homeassistant.components.guardian import CONF_UID, DOMAIN from homeassistant.components.guardian.config_flow import ( async_get_pin_from_discovery_hostname, async_get_pin_from_uid, ) from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF from homeassistant.const import CONF_IP_ADDRESS, CONF_PORT from tests.async_mock import patch from tests.common import MockConfigEntry async def test_duplicate_error(hass, ping_client): """Test that errors are shown when duplicate entries are added.""" conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777} MockConfigEntry(domain=DOMAIN, unique_id="guardian_3456", data=conf).add_to_hass( hass ) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_connect_error(hass): """Test that the config entry errors out if the device cannot connect.""" conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777} with patch( "aioguardian.client.Client.connect", side_effect=GuardianError, ): result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {CONF_IP_ADDRESS: "cannot_connect"} async def test_get_pin_from_discovery_hostname(): """Test getting a device PIN from the zeroconf-discovered hostname.""" pin = async_get_pin_from_discovery_hostname("GVC1-3456.local.") assert pin == "3456" async def test_get_pin_from_uid(): """Test getting a device PIN from its UID.""" pin = async_get_pin_from_uid("ABCDEF123456") assert pin == "3456" async def test_step_user(hass, ping_client): """Test the user step.""" conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777} result = await 
hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "ABCDEF123456" assert result["data"] == { CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777, CONF_UID: "ABCDEF123456", } async def test_step_zeroconf(hass, ping_client): """Test the zeroconf step.""" zeroconf_data = { "host": "192.168.1.100", "port": 7777, "hostname": "GVC1-ABCD.local.", "type": "_api._udp.local.", "name": "Guardian Valve Controller API._api._udp.local.", "properties": {"_raw": {}}, } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "zeroconf_confirm" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "ABCDEF123456" assert result["data"] == { CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777, CONF_UID: "ABCDEF123456", } async def test_step_zeroconf_already_in_progress(hass): """Test the zeroconf step aborting because it's already in progress.""" zeroconf_data = { "host": "192.168.1.100", "port": 7777, "hostname": "GVC1-ABCD.local.", "type": "_api._udp.local.", "name": "Guardian Valve Controller API._api._udp.local.", "properties": {"_raw": {}}, } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "zeroconf_confirm" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data ) assert result["type"] 
== "abort" assert result["reason"] == "already_in_progress" async def test_step_zeroconf_no_discovery_info(hass): """Test the zeroconf step aborting because no discovery info came along.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "connection_error"
{ "content_hash": "1bea5bf4c37ef2d1bc2a8466d4a4893c", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 85, "avg_line_length": 35.114285714285714, "alnum_prop": 0.6517493897477624, "repo_name": "titilambert/home-assistant", "id": "91a1a3b83e0d2f4f4e6db5d423aa22c4a7b489aa", "size": "4916", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "tests/components/guardian/test_config_flow.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1488" }, { "name": "Python", "bytes": "25849092" }, { "name": "Shell", "bytes": "4410" } ], "symlink_target": "" }
"""Export and import material meshes.""" import bpy import nose.tools from pyffi.formats.nif import NifFormat from test import SingleNif from test.geometry.trishape import gen_geometry from test.geometry.trishape.test_geometry import TestBaseGeometry from test.property.material.gen_material import Material class TestMaterialProperty(SingleNif): n_name = 'property/material/base_material' b_name = 'Cube' def b_create_objects(self): b_obj = TestBaseGeometry().b_create_base_geometry() b_obj.name = self.b_name self.b_create_material_block(b_obj) def b_create_material_block(self, b_obj): b_mat = bpy.data.materials.new(name='Material') b_mat.specular_intensity = 0 # disable NiSpecularProperty b_obj.data.materials.append(b_mat) bpy.ops.object.shade_smooth() self.b_create_material_property(b_mat) return b_obj def b_create_material_property(self, b_mat): # TODO_3.0 - See above b_mat.ambient = 1.0 b_mat.diffuse_color = (1.0,1.0,1.0) b_mat.diffuse_intensity = 1.0 def b_check_data(self): b_obj = bpy.data.objects[self.b_name] TestBaseGeometry().b_check_geom_obj(b_obj) self.b_check_material_block(b_obj) def b_check_material_block(self, b_obj): b_mesh = b_obj.data b_mat = b_mesh.materials[0] nose.tools.assert_equal(len(b_mesh.materials), 1) self.b_check_material_property(b_mat) def b_check_material_property(self, b_mat): nose.tools.assert_equal(b_mat.ambient, 1.0) nose.tools.assert_equal(b_mat.diffuse_color[0], 1.0) nose.tools.assert_equal(b_mat.diffuse_color[1], 1.0) nose.tools.assert_equal(b_mat.diffuse_color[2], 1.0) def n_create_data(self): data = gen_geometry.n_create_data() data = Material().n_create(data) return data def n_check_data(self, n_data): TestBaseGeometry().n_check_data(n_data) self.n_check_material_block(n_data) def n_check_material_block(self, n_data): n_geom = n_data.roots[0].children[0] nose.tools.assert_equal(n_geom.num_properties, 1) self.n_check_material_property(n_geom.properties[0]) ''' TODO_3.0 - per version checking???? 
self.n_check_flags(n_data.header()) def n_check_flags(self, n_header): pass if(self.n_header.version == 'MORROWIND'): nose.tools.assert_equal(n_geom.properties[0].flags == 1) ''' def n_check_material_property(self, n_mat_prop): nose.tools.assert_is_instance(n_mat_prop, NifFormat.NiMaterialProperty) # TODO - Refer to header - can be ignored for now, defaults. nose.tools.assert_equal((n_mat_prop.ambient_color.r, n_mat_prop.ambient_color.g, n_mat_prop.ambient_color.b), (1.0,1.0,1.0)) nose.tools.assert_equal((n_mat_prop.diffuse_color.r, n_mat_prop.diffuse_color.g, n_mat_prop.diffuse_color.b), (1.0,1.0,1.0)) ''' class TestAmbientMaterial(TestMaterialProperty): n_name = "property/material/base_material" def b_create_object(self): b_obj = TestBaseGeometry.b_create_object(self) b_mat = b_obj.data.materials[0] #diffuse settings b_mat.niftools.ambient_color = (0.0,1.0,0.0)#TODO_3.0 - update func-> World ambient return b_obj def b_check_data(self, b_obj): b_mesh = b_obj.data b_mat = b_mesh.materials[0] self.b_check_ambient_property(b_mat) def b_check_ambient_property(self, b_mat) nose.tools.assert_equal(b_mat.niftools.ambient_color, (0.0,1.0,0.0)) def n_check_data(self, n_data): n_geom = n_data.roots[0].children[0] self.n_check_material_property(n_geom.properties[0]) def n_check_material_property(self, n_mat_prop): nose.tools.assert_is_instance(n_mat_prop, NifFormat.NiMaterialProperty) nose.tools.assert_equal(n_mat_prop.ambient_color, (0.0,1.0,0.0)) class TestDiffuseMaterial(TestMaterialProperty): n_name = "property/material/base_material" def b_create_object(self): b_obj = TestBaseGeometry.b_create_object(self) b_mat = b_obj.data.materials[0] #diffuse settings b_mat.niftools.diffuse_color = (0.0,1.0,0.0)#TODO_3.0 - update func-> World ambient return b_obj def b_check_data(self, b_obj): b_mesh = b_obj.data b_mat = b_mesh.materials[0] self.b_check_diffuse_property(b_mat) def b_check_diffuse_property(self, b_mat) nose.tools.assert_equal(b_mat.niftools.diffuse_color, 
(0.0,1.0,0.0)) nose.tools.assert_equal(b_mat.diffuse_intensity, 1.0) def n_check_data(self, n_data): n_geom = n_data.roots[0].children[0] self.n_check_material_property(n_geom.properties[0]) def n_check_material_property(self, n_mat_prop): nose.tools.assert_is_instance(n_mat_prop, NifFormat.NiMaterialProperty) nose.tools.assert_equal(n_mat_prop.diffuse_color, (0.0,1.0,0.0)) '''
{ "content_hash": "b0522bb460bbcb0dfe4c9a7403a82d91", "timestamp": "", "source": "github", "line_count": 144, "max_line_length": 91, "avg_line_length": 35.90972222222222, "alnum_prop": 0.6265712628118353, "repo_name": "amorilia/blender_nif_plugin", "id": "2351975da697d6333b41051b8288ba23dd7919bf", "size": "5171", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "testframework/test/property/material/test_material.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "716688" }, { "name": "Shell", "bytes": "1941" } ], "symlink_target": "" }
import inspect from functools import wraps from peeringdb.resource import RESOURCES_BY_TAG def reftag_to_cls(fn): """ decorator that checks function arguments for `concrete` and `resource` and will properly set them to class references if a string (reftag) is passed as the value """ spec = inspect.getfullargspec(fn) names, _ = spec.args, spec.defaults @wraps(fn) def wrapped(*args, **kwargs): i = 0 backend = args[0] for name in names[1:]: value = args[i] if name == "concrete" and isinstance(value, str): args[i] = backend.REFTAG_CONCRETE[value] elif name == "resource" and isinstance(value, str): args[i] = backend.REFTAG_RESOURCE[value] i += 1 return fn(*args, **kwargs) return wrapped class Field: """ We use this to provide field instances to backends that don't use classes to describe their fields """ def __init__(self, name): self.name = name self.column = None # ??? class EmptyContext: """ We use this to provide a dummy context wherever it's optional """ def __enter__(self): pass def __exit__(self, *args): pass class Base: """ Backend base class. Do NOT extend this directly when implementing a new backend, instead extend Interface below. 
""" # Handleref tag to resource class mapping REFTAG_RESOURCE = RESOURCES_BY_TAG @property def CONCRETE_MAP(self): if not hasattr(self, "_CONCRETE_MAP"): self._CONCRETE_MAP = { concrete: res for (res, concrete) in self.RESOURCE_MAP.items() } return self._CONCRETE_MAP def get_concrete(self, res): """ returns the concrete class for the resource Returns: - concrete class """ return self.RESOURCE_MAP[res] def is_concrete(self, cls): """ check if concrete class exists in the resource -> concrete mapping Returns: - bool: True if class exists in the resource -> concrete mapping """ return cls in self.CONCRETE_MAP def get_resource(self, cls): """ returns the resource class for the concrete class Returns: - resource class """ return self.CONCRETE_MAP[cls] class Interface(Base): """ backend adapter interface extend this when making a new backend """ # Resource class to concrete class mapping # should go in here RESOURCE_MAP = {} # Handleref tag to concrete class mapping # should go in here REFTAG_CONCRETE = {} @classmethod def validation_error(cls, concrete=None): """ should return the exception class that will be raised when an object fails validation Arguments: - concrete: if your backend has class specific validation errors and this is set, return the exception class that would be raised for this concrete class. Returns: - Exception class """ return Exception @classmethod def object_missing_error(cls, concrete=None): """ should return the exception class that will be raised when an object cannot be found Arguments: - concrete: if your backend has class specific object missing errors and this is set, return the exception class that would be raised for this concrete class. 
Returns: - Exception class """ return Exception @classmethod def atomic_transaction(cls): """ Allows you to return an atomic transaction context if your backend supports it, if it does not, leave as is This should never return None Returns: - python context instance """ return EmptyContext() @classmethod def setup(cls): """ operations that need to be done ONCE during runtime to prepare usage for the backend """ # INTERFACE (REQUIRED) # The following methods are required to be overwritten in # your backend and will raise a NotImplementedError if # they are not. # # when overriding make sure you also apply the `reftag_to_cls` # decorator on the methods that need it @reftag_to_cls def create_object(self, concrete, **data): """ should create object from dict and return it Arguments: - concrete: concrete class Keyword Arguments: - object field names -> values """ raise NotImplementedError() # TODO: def delete_all(self): """ Delete all objects, essentially empty the database """ raise NotImplementedError() def detect_missing_relations(self, obj, exc): """ Should parse error messages and collect the missing relationship errors as a dict of Resource -> {id set} and return it Arguments: - obj: concrete object instance - exc: exception instance Returns: - dict: {Resource : [ids]} """ raise NotImplementedError() def detect_uniqueness_error(self, exc): """ Should parse error message and collect any that describe violations of a uniqueness constraint. return the curresponding fields, else None Arguments: - exc: exception instance Returns: - list: list of fields - None: if no uniqueness errors """ raise NotImplementedError() @reftag_to_cls def get_field_names(self, concrete): """ Should return a list of field names for the concrete class Arguments: - concrete: concrete class Returns: - list: [<str>,...] 
""" raise NotImplementedError() @reftag_to_cls def get_field_concrete(self, concrete, field_name): """ Return concrete class for relationship by field name Arguments: - concrete: concrete class - field_name Returns: - concrete class """ raise NotImplementedError() @reftag_to_cls def get_object(self, concrete, id): """ should return instance of object with matching id Arguments: - concrete: concrete class - id: object primary key value Returns: - concrete instance """ raise NotImplementedError() @reftag_to_cls def get_object_by(self, concrete, field_name, value): """ very simply search function that should return collection of objects where field matches value Arguments: - concrete: concrete class - field_name: query this field for a match - value: match this value (simple equal matching) Returns: - concrete instance """ raise NotImplementedError() @reftag_to_cls def get_objects(self, concrete, ids=None): """ should return collection of objects Arguments: - concrete: concrete class - ids: if specified should be a list of primary key values and only objects matching those values should be returned Returns: - collection of concrete instances """ raise NotImplementedError() @reftag_to_cls def get_objects_by(self, concrete, field, value): """ very simple search function that should return collection of objects where field matches value Arguments: - concrete: concrete class - field_name: query this field for a match - value: match this value (simple equal matching) Returns: - collection of concrete instances """ raise NotImplementedError() @reftag_to_cls def is_field_related(self, concrete, field_name): """ Should return a tuple containing bools on whether a field signifies a relationship and if it's a single relationship or a relationship to multiple objects Arguments: - concrete: concrete class - field_name: query this field for a match Returns: - tuple: (bool related, bool many) """ raise NotImplementedError() @reftag_to_cls def last_change(self, concrete): """ should 
return unix epoch timestamp of the `updated` field of the most recently updated object Arguments: - concrete: concrete class Returns: - int """ raise NotImplementedError() def save(self, obj): """ Save the object instance Arguments: - obj: concrete object instance """ raise NotImplementedError() def set_relation_many_to_many(self, obj, field_name, objs): """ Setup a many to many relationship Arguments: - obj: concrete object instance - field_name: name of the field that holds the relationship - objs: collection of concrete objects to setup relationships with """ raise NotImplementedError() def update(self, obj, field_name, value): """ update field on a concrete instance to value this does not have to commit to the database, which will be handled separately via the `save` method. Arguments: - obj: concrete object instance - field_name - value """ setattr(obj, field_name, value) ## INTERFACE (OPTIONAL / SITUATIONAL) @reftag_to_cls def get_field(self, concrete, field_name): """ Should retrun a field instance, if your backend does not use classes to describe fields, leave this as is Arguments: - concrete: concrete class - field_name Returns: - field instance """ return Field(field_name) @reftag_to_cls def get_fields(self, concrete): """ Should return a collection of fields, if your backend does not use classes to describe fields, leave this as is Arguments: - concrete: concrete class Returns: - collection of field instances """ return [Field(name) for name in self.field_names(concrete)] def clean(self, obj): """ Should take an object instance and validate / clean it Arguments: - obj: concrete object instance """ @reftag_to_cls def convert_field(self, concrete, field_name, value): """ Should take a value and a field definition and do a value conversion if needed. should return the new value. 
Arguments: - concrete: concrete class - field_name - value """ def migrate_database(self, verbosity=0): """ Do database migrations Arguments: - verbosity <int>: arbitrary verbosity setting, 0 = be silent, 1 = show some info about migrations. """ def is_database_migrated(self, **kwargs): """ Should return whether the database is fully migrated Returns: - bool """ return True
{ "content_hash": "3135762bb090070e6e96b43207802652", "timestamp": "", "source": "github", "line_count": 493, "max_line_length": 80, "avg_line_length": 23.787018255578094, "alnum_prop": 0.5736334953526051, "repo_name": "peeringdb/peeringdb-py", "id": "7983a6890d8c0c23630452707e6753c7ff5c442a", "size": "11727", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/peeringdb/backend.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "87555" }, { "name": "Shell", "bytes": "381" } ], "symlink_target": "" }
import unittest import amulet class TestDeploy(unittest.TestCase): """ Trivial deployment test for Apache Zeppelin. This charm cannot do anything useful by itself, so integration testing is done in the bundle. """ def test_deploy(self): self.d = amulet.Deployment(series='trusty') self.d.add('spark', 'apache-spark') self.d.add('zeppelin', 'apache-zeppelin') self.d.relate('spark:client', 'zeppelin:spark') self.d.setup(timeout=900) self.d.sentry.wait(timeout=1800) self.unit = self.d.sentry['zeppelin'][0] if __name__ == '__main__': unittest.main()
{ "content_hash": "2c4b77afb226fac4a28b20c340d416a7", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 74, "avg_line_length": 26.666666666666668, "alnum_prop": 0.634375, "repo_name": "andrewdmcleod/layer-apache-spark-livy", "id": "7eee23ba85099bdf07a53adac4f1fc3fad4dd9f8", "size": "664", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/01-basic-deployment.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6695" } ], "symlink_target": "" }
class Klass(object): def just_meth(self): print("just_meth", self) @classmethod def klas_meth(kls): print("klas_meth", kls) @staticmethod def stat_meth(): print("stat_meth")
{ "content_hash": "8dbd917a1cb8f6cef268f56aa0186876", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 32, "avg_line_length": 18.416666666666668, "alnum_prop": 0.5656108597285068, "repo_name": "dhilipsiva/test", "id": "020b79a573a8650094afc7082df8b1196c80e654", "size": "221", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "klass.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "2019" }, { "name": "C#", "bytes": "3479" }, { "name": "CSS", "bytes": "11446" }, { "name": "CoffeeScript", "bytes": "939" }, { "name": "Dockerfile", "bytes": "22" }, { "name": "Go", "bytes": "4766" }, { "name": "HTML", "bytes": "11534" }, { "name": "Java", "bytes": "1007" }, { "name": "JavaScript", "bytes": "14003" }, { "name": "Python", "bytes": "97717" }, { "name": "Ruby", "bytes": "3426" }, { "name": "Shell", "bytes": "361" }, { "name": "Swift", "bytes": "2861" } ], "symlink_target": "" }
from typing import Any from unittest.mock import patch import ujson from django.http import HttpResponse from zerver.lib.test_classes import ZulipTestCase from zerver.lib.users import get_api_key from zerver.models import get_realm, get_user class ZephyrTest(ZulipTestCase): def test_webathena_kerberos_login(self) -> None: user = self.example_user('hamlet') self.login_user(user) def post(subdomain: Any, **kwargs: Any) -> HttpResponse: params = {k: ujson.dumps(v) for k, v in kwargs.items()} return self.client_post('/accounts/webathena_kerberos_login/', params, subdomain=subdomain) result = post("zulip") self.assert_json_error(result, 'Could not find Kerberos credential') result = post("zulip", cred='whatever') self.assert_json_error(result, 'Webathena login not enabled') email = str(self.mit_email("starnine")) realm = get_realm('zephyr') user = get_user(email, realm) api_key = get_api_key(user) self.login_user(user) def ccache_mock(**kwargs: Any) -> Any: return patch('zerver.views.zephyr.make_ccache', **kwargs) def ssh_mock(**kwargs: Any) -> Any: return patch('zerver.views.zephyr.subprocess.check_call', **kwargs) def mirror_mock() -> Any: return self.settings(PERSONAL_ZMIRROR_SERVER='server') def logging_mock() -> Any: return patch('logging.exception') cred = dict(cname=dict(nameString=['starnine'])) with ccache_mock(side_effect=KeyError('foo')): result = post("zephyr", cred=cred) self.assert_json_error(result, 'Invalid Kerberos cache') with \ ccache_mock(return_value=b'1234'), \ ssh_mock(side_effect=KeyError('foo')), \ logging_mock() as log: result = post("zephyr", cred=cred) self.assert_json_error(result, 'We were unable to setup mirroring for you') log.assert_called_with("Error updating the user's ccache") with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh: result = post("zephyr", cred=cred) self.assert_json_success(result) ssh.assert_called_with([ 'ssh', 'server', '--', '/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache', 
'starnine', api_key, 'MTIzNA==']) # Accounts whose Kerberos usernames are known not to match their # zephyr accounts are hardcoded, and should be handled properly. def kerberos_alter_egos_mock() -> Any: return patch( 'zerver.views.zephyr.kerberos_alter_egos', {'kerberos_alter_ego': 'starnine'}) cred = dict(cname=dict(nameString=['kerberos_alter_ego'])) with \ ccache_mock(return_value=b'1234'), \ mirror_mock(), \ ssh_mock() as ssh, \ kerberos_alter_egos_mock(): result = post("zephyr", cred=cred) self.assert_json_success(result) ssh.assert_called_with([ 'ssh', 'server', '--', '/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache', 'starnine', api_key, 'MTIzNA=='])
{ "content_hash": "c3dfe0377481c4f1e8290b6996923ac7", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 84, "avg_line_length": 35.204081632653065, "alnum_prop": 0.5753623188405798, "repo_name": "shubhamdhama/zulip", "id": "dae495d455759467aa11c1ec65ae20ab2548f126", "size": "3450", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "zerver/tests/test_zephyr.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "400387" }, { "name": "Dockerfile", "bytes": "2939" }, { "name": "Emacs Lisp", "bytes": "157" }, { "name": "HTML", "bytes": "721395" }, { "name": "JavaScript", "bytes": "3095896" }, { "name": "Perl", "bytes": "398763" }, { "name": "Puppet", "bytes": "71124" }, { "name": "Python", "bytes": "6896725" }, { "name": "Ruby", "bytes": "6110" }, { "name": "Shell", "bytes": "119898" }, { "name": "TypeScript", "bytes": "14645" } ], "symlink_target": "" }
from __future__ import annotations from abc import ABC, abstractmethod from typing import List, Optional import discord from redbot.core import Config from redbot.core.bot import Red class MixinMeta(ABC): """ Metaclass for well behaved type hint detection with composite class. """ # https://github.com/python/mypy/issues/1996 qualified_name: str def __init__(self, *_args): self.config: Config self.bot: Red @abstractmethod def strip_variations(self, s: str) -> str: raise NotImplementedError() @abstractmethod async def wait_for_ready(self) -> None: raise NotImplementedError() @abstractmethod async def is_self_assign_eligible( self, who: discord.Member, role: discord.Role ) -> List[discord.Role]: raise NotImplementedError() @abstractmethod async def update_roles_atomically( self, *, who: discord.Member, give: Optional[List[discord.Role]] = None, remove: Optional[List[discord.Role]] = None, ): raise NotImplementedError() @abstractmethod async def all_are_valid_roles(self, ctx, *roles: discord.Role) -> bool: raise NotImplementedError() @abstractmethod async def maybe_update_guilds(self, *guilds: discord.Guild) -> None: raise NotImplementedError() @abstractmethod def get_top_role(self, member: discord.Member) -> discord.Role: raise NotImplementedError()
{ "content_hash": "fc995db33e820a8ad184092360c98056", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 75, "avg_line_length": 25.67241379310345, "alnum_prop": 0.6574882471457354, "repo_name": "mikeshardmind/SinbadCogs", "id": "2ebfd8720abc5e01ec7c32a61be3b68a19f5e0e7", "size": "2094", "binary": false, "copies": "1", "ref": "refs/heads/apache2-releases", "path": "rolemanagement/abc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "890" }, { "name": "Makefile", "bytes": "251" }, { "name": "Python", "bytes": "335552" } ], "symlink_target": "" }
import os
import sys

# Default directory to clean; can be overridden by the first CLI argument.
# (The original script had this assignment commented out, which left
# `current_dir` undefined and crashed with a NameError on every run.)
DEFAULT_DIR = 'D:/2013_a/WHI_UBCSP2/Binary'
# File extension whose files get deleted.
EXTENSION = '.sp2b'


def delete_files_with_extension(top_dir, extension):
    """Recursively delete every file under *top_dir* ending with *extension*.

    Walks bottom-up (``topdown=False``) so files are removed before their
    parent directories are visited.  Each removed path is printed, and the
    list of removed paths is returned for logging/testing.
    """
    removed = []
    for root, dirs, files in os.walk(top_dir, topdown=False):
        for name in files:
            if name.endswith(extension):
                path = os.path.join(root, name)
                print(path)  # single-arg print() is valid in both py2 and py3
                os.remove(path)
                removed.append(path)
    return removed


if __name__ == '__main__':
    # Allow the target directory to be supplied on the command line.
    target = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_DIR
    delete_files_with_extension(target, EXTENSION)
{ "content_hash": "50d1a15313844df4c3a5da1ecd0fb7f0", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 61, "avg_line_length": 20.75, "alnum_prop": 0.6465863453815262, "repo_name": "annahs/atmos_research", "id": "b6b7709d0a53efb9020574a46390a09d5ff23691", "size": "249", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "util_delete_file_type.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1677056" } ], "symlink_target": "" }
'''This file generates shell code for the setup.SHELL scripts to set environment variables.'''

from __future__ import print_function

import argparse
import copy
import errno
import os
import platform
import sys

# Marker file that identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'

system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}


def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.

    Mutates ``environ`` in place with the rolled-back values and returns the
    list of shell assignment lines (empty if nothing changed).
    '''
    lines = []
    unmodified_environ = copy.copy(environ)
    for key in sorted(env_var_subfolders.keys()):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            value = _rollback_env_variable(unmodified_environ, key, subfolder)
            if value is not None:
                environ[key] = value
                lines.append(assignment(key, value))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines


def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry
    from env[NAME] matching workspace + subfolder.

    :param subfolder: str '' or a subfolder name that may start with '/'
    :returns: the updated value of the environment variable, or None if the
        variable was not modified
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    if subfolder:
        # normalize the subfolder: strip a leading/trailing path separator
        if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
            subfolder = subfolder[1:]
        if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
            subfolder = subfolder[:-1]
    for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
        path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
        path_to_remove = None
        for env_path in env_paths:
            # compare with any trailing separator removed
            env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
            if env_path_clean == path_to_find:
                path_to_remove = env_path
                break
        if path_to_remove:
            env_paths.remove(path_to_remove)
            value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None


def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.

    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    :param include_non_existing: also keep paths that do not exist on disk
    '''
    # get all cmake prefix paths
    env_name = 'CMAKE_PREFIX_PATH'
    value = environ[env_name] if env_name in environ else ''
    paths = [path for path in value.split(os.pathsep) if path]
    # remove non-workspace paths
    workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
    return workspaces


def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for all workspaces.
    '''
    lines = []
    lines.append(comment('prepend folders of workspaces to environment variables'))

    paths = [path for path in workspaces.split(os.pathsep) if path]

    # CMAKE_PREFIX_PATH is handled first and without a subfolder
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))

    for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
        subfolder = env_var_subfolders[key]
        prefix = _prefix_env_variable(environ, key, paths, subfolder)
        lines.append(prepend(environ, key, prefix))
    return lines


def _prefix_env_variable(environ, name, paths, subfolders):
    '''
    Return the prefix to prepend to the environment variable NAME, adding any
    path in ``paths`` (suffixed by ``subfolders``) without creating duplicate
    or empty items.
    '''
    value = environ[name] if name in environ else ''
    environ_paths = [path for path in value.split(os.pathsep) if path]
    checked_paths = []
    for path in paths:
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            path_tmp = path
            if subfolder:
                path_tmp = os.path.join(path_tmp, subfolder)
            # exclude any path already in env and any path we already added
            if path_tmp not in environ_paths and path_tmp not in checked_paths:
                checked_paths.append(path_tmp)
    prefix_str = os.pathsep.join(checked_paths)
    if prefix_str != '' and environ_paths:
        prefix_str += os.pathsep
    return prefix_str


def assignment(key, value):
    '''Return a shell assignment statement for the current platform.'''
    if not IS_WINDOWS:
        return 'export %s="%s"' % (key, value)
    else:
        return 'set %s=%s' % (key, value)


def comment(msg):
    '''Return a shell comment line for the current platform.'''
    if not IS_WINDOWS:
        return '# %s' % msg
    else:
        return 'REM %s' % msg


def prepend(environ, key, prefix):
    '''Return a shell statement prepending ``prefix`` to variable ``key``.'''
    if key not in environ or not environ[key]:
        return assignment(key, prefix)
    if not IS_WINDOWS:
        return 'export %s="%s$%s"' % (key, prefix, key)
    else:
        return 'set %s=%s%%%s%%' % (key, prefix, key)


def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for all workspaces.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))

    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # process from lowest to highest priority so later workspaces win
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines


def _parse_arguments(args=None):
    '''Parse known command line arguments; unknown ones are ignored.'''
    parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
    parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
    return parser.parse_known_args(args=args)[0]


if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)

        # environment at generation time
        CMAKE_PREFIX_PATH = '/home/praveen/Documents/ROS/tmp/rosaria/devel;/opt/ros/indigo'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)

        environ = dict(os.environ)
        lines = []
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))

        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise

    sys.exit(0)
{ "content_hash": "03488a62715646b7a8473bcda3e8581d", "timestamp": "", "source": "github", "line_count": 253, "max_line_length": 213, "avg_line_length": 42.1699604743083, "alnum_prop": 0.6403599212672227, "repo_name": "MRSDTeamI/bud-e", "id": "74303d2730d7a27ff50db056939eeae499b3b3c0", "size": "12293", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Navigation/base_controller/rosaria/build/catkin_generated/installspace/_setup_util.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Arduino", "bytes": "13972" }, { "name": "C", "bytes": "79562" }, { "name": "C++", "bytes": "1070383" }, { "name": "CMake", "bytes": "436528" }, { "name": "Common Lisp", "bytes": "56063" }, { "name": "Eagle", "bytes": "867931" }, { "name": "Makefile", "bytes": "280436" }, { "name": "Matlab", "bytes": "15312" }, { "name": "NewLisp", "bytes": "6146" }, { "name": "Python", "bytes": "526805" }, { "name": "Shell", "bytes": "131605" } ], "symlink_target": "" }
import string

INDENT = '    '

# Reserved words of the C language; these can never be identifiers.
KEYWORDS = ['auto', 'break', 'case', 'char', 'const', 'continue', 'default',
            'do', 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto',
            'if', 'int', 'long', 'register', 'return', 'short', 'signed',
            'sizeof', 'static', 'struct', 'switch', 'typedef', 'union',
            'unsigned', 'void', 'volatile', 'while']

# Characters allowed as the first / following characters of a C identifier.
ID_FIRST = '_' + string.ascii_letters
ID_OTHER = ID_FIRST + string.digits

# Standard C preprocessor directive names.
CPP_DIRECTIVES = ['assert', 'define', 'elif', 'else', 'endif', 'error',
                  'ident', 'if', 'ifdef', 'ifndef', 'import', 'include',
                  'include_next', 'line', 'pragma', 'sccs', 'unassert',
                  'undef', 'warning']


def is_identifier(s):
    '''Check if the given string represents a valid C identifier.'''
    if not s:
        return False
    if s in KEYWORDS:
        return False
    accept = ID_FIRST
    for c in s:
        if c not in accept:
            return False
        accept = ID_OTHER
    return True


def to_identifier(s):
    '''Convert the given string to a valid C identifier by replacing invalid
    chars by an underscore (keywords get an underscore appended).'''
    if not s:
        return '_'
    if s in KEYWORDS:
        return s + '_'

    def iter_chars():
        accept = ID_FIRST
        for c in s:
            if c in accept:
                yield c
            else:
                yield '_'
            accept = ID_OTHER
    return ''.join(iter_chars())


def byte_reader(file_object):
    '''Yield bytes from the given file object.'''
    while True:
        data = file_object.read(1)
        if not data:
            break
        yield data


def strip_comments(iterable, throw_final=True):
    '''Strip C and C++ comments from the given text, yielding the remaining bytes.

    The iterable argument must contain only byte values (0-255).  The result
    bytes contain all characters except those enclosed in C or C++ comments.
    The only exception is new line characters - those are yield always, even
    when inside a block comment, this way it is easy to determine the correct
    line number when the result is further processed.

    The code is aware of special cases like comment tokens (// or /*) inside
    literal strings and characters.

    If throw_final evaluates to True, the current state is checked after all
    input bytes have been processed.  If the internal FSM is not in the final
    state, a ValueError exception is risen.  This happens only when there are
    unclosed block comments, string or character literals.
    '''
    # States
    CODE = 0
    STRING = 1
    STRING_ESCAPE = 2
    CHAR = 3
    CHAR_ESCAPE = 4
    SLASH = 5
    LINECOMMENT = 6
    BLOCKCOMMENT = 7
    BLOCKASTER = 8
    BLOCKNEWLINE = 9
    state = CODE

    # State transitions; the '' key is the default for any other input byte.
    transitions = {
        CODE : {'"': STRING, "'": CHAR, '/': SLASH, },
        STRING : {'"': CODE, '\\': STRING_ESCAPE,},
        STRING_ESCAPE : { '': STRING },
        CHAR : {"'": CODE, '\\': CHAR_ESCAPE},
        CHAR_ESCAPE : {'': CHAR},
        SLASH : {'/': LINECOMMENT, '*': BLOCKCOMMENT, '': CODE},
        LINECOMMENT : {'\n': CODE},
        BLOCKCOMMENT : {'*': BLOCKASTER, '\n': BLOCKNEWLINE},
        BLOCKASTER : { '/': CODE, '*': BLOCKASTER, '': BLOCKCOMMENT, '\n': BLOCKNEWLINE },
        BLOCKNEWLINE : { '\n': BLOCKNEWLINE, '*': BLOCKASTER, '': BLOCKCOMMENT}
    }

    # Output generation (Mealy FSM).  The lookup order below allows wildcards
    # on either side of a transition; unmatched transitions echo the input.
    silent = lambda x : ''
    default = lambda x : x
    transition_out = {
        (CODE, SLASH) : silent,
        (SLASH, CODE) : lambda x: '/' + x,
        (SLASH, BLOCKCOMMENT) : silent,
        (SLASH, LINECOMMENT) : silent,
        (LINECOMMENT, LINECOMMENT) : silent,
        (LINECOMMENT, CODE) : default,

        (BLOCKCOMMENT, BLOCKNEWLINE) : default,
        (BLOCKASTER, BLOCKNEWLINE) : default,
        (BLOCKNEWLINE, BLOCKNEWLINE) : default,

        (BLOCKCOMMENT, None) : silent,
        (BLOCKASTER, None) : silent,
        (BLOCKNEWLINE, None) : silent,
    }

    for byte in iterable:
        trans = transitions[state]
        # 'new_state' instead of 'next' to avoid shadowing the builtin
        new_state = trans.get(byte, trans.get('', state))

        fn = (transition_out.get((state, new_state), None)
              or transition_out.get((state, None), None)
              or transition_out.get((None, new_state), None)
              or default)
        out = fn(byte)

        if False:   # Change to True for debugging
            state_desc = {0: 'CODE', 1: 'STRING', 2: 'STRING_ESCAPE', 3: 'CHAR', 4: 'CHAR_ESCAPE',
                          5: 'SLASH', 6: 'LINECOMMENT', 7: 'BLOCKCOMMENT', 8: 'BLOCKASTER',
                          9: 'BLOCKNEWLINE'}
            out_str = out.replace('\n', '\\n').replace('\t', '\\t')
            # parenthesized so the module stays importable under Python 3
            print('FSM %10s -> %10s : "%s"' % (state_desc[state], state_desc[new_state], out_str))

        for c in out:
            yield c
        state = new_state

    # Check for invalid final states
    if not throw_final:
        return

    if state in (STRING, STRING_ESCAPE):
        raise ValueError('''missing terminating '"' character''')
    elif state in (CHAR, CHAR_ESCAPE):
        raise ValueError('''missing terminating ' character''')
    elif state in (BLOCKCOMMENT, BLOCKASTER, BLOCKNEWLINE):
        raise ValueError('''unterminated /* comment''')


def iter_lines(iterable, throw_final=False):
    '''Yield pairs of line number and line contents.

    The function takes an iterable of characters (e.g. from byte_reader) and
    yields pairs of line numbers and their content.  Lines, which were split
    using the backslash character, are merged and yield together as a single
    line.  Lines are yield in order, but because of merging of split lines,
    some lines may be missing.

    C and C++ comments are automatically removed from the input using the
    strip_comments function.  The throw_final argument has the same meaning
    as in the case of the strip_comments function.
    '''
    def iter_lines_raw():
        '''Yield whole lines of input characters with the new line character removed.'''
        line = []
        for char in strip_comments(iterable, throw_final):
            if char != '\n':
                line.append(char)
            else:
                yield ''.join(line)
                line = []
        yield ''.join(line)

    next_line = []
    lineno = 0
    for lineno, line_raw in enumerate(iter_lines_raw(), 1):
        line_stripped = line_raw.rstrip()
        continued = line_stripped.endswith('\\')
        if continued:
            line_stripped = line_stripped[:-1]
        next_line.append(line_stripped)
        if not continued:
            item = ' '.join(next_line)
            yield lineno, item
            next_line = []
    # flush any pending (possibly empty) continuation buffer
    item = ' '.join(next_line)
    yield lineno, item
{ "content_hash": "d5b68321a8d252dbb523787278c50bc5", "timestamp": "", "source": "github", "line_count": 252, "max_line_length": 120, "avg_line_length": 30.11111111111111, "alnum_prop": 0.507380073800738, "repo_name": "Samsung/ADBI", "id": "9ff16cc272c0342455933789db0fd4451eb65b78", "size": "7588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "idk/cachereader/C.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "18203" }, { "name": "C", "bytes": "577712" }, { "name": "C++", "bytes": "150691" }, { "name": "Logos", "bytes": "354" }, { "name": "Makefile", "bytes": "12730" }, { "name": "PLSQL", "bytes": "4139" }, { "name": "Python", "bytes": "335683" }, { "name": "Shell", "bytes": "7998" } ], "symlink_target": "" }
import os

from ingenico.connect.sdk.factory import Factory


class GetMandateExample(object):
    """Example: fetch a single mandate through the Connect SDK."""

    def example(self):
        # Acquire a client; the with-block releases it when done.
        with self.__create_client() as client:
            mandates_service = client.merchant("merchantId").mandates()
            response = mandates_service.get("42268d8067df43e18a50a2ebf4bdb729")

    def __create_client(self):
        # Credentials come from the environment, with placeholder fallbacks.
        key_id = os.getenv("connect.api.apiKeyId", "someKey")
        secret = os.getenv("connect.api.secretApiKey", "someSecret")
        # The shared example configuration sits two directories up.
        config_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../../example_configuration.ini'))
        return Factory.create_client_from_file(configuration_file_name=config_path,
                                               api_key_id=key_id,
                                               secret_api_key=secret)
{ "content_hash": "bb567a3647e978b1933d8afc520c6884", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 103, "avg_line_length": 45.833333333333336, "alnum_prop": 0.6060606060606061, "repo_name": "Ingenico-ePayments/connect-sdk-python2", "id": "4cc1f7772a81dda2ef9e3b1bde62ace58fd31086", "size": "952", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/merchant/mandates/get_mandate_example.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "36" }, { "name": "Python", "bytes": "1733005" } ], "symlink_target": "" }
from pyramid.config import Configurator
from pyramid.authorization import ACLAuthorizationPolicy as ACLPolicy
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from pyramid.interfaces import IBeforeRender
from pyramid.events import NewRequest

from .security import groupfinder, RootFactory
from .config import includeme  # used by pyramid
from .models import create_engine
from .helpers.i18n import locale_negotiator
from .helpers.authentication import RouteSwithchAuthPolicy

__version__ = '1.7'


def main(global_config, **settings):
    """Build and return the pyvac WSGI application."""
    settings = dict(settings)

    # Scoping sessions for Pyramid ensure session are commit/rollback
    # after the template has been rendered
    create_engine(settings, scoped=True)

    secret = settings['pyvac.cookie_key']
    config = Configurator(
        settings=settings,
        root_factory=RootFactory,
        locale_negotiator=locale_negotiator,
        authentication_policy=RouteSwithchAuthPolicy(secret=secret,
                                                     callback=groupfinder),
        authorization_policy=ACLPolicy(),
        session_factory=UnencryptedCookieSessionFactoryConfig(secret),
    )

    # i18n hooks: inject renderer globals and a request-local localizer.
    config.add_subscriber('pyvac.helpers.i18n.add_renderer_globals',
                          IBeforeRender)
    config.add_subscriber('pyvac.helpers.i18n.add_localizer',
                          NewRequest)

    config.end()
    return config.make_wsgi_app()
{ "content_hash": "65b633d40251fec9b656864861ae94e1", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 78, "avg_line_length": 35.55555555555556, "alnum_prop": 0.690625, "repo_name": "doyousoft/pyvac", "id": "bf78e11b9d7f26f9c23424e3c2ed539cb4d96c79", "size": "1624", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyvac/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "96138" }, { "name": "HTML", "bytes": "54295" }, { "name": "JavaScript", "bytes": "4635" }, { "name": "Python", "bytes": "257565" } ], "symlink_target": "" }
"""
.. versionadded:: 0.1
.. versionchanged:: 1.2.0

The normalized least-mean-square (NLMS) adaptive filter
is an extension of the popular LMS adaptive filter (:ref:`filter-lms`).

The NLMS filter can be created as follows

    >>> import padasip as pa
    >>> pa.filters.FilterNLMS(n)

where `n` is the size (number of taps) of the filter.

Content of this page:

.. contents::
   :local:
   :depth: 1

.. seealso:: :ref:`filters`

Algorithm Explanation
======================================

The NLMS is an extension of the LMS filter. See :ref:`filter-lms`
for an explanation of the algorithm behind it.

The extension is based on normalization of the learning rate.
The learning rate :math:`\mu` is replaced by a learning rate :math:`\eta(k)`
normalized with every new sample according to input power as follows

:math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,

where :math:`|| \\textbf{x}(k) ||^2` is the norm of the input vector and
:math:`\epsilon` is a small positive constant (regularization term).
This constant is introduced to preserve stability in cases where
the input is close to zero.

Stability and Optimal Performance
======================================

The stability of the NLMS filter is given as follows

:math:`0 \le \mu \le 2 + \\frac{2\epsilon}{||\\textbf{x}(k)||^2}`,

or in case without the regularization term :math:`\epsilon`

:math:`\mu \in <0, 2>`.

In other words, if you use the zero or only a small key argument `\eps`,
the key argument `\mu` should be between 0 and 2. Best convergence
should be produced by `mu=1.` according to theory. However in practice
the optimal value can be strongly case specific.

Minimal Working Examples
======================================

If you have measured data you may filter it as follows

.. code-block:: python

    import numpy as np
    import matplotlib.pylab as plt
    import padasip as pa

    # creation of data
    N = 500
    x = np.random.normal(0, 1, (N, 4)) # input matrix
    v = np.random.normal(0, 0.1, N) # noise
    d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target

    # identification
    f = pa.filters.FilterNLMS(n=4, mu=0.1, w="random")
    y, e, w = f.run(d, x)

    # show results
    plt.figure(figsize=(15,9))
    plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
    plt.plot(d,"b", label="d - target")
    plt.plot(y,"g", label="y - output");plt.legend()
    plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
    plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
    plt.tight_layout()
    plt.show()

An example how to filter data measured in real-time

.. code-block:: python

    import numpy as np
    import matplotlib.pylab as plt
    import padasip as pa

    # these two function supplement your online measurment
    def measure_x():
        # it produces input vector of size 3
        x = np.random.random(3)
        return x

    def measure_d(x):
        # meausure system output
        d = 2*x[0] + 1*x[1] - 1.5*x[2]
        return d

    N = 100
    log_d = np.zeros(N)
    log_y = np.zeros(N)
    filt = pa.filters.FilterNLMS(3, mu=1.)
    for k in range(N):
        # measure input
        x = measure_x()
        # predict new value
        y = filt.predict(x)
        # do the important stuff with prediction output
        pass
        # measure output
        d = measure_d(x)
        # update filter
        filt.adapt(d, x)
        # log values
        log_d[k] = d
        log_y[k] = y

    ### show results
    plt.figure(figsize=(15,9))
    plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
    plt.plot(log_d,"b", label="d - target")
    plt.plot(log_y,"g", label="y - output");plt.legend()
    plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
    plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
    plt.legend(); plt.tight_layout(); plt.show()

Code Explanation
======================================
"""
import numpy as np

from padasip.filters.base_filter import AdaptiveFilter


class FilterNLMS(AdaptiveFilter):
    """
    Adaptive NLMS filter.

    See the module docstring for the algorithm description and usage
    examples.
    """

    kind = "NLMS"

    def __init__(self, n, mu=0.1, eps=0.001, **kwargs):
        """
        :param n: size (number of taps) of the filter
        :param mu: learning rate (float)

        **Kwargs:**

        * `eps` : regularization term (float). It is introduced to preserve
          stability for close-to-zero input vectors.
        """
        super().__init__(n, mu, **kwargs)
        self.eps = eps

    def learning_rule(self, e, x):
        """
        Override the parent class.

        Returns the weight update ``mu / (eps + x.x) * x * e``, i.e. the LMS
        update with the learning rate normalized by the input power.
        """
        return self.mu / (self.eps + np.dot(x, x)) * x * e
{ "content_hash": "e9564c520553ac0ef62c41d5b44d6318", "timestamp": "", "source": "github", "line_count": 164, "max_line_length": 75, "avg_line_length": 27.926829268292682, "alnum_prop": 0.5989082969432314, "repo_name": "matousc89/padasip", "id": "e2849af9e57bc32ac8e8e84cdf6954f6ded34964", "size": "4580", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "padasip/filters/nlms.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "132278" } ], "symlink_target": "" }
"""Unit tests for the fuel_upgrade pre-upgrade hooks and their manager."""

import mock
import os

from fuel_upgrade.tests.base import BaseTestCase

from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade.pre_upgrade_hooks.from_5_0_1_to_any_fix_host_system_repo \
    import FixHostSystemRepoHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_add_credentials \
    import AddCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests \
    import FixPuppetManifests
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_sync_dns \
    import SyncDnsHook
from fuel_upgrade.pre_upgrade_hooks import PreUpgradeHookManager
from fuel_upgrade.pre_upgrade_hooks. \
    from_5_0_x_to_any_copy_openstack_release_versions \
    import CopyOpenstackReleaseVersions


class TestPreUpgradeHooksBase(BaseTestCase):
    """Shared fixture: two mock upgrade-engine classes and their instances."""

    def setUp(self):

        class Upgrader1(mock.MagicMock):
            pass

        class Upgrader2(mock.MagicMock):
            pass

        self.upgraders_cls = [Upgrader1, Upgrader2]
        self.upgraders = [upgrade_cls() for upgrade_cls in self.upgraders_cls]


class TestAddCredentialsHook(TestPreUpgradeHooksBase):
    """Tests for AddCredentialsHook (adds credential sections to astute.yaml)."""

    def setUp(self):
        super(TestAddCredentialsHook, self).setUp()
        # keys the hook is expected to add when missing
        self.additional_keys = [
            'astute',
            'cobbler',
            'mcollective',
            'postgres',
            'keystone',
            'FUEL_ACCESS']

    def get_hook(self, astute):
        config = self.fake_config
        config.astute = astute

        return AddCredentialsHook(self.upgraders, config)

    def test_is_required_returns_true(self):
        hook = self.get_hook({})
        self.assertTrue(hook.check_if_required())

    def test_is_required_returns_false(self):
        hook = self.get_hook({
            'astute': {},
            'cobbler': {},
            'mcollective': {},
            'postgres': {},
            'keystone': {},
            'FUEL_ACCESS': {}})

        self.assertFalse(hook.check_if_required())

    @mock.patch('fuel_upgrade.pre_upgrade_hooks.'
                'from_5_0_to_any_add_credentials.read_yaml_config')
    @mock.patch('fuel_upgrade.pre_upgrade_hooks.'
                'from_5_0_to_any_add_credentials.utils')
    def test_run(self, utils_mock, read_yaml_config_mock):
        file_key = 'this_key_was_here_before_upgrade'
        hook = self.get_hook({file_key: file_key})
        read_yaml_config_mock.return_value = hook.config.astute
        hook.run()

        # a backup copy of astute.yaml must be made first
        utils_mock.copy_file.assert_called_once_with(
            '/etc/fuel/astute.yaml',
            '/etc/fuel/astute.yaml_0',
            overwrite=False)

        agrs = utils_mock.save_as_yaml.call_args
        self.assertEqual(agrs[0][0], '/etc/fuel/astute.yaml')

        # Check that the key which was in file
        # won't be overwritten
        self.additional_keys.append(file_key)
        # Check that all required keys are in method call
        self.assertTrue(all(
            key in self.additional_keys
            for key in agrs[0][1].keys()))


class TestSyncDnsHook(TestPreUpgradeHooksBase):
    """Tests for SyncDnsHook (syncs DNS settings in astute.yaml with the host)."""

    def setUp(self):
        super(TestSyncDnsHook, self).setUp()
        self.additional_keys = [
            'DNS_DOMAIN',
            'DNS_SEARCH']

    def get_hook(self, astute):
        config = self.fake_config
        config.astute = astute

        return SyncDnsHook(self.upgraders, config)

    def test_is_required_returns_true(self):
        hook = self.get_hook({
            'DNS_DOMAIN': 'veryunlikelydomain',
            'DNS_SEARCH': 'veryunlikelydomain'})
        self.assertTrue(hook.check_if_required())

    def test_is_required_returns_false(self):
        # use the real host domain so the hook sees nothing to sync
        hostname, sep, realdomain = os.uname()[1].partition('.')
        hook = self.get_hook({
            'DNS_DOMAIN': realdomain,
            'DNS_SEARCH': realdomain})

        self.assertFalse(hook.check_if_required())

    @mock.patch('fuel_upgrade.pre_upgrade_hooks.'
                'from_5_0_to_any_sync_dns.read_yaml_config')
    @mock.patch('fuel_upgrade.pre_upgrade_hooks.'
                'from_5_0_to_any_sync_dns.utils')
    def test_run(self, utils_mock, read_yaml_config):
        file_key = 'this_key_was_here_before_upgrade'
        hook = self.get_hook({file_key: file_key})
        read_yaml_config.return_value = hook.config.astute
        hook.run()

        utils_mock.copy_file.assert_called_once_with(
            '/etc/fuel/astute.yaml',
            '/etc/fuel/astute.yaml_0',
            overwrite=False)

        args = utils_mock.save_as_yaml.call_args
        self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')

        # Check that the key which was in file
        # won't be overwritten
        self.additional_keys.append(file_key)
        # Check that all required keys are in method call
        self.assertTrue(all(
            key in self.additional_keys
            for key in args[0][1].keys()))


class TestFixPuppetManifestHook(TestPreUpgradeHooksBase):
    """Tests for FixPuppetManifests (copies patched manifests for 5.0/5.0.1)."""

    # paths reported by the mocked iterfiles() call
    iterfiles_returns = [
        '/tmp/upgrade_path/config/5.0/modules/package/lib/puppet'
        '/provider/package/yum.rb',
        '/tmp/upgrade_path/config/5.0/manifests/centos-versions.yaml']

    def setUp(self):
        super(TestFixPuppetManifestHook, self).setUp()

        conf = self.fake_config
        conf.from_version = '5.0'

        self.hook = FixPuppetManifests(self.upgraders, conf)

    def test_is_required_returns_true(self):
        self.hook.config.from_version = '5.0'
        self.assertTrue(self.hook.check_if_required())

        self.hook.config.from_version = '5.0.1'
        self.assertTrue(self.hook.check_if_required())

    def test_is_required_returns_false(self):
        self.hook.config.from_version = '5.1'
        self.assertFalse(self.hook.check_if_required())

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests.'
        'iterfiles', return_value=iterfiles_returns)
    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests.'
        'copy')
    def test_run(self, copy, _):
        self.hook.run()
        copy.assert_has_calls([
            mock.call(
                '/tmp/upgrade_path/config/5.0/modules/package/lib'
                '/puppet/provider/package/yum.rb',
                '/etc/puppet/modules/package/lib/puppet/provider/package'
                '/yum.rb'),
            mock.call(
                '/tmp/upgrade_path/config/5.0/manifests'
                '/centos-versions.yaml',
                '/etc/puppet/manifests/centos-versions.yaml')])


class TestFixHostSystemRepoHook(TestPreUpgradeHooksBase):
    """Tests for FixHostSystemRepoHook (rewrites the 5.0.1 nailgun yum repo)."""

    def setUp(self):
        super(TestFixHostSystemRepoHook, self).setUp()

        conf = self.fake_config
        conf.from_version = '5.0.1'
        self.hook = FixHostSystemRepoHook(self.upgraders, conf)

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_1_to_any_fix_host_system_repo.'
        'utils.file_exists', return_value=True)
    def test_is_required_returns_true(self, exists_mock):
        self.hook.config.from_version = '5.0.1'
        self.assertTrue(self.hook.check_if_required())
        self.assertEqual(
            exists_mock.call_args_list,
            [mock.call('/var/www/nailgun/5.0.1/centos/x86_64'),
             mock.call('/etc/yum.repos.d/5.0.1_nailgun.repo')])

    def test_is_required_returns_false(self):
        self.hook.config.from_version = '5.0'
        self.assertFalse(self.hook.check_if_required())

        self.hook.config.from_version = '5.1'
        self.assertFalse(self.hook.check_if_required())

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_1_to_any_fix_host_system_repo.'
        'utils.file_exists', return_value=False)
    def test_is_required_returns_false_if_repo_file_does_not_exist(self, _):
        self.assertFalse(self.hook.check_if_required())

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_1_to_any_fix_host_system_repo.'
        'utils.file_exists', side_effect=[True, False])
    def test_is_required_returns_false_repo_does_not_exist(self, _):
        self.assertFalse(self.hook.check_if_required())

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_1_to_any_fix_host_system_repo.utils')
    def test_run(self, mock_utils):
        self.hook.run()
        args, _ = mock_utils.render_template_to_file.call_args_list[0]

        # The first argument is a path to
        # template in upgrade script directory
        # it can be different and depends on
        # code allocation
        self.assertTrue(args[0].endswith('templates/nailgun.repo'))
        self.assertEqual(
            args[1:],
            ('/etc/yum.repos.d/5.0.1_nailgun.repo',
             {'repo_path': '/var/www/nailgun/5.0.1/centos/x86_64',
              'version': '5.0.1'}))


class TestPreUpgradeHookBase(TestPreUpgradeHooksBase):
    """Tests for the PreUpgradeHookBase is_required / engine-gating logic."""

    def get_hook(self, check_if_required=False, enable_for_engines=[]):
        """Build a minimal concrete hook with the given gating behaviour."""

        class PreUpgradeHook(PreUpgradeHookBase):

            def check_if_required(self):
                return check_if_required

            @property
            def enable_for_engines(self):
                return enable_for_engines

            def run(self):
                pass

        return PreUpgradeHook(self.upgraders, self.fake_config)

    @mock.patch('fuel_upgrade.pre_upgrade_hooks.base.'
                'PreUpgradeHookBase.is_enabled_for_engines',
                return_value=False)
    def test_is_required_returns_false(self, _):
        self.assertFalse(self.get_hook().is_required)

    @mock.patch('fuel_upgrade.pre_upgrade_hooks.base.'
                'PreUpgradeHookBase.is_enabled_for_engines',
                return_value=True)
    def test_is_required_returns_true(self, _):
        self.assertTrue(self.get_hook(check_if_required=True).is_required)

    def test_is_enabled_for_engines_returns_true(self):
        self.assertTrue(
            self.get_hook(
                check_if_required=True,
                enable_for_engines=[self.upgraders_cls[0]]).is_required)

    def test_is_enabled_for_engines_returns_false(self):

        class SomeEngine(object):
            pass

        self.assertFalse(
            self.get_hook(
                check_if_required=True,
                enable_for_engines=[SomeEngine]).is_required)


class TestPreUpgradeHookManager(TestPreUpgradeHooksBase):
    """Tests that the manager runs only the hooks reporting is_required."""

    def setUp(self):
        super(TestPreUpgradeHookManager, self).setUp()

        self.required_hooks = [mock.MagicMock(), mock.MagicMock()]
        for hook in self.required_hooks:
            type(hook).is_required = mock.PropertyMock(return_value=True)

        self.not_required_hooks = [mock.MagicMock()]
        for hook in self.not_required_hooks:
            type(hook).is_required = mock.PropertyMock(return_value=False)

        self.hooks = []
        self.hooks.extend(self.required_hooks)
        self.hooks.extend(self.not_required_hooks)

        self.hook_manager = PreUpgradeHookManager(
            self.upgraders, self.fake_config)

    def test_run(self):
        self.hook_manager.pre_upgrade_hooks = self.hooks
        self.hook_manager.run()

        for hook in self.required_hooks:
            self.called_once(hook.run)

        for hook in self.not_required_hooks:
            self.method_was_not_called(hook.run)


class TestCopyOpenstackReleaseVersions(TestPreUpgradeHooksBase):
    """Tests for CopyOpenstackReleaseVersions (copies release version files)."""

    iterfiles_returns = [
        '/tmp/upgrade_path/config/5.0/modules/package/lib/puppet'
        '/provider/package/yum.rb',
        '/tmp/upgrade_path/config/5.0/manifests/centos-versions.yaml']

    def setUp(self):
        super(TestCopyOpenstackReleaseVersions, self).setUp()

        conf = self.fake_config
        conf.from_version = '5.0.1'

        self.hook = CopyOpenstackReleaseVersions(self.upgraders, conf)

    def test_is_required_returns_true(self):
        self.hook.config.from_version = '5.0'
        self.assertTrue(self.hook.check_if_required())

        self.hook.config.from_version = '5.0.1'
        self.assertTrue(self.hook.check_if_required())

    def test_is_required_returns_false(self):
        self.hook.config.from_version = '5.1'
        self.assertFalse(self.hook.check_if_required())

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_x_to_any_copy_openstack_release_versions.utils')
    def test_run(self, mock_utils):
        self.hook.run()
        self.assertEqual(
            mock_utils.create_dir_if_not_exists.call_args_list,
            [mock.call(self.hook.release_dir)])
        self.assertEqual(
            mock_utils.copy_if_exists.call_args_list,
            [mock.call(self.hook.version_path_5_0,
                       self.hook.dst_version_path_5_0),
             mock.call(self.hook.version_path_5_0_1,
                       self.hook.dst_version_path_5_0_1)])

    @mock.patch(
        'fuel_upgrade.pre_upgrade_hooks.'
        'from_5_0_x_to_any_copy_openstack_release_versions.utils')
    def test_run_from_5_0(self, mock_utils):
        # from 5.0 only the 5.0 version file is expected to be copied
        self.hook.config.from_version = '5.0'
        self.hook.run()
        self.assertEqual(
            mock_utils.copy_if_exists.call_args_list,
            [mock.call(self.hook.version_path_5_0,
                       self.hook.dst_version_path_5_0)])
{ "content_hash": "cd4375b346c83e32d91537fcd10f8bd2", "timestamp": "", "source": "github", "line_count": 381, "max_line_length": 78, "avg_line_length": 34.79265091863517, "alnum_prop": 0.616852745926373, "repo_name": "Axam/nsx-web", "id": "da2f77a6592c0bb9f2faa62c0b293199ea0bcdb1", "size": "13892", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/test_pre_upgrade_hooks.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "99402" }, { "name": "JavaScript", "bytes": "553275" }, { "name": "Python", "bytes": "2623980" }, { "name": "Ruby", "bytes": "33345" }, { "name": "Shell", "bytes": "29681" } ], "symlink_target": "" }
"""take out the volumne outliers in an image""" import nibabel as nib import matplotlib.pyplot as plt import numpy as np import numpy.linalg as npl from itertools import product import diagnostics reload(diagnostics) img=nib.load('ds005/sub001/BOLD/task001_run001/bold.nii') data=img.get_data() """get std""" volstd = diagnostics.vol_std(data) outliers_index, thres = diagnostics.iqr_outliers(volstd)
{ "content_hash": "efc1d911c6e51c84c0b8463a1a8f80d2", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 57, "avg_line_length": 16.56, "alnum_prop": 0.7584541062801933, "repo_name": "soazig/project-epsilon", "id": "5f22f02d6b89cc604de96af1fd4dc06c36387f30", "size": "414", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "code/utils/get_outlier.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "7774" }, { "name": "Python", "bytes": "25463" }, { "name": "TeX", "bytes": "6945" } ], "symlink_target": "" }
"""Tests for GRU V2 layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import os import shutil from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import combinations from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import recurrent as rnn_v1 from tensorflow.python.keras.layers import recurrent_v2 as rnn from tensorflow.python.keras.utils import np_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent # Global config for grappler setting that is used for graph mode test. 
_rewrites = rewriter_config_pb2.RewriterConfig() _rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON _rewrites.min_graph_nodes = -1 _graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites) _config = config_pb2.ConfigProto(graph_options=_graph_options) @testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU') @keras_parameterized.run_all_keras_modes(config=_config) class GRUV2Test(keras_parameterized.TestCase): @parameterized.named_parameters( ('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True), ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True), ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True), ('unroll', 'tanh', 'sigmoid', 0, True, True, True), ('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True), ('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False) ) def test_could_use_defun_backend(self, activation, recurrent_activation, recurrent_dropout, unroll, use_bias, reset_after): layer = rnn.GRU(1, activation=activation, recurrent_activation=recurrent_activation, recurrent_dropout=recurrent_dropout, unroll=unroll, use_bias=use_bias, reset_after=reset_after) self.assertFalse(layer._could_use_gpu_kernel) @testing_utils.run_v2_only def test_use_on_default_activation_with_gpu_kernel(self): layer = rnn.GRU(1, activation=nn.tanh) self.assertTrue(layer._could_use_gpu_kernel) layer = rnn.GRU(1, recurrent_activation=nn.sigmoid) self.assertTrue(layer._could_use_gpu_kernel) def test_keras_model_with_gru(self): input_shape = 10 rnn_state_size = 8 output_shape = 8 timestep = 4 batch = 100 epoch = 10 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = np_utils.to_categorical(y_train, output_shape) layer = rnn.GRU(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = layer(inputs) model = 
keras.models.Model(inputs, outputs) model.compile('rmsprop', loss='mse') model.fit(x_train, y_train, epochs=epoch) model.evaluate(x_train, y_train) model.predict(x_train) def test_dynamic_behavior_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = rnn.GRU(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_stacking_GRU(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(rnn.GRU(10, return_sequences=True, unroll=False)) model.add(rnn.GRU(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_from_config_GRU(self): layer_class = rnn.GRU for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') @testing_utils.run_v2_only def test_gru_v2_feature_parity_with_canonical_gru(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 20 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=rnn_state_size, random_seed=87654321) y_train = np_utils.to_categorical(y_train, rnn_state_size) # For the last batch item of the test data, we filter out the last # timestep to simulate the variable length sequence and masking test. 
x_train[-2:, -1, :] = 0.0 y_train[-2:] = 0 inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) masked_input = keras.layers.Masking()(inputs) gru_layer = rnn_v1.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) output = gru_layer(masked_input) gru_model = keras.models.Model(inputs, output) weights = gru_model.get_weights() y_1 = gru_model.predict(x_train) gru_model.compile('rmsprop', 'mse') gru_model.fit(x_train, y_train) y_2 = gru_model.predict(x_train) with testing_utils.device(should_use_gpu=True): cudnn_layer = rnn.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input)) cudnn_model.set_weights(weights) y_3 = cudnn_model.predict(x_train) cudnn_model.compile('rmsprop', 'mse') cudnn_model.fit(x_train, y_train) y_4 = cudnn_model.predict(x_train) self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5) self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5) @parameterized.named_parameters( # test_name, use_bias, bias_initializer, activation ('normal', True, 'zeros'), ('no_bias', False, 'zeros'), ('random_bias', True, 'random_uniform'), ) def test_gru_v2_model_save_load(self, use_bias, bias_initializer): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') batch = 10 timestep = 3 input_dim = 5 units = 2 x = np.random.random((batch, timestep, input_dim)) def build_model(): inputs = keras.layers.Input( shape=[timestep, input_dim], dtype=dtypes.float32) layer = rnn.GRU( units, use_bias=use_bias, bias_initializer=bias_initializer) output = layer(inputs) return keras.models.Model(inputs, output), layer model, layer = build_model() y_ref = model.predict(x) model.save_weights(h5_path) cloned_model, new_layer = build_model() cloned_model.load_weights(h5_path) y = cloned_model.predict(x) self.assertAllClose(y, y_ref) self.assertAllClose(layer.get_weights(), new_layer.get_weights()) def 
test_gru_v2_output_on_multiple_kernel(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) with testing_utils.device(should_use_gpu=False): layer = rnn.GRU(rnn_state_size) output = layer(inputs) cpu_model = keras.models.Model(inputs, output) weights = cpu_model.get_weights() y_1 = cpu_model.predict(x_train) with testing_utils.device(should_use_gpu=True): layer = rnn.GRU(rnn_state_size) output = layer(inputs) gpu_model = keras.models.Model(inputs, output) gpu_model.set_weights(weights) y_2 = gpu_model.predict(x_train) # Note that CuDNN uses 'sigmoid' as activation, so the GRU V2 uses # 'sigmoid' as default. Construct the canonical GRU with sigmoid to achieve # the same output. with testing_utils.device(should_use_gpu=True): layer = rnn_v1.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) output = layer(inputs) canonical_model = keras.models.Model(inputs, output) canonical_model.set_weights(weights) y_3 = canonical_model.predict(x_train) self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5) self.assertAllClose(y_2, y_3, rtol=1e-5, atol=1e-5) @parameterized.named_parameters( # test_name, time_major, go_backwards ('normal', False, False), ('time_major', True, False), ('go_backwards', False, True), ('both', True, True), ) def test_time_major_and_go_backward(self, time_major, go_backwards): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) def build_model(layer_cls): inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) layer = layer_cls(rnn_state_size, recurrent_activation='sigmoid', time_major=time_major, return_sequences=True, go_backwards=go_backwards, reset_after=True) if time_major: converted_input = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs) outputs = 
layer(converted_input) outputs = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs) else: outputs = layer(inputs) return keras.models.Model(inputs, outputs) gru_model = build_model(rnn_v1.GRU) y_ref = gru_model.predict(x_train) weights = gru_model.get_weights() gru_v2_model = build_model(rnn.GRU) gru_v2_model.set_weights(weights) y = gru_v2_model.predict(x_train) self.assertAllClose(y, y_ref) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') def test_with_masking_layer_GRU(self): layer_class = rnn.GRU inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile(loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.001)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') def test_masking_with_stacking_GRU(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(rnn.GRU(10, return_sequences=True, unroll=False)) model.add(rnn.GRU(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_return_sequences_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) 
@test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Double type is not yet supported in ROCm') @testing_utils.run_v2_only def test_float64_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'return_sequences': True, 'dtype': 'float64'}, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') def test_return_states_GRU(self): layer_class = rnn.GRU x = np.random.random((2, 3, 4)) y = np.abs(np.random.random((2, 5))) s = np.abs(np.random.random((2, 5))) inputs = keras.layers.Input( shape=[3, 4], dtype=dtypes.float32) masked = keras.layers.Masking()(inputs) outputs, states = layer_class(units=5, return_state=True)(masked) model = keras.models.Model(inputs, [outputs, states]) model.compile(loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.001)) model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1) def test_dropout_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) def test_constraints_GRU(self): embedding_dim = 4 layer_class = rnn.GRU k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) 
@parameterized.parameters([0, 1, 2]) def test_implementation_mode_GRU(self, implementation_mode): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'implementation': implementation_mode}, input_shape=(num_samples, timesteps, embedding_dim)) def test_regularizers_GRU(self): embedding_dim = 4 layer_class = rnn.GRU layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') def test_statefulness_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = rnn.GRU model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model 
itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) np.testing.assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) layer.reset_states() mix_padded_input = np.ones((num_samples, timesteps)) mix_padded_input[0, 1] = 0 mix_padded_input[1, 0] = 0 mix_padded_input[1, 2] = 0 out8 = model.predict(mix_padded_input) self.assertAllClose(out7, out6, atol=1e-5) self.assertAllClose(out8, out7, atol=1e-5) def test_stateful_GRU_training(self): # See b/123587692 for more context. 
vocab_size = 20 embedding_dim = 10 batch_size = 8 timestep = 12 units = 5 x = np.random.randint(0, vocab_size, size=(batch_size, timestep)) y = np.random.randint(0, vocab_size, size=(batch_size, timestep)) model = keras.Sequential([ keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, timestep]), rnn.GRU(units, return_sequences=True, stateful=True), keras.layers.Dense(vocab_size) ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, shuffle=False) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') @testing_utils.run_v2_only def test_explicit_device_with_go_backward_and_mask(self): batch_size = 8 timestep = 7 masksteps = 5 units = 4 inputs = np.random.randn(batch_size, timestep, units).astype(np.float32) mask = np.ones((batch_size, timestep)).astype(np.bool) mask[:, masksteps:] = 0 # Test for V1 behavior. lstm_v1 = rnn_v1.GRU(units, return_sequences=True, go_backwards=True) with testing_utils.device(should_use_gpu=True): outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask)) outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps]) self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1) # Test for V2 behavior. lstm = rnn.GRU(units, return_sequences=True, go_backwards=True) with testing_utils.device(should_use_gpu=True): outputs_masked = lstm(inputs, mask=constant_op.constant(mask)) outputs_trimmed = lstm(inputs[:, :masksteps]) self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed) @tf_test_util.enable_output_all_intermediates def test_v1_session_behavior(self): with ops.get_default_graph().as_default(): # See b/139132348 for more details. 
x = np.random.uniform(size=(100, 4, 8)) y = np.random.uniform(size=(100, 1)) dataset = dataset_ops.Dataset.from_tensor_slices( (x, y)).shuffle(100).batch(32) inp = keras.layers.Input(shape=(4, 8)) layer = rnn.GRU(1)(inp) layer = keras.layers.Dense(1)(layer) model = keras.models.Model(inp, layer) model.compile(loss='mse', optimizer='sgd') model.fit(dataset) def test_with_fully_masked_inputs(self): num_samples = 8 timestep = 5 embedding_dim = 4 vocab_size = 20 units = 2 inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep)) # Set the first inputs to be fully zero. inputs[0, :] = 0.0 model = keras.models.Sequential() model.add( keras.layers.Embedding( vocab_size, embedding_dim, mask_zero=True, input_length=timestep, batch_input_shape=(num_samples, timestep))) layer = rnn.GRU(units) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly()) # Make sure it doesn't crash with cudnn kernel. model.predict(inputs) # TODO (b/169895267): test with xla_gpu is disabled. def test_deepcopy(self): if not context.executing_eagerly(): self.skipTest('v2-only test') original_layer = rnn.GRU(5) copied_layer = copy.deepcopy(original_layer) self.assertEqual(copied_layer.units, 5) self.assertEqual(original_layer.get_config(), original_layer.get_config()) # Copy layer before layer call on inputs without weight initialization. inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32) original_layer = rnn.GRU(4) copied_layer = copy.deepcopy(original_layer) outputs = original_layer(inputs) copied_outputs = copied_layer(inputs) self.assertNotAllClose( self.evaluate(outputs), self.evaluate(copied_outputs)) # Copy layer after layer call on inputs with weight initialization. 
original_layer = rnn.GRU(4) outputs = original_layer(inputs) copied_layer = copy.deepcopy(original_layer) copied_outputs = copied_layer(inputs) self.assertAllClose(self.evaluate(outputs), self.evaluate(copied_outputs)) @testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU') class GRULayerGradientTapeTest(keras_parameterized.TestCase): @combinations.generate(combinations.combine(mode=['eager'])) def test_in_tape(self): with self.test_session(config=_config): time_steps = 10 embedding_size = 11 gru_unit_size = 12 gru = rnn.GRU(gru_unit_size, return_sequences=True, return_state=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform') x = random_ops.random_uniform([1, time_steps, embedding_size]) y = random_ops.random_uniform([1, gru_unit_size]) with backprop.GradientTape() as tape: hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32) _, state = gru(x, initial_state=hidden_state) loss = math_ops.reduce_mean(math_ops.square(state - y)) tape.gradient(loss, gru.variables) @testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU') @keras_parameterized.run_all_keras_modes(config=_config) class GRUGraphRewriteTest(keras_parameterized.TestCase): input_shape = 10 output_shape = 8 rnn_state_size = 8 timestep = 4 batch = 100 epoch = 1 def _test_runtime_with_model(self, model): (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = np_utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None]) existing_loss = 0 for _ in range(self.epoch): history = model.fit(x_train, y_train) loss_value = history.history['loss'][0] self.assertNotEqual(existing_loss, loss_value) existing_loss = loss_value _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) 
else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @testing_utils.run_v2_only def test_GRU_runtime(self): layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) outputs, runtime = layer(inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message='Skipping as ROCm MIOpen does not support padded input yet.') @testing_utils.run_v2_only def test_GRU_runtime_with_mask(self): # Masking will affect which backend is selected based on whether the mask # is strictly right padded. layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) masked_inputs = keras.layers.Masking()(inputs) outputs, runtime = layer(masked_inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = np_utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None], run_eagerly=testing_utils.should_run_eagerly()) model.fit(x_train, y_train) # Verify unpadded data. 
_, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Update x/y to be right padded by setting the last timestep to 0 x_train[:, -1, :] = 0 y_train[:, -1] = 0 _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Further update x/y to be mix padded (masks in the middle), and verify # only cpu kernel can be selected. x_train[:, -3, :] = 0 y_train[:, -3] = 0 _, runtime_value = model.predict(x_train) self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @testing_utils.run_v2_only def test_GRU_runtime_with_cond(self): # This test is to demonstrate the graph rewrite of grappler plugin under # the condition that the function returns different number of internal # states. layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) zeros = array_ops.zeros([self.batch, self.output_shape]) dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN) a = constant_op.constant(0) b = constant_op.constant(1) # Will always run the GRU layer. outputs, runtime = control_flow_ops.cond( gen_math_ops.less(a, b), lambda: layer(inputs), lambda: (zeros, dummy_runtime)) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) if __name__ == '__main__': test.main()
{ "content_hash": "b57ddf3ab753945fe8e31cef38d82cb7", "timestamp": "", "source": "github", "line_count": 820, "max_line_length": 80, "avg_line_length": 36.201219512195124, "alnum_prop": 0.6555162540003369, "repo_name": "petewarden/tensorflow", "id": "3b6e8459507a0cccc34c118bb67859a1744d2e59", "size": "30374", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/keras/layers/gru_v2_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "31796" }, { "name": "Batchfile", "bytes": "55269" }, { "name": "C", "bytes": "895451" }, { "name": "C#", "bytes": "8562" }, { "name": "C++", "bytes": "82100676" }, { "name": "CMake", "bytes": "6500" }, { "name": "Dockerfile", "bytes": "112853" }, { "name": "Go", "bytes": "1867248" }, { "name": "HTML", "bytes": "4686483" }, { "name": "Java", "bytes": "984477" }, { "name": "Jupyter Notebook", "bytes": "550862" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "1982867" }, { "name": "Makefile", "bytes": "66496" }, { "name": "Objective-C", "bytes": "116558" }, { "name": "Objective-C++", "bytes": "317461" }, { "name": "PHP", "bytes": "4236" }, { "name": "Pascal", "bytes": "318" }, { "name": "Pawn", "bytes": "20422" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "37425809" }, { "name": "RobotFramework", "bytes": "1779" }, { "name": "Roff", "bytes": "2705" }, { "name": "Ruby", "bytes": "7464" }, { "name": "SWIG", "bytes": "8992" }, { "name": "Shell", "bytes": "700106" }, { "name": "Smarty", "bytes": "35725" }, { "name": "Starlark", "bytes": "3613406" }, { "name": "Swift", "bytes": "62814" }, { "name": "Vim Snippet", "bytes": "58" } ], "symlink_target": "" }
""" Created on Thu Apr 27 12:16:26 2017 @author: Anand A Joshi, Divya Varadarajan """ import sys from os import system from os.path import isfile, split from multiprocessing import Pool from contextlib import closing import configparser import glob #config_file = '/big_disk/ajoshi/coding_ground/\ #brainsuite-workflows/data/sample_data/thickness.cfg' config_file = sys.argv[1] Config = configparser.ConfigParser() Config.read(config_file) Config.sections() STUDY_DIR = Config.get('CSESVREG', 'STUDY_DIR') NPROC = int(Config.get('CSESVREG', 'NPROC')) BST_INSTALL = Config.get('CSESVREG', 'BST_INSTALL') SVREG_ATLAS = Config.get('CSESVREG', 'SVREG_ATLAS') THICKNESSPVC_EXE = Config.get('THICKNESS', 'THICKNESSPVC_EXE') GENERATE_STATS_EXE = Config.get('THICKNESS', 'GENERATE_STATS_EXE') SMOOTHNESS = Config.get('THICKNESS', 'SMOOTHNESS') SMOOTHNESS_EXE = Config.get('THICKNESS', 'SMOOTHNESS_EXE') sublist = lst = glob.glob(STUDY_DIR+'/*') ind = 0 cmdln1 = [] cmdln2 = [] cmdln3 = [] cmdln4 = [] for sub in sublist: img = sub + '/anat/t1.nii.gz' if not isfile(img): continue # Check if the workflow has already been run subpath, filename = split(img) fname = subpath + '/atlas.pvc-thickness_0-6mm.right.\ smooth_' + SMOOTHNESS + 'mm.dfs' if isfile(fname): continue surfname = subpath + '/atlas.left.mid.cortex.svreg.dfs' if not isfile(surfname): cmdln1.append('qsub -q long.q -l h_vmem=23G -cwd ' + THICKNESSPVC_EXE + ' ' + img[:-7]) cmdln2.append('qsub -q long.q -l h_vmem=23G -cwd ' + GENERATE_STATS_EXE + ' ' + img[:-7]) outsurfname = subpath + '/atlas.pvc-thickness_0-6mm.left.\ smooth_' + SMOOTHNESS + 'mm.dfs' if not isfile(outsurfname): cmdln3.append('qsub -q long.q -l h_vmem=23G -cwd ' + SMOOTHNESS_EXE + ' ' + surfname + ' ' + surfname + ' \ ' + outsurfname) surfname = subpath + '/atlas.right.mid.cortex.svreg.dfs' outsurfname = subpath + '/atlas.pvc-thickness_0-6mm.right.\ smooth_' + SMOOTHNESS + 'mm.dfs' if not isfile(outsurfname): cmdln4.append('qsub -q long.q -l h_vmem=23G -cwd ' + 
SMOOTHNESS_EXE + ' ' + surfname + ' ' + surfname + ' \ ' + outsurfname) print(cmdln1) ind += 1 with closing(Pool(NPROC)) as p: p.map(system, cmdln1) p.terminate() with closing(Pool(NPROC)) as p: p.map(system, cmdln2) p.terminate() with closing(Pool(NPROC)) as p: p.map(system, cmdln3) p.terminate() with closing(Pool(NPROC)) as p: p.map(system, cmdln4) p.terminate() print("Thickness Computations Done")
{ "content_hash": "7cef839480cecb22a14ab863ee866886", "timestamp": "", "source": "github", "line_count": 94, "max_line_length": 79, "avg_line_length": 28.03191489361702, "alnum_prop": 0.6428842504743834, "repo_name": "ajoshiusc/brainsuite-workflows", "id": "9e7aafab8bf542ddd2ea7122f4ffc33a82570041", "size": "2682", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "process_thickness_qsub.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "17181" } ], "symlink_target": "" }
from unittest import TestCase

from src.couchdb import CouchDB


class TestDatabaseVersion(TestCase):
    """Unit tests for CouchDB.DatabaseVersion string parsing."""

    def _assert_invalid(self, raw):
        """Parse *raw* and check it is rejected as an invalid version."""
        parsed = CouchDB.DatabaseVersion(raw)
        self.assertFalse(parsed.valid)
        self.assertIsNone(parsed.version)

    def test_simple_parse(self):
        # A well-formed "major.minor.build" string exposes all components.
        parsed = CouchDB.DatabaseVersion("1.2.3")
        self.assertTrue(parsed.valid)
        self.assertEqual("1.2.3", parsed.version)
        self.assertEqual(1, parsed.major)
        self.assertEqual(2, parsed.minor)
        self.assertEqual(3, parsed.build)

    def test_invalid1(self):
        # Entirely non-numeric components.
        self._assert_invalid("a.b.c")

    def test_invalid2(self):
        # Partially non-numeric components.
        self._assert_invalid("1.b.c")

    def test_invalid3(self):
        # Too many components.
        self._assert_invalid("1.2.3.4")

    def test_invalid4(self):
        # Empty string.
        self._assert_invalid("")

    def test_invalid5(self):
        # Non-string input.
        self._assert_invalid(1.1)

    def test_None(self):
        self._assert_invalid(None)
{ "content_hash": "2630a009f2c2358cc59272122dde002f", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 48, "avg_line_length": 29.72093023255814, "alnum_prop": 0.6471048513302035, "repo_name": "RipcordSoftware/avancedb-replication-monitor", "id": "7f1fcd5b941c9a004f6af93deba4116011d21d56", "size": "1278", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/test_databaseVersion.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "88" }, { "name": "Python", "bytes": "97413" } ], "symlink_target": "" }
"""Admin registrations for the forum app's Link and Vote models."""
from django.contrib import admin

from .models import Link, Vote


@admin.register(Link)
class LinkAdmin(admin.ModelAdmin):
    """Default admin configuration for Link objects."""


@admin.register(Vote)
class VoteAdmin(admin.ModelAdmin):
    """Default admin configuration for Vote objects."""
{ "content_hash": "e826f01e29a5b54a3f3756fb3c7739f9", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 39, "avg_line_length": 23.666666666666668, "alnum_prop": 0.8028169014084507, "repo_name": "project-musashi/GTR", "id": "24f46be57712b018e90e190b4ea62741d218c7f9", "size": "284", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GTR/forum/admin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "7507" }, { "name": "HTML", "bytes": "34153" }, { "name": "JavaScript", "bytes": "3545" }, { "name": "Python", "bytes": "44039" }, { "name": "Shell", "bytes": "3620" } ], "symlink_target": "" }
"""
    The Plaid API

    The Plaid REST API. Please see https://plaid.com/docs/api for more details.  # noqa: E501

    Generated by: https://openapi-generator.tech
"""

# NOTE(review): this module is auto-generated by openapi-generator.
# Regenerate from the API spec rather than editing by hand.

import re  # noqa: F401
import sys  # noqa: F401

from plaid.model_utils import (  # noqa: F401
    ApiTypeError,
    ModelComposed,
    ModelNormal,
    ModelSimple,
    cached_property,
    change_keys_js_to_python,
    convert_js_args_to_python_args,
    date,
    datetime,
    file_type,
    none_type,
    validate_get_composed_info,
)


class TransferUserAddressInRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes in this model.
    allowed_values = {
    }

    # No length/range/regex validations in this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'street': (str,),  # noqa: E501
            'city': (str,),  # noqa: E501
            'region': (str,),  # noqa: E501
            'postal_code': (str,),  # noqa: E501
            'country': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'street': 'street',  # noqa: E501
        'city': 'city',  # noqa: E501
        'region': 'region',  # noqa: E501
        'postal_code': 'postal_code',  # noqa: E501
        'country': 'country',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that must never be treated as API fields.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """TransferUserAddressInRequest - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            street (str): The street number and name (i.e., \"100 Market St.\").. [optional]  # noqa: E501
            city (str): Ex. \"San Francisco\". [optional]  # noqa: E501
            region (str): The state or province (e.g., \"CA\").. [optional]  # noqa: E501
            postal_code (str): The postal code (e.g., \"94103\").. [optional]  # noqa: E501
            country (str): A two-letter country code (e.g., \"US\").. [optional]  # noqa: E501
        """

        # Pop the framework-level options out of kwargs before treating the
        # remainder as model attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
{ "content_hash": "8d9465bc8560928aa9ab38045bb51a5a", "timestamp": "", "source": "github", "line_count": 182, "max_line_length": 110, "avg_line_length": 41.08791208791209, "alnum_prop": 0.5509494517250602, "repo_name": "plaid/plaid-python", "id": "b2d8396b3cacfa6137a6a46fec3bfd4bf6094aae", "size": "7478", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plaid/model/transfer_user_address_in_request.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "323" }, { "name": "Makefile", "bytes": "622" }, { "name": "Mustache", "bytes": "125163" }, { "name": "Python", "bytes": "9342874" } ], "symlink_target": "" }
import specs.fmt as fmt
import specs.folly as folly
import specs.gmock as gmock
import specs.sodium as sodium
import specs.zstd as zstd


def fbcode_builder_spec(builder):
    """Return the fbcode_builder spec for Fizz (facebookincubator/fizz)."""
    # Fizz's build is kind of broken, in the sense that both `mvfst`
    # and `proxygen` depend on files that are only installed with
    # `BUILD_TESTS` enabled, e.g. `fizz/crypto/test/TestUtil.h`.
    cmake_defines = {"BUILD_TESTS": "ON"}
    builder.add_option("fizz/fizz/build:cmake_defines", cmake_defines)

    install_step = builder.fb_github_cmake_install(
        "fizz/fizz/build", github_org="facebookincubator"
    )
    return {
        "depends_on": [gmock, fmt, folly, sodium, zstd],
        "steps": [install_step],
    }
{ "content_hash": "8632fa40dfc8bdbab5d51d29ccae26c9", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 76, "avg_line_length": 30.04, "alnum_prop": 0.5818908122503329, "repo_name": "facebook/bistro", "id": "7bde4e6efa03026fdaa7b0f457bed340b6f9f531", "size": "825", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "build/fbcode_builder/specs/fizz.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "4599" }, { "name": "C++", "bytes": "1140639" }, { "name": "CMake", "bytes": "98471" }, { "name": "CSS", "bytes": "14704" }, { "name": "Dockerfile", "bytes": "1445" }, { "name": "JavaScript", "bytes": "7692" }, { "name": "PHP", "bytes": "235461" }, { "name": "Python", "bytes": "375063" }, { "name": "Shell", "bytes": "5618" }, { "name": "Thrift", "bytes": "46997" } ], "symlink_target": "" }
"""Support for Minut Point."""
import asyncio
import logging

from pypoint import PointSession
import voluptuous as vol

from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
    CONF_CLIENT_ID,
    CONF_CLIENT_SECRET,
    CONF_TOKEN,
    CONF_WEBHOOK_ID,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
    async_dispatcher_connect,
    async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import as_local, parse_datetime, utc_from_timestamp

from . import config_flow
from .const import (
    CONF_WEBHOOK_URL,
    DOMAIN,
    EVENT_RECEIVED,
    POINT_DISCOVERY_NEW,
    SCAN_INTERVAL,
    SIGNAL_UPDATE_ENTITY,
    SIGNAL_WEBHOOK,
)

_LOGGER = logging.getLogger(__name__)

DATA_CONFIG_ENTRY_LOCK = "point_config_entry_lock"
CONFIG_ENTRY_IS_SETUP = "point_config_entry_is_setup"

PLATFORMS = ["binary_sensor", "sensor"]

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_CLIENT_ID): cv.string,
                vol.Required(CONF_CLIENT_SECRET): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)


async def async_setup(hass, config):
    """Set up the Minut Point component from YAML configuration."""
    if DOMAIN not in config:
        return True

    conf = config[DOMAIN]

    config_flow.register_flow_implementation(
        hass, DOMAIN, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET]
    )

    # Import the YAML configuration into a config entry.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
        )
    )

    return True


async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Set up Point from a config entry."""

    async def token_saver(token, **kwargs):
        # Persist refreshed OAuth tokens back into the config entry.
        _LOGGER.debug("Saving updated token %s", token)
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, CONF_TOKEN: token}
        )

    session = PointSession(
        hass.helpers.aiohttp_client.async_get_clientsession(),
        entry.data["refresh_args"][CONF_CLIENT_ID],
        entry.data["refresh_args"][CONF_CLIENT_SECRET],
        token=entry.data[CONF_TOKEN],
        token_saver=token_saver,
    )

    try:
        await session.ensure_active_token()
    except Exception:  # pylint: disable=broad-except
        _LOGGER.error("Authentication Error")
        return False

    hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
    hass.data[CONFIG_ENTRY_IS_SETUP] = set()

    await async_setup_webhook(hass, entry, session)
    client = MinutPointClient(hass, entry, session)
    hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: client})
    hass.async_create_task(client.update())

    return True


async def async_setup_webhook(hass: HomeAssistantType, entry: ConfigEntry, session):
    """Set up a webhook to handle binary sensor events."""
    if CONF_WEBHOOK_ID not in entry.data:
        # First run: generate a webhook and store it on the entry.
        webhook_id = hass.components.webhook.async_generate_id()
        webhook_url = hass.components.webhook.async_generate_url(webhook_id)
        _LOGGER.info("Registering new webhook at: %s", webhook_url)

        hass.config_entries.async_update_entry(
            entry,
            data={
                **entry.data,
                CONF_WEBHOOK_ID: webhook_id,
                CONF_WEBHOOK_URL: webhook_url,
            },
        )
    # Subscribe the cloud session to all ("*") event types.
    await session.update_webhook(
        entry.data[CONF_WEBHOOK_URL],
        entry.data[CONF_WEBHOOK_ID],
        ["*"],
    )

    hass.components.webhook.async_register(
        DOMAIN, "Point", entry.data[CONF_WEBHOOK_ID], handle_webhook
    )


async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Unload a config entry."""
    hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
    session = hass.data[DOMAIN].pop(entry.entry_id)
    await session.remove_webhook()
    for platform in PLATFORMS:
        await hass.config_entries.async_forward_entry_unload(entry, platform)
    if not hass.data[DOMAIN]:
        hass.data.pop(DOMAIN)
    return True


async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback."""
    try:
        data = await request.json()
        _LOGGER.debug("Webhook %s: %s", webhook_id, data)
    except ValueError:
        # Malformed payload; nothing to dispatch.
        return None

    if isinstance(data, dict):
        data["webhook_id"] = webhook_id
        async_dispatcher_send(hass, SIGNAL_WEBHOOK, data, data.get("hook_id"))
    hass.bus.async_fire(EVENT_RECEIVED, data)


class MinutPointClient:
    """Get the latest data and update the states."""

    def __init__(self, hass: HomeAssistantType, config_entry: ConfigEntry, session):
        """Initialize the Minut data object."""
        self._known_devices = set()
        self._known_homes = set()
        self._hass = hass
        self._config_entry = config_entry
        self._is_available = True
        self._client = session

        async_track_time_interval(self._hass, self.update, SCAN_INTERVAL)

    async def update(self, *args):
        """Periodically poll the cloud for current state."""
        await self._sync()

    async def _sync(self):
        """Update local list of devices."""
        if not await self._client.update() and self._is_available:
            self._is_available = False
            _LOGGER.warning("Device is unavailable")
            async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
            return

        async def new_device(device_id, platform):
            """Load new device."""
            config_entries_key = f"{platform}.{DOMAIN}"
            async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
                if config_entries_key not in self._hass.data[CONFIG_ENTRY_IS_SETUP]:
                    # Forward entry setup for a platform exactly once.
                    await self._hass.config_entries.async_forward_entry_setup(
                        self._config_entry, platform
                    )
                    self._hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)

            async_dispatcher_send(
                self._hass, POINT_DISCOVERY_NEW.format(platform, DOMAIN), device_id
            )

        self._is_available = True
        for home_id in self._client.homes:
            if home_id not in self._known_homes:
                await new_device(home_id, "alarm_control_panel")
                self._known_homes.add(home_id)
        for device in self._client.devices:
            if device.device_id not in self._known_devices:
                for platform in PLATFORMS:
                    await new_device(device.device_id, platform)
                self._known_devices.add(device.device_id)
        async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)

    def device(self, device_id):
        """Return device representation."""
        return self._client.device(device_id)

    def is_available(self, device_id):
        """Return device availability."""
        if not self._is_available:
            return False
        return device_id in self._client.device_ids

    async def remove_webhook(self):
        """Remove the session webhook."""
        return await self._client.remove_webhook()

    @property
    def homes(self):
        """Return known homes."""
        return self._client.homes

    async def async_alarm_disarm(self, home_id):
        """Send alarm disarm command."""
        return await self._client.alarm_disarm(home_id)

    async def async_alarm_arm(self, home_id):
        """Send alarm arm command."""
        return await self._client.alarm_arm(home_id)


class MinutPointEntity(Entity):
    """Base Entity used by the sensors."""

    def __init__(self, point_client, device_id, device_class):
        """Initialize the entity."""
        self._async_unsub_dispatcher_connect = None
        self._client = point_client
        self._id = device_id
        self._name = self.device.name
        self._device_class = device_class
        self._updated = utc_from_timestamp(0)
        self._value = None

    def __str__(self):
        """Return string representation of device."""
        return f"MinutPoint {self.name}"

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug("Created device %s", self)
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback
        )
        await self._update_callback()

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()

    async def _update_callback(self):
        """Update the value of the sensor."""

    @property
    def available(self):
        """Return true if device is not offline."""
        return self._client.is_available(self.device_id)

    @property
    def device(self):
        """Return the representation of the device."""
        return self._client.device(self.device_id)

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def device_id(self):
        """Return the id of the device."""
        return self._id

    @property
    def extra_state_attributes(self):
        """Return status of device."""
        attrs = self.device.device_status
        attrs["last_heard_from"] = as_local(self.last_update).strftime(
            "%Y-%m-%d %H:%M:%S"
        )
        return attrs

    @property
    def device_info(self):
        """Return a device description for device registry."""
        device = self.device.device
        return {
            "connections": {("mac", device["device_mac"])},
            # Fixed: the key was misspelled "identifieres" (silently ignored
            # by the device registry) and must hold a set of (domain, id)
            # tuples, matching the "via_device" convention below.
            "identifiers": {(DOMAIN, device["device_id"])},
            "manufacturer": "Minut",
            "model": f"Point v{device['hardware_version']}",
            "name": device["description"],
            "sw_version": device["firmware"]["installed"],
            "via_device": (DOMAIN, device["home"]),
        }

    @property
    def name(self):
        """Return the display name of this device."""
        return f"{self._name} {self.device_class.capitalize()}"

    @property
    def is_updated(self):
        """Return true if sensor have been updated."""
        return self.last_update > self._updated

    @property
    def last_update(self):
        """Return the last_update time for the device."""
        last_update = parse_datetime(self.device.last_update)
        return last_update

    @property
    def should_poll(self):
        """No polling needed for point."""
        return False

    @property
    def unique_id(self):
        """Return the unique id of the sensor."""
        return f"point.{self._id}-{self.device_class}"

    @property
    def value(self):
        """Return the sensor value."""
        return self._value
{ "content_hash": "23aaddbf99a5916c6cc36feb13e83f13", "timestamp": "", "source": "github", "line_count": 350, "max_line_length": 84, "avg_line_length": 31.674285714285713, "alnum_prop": 0.6189788922965903, "repo_name": "adrienbrault/home-assistant", "id": "e5c209004de14a86afb6fa8eb3a092ef09a6e91a", "size": "11086", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/point/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "32021043" }, { "name": "Shell", "bytes": "4900" } ], "symlink_target": "" }
import abc import numpy as np import tensorflow as tf import tensorflow.keras.regularizers as regularizers from rudders.relations import Relations from rudders.math.euclid import apply_rotation, apply_reflection class CFModel(tf.keras.Model, abc.ABC): """Abstract collaborative filtering embedding model class. Module to define basic operations in CF embedding models. This implementation is based on Knowledge Graph embeddings models, in order to model different types of relations between entities (users and items) """ def __init__(self, n_entities, n_relations, item_ids, args): super().__init__() self.dims = args.dims self.item_ids = np.reshape(np.array(item_ids), (-1, 1)) self.initializer = getattr(tf.keras.initializers, args.initializer) self.entity_regularizer = getattr(regularizers, args.regularizer)(args.entity_reg) self.relation_regularizer = getattr(regularizers, args.regularizer)(args.relation_reg) self.entities = tf.keras.layers.Embedding( input_dim=n_entities, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.entity_regularizer, name='entity_embeddings') self.relations = tf.keras.layers.Embedding( input_dim=n_relations, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.relation_regularizer, name='relation_embeddings') self.bias_head = tf.keras.layers.Embedding( input_dim=n_entities, output_dim=1, embeddings_initializer='zeros', name='head_biases', trainable=args.train_bias) self.bias_tail = tf.keras.layers.Embedding( input_dim=n_entities, output_dim=1, embeddings_initializer='zeros', name='tail_biases', trainable=args.train_bias) self.dropout = tf.keras.layers.Dropout(args.dropout) self.training = True def build(self, input_shape): super().build(input_shape) self.dropout.build(input_shape) @abc.abstractmethod def get_lhs(self, input_tensor): """ Get left hand side embeddings, usually using head and relationship. 
:param input_tensor: Tensor of size batch_size x 3 containing (h, r, t) indices. :return: Tensor of size batch_size x embedding_dimension representing left hand side embeddings. """ pass @abc.abstractmethod def get_rhs(self, input_tensor): """ Get left hand side embeddings, usually using tail and relationship. :param input_tensor: Tensor of size batch_size x 3 containing (h, r, t) indices. :return: Tensor of size batch_size x embedding_dimension representing left hand side embeddings. """ pass def get_all_items(self, input_tensor): """Identical to get_rhs but using all items :param input_tensor: Tensor of size batch_size x 3 containing (h, r, t) indices. :return: Tensor of size n_items x embedding_dimension representing embeddings for all items in the CF """ input_tensor = np.repeat(self.item_ids, 3, axis=-1) input_tensor[:, 1] = Relations.USER_ITEM.value input_tensor = tf.convert_to_tensor(input_tensor) return self.get_rhs(input_tensor) @abc.abstractmethod def similarity_score(self, lhs, rhs, all_items): """ Computes a similarity score between left_hand_side and right_hand_side embeddings. eval_mode: :param lhs: Tensor of size B1 x embedding_dimension containing left_hand_side embeddings. :param rhs: Tensor of size B2 x embedding_dimension containing right_hand_side embeddings. :param all_items: boolean to indicate whether to compute all pairs of scores or not. If False, B1 must be equal to B2. :return: Tensor representing similarity scores. If all_items is False, this tensor has size B1 x 1, otherwise it has size B1 x B2. """ pass def call(self, input_tensor, all_items=False): """ Forward pass of Collaborative embedding models. :param input_tensor: Tensor of size batch_size x 3 containing triples' indices: (head, relation, tail) :param all_items: boolean to indicate whether to compute scores against all items, or only individual triples' scores. :return: Tensor containing triple scores. 
If eval_mode is False, this tensor has size batch_size x 1, otherwise it has size batch_size x n_items """ lhs = self.get_lhs(input_tensor) lhs_biases = self.bias_head(input_tensor[:, 0]) if all_items: rhs = self.get_all_items(input_tensor) rhs_biases = self.bias_tail(np.reshape(self.item_ids, (-1,))) else: rhs = self.get_rhs(input_tensor) rhs_biases = self.bias_tail(input_tensor[:, -1]) predictions = self.score(lhs, lhs_biases, rhs, rhs_biases, all_items) return predictions def score(self, lhs, lhs_biases, rhs, rhs_biases, all_items): """ Compute triple scores using embeddings and biases. :param lhs: B1 x embedding_dim :param lhs_biases: B1 x 1 :param rhs: B2 x embedding_dim :param rhs_biases: B2 x 1 :param all_items: boolean to indicate if should compute vs all rhs or just pairs of scores. If False then B1 must be equal to B2 :return: scores: B1 x 1 if all_items is False, else B1 x B2 """ score = self.similarity_score(lhs, rhs, all_items) if all_items: return score + lhs_biases + tf.transpose(rhs_biases) return score + lhs_biases + rhs_biases def random_eval(self, split_data, excluded_items, samples, batch_size=500, num_rand=100, seed=1234): """ Compute ranking-based evaluation metrics in both full and random settings. :param split_data: Dataset with tensor of size n_examples x 3 containing pairs' indices. :param excluded_items: List of item ids to be excluded from the evaluation :param samples: Dict representing items to skip per user for evaluation in the filtered setting. :param batch_size: batch size to use to compute scores. :param num_rand: number of negative samples to draw. :param seed: seed for random sampling. :return: ranks: Numpy array of shape (n_examples, ) containing the rank of each example in full setting (ranking against the full item corpus). ranks_random: Numpy array of shape (n_examples, ) containing the rank of each example in random setting (ranking against randomly selected num_rand items). 
""" total_examples = tf.data.experimental.cardinality(split_data).numpy() batch_size = min(batch_size, total_examples) ranks = np.ones(total_examples) ranks_random = np.ones(total_examples) for counter, input_tensor in enumerate(split_data.batch(batch_size)): targets = self.call(input_tensor).numpy() scores = self.call(input_tensor, all_items=True).numpy() # scores[:, excluded_items] = -1e6 scores_random = np.ones(shape=(scores.shape[0], num_rand)) for i, query in enumerate(input_tensor): query = query.numpy() filter_out = samples[query[0]] scores[i, filter_out] = -1e6 # sets that value on scores of train items comp_filter_out = list(set(range(scores.shape[1])) - set(filter_out)) np.random.seed(seed) random_indices = np.random.choice(comp_filter_out, num_rand, replace=False) scores_random[i, :] = scores[i, random_indices] # copies the indices chosen for evaluation ini = counter * batch_size end = (counter + 1) * batch_size ranks[ini:end] += np.sum((scores >= targets), axis=1) ranks_random[ini:end] += np.sum((scores_random >= targets), axis=1) return ranks, ranks_random class MuRBase(CFModel, abc.ABC): """ Multi relational graph embeddings model based on: "Multi-relational Poincaré Graph Embeddings" Balazevic et al. 2019. """ def __init__(self, n_entities, n_relations, item_ids, args): super().__init__(n_entities, n_relations, item_ids, args) self.transforms = tf.keras.layers.Embedding( input_dim=n_relations, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.relation_regularizer, name='transform_weights') class RotRefBase(CFModel, abc.ABC): """ Attention model that combines reflections and rotations from: "Low-Dimensional Hyperbolic Knowledge Graph Embeddings" Chami et al. 2020. 
""" def __init__(self, n_entities, n_relations, item_ids, args): super().__init__(n_entities, n_relations, item_ids, args) self.reflections = tf.keras.layers.Embedding( input_dim=n_relations, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.relation_regularizer, name='reflection_weights') self.rotations = tf.keras.layers.Embedding( input_dim=n_relations, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.relation_regularizer, name='rotation_weights') self.attention_lhs = tf.keras.layers.Embedding( input_dim=n_relations, output_dim=self.dims, embeddings_initializer=self.initializer, embeddings_regularizer=self.relation_regularizer, name='attention_lhs') self.scale = tf.keras.backend.ones(1) / np.sqrt(self.dims) def reflect_entities(self, entity, ref): """ :param entity: bs x dims: entity embeddings :param ref: bs x dims: reflection weights :return: reflected_entity_embeddings: bs x 1 x dims """ queries = apply_reflection(ref, entity) return tf.reshape(queries, (-1, 1, self.dims)) def rotate_entities(self, entity, rot): """ :param entity: bs x dims: entity embeddings :param rot: bs x dims: rotation weights :return: rotated_entity_embeddings: bs x 1 x dims """ queries = apply_rotation(rot, entity) return tf.reshape(queries, (-1, 1, self.dims)) def attn_mechanism(self, queries, attn_vecs): """ Applies self-attention mechanism over query vectors :param queries: b x n x dims: tensor with n queries to combine with attention :param attn_vecs: b x dims: attn vector to calculate attn weights based on dot product between each of the n candidates and the attn vector batch-wise. 
        :return: b x dims: weighted average of n candidates as a single vector representation
        """
        # Reshape the attention vector so it broadcasts against each of the
        # n candidate queries.
        attn_vecs = tf.reshape(attn_vecs, (-1, 1, self.dims))  # b x 1 x dim
        # Scaled dot-product score between the attention vector and each candidate.
        att_weights = tf.reduce_sum(attn_vecs * queries * self.scale, axis=-1, keepdims=True)  # b x n x 1
        att_weights = tf.nn.softmax(att_weights, axis=1)
        # Weighted average collapses the n candidates into a single vector.
        res = tf.reduce_sum(att_weights * queries, axis=1)
        return res

    def get_heads(self, input_tensor):
        """
        Calculates the head representation by applying rotations and reflections and combining them
        with a self-attention mechanism.
        :param input_tensor: Tensor of size batch_size x 3 containing triples' indices: (head, relation, tail)
        :return: heads: bs x dims
        """
        heads = self.entities(input_tensor[:, 0])
        heads = self.dropout(heads, training=self.training)
        # Relation-specific parameters and the lhs attention vector.
        rotations = self.rotations(input_tensor[:, 1])
        reflections = self.reflections(input_tensor[:, 1])
        attn_vec = self.attention_lhs(input_tensor[:, 1])
        # Two candidate head representations: reflected and rotated.
        ref_q = self.reflect_entities(heads, reflections)
        rot_q = self.rotate_entities(heads, rotations)
        queries = tf.concat([ref_q, rot_q], axis=1)  # b x 2 x dims
        return self.attn_mechanism(queries, attn_vec)


class UserAttentiveBase(RotRefBase, MuRBase, abc.ABC):
    """
    This model combines two models:
    - On the left-hand side, rotations, reflections and transformations, which are combined
    according to a lhs attention vector that is specific for each relation.
    - On the right-hand side, we add the relation embedding to the tail. For the special case of the
    USER-ITEM relation, we add all the relations to the tail.
    We then combine them according to the rhs attention vector, that is user-specific.
    """

    def __init__(self, n_entities, n_relations, item_ids, args):
        super().__init__(n_entities, n_relations, item_ids, args)
        # Per-entity attention vector used to aggregate rhs candidates
        # (the right-hand-side attention is entity/user-specific).
        self.attention_rhs = tf.keras.layers.Embedding(
            input_dim=n_entities,
            output_dim=self.dims,
            embeddings_initializer=self.initializer,
            embeddings_regularizer=self.entity_regularizer,
            name='attention_rhs')
        # Per-entity scalar (squashed with a sigmoid where used) that
        # interpolates between the plain USER-ITEM embedding and the
        # attention-aggregated one.
        self.ui_weights = tf.keras.layers.Embedding(
            input_dim=n_entities,
            output_dim=1,
            embeddings_initializer=tf.keras.initializers.constant(args.ui_weight),
            name='ui_weights',
            trainable=args.train_ui_weight)
        self.item_ids = tf.convert_to_tensor(item_ids)

    def get_lhs(self, input_tensor):
        """
        Calculates the lhs representation from reflected, rotated and
        transformed head candidates combined with self-attention.
        :param input_tensor: bs x 3 tensor of (head, relation, tail) indices
        :return: bs x dims
        """
        heads = self.entities(input_tensor[:, 0])
        heads = self.dropout(heads, training=self.training)
        rotations = self.rotations(input_tensor[:, 1])
        reflections = self.reflections(input_tensor[:, 1])
        transforms = self.transforms(input_tensor[:, 1])
        attn_vec = self.attention_lhs(input_tensor[:, 1])
        ref_q = self.reflect_entities(heads, reflections)
        rot_q = self.rotate_entities(heads, rotations)
        # Element-wise (diagonal) relation transform as the third candidate.
        trf_q = tf.reshape(transforms * heads, (-1, 1, self.dims))
        queries = tf.concat([ref_q, rot_q, trf_q], axis=1)  # b x 3 x dims
        return self.attn_mechanism(queries, attn_vec)

    def get_rhs_attn_vector(self, input_tensor, all_items=False):
        """
        Returns the attn vectors for the right-hand side.
        By default it depends on the head entity (user-centric).
        :param input_tensor: bs x 3: tensor of triplets
        :param all_items: Whether to compute the attn vector for all items or not
        :return: tensor of bs x dims. If all_items=True, bs x n_items x dims
        """
        if all_items:
            # Repeat each head index once per item so that one attention
            # vector is produced for every (head, item) pair.
            input_tensor = tf.tile(tf.expand_dims(input_tensor[:, 0], 1), [1, len(self.item_ids)])
            return self.attention_rhs(input_tensor)
        return self.attention_rhs(input_tensor[:, 0])

    @tf.function
    def combine_entities_and_relations(self, entities, relations, all_relations,
                                       attn_vecs, relation_index, ui_weights, tf_op):
        """
        :param entities: b x dims head (or tail) embeddings of each triplet
        :param relations: b x dims: relation embedding of each triplet
        :param all_relations: r x dims: one embeddings for each possible relation
        :param attn_vecs: attn vectors
        :param relation_index: bs x 1: relation index in the triplet (head, relation_index, tail)
        :param ui_weights: weight to interpolate between USER-ITEM relation and
        aggregation of all relations.
        :param tf_op: it has to be a broadcastable operation between each entity
        embedding and all the relation embeddings. Usually it is tf.add or tf.multiply
        :return: b x dims: entity embeddings combined with all relations and
        aggregated with self-attention
        """
        # adds or multiplies each entity with the corresponding relation
        regular_embeds = tf_op(entities, relations)
        # Fast path: if no triplet in the batch uses the USER-ITEM relation
        # there is nothing to aggregate. (The shape[0] != 1 guard presumably
        # avoids this shortcut for single-element inputs -- TODO confirm.)
        if relation_index.shape[0] != 1 and tf.reduce_all(relation_index != Relations.USER_ITEM.value):
            return regular_embeds
        # adds or multiplies each entity with all the relation embeddings
        candidates = tf_op(tf.reshape(entities, (-1, 1, self.dims)),
                           tf.reshape(all_relations, (1, -1, self.dims)))  # b x r x dims
        combined_embeds = self.attn_mechanism(candidates, attn_vecs)
        # the final embedding is a weighted avg of the reg embed and the combined embed
        user_item_embed = ui_weights * regular_embeds + (1 - ui_weights) * combined_embeds
        # if the relation is USER-ITEM it uses the combined embed, if not it uses the regular one
        is_user_item_rel = tf.tile(tf.expand_dims(relation_index == Relations.USER_ITEM.value, 1),
                                   [1, self.dims])
        res = tf.where(is_user_item_rel, user_item_embed, regular_embeds)
        return res

    def get_rhs(self, input_tensor):
        """
        Calculates the rhs embeddings by either adding the corresponding relation
        embedding for regular relations (non USER-ITEM relations).
        For the USER-ITEM relations it adds all the relations to the tail.
        Then it combines them according to the rhs attention vector, that is
        user-specific.
        """
        tails = self.entities(input_tensor[:, -1])
        tails = self.dropout(tails, training=self.training)
        rel_index = input_tensor[:, 1]
        relations = self.relations(rel_index)
        attn_vecs = self.get_rhs_attn_vector(input_tensor)
        all_relations = self.relations.weights[0]
        # Sigmoid keeps the interpolation weight in (0, 1).
        ui_weights = tf.keras.activations.sigmoid(self.ui_weights(input_tensor[:, 0]))
        res = self.combine_entities_and_relations(entities=tails, relations=relations,
                                                  all_relations=all_relations,
                                                  attn_vecs=attn_vecs,
                                                  relation_index=rel_index,
                                                  ui_weights=ui_weights,
                                                  tf_op=tf.add)
        return res

    def get_all_items(self, input_tensor):
        """
        In this case, since the item embedding depends on the head (user)
        we need to override this function
        :return: batch x n_items x dims tensor representing embeddings for each item,
        according to each head (user) in the input tensor
        """
        all_items = self.entities(self.item_ids)  # n_items x dims
        ui_relation = self.relations(tf.convert_to_tensor([Relations.USER_ITEM.value]))  # 1 x dims
        all_relations = self.relations.weights[0]  # r x dims
        attn_vecs = self.get_rhs_attn_vector(input_tensor, all_items=True)  # b x n_items x dims
        ui_weights = tf.keras.activations.sigmoid(self.ui_weights(input_tensor[:, 0]))

        # adds each item with all the relation embeddings
        cands = tf.add(tf.reshape(all_items, (-1, 1, self.dims)),
                       tf.reshape(all_relations, (1, -1, self.dims)))  # n_items x r x dims

        # aggregates the points (same softmax aggregation as attn_mechanism,
        # but with an extra leading item axis)
        cands = tf.expand_dims(cands, axis=0)  # 1 x n_items x r x dims
        attn_vecs = tf.expand_dims(attn_vecs, axis=2)  # b x n_items x 1 x dims
        att_weights = tf.reduce_sum(attn_vecs * cands * self.scale, axis=-1, keepdims=True)  # b x n_items x r x 1
        att_weights = tf.nn.softmax(att_weights, axis=2)
        combined_embeds = tf.reduce_sum(att_weights * cands, axis=2)  # b x n_items x dims

        regular_embeds = tf.expand_dims(tf.add(all_items, ui_relation), 0)  # 1 x n_items x dims
        # Per-user interpolation between the plain USER-ITEM embedding and the
        # attention-combined one.
        user_item_embed = tf.reshape(ui_weights, (-1, 1, 1)) * regular_embeds + \
            tf.reshape(1 - ui_weights, (-1, 1, 1)) * combined_embeds
        return user_item_embed
{ "content_hash": "6f62e0ad78530ffa4f02a81f189aa45f", "timestamp": "", "source": "github", "line_count": 420, "max_line_length": 114, "avg_line_length": 47.707142857142856, "alnum_prop": 0.6281878524729251, "repo_name": "PAIR-code/recommendation-rudders", "id": "fd98219c79a9d87fafd5dc6ddd68e302a0d65a90", "size": "20623", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hyperbolic-rs/rudders/models/base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "206719" }, { "name": "Shell", "bytes": "796" } ], "symlink_target": "" }
""" Django settings for running tests for Resolwe package. """ import os import re import sys from distutils.util import strtobool # pylint: disable=import-error,no-name-in-module PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) SECRET_KEY = "secret" # TODO: Remove this setting completely and only set it in the tests that require it. RESOLWE_HOST_URL = "https://dummy.host.local" DEBUG = True ALLOWED_HOSTS = ["*"] MIDDLEWARE_CLASSES = ( "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", ) INSTALLED_APPS = ( "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.staticfiles", "channels", "rest_framework", "versionfield", "resolwe", "resolwe.permissions", "resolwe.flow", "resolwe.storage", "resolwe.toolkit", "resolwe.test_helpers", "resolwe_bio", "resolwe_bio.kb", ) ROOT_URLCONF = "tests.urls" TEST_RUNNER = "resolwe.test_helpers.test_runner.ResolweRunner" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", ], }, }, ] AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", "resolwe.permissions.permissions.ResolwePermissionBackend", ) DEFAULT_AUTO_FIELD = "django.db.models.AutoField" ANONYMOUS_USER_NAME = "public" # Check if PostgreSQL settings are set via environment variables pgname = os.environ.get("RESOLWE_POSTGRESQL_NAME", "resolwe-bio") pguser = os.environ.get("RESOLWE_POSTGRESQL_USER", "resolwe") pgpass = os.environ.get("RESOLWE_POSTGRESQL_PASS", "resolwe") pghost = os.environ.get("RESOLWE_POSTGRESQL_HOST", "localhost") pgport = int(os.environ.get("RESOLWE_POSTGRESQL_PORT", 55433)) DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", 
"NAME": pgname, "USER": pguser, "PASSWORD": pgpass, "HOST": pghost, "PORT": pgport, } } STATIC_URL = "/static/" REDIS_CONNECTION = { "host": "localhost", "port": int(os.environ.get("RESOLWE_REDIS_PORT", 56380)), "db": int(os.environ.get("RESOLWE_REDIS_DATABASE", 0)), "protocol": (os.environ.get("RESOLWE_REDIS_PROTOCOL", "redis")), } REDIS_CONNECTION_STRING = "{protocol}://{host}:{port}/{db}".format(**REDIS_CONNECTION) LISTENER_CONNECTION = { # Keys in the hosts dictionary are workload connector names. Currently # supported are 'local', 'kubertenes', 'celery' and 'slurm'. "hosts": {"local": "172.17.0.1"}, "port": int(os.environ.get("RESOLWE_LISTENER_SERVICE_PORT", 53893)), "min_port": 50000, "max_port": 60000, "protocol": "tcp", } # The IP address where listener is available from the communication container. # The setting is a dictionary where key is the name of the workload connector. COMMUNICATION_CONTAINER_LISTENER_CONNECTION = {"local": "172.17.0.1"} # Settings in OSX/Windows are different since Docker runs in a virtual machine. 
if sys.platform == "darwin": LISTENER_CONNECTION["hosts"]["local"] = "0.0.0.0" COMMUNICATION_CONTAINER_LISTENER_CONNECTION = {"local": "127.0.0.1"} FLOW_EXECUTOR = { "NAME": "resolwe.flow.executors.docker", # XXX: Change to a stable resolwe image when it will include all the required tools "CONTAINER_IMAGE": "resolwe/bio-linux8-resolwe-preview", "CONTAINER_NAME_PREFIX": "resolwebio", "REDIS_CONNECTION": REDIS_CONNECTION, "LISTENER_CONNECTION": LISTENER_CONNECTION, } # Set custom executor command if set via environment variable if "RESOLWE_DOCKER_COMMAND" in os.environ: FLOW_DOCKER_COMMAND = os.environ["RESOLWE_DOCKER_COMMAND"] FLOW_API = { "PERMISSIONS": "resolwe.permissions.permissions", } FLOW_EXPRESSION_ENGINES = [ { "ENGINE": "resolwe.flow.expression_engines.jinja", "CUSTOM_FILTERS": [ "resolwe_bio.expression_filters.sample", "resolwe_bio.expression_filters.relation", ], }, ] FLOW_EXECUTION_ENGINES = [ "resolwe.flow.execution_engines.bash", "resolwe.flow.execution_engines.workflow", "resolwe.flow.execution_engines.python", ] # Check if any Manager settings are set via environment variables manager_prefix = os.environ.get("RESOLWE_MANAGER_REDIS_PREFIX", "resolwe-bio.manager") # Ensure Manager channel prefix is a valid Django Channels name. manager_prefix = re.sub("[^0-9a-zA-Z.-]", "-", manager_prefix) FLOW_MANAGER = { "NAME": "resolwe.flow.managers.workload_connectors.local", "REDIS_PREFIX": manager_prefix, "REDIS_CONNECTION": REDIS_CONNECTION, } FLOW_PROCESS_MAX_CORES = 1 FLOW_PROCESS_MAX_MEMORY = 10240 # Don't pull Docker images if set via the environment variable. FLOW_DOCKER_DONT_PULL = strtobool(os.environ.get("RESOLWE_DOCKER_DONT_PULL", "0")) # Disable SECCOMP if set via environment variable. FLOW_DOCKER_DISABLE_SECCOMP = strtobool( os.environ.get("RESOLWE_DOCKER_DISABLE_SECCOMP", "0") ) # Ensure all container images follow a specific format. 
FLOW_CONTAINER_VALIDATE_IMAGE = r".+:(?!latest)" REST_FRAMEWORK = { "DEFAULT_AUTHENTICATION_CLASSES": ( "rest_framework.authentication.SessionAuthentication", ), "DEFAULT_FILTER_BACKENDS": ( "resolwe.permissions.filters.ResolwePermissionsFilter", "django_filters.rest_framework.backends.DjangoFilterBackend", "resolwe.flow.filters.OrderingFilter", ), # Python<3.7 cannot parse iso-8601 formatted datetimes with tz-info form # "+01:00" (DRF default). It can only parse "+0100" form, so we need to # modify this setting. This will be fixed in Python3.7, where "+01:00" can # be parsed by ``datetime.datetime.strptime`` syntax. # For more, check "%z" syntax description in: # https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior "DATETIME_FORMAT": "%Y-%m-%dT%H:%M:%S.%f%z", } # Time USE_TZ = True TIME_ZONE = "UTC" # Django does not support parsing of 'iso-8601' formated datetimes by default. # Since Django-filters uses Django forms for parsing, we need to modify Django # setting ``DATETIME_INPUT_FORMATS`` to support 'iso-8601' format. 
# https://docs.djangoproject.com/en/1.11/ref/settings/#datetime-input-formats DATETIME_INPUT_FORMATS = ( # These are already given Django defaults: "%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59' "%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200' "%Y-%m-%d %H:%M", # '2006-10-25 14:30' "%Y-%m-%d", # '2006-10-25' # These are iso-8601 formatted: "%Y-%m-%dT%H:%M:%S.%f%z", # '2006-10-25T14:30:59.000200+0200' or '2006-10-25T14:30:59.000200+02:00' (Python>=3.7) "%Y-%m-%dT%H:%M:%S.%fZ", # '2006-10-25T14:30:59.000200Z' "%Y-%m-%dT%H:%M:%S.%f", # '2006-10-25T14:30:59.000200' "%Y-%m-%dT%H:%M:%SZ", # '2006-10-25T14:30:59Z' "%Y-%m-%dT%H:%M:%S", # '2006-10-25T14:30:59' "%Y-%m-%dT%H:%M", # '2006-10-25T14:30' ) FLOW_PROCESSES_FINDERS = ( "resolwe.flow.finders.FileSystemProcessesFinder", "resolwe.flow.finders.AppDirectoriesFinder", ) FLOW_PROCESSES_RUNTIMES = ( "resolwe.process.runtime.Process", "resolwe_bio.process.runtime.ProcessBio", ) FLOW_PROCESSES_DIRS = (os.path.join(PROJECT_ROOT, "../resolwe_bio/processes/"),) # Do not skip tests that fail on Docker executor if this is set via environment # variable if os.environ.get("RESOLWEBIO_TESTS_SKIP_DOCKER_FAILURES", "").lower() in [ "no", "false", ]: TESTS_SKIP_DOCKER_FAILURES = False # Testing. TEST_RUNNER = "resolwe.test_helpers.test_runner.ResolweRunner" TEST_PROCESS_REQUIRE_TAGS = True # Don't profile unless set via the environment variable. TEST_PROCESS_PROFILE = strtobool(os.environ.get("RESOLWE_TEST_PROCESS_PROFILE", "0")) # Channels. ASGI_APPLICATION = "tests.routing.channel_routing" CHANNEL_LAYERS = { "default": { "BACKEND": "channels_redis.core.RedisChannelLayer", "CONFIG": { "hosts": [REDIS_CONNECTION_STRING], "expiry": 3600, }, }, } # Logging. # Set RESOLWEBIO_LOG_FILE environment variable to a file path to enable logging # debugging messages to to a file. 
log_file_path = os.environ.get( "RESOLWEBIO_LOG_FILE", os.devnull ) # pylint: disable=invalid-name LOGGING = { "version": 1, "disable_existing_loggers": False, "formatters": { "standard": { "format": "%(asctime)s - %(levelname)s - %(name)s[%(process)s]: %(message)s", }, }, "handlers": { "console": { "class": "logging.StreamHandler", "level": "WARNING", "formatter": "standard", }, "file": { "class": "logging.handlers.RotatingFileHandler", "filename": log_file_path, "formatter": "standard", "maxBytes": 1024 * 1024 * 10, # 10 MB }, }, "loggers": { "": { "handlers": ["file"], "level": "DEBUG", }, }, }
{ "content_hash": "6313f84add198679e495196eff6e880c", "timestamp": "", "source": "github", "line_count": 295, "max_line_length": 118, "avg_line_length": 31.664406779661018, "alnum_prop": 0.6527138422010491, "repo_name": "genialis/resolwe-bio", "id": "7284764d2cbaf9492f01357499cabae1528aebbc", "size": "9341", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "10553" }, { "name": "PLpgSQL", "bytes": "4491" }, { "name": "Python", "bytes": "1729619" }, { "name": "R", "bytes": "20619" }, { "name": "Shell", "bytes": "6713" } ], "symlink_target": "" }
import arcpy

# Workspace containing the census-tract shapefiles to update.
arcpy.env.workspace = r"PATH"


# function that converts numbers to 3 char strings
def fun(x):
    """Return ``x`` zero-padded to a 3-character string (e.g. 7 -> '007').

    NOTE(review): assumes ``x`` is a non-negative integer code (CT/CMA ids
    written into 3-char TEXT fields) -- confirm the shapefile field type.
    Replaces a hand-rolled if/elif ladder with ``str.zfill``.
    """
    return str(x).zfill(3)


count = 0
for s in arcpy.ListFiles('*.shp'):
    # Parenthesized single-argument print works identically on Python 2 and 3.
    print(s)

    # add new fields
    arcpy.AddField_management(s, "CMA_ID", "TEXT", "", "", 3)
    arcpy.AddField_management(s, "CT_ID", "TEXT", "", "", 3)
    arcpy.AddField_management(s, "CMACTUID", "TEXT", "", "", 6)

    # create a list of fields to be edited
    fields = ["CT", "CT_ID", "CMA", "CMA_ID", "CMACTUID"]

    # update fields using the fun function
    with arcpy.da.UpdateCursor(s, fields) as cursor:
        for row in cursor:
            row[1] = fun(row[0])          # CT  -> CT_ID  (3 chars)
            row[3] = fun(row[2])          # CMA -> CMA_ID (3 chars)
            row[4] = row[3] + row[1]      # CMACTUID = CMA_ID + CT_ID (6 chars)
            cursor.updateRow(row)

    count = count + 1

print("------------------------------")
print("number of shps updated:")
print(count)
{ "content_hash": "344eaa950dbd36cf461c7d94a553b867", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 60, "avg_line_length": 20.302325581395348, "alnum_prop": 0.6002290950744559, "repo_name": "jamaps/arcpy_scripts", "id": "632fb75a475686868f00e723ca64743955d3a6b4", "size": "1119", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "update_cursor_nums_to_strings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24175" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add active/inactive class-contacts fraction columns to ``structure``.

    Both columns are nullable ``DecimalField``s with up to 5 digits and
    3 decimal places; presumably fractions in [0, 1] -- confirm against
    the code that populates them.
    """

    dependencies = [
        ('structure', '0017_structure_contact_representative_score'),
    ]

    operations = [
        migrations.AddField(
            model_name='structure',
            name='active_class_contacts_fraction',
            field=models.DecimalField(decimal_places=3, max_digits=5, null=True),
        ),
        migrations.AddField(
            model_name='structure',
            name='inactive_class_contacts_fraction',
            field=models.DecimalField(decimal_places=3, max_digits=5, null=True),
        ),
    ]
{ "content_hash": "ccb2ba8655c1654fff21264e072253e1", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 81, "avg_line_length": 29.80952380952381, "alnum_prop": 0.6118210862619808, "repo_name": "protwis/protwis", "id": "e5b7725706ed8453e605b7d74291ac47159cebd0", "size": "675", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "structure/migrations/0018_auto_20190605_1403.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "167612" }, { "name": "HTML", "bytes": "2477269" }, { "name": "JavaScript", "bytes": "3119217" }, { "name": "Promela", "bytes": "467" }, { "name": "Python", "bytes": "4289933" } ], "symlink_target": "" }
import difflib
import re
import time
import token
from tokenize import generate_tokens, untokenize
from StringIO import StringIO

from robot.api import logger
from robot.errors import (ContinueForLoop, DataError, ExecutionFailed,
                          ExecutionFailures, ExecutionPassed, ExitForLoop,
                          PassExecution, ReturnFromKeyword)
from robot.running import Keyword, RUN_KW_REGISTER
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.usererrorhandler import UserErrorHandler
from robot.utils import (asserts, DotDict, escape, format_assign_message,
                         get_error_message, get_time, is_falsy, is_integer,
                         is_string, is_truthy, is_unicode, JYTHON, Matcher,
                         normalize, NormalizedDict, parse_time, prepr,
                         RERAISED_EXCEPTIONS, plural_or_not as s,
                         secs_to_timestr, seq2str, split_from_equals,
                         timestr_to_secs, type_name, unic)
from robot.variables import (is_list_var, is_var, DictVariableTableValue,
                             VariableTableValue, VariableSplitter,
                             variable_not_found)
from robot.version import get_version

if JYTHON:
    from java.lang import String, Number


# TODO: The name of this decorator should be changed. It is used to avoid
# arguments being resolved by many other keywords than run keyword variants.
# Should also consider:
# - Exposing this functionality to external libraries. Would require doc
#   enhancements and a clean way to expose variables to make resolving them
#   based on needs easier.
# - Removing the functionality that run keyword variants can be overridden
#   by custom keywords without a warning.
def run_keyword_variant(resolve):
    """Register the decorated BuiltIn method as a 'run keyword' variant.

    ``resolve`` controls how many of the keyword's arguments get their
    variables resolved before the keyword is run -- see
    ``RUN_KW_REGISTER.register_run_keyword`` for the exact semantics.
    """
    def decorator(method):
        RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__,
                                             resolve)
        return method
    return decorator


class _BuiltInBase(object):
    """Shared helpers for the BuiltIn keyword groups."""

    @property
    def _context(self):
        # Raise instead of returning None so that keywords used outside a
        # test run fail with a clear error.
        if EXECUTION_CONTEXTS.current is None:
            raise RobotNotRunningError('Cannot access execution context')
        return EXECUTION_CONTEXTS.current

    @property
    def _namespace(self):
        return self._context.namespace

    def _get_namespace(self, top=False):
        # ``top=True`` returns the suite-level (top) namespace instead of
        # the currently active one.
        ctx = EXECUTION_CONTEXTS.top if top else EXECUTION_CONTEXTS.current
        return ctx.namespace

    @property
    def _variables(self):
        return self._namespace.variables

    def _matches(self, string, pattern):
        # Must use this instead of fnmatch when string may contain newlines.
        matcher = Matcher(pattern, caseless=False, spaceless=False)
        return matcher.match(string)

    def _is_true(self, condition):
        # String conditions are evaluated as Python expressions; anything
        # else is taken by its truth value.
        if is_string(condition):
            condition = self.evaluate(condition, modules='os,sys')
        return bool(condition)

    def _log_types(self, *args):
        self._log_types_at_level('DEBUG', *args)

    def _log_types_at_level(self, level, *args):
        msg = ["Argument types are:"] + [self._get_type(a) for a in args]
        self.log('\n'.join(msg), level)

    def _get_type(self, arg):
        # In IronPython type(u'x') is str. We want to report unicode anyway.
        if is_unicode(arg):
            return "<type 'unicode'>"
        return str(type(arg))


class _Converter(_BuiltInBase):

    def convert_to_integer(self, item, base=None):
        """Converts the given item to an integer number.

        If the given item is a string, it is by default expected to be an
        integer in base 10. There are two ways to convert from other bases:

        - Give base explicitly to the keyword as ``base`` argument.

        - Prefix the given string with the base so that ``0b`` means binary
          (base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16).
          The prefix is considered only when ``base`` argument is not given and
          may itself be prefixed with a plus or minus sign.

        The syntax is case-insensitive and possible spaces are ignored.
        Examples:
        | ${result} = | Convert To Integer | 100    |    | # Result is 100   |
        | ${result} = | Convert To Integer | FF AA  | 16 | # Result is 65450 |
        | ${result} = | Convert To Integer | 100    | 8  | # Result is 64    |
        | ${result} = | Convert To Integer | -100   | 2  | # Result is -4    |
        | ${result} = | Convert To Integer | 0b100  |    | # Result is 4     |
        | ${result} = | Convert To Integer | -0x100 |    | # Result is -256  |

        See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`,
        `Convert To Hex`, and `Convert To Bytes`.
        """
        self._log_types(item)
        return self._convert_to_integer(item, base)

    def _convert_to_integer(self, orig, base=None):
        # The broad ``except`` is deliberate: every failure mode is reported
        # uniformly as a RuntimeError that names the original value.
        try:
            item = self._handle_java_numbers(orig)
            item, base = self._get_base(item, base)
            if base:
                return int(item, self._convert_to_integer(base))
            return int(item)
        except:
            raise RuntimeError("'%s' cannot be converted to an integer: %s"
                               % (orig, get_error_message()))

    def _handle_java_numbers(self, item):
        # On Jython, java.lang.String/Number instances are normalized to
        # their Python equivalents before conversion.
        if not JYTHON:
            return item
        if isinstance(item, String):
            return unic(item)
        if isinstance(item, Number):
            return item.doubleValue()
        return item

    def _get_base(self, item, base):
        # Detect an optional ``0b``/``0o``/``0x`` prefix (possibly preceded
        # by a sign) when an explicit ``base`` is not given.
        if not is_string(item):
            return item, base
        item = normalize(item)
        if item.startswith(('-', '+')):
            sign = item[0]
            item = item[1:]
        else:
            sign = ''
        bases = {'0b': 2, '0o': 8, '0x': 16}
        if base or not item.startswith(tuple(bases)):
            return sign+item, base
        return sign+item[2:], bases[item[:2]]

    def convert_to_binary(self, item, base=None, prefix=None, length=None):
        """Converts the given item to a binary string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to a binary number (base 2) represented as a
        string such as ``1011``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        Examples:
        | ${result} = | Convert To Binary | 10 |         |           | # Result is 1010   |
        | ${result} = | Convert To Binary | F  | base=16 | prefix=0b | # Result is 0b1111 |
        | ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 |

        See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`.
        """
        return self._convert_to_bin_oct_hex(bin, item, base, prefix, length)

    def convert_to_octal(self, item, base=None, prefix=None, length=None):
        """Converts the given item to an octal string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to an octal number (base 8) represented as a
        string such as ``775``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        Examples:
        | ${result} = | Convert To Octal | 10 |         |          | # Result is 12      |
        | ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017    |
        | ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 |

        See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`.
        """
        return self._convert_to_bin_oct_hex(oct, item, base, prefix, length)

    def convert_to_hex(self, item, base=None, prefix=None, length=None,
                       lowercase=False):
        """Converts the given item to a hexadecimal string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to a hexadecimal number (base 16) represented as
        a string such as ``FF0A``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        By default the value is returned as an upper case string, but the
        ``lowercase`` argument a true value (see `Boolean arguments`) turns
        the value (but not the given prefix) to lower case.

        Examples:
        | ${result} = | Convert To Hex | 255 |           |              | # Result is FF    |
        | ${result} = | Convert To Hex | -10 | prefix=0x | length=2     | # Result is -0x0A |
        | ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff   |

        See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`.
        """
        return self._convert_to_bin_oct_hex(hex, item, base, prefix, length,
                                            lowercase)

    def _convert_to_bin_oct_hex(self, method, item, base, prefix, length,
                                lowercase=False):
        self._log_types(item)
        # ``rstrip('L')`` drops the long-integer suffix produced on Python 2.
        ret = method(self._convert_to_integer(item, base)).upper().rstrip('L')
        prefix = prefix or ''
        if ret[0] == '-':
            # Keep the sign in front of the (optional) prefix.
            prefix = '-' + prefix
            ret = ret[1:]
        if len(ret) > 1:  # oct(0) -> '0' (i.e. has no prefix)
            # Strip the ``0b``/``0``/``0x`` prefix added by bin/oct/hex.
            prefix_length = {bin: 2, oct: 1, hex: 2}[method]
            ret = ret[prefix_length:]
        if length:
            ret = ret.rjust(self._convert_to_integer(length), '0')
        if is_truthy(lowercase):
            ret = ret.lower()
        return prefix + ret

    def convert_to_number(self, item, precision=None):
        """Converts the given item to a floating point number.

        If the optional ``precision`` is positive or zero, the returned number
        is rounded to that number of decimal digits. Negative precision
        means that the number is rounded to the closest multiple of 10 to
        the power of the absolute precision.

        Examples:
        | ${result} = | Convert To Number | 42.512 |    | # Result is 42.512 |
        | ${result} = | Convert To Number | 42.512 | 1  | # Result is 42.5   |
        | ${result} = | Convert To Number | 42.512 | 0  | # Result is 43.0   |
        | ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0   |

        Notice that machines generally cannot store floating point numbers
        accurately. This may cause surprises with these numbers in general
        and also when they are rounded.
        For more information see, for example, these resources:

        - http://docs.python.org/2/tutorial/floatingpoint.html
        - http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition

        If you need an integer number, use `Convert To Integer` instead.
        """
        self._log_types(item)
        return self._convert_to_number(item, precision)

    def _convert_to_number(self, item, precision=None):
        number = self._convert_to_number_without_precision(item)
        if precision:
            number = round(number, self._convert_to_integer(precision))
        return number

    def _convert_to_number_without_precision(self, item):
        try:
            if JYTHON:
                item = self._handle_java_numbers(item)
            return float(item)
        except:
            # Fall back to integer conversion (handles e.g. base prefixes);
            # the original float error is reported if that fails too.
            error = get_error_message()
            try:
                return float(self._convert_to_integer(item))
            except RuntimeError:
                raise RuntimeError("'%s' cannot be converted to a floating "
                                   "point number: %s" % (item, error))

    def convert_to_string(self, item):
        """Converts the given item to a Unicode string.

        Uses ``__unicode__`` or ``__str__`` method with Python objects and
        ``toString`` with Java objects.

        Use `Encode String To Bytes` and `Decode Bytes To String` keywords
        in ``String`` library if you need to convert between Unicode and byte
        strings using different encodings. Use `Convert To Bytes` if you just
        want to create byte strings.
        """
        self._log_types(item)
        return self._convert_to_string(item)

    def _convert_to_string(self, item):
        return unic(item)

    def convert_to_boolean(self, item):
        """Converts the given item to Boolean true or false.

        Handles strings ``True`` and ``False`` (case-insensitive) as expected,
        otherwise returns item's
        [http://docs.python.org/2/library/stdtypes.html#truth|truth value]
        using Python's ``bool()`` method.
        """
        self._log_types(item)
        if is_string(item):
            if item.upper() == 'TRUE':
                return True
            if item.upper() == 'FALSE':
                return False
        return bool(item)

    def convert_to_bytes(self, input, input_type='text'):
        u"""Converts the given ``input`` to bytes according to the ``input_type``.

        Valid input types are listed below:

        - ``text:`` Converts text to bytes character by character. All
          characters with ordinal below 256 can be used and are converted to
          bytes with same values. Many characters are easiest to represent
          using escapes like ``\\x00`` or ``\\xff``.

        - ``int:`` Converts integers separated by spaces to bytes. Similarly as
          with `Convert To Integer`, it is possible to use binary, octal, or
          hex values by prefixing the values with ``0b``, ``0o``, or ``0x``,
          respectively.

        - ``hex:`` Converts hexadecimal values to bytes. Single byte is always
          two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and
          can be used freely as a visual separator.

        - ``bin:`` Converts binary values to bytes. Single byte is always eight
          characters long (e.g. ``00001010``). Spaces are ignored and can be
          used freely as a visual separator.

        In addition to giving the input as a string, it is possible to use
        lists or other iterables containing individual characters or numbers.
        In that case numbers do not need to be padded to certain length and
        they cannot contain extra spaces.

        Examples (last column shows returned bytes):
        | ${bytes} = | Convert To Bytes | hyv\xe4    |     | # hyv\\xe4        |
        | ${bytes} = | Convert To Bytes | \\xff\\x07 |     | # \\xff\\x07      |
        | ${bytes} = | Convert To Bytes | 82 70      | int | # RF              |
        | ${bytes} = | Convert To Bytes | 0b10 0x10  | int | # \\x02\\x10      |
        | ${bytes} = | Convert To Bytes | ff 00 07   | hex | # \\xff\\x00\\x07 |
        | ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!!           |
        | ${bytes} = | Convert To Bytes | 0000 1000  | bin | # \\x08           |
        | ${input} = | Create List      | 1          | 2   | 12                |
        | ${bytes} = | Convert To Bytes | ${input}   | int | # \\x01\\x02\\x0c |
        | ${bytes} = | Convert To Bytes | ${input}   | hex | # \\x01\\x02\\x12 |

        Use `Encode String To Bytes` in ``String`` library if you need to
        convert text to bytes using a certain encoding.

        New in Robot Framework 2.8.2.
        """
        try:
            try:
                # Dispatch to _get_ordinals_from_text/int/hex/bin by name.
                ordinals = getattr(self, '_get_ordinals_from_%s' % input_type)
            except AttributeError:
                raise RuntimeError("Invalid input type '%s'." % input_type)
            return ''.join(chr(o) for o in ordinals(input))
        except:
            raise RuntimeError("Creating bytes failed: %s" % get_error_message())

    def _get_ordinals_from_text(self, input):
        for char in input:
            yield self._test_ordinal(ord(char), char, 'Character')

    def _test_ordinal(self, ordinal, original, type):
        # A single byte can only represent ordinals 0-255.
        if 0 <= ordinal <= 255:
            return ordinal
        raise RuntimeError("%s '%s' cannot be represented as a byte."
                           % (type, original))

    def _get_ordinals_from_int(self, input):
        if is_string(input):
            input = input.split()
        elif is_integer(input):
            input = [input]
        for integer in input:
            ordinal = self._convert_to_integer(integer)
            yield self._test_ordinal(ordinal, integer, 'Integer')

    def _get_ordinals_from_hex(self, input):
        for token in self._input_to_tokens(input, length=2):
            ordinal = self._convert_to_integer(token, base=16)
            yield self._test_ordinal(ordinal, token, 'Hex value')

    def _get_ordinals_from_bin(self, input):
        for token in self._input_to_tokens(input, length=8):
            ordinal = self._convert_to_integer(token, base=2)
            yield self._test_ordinal(ordinal, token, 'Binary value')

    def _input_to_tokens(self, input, length):
        if not is_string(input):
            return input
        # Remove all whitespace and split into fixed-width tokens.
        input = ''.join(input.split())
        if len(input) % length != 0:
            raise RuntimeError('Expected input to be multiple of %d.' % length)
        return (input[i:i+length] for i in xrange(0, len(input), length))

    def create_list(self, *items):
        """Returns a list containing given items.

        The returned list can be assigned both to ``${scalar}`` and ``@{list}``
        variables.

        Examples:
        | @{list} =   | Create List | a    | b    | c    |
        | ${scalar} = | Create List | a    | b    | c    |
        | ${ints} =   | Create List | ${1} | ${2} | ${3} |
        """
        return list(items)

    @run_keyword_variant(resolve=0)
    def create_dictionary(self, *items):
        """Creates and returns a dictionary based on given items.
        Items are given using ``key=value`` syntax same way as ``&{dictionary}``
        variables are created in the Variable table. Both keys and values
        can contain variables, and possible equal sign in key can be escaped
        with a backslash like ``escaped\\=key=value``. It is also possible to
        get items from existing dictionaries by simply using them like
        ``&{dict}``.

        If same key is used multiple times, the last value has precedence.
        The returned dictionary is ordered, and values with strings as keys
        can also be accessed using convenient dot-access syntax like
        ``${dict.key}``.

        Examples:
        | &{dict} = | Create Dictionary | key=value | foo=bar |
        | Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} |
        | &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new |
        | Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} |
        | Should Be Equal | ${dict.key} | value |

        This keyword was changed in Robot Framework 2.9 in many ways:
        - Moved from ``Collections`` library to ``BuiltIn``.
        - Support also non-string keys in ``key=value`` syntax.
        - Deprecated old syntax to give keys and values separately.
        - Returned dictionary is ordered and dot-accessible.
        """
        separate, combined = self._split_dict_items(items)
        if separate:
            # Deprecated legacy syntax: plain positional keys and values
            # before the first 'key=value' item.
            self.log("Giving keys and values separately to 'Create Dictionary' "
                     "keyword is deprecated. Use 'key=value' syntax instead.",
                     level='WARN')
            separate = self._format_separate_dict_items(separate)
        combined = DictVariableTableValue(combined).resolve(self._variables)
        result = DotDict(separate)
        # Combined 'key=value' / '&{dict}' items override separately given
        # items on key clashes.
        result.update(combined)
        return result

    def _split_dict_items(self, items):
        # Split the leading run of plain items (deprecated separate
        # keys/values) from the first 'key=value' or '&{dict}' item onwards.
        separate = []
        for item in items:
            name, value = split_from_equals(item)
            if value is not None or VariableSplitter(item).is_dict_variable():
                break
            separate.append(item)
        return separate, items[len(separate):]

    def _format_separate_dict_items(self, separate):
        separate = self._variables.replace_list(separate)
        if len(separate) % 2 != 0:
            raise DataError('Expected even number of keys and values, got %d.'
                            % len(separate))
        # Pair up alternating keys and values: [k1, v1, k2, v2] -> [[k1, v1], [k2, v2]].
        return [separate[i:i+2] for i in range(0, len(separate), 2)]


class _Verify(_BuiltInBase):

    def _set_and_remove_tags(self, tags):
        # Tags prefixed with '-' are removed (prefix stripped); others are set.
        set_tags = [tag for tag in tags if not tag.startswith('-')]
        remove_tags = [tag[1:] for tag in tags if tag.startswith('-')]
        if remove_tags:
            self.remove_tags(*remove_tags)
        if set_tags:
            self.set_tags(*set_tags)

    def fail(self, msg=None, *tags):
        """Fails the test with the given message and optionally alters its tags.

        The error message is specified using the ``msg`` argument.
        It is possible to use HTML in the given error message, similarly
        as with any other keyword accepting an error message, by prefixing
        the error with ``*HTML*``.

        It is possible to modify tags of the current test case by passing tags
        after the message. Tags starting with a hyphen (e.g. ``-regression``)
        are removed and others added. Tags are modified using `Set Tags` and
        `Remove Tags` internally, and the semantics setting and removing them
        are the same as with these keywords.

        Examples:
        | Fail | Test not ready   |             | | # Fails with the given message.    |
        | Fail | *HTML*<b>Test not ready</b> |  | | # Fails using HTML in the message. |
        | Fail | Test not ready   | not-ready   | | # Fails and adds 'not-ready' tag.  |
        | Fail | OS not supported | -regression | | # Removes tag 'regression'.        |
        | Fail | My message       | tag | -t*   | # Removes all tags starting with 't' except the newly added 'tag'. |

        See `Fatal Error` if you need to stop the whole test execution.

        Support for modifying tags was added in Robot Framework 2.7.4 and
        HTML message support in 2.8.
        """
        self._set_and_remove_tags(tags)
        raise AssertionError(msg) if msg else AssertionError()

    def fatal_error(self, msg=None):
        """Stops the whole test execution.

        The test or suite where this keyword is used fails with the provided
        message, and subsequent tests fail with a canned message.
        Possible teardowns will nevertheless be executed. See `Fail` if you
        only want to stop one test case unconditionally.
""" error = AssertionError(msg) if msg else AssertionError() error.ROBOT_EXIT_ON_FAILURE = True raise error def should_not_be_true(self, condition, msg=None): """Fails if the given condition is true. See `Should Be True` for details about how ``condition`` is evaluated and how ``msg`` can be used to override the default error message. """ if not msg: msg = "'%s' should not be true." % condition asserts.fail_if(self._is_true(condition), msg) def should_be_true(self, condition, msg=None): """Fails if the given condition is not true. If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as a Python expression as explained in `Evaluating expressions` and the keyword status is decided based on the result. If a non-string item is given, the status is got directly from its [http://docs.python.org/2/library/stdtypes.html#truth|truth value]. The default error message (``<condition> should be true``) is not very informative, but it can be overridden with the ``msg`` argument. Examples: | Should Be True | ${rc} < 10 | | Should Be True | '${status}' == 'PASS' | # Strings must be quoted | | Should Be True | ${number} | # Passes if ${number} is not zero | | Should Be True | ${list} | # Passes if ${list} is not empty | Variables used like ``${variable}``, as in the examples above, are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. 
Examples: | Should Be True | $rc < 10 | | Should Be True | $status == 'PASS' | # Expected string must be quoted | Starting from Robot Framework 2.8, `Should Be True` automatically imports Python's [http://docs.python.org/2/library/os.html|os] and [http://docs.python.org/2/library/sys.html|sys] modules that contain several useful attributes: | Should Be True | os.linesep == '\\n' | # Unixy | | Should Be True | os.linesep == '\\r\\n' | # Windows | | Should Be True | sys.platform == 'darwin' | # OS X | | Should Be True | sys.platform.startswith('java') | # Jython | """ if not msg: msg = "'%s' should be true." % condition asserts.fail_unless(self._is_true(condition), msg) def should_be_equal(self, first, second, msg=None, values=True): """Fails if the given objects are unequal. Optional ``msg`` and ``values`` arguments specify how to construct the error message if this keyword fails: - If ``msg`` is not given, the error message is ``<first> != <second>``. - If ``msg`` is given and ``values`` gets a true value, the error message is ``<msg>: <first> != <second>``. - If ``msg`` is given and ``values`` gets a false value, the error message is simply ``<msg>``. ``values`` is true by default, but can be turned to false by using, for example, string ``false`` or ``no values``. See `Boolean arguments` section for more details. If both arguments are multiline strings, the comparison is done using `multiline string comparisons`. 
""" self._log_types_at_info_if_different(first, second) self._should_be_equal(first, second, msg, values) def _should_be_equal(self, first, second, msg, values): if first == second: return include_values = self._include_values(values) if include_values and is_string(first) and is_string(second): self._raise_multi_diff(first, second) asserts.fail_unless_equal(first, second, msg, include_values) def _log_types_at_info_if_different(self, first, second): level = 'DEBUG' if type(first) == type(second) else 'INFO' self._log_types_at_level(level, first, second) def _raise_multi_diff(self, first, second): first_lines, second_lines = first.splitlines(), second.splitlines() if len(first_lines) < 3 or len(second_lines) < 3: return self.log("%s\n!=\n%s" % (first, second)) err = 'Multiline strings are different:\n' for line in difflib.unified_diff(first_lines, second_lines, fromfile='first', tofile='second', lineterm=''): err += line + '\n' raise AssertionError(err) def _include_values(self, values): return is_truthy(values) and str(values).upper() != 'NO VALUES' def should_not_be_equal(self, first, second, msg=None, values=True): """Fails if the given objects are equal. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) self._should_not_be_equal(first, second, msg, values) def _should_not_be_equal(self, first, second, msg, values): asserts.fail_if_equal(first, second, msg, self._include_values(values)) def should_not_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are equal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. 
See `Should Be Equal As Integers` for some usage examples. """ self._log_types_at_info_if_different(first, second) self._should_not_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are unequal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. Examples: | Should Be Equal As Integers | 42 | ${42} | Error message | | Should Be Equal As Integers | ABCD | abcd | base=16 | | Should Be Equal As Integers | 0b1011 | 11 | """ self._log_types_at_info_if_different(first, second) self._should_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_not_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are equal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. See `Should Be Equal As Numbers` for examples on how to use ``precision`` and why it does not always work as expected. See also `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_number(first, precision) second = self._convert_to_number(second, precision) self._should_not_be_equal(first, second, msg, values) def should_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are unequal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. 
        Examples:
        | Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 |
        | Should Be Equal As Numbers | 1.123 | 1.1 | precision=1  | # Passes |
        | Should Be Equal As Numbers | 1.123 | 1.4 | precision=0  | # Passes |
        | Should Be Equal As Numbers | 112.3 | 75  | precision=-2 | # Passes |

        As discussed in the documentation of `Convert To Number`, machines
        generally cannot store floating point numbers accurately. Because of
        this limitation, comparing floats for equality is problematic and
        a correct approach to use depends on the context. This keyword uses
        a very naive approach of rounding the numbers before comparing them,
        which is both prone to rounding errors and does not work very well if
        numbers are really big or small. For more information about comparing
        floats, and ideas on how to implement your own context specific
        comparison algorithm, see
        http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/.

        See `Should Not Be Equal As Numbers` for a negative version of this
        keyword and `Should Be Equal` for an explanation on how to override
        the default error message with ``msg`` and ``values``.
        """
        self._log_types_at_info_if_different(first, second)
        first = self._convert_to_number(first, precision)
        second = self._convert_to_number(second, precision)
        self._should_be_equal(first, second, msg, values)

    def should_not_be_equal_as_strings(self, first, second, msg=None,
                                       values=True):
        """Fails if objects are equal after converting them to strings.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        self._log_types_at_info_if_different(first, second)
        # Convert both operands to strings before comparing.
        # (Python 2 syntax: 'for i in first, second' iterates the tuple.)
        first, second = [self._convert_to_string(i) for i in first, second]
        self._should_not_be_equal(first, second, msg, values)

    def should_be_equal_as_strings(self, first, second, msg=None, values=True):
        """Fails if objects are unequal after converting them to strings.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.

        If both arguments are multiline strings, the comparison is done using
        `multiline string comparisons`.
        """
        self._log_types_at_info_if_different(first, second)
        first, second = [self._convert_to_string(i) for i in first, second]
        self._should_be_equal(first, second, msg, values)

    def should_not_start_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` starts with the string ``str2``.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'starts with')
        asserts.fail_if(str1.startswith(str2), msg)

    def should_start_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` does not start with the string ``str2``.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values,
                                   'does not start with')
        asserts.fail_unless(str1.startswith(str2), msg)

    def should_not_end_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` ends with the string ``str2``.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'ends with')
        asserts.fail_if(str1.endswith(str2), msg)

    def should_end_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` does not end with the string ``str2``.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'does not end with')
        asserts.fail_unless(str1.endswith(str2), msg)

    def should_not_contain(self, item1, item2, msg=None, values=True):
        """Fails if ``item1`` contains ``item2`` one or more times.
        Works with strings, lists, and anything that supports Python's ``in``
        operator.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.

        Examples:
        | Should Not Contain | ${output}    | FAILED |
        | Should Not Contain | ${some_list} | value  |
        """
        msg = self._get_string_msg(item1, item2, msg, values, 'contains')
        asserts.fail_if(item2 in item1, msg)

    def should_contain(self, item1, item2, msg=None, values=True):
        """Fails if ``item1`` does not contain ``item2`` one or more times.

        Works with strings, lists, and anything that supports Python's ``in``
        operator.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.

        Examples:
        | Should Contain | ${output}    | PASS  |
        | Should Contain | ${some_list} | value |
        """
        msg = self._get_string_msg(item1, item2, msg, values,
                                   'does not contain')
        asserts.fail_unless(item2 in item1, msg)

    def should_contain_x_times(self, item1, item2, count, msg=None):
        """Fails if ``item1`` does not contain ``item2`` ``count`` times.

        Works with strings, lists and all objects that `Get Count` works
        with. The default error message can be overridden with ``msg`` and
        the actual count is always logged.

        Examples:
        | Should Contain X Times | ${output}    | hello | 2 |
        | Should Contain X Times | ${some list} | value | 3 |
        """
        count = self._convert_to_integer(count)
        x = self.get_count(item1, item2)
        if not msg:
            msg = "'%s' contains '%s' %d time%s, not %d time%s." \
                    % (unic(item1), unic(item2), x, s(x), count, s(count))
        self.should_be_equal_as_integers(x, count, msg, values=False)

    def get_count(self, item1, item2):
        """Returns and logs how many times ``item2`` is found from ``item1``.

        This keyword works with Python strings and lists and all objects
        that either have ``count`` method or can be converted to Python lists.

        Example:
        | ${count} = | Get Count | ${some item} | interesting value |
        | Should Be True | 5 < ${count} < 10 |
        """
        if not hasattr(item1, 'count'):
            try:
                # Fall back to list conversion for objects without a 'count'
                # method but that are otherwise iterable.
                item1 = list(item1)
            except:
                # Broad except is deliberate: any failure to iterate item1 is
                # reported uniformly as a conversion error.
                raise RuntimeError("Converting '%s' to list failed: %s"
                                   % (item1, get_error_message()))
        count = item1.count(item2)
        self.log('Item found from the first item %d time%s' % (count, s(count)))
        return count

    def should_not_match(self, string, pattern, msg=None, values=True):
        """Fails if the given ``string`` matches the given ``pattern``.

        Pattern matching is similar as matching files in a shell, and it is
        always case-sensitive. In the pattern ``*`` matches to anything and
        ``?`` matches to any single character.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(string, pattern, msg, values, 'matches')
        asserts.fail_if(self._matches(string, pattern), msg)

    def should_match(self, string, pattern, msg=None, values=True):
        """Fails unless the given ``string`` matches the given ``pattern``.

        Pattern matching is similar as matching files in a shell, and it is
        always case-sensitive. In the pattern, ``*`` matches to anything and
        ``?`` matches to any single character.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(string, pattern, msg, values,
                                   'does not match')
        asserts.fail_unless(self._matches(string, pattern), msg)

    def should_match_regexp(self, string, pattern, msg=None, values=True):
        """Fails if ``string`` does not match ``pattern`` as a regular expression.

        Regular expression check is implemented using the Python
        [http://docs.python.org/2/library/re.html|re module]. Python's regular
        expression syntax is derived from Perl, and it is thus also very
        similar to the syntax used, for example, in Java, Ruby and .NET.
        Things to note about the regexp syntax in Robot Framework test data:

        1) Backslash is an escape character in the test data, and possible
        backslashes in the pattern must thus be escaped with another backslash
        (e.g. ``\\\\d\\\\w+``).

        2) Strings that may contain special characters, but should be handled
        as literal strings, can be escaped with the `Regexp Escape` keyword.

        3) The given pattern does not need to match the whole string. For
        example, the pattern ``ello`` matches the string ``Hello world!``. If
        a full match is needed, the ``^`` and ``$`` characters can be used to
        denote the beginning and end of the string, respectively. For example,
        ``^ello$`` only matches the exact string ``ello``.

        4) Possible flags altering how the expression is parsed (e.g.
        ``re.IGNORECASE``, ``re.MULTILINE``) can be set by prefixing the
        pattern with the ``(?iLmsux)`` group like ``(?im)pattern``. The
        available flags are ``i`` (case-insensitive), ``m`` (multiline mode),
        ``s`` (dotall mode), ``x`` (verbose), ``u`` (Unicode dependent) and
        ``L`` (locale dependent).

        If this keyword passes, it returns the portion of the string that
        matched the pattern. Additionally, the possible captured groups are
        returned.

        See the `Should Be Equal` keyword for an explanation on how to override
        the default error message with the ``msg`` and ``values`` arguments.

        Examples:
        | Should Match Regexp | ${output} | \\\\d{6}   | # Output contains six numbers  |
        | Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more |
        | ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ |
        | ${match} | ${group1} | ${group2} = |
        | ...      | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) |
        =>
        | ${ret} = 'Foo: 42'
        | ${match} = 'Bar: 43'
        | ${group1} = 'Bar'
        | ${group2} = '43'
        """
        msg = self._get_string_msg(string, pattern, msg, values,
                                   'does not match')
        res = re.search(pattern, string)
        asserts.fail_if_none(res, msg, values=False)
        # Return the full match alone, or the full match followed by the
        # captured groups when the pattern has groups.
        match = res.group(0)
        groups = res.groups()
        if groups:
            return [match] + list(groups)
        return match

    def should_not_match_regexp(self, string, pattern, msg=None, values=True):
        """Fails if ``string`` matches ``pattern`` as a regular expression.

        See `Should Match Regexp` for more information about arguments.
        """
        msg = self._get_string_msg(string, pattern, msg, values, 'matches')
        asserts.fail_unless_none(re.search(pattern, string), msg, values=False)

    def get_length(self, item):
        """Returns and logs the length of the given item as an integer.

        The item can be anything that has a length, for example, a string,
        a list, or a mapping. The keyword first tries to get the length with
        the Python function ``len``, which calls the item's ``__len__`` method
        internally. If that fails, the keyword tries to call the item's
        possible ``length`` and ``size`` methods directly. The final attempt is
        trying to get the value of the item's ``length`` attribute. If all
        these attempts are unsuccessful, the keyword fails.

        Examples:
        | ${length} = | Get Length    | Hello, world! |        |
        | Should Be Equal As Integers | ${length}     | 13     |
        | @{list} =   | Create List   | Hello,        | world! |
        | ${length} = | Get Length    | ${list}       |        |
        | Should Be Equal As Integers | ${length}     | 2      |

        See also `Length Should Be`, `Should Be Empty` and `Should
        Not Be Empty`.
""" length = self._get_length(item) self.log('Length is %d' % length) return length def _get_length(self, item): try: return len(item) except RERAISED_EXCEPTIONS: raise except: try: return item.length() except RERAISED_EXCEPTIONS: raise except: try: return item.size() except RERAISED_EXCEPTIONS: raise except: try: return item.length except RERAISED_EXCEPTIONS: raise except: raise RuntimeError("Could not get length of '%s'." % item) def length_should_be(self, item, length, msg=None): """Verifies that the length of the given item is correct. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ length = self._convert_to_integer(length) actual = self.get_length(item) if actual != length: raise AssertionError(msg or "Length of '%s' should be %d but is %d." % (item, length, actual)) def should_be_empty(self, item, msg=None): """Verifies that the given item is empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) > 0: raise AssertionError(msg or "'%s' should be empty." % item) def should_not_be_empty(self, item, msg=None): """Verifies that the given item is not empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) == 0: raise AssertionError(msg or "'%s' should not be empty." % item) def _get_string_msg(self, str1, str2, msg, values, delim): default = "'%s' %s '%s'" % (unic(str1), delim, unic(str2)) if not msg: msg = default elif self._include_values(values): msg = '%s: %s' % (msg, default) return msg class _Variables(_BuiltInBase): def get_variables(self, no_decoration=False): """Returns a dictionary containing all variables in the current scope. 
Variables are returned as a special dictionary that allows accessing variables in space, case, and underscore insensitive manner similarly as accessing variables in the test data. This dictionary supports all same operations as normal Python dictionaries and, for example, Collections library can be used to access or modify it. Modifying the returned dictionary has no effect on the variables available in the current scope. By default variables are returned with ``${}``, ``@{}`` or ``&{}`` decoration based on variable types. Giving a true value (see `Boolean arguments`) to the optional argument ``no_decoration`` will return the variables without the decoration. This option is new in Robot Framework 2.9. Example: | ${example_variable} = | Set Variable | example value | | ${variables} = | Get Variables | | | Dictionary Should Contain Key | ${variables} | \\${example_variable} | | Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} | | Set To Dictionary | ${variables} | \\${name} | value | | Variable Should Not Exist | \\${name} | | | | ${no decoration} = | Get Variables | no_decoration=Yes | | Dictionary Should Contain Key | ${no decoration} | example_variable | Note: Prior to Robot Framework 2.7.4 variables were returned as a custom object that did not support all dictionary methods. """ return self._variables.as_dict(decoration=is_falsy(no_decoration)) @run_keyword_variant(resolve=0) def get_variable_value(self, name, default=None): """Returns variable value or ``default`` if the variable does not exist. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. 
        Examples:
        | ${x} = | Get Variable Value | ${a} | default |
        | ${y} = | Get Variable Value | ${a} | ${b}    |
        | ${z} = | Get Variable Value | ${z} |         |
        =>
        | ${x} gets value of ${a} if ${a} exists and string 'default' otherwise
        | ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise
        | ${z} is set to Python None if it does not exist previously

        See `Set Variable If` for another keyword to set variables dynamically.
        """
        try:
            return self._variables[self._get_var_name(name)]
        except DataError:
            # Variable does not exist: resolve possible variables in the
            # default value before returning it.
            return self._variables.replace_scalar(default)

    def log_variables(self, level='INFO'):
        """Logs all variables in the current scope with given log level."""
        variables = self.get_variables()
        # Sort case-insensitively by the bare name, i.e. without the
        # surrounding '${'/'}' decoration.
        for name in sorted(variables, key=lambda s: s[2:-1].lower()):
            msg = format_assign_message(name, variables[name], cut_long=False)
            self.log(msg, level)

    @run_keyword_variant(resolve=0)
    def variable_should_exist(self, name, msg=None):
        """Fails unless the given variable exists within the current scope.

        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
        that the former has some limitations explained in `Set Suite Variable`.

        The default error message can be overridden with the ``msg`` argument.

        See also `Variable Should Not Exist` and `Keyword Should Exist`.
        """
        name = self._get_var_name(name)
        # A custom message may itself contain variables; resolve them only
        # when the caller provided one.
        msg = self._variables.replace_string(msg) if msg \
            else "Variable %s does not exist." % name
        try:
            self._variables[name]
        except DataError:
            raise AssertionError(msg)

    @run_keyword_variant(resolve=0)
    def variable_should_not_exist(self, name, msg=None):
        """Fails if the given variable exists within the current scope.

        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
        that the former has some limitations explained in `Set Suite Variable`.

        The default error message can be overridden with the ``msg`` argument.

        See also `Variable Should Exist` and `Keyword Should Exist`.
        """
        name = self._get_var_name(name)
        msg = self._variables.replace_string(msg) if msg \
            else "Variable %s exists." % name
        try:
            self._variables[name]
        except DataError:
            pass
        else:
            raise AssertionError(msg)

    def replace_variables(self, text):
        """Replaces variables in the given text with their current values.

        If the text contains undefined variables, this keyword fails.
        If the given ``text`` contains only a single variable, its value is
        returned as-is and it can be any object. Otherwise this keyword
        always returns a string.

        Example:
        The file ``template.txt`` contains ``Hello ${NAME}!`` and variable
        ``${NAME}`` has the value ``Robot``.

        | ${template} =   | Get File          | ${CURDIR}/template.txt |
        | ${message} =    | Replace Variables | ${template}            |
        | Should Be Equal | ${message}        | Hello Robot!           |
        """
        return self._variables.replace_scalar(text)

    def set_variable(self, *values):
        """Returns the given values which can then be assigned to a variables.

        This keyword is mainly used for setting scalar variables.
        Additionally it can be used for converting a scalar variable
        containing a list to a list variable or to multiple scalar variables.
        It is recommended to use `Create List` when creating new lists.

        Examples:
        | ${hi} =   | Set Variable | Hello, world! |
        | ${hi2} =  | Set Variable | I said: ${hi} |
        | ${var1}   | ${var2} =    | Set Variable | Hello | world |
        | @{list} = | Set Variable | ${list with some items} |
        | ${item1}  | ${item2} =   | Set Variable | ${list with 2 items} |

        Variables created with this keyword are available only in the
        scope where they are created. See `Set Global Variable`,
        `Set Test Variable` and `Set Suite Variable` for information on how to
        set variables so that they are available also in a larger scope.
""" if len(values) == 0: return '' elif len(values) == 1: return values[0] else: return list(values) @run_keyword_variant(resolve=0) def set_test_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current test. Variables set with this keyword are available everywhere within the scope of the currently executed test case. For example, if you set a variable in a user keyword, it is available both in the test case level and also in all other user keywords used in the current test. Other test cases will not see variables set with this keyword. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_test(name, value) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_suite_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current suite. Variables set with this keyword are available everywhere within the scope of the currently executed test suite. Setting variables with this keyword thus has the same effect as creating them using the Variable table in the test data file or importing them from variable files. Possible child test suites do not see variables set with this keyword by default. Starting from Robot Framework 2.9, that can be controlled by using ``children=<option>`` as the last argument. If the specified ``<option>`` is a non-empty string or any other value considered true in Python, the variable is set also to the child suites. Parent and sibling suites will never see variables set with this keyword. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``. Variable value can be given using the same syntax as when variables are created in the Variable table. If a variable already exists within the new scope, its value will be overwritten. 
Otherwise a new variable is created. If a variable already exists within the current scope, the value can be left empty and the variable within the new scope gets the value within the current scope. Examples: | Set Suite Variable | ${SCALAR} | Hello, world! | | Set Suite Variable | ${SCALAR} | Hello, world! | children=true | | Set Suite Variable | @{LIST} | First item | Second item | | Set Suite Variable | &{DICT} | key=value | foo=bar | | ${ID} = | Get ID | | Set Suite Variable | ${ID} | To override an existing value with an empty value, use built-in variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``: | Set Suite Variable | ${SCALAR} | ${EMPTY} | | Set Suite Variable | @{LIST} | @{EMPTY} | # New in RF 2.7.4 | | Set Suite Variable | &{DICT} | &{EMPTY} | # New in RF 2.9 | *NOTE:* If the variable has value which itself is a variable (escaped or not), you must always use the escaped format to set the variable: Example: | ${NAME} = | Set Variable | \\${var} | | Set Suite Variable | ${NAME} | value | # Sets variable ${var} | | Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} | This limitation applies also to `Set Test Variable`, `Set Global Variable`, `Variable Should Exist`, `Variable Should Not Exist` and `Get Variable Value` keywords. """ name = self._get_var_name(name) if (values and is_string(values[-1]) and values[-1].startswith('children=')): children = self._variables.replace_scalar(values[-1][9:]) children = is_truthy(children) values = values[:-1] else: children = False value = self._get_var_value(name, values) self._variables.set_suite(name, value, children=children) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_global_variable(self, name, *values): """Makes a variable available globally in all tests and suites. Variables set with this keyword are globally available in all test cases and suites executed after setting them. 
Setting variables with this keyword thus has the same effect as creating from the command line using the options ``--variable`` or ``--variablefile``. Because this keyword can change variables everywhere, it should be used with care. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_global(name, value) self._log_set_variable(name, value) # Helpers def _get_var_name(self, orig): name = self._resolve_possible_variable(orig) try: return self._unescape_variable_if_needed(name) except ValueError: raise RuntimeError("Invalid variable syntax '%s'." % orig) def _resolve_possible_variable(self, name): try: resolved = self._variables.replace_string(name) return self._unescape_variable_if_needed(resolved) except (KeyError, ValueError, DataError): return name def _unescape_variable_if_needed(self, name): if name.startswith('\\'): name = name[1:] if len(name) < 2: raise ValueError if name[0] in '$@&' and name[1] != '{': name = '%s{%s}' % (name[0], name[1:]) if is_var(name): return name # Support for possible internal variables (issue 397) name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1])) if is_var(name): return name raise ValueError def _get_var_value(self, name, values): if not values: return self._variables[name] # TODO: In RF 2.10/3.0 the if branch below can be removed and # VariableTableValue used with all variables. See issue #1919. if name[0] == '$': if len(values) != 1 or VariableSplitter(values[0]).is_list_variable(): raise DataError("Setting list value to scalar variable '%s' " "is not supported anymore. Create list " "variable '@%s' instead." 
                                % (name, name[1:]))
            return self._variables.replace_scalar(values[0])
        return VariableTableValue(values, name).resolve(self._variables)

    def _log_set_variable(self, name, value):
        # Logs the assignment in the same '${name} = value' format as normal
        # variable assignments.
        self.log(format_assign_message(name, value))


class _RunKeyword(_BuiltInBase):

    # If you use any of these run keyword variants from another library, you
    # should register those keywords with 'register_run_keyword' method. See
    # the documentation of that method at the end of this file. There are also
    # other run keyword variant keywords in BuiltIn which can also be seen
    # at the end of this file.

    def run_keyword(self, name, *args):
        """Executes the given keyword with the given arguments.

        Because the name of the keyword to execute is given as an argument, it
        can be a variable and thus set dynamically, e.g. from a return value of
        another keyword or from the command line.
        """
        if not is_string(name):
            raise RuntimeError('Keyword name must be a string.')
        kw = Keyword(name, args=args)
        return kw.run(self._context)

    def run_keywords(self, *keywords):
        """Executes all the given keywords in a sequence.

        This keyword is mainly useful in setups and teardowns when they need
        to take care of multiple actions and creating a new higher level user
        keyword would be an overkill.

        By default all arguments are expected to be keywords to be executed.

        Examples:
        | Run Keywords | Initialize database | Start servers | Clear logs |
        | Run Keywords | ${KW 1} | ${KW 2} |
        | Run Keywords | @{KEYWORDS} |

        Starting from Robot Framework 2.7.6, keywords can also be run with
        arguments using upper case ``AND`` as a separator between keywords.
        The keywords are executed so that the first argument is the first
        keyword and proceeding arguments until the first ``AND`` are arguments
        to it. First argument after the first ``AND`` is the second keyword and
        proceeding arguments until the next ``AND`` are its arguments. And so
        on.
        Examples:
        | Run Keywords | Initialize database | db1 | AND | Start servers | server1 | server2 |
        | Run Keywords | Initialize database | ${DB NAME} | AND | Start servers | @{SERVERS} | AND | Clear logs |
        | Run Keywords | ${KW} | AND | @{KW WITH ARGS} |

        Notice that the ``AND`` control argument must be used explicitly and
        cannot itself come from a variable. If you need to use literal ``AND``
        string as argument, you can either use variables or escape it with
        a backslash like ``\\AND``.
        """
        self._run_keywords(self._split_run_keywords(list(keywords)))

    def _run_keywords(self, iterable):
        # Runs the given (name, args) pairs, collecting continuable failures
        # so that later keywords still get a chance to run before the
        # combined failures are raised.
        errors = []
        for kw, args in iterable:
            try:
                self.run_keyword(kw, *args)
            except ExecutionPassed as err:
                err.set_earlier_failures(errors)
                raise err
            except ExecutionFailed as err:
                errors.extend(err.get_errors())
                if not err.can_continue(self._context.in_teardown):
                    break
        if errors:
            raise ExecutionFailures(errors)

    def _split_run_keywords(self, keywords):
        # Without 'AND' every argument is a keyword name executed without
        # arguments; with 'AND' the arguments are split into keyword calls.
        if 'AND' not in keywords:
            for name in self._variables.replace_list(keywords):
                yield name, ()
        else:
            for name, args in self._split_run_keywords_from_and(keywords):
                yield name, args

    def _split_run_keywords_from_and(self, keywords):
        while 'AND' in keywords:
            index = keywords.index('AND')
            yield self._resolve_run_keywords_name_and_args(keywords[:index])
            keywords = keywords[index+1:]
        yield self._resolve_run_keywords_name_and_args(keywords)

    def _resolve_run_keywords_name_and_args(self, kw_call):
        # NOTE(review): 'replace_until=1' appears to resolve variables only
        # far enough to get the keyword name -- confirm against
        # Variables.replace_list before relying on this.
        kw_call = self._variables.replace_list(kw_call, replace_until=1)
        if not kw_call:
            raise DataError('Incorrect use of AND')
        return kw_call[0], kw_call[1:]

    def run_keyword_if(self, condition, name, *args):
        """Runs the given keyword with the given arguments, if ``condition`` is true.

        The given ``condition`` is evaluated in Python as explained in
        `Evaluating expressions`, and ``name`` and ``*args`` have same
        semantics as with `Run Keyword`.
        Example, a simple if/else construct:
        | ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` |
        | `Run Keyword If`     | '${status}' == 'PASS' | `Some Action`    | arg |
        | `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` |

        In this example, only either `Some Action` or `Another Action` is
        executed, based on the status of `My Keyword`. Instead of `Run Keyword
        And Ignore Error` you can also use `Run Keyword And Return Status`.

        Variables used like ``${variable}``, as in the examples above, are
        replaced in the expression before evaluation. Variables are also
        available in the evaluation namespace and can be accessed using
        special syntax ``$variable``. This is a new feature in Robot Framework
        2.9 and it is explained more thoroughly in `Evaluating expressions`.

        Example:
        | `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` |

        Starting from Robot version 2.7.4, this keyword supports also optional
        ELSE and ELSE IF branches. Both of these are defined in ``*args`` and
        must use exactly format ``ELSE`` or ``ELSE IF``, respectively. ELSE
        branches must contain first the name of the keyword to execute and
        then its possible arguments. ELSE IF branches must first contain a
        condition, like the first argument to this keyword, and then the
        keyword to execute and its possible arguments. It is possible to have
        ELSE branch after ELSE IF and to have multiple ELSE IF branches.

        Given previous example, if/else construct can also be created like
        this:
        | ${status} | ${value} = | `Run Keyword And Ignore Error` | My Keyword |
        | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` |

        The return value is the one of the keyword that was executed or None
        if no keyword was executed (i.e. if ``condition`` was false). Hence,
        it is recommended to use ELSE and/or ELSE IF branches to conditionally
        assign return values from keyword to variables (to conditionally
        assign fixed values to variables, see `Set Variable If`).
        This is illustrated by the example below:

        | ${var1} =   | `Run Keyword If` | ${rc} == 0     | `Some keyword returning a value` |
        | ...         | ELSE IF          | 0 < ${rc} < 42 | `Another keyword` |
        | ...         | ELSE IF          | ${rc} < 0      | `Another keyword with args` | ${rc} | arg2 |
        | ...         | ELSE             | `Final keyword to handle abnormal cases` | ${rc} |
        | ${var2} =   | `Run Keyword If` | ${condition}   | `Some keyword` |

        In this example, ${var2} will be set to None if ${condition} is false.

        Notice that ``ELSE`` and ``ELSE IF`` control words must be used
        explicitly and thus cannot come from variables. If you need to use
        literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape
        them with a backslash like ``\\ELSE`` and ``\\ELSE IF``.

        Starting from Robot Framework 2.8, Python's
        [http://docs.python.org/2/library/os.html|os] and
        [http://docs.python.org/2/library/sys.html|sys] modules are
        automatically imported when evaluating the ``condition``.
        Attributes they contain can thus be used in the condition:

        | `Run Keyword If` | os.sep == '/' | `Unix Keyword` |
        | ...              | ELSE IF       | sys.platform.startswith('java') | `Jython Keyword` |
        | ...              | ELSE          | `Windows Keyword` |
        """
        args, branch = self._split_elif_or_else_branch(args)
        if self._is_true(condition):
            return self.run_keyword(name, *args)
        return branch()

    def _split_elif_or_else_branch(self, args):
        # Returns the arguments belonging to the main keyword and a
        # zero-argument callable that executes the first ELSE/ELSE IF branch
        # (or does nothing if there is no branch).
        if 'ELSE IF' in args:
            args, branch = self._split_branch(args, 'ELSE IF', 2,
                                              'condition and keyword')
            return args, lambda: self.run_keyword_if(*branch)
        if 'ELSE' in args:
            args, branch = self._split_branch(args, 'ELSE', 1, 'keyword')
            return args, lambda: self.run_keyword(*branch)
        return args, lambda: None

    def _split_branch(self, args, control_word, required, required_error):
        # Splits arguments at the first occurrence of the given control word
        # and validates that the branch has enough items after it.
        index = list(args).index(control_word)
        branch = self._variables.replace_list(args[index+1:], required)
        if len(branch) < required:
            raise DataError('%s requires %s.'
                            % (control_word, required_error))
        return args[:index], branch

    def run_keyword_unless(self, condition, name, *args):
        """Runs the given keyword with the given arguments, if ``condition`` is false.

        See `Run Keyword If` for more information and an example.
        """
        if not self._is_true(condition):
            return self.run_keyword(name, *args)

    def run_keyword_and_ignore_error(self, name, *args):
        """Runs the given keyword with the given arguments and ignores possible error.

        This keyword returns two values, so that the first is either string
        ``PASS`` or ``FAIL``, depending on the status of the executed keyword.
        The second value is either the return value of the keyword or the
        received error message. See `Run Keyword And Return Status` if you are
        only interested in the execution status.

        The keyword name and arguments work as in `Run Keyword`. See
        `Run Keyword If` for a usage example.

        Errors caused by invalid syntax, timeouts, or fatal exceptions are not
        caught by this keyword. Otherwise this keyword itself never fails.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        try:
            return 'PASS', self.run_keyword(name, *args)
        except ExecutionFailed as err:
            # Syntax errors, timeouts and fatal errors must not be ignored.
            if err.dont_continue:
                raise
            return 'FAIL', unicode(err)

    def run_keyword_and_return_status(self, name, *args):
        """Runs the given keyword with given arguments and returns the status as a Boolean value.

        This keyword returns Boolean ``True`` if the keyword that is executed
        succeeds and ``False`` if it fails. This is useful, for example, in
        combination with `Run Keyword If`. If you are interested in the error
        message or return value, use `Run Keyword And Ignore Error` instead.

        The keyword name and arguments work as in `Run Keyword`.

        Example:
        | ${passed} = | `Run Keyword And Return Status` | Keyword | args |
        | `Run Keyword If` | ${passed} | Another keyword |

        Errors caused by invalid syntax, timeouts, or fatal exceptions are not
        caught by this keyword. Otherwise this keyword itself never fails.

        New in Robot Framework 2.7.6.
""" status, _ = self.run_keyword_and_ignore_error(name, *args) return status == 'PASS' def run_keyword_and_continue_on_failure(self, name, *args): """Runs the keyword and continues execution even if a failure occurs. The keyword name and arguments work as with `Run Keyword`. Example: | Run Keyword And Continue On Failure | Fail | This is a stupid example | | Log | This keyword is executed | The execution is not continued if the failure is caused by invalid syntax, timeout, or fatal exception. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: return self.run_keyword(name, *args) except ExecutionFailed as err: if not err.dont_continue: err.continue_on_failure = True raise err def run_keyword_and_expect_error(self, expected_error, name, *args): """Runs the keyword and checks that the expected error occurred. The expected error must be given in the same format as in Robot Framework reports. It can be a pattern containing characters ``?``, which matches to any single character and ``*``, which matches to any number of any characters. ``name`` and ``*args`` have same semantics as with `Run Keyword`. If the expected error occurs, the error message is returned and it can be further processed/tested, if needed. If there is no error, or the error does not match the expected error, this keyword fails. Examples: | Run Keyword And Expect Error | My error | Some Keyword | arg1 | arg2 | | ${msg} = | Run Keyword And Expect Error | * | My KW | | Should Start With | ${msg} | Once upon a time in | Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise else: raise AssertionError("Expected error '%s' did not occur." % expected_error) if not self._matches(unicode(err), expected_error): raise AssertionError("Expected error '%s' but got '%s'." 
                                 % (expected_error, err))
        return unicode(err)

    def repeat_keyword(self, times, name, *args):
        """Executes the specified keyword multiple times.

        ``name`` and ``args`` define the keyword that is executed similarly
        as with `Run Keyword`, and ``times`` specifies how many times the
        keyword should be executed.

        ``times`` can be given as an integer or as a string that can be
        converted to an integer. If it is a string, it can have postfix
        ``times`` or ``x`` (case and space insensitive) to make the expression
        more explicit.

        If ``times`` is zero or negative, the keyword is not executed at all.
        This keyword fails immediately if any of the execution rounds fails.

        Examples:
        | Repeat Keyword | 5 times | Go to Previous Page |
        | Repeat Keyword | ${var}  | Some Keyword | arg1 | arg2 |
        """
        times = self._get_times_to_repeat(times)
        self._run_keywords(self._yield_repeated_keywords(times, name, args))

    def _get_times_to_repeat(self, times, require_postfix=False):
        # Converts values like '5 times' or '3x' to an integer. With
        # 'require_postfix' a value without a postfix raises ValueError.
        times = normalize(str(times))
        if times.endswith('times'):
            times = times[:-5]
        elif times.endswith('x'):
            times = times[:-1]
        elif require_postfix:
            raise ValueError
        return self._convert_to_integer(times)

    def _yield_repeated_keywords(self, times, name, args):
        # Yields (name, args) pairs 'times' times, logging each round.
        if times <= 0:
            self.log("Keyword '%s' repeated zero times." % name)
        for i in xrange(times):
            self.log("Repeating keyword, round %d/%d." % (i+1, times))
            yield name, args

    def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args):
        """Runs the specified keyword and retries if it fails.

        ``name`` and ``args`` define the keyword that is executed similarly
        as with `Run Keyword`. How long to retry running the keyword is
        defined using ``retry`` argument either as timeout or count.
        ``retry_interval`` is the time to wait before trying to run the
        keyword again after the previous run has failed.

        If ``retry`` is given as timeout, it must be in Robot Framework's
        time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is
        explained in an appendix of Robot Framework User Guide.
        If it is given as count, it must have ``times`` or ``x`` postfix (e.g.
        ``5 times``, ``10 x``). ``retry_interval`` must always be given in
        Robot Framework's time format.

        If the keyword does not succeed regardless of retries, this keyword
        fails. If the executed keyword passes, its return value is returned.

        Examples:
        | Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument |
        | ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword |

        All normal failures are caught by this keyword. Errors caused by
        invalid syntax, test or keyword timeouts, or fatal exceptions (caused
        e.g. by `Fatal Error`) are not caught.

        Running the same keyword multiple times inside this keyword can create
        lots of output and considerably increase the size of the generated
        output files. Starting from Robot Framework 2.7, it is possible to
        remove unnecessary keywords from the outputs using
        ``--RemoveKeywords WUKS`` command line option.

        Support for specifying ``retry`` as a number of times to retry is
        a new feature in Robot Framework 2.9.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        # 'retry' is interpreted first as a count ('3x', '5 times') and,
        # failing that, as a timeout in Robot's time format. Whichever limit
        # was not given stays at -1 and never triggers below.
        maxtime = count = -1
        try:
            count = self._get_times_to_repeat(retry, require_postfix=True)
        except ValueError:
            timeout = timestr_to_secs(retry)
            maxtime = time.time() + timeout
            message = 'for %s' % secs_to_timestr(timeout)
        else:
            if count <= 0:
                raise ValueError('Retry count %d is not positive.' % count)
            message = '%d time%s' % (count, s(count))
        retry_interval = timestr_to_secs(retry_interval)
        while True:
            try:
                return self.run_keyword(name, *args)
            except ExecutionFailed as err:
                if err.dont_continue:
                    raise
                count -= 1
                if time.time() > maxtime > 0 or count == 0:
                    raise AssertionError("Keyword '%s' failed after retrying "
                                         "%s. The last error was: %s"
                                         % (name, message, err))
                self._sleep_in_parts(retry_interval)

    def set_variable_if(self, condition, *values):
        """Sets variable based on the given condition.

        The basic usage is giving a condition and two values. The
        The given condition is first evaluated the same way as with the
        `Should Be True` keyword. If the condition is true, then the first
        value is returned, and otherwise the second value is returned. The
        second value can also be omitted, in which case it has a default
        value None. This usage is illustrated in the examples below, where
        ``${rc}`` is assumed to be zero.

        | ${var1} = | Set Variable If | ${rc} == 0 | zero     | nonzero |
        | ${var2} = | Set Variable If | ${rc} > 0  | value1   | value2  |
        | ${var3} = | Set Variable If | ${rc} > 0  | whatever |         |
        | => | ${var1} = 'zero' | ${var2} = 'value2' | ${var3} = None

        It is also possible to have 'else if' support by replacing the second
        value with another condition, and having two new values after it. If
        the first condition is not true, the second is evaluated and one of
        the values after it is returned based on its truth value. This can be
        continued by adding more conditions without a limit.

        | ${var} = | Set Variable If | ${rc} == 0        | zero           |
        | ...      | ${rc} > 0       | greater than zero | less than zero |
        |          |
        | ${var} = | Set Variable If |
        | ...      | ${rc} == 0 | zero             |
        | ...      | ${rc} == 1 | one              |
        | ...      | ${rc} == 2 | two              |
        | ...      | ${rc} > 2  | greater than two |
        | ...      | ${rc} < 0  | less than zero   |

        Use `Get Variable Value` if you need to set variables
        dynamically based on whether a variable exists or not.
""" values = self._verify_values_for_set_variable_if(list(values)) if self._is_true(condition): return self._variables.replace_scalar(values[0]) values = self._verify_values_for_set_variable_if(values[1:], True) if len(values) == 1: return self._variables.replace_scalar(values[0]) return self.run_keyword('BuiltIn.Set Variable If', *values[0:]) def _verify_values_for_set_variable_if(self, values, default=False): if not values: if default: return [None] raise RuntimeError('At least one value is required') if is_list_var(values[0]): values[:1] = [escape(item) for item in self._variables[values[0]]] return self._verify_values_for_set_variable_if(values) return values def run_keyword_if_test_failed(self, name, *args): """Runs the given keyword with the given arguments, if the test failed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. """ test = self._get_test_in_teardown('Run Keyword If Test Failed') if not test.passed: return self.run_keyword(name, *args) def run_keyword_if_test_passed(self, name, *args): """Runs the given keyword with the given arguments, if the test passed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. """ test = self._get_test_in_teardown('Run Keyword If Test Passed') if test.passed: return self.run_keyword(name, *args) def run_keyword_if_timeout_occurred(self, name, *args): """Runs the given keyword if either a test or a keyword timeout has occurred. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. 
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        self._get_test_in_teardown('Run Keyword If Timeout Occurred')
        if self._context.timeout_occurred:
            return self.run_keyword(name, *args)

    def _get_test_in_teardown(self, kwname):
        # Returns the current test when called inside its teardown; otherwise
        # raises RuntimeError mentioning the calling keyword's name.
        ctx = self._context
        if ctx.test and ctx.in_test_teardown:
            return ctx.test
        raise RuntimeError("Keyword '%s' can only be used in test teardown."
                           % kwname)

    def run_keyword_if_all_critical_tests_passed(self, name, *args):
        """Runs the given keyword with the given arguments, if all critical tests passed.

        This keyword can only be used in suite teardown. Trying to use it in
        any other place will result in an error.

        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If '
                                            'All Critical Tests Passed')
        if suite.statistics.critical.failed == 0:
            return self.run_keyword(name, *args)

    def run_keyword_if_any_critical_tests_failed(self, name, *args):
        """Runs the given keyword with the given arguments, if any critical tests failed.

        This keyword can only be used in a suite teardown. Trying to use it
        anywhere else results in an error.

        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If '
                                            'Any Critical Tests Failed')
        if suite.statistics.critical.failed > 0:
            return self.run_keyword(name, *args)

    def run_keyword_if_all_tests_passed(self, name, *args):
        """Runs the given keyword with the given arguments, if all tests passed.

        This keyword can only be used in a suite teardown. Trying to use it
        anywhere else results in an error.

        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
""" suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed') if suite.statistics.all.failed == 0: return self.run_keyword(name, *args) def run_keyword_if_any_tests_failed(self, name, *args): """Runs the given keyword with the given arguments, if one or more tests failed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed') if suite.statistics.all.failed > 0: return self.run_keyword(name, *args) def _get_suite_in_teardown(self, kwname): if not self._context.in_suite_teardown: raise RuntimeError("Keyword '%s' can only be used in suite teardown." % kwname) return self._context.suite class _Control(_BuiltInBase): def continue_for_loop(self): """Skips the current for loop iteration and continues from the next. Skips the remaining keywords in the current for loop iteration and continues from the next one. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop | | | Do Something | ${var} | See `Continue For Loop If` to conditionally continue a for loop without using `Run Keyword If` or other wrapper keywords. New in Robot Framework 2.8. """ self.log("Continuing for loop from the next iteration.") raise ContinueForLoop() def continue_for_loop_if(self, condition): """Skips the current for loop iteration if the ``condition`` is true. A wrapper for `Continue For Loop` to continue a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Continue For Loop If | '${var}' == 'CONTINUE' | | | Do Something | ${var} | New in Robot Framework 2.8. 
""" if self._is_true(condition): self.continue_for_loop() def exit_for_loop(self): """Stops executing the enclosing for loop. Exits the enclosing for loop and continues execution after it. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop | | | Do Something | ${var} | See `Exit For Loop If` to conditionally exit a for loop without using `Run Keyword If` or other wrapper keywords. """ self.log("Exiting for loop altogether.") raise ExitForLoop() def exit_for_loop_if(self, condition): """Stops executing the enclosing for loop if the ``condition`` is true. A wrapper for `Exit For Loop` to exit a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Exit For Loop If | '${var}' == 'EXIT' | | | Do Something | ${var} | New in Robot Framework 2.8. """ if self._is_true(condition): self.exit_for_loop() @run_keyword_variant(resolve=0) def return_from_keyword(self, *return_values): """Returns from the enclosing user keyword. This keyword can be used to return from a user keyword with PASS status without executing it fully. It is also possible to return values similarly as with the ``[Return]`` setting. For more detailed information about working with the return values, see the User Guide. This keyword is typically wrapped to some other keyword, such as `Run Keyword If` or `Run Keyword If Test Passed`, to return based on a condition: | Run Keyword If | ${rc} < 0 | Return From Keyword | | Run Keyword If Test Passed | Return From Keyword | It is possible to use this keyword to return from a keyword also inside a for loop. That, as well as returning values, is demonstrated by the `Find Index` keyword in the following somewhat advanced example. Notice that it is often a good idea to move this kind of complicated logic into a test library. 
        | ***** Variables *****
        | @{LIST} =    foo    baz
        |
        | ***** Test Cases *****
        | Example
        |     ${index} =    Find Index    baz    @{LIST}
        |     Should Be Equal    ${index}    ${1}
        |     ${index} =    Find Index    non existing    @{LIST}
        |     Should Be Equal    ${index}    ${-1}
        |
        | ***** Keywords *****
        | Find Index
        |    [Arguments]    ${element}    @{items}
        |    ${index} =    Set Variable    ${0}
        |    :FOR    ${item}    IN    @{items}
        |    \\    Run Keyword If    '${item}' == '${element}'    Return From Keyword    ${index}
        |    \\    ${index} =    Set Variable    ${index + 1}
        |    Return From Keyword    ${-1}    # Also [Return] would work here.

        The most common use case, returning based on an expression, can be
        accomplished directly with `Return From Keyword If`. Both of these
        keywords are new in Robot Framework 2.8.

        See also `Run Keyword And Return` and `Run Keyword And Return If`.
        """
        self.log('Returning from the enclosing user keyword.')
        raise ReturnFromKeyword(return_values)

    @run_keyword_variant(resolve=1)
    def return_from_keyword_if(self, condition, *return_values):
        """Returns from the enclosing user keyword if ``condition`` is true.

        A wrapper for `Return From Keyword` to return based on the given
        condition. The condition is evaluated using the same semantics as
        with `Should Be True` keyword.

        Given the same example as in `Return From Keyword`, we can rewrite the
        `Find Index` keyword as follows:

        | ***** Keywords *****
        | Find Index
        |    [Arguments]    ${element}    @{items}
        |    ${index} =    Set Variable    ${0}
        |    :FOR    ${item}    IN    @{items}
        |    \\    Return From Keyword If    '${item}' == '${element}'    ${index}
        |    \\    ${index} =    Set Variable    ${index + 1}
        |    Return From Keyword    ${-1}    # Also [Return] would work here.

        See also `Run Keyword And Return` and `Run Keyword And Return If`.

        New in Robot Framework 2.8.
        """
        if self._is_true(condition):
            self.return_from_keyword(*return_values)

    @run_keyword_variant(resolve=1)
    def run_keyword_and_return(self, name, *args):
        """Runs the specified keyword and returns from the enclosing user keyword.
        The keyword to execute is defined with ``name`` and ``*args`` exactly
        like with `Run Keyword`. After running the keyword, returns from the
        enclosing user keyword and passes possible return value from the
        executed keyword further. Returning from a keyword has exactly same
        semantics as with `Return From Keyword`.

        Example:
        | `Run Keyword And Return`  | `My Keyword` | arg1 | arg2 |
        | # Above is equivalent to: |
        | ${result} =               | `My Keyword` | arg1 | arg2 |
        | `Return From Keyword`     | ${result}    |      |      |

        Use `Run Keyword And Return If` if you want to run keyword and return
        based on a condition.

        New in Robot Framework 2.8.2.
        """
        ret = self.run_keyword(name, *args)
        # NOTE(review): the return value is escaped before being passed on,
        # presumably so that possible variable syntax in it is not resolved
        # again -- confirm against the 'escape' utility.
        self.return_from_keyword(escape(ret))

    @run_keyword_variant(resolve=2)
    def run_keyword_and_return_if(self, condition, name, *args):
        """Runs the specified keyword and returns from the enclosing user keyword.

        A wrapper for `Run Keyword And Return` to run and return based on
        the given ``condition``. The condition is evaluated using the same
        semantics as with `Should Be True` keyword.

        Example:
        | `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 |
        | # Above is equivalent to:   |
        | `Run Keyword If`            | ${rc} > 0 | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |

        Use `Return From Keyword If` if you want to return a certain value
        based on a condition.

        New in Robot Framework 2.8.2.
        """
        if self._is_true(condition):
            self.run_keyword_and_return(name, *args)

    def pass_execution(self, message, *tags):
        """Skips rest of the current test, setup, or teardown with PASS status.

        This keyword can be used anywhere in the test data, but the place where
        used affects the behavior:

        - When used in any setup or teardown (suite, test or keyword), passes
          that setup or teardown. Possible keyword teardowns of the started
          keywords are executed. Does not affect execution or statuses
          otherwise.
        - When used in a test outside setup or teardown, passes that particular
          test case. Possible test and keyword teardowns are executed.
        Possible continuable failures before this keyword is used, as well as
        failures in executed teardowns, will fail the execution.

        It is mandatory to give a message explaining why execution was passed.
        By default the message is considered plain text, but starting it with
        ``*HTML*`` allows using HTML formatting.

        It is also possible to modify test tags passing tags after the message
        similarly as with `Fail` keyword. Tags starting with a hyphen
        (e.g. ``-regression``) are removed and others added. Tags are modified
        using `Set Tags` and `Remove Tags` internally, and the semantics
        setting and removing them are the same as with these keywords.

        Examples:
        | Pass Execution | All features available in this version tested. |
        | Pass Execution | Deprecated test. | deprecated | -regression    |

        This keyword is typically wrapped to some other keyword, such as
        `Run Keyword If`, to pass based on a condition. The most common case
        can be handled also with `Pass Execution If`:

        | Run Keyword If    | ${rc} < 0 | Pass Execution | Negative values are cool. |
        | Pass Execution If | ${rc} < 0 | Negative values are cool. |

        Passing execution in the middle of a test, setup or teardown should be
        used with care. In the worst case it leads to tests that skip all the
        parts that could actually uncover problems in the tested application.
        In cases where execution cannot continue due to external factors,
        it is often safer to fail the test case and make it non-critical.

        New in Robot Framework 2.8.
        """
        message = message.strip()
        if not message:
            raise RuntimeError('Message cannot be empty.')
        self._set_and_remove_tags(tags)
        log_message, level = self._get_logged_test_message_and_level(message)
        self.log('Execution passed with message:\n%s' % log_message, level)
        raise PassExecution(message)

    @run_keyword_variant(resolve=1)
    def pass_execution_if(self, condition, message, *tags):
        """Conditionally skips rest of the current test, setup, or teardown with PASS status.
        A wrapper for `Pass Execution` to skip rest of the current test,
        setup or teardown based on the given ``condition``. The condition is
        evaluated similarly as with `Should Be True` keyword, and ``message``
        and ``*tags`` have same semantics as with `Pass Execution`.

        Example:
        | :FOR | ${var}            | IN                     | @{VALUES}               |
        |      | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found |
        |      | Do Something      | ${var}                 |

        New in Robot Framework 2.8.
        """
        if self._is_true(condition):
            # The decorator uses 'resolve=1', so message and tags are
            # resolved manually here, and only when the condition is true.
            message = self._variables.replace_string(message)
            tags = [self._variables.replace_string(tag) for tag in tags]
            self.pass_execution(message, *tags)


class _Misc(_BuiltInBase):

    def no_operation(self):
        """Does absolutely nothing."""

    def sleep(self, time_, reason=None):
        """Pauses the test executed for the given time.

        ``time`` may be either a number or a time string. Time strings are in
        a format such as ``1 day 2 hours 3 minutes 4 seconds 5milliseconds``
        or ``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of
        Robot Framework User Guide. Optional `reason` can be used to explain
        why sleeping is necessary. Both the time slept and the reason are
        logged.

        Examples:
        | Sleep | 42                   |
        | Sleep | 1.5                  |
        | Sleep | 2 minutes 10 seconds |
        | Sleep | 10s                  | Wait for a reply |
        """
        seconds = timestr_to_secs(time_)
        # Python hangs with negative values
        if seconds < 0:
            seconds = 0
        self._sleep_in_parts(seconds)
        self.log('Slept %s' % secs_to_timestr(seconds))
        if reason:
            self.log(reason)

    def _sleep_in_parts(self, seconds):
        # time.sleep can't be stopped in windows
        # to ensure that we can signal stop (with timeout)
        # split sleeping to small pieces
        endtime = time.time() + float(seconds)
        while True:
            remaining = endtime - time.time()
            if remaining <= 0:
                break
            time.sleep(min(remaining, 0.5))

    def catenate(self, *items):
        """Catenates the given items together and returns the resulted string.
By default, items are catenated with spaces, but if the first item contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is used instead. Items are converted into strings when necessary. Examples: | ${str1} = | Catenate | Hello | world | | | ${str2} = | Catenate | SEPARATOR=--- | Hello | world | | ${str3} = | Catenate | SEPARATOR= | Hello | world | => | ${str1} = 'Hello world' | ${str2} = 'Hello---world' | ${str3} = 'Helloworld' """ if not items: return '' items = [unic(item) for item in items] if items[0].startswith('SEPARATOR='): sep = items[0][len('SEPARATOR='):] items = items[1:] else: sep = ' ' return sep.join(items) def log(self, message, level='INFO', html=False, console=False, repr=False): u"""Logs the given message with the given level. Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR. Messages below the current active log level are ignored. See `Set Log Level` keyword and ``--loglevel`` command line option for more details about setting the level. Messages logged with the WARN or ERROR levels will be automatically visible also in the console and in the Test Execution Errors section in the log file. Logging can be configured using optional ``html``, ``console`` and ``repr`` arguments. They are off by default, but can be enabled by giving them a true value. See `Boolean arguments` section for more information about true and false values. If the ``html`` argument is given a true value, the message will be considered HTML and special characters such as ``<`` in it are not escaped. For example, logging ``<img src="image.png">`` creates an image when ``html`` is true, but otherwise the message is that exact string. An alternative to using the ``html`` argument is using the HTML pseudo log level. It logs the message as HTML using the INFO level. If the ``console`` argument is true, the message will be written to the console where test execution was started from in addition to the log file. 
This keyword always uses the standard output stream and adds a newline after the written message. Use `Log To Console` instead if either of these is undesirable, If the ``repr`` argument is true, the given item will be passed through a custom version of Python's ``pprint.pformat()`` function before logging it. This is useful, for example, when working with strings or bytes containing invisible characters, or when working with nested data structures. The custom version differs from the standard one so that it omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to byte strings. Examples: | Log | Hello, world! | | | # Normal INFO message. | | Log | Warning, world! | WARN | | # Warning. | | Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. | | Log | <b>Hello</b>, world! | HTML | | # Same as above. | | Log | <b>Hello</b>, world! | DEBUG | html=true | # DEBUG as HTML. | | Log | Hello, console! | console=yes | | # Log also to the console. | | Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. | See `Log Many` if you want to log multiple messages in one go, and `Log To Console` if you only want to write to the console. Arguments ``html``, ``console``, and ``repr`` are new in Robot Framework 2.8.2. Pprint support when ``repr`` is used is new in Robot Framework 2.8.6, and it was changed to drop the ``u`` prefix and add the ``b`` prefix in Robot Framework 2.9. """ if is_truthy(repr): message = prepr(message, width=80) logger.write(message, level, is_truthy(html)) if is_truthy(console): logger.console(message) @run_keyword_variant(resolve=0) def log_many(self, *messages): """Logs the given messages as separate entries using the INFO level. Supports also logging list and dictionary variable items individually. Examples: | Log Many | Hello | ${var} | | Log Many | @{list} | &{dict} | See `Log` and `Log To Console` keywords if you want to use alternative log levels, use HTML, or log to the console. 
""" for msg in self._yield_logged_messages(messages): self.log(msg) def _yield_logged_messages(self, messages): for msg in messages: var = VariableSplitter(msg) value = self._variables.replace_scalar(msg) if var.is_list_variable(): for item in value: yield item elif var.is_dict_variable(): for name, value in value.items(): yield '%s=%s' % (name, value) else: yield value def log_to_console(self, message, stream='STDOUT', no_newline=False): """Logs the given message to the console. By default uses the standard output stream. Using the standard error stream is possibly by giving the ``stream`` argument value ``STDERR`` (case-insensitive). By default appends a newline to the logged message. This can be disabled by giving the ``no_newline`` argument a true value (see `Boolean arguments`). Examples: | Log To Console | Hello, console! | | | Log To Console | Hello, stderr! | STDERR | | Log To Console | Message starts here and is | no_newline=true | | Log To Console | continued without newline. | | This keyword does not log the message to the normal log file. Use `Log` keyword, possibly with argument ``console``, if that is desired. New in Robot Framework 2.8.2. """ logger.console(message, newline=is_falsy(no_newline), stream=stream) @run_keyword_variant(resolve=0) def comment(self, *messages): """Displays the given messages in the log file as keyword arguments. This keyword does nothing with the arguments it receives, but as they are visible in the log, this keyword can be used to display simple messages. Given arguments are ignored so thoroughly that they can even contain non-existing variables. If you are interested about variable values, you can use the `Log` or `Log Many` keywords. """ pass def set_log_level(self, level): """Sets the log threshold to the specified level and returns the old level. Messages below the level will not logged. The default logging level is INFO, but it can be overridden with the command line option ``--loglevel``. 
The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and NONE (no logging). """ try: old = self._context.output.set_log_level(level) except DataError as err: raise RuntimeError(unicode(err)) self._namespace.variables.set_global('${LOG_LEVEL}', level.upper()) self.log('Log level changed from %s to %s' % (old, level.upper())) return old def reload_library(self, name_or_instance): """Rechecks what keywords the specified library provides. Can be called explicitly in the test data or by a library itself when keywords it provides have changed. The library can be specified by its name or as the active instance of the library. The latter is especially useful if the library itself calls this keyword as a method. New in Robot Framework 2.9. """ library = self._namespace.reload_library(name_or_instance) self.log('Reloaded library %s with %s keywords.' % (library.name, len(library))) @run_keyword_variant(resolve=0) def import_library(self, name, *args): """Imports a library with the given name and optional arguments. This functionality allows dynamic importing of libraries while tests are running. That may be necessary, if the library itself is dynamic and not yet available when test data is processed. In a normal case, libraries should be imported using the Library setting in the Setting table. This keyword supports importing libraries both using library names and physical paths. When paths are used, they must be given in absolute format. Forward slashes can be used as path separators in all operating systems. It is possible to pass arguments to the imported library and also named argument syntax works if the library supports it. ``WITH NAME`` syntax can be used to give a custom name to the imported library. 
Examples: | Import Library | MyLibrary | | Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 | | Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib | """ try: self._namespace.import_library(name, list(args)) except DataError as err: raise RuntimeError(unicode(err)) @run_keyword_variant(resolve=0) def import_variables(self, path, *args): """Imports a variable file with the given path and optional arguments. Variables imported with this keyword are set into the test suite scope similarly when importing them in the Setting table using the Variables setting. These variables override possible existing variables with the same names. This functionality can thus be used to import new variables, for example, for each test in a test suite. The given path must be absolute. Forward slashes can be used as path separator regardless the operating system. Examples: | Import Variables | ${CURDIR}/variables.py | | | | Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 | """ try: self._namespace.import_variables(path, list(args), overwrite=True) except DataError as err: raise RuntimeError(unicode(err)) @run_keyword_variant(resolve=0) def import_resource(self, path): """Imports a resource file with the given path. Resources imported with this keyword are set into the test suite scope similarly when importing them in the Setting table using the Resource setting. The given path must be absolute. Forward slashes can be used as path separator regardless the operating system. Examples: | Import Resource | ${CURDIR}/resource.txt | | Import Resource | ${CURDIR}/../resources/resource.html | """ try: self._namespace.import_resource(path) except DataError as err: raise RuntimeError(unicode(err)) def set_library_search_order(self, *search_order): """Sets the resolution order to use when a name matches multiple keywords. The library search order is used to resolve conflicts when a keyword name in the test data matches multiple keywords. 
The first library (or resource, see below) containing the keyword is selected and that keyword implementation used. If the keyword is not found from any library (or resource), test executing fails the same way as when the search order is not set. When this keyword is used, there is no need to use the long ``LibraryName.Keyword Name`` notation. For example, instead of having | MyLibrary.Keyword | arg | | MyLibrary.Another Keyword | | MyLibrary.Keyword | xxx | you can have | Set Library Search Order | MyLibrary | | Keyword | arg | | Another Keyword | | Keyword | xxx | This keyword can be used also to set the order of keywords in different resource files. In this case resource names must be given without paths or extensions like: | Set Library Search Order | resource | another_resource | *NOTE:* - The search order is valid only in the suite where this keywords is used. - Keywords in resources always have higher priority than keywords in libraries regardless the search order. - The old order is returned and can be used to reset the search order later. - Library and resource names in the search order are both case and space insensitive. """ return self._namespace.set_search_order(search_order) def keyword_should_exist(self, name, msg=None): """Fails unless the given keyword exists in the current scope. Fails also if there are more than one keywords with the same name. Works both with the short name (e.g. ``Log``) and the full name (e.g. ``BuiltIn.Log``). The default error message can be overridden with the ``msg`` argument. See also `Variable Should Exist`. """ try: handler = self._namespace.get_handler(name) if isinstance(handler, UserErrorHandler): handler.run() except DataError as err: raise AssertionError(msg or unicode(err)) def get_time(self, format='timestamp', time_='NOW'): """Returns the given time in the requested format. 
*NOTE:* DateTime library added in Robot Framework 2.8.5 contains much more flexible keywords for getting the current date and time and for date and time handling in general. How time is returned is determined based on the given ``format`` string as follows. Note that all checks are case-insensitive. 1) If ``format`` contains the word ``epoch``, the time is returned in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC). The return value is always an integer. 2) If ``format`` contains any of the words ``year``, ``month``, ``day``, ``hour``, ``min``, or ``sec``, only the selected parts are returned. The order of the returned parts is always the one in the previous sentence and the order of words in ``format`` is not significant. The parts are returned as zero-padded strings (e.g. May -> ``05``). 3) Otherwise (and by default) the time is returned as a timestamp string in the format ``2006-02-24 15:08:31``. By default this keyword returns the current local time, but that can be altered using ``time`` argument as explained below. Note that all checks involving strings are case-insensitive. 1) If ``time`` is a number, or a string that can be converted to a number, it is interpreted as seconds since the UNIX epoch. This documentation was originally written about 1177654467 seconds after the epoch. 2) If ``time`` is a timestamp, that time will be used. Valid timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and ``YYYYMMDD hhmmss``. 3) If ``time`` is equal to ``NOW`` (default), the current local time is used. This time is got using Python's ``time.time()`` function. 4) If ``time`` is equal to ``UTC``, the current time in [http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC] is used. This time is got using ``time.time() + time.altzone`` in Python. 5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1 hour 30 min``, the current local/UTC time plus/minus the time specified with the time string is used. 
The time string format is described in an appendix of Robot Framework User Guide. Examples (expecting the current local time is 2006-03-29 15:06:21): | ${time} = | Get Time | | | | | ${secs} = | Get Time | epoch | | | | ${year} = | Get Time | return year | | | | ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day | | @{time} = | Get Time | year month day hour min sec | | | | ${y} | ${s} = | Get Time | seconds and year | | => | ${time} = '2006-03-29 15:06:21' | ${secs} = 1143637581 | ${year} = '2006' | ${yyyy} = '2006', ${mm} = '03', ${dd} = '29' | @{time} = ['2006', '03', '29', '15', '06', '21'] | ${y} = '2006' | ${s} = '21' Examples (expecting the current local time is 2006-03-29 15:06:21 and UTC time is 2006-03-29 12:06:21): | ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds | | ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp | | ${year} = | Get Time | year | NOW | # The local time of execution | | @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time | | @{utc} = | Get Time | hour min sec | UTC | # The UTC time of execution | | ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time | => | ${time} = '2007-04-27 09:14:27' | ${secs} = 27 | ${year} = '2006' | @{time} = ['16', '08', '24'] | @{utc} = ['12', '06', '21'] | ${hour} = '11' Support for UTC time was added in Robot Framework 2.7.5 but it did not work correctly until 2.7.7. """ return get_time(format, parse_time(time_)) def evaluate(self, expression, modules=None, namespace=None): """Evaluates the given expression in Python and returns the results. ``expression`` is evaluated in Python as explained in `Evaluating expressions`. ``modules`` argument can be used to specify a comma separated list of Python modules to be imported and added to the evaluation namespace. ``namespace`` argument can be used to pass a custom evaluation namespace as a dictionary. 
Possible ``modules`` are added to this namespace. This is a new feature in Robot Framework 2.8.4. Variables used like ``${variable}`` are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Examples (expecting ``${result}`` is 3.14): | ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' | | ${status} = | Evaluate | 0 < $result < 10 | # Using variable itself, not string representation | | ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys | | ${ns} = | Create Dictionary | x=${4} | y=${2} | | ${result} = | Evaluate | x*10 + y | namespace=${ns} | => | ${status} = True | ${random} = <random integer> | ${result} = 42 """ variables = self._variables.as_dict(decoration=False) expression = self._handle_variables_in_expression(expression, variables) namespace = self._create_evaluation_namespace(namespace, modules) variables = self._decorate_variables_for_evaluation(variables) try: if not is_string(expression): raise TypeError("Expression must be string, got %s." 
% type_name(expression)) if not expression: raise ValueError("Expression cannot be empty.") return eval(expression, namespace, variables) except: raise RuntimeError("Evaluating expression '%s' failed: %s" % (expression, get_error_message())) def _handle_variables_in_expression(self, expression, variables): tokens = [] variable_started = seen_variable = False generated = generate_tokens(StringIO(expression).readline) for toknum, tokval, _, _, _ in generated: if variable_started: if toknum == token.NAME: if tokval not in variables: variable_not_found('$%s' % tokval, variables, deco_braces=False) tokval = 'RF_VAR_' + tokval seen_variable = True else: tokens.append((token.ERRORTOKEN, '$')) variable_started = False if toknum == token.ERRORTOKEN and tokval == '$': variable_started = True else: tokens.append((toknum, tokval)) if seen_variable: return untokenize(tokens).strip() return expression def _create_evaluation_namespace(self, namespace, modules): namespace = dict(namespace or {}) modules = modules.replace(' ', '').split(',') if modules else [] namespace.update((m, __import__(m)) for m in modules if m) return namespace def _decorate_variables_for_evaluation(self, variables): decorated = [('RF_VAR_' + name, value) for name, value in variables.items()] return NormalizedDict(decorated, ignore='_') def call_method(self, object, method_name, *args, **kwargs): """Calls the named method of the given object with the provided arguments. The possible return value from the method is returned and can be assigned to a variable. Keyword fails both if the object does not have a method with the given name or if executing the method raises an exception. Support for ``**kwargs`` is new in Robot Framework 2.9. Since that possible equal signs in other arguments must be escaped with a backslash like ``\\=``. 
Examples: | Call Method | ${hashtable} | put | myname | myvalue | | ${isempty} = | Call Method | ${hashtable} | isEmpty | | | Should Not Be True | ${isempty} | | | | | ${value} = | Call Method | ${hashtable} | get | myname | | Should Be Equal | ${value} | myvalue | | | | Call Method | ${object} | kwargs | name=value | foo=bar | | Call Method | ${object} | positional | escaped\\=equals | """ try: method = getattr(object, method_name) except AttributeError: raise RuntimeError("Object '%s' does not have method '%s'." % (object, method_name)) try: return method(*args, **kwargs) except: raise RuntimeError("Calling method '%s' failed: %s" % (method_name, get_error_message())) def regexp_escape(self, *patterns): """Returns each argument string escaped for use as a regular expression. This keyword can be used to escape strings to be used with `Should Match Regexp` and `Should Not Match Regexp` keywords. Escaping is done with Python's ``re.escape()`` function. Examples: | ${escaped} = | Regexp Escape | ${original} | | @{strings} = | Regexp Escape | @{strings} | """ if len(patterns) == 0: return '' if len(patterns) == 1: return re.escape(patterns[0]) return [re.escape(p) for p in patterns] def set_test_message(self, message, append=False): """Sets message for the current test case. If the optional ``append`` argument is given a true value (see `Boolean arguments`), the given ``message`` is added after the possible earlier message by joining the messages with a space. In test teardown this keyword can alter the possible failure message, but otherwise failures override messages set by this keyword. Notice that in teardown the initial message is available as a built-in variable ``${TEST MESSAGE}``. It is possible to use HTML format in the message by starting the message with ``*HTML*``. Examples: | Set Test Message | My message | | | Set Test Message | is continued. | append=yes | | Should Be Equal | ${TEST MESSAGE} | My message is continued. 
| | Set Test Message | `*`HTML`*` <b>Hello!</b> | | This keyword can not be used in suite setup or suite teardown. Support for ``append`` was added in Robot Framework 2.7.7 and support for HTML format in 2.8. """ test = self._namespace.test if not test: raise RuntimeError("'Set Test Message' keyword cannot be used in " "suite setup or teardown.") test.message = self._get_possibly_appended_value(test.message, message, append) message, level = self._get_logged_test_message_and_level(test.message) self.log('Set test message to:\n%s' % message, level) def _get_possibly_appended_value(self, initial, new, append): if not is_unicode(new): new = unic(new) if is_truthy(append) and initial: return '%s %s' % (initial, new) return new def _get_logged_test_message_and_level(self, message): if message.startswith('*HTML*'): return message[6:].lstrip(), 'HTML' return message, 'INFO' def set_test_documentation(self, doc, append=False): """Sets documentation for the current test case. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. The current test documentation is available as a built-in variable ``${TEST DOCUMENTATION}``. This keyword can not be used in suite setup or suite teardown. New in Robot Framework 2.7. Support for ``append`` was added in 2.7.7. """ test = self._namespace.test if not test: raise RuntimeError("'Set Test Documentation' keyword cannot be " "used in suite setup or teardown.") test.doc = self._get_possibly_appended_value(test.doc, doc, append) self._variables.set_test('${TEST_DOCUMENTATION}', test.doc) self.log('Set test documentation to:\n%s' % test.doc) def set_suite_documentation(self, doc, append=False, top=False): """Sets documentation for the current test suite. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. 
This keyword sets the documentation of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the documentation of the top level suite is altered instead. The documentation of the current suite is available as a built-in variable ``${SUITE DOCUMENTATION}``. New in Robot Framework 2.7. Support for ``append`` and ``top`` were added in 2.7.7. """ top = is_truthy(top) suite = self._get_namespace(top).suite suite.doc = self._get_possibly_appended_value(suite.doc, doc, append) self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top) self.log('Set suite documentation to:\n%s' % suite.doc) def set_suite_metadata(self, name, value, append=False, top=False): """Sets metadata for the current test suite. By default possible existing metadata values are overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. This keyword sets the metadata of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the metadata of the top level suite is altered instead. The metadata of the current suite is available as a built-in variable ``${SUITE METADATA}`` in a Python dictionary. Notice that modifying this variable directly has no effect on the actual metadata the suite has. New in Robot Framework 2.7.4. Support for ``append`` and ``top`` were added in 2.7.7. """ top = is_truthy(top) if not is_unicode(name): name = unic(name) metadata = self._get_namespace(top).suite.metadata original = metadata.get(name, '') metadata[name] = self._get_possibly_appended_value(original, value, append) self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top) self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name])) def set_tags(self, *tags): """Adds given ``tags`` for the current test or all tests in a suite. 
When this keyword is used inside a test case, that test gets the specified tags and other tests are not affected. If this keyword is used in a suite setup, all test cases in that suite, recursively, gets the given tags. It is a failure to use this keyword in a suite teardown. The current tags are available as a built-in variable ``@{TEST TAGS}``. See `Remove Tags` if you want to remove certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.add(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(tags, persist=True) else: raise RuntimeError("'Set Tags' cannot be used in suite teardown.") self.log('Set tag%s %s.' % (s(tags), seq2str(tags))) def remove_tags(self, *tags): """Removes given ``tags`` from the current test or all tests in a suite. Tags can be given exactly or using a pattern where ``*`` matches anything and ``?`` matches one character. This keyword can affect either one test case or all test cases in a test suite similarly as `Set Tags` keyword. The current tags are available as a built-in variable ``@{TEST TAGS}``. Example: | Remove Tags | mytag | something-* | ?ython | See `Set Tags` if you want to add certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.remove(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(remove=tags, persist=True) else: raise RuntimeError("'Remove Tags' cannot be used in suite teardown.") self.log('Removed tag%s %s.' % (s(tags), seq2str(tags))) def get_library_instance(self, name): """Returns the currently active instance of the specified test library. This keyword makes it easy for test libraries to interact with other test libraries that have state. 
This is illustrated by the Python example below: | from robot.libraries.BuiltIn import BuiltIn | | def title_should_start_with(expected): | seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary') | title = seleniumlib.get_title() | if not title.startswith(expected): | raise AssertionError("Title '%s' did not start with '%s'" | % (title, expected)) It is also possible to use this keyword in the test data and pass the returned library instance to another keyword. If a library is imported with a custom name, the ``name`` used to get the instance must be that name and not the original library name. """ try: return self._namespace.get_library_instance(name) except DataError as err: raise RuntimeError(unicode(err)) class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc): """An always available standard library with often needed keywords. ``BuiltIn`` is Robot Framework's standard library that provides a set of generic keywords needed often. It is imported automatically and thus always available. The provided keywords can be used, for example, for verifications (e.g. `Should Be Equal`, `Should Contain`), conversions (e.g. `Convert To Integer`) and for various other purposes (e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`). == Table of contents == - `HTML error messages` - `Evaluating expressions` - `Boolean arguments` - `Multiline string comparisons` - `Shortcuts` - `Keywords` = HTML error messages = Many of the keywords accept an optional error message to use if the keyword fails. Starting from Robot Framework 2.8, it is possible to use HTML in these messages by prefixing them with ``*HTML*``. See `Fail` keyword for a usage example. Notice that using HTML in messages is not limited to BuiltIn library but works with any error message. = Evaluating expressions = Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be True`, accept an expression that is evaluated in Python. 
These expressions are evaluated using Python's [https://docs.python.org/2/library/functions.html#eval|eval] function so that all Python built-ins like ``len()`` and ``int()`` are available. `Evaluate` allows configuring the execution namespace with custom modules, and other keywords have [https://docs.python.org/2/library/os.html|os] and [https://docs.python.org/2/library/sys.html|sys] modules available automatically. Examples: | `Run Keyword If` | os.sep == '/' | Log | Not on Windows | | ${random int} = | `Evaluate` | random.randint(0, 5) | modules=random | When a variable is used in the expressing using the normal ``${variable}`` syntax, its value is replaces before the expression is evaluated. This means that the value used in the expression will be the string representation of the variable value, not the variable value itself. This is not a problem with numbers and other objects that have a string representation that can be evaluated directly, but with other objects the behavior depends on the string representation. Most importantly, strings must always be quoted, and if they can contain newlines, they must be triple quoted. Examples: | `Should Be True` | ${rc} < 10 | Return code greater than 10 | | `Run Keyword If` | '${status}' == 'PASS' | Log | Passed | | `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL | Starting from Robot Framework 2.9, variables themselves are automatically available in the evaluation namespace. They can be accessed using special variable syntax without the curly braces like ``$variable``. These variables should never be quoted, and in fact they are not even replaced inside strings. 
Examples: | `Should Be True` | $rc < 10 | Return code greater than 10 | | `Run Keyword If` | $status == 'PASS' | `Log` | Passed | | `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL | | `Should Be True` | len($result) > 1 and $result[1] == 'OK' | Notice that instead of creating complicated expressions, it is often better to move the logic into a test library. = Boolean arguments = Some keywords accept arguments that are handled as Boolean values true or false. If such an argument is given as a string, it is considered false if it is either empty or case-insensitively equal to ``false`` or ``no``. Keywords verifying something that allow dropping actual and expected values from the possible error message also consider string ``no values`` as false. Other strings are considered true regardless their value, and other argument types are tested using same [http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules as in Python]. True examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. | False examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. 
| | `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument | Note that prior to Robot Framework 2.9 some keywords considered all non-empty strings, including ``false`` and ``no``, to be true. = Multiline string comparisons = `Should Be Equal` and `Should Be Equal As Strings` report the failures using [https://en.wikipedia.org/wiki/Diff_utility#Unified_format|unified diff format] if both strings have more than two lines. New in Robot Framework 2.9.1. Example: | ${first} = | `Catenate` | SEPARATOR=\\n | Not in second | Same | Differs | Same | | ${second} = | `Catenate` | SEPARATOR=\\n | Same | Differs2 | Same | Not in first | | `Should Be Equal` | ${first} | ${second} | Results in the following error message: | Multiline strings are different: | --- first | +++ second | @@ -1,4 +1,4 @@ | -Not in second | Same | -Differs | +Differs2 | Same | +Not in first """ ROBOT_LIBRARY_SCOPE = 'GLOBAL' ROBOT_LIBRARY_VERSION = get_version() class RobotNotRunningError(AttributeError): """Used when something cannot be done because Robot is not running. Based on AttributeError to be backwards compatible with RF < 2.8.5. May later be based directly on Exception, so new code should except this exception explicitly. """ pass def register_run_keyword(library, keyword, args_to_process=None): """Registers 'run keyword' so that its arguments can be handled correctly. 1) Why is this method needed Keywords running other keywords internally (normally using `Run Keyword` or some variants of it in BuiltIn) must have the arguments meant to the internally executed keyword handled specially to prevent processing them twice. This is done ONLY for keywords registered using this method. If the register keyword has same name as any keyword from Robot Framework standard libraries, it can be used without getting warnings. Normally there is a warning in such cases unless the keyword is used in long format (e.g. MyLib.Keyword). 
Keywords executed by registered run keywords can be tested in dry-run mode if they have 'name' argument which takes the name of the executed keyword. 2) How to use this method `library` is the name of the library where the registered keyword is implemented. `keyword` can be either a function or method implementing the keyword, or name of the implemented keyword as a string. `args_to_process` is needed when `keyword` is given as a string, and it defines how many of the arguments to the registered keyword must be processed normally. When `keyword` is a method or function, this information is got directly from it so that varargs (those specified with syntax '*args') are not processed but others are. 3) Examples from robot.libraries.BuiltIn import BuiltIn, register_run_keyword def my_run_keyword(name, *args): # do something return BuiltIn().run_keyword(name, *args) # Either one of these works register_run_keyword(__name__, my_run_keyword) register_run_keyword(__name__, 'My Run Keyword', 1) ------------- from robot.libraries.BuiltIn import BuiltIn, register_run_keyword class MyLibrary: def my_run_keyword_if(self, expression, name, *args): # do something return BuiltIn().run_keyword_if(expression, name, *args) # Either one of these works register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if) register_run_keyword('MyLibrary', 'my_run_keyword_if', 2) """ RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process) [register_run_keyword('BuiltIn', getattr(_RunKeyword, a)) for a in dir(_RunKeyword) if a[0] != '_']
{ "content_hash": "f374f0c691f5aae5afc5ef985c02f16d", "timestamp": "", "source": "github", "line_count": 3146, "max_line_length": 123, "avg_line_length": 45.10584869675779, "alnum_prop": 0.6018900234667344, "repo_name": "rwarren14/robotframework", "id": "01b2887fb101a8feda0d06ec2e9862b4f58cd184", "size": "142511", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "src/robot/libraries/BuiltIn.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "210" }, { "name": "CSS", "bytes": "23490" }, { "name": "HTML", "bytes": "140929" }, { "name": "Java", "bytes": "60101" }, { "name": "JavaScript", "bytes": "160761" }, { "name": "Python", "bytes": "2154951" }, { "name": "RobotFramework", "bytes": "2043769" }, { "name": "Shell", "bytes": "281" } ], "symlink_target": "" }
"""This example gets the Marketplace comments for a programmatic proposal.""" # Import appropriate modules from the client library. from datetime import datetime from googleads import ad_manager PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE' def main(client, proposal_id): # Initialize appropriate service. marketplace_comment_service = client.GetService( 'ProposalService', version='v201808') query = 'WHERE proposalId = %s' % proposal_id # Create a statement to select marketplace comments. statement = {'query': query} response = marketplace_comment_service.getMarketplaceCommentsByStatement( statement) # Print out some information for each marketplace comment. for marketplace_comment in response['results']: date_time = marketplace_comment['creationTime'] date_time_string = datetime(date_time['date']['year'], date_time['date']['month'], date_time['date']['day'], date_time['hour'], date_time['minute'], date_time['second']).isoformat() print('Marketplace comment with creation time "%s"and comment "%s" was ' 'found.\n' % (date_time_string, marketplace_comment['comment'])) print '\nNumber of results found: %s' % response['totalResultSetSize'] if __name__ == '__main__': # Initialize client object. ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage() main(ad_manager_client, PROPOSAL_ID)
{ "content_hash": "ced29db8b18303fb11a26b8f71a3d04e", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 77, "avg_line_length": 37.5, "alnum_prop": 0.6586666666666666, "repo_name": "Aloomaio/googleads-python-lib", "id": "3dc4943fdf907f05a94568f55036a1defa2fdb43", "size": "2122", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/ad_manager/v201808/proposal_service/get_marketplace_comments.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "491015" } ], "symlink_target": "" }
import datetime import json from warnings import warn from sdcclient._common import _SdcCommon class PolicyEventsClientOld(_SdcCommon): def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None): super(PolicyEventsClientOld, self).__init__(token, sdc_url, ssl_verify, custom_headers) self.product = "SDS" def _get_policy_events_int(self, ctx): warn("The PolicyEventsClientOld class is deprecated in favour of PolicyEventsClientV1; use it only if you have " "an old on-premises installation", DeprecationWarning, 3) policy_events_url = self.url + '/api/policyEvents{id}?from={frm:d}&to={to:d}&offset={offset}&limit={limit}{sampling}{aggregations}{scope}{filter}'.format( id="/%s" % ctx["id"] if "id" in ctx else "", frm=int(ctx['from']), to=int(ctx['to']), offset=ctx['offset'], limit=ctx['limit'], sampling='&sampling=%d' % int(ctx['sampling']) if "sampling" in ctx else "", aggregations='&aggregations=%s' % json.dumps(ctx['aggregations']) if "aggregations" in ctx else "", scope='&scopeFilter=%s' % ctx['scopeFilter'] if "scopeFilter" in ctx else "", filter='&eventFilter=%s' % ctx['eventFilter'] if "eventFilter" in ctx else "") res = self.http.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] # Increment the offset by limit ctx['offset'] += ctx['limit'] return [True, {"ctx": ctx, "data": res.json()}] def get_policy_events_range(self, from_sec, to_sec, sampling=None, aggregations=None, scope_filter=None, event_filter=None): '''**Description** Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. **Arguments** - from_sec: the start of the timerange for which to get events - end_sec: the end of the timerange for which to get events - sampling: sample all policy events using *sampling* interval. 
- aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType. - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container). - event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype. **Success Return Value** An array containing: - A context object that should be passed to later calls to get_more_policy_events. - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy events. **Example** `examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ ''' options = {"from": int(from_sec) * 1000000, "to": int(to_sec) * 1000000, "offset": 0, "limit": 1000, "sampling": sampling, "aggregations": aggregations, "scopeFilter": scope_filter, "eventFilter": event_filter} ctx = {k: v for k, v in options.items() if v is not None} return self._get_policy_events_int(ctx) def get_policy_events_duration(self, duration_sec, sampling=None, aggregations=None, scope_filter=None, event_filter=None): '''**Description** Fetch all policy events that occurred in the last duration_sec seconds. 
This method is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. **Arguments** - duration_sec: Fetch all policy events that have occurred in the last *duration_sec* seconds. - sampling: Sample all policy events using *sampling* interval. - aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType. - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container). - event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype. **Success Return Value** An array containing: - A context object that should be passed to later calls to get_more_policy_events. - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy events. 
**Example** `examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ ''' epoch = datetime.datetime.utcfromtimestamp(0) to_ts = (datetime.datetime.utcnow() - epoch).total_seconds() * 1000 * 1000 from_ts = to_ts - (int(duration_sec) * 1000 * 1000) options = {"to": to_ts, "from": from_ts, "offset": 0, "limit": 1000, "sampling": sampling, "aggregations": aggregations, "scopeFilter": scope_filter, "eventFilter": event_filter} ctx = {k: v for k, v in options.items() if v is not None} return self._get_policy_events_int(ctx) def get_policy_events_id_range(self, id, from_sec, to_sec, sampling=None, aggregations=None, scope_filter=None, event_filter=None): '''**Description** Fetch all policy events with id that occurred in the time range [from_sec:to_sec]. This method is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. **Arguments** - id: the id of the policy events to fetch. - from_sec: the start of the timerange for which to get events - end_sec: the end of the timerange for which to get events - sampling: sample all policy events using *sampling* interval. - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container). - event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype. 
- aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType. **Success Return Value** An array containing: - A context object that should be passed to later calls to get_more_policy_events. - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy events. **Example** `examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ ''' options = {"id": id, "from": int(from_sec) * 1000000, "to": int(to_sec) * 1000000, "offset": 0, "limit": 1000, "sampling": sampling, "aggregations": aggregations, "scopeFilter": scope_filter, "eventFilter": event_filter} ctx = {k: v for k, v in options.items() if v is not None} return self._get_policy_events_int(ctx) def get_policy_events_id_duration(self, id, duration_sec, sampling=None, aggregations=None, scope_filter=None, event_filter=None): '''**Description** Fetch all policy events with id that occurred in the last duration_sec seconds. This method is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. **Arguments** - id: the id of the policy events to fetch. - duration_sec: Fetch all policy events that have occurred in the last *duration_sec* seconds. - sampling: Sample all policy events using *sampling* interval. - aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). 
This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType. - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container). - event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype. **Success Return Value** An array containing: - A context object that should be passed to later calls to get_more_policy_events. - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy events. 
**Example** `examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ ''' epoch = datetime.datetime.utcfromtimestamp(0) to_ts = (datetime.datetime.utcnow() - epoch).total_seconds() * 1000 * 1000 from_ts = to_ts - (int(duration_sec) * 1000 * 1000) options = {"id": id, "to": to_ts, "from": from_ts, "offset": 0, "limit": 1000, "sampling": sampling, "aggregations": aggregations, "scopeFilter": scope_filter, "eventFilter": event_filter} ctx = {k: v for k, v in options.items() if v is not None} return self._get_policy_events_int(ctx) def get_more_policy_events(self, ctx): '''**Description** Fetch additional policy events after an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` / :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events. **Arguments** - ctx: a context object returned from an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` / :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events. **Success Return Value** An array containing: - A context object that should be passed to later calls to get_more_policy_events() - An array of policy events, in JSON format. 
Each policy event contains the following: - hostMac: the mac address of the machine where the event occurred - severity: a severity level from 1-7 - timestamp: when the event occurred (ns since the epoch) - version: a version number for this message (currently 1) - policyId: a reference to the policy that generated this policy event - output: A string describing the event that occurred - id: a unique identifier for this policy event - isAggregated: if true, this is a combination of multiple policy events - containerId: the container in which the policy event occurred When the number of policy events returned is 0, there are no remaining events and you can stop calling get_more_policy_events(). **Example** `examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ ''' return self._get_policy_events_int(ctx)
{ "content_hash": "c94450db441c916d728116c3c44a0774", "timestamp": "", "source": "github", "line_count": 212, "max_line_length": 432, "avg_line_length": 70.69339622641509, "alnum_prop": 0.6505638219790485, "repo_name": "draios/python-sdc-client", "id": "a5a316d19d5bad5413f60942002c3eaed1cb591d", "size": "14987", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sdcclient/secure/_policy_events_old.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "247480" }, { "name": "Shell", "bytes": "13957" } ], "symlink_target": "" }
import sqlite3, json, csv pnr96path = '../pnr96/pnr96-streets.json' municipalities_path = '../data/kommuner.csv' p2k_path = '../postort2kommun.csv' db = sqlite3.connect('pnr96.sqlite') db.text_factory = str c = db.cursor() c.execute('''CREATE TABLE pnr96 ( streetName VARCHAR(255) NOT NULL, postalCode INTEGER NOT NULL, postalTown VARCHAR(50) NOT NULL )''') c.execute('''CREATE TABLE kommun ( municipalityScb INTEGER NOT NULL PRIMARY KEY, municipalityName VARCHAR(50) NOT NULL )''') c.execute('''CREATE TABLE postort2kommun ( postalTown VARCHAR(50) NOT NULL PRIMARY KEY, municipalityScb INTEGER NOT NULL )''') # Insert PNR96 data pnrdata = json.load(open(pnr96path)) for row in pnrdata: c.execute('INSERT INTO pnr96 VALUES (?, ?, ?)', [row['streetName'], row['postalCode'], row['postalTown']]) # Insert municipality data kcsv = csv.reader(open(municipalities_path), delimiter='\t') next(kcsv) # skip header row for row in kcsv: scbnumber = int(row[2]) name = unicode(row[3], "utf-8").upper() c.execute('INSERT INTO kommun VALUES (?, ?)', [scbnumber, name]) # Check if there's a postal town for this municipality c.execute('SELECT postalTown FROM pnr96 WHERE postalTown = ? LIMIT 1', [name]) rows = c.fetchall() if len(rows) == 1: c.execute('INSERT INTO postort2kommun VALUES (?, ?)', [name, scbnumber]) # Insert postal town -> municipality mappings kcsv = csv.reader(open(p2k_path), delimiter='\t') next(kcsv) for row in kcsv: if len(row) < 2: continue # mapping has not yet been defined for this postal town postalTown = row[0] municipalityName = row[1] c.execute('SELECT municipalityScb FROM kommun WHERE municipalityName = ? LIMIT 1', [municipalityName]) rows = c.fetchall() if len(rows) == 0: raise Exception("Municipality not found: %s" % municipalityName) municipalityScb = int(rows[0][0]) c.execute('INSERT INTO postort2kommun VALUES (?, ?)', [postalTown, municipalityScb]) # Create indices c.execute('CREATE INDEX pnr96_streetName ON pnr96 (streetName)') c.close() db.close()
{ "content_hash": "ed6edf539d0973bd5d614bb5684b2aa5", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 106, "avg_line_length": 30.86764705882353, "alnum_prop": 0.6874702239161505, "repo_name": "samuellb/pnr96addrstats", "id": "ca9097de5e66103d9bbd20d8e8edaae9a35a51cd", "size": "3264", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pnrlookup/dbgen.py", "mode": "33261", "license": "mit", "language": [ { "name": "Makefile", "bytes": "304" }, { "name": "PHP", "bytes": "5649" }, { "name": "Python", "bytes": "6580" }, { "name": "Shell", "bytes": "12490" } ], "symlink_target": "" }
from .dist_math import * __all__ = ['Binomial', 'BetaBin', 'Bernoulli', 'Poisson', 'NegativeBinomial', 'ConstantDist', 'ZeroInflatedPoisson', 'DiscreteUniform', 'Geometric', 'Categorical'] class Binomial(Discrete): """ Binomial log-likelihood. The discrete probability distribution of the number of successes in a sequence of n independent yes/no experiments, each of which yields success with probability p. .. math:: f(x \mid n, p) = \frac{n!}{x!(n-x)!} p^x (1-p)^{n-x} Parameters ---------- n : int Number of Bernoulli trials, n > x p : float Probability of success in each trial, :math:`p \in [0,1]` .. note:: - :math:`E(X)=np` - :math:`Var(X)=np(1-p)` """ def __init__(self, n, p, *args, **kwargs): super(Binomial, self).__init__(*args, **kwargs) self.n = n self.p = p self.mode = cast(round(n * p), 'int8') def logp(self, value): n = self.n p = self.p return bound( logpow(p, value) + logpow( 1 - p, n - value) + factln( n) - factln(value) - factln(n - value), 0 <= value, value <= n, 0 <= p, p <= 1) class BetaBin(Discrete): """ Beta-binomial log-likelihood. Equivalent to binomial random variables with probabilities drawn from a :math:`\texttt{Beta}(\alpha,\beta)` distribution. .. math:: f(x \mid \alpha, \beta, n) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)} \frac{\Gamma(n+1)}{\Gamma(x+1)\Gamma(n-x+1)} \frac{\Gamma(\alpha + x)\Gamma(n+\beta-x)}{\Gamma(\alpha+\beta+n)} Parameters ---------- alpha : float alpha > 0 beta : float beta > 0 n : int n=x,x+1,... .. 
note:: - :math:`E(X)=n\frac{\alpha}{\alpha+\beta}` - :math:`Var(X)=n\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}` """ def __init__(self, alpha, beta, n, *args, **kwargs): super(BetaBin, self).__init__(*args, **kwargs) self.alpha = alpha self.beta = beta self.n = n self.mode = cast(round(alpha / (alpha + beta)), 'int8') def logp(self, value): alpha = self.alpha beta = self.beta n = self.n return bound(gammaln(alpha + beta) - gammaln(alpha) - gammaln(beta) + gammaln(n + 1) - gammaln(value + 1) - gammaln(n - value + 1) + gammaln(alpha + value) + gammaln(n + beta - value) - gammaln(beta + alpha + n), 0 <= value, value <= n, alpha > 0, beta > 0) class Bernoulli(Discrete): """Bernoulli log-likelihood The Bernoulli distribution describes the probability of successes (x=1) and failures (x=0). .. math:: f(x \mid p) = p^{x} (1-p)^{1-x} Parameters ---------- p : float Probability of success. :math:`0 < p < 1`. .. note:: - :math:`E(x)= p` - :math:`Var(x)= p(1-p)` """ def __init__(self, p, *args, **kwargs): super(Bernoulli, self).__init__(*args, **kwargs) self.p = p self.mode = cast(round(p), 'int8') def logp(self, value): p = self.p return bound( switch(value, log(p), log(1 - p)), 0 <= p, p <= 1) class Poisson(Discrete): """ Poisson log-likelihood. The Poisson is a discrete probability distribution. It is often used to model the number of events occurring in a fixed period of time when the times at which events occur are independent. The Poisson distribution can be derived as a limiting case of the binomial distribution. .. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!} Parameters ---------- mu : float Expected number of occurrences during the given interval, :math:`\mu \geq 0`. .. 
note:: - :math:`E(x)=\mu` - :math:`Var(x)=\mu` """ def __init__(self, mu, *args, **kwargs): super(Poisson, self).__init__(*args, **kwargs) self.mu = mu self.mode = floor(mu).astype('int32') def logp(self, value): mu = self.mu return bound( logpow(mu, value) - factln(value) - mu, mu > 0, value >= 0) class NegativeBinomial(Discrete): """ Negative binomial log-likelihood. The negative binomial distribution describes a Poisson random variable whose rate parameter is gamma distributed. PyMC's chosen parameterization is based on this mixture interpretation. .. math:: f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x Parameters ---------- mu : float mu > 0 alpha : float alpha > 0 .. note:: - :math:`E[x]=\mu` """ def __init__(self, mu, alpha, *args, **kwargs): super(NegativeBinomial, self).__init__(*args, **kwargs) self.mu = mu self.alpha = alpha self.mode = floor(mu).astype('int32') def logp(self, value): mu = self.mu alpha = self.alpha # Return Poisson when alpha gets very large pois = bound(logpow(mu, value) - factln(value) - mu, mu > 0, value >= 0) negbinom = bound(gammaln(value + alpha) - factln(value) - gammaln(alpha) + logpow(mu / (mu + alpha), value) + logpow(alpha / (mu + alpha), alpha), mu > 0, alpha > 0, value >= 0) return switch(alpha > 1e10, pois, negbinom) class Geometric(Discrete): """ Geometric log-likelihood. The probability that the first success in a sequence of Bernoulli trials occurs on the x'th trial. .. math:: f(x \mid p) = p(1-p)^{x-1} Parameters ---------- p : float Probability of success on an individual trial, :math:`p \in [0,1]` .. note:: - :math:`E(X)=1/p` - :math:`Var(X)=\frac{1-p}{p^2}` """ def __init__(self, p, *args, **kwargs): super(Geometric, self).__init__(*args, **kwargs) self.p = p self.mode = 1 def logp(self, value): p = self.p return bound(log(p) + logpow(1 - p, value - 1), 0 <= p, p <= 1, value >= 1) class DiscreteUniform(Discrete): """ Discrete uniform distribution. 
.. math:: f(x \mid lower, upper) = \frac{1}{upper-lower} Parameters ---------- lower : int Lower limit. upper : int Upper limit (upper > lower). """ def __init__(self, lower, upper, *args, **kwargs): super(DiscreteUniform, self).__init__(*args, **kwargs) self.lower, self.upper = floor(lower).astype('int32'), floor(upper).astype('int32') self.mode = floor((upper - lower) / 2.).astype('int32') def logp(self, value): upper = self.upper lower = self.lower return bound( -log(upper - lower + 1), lower <= value, value <= upper) class Categorical(Discrete): """ Categorical log-likelihood. The most general discrete distribution. .. math:: f(x=i \mid p) = p_i for :math:`i \in 0 \ldots k-1`. Parameters ---------- p : float :math:`p > 0`, :math:`\sum p = 1` """ def __init__(self, p, *args, **kwargs): super(Categorical, self).__init__(*args, **kwargs) self.k = p.shape[0] self.p = p self.mode = argmax(p) def logp(self, value): p = self.p k = self.k return bound(log(p[value]), value >= 0, value <= (k - 1), le(abs(sum(p) - 1), 1e-5)) class ConstantDist(Discrete): """ Constant log-likelihood with parameter c={0}. Parameters ---------- value : float or int Data value(s) """ def __init__(self, c, *args, **kwargs): super(ConstantDist, self).__init__(*args, **kwargs) self.mean = self.median = self.mode = self.c = c def logp(self, value): c = self.c return bound(0, eq(value, c)) class ZeroInflatedPoisson(Discrete): def __init__(self, theta, z, *args, **kwargs): super(ZeroInflatedPoisson, self).__init__(*args, **kwargs) self.theta = theta self.z = z self.pois = Poisson.dist(theta) self.const = ConstantDist.dist(0) self.mode = self.pois.mode def logp(self, value): z = self.z return switch(z, self.pois.logp(value), self.const.logp(value))
{ "content_hash": "80610eb8d7d9fd7a8596cc2eaee20602", "timestamp": "", "source": "github", "line_count": 326, "max_line_length": 194, "avg_line_length": 26.631901840490798, "alnum_prop": 0.5187744759272057, "repo_name": "nmmarquez/pymc", "id": "fa78baa11fb4030b57548638e4c36eb92321b2a0", "size": "8682", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pymc3/distributions/discrete.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
import sys import os import json import cStringIO import tempfile import logging import traceback import codecs import math import textprocessor from collections import Counter from lib.stemutil import stem class WordCloud(textprocessor.TextProcessor): """ Generate word cloud """ def _basic_params(self): self.name = 'wordcloud' self.width = 300 self.height = 150 self.fontsize = [10, 32] self.n = 50 self.ngram = 1 self.tfidf_scoring = False def _findTfIdfScores(self, scale=True): self.freqs = Counter() self.tf_by_doc = {} self.max_tf = {} self.df = Counter() ngram = (1 if not hasattr(self, 'ngram') else self.ngram) self.stemming = getattr(self, 'stemming', False) for filename in self.files: flen = 0 self.tf_by_doc[filename] = self.getNgrams(filename, n=ngram, stemming=self.stemming) flen = sum(self.tf_by_doc[filename].values()) self.df.update(self.tf_by_doc[filename].keys()) self.freqs.update(self.tf_by_doc[filename]) for stem in self.tf_by_doc[filename].keys(): if scale: self.tf_by_doc[filename][stem] /= float(flen) # max_tf_d this_tf = self.tf_by_doc[filename][stem] else: this_tf = self.tf_by_doc[filename][stem] \ / float(flen) if stem not in self.max_tf or self.max_tf[stem] \ < this_tf: self.max_tf[stem] = this_tf self.update_progress() n = float(len(self.files)) self.idf = dict((term, math.log10(n / df)) for (term, df) in self.df.iteritems()) self.tfidf = dict((term, self.max_tf[term] * self.idf[term]) for term in self.max_tf.keys()) tfidf_values = self.tfidf.values() top_terms = min(int(len(self.freqs.keys()) * 0.7), 5000) min_score = sorted(tfidf_values, reverse=True)[min(top_terms, len(tfidf_values) - 1)] self.filtered_freqs = dict((term, freq) for (term, freq) in self.freqs.iteritems() if self.tfidf[term] > min_score and self.df[term] > 3) def _topN(self, freqs, n=None): if n is None: n = self.n final_freqs = [] top_freqs = sorted(freqs.values()) if len(top_freqs) == 0: return [] min_freq = top_freqs[-min(n, len(top_freqs))] # find nth frequency from end, or 
start of list for (word, freq) in freqs.iteritems(): if freq >= min_freq: final_freqs.append({'text': word, 'value': freq}) return final_freqs def _mostExtremeN(self, freqs, n=None): if n is None: n = self.n final_freqs = [] freqs_sort = sorted(freqs.values()) if len(freqs_sort) == 0: return [] min_freq_top = freqs_sort[-min(n / 2, len(freqs_sort))] max_freq_bottom = freqs_sort[min(n / 2, len(freqs_sort) - 1)] for (word, freq) in freqs.iteritems(): if freq >= min_freq_top or freq < max_freq_bottom: final_freqs.append({'text': word, 'value': freq}) return final_freqs def _findWordFreqs(self, filenames): self.stemming = getattr(self, 'stemming', False) freqs = Counter() for filename in filenames: freqs.update(self.getNgrams(filename, stemming=self.stemming)) self.update_progress() return self._topN(freqs) def process(self): logging.info('starting to process') self.template_filename = os.path.join(self.cwd, 'templates', 'wordcloud.html') logging.info('finding word frequencies') if self.tfidf_scoring: self._findTfIdfScores() freqs = self._topN(self.filtered_freqs) else: freqs = self._findWordFreqs(self.files) params = { 'DATA': freqs, 'WIDTH': self.width, 'HEIGHT': self.height, 'FONTSIZE': self.fontsize, 'FORMAT': (u'tf-idf: {0}' if self.tfidf_scoring else u'{0} occurrences in corpus' ), } self.write_html(params) if __name__ == '__main__': try: processor = WordCloud(track_progress=True) processor.process() except: logging.error(traceback.format_exc())
{ "content_hash": "3cd96672a8ab0e761642386797f5f2dc", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 102, "avg_line_length": 33.628571428571426, "alnum_prop": 0.526338147833475, "repo_name": "ChristianFrisson/papermachines", "id": "388ff5374275f962306d59e2d938170d6625e87e", "size": "4751", "binary": false, "copies": "2", "ref": "refs/heads/horizon", "path": "chrome/content/papermachines/processors/wordcloud.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "2471" }, { "name": "CSS", "bytes": "3756" }, { "name": "HTML", "bytes": "8648" }, { "name": "JavaScript", "bytes": "393253" }, { "name": "Python", "bytes": "170061" }, { "name": "Shell", "bytes": "10514" } ], "symlink_target": "" }
import datetime import glob import json import logging import os import re import shutil import socket import subprocess import sys import work_queue as wq from collections import defaultdict, Counter from hashlib import sha1 from lobster import fs, util from lobster.cmssw import dash from lobster.core import unit from lobster.core import Algo from lobster.core import MergeTaskHandler from WMCore.Storage.SiteLocalConfig import loadSiteLocalConfig, SiteConfigError logger = logging.getLogger('lobster.source') class ReleaseSummary(object): """Summary of returned tasks. Prints a user-friendly summary of which tasks returned with what exit code/status. """ flags = { wq.WORK_QUEUE_RESULT_INPUT_MISSING: "missing input", # 1 wq.WORK_QUEUE_RESULT_OUTPUT_MISSING: "missing output", # 2 wq.WORK_QUEUE_RESULT_STDOUT_MISSING: "no stdout", # 4 wq.WORK_QUEUE_RESULT_SIGNAL: "signal received", # 8 wq.WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION: "exhausted resources", # 16 wq.WORK_QUEUE_RESULT_TASK_TIMEOUT: "time out", # 32 wq.WORK_QUEUE_RESULT_UNKNOWN: "unclassified error", # 64 wq.WORK_QUEUE_RESULT_FORSAKEN: "unrelated error", # 128 wq.WORK_QUEUE_RESULT_MAX_RETRIES: "exceed # retries", # 256 wq.WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME: "exceeded runtime" # 512 } def __init__(self): self.__exe = {} self.__wq = {} self.__taskdirs = {} self.__monitors = [] def exe(self, status, taskid): try: self.__exe[status].append(taskid) except KeyError: self.__exe[status] = [taskid] def wq(self, status, taskid): for flag in ReleaseSummary.flags.keys(): if status == flag: try: self.__wq[flag].append(taskid) except KeyError: self.__wq[flag] = [taskid] def dir(self, taskid, taskdir): self.__taskdirs[taskid] = taskdir def monitor(self, taskid): self.__monitors.append(taskid) def __str__(self): s = "received the following task(s):\n" for status in sorted(self.__exe.keys()): s += "returned with status {0}: {1}\n".format(status, ", ".join(self.__exe[status])) if status != 0: s += "parameters and logs 
in:\n\t{0}\n".format( "\n\t".join([self.__taskdirs[t] for t in self.__exe[status]])) for flag in sorted(self.__wq.keys()): s += "failed due to {0}: {1}\nparameters and logs in:\n\t{2}\n".format( ReleaseSummary.flags[flag], ", ".join(self.__wq[flag]), "\n\t".join([self.__taskdirs[t] for t in self.__wq[flag]])) if self.__monitors: s += "resource monitoring unavailable for the following tasks: {0}\n".format(", ".join(self.__monitors)) # Trim final newline return s[:-1] class TaskProvider(util.Timing): def __init__(self, config): util.Timing.__init__(self, 'dash', 'handler', 'updates', 'elk', 'transfers', 'cleanup', 'propagate', 'sqlite') self.config = config self.basedirs = [config.base_directory, config.startup_directory] self.workdir = config.workdir self._storage = config.storage self.statusfile = os.path.join(self.workdir, 'status.json') self.siteconf = os.path.join(self.workdir, 'siteconf') self.parrot_path = os.path.dirname(util.which('parrot_run')) self.parrot_bin = os.path.join(self.workdir, 'bin') self.parrot_lib = os.path.join(self.workdir, 'lib') self.__algo = Algo(config) self.__host = socket.getfqdn() try: siteconf = loadSiteLocalConfig() self.__ce = siteconf.siteName self.__se = siteconf.localStageOutPNN() self.__frontier_proxy = siteconf.frontierProxies[0] except (SiteConfigError, IndexError): logger.error("can't load siteconfig, defaulting to hostname") self.__ce = socket.getfqdn() self.__se = socket.getfqdn() try: self.__frontier_proxy = os.environ['HTTP_PROXY'] except KeyError: logger.error("can't determine proxy for Frontier via $HTTP_PROXY") sys.exit(1) try: with open('/etc/cvmfs/default.local') as f: lines = f.readlines() except IOError: lines = [] for l in lines: m = re.match('\s*CVMFS_HTTP_PROXY\s*=\s*[\'"]?(.*)[\'"]?', l) if m: self.__cvmfs_proxy = m.group(1).strip("\"'") break else: try: self.__cvmfs_proxy = os.environ['HTTP_PROXY'] except KeyError: logger.error("can't determine proxy for CVMFS via $HTTP_PROXY") sys.exit(1) logger.debug("using 
{} as proxy for CVMFS".format(self.__cvmfs_proxy)) logger.debug("using {} as proxy for Frontier".format(self.__frontier_proxy)) logger.debug("using {} as osg_version".format(self.config.advanced.osg_version)) util.sendemail("Your Lobster project has started!", self.config) self.__taskhandlers = {} self.__store = unit.UnitStore(self.config) self.__setup_inputs() self.copy_siteconf() create = not util.checkpoint(self.workdir, 'id') if create: self.taskid = 'lobster_{0}_{1}'.format( self.config.label, sha1(str(datetime.datetime.utcnow())).hexdigest()[-16:]) util.register_checkpoint(self.workdir, 'id', self.taskid) shutil.copy(self.config.base_configuration, os.path.join(self.workdir, 'config.py')) else: self.taskid = util.checkpoint(self.workdir, 'id') util.register_checkpoint(self.workdir, 'RESTARTED', str(datetime.datetime.utcnow())) if not util.checkpoint(self.workdir, 'executable'): # We can actually have more than one exe name (one per task label) # Set 'cmsRun' if any of the tasks are of that type, # or use cmd command if all tasks execute the same cmd, # or use 'noncmsRun' if task cmds are different # Using this for dashboard exe name reporting cmsconfigs = [wflow.pset for wflow in self.config.workflows] cmds = [wflow.command for wflow in self.config.workflows] if any(cmsconfigs): exename = 'cmsRun' elif all(x == cmds[0] and x is not None for x in cmds): exename = cmds[0] else: exename = 'noncmsRun' util.register_checkpoint(self.workdir, 'executable', exename) for wflow in self.config.workflows: if create and not util.checkpoint(self.workdir, wflow.label): wflow.setup(self.workdir, self.basedirs) logger.info("querying backend for {0}".format(wflow.label)) with fs.alternative(): dataset_info = wflow.dataset.get_info() logger.info("registering {0} in database".format(wflow.label)) self.__store.register_dataset(wflow, dataset_info, wflow.category.runtime) util.register_checkpoint(self.workdir, wflow.label, 'REGISTERED') elif 
os.path.exists(os.path.join(wflow.workdir, 'running')): for id in self.get_taskids(wflow.label): util.move(wflow.workdir, id, 'failed') for wflow in self.config.workflows: if wflow.parent: getattr(self.config.workflows, wflow.parent.label).register(wflow) if create: total_units = wflow.dataset.total_units * len(wflow.unique_arguments) self.__store.register_dependency(wflow.label, wflow.parent.label, total_units) if not util.checkpoint(self.workdir, 'sandbox cmssw version'): util.register_checkpoint(self.workdir, 'sandbox', 'CREATED') versions = set([w.version for w in self.config.workflows]) if len(versions) == 1: util.register_checkpoint(self.workdir, 'sandbox cmssw version', list(versions)[0]) if self.config.elk: if create: categories = {wflow.category.name: [] for wflow in self.config.workflows} for category in categories: for workflow in self.config.workflows: if workflow.category.name == category: categories[category].append(workflow.label) self.config.elk.create(categories) else: self.config.elk.resume() self.config.advanced.dashboard.setup(self.config) if create: self.config.save() self.config.advanced.dashboard.register_run() else: self.config.advanced.dashboard.update_task_status( (id_, dash.ABORTED) for id_ in self.__store.reset_units() ) for p in (self.parrot_bin, self.parrot_lib): if not os.path.exists(p): os.makedirs(p) for exe in ('parrot_run', 'chirp', 'chirp_put', 'chirp_get'): shutil.copy(util.which(exe), self.parrot_bin) subprocess.check_call(["strip", os.path.join(self.parrot_bin, exe)]) p_helper = os.path.join(os.path.dirname(self.parrot_path), 'lib', 'lib64', 'libparrot_helper.so') shutil.copy(p_helper, self.parrot_lib) def copy_siteconf(self): storage_in = os.path.join(os.path.dirname(__file__), 'data', 'siteconf', 'PhEDEx', 'storage.xml') storage_out = os.path.join(self.siteconf, 'PhEDEx', 'storage.xml') if not os.path.exists(os.path.dirname(storage_out)): os.makedirs(os.path.dirname(storage_out)) xml = '' for n, server in 
enumerate(self.config.advanced.xrootd_servers): xml += ' <lfn-to-pfn protocol="xrootd{}"'.format('' if n == 0 else '-fallback{}'.format(n)) \ + ' destination-match=".*" path-match="/+store/(.*)"' \ + ' result="root://{}//store/$1"/>\n'.format(server) with open(storage_in) as fin: with open(storage_out, 'w') as fout: fout.write(fin.read().format(xrootd_rules=xml)) jobconfig_in = os.path.join(os.path.dirname(__file__), 'data', 'siteconf', 'JobConfig', 'site-local-config.xml') jobconfig_out = os.path.join(self.siteconf, 'JobConfig', 'site-local-config.xml') if not os.path.exists(os.path.dirname(jobconfig_out)): os.makedirs(os.path.dirname(jobconfig_out)) xml = '' for n, server in enumerate(self.config.advanced.xrootd_servers): xml += ' <catalog url="trivialcatalog_file:siteconf/PhEDEx/storage.xml?protocol=xrootd{}"/>\n'.format( '' if n == 0 else '-fallback{}'.format(n)) with open(jobconfig_in) as fin: with open(jobconfig_out, 'w') as fout: fout.write(fin.read().format(xrootd_catalogs=xml)) def __find_root(self, label): while getattr(self.config.workflows, label).parent: label = getattr(self.config.workflows, label).parent return label def __setup_inputs(self): self._inputs = [ (self.siteconf, 'siteconf', False), (os.path.join(os.path.dirname(__file__), 'data', 'wrapper.sh'), 'wrapper.sh', True), (os.path.join(os.path.dirname(__file__), 'data', 'task.py'), 'task.py', True), (os.path.join(os.path.dirname(__file__), 'data', 'report.json'), 'report.json.in', True), (self.parrot_bin, 'bin', True), (self.parrot_lib, 'lib', True), ] # Files to make the task wrapper work without referencing WMCore # from somewhere else import WMCore base = os.path.dirname(WMCore.__file__) reqs = [ "__init__.py", "Algorithms", "Configuration.py", "DataStructs", "FwkJobReport", "Services", "Storage", "WMException.py", "WMExceptions.py" ] for f in reqs: self._inputs.append((os.path.join(base, f), os.path.join("python", "WMCore", f), True)) if 'X509_USER_PROXY' in os.environ: 
self._inputs.append((os.environ['X509_USER_PROXY'], 'proxy', False)) def get_taskids(self, label, status='running'): # Iterates over the task directories and returns all taskids found # therein. parent = os.path.join(self.workdir, label, status) for d in glob.glob(os.path.join(parent, '*', '*')): yield int(os.path.relpath(d, parent).replace(os.path.sep, '')) def get_report(self, label, task): return os.path.join(self.workdir, label, 'successful', util.id2dir(task), 'report.json') def obtain(self, total, tasks): """ Obtain tasks from the project. Will create tasks for all workflows, if possible. Merge tasks are always created, given enough successful tasks. The remaining tasks are split proportionally between the categories based on remaining resources multiplied by cores used per task. Within categories, tasks are created based on the same logic. Parameters ---------- total : int Number of cores available. tasks : dict Dictionary with category names as keys and the number of tasks in the queue as values. 
""" remaining = dict((wflow, self.__store.work_left(wflow.label)) for wflow in self.config.workflows) taskinfos = [] for wflow in self.config.workflows: taskinfos += self.__store.pop_unmerged_tasks(wflow.label, wflow.merge_size, 10) for label, ntasks, taper in self.__algo.run(total, tasks, remaining): infos = self.__store.pop_units(label, ntasks, taper) logger.debug("created {} tasks for workflow {}".format(len(infos), label)) taskinfos += infos if not taskinfos or len(taskinfos) == 0: return [] tasks = [] ids = [] registration = dict( zip( [t[0] for t in taskinfos], self.config.advanced.dashboard.register_tasks(t[0] for t in taskinfos) ) ) for (id, label, files, lumis, unique_arg, merge) in taskinfos: wflow = getattr(self.config.workflows, label) ids.append(id) jdir = util.taskdir(wflow.workdir, id) inputs = list(self._inputs) inputs.append((os.path.join(jdir, 'parameters.json'), 'parameters.json', False)) outputs = [(os.path.join(jdir, f), f) for f in ['report.json']] monitorid, syncid = registration[id] config = { 'mask': { 'files': None, 'lumis': None, 'events': None }, 'monitoring': { 'monitorid': monitorid, 'syncid': syncid, 'taskid': self.taskid, }, 'default host': self.__host, 'default ce': self.__ce, 'default se': self.__se, 'arguments': None, 'output files': [], 'want summary': True, 'executable': None, 'pset': None, 'prologue': None, 'epilogue': None, 'gridpack': False } cmd = 'sh wrapper.sh python task.py parameters.json' env = { 'LOBSTER_CVMFS_PROXY': self.__cvmfs_proxy, 'LOBSTER_FRONTIER_PROXY': self.__frontier_proxy, 'LOBSTER_OSG_VERSION': self.config.advanced.osg_version } if merge: missing = [] infiles = [] inreports = [] for task, _, _, _ in lumis: report = self.get_report(label, task) _, infile = list(wflow.get_outputs(task))[0] if os.path.isfile(report): inreports.append(report) infiles.append((task, infile)) else: missing.append(task) if len(missing) > 0: template = "the following have been marked as failed because their output could not be 
found: {0}" logger.warning(template.format(", ".join(map(str, missing)))) self.__store.update_missing(missing) if len(infiles) <= 1: # FIXME report these back to the database and then skip # them. Without failing these task ids, accounting of # running tasks is going to be messed up. logger.debug("skipping task {0} with only one input file!".format(id)) # takes care of the fields set to None in config wflow.adjust(config, env, jdir, inputs, outputs, merge, reports=inreports) files = infiles else: # takes care of the fields set to None in config wflow.adjust(config, env, jdir, inputs, outputs, merge, unique=unique_arg) handler = wflow.handler(id, files, lumis, jdir, merge=merge) # set input/output transfer parameters self._storage.preprocess(config, merge or wflow.parent) # adjust file and lumi information in config, add task specific # input/output files handler.adjust(config, inputs, outputs, self._storage) with open(os.path.join(jdir, 'parameters.json'), 'w') as f: json.dump(config, f, indent=2) f.write('\n') tasks.append(('merge' if merge else wflow.category.name, cmd, id, inputs, outputs, env, jdir)) self.__taskhandlers[id] = handler logger.info("creating task(s) {0}".format(", ".join(map(str, ids)))) self.config.advanced.dashboard.free() return tasks def release(self, tasks): fail_cleanup = [] merge_cleanup = [] input_cleanup = [] update = defaultdict(list) propagate = defaultdict(dict) input_files = defaultdict(set) summary = ReleaseSummary() transfers = defaultdict(lambda: defaultdict(Counter)) with self.measure('dash'): self.config.advanced.dashboard.update_task_status( (task.tag, dash.DONE) for task in tasks ) for task in tasks: with self.measure('updates'): handler = self.__taskhandlers[task.tag] failed, task_update, file_update, unit_update = handler.process(task, summary, transfers) wflow = getattr(self.config.workflows, handler.dataset) with self.measure('elk'): if self.config.elk: self.config.elk.index_task(task) 
self.config.elk.index_task_update(task_update) with self.measure('handler'): if failed: faildir = util.move(wflow.workdir, handler.id, 'failed') summary.dir(str(handler.id), faildir) fail_cleanup.extend([lf for rf, lf in handler.outputs]) else: util.move(wflow.workdir, handler.id, 'successful') merge = isinstance(handler, MergeTaskHandler) if (wflow.merge_size <= 0 or merge) and len(handler.outputs) > 0: outfn = handler.outputs[0][1] outinfo = handler.output_info for dep in wflow.dependents: propagate[dep.label][outfn] = outinfo if merge: merge_cleanup.extend(handler.input_files) if wflow.cleanup_input: input_files[handler.dataset].update(set([f for (_, _, f) in file_update])) update[(handler.dataset, handler.unit_source)].append((task_update, file_update, unit_update)) del self.__taskhandlers[task.tag] with self.measure('dash'): self.config.advanced.dashboard.update_task_status( (task.tag, dash.RETRIEVED) for task in tasks ) if len(update) > 0: with self.measure('sqlite'): logger.info(summary) self.__store.update_units(update) with self.measure('cleanup'): if len(input_files) > 0: input_cleanup.extend(self.__store.finished_files(input_files)) for cleanup in [fail_cleanup, merge_cleanup + input_cleanup]: if len(cleanup) > 0: try: fs.remove(*cleanup) except (IOError, OSError): pass except ValueError as e: logger.error("error removing {0}:\n{1}".format(task.tag, e)) with self.measure('propagate'): for label, infos in propagate.items(): unique_args = getattr(self.config.workflows, label).unique_arguments self.__store.register_files(infos, label, unique_args) if len(transfers) > 0: with self.measure('transfers'): self.__store.update_transfers(transfers) if self.config.elk: with self.measure('elk'): try: self.config.elk.index_summary(self.__store.workflow_status()) except Exception as e: logger.error('ELK failed to index summary:\n{}'.format(e)) def terminate(self): self.config.advanced.dashboard.update_task_status( (str(id), dash.CANCELLED) for id in 
self.__store.running_tasks() ) def done(self): left = self.__store.unfinished_units() return self.__store.merged() and left == 0 def max_taskid(self): return self.__store.max_taskid() def update(self, queue): # update dashboard status for all unfinished tasks. # WAITING_RETRIEVAL is not a valid status in dashboard, # so skipping it for now. exclude_states = (dash.DONE, dash.WAITING_RETRIEVAL) try: self.config.advanced.dashboard.update_tasks(queue, exclude_states) except Exception as e: logger.warning("could not update task states to dashboard") logger.exception(e) def update_stuck(self): """Have the unit store updated the statistics for stuck units. """ self.__store.update_workflow_stats_stuck() def update_runtime(self, category): """Update the runtime for all workflows with the corresponding category. """ update = [] for wflow in self.config.workflows: if wflow.category == category: update.append((category.runtime, wflow.label)) self.__store.update_workflow_runtime(update) def tasks_left(self): return self.__store.estimate_tasks_left() def work_left(self): return self.__store.unfinished_units()
{ "content_hash": "e50de8464f97da6b8aef44b1138a985e", "timestamp": "", "source": "github", "line_count": 582, "max_line_length": 120, "avg_line_length": 40.9106529209622, "alnum_prop": 0.5524989500209996, "repo_name": "matz-e/lobster", "id": "11c51fcf7c5e2e59273d7a0cb6331fed23f31c9c", "size": "23810", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lobster/core/source.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "4633" }, { "name": "HTML", "bytes": "36522" }, { "name": "Python", "bytes": "436141" }, { "name": "Shell", "bytes": "7791" } ], "symlink_target": "" }
"""djangoTestAPI URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url #, include from rest_framework.urlpatterns import format_suffix_patterns from django.contrib import admin from snippets import views # def url(*args): # pass # def include(*args): # pass urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^snippets/$', views.snippet_list), url(r'^snippets/(?P<pk>[0-9]+)/$', views.snippet_detail), # url(r'^', include('snippets.urls')), ] urlpatterns = format_suffix_patterns(urlpatterns)
{ "content_hash": "7adafc59c7210923fa14a427300252f1", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 79, "avg_line_length": 29.23076923076923, "alnum_prop": 0.6885964912280702, "repo_name": "DigitalTensionExperiment/DjangoTestAPI", "id": "9e0c60d26cd73c8080b3764efc908e0eedf3b3f9", "size": "1140", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "djangoTestAPI/djangoTestAPI/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "23914" } ], "symlink_target": "" }
from __future__ import print_function # Author: Sarah Knepper <sarah.knepper@intel.com> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import time from upm import pyupm_grove as grove def main(): # Create the relay switch object using GPIO pin 0 relay = grove.Relay(0) # Close and then open the relay switch 3 times, # waiting one second each time. The LED on the relay switch # will light up when the switch is on (closed). # The switch will also make a noise between transitions. for i in range (0,3): relay.on() if relay.isOn(): print(relay.name(), 'is on') time.sleep(1) relay.off() if relay.isOff(): print(relay.name(), 'is off') time.sleep(1) # Delete the relay switch object del relay if __name__ == '__main__': main()
{ "content_hash": "e57e27d890778d8dfc2bba02ed56f4e2", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 72, "avg_line_length": 38.69387755102041, "alnum_prop": 0.7078059071729957, "repo_name": "stefan-andritoiu/upm", "id": "b51e308a00e8f2453ce6dc70f61206c01ecba4b4", "size": "1918", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "examples/python/relay.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "3304808" }, { "name": "C++", "bytes": "3506744" }, { "name": "CMake", "bytes": "152907" }, { "name": "CSS", "bytes": "18714" }, { "name": "HTML", "bytes": "32376" }, { "name": "JavaScript", "bytes": "7727" }, { "name": "Objective-C", "bytes": "4075" }, { "name": "Python", "bytes": "39692" }, { "name": "Shell", "bytes": "10722" } ], "symlink_target": "" }
import pytest @pytest.fixture def pipeline_1(): return { 'schema_version': '1', 'status': 'active', 'title': 'Test pipeline', } @pytest.fixture def pipeline_2(award, lab): return { 'schema_version': '2', 'status': 'active', 'title': 'Test pipeline', 'award': award['uuid'], 'lab': lab['uuid'], } @pytest.fixture def pipeline_7(award, lab): return { 'assay_term_name': 'MNase-seq', 'schema_version': '7', 'status': 'active', 'title': 'Test pipeline', 'award': award['uuid'], 'lab': lab['uuid'], } def test_pipeline_upgrade_1_2(upgrader, pipeline_1): value = upgrader.upgrade('pipeline', pipeline_1, target_version='2') assert value['schema_version'] == '2' assert value.get('award') is not None def test_pipeline_upgrade_2_3(upgrader, pipeline_2): value = upgrader.upgrade('pipeline', pipeline_2, current_version='2', target_version='3') assert value['schema_version'] == '3' assert 'name' not in value assert 'version' not in value assert 'end_points' not in value def test_pipeline_upgrade_7_8(upgrader, pipeline_7): value = upgrader.upgrade('pipeline', pipeline_7, current_version='7', target_version='8') assert value['schema_version'] == '8' assert 'assay_term_name' not in value assert value['assay_term_names'] == ['MNase-seq']
{ "content_hash": "f254b752e21eb62cee8e80b287f1733f", "timestamp": "", "source": "github", "line_count": 52, "max_line_length": 93, "avg_line_length": 27.442307692307693, "alnum_prop": 0.6012613875262789, "repo_name": "T2DREAM/t2dream-portal", "id": "b12e54c49c2b8b3e01de61890a08b97508dcc0f2", "size": "1427", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/encoded/tests/test_upgrade_pipeline.py", "mode": "33188", "license": "mit", "language": [ { "name": "AngelScript", "bytes": "741" }, { "name": "CSS", "bytes": "2874" }, { "name": "Gherkin", "bytes": "16776" }, { "name": "HTML", "bytes": "373076" }, { "name": "JavaScript", "bytes": "1320205" }, { "name": "Makefile", "bytes": "106" }, { "name": "Python", "bytes": "1567328" }, { "name": "SCSS", "bytes": "336182" }, { "name": "Shell", "bytes": "4199" } ], "symlink_target": "" }
""" Supports flushing metrics to graphite """ import sys import socket import logging class GraphiteStore(object): def __init__(self, host="localhost", port=2003, prefix="statsite", attempts=3): """ Implements an interface that allows metrics to be persisted to Graphite. Raises a :class:`ValueError` on bad arguments. :Parameters: - `host` : The hostname of the graphite server. - `port` : The port of the graphite server - `prefix` (optional) : A prefix to add to the keys. Defaults to 'statsite' - `attempts` (optional) : The number of re-connect retries before failing. """ # Convert the port to an int since its coming from a configuration file port = int(port) attempts = int(attempts) if port <= 0: raise ValueError("Port must be positive!") if attempts <= 1: raise ValueError("Must have at least 1 attempt!") self.host = host self.port = port self.prefix = prefix self.attempts = attempts self.sock = self._create_socket() self.logger = logging.getLogger("statsite.graphitestore") def flush(self, metrics): """ Flushes the metrics provided to Graphite. :Parameters: - `metrics` : A list of (key,value,timestamp) tuples. """ if not metrics: return # Construct the output metrics = [m.split("|") for m in metrics if m] self.logger.info("Outputting %d metrics" % len(metrics)) if self.prefix: lines = ["%s.%s %s %s" % (self.prefix, k, v, ts) for k, v, ts in metrics] else: lines = ["%s %s %s" % (k, v, ts) for k, v, ts in metrics] data = "\n".join(lines) + "\n" # Serialize writes to the socket try: self._write_metric(data) except: self.logger.exception("Failed to write out the metrics!") def close(self): """ Closes the connection. The socket will be recreated on the next flush. 
""" self.sock.close() def _create_socket(self): """Creates a socket and connects to the graphite server""" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) return sock def _write_metric(self, metric): """Tries to write a string to the socket, reconnecting on any errors""" for attempt in xrange(self.attempts): try: self.sock.sendall(metric) return except socket.error: self.logger.exception("Error while flushing to graphite. Reattempting...") self.sock = self._create_socket() self.logger.critical("Failed to flush to Graphite! Gave up after %d attempts." % self.attempts) if __name__ == "__main__": # Initialize the logger logging.basicConfig() # Intialize from our arguments graphite = GraphiteStore(*sys.argv[1:]) # Get all the inputs metrics = sys.stdin.read() # Flush graphite.flush(metrics.splitlines()) graphite.close()
{ "content_hash": "2c5effb7f42e217069700e09db0c41c3", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 103, "avg_line_length": 31.99, "alnum_prop": 0.5804939043451078, "repo_name": "atdt/statsite-pkg", "id": "e8525dccb2b2e555057fc9662ce5a8ef40b38023", "size": "3199", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sinks/graphite.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "279957" }, { "name": "C++", "bytes": "9414" }, { "name": "Python", "bytes": "47040" }, { "name": "Ruby", "bytes": "3650" } ], "symlink_target": "" }
import sys, os import types import re import unittest from StringIO import StringIO import shutil import svnmerge import stat import atexit import getopt import locale #### # IMPORTANT NOTE TO TEST AUTHORS # # Any quoted strings inside the arguments of the parameter "cmd" must # be enclosed in double-, not single-quotes, so that the command parser # knows to keep them together. For example, do not write this: # launch("svn ci -m 'log comment'") # BAD # ...but one of these: # launch('svn ci -m "log comment"') # GOOD # launch("svn ci -m \"log comment\"") # GOOD, but why? # Otherwise, you get an error saying # '<path>/comment' is not under version control # ...when running the tests on Windows. #### # True/False constants are Python 2.2+ try: True, False except NameError: True, False = 1, 0 class StringIOWithEncoding(StringIO): def __init__(self): StringIO.__init__(self) self.encoding = sys.stdout.encoding class TestCase_kwextract(unittest.TestCase): def test_basic(self): self.assertEqual(svnmerge.kwextract("$Rev: 134 rasky $"), "134 rasky") self.assertEqual(svnmerge.kwextract("$Date: 2005-09-25 13:45 CET+1$"), "2005-09-25 13:45 CET+1") def test_failure(self): self.assertEqual(svnmerge.kwextract("$Rev: $"), "<unknown>") self.assertEqual(svnmerge.kwextract("$Date$"), "<unknown>") def reset_svnmerge(): svnmerge.opts = svnmerge.default_opts.copy() svnmerge._cache_svninfo = {} svnmerge._cache_reporoot = {} svnmerge.PathIdentifier.locobjs = {} svnmerge.PathIdentifier.repo_hints = {} class TestCase_launch(unittest.TestCase): if os.name == "nt": cmd = "attrib" else: cmd = "ls" def test_basic(self): out = svnmerge.launch(self.cmd) self.assert_(out) for o in out: self.assertEqual(o[-1], "\n") def test_failure(self): self.assertRaises(svnmerge.LaunchError, svnmerge.launch, self.cmd*10) def test_failurecode(self): try: svnmerge.launch(self.cmd*10) except svnmerge.LaunchError as (ret, cmd, out): self.assertNotEqual(ret, 0) self.assertNotEqual(ret, None) self.assert_(out) 
self.assertEqual(cmd, self.cmd*10) else: self.fail("svnmerge.launch did not cause a LaunchError as expected") class TestCase_PrefixLines(unittest.TestCase): def test_basic(self): self.assertEqual("zz\n", svnmerge.prefix_lines("zz", "\n")) self.assertEqual("zzfoo\n", svnmerge.prefix_lines("zz", "foo\n")) self.assertEqual("zzfoo\nzzbar\n", svnmerge.prefix_lines("zz", "foo\nbar\n")) self.assertEqual("zz\nzzfoo\n", svnmerge.prefix_lines("zz", "\nfoo\n")) self.assertEqual("zz\nzzfoo\nzzbar\n", svnmerge.prefix_lines("zz", "\nfoo\nbar\n")) class TestCase_RevisionSet(unittest.TestCase): def test_constr_string(self): rs = svnmerge.RevisionSet("10- 15, 12-48,2 ") self.assert_(17 in rs) self.assert_(2 in rs) self.assert_(9 not in rs) rs = svnmerge.RevisionSet("10: 15, 12:48,2 ") self.assert_(17 in rs) self.assert_(2 in rs) self.assert_(9 not in rs) def test_constr_dict(self): rs = svnmerge.RevisionSet({18:1, 24:1, 25:1, 43:1}) self.assert_(24 in rs) self.assert_(18 in rs) self.assert_(44 not in rs) def test_constr_error(self): self.assertRaises(ValueError, svnmerge.RevisionSet, "10-12-15") self.assertRaises(ValueError, svnmerge.RevisionSet, "10;12-15") self.assertRaises(ValueError, svnmerge.RevisionSet, "10,foo,3-15") self.assertRaises(ValueError, svnmerge.RevisionSet, "10:12:15") self.assertRaises(ValueError, svnmerge.RevisionSet, "10;12:15") self.assertRaises(ValueError, svnmerge.RevisionSet, "10,foo,3:15") def test_normalized(self): rs = svnmerge.RevisionSet("8-15,16-18, 4-6, 9, 18, 1-1, 3-3") self.assertEqual(rs.normalized(), [(1,1), (3,6), (8,18)]) self.assertEqual(str(rs), "1,3-6,8-18") rs = svnmerge.RevisionSet("8:15,16:18, 4:6, 9, 18, 1:1, 3:3") self.assertEqual(rs.normalized(), [(1,1), (3,6), (8,18)]) self.assertEqual(str(rs), "1,3-6,8-18") def test_sorted(self): "Test the sorted() function of the RevisionSet class." 
rs = svnmerge.RevisionSet("8-15,16-18, 4-6, 9, 18, 1-1, 3-3") self.assertEqual(rs.sorted(), [1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) rs = svnmerge.RevisionSet("8:15,16:18, 4:6, 9, 18, 1:1, 3:3") self.assertEqual(rs.sorted(), [1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) def test_length(self): rs = svnmerge.RevisionSet("3-8") self.assertEqual(len(rs), 6) rs = svnmerge.RevisionSet("3-8,4-10") self.assertEqual(len(rs), 8) rs = svnmerge.RevisionSet("1,3,5") self.assertEqual(len(rs), 3) rs = svnmerge.RevisionSet("3:8") self.assertEqual(len(rs), 6) rs = svnmerge.RevisionSet("3:8,4:10") self.assertEqual(len(rs), 8) rs = svnmerge.RevisionSet("1,3,5") self.assertEqual(len(rs), 3) def test_iter(self): try: iter except NameError: pass else: rs = svnmerge.RevisionSet("4-13,1-5,34,20-22,18-21") self.assertEqual(list(iter(rs)), range(1,14)+range(18,23)+[34]) rs = svnmerge.RevisionSet("4:13,1:5,34,20:22,18:21") self.assertEqual(list(iter(rs)), range(1,14)+range(18,23)+[34]) def test_union(self): rs = svnmerge.RevisionSet("3-8,4-10") | svnmerge.RevisionSet("7-14,1") self.assertEqual(str(rs), "1,3-14") rs = svnmerge.RevisionSet("3:8,4:10") | svnmerge.RevisionSet("7:14,1") self.assertEqual(str(rs), "1,3-14") def test_subtraction(self): rs = svnmerge.RevisionSet("3-8,4-10") - svnmerge.RevisionSet("7-14,1") self.assertEqual(str(rs), "3-6") rs = svnmerge.RevisionSet("3:8,4:10") - svnmerge.RevisionSet("7:14,1") self.assertEqual(str(rs), "3-6") def test_constr_empty(self): rs = svnmerge.RevisionSet("") self.assertEqual(str(rs), "") class TestCase_PathIdentifier(unittest.TestCase): rrp = "/trunk/contrib/client-side/svnmerge" uuid = "65390229-12b7-0310-b90b-f21a5aa7ec8e" uuidrl = 'uuid://'+uuid+rrp url= "http://svn.apache.org/repos/asf/subversion/trunk/contrib/client-side/svnmerge" ext = "uuid://65390229-12b7-0310-b90b-f21a5aa7ec8e/trunk/contrib/client-side/svnmerge" def try_pathid(self, rrp, uuid, url, ext, expected_str, expected_formats): l = 
svnmerge.PathIdentifier(rrp, uuid, url, ext) self.assertEqual(str(l), expected_str, "str() gave '%s' instead of '%s'" % (str(l), expected_str)) for k, v in expected_formats.items(): self.assertEqual(l.format(k), v, "format('%s') gave '%s' instead of '%s'" % (k, l.format(k), v)) reset_svnmerge() def test_PathIdentifier_just_path(self): self.try_pathid(self.rrp, None, None, None, self.rrp, { 'path' : self.rrp }) def test_PathIdentifier_uuid(self): self.try_pathid(self.rrp, self.uuid, None, None, self.uuidrl, { 'path' : self.rrp, 'uuid' : self.uuidrl }) def test_PathIdentifier_url(self): self.try_pathid(self.rrp, None, self.url, None, self.url, { 'path' : self.rrp, 'url' : self.url }) def test_PathIdentifier_prefer_url(self): self.try_pathid(self.rrp, self.uuid, self.url, None, self.url, { 'path' : self.rrp, 'url' : self.url, 'uuid' : self.uuidrl }) def test_PathIdentifier_external_form(self): self.try_pathid(self.rrp, self.uuid, self.url, self.ext, self.ext, { 'path' : self.rrp, 'url' : self.url, 'uuid' : self.uuidrl }) class TestCase_MinimalMergeIntervals(unittest.TestCase): def test_basic(self): rs = svnmerge.RevisionSet("4-8,12,18,24") phantom = svnmerge.RevisionSet("8-11,13-16,19-23") revs = svnmerge.minimal_merge_intervals(rs, phantom) self.assertEqual(revs, [(4,12), (18,24)]) class TestCase_SvnMerge(unittest.TestCase): def svnmerge(self, cmds, *args, **kwargs): return self.svnmerge2(cmds.split(), *args, **kwargs) def svnmerge2(self, args, error=False, match=None, nonmatch=None): # svnmerge's get_commit_log method needs the "encoding" method of # sys.stdout, which is not provided by StringIO out = StringIOWithEncoding() sys.stdout = sys.stderr = out try: try: # Clear svnmerge's internal caches before running any # commands. 
reset_svnmerge() ret = svnmerge.main(args) except SystemExit as e: ret = e.code finally: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ if ret is None: ret = 0 if error: self.assertNotEqual(ret, 0, "svnmerge did not fail, with this output:\n%s" % out.getvalue()) else: self.assertEqual(ret, 0, "svnmerge failed, with this output:\n%s" % out.getvalue()) if match is not None: self.assert_(re.search(match, out.getvalue()), "pattern %r not found in output:\n%s" % (match, out.getvalue())) if nonmatch is not None: self.assert_(not re.search(nonmatch, out.getvalue()), "pattern %r found in output:\n%s" % (nonmatch, out.getvalue())) return out.getvalue() def _parseoutput(self, ret, out, error=False, match=None, nonmatch=None): if error: self.assertNotEqual(ret, 0, "svnmerge did not fail, with this output:\n%s" % out) else: self.assertEqual(ret, 0, "svnmerge failed, with this output:\n%s" % out) if match is not None: self.assert_(re.search(match, out), "pattern %r not found in output:\n%s" % (match, out)) if nonmatch is not None: self.assert_(not re.search(nonmatch, out), "pattern %r found in output:\n%s" % (nonmatch, out)) return out def launch(self, cmd, **kwargs): try: out = svnmerge.launch(cmd, split_lines=False) except svnmerge.LaunchError as (ret, cmd, out): return self._parseoutput(ret, out, **kwargs) return self._parseoutput(0, out, **kwargs) class TestCase_CommandLineOptions(TestCase_SvnMerge): def test_empty(self): self.svnmerge("") def test_help_commands(self): self.svnmerge("help") self.svnmerge("--help") self.svnmerge("-h") for cmd in svnmerge.command_table.keys(): self.svnmerge("help %s" % cmd) self.svnmerge("%s --help" % cmd) self.svnmerge("%s -h" % cmd) def test_wrong_commands(self): self.svnmerge("asijdoiasjd", error=True) self.svnmerge("help asijdoiasjd", error=True) def test_wrong_option(self): self.svnmerge("--asdsad", error=True) self.svnmerge("help --asdsad", error=True) self.svnmerge("init --asdsad", error=True) self.svnmerge("--asdsad init", 
error=True) def test_version(self): out = self.svnmerge("--version") self.assert_(out.find("Giovanni Bajo") >= 0) out = self.svnmerge("-V") self.assert_(out.find("Giovanni Bajo") >= 0) out = self.svnmerge("init -V") self.assert_(out.find("Giovanni Bajo") >= 0) def temp_path(): try: return os.environ["TEMP"] except KeyError: pass if os.name == "posix": return "/tmp" return "." def rmtree(path): def onerror(func, path, excinfo): if func in [os.remove, os.rmdir]: if os.path.exists(path): os.chmod(path, stat.S_IWRITE) func(path) if os.path.isdir(path): shutil.rmtree(path, onerror=onerror) def get_template_path(): p = os.path.join(temp_path(), "__svnmerge_test_template") return os.path.abspath(p) def get_test_path(): p = os.path.join(temp_path(), "__svnmerge_test") return os.path.abspath(p) def abspath_to_url(path): assert path == os.path.abspath(path) path = path.replace("\\", "/") if path[0] != '/': path = '/' + path return "file://" + path class TestCase_TestRepo(TestCase_SvnMerge): def setUp(self): """Creates a working copy of a branch at r13 with the following structure, containing revisions (3-6, 13): test-branch/ test1 test2 test3 ...from a repository with the following structure: Path Created rev ---- ----------- / 0 trunk/ 3 test1 4 test2 5 test3 6 test4 9 test5 10 branches/ 1 testYYY-branch/ 11 (renamed from testXXX-branch in 12) test1 4 test2 5 test3 6 test-branch/ 13 (copied from trunk@6) test1 4 test2 5 test3 6 tags/ 2 """ self.cwd = os.getcwd() reset_svnmerge() test_path = get_test_path() template_path = get_template_path() self.template_path = template_path self.test_path = test_path self.template_repo_path = os.path.join(template_path, "repo") self.template_repo_url = abspath_to_url(self.template_repo_path) self.test_repo_path = os.path.join(test_path, "repo") self.test_repo_url = abspath_to_url(self.test_repo_path) if not os.path.isdir(template_path): rmtree(template_path) os.makedirs(template_path) os.chdir(template_path) self.multilaunch(""" svnadmin 
create --fs-type fsfs %(TEMPLATE_REPO_PATH)s svn mkdir -m "create /branches" %(TEMPLATE_REPO_URL)s/branches svn mkdir -m "create /tags" %(TEMPLATE_REPO_URL)s/tags svn mkdir -m "create /trunk" %(TEMPLATE_REPO_URL)s/trunk svn co %(TEMPLATE_REPO_URL)s/trunk trunk """) os.chdir("trunk") open("test1", "w").write("test 1") open("test2", "w").write("test 2") open("test3", "w").write("test 3") open("test4", "w").write("test 4") open("test5", "w").write("test 5") self.multilaunch(""" svn add test1 svn ci -m "add test1" svn add test2 svn ci -m "add test2" svn add test3 svn ci -m "add test3" svn mkdir -m "create /foobar" %(TEMPLATE_REPO_URL)s/foobar svn rm -m "remove /foobar" %(TEMPLATE_REPO_URL)s/foobar svn add test4 svn ci -m "add test4" svn add test5 svn ci -m "add test5" svn cp -r6 -m "create branch" %(TEMPLATE_REPO_URL)s/trunk %(TEMPLATE_REPO_URL)s/branches/testXXX-branch svn mv -m "rename branch" %(TEMPLATE_REPO_URL)s/branches/testXXX-branch %(TEMPLATE_REPO_URL)s/branches/testYYY-branch svn cp -r6 -m "create branch" %(TEMPLATE_REPO_URL)s/trunk %(TEMPLATE_REPO_URL)s/branches/test-branch """) os.chdir("..") self.launch("svn co %(TEMPLATE_REPO_URL)s/branches/test-branch") os.chdir(self.cwd) rmtree(self.test_path) shutil.copytree(self.template_path, self.test_path) os.chdir(self.test_path) # Relocate the test working copies from using the template # repository to the test repository so the template repository # is not affected by commits. self.launch("svn switch --relocate %(TEMPLATE_REPO_URL)s %(TEST_REPO_URL)s trunk test-branch") os.chdir("test-branch") # Always remove the template directory when the tests have # completed. 
atexit.register(lambda: rmtree(template_path)) def tearDown(self): os.chdir(self.cwd) rmtree(self.test_path) def command_dict(self): return { "TEMPLATE_PATH": self.template_path, "TEMPLATE_REPO_PATH": self.template_repo_path, "TEMPLATE_REPO_URL": self.template_repo_url, "TEST_PATH": self.test_path, "TEST_REPO_PATH": self.test_repo_path, "TEST_REPO_URL": self.test_repo_url, } def launch(self, cmd, *args, **kwargs): cmd = cmd % self.command_dict() return TestCase_SvnMerge.launch(self, cmd, *args, **kwargs) def multilaunch(self, cmds): for cmd in cmds.split("\n"): cmd = cmd.strip() if len(cmd) > 0: svnmerge.launch(cmd % self.command_dict()) def revert(self): self.multilaunch("svn revert -R .") def getproperty(self): out = svnmerge.launch("svn pg %s ." % svnmerge.opts["prop"]) if len(out) == 0: return None else: return out[0].strip() def getBlockedProperty(self): out = svnmerge.launch("svn pg %s ." % svnmerge.opts["block-prop"]) if len(out) == 0: return None else: return out[0].strip() def testNoWc(self): os.mkdir("foo") os.chdir("foo") self.svnmerge("init", error=True, match=r"working dir") self.svnmerge("avail", error=True, match=r"working dir") self.svnmerge("integrated", error=True, match=r"working dir") self.svnmerge("merge", error=True, match=r"working dir") self.svnmerge("block", error=True, match=r"working dir") self.svnmerge("unblock", error=True, match=r"working dir") def testCheckNoIntegrationInfo(self): self.svnmerge("avail", error=True, match=r"no integration") self.svnmerge("integrated", error=True, match=r"no integration") self.svnmerge("merge", error=True, match=r"no integration") self.svnmerge("block", error=True, match=r"no integration") self.svnmerge("unblock", error=True, match=r"no integration") def testSelfReferentialInit(self): self.svnmerge2(["init", self.test_repo_url + "/branches/test-branch"], error=True, match=r"cannot init integration source") def testAvailURL(self): # Initialize svnmerge self.svnmerge("init") self.launch("svn commit -F 
svnmerge-commit-message.txt", match=r"Committed revision") self.svnmerge("avail %s/branches/test-branch" % self.test_repo_url, match=r"\A9-10$") def testBlocked(self): # Initialize svnmerge self.svnmerge("init") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") # Block revisions that have already been merged self.svnmerge("block -r5", error=True, match=r"no available revisions") # Block phantom revisions self.svnmerge("block -r8", error=True, match=r"no available revisions") # Block available revisions self.svnmerge("block -r9", match=r"'svnmerge-blocked' set") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") # Check that the revision is still available self.svnmerge("avail", match=r"\A10$") # Check that the revision was blocked correctly self.svnmerge("avail -B", match=r"\A9$") # Check that both revisions are available with avail -A self.svnmerge("avail -A", match=r"\A9-10$") # Block all remaining revisions self.svnmerge("block", match=r"'svnmerge-blocked' set") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") # Check that all revisions were blocked correctly self.svnmerge("avail -B", match=r"\A9-10$") # Check that all revisions are available using avail -A self.svnmerge("avail -A", match=r"\A9-10$") # Check that no revisions are available, now that they have # been blocked self.svnmerge("avail", match=r"\A\Z") # Unblock all revisions self.svnmerge("unblock", match=r"'svnmerge-blocked' deleted") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") # Check that all revisions are available self.svnmerge("avail", match=r"\A9-10$") self.svnmerge("avail -A", match=r"\A9-10$") # Check that no revisions are blocked self.svnmerge("avail -B", match=r"\A$") def testTransitiveMerge(self): """ Test a merge of a change from testYYY-branch -> test-branch -> trunk """ os.chdir("..") self.launch("svn co 
%(TEST_REPO_URL)s/branches/testYYY-branch testYYY-branch") os.chdir("trunk") self.launch("svn up") self.svnmerge("init ../test-branch") self.launch('svn ci -m "init test-branch -> trunk"', match=r"Committed revision 14") os.chdir("../test-branch") self.svnmerge("init -r 1-6 ../testYYY-branch") self.launch('svn ci -m "init testYYY-branch -> test-branch"', match=r"Committed revision 15") os.chdir("../testYYY-branch") open("test6", "w").write("test6") self.launch("svn add test6") self.launch('svn ci -m "add test6"', match=r"Committed revision 16") os.chdir("../test-branch") self.svnmerge("merge -r 16") self.launch('svn ci -m "merge r16"', match=r"Committed revision 17") #os.chdir("../test-branch") open("test5", "w").write("test5") self.launch("svn add test5") self.launch('svn ci -m "add test5"', match=r"Committed revision 18") os.chdir("../trunk") self.svnmerge("block -r 18") self.launch('svn ci -m "block r18"', match=r"Committed revision 19") #os.chdir("../trunk") out = self.svnmerge("merge -r 17") self.launch('svn ci -m "merge r17 from test-branch"', match=r"Committed revision 20") p = self.getproperty() self.assertEqual(p, '/branches/test-branch:1-13,17') p = self.getBlockedProperty() self.assertEqual(p, '/branches/test-branch:18') def testTransitiveMergeWithBlock(self): """ Test a merge of a change from testYYY-branch -> test-branch -> trunk """ os.chdir("..") self.launch("svn co %(TEST_REPO_URL)s/branches/testYYY-branch testYYY-branch") os.chdir("trunk") self.launch("svn up") self.svnmerge("init ../test-branch") self.launch('svn ci -m "init test-branch -> trunk"', match=r"Committed revision 14") os.chdir("../test-branch") self.svnmerge("init -r 1-6 ../testYYY-branch") self.launch('svn ci -m "init testYYY-branch -> test-branch"', match=r"Committed revision 15") os.chdir("../testYYY-branch") open("test4", "w").write("test4") self.launch("svn add test4") self.launch('svn ci -m "add test4"', match=r"Committed revision 16") os.chdir("../test-branch") 
self.svnmerge("block -r 16") self.launch('svn ci -m "block r16"', match=r"Committed revision 17") #os.chdir("../test-branch") open("test5", "w").write("test5") self.launch("svn add test5") self.launch('svn ci -m "add test5"', match=r"Committed revision 18") os.chdir("../trunk") self.svnmerge("block -r 18") self.launch('svn ci -m "block r18"', match=r"Committed revision 19") #os.chdir("../trunk") self.svnmerge("merge -r 17") self.launch('svn ci -m "merge r17 from test-branch"', match=r"Committed revision 20") p = self.getproperty() self.assertEqual(p, '/branches/test-branch:1-13,17') p = self.getBlockedProperty() self.assertEqual(p, '/branches/test-branch:18') def testBasic(self): self.svnmerge("init") p = self.getproperty() self.assertEqual("/trunk:1-6", p) self.svnmerge("avail", match=r"\A9-10$") self.svnmerge("avail -v", match=r"phantom.*7-8") self.svnmerge("avail -B", match=r"\A$") self.svnmerge("avail -A", match=r"\A9-10$") self.svnmerge("avail --log", match=r"| r7.*| r8") self.svnmerge("avail --diff -r9", match=r"Index: test4") self.svnmerge("avail --log -r5", match=r"\A\Z") self.svnmerge("avail --diff -r5", match=r"\A\Z") self.svnmerge("integrated", match=r"^3-6$") self.svnmerge("integrated --log -r5", match=r"| r5 ") self.svnmerge("integrated --diff -r5", match=r"Index: test2") def test_log_msg_suggest(self): self.svnmerge("init -vf commit-log.txt", match=r"wrote commit message") self.assert_(os.path.exists("commit-log.txt")) os.remove("commit-log.txt") def testInitForce(self): open("test1", "a").write("foo") self.svnmerge("init", error=True, match=r"clean") self.svnmerge("init -F") p = self.getproperty() self.assertEqual("/trunk:1-6", p) def testUninit(self): """Test that uninit works, for both merged and blocked revisions.""" os.chdir("..") self.launch("svn co %(TEST_REPO_URL)s/branches/testYYY-branch testYYY-branch") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. 
self.launch("svn update", match=r"At revision 13") self.svnmerge2(["init", "-r1-13", self.test_repo_url + "/branches/test-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 14") self.svnmerge2(["init", self.test_repo_url + "/branches/testYYY-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 15") # Create changes on test-branch that we can block os.chdir("..") os.chdir("test-branch") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 15") open("test1", "w").write("test 1-changed_on_test-branch") self.launch("svn commit -m \"Change to test1 on test-branch\"", match=r"Committed revision 16") # Create changes on testYYY-branch that we can block os.chdir("..") os.chdir("testYYY-branch") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 16") open("test2", "w").write("test 2-changed_on_testYYY-branch") self.launch("svn commit -m \"Change to test2 on testYYY-branch\"", match=r"Committed revision 17") # Block changes from both branches on the trunk os.chdir("..") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. 
self.launch("svn update", match=r"At revision 17") self.svnmerge("block -S testYYY-branch", match=r"'svnmerge-blocked' set") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 18") self.svnmerge("block -S test-branch", match=r"'svnmerge-blocked' set") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 19") # Do the uninit self.svnmerge2(["uninit", "--source", self.test_repo_url + "/branches/testYYY-branch"]) # Check that the merged property for testYYY-branch was removed, but # not for test-branch pmerged = self.getproperty() self.assertEqual("/branches/test-branch:1-13", pmerged) # Check that the blocked property for testYYY-branch was removed, but # not for test-branch pblocked = self.getBlockedProperty() self.assertEqual("/branches/test-branch:16", pblocked) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 20") self.svnmerge2(["uninit", "--source", self.test_repo_url + "/branches/test-branch"]) # Check that the merged and blocked properties for test-branch have been removed too pmerged = self.getproperty() self.assertEqual(None, pmerged) pblocked = self.getBlockedProperty() self.assertEqual(None, pblocked) def testUninitForce(self): self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") self.svnmerge2(["init", self.test_repo_url + "/branches/testYYY-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision") p = self.getproperty() # properties come back in arbitrary order, so search for them individually self.assertTrue(re.search("/branches/testYYY-branch:1-\d+?", p)) self.assertTrue(re.search("/trunk:1-\d+?", p)) open("test1", "a").write("foo") self.svnmerge("uninit --source " + self.test_repo_url + "/branches/testYYY-branch", error=True, match=r"clean") self.svnmerge("uninit -F --source " + self.test_repo_url + "/branches/testYYY-branch") p 
= self.getproperty() self.assert_(re.search("^/trunk:1-\d+", p)) def testCheckNoCopyfrom(self): os.chdir("..") os.chdir("trunk") self.svnmerge("init", error=True, match=r"no copyfrom") def testInitScenarios(self): """ Run various scenarios w/ svnmerge.py init and verify the default values that are set as the integrated revisions.""" # Run init with branch as merge source and trunk as merge target os.chdir("..") os.chdir("trunk") self.svnmerge("init ../test-branch") # Verify range ends at rev in which branch was created self.launch("svn proplist -v", match=r":1-13") self.revert() # Run init with TRUNK as merge source and BRANCH as merge target os.chdir("..") os.chdir("test-branch") self.svnmerge("init ../trunk") # Verify range ends at rev of trunk which was copied to create branch self.launch("svn proplist -v", match=r":1-6") self.revert() # Same thing, but with no explicit parameter (should work implicitly) self.svnmerge("init") # Verify range ends at rev of trunk which was copied to create branch self.launch("svn proplist -v", match=r":1-6") self.revert() # Run init with TRUNK as merge src, & any other branch which is not # a copy of trunk (or the source from which trunk was copied) # as the merge target. 
os.chdir("../trunk") os.chdir("..") self.launch('svn mkdir -m "create /other" %(TEST_REPO_URL)s/other') # creates r14 self.launch("svn co %(TEST_REPO_URL)s/other") os.chdir("other") self.svnmerge("init ../trunk") # Verify integrated range ends with merge source's latest rev as of # the time of initialization: self.launch("svn proplist -v", match=r":1-14") self.revert() # Run init w/ explicit parms; verify them self.svnmerge("init -r 1-999 ../trunk") self.launch("svn proplist -v", match=r":1-999") def testTrimmedAvailMerge(self): """Check that both avail and merge do not search for phantom revs too hard.""" self.svnmerge("init") self.svnmerge("avail -vv -r8-9", match=r"svn --non-interactive log.*-r8:9") self.svnmerge("merge -F -vv -r8-9", match=r"svn --non-interactive log.*-r8:9") self.svnmerge("avail -vv -r2", nonmatch=r"svn log") self.svnmerge("integrated", match=r"^3-6,8-9$") def testMergeRecordOnly(self): """Check that flagging revisions as manually merged works.""" self.svnmerge("init") self.svnmerge("avail -vv -r9", match=r"svn --non-interactive log.*-r9:9") self.svnmerge("merge --record-only -F -vv -r9", nonmatch=r"svn merge -r 8:9") self.svnmerge("avail -r9", match=r"\A$") self.svnmerge("integrated", match=r"^3-6,9$") self.svnmerge("integrated -r9", match=r"^9$") def testBidirectionalMerges(self): """Check that reflected revisions are recognized properly for bidirectional merges.""" os.chdir("..") os.chdir("test-branch") self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 14") os.remove("svnmerge-commit-message.txt") os.chdir("..") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. 
self.launch("svn update", match=r"At revision 14") self.svnmerge2(["init", "-r1-14", self.test_repo_url + "/branches/test-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 15") os.remove("svnmerge-commit-message.txt") open("test1", "w").write("test 1-changed_on_trunk") self.launch("svn commit -m \"Change to test1 on trunk\"", match=r"Committed revision 16") self.svnmerge("integrated", match=r"^13-14$") os.chdir("..") os.chdir("test-branch") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 16") # test-branch was copied from trunk's r6. So non-phantom revs # since that point should still be available to merge from # trunk to test-branch: self.svnmerge("avail -vv", match=r"\n9-10,16$") self.svnmerge("merge -vv", match=r"svn --non-interactive merge --force -r 15:16") p = self.getproperty() self.assertEqual("/trunk:1-16", p) self.svnmerge("integrated", match=r"^3-16$") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 17") os.remove("svnmerge-commit-message.txt") open("test1", "w").write("test 1-changed_on_branch_after_merge_from_trunk") self.launch("svn commit -m \"Change to test1 on branch\"", match=r"Committed revision 18") os.chdir("..") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 18") # Check reflected revision is excluded with --bidirectional self.svnmerge("avail -vv --bidirectional", match=r"\n18$") # and without --bidirectional. 
self.svnmerge("avail -vv", match=r"\n18$") self.svnmerge("merge -vv", match=r"svn --non-interactive merge --force -r 17:18") p = self.getproperty() self.assertEqual("/branches/test-branch:1-18", p) self.svnmerge("integrated", match=r"^13-18$") def testBidirectionalMergesMultiBranch(self): """Check that merges from a second branch are not considered reflected for other branches.""" os.chdir("..") self.multilaunch(""" svn cp -m "Create test-branch2" %(TEST_REPO_URL)s/trunk %(TEST_REPO_URL)s/branches/test-branch2 svn co %(TEST_REPO_URL)s/branches/test-branch2 test-branch2 """) os.chdir("test-branch") self.svnmerge2(["init", "-r1-13", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 15") os.remove("svnmerge-commit-message.txt") os.chdir("..") os.chdir("test-branch2") self.svnmerge2(["init", "-r1-14", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 16") os.remove("svnmerge-commit-message.txt") os.chdir("..") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 16") self.svnmerge2(["init", "-r1-16", self.test_repo_url + "/branches/test-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 17") os.remove("svnmerge-commit-message.txt") self.svnmerge2(["init", "-r1-17", self.test_repo_url + "/branches/test-branch2"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 18") os.remove("svnmerge-commit-message.txt") os.chdir("..") os.chdir("test-branch2") open("test1", "w").write("test 1-changed_on_branch2") self.launch("svn commit -m \"Change to test1 on branch2\"", match=r"Committed revision 19") os.chdir("..") os.chdir("trunk") # Not using switch, so must update to get latest repository rev. 
self.launch("svn update", match=r"At revision 19") # Merge into trunk self.svnmerge("merge -vv -S /branches/test-branch2", match=r"merge --force -r 18:19") p = self.getproperty() # allow properties to come back in arbitrary order self.assertTrue(re.search("/branches/test-branch2:1-19", p)) self.assertTrue(re.search("/branches/test-branch:1-16", p)) self.svnmerge("integrated -S branch2", match=r"^14-19$") self.svnmerge("integrated -S ../test-branch", match=r"^13-16$") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 20") os.remove("svnmerge-commit-message.txt") os.chdir("..") os.chdir("test-branch") # Not using switch, so must update to get latest repository rev. self.launch("svn update", match=r"At revision 20") # Initialized revs should not be available for merge self.svnmerge("avail -v", match=r"initialized.*17-18") # Latest revision on trunk which was merged from test-branch2 # should be available for test-branch with --bidirectional flag. self.svnmerge("avail -vv --bidirectional", match=r"merged are:\n20$") # and also without the --bidirectional flag. 
self.svnmerge("avail -vv", match=r"merged are:\n20$") self.svnmerge("merge -vv", match=r"merge --force -r 19:20") p = self.getproperty() self.assertEqual("/trunk:1-20", p) self.svnmerge("integrated", match=r"^3-20$") def testRollbackWithoutInit(self): """Rollback should error out if invoked prior to init""" self.svnmerge("rollback -vv -S ../trunk", error = True, match = r"no integration info available for path") def testRollbackOutsidePossibleRange(self): """`svnmerge rollback' should error out if range contains revisions prior to SOURCE creation date.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") expected_error = r"""Specified revision range falls out of the rollback range.""" self.svnmerge("rollback -vv -S ../trunk -r 2-14", error = True, match = expected_error) def testRollbackWithoutRevisionOpt(self): """`svnmerge rollback' should error out if -r option is not given""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 14") os.remove("svnmerge-commit-message.txt") self.svnmerge("rollback -vv -S ../trunk", error = True, match = r"The '-r' option is mandatory for rollback") def testInitAndRollbackRecordOnly(self): """Init svnmerge, modify source head, merge, rollback --record-only.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Rollback record-only expected_output = r"property 'svnmerge-integrated' set on '.'" detested_output = r""" D test2 D test3""" self.svnmerge("rollback -vv --record-only -S ../trunk -r5-7", match = expected_output, nonmatch = detested_output) def testInitAndRollback(self): """Init svnmerge, modify source 
head, merge, rollback.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Svnmerge rollback r5-7 expected_output = "D\s+test2\s+D\s+test3" self.svnmerge("rollback -vv -S ../trunk -r5-7", match = expected_output) def testMergeAndRollbackEmptyRevisionRange(self): """Init svnmerge, modify source head, merge, rollback where no merge occurred.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Make changes to trunk os.chdir("../trunk") open("newfile", "w").close() self.launch("svn add newfile") self.launch('svn commit -m "Adding newfile"', match=r"Committed revision 15") open("anothernewfile", "w").close() self.launch("svn add anothernewfile") self.launch('svn commit -m "Adding anothernewfile"', match=r"Committed revision 16") # Svnmerge block r15,16 os.chdir("../test-branch") self.launch("svn up ..", error = False) self.svnmerge("block -r 15,16 -S ../trunk") self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 17") self.svnmerge("merge -S ../trunk") self.launch("svn commit -F svnmerge-commit-message.txt") # Svnmerge rollback r15-16 self.svnmerge("rollback -vv -S ../trunk -r15-16", error = False, match = r"Nothing to rollback in revision range r15-16") def testMergeAndRollback(self): """Init svnmerge, modify source head, merge, rollback.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Make changes to trunk os.chdir("../trunk") open("newfile", "w").close() self.launch("svn add newfile") self.launch('svn commit -m "Adding newfile"', match=r"Committed 
revision 15") # Svnmerge merge r15 os.chdir("../test-branch") self.svnmerge("merge -r 15 -S ../trunk") self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 16") # Svnmerge rollback r15 self.svnmerge("rollback -vv -S ../trunk -r15", match = r"-r 15:14") def testBlockMergeAndRollback(self): """Init svnmerge, block, modify head, merge, rollback.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Make changes to trunk os.chdir("../trunk") open("newfile", "w").close() self.launch("svn add newfile") self.launch('svn commit -m "Adding newfile"', match=r"Committed revision 15") open("anothernewfile", "w").close() self.launch("svn add anothernewfile") self.launch('svn commit -m "Adding anothernewfile"', match=r"Committed revision 16") # Svnmerge block r16, merge r15 os.chdir("../test-branch") self.launch("svn up ..", error = False) self.svnmerge("block -r 16 -S ../trunk") self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 17") self.svnmerge("merge -S ../trunk", nonmatch = r"A anothernewfile", match = r"A newfile") self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 18") # Svnmerge rollback revision range 15-18 (in effect only 15,17) self.svnmerge("rollback -vv -S ../trunk -r15-18", nonmatch = r"D anothernewfile") def testMergeWithPotentialPropertyConflict(self): """Init branch B, merge changes from branch A to branch B, init branch C, and attempt a merge of changes from branch B to branch C.""" # Initialize merge info for test-branch. self.svnmerge2(["init", "-r 3-6", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") # Make a change to trunk. 
os.chdir("../trunk") open("newfile", "w").close() self.launch("svn add newfile") self.launch('svn commit -m "Adding newfile"', match=r"Committed revision 15") # Merge a change from trunk to test-branch. os.chdir("../test-branch") self.svnmerge("merge -r 15 -S ../trunk") self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 16") # Get a WC for testYYY-branch. os.chdir("..") self.launch("svn co %s/branches/testYYY-branch" % self.test_repo_url) os.chdir("testYYY-branch") # Initialize merge info for testYYY-branch. self.svnmerge2(["init", "-r 13", self.test_repo_url + "/branches/test-branch"]) self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 17") os.remove("svnmerge-commit-message.txt") ### FIXME: Unfortunately, we're getting a conflict for the ### merge info property in the following svnmerge invocation ### due to Subversion's (reasonable) lack of property value ### merging (which isn't possible without knowing a property's ### MIME type) # Attempt a merge of changes from test-branch to # testYYY-branch. self.svnmerge("merge -r 16 -S ../test-branch") try: self.launch("svn commit -F svnmerge-commit-message.txt", match=r"Committed revision 18") except AssertionError: self.assert_(os.path.isfile("dir_conflicts.prej")) def testCommitMessageEncoding(self): """Init svnmerge, modify source head and commit with a message containing non-ASCII caracters, merge, commit, and verify the commit message was correctly encoded.""" # Initialize svnmerge self.svnmerge2(["init", self.test_repo_url + "/trunk"]) self.launch("svn commit -F svnmerge-commit-message.txt", match = r"Committed revision 14") os.remove("svnmerge-commit-message.txt") commit_msg = u"adição no repositório" input_encoding = locale.getdefaultlocale()[1] output_encoding = sys.stdout.encoding # Create a file os.chdir("../trunk") open("newfile", "w").close() # Create a message containing non-ASCII caracters. 
The message will be # kept inside a file, so we don't need to worry about Python or the OS # converting the command line before it's sent to svn msg_file = open('msg.txt', 'w') msg_file.write(commit_msg.encode(input_encoding)) msg_file.close() # Add the file and commit with the message above self.launch("svn add newfile") self.launch('svn commit -F msg.txt', match="Committed revision 15") os.remove('msg.txt') # Check the message was properly encoded by svn (this will currently # only work if the user config file does not override log-encoding) self.launch('svn log -r 15', match=commit_msg.encode(output_encoding)) # Merge changes into the branch commiting with the message provided by # svnmerge os.chdir("../test-branch") self.svnmerge("merge") self.launch("svn commit -F svnmerge-commit-message.txt", match="Committed revision 16") # The procedure above should have not misencoded the message self.launch('svn log -r 16', match=commit_msg.encode(output_encoding)) def test_pathid_fns(self): # (see also TestCase_pathid_fns for tests that don't need a repos) os.chdir("..") self.assertEqual(svnmerge.pathid_to_url( "/branches/testYYY-branch", "./trunk"), "%s/branches/testYYY-branch" % self.test_repo_url) self.assertTrue( svnmerge.equivalent_pathids("/branches/testYYY-branch", "/branches/testYYY-branch", "./trunk")) self.assertFalse( svnmerge.equivalent_pathids("/branches/test-branch", "/branches/testYYY-branch", "./trunk")) def test_invalid_url(self): self.svnmerge2(["init", self.test_repo_url + "/trunk"]) # warm up svnmerge self.assertEqual(svnmerge.get_svninfo("file://foo/bar"), {}) def test_dict_from_revlist_prop(self): locdict = svnmerge.dict_from_revlist_prop("/trunk:1-10 uuid://65390229-12b7-0310-b90b-f21a5aa7ec8e/branches/foo:20-30") for k, v in locdict.items(): if str(k) == '/trunk': self.assertEqual(str(v), '1-10') elif str(k) == 'uuid://65390229-12b7-0310-b90b-f21a5aa7ec8e/branches/foo': self.assertEqual(str(v), '20-30') else: self.fail("Unknown pathid '%s'" % k) 
def test_pathid_fns(self): os.chdir("..") # run svnmerge once to get things rolling self.svnmerge("init", error=True) branch = svnmerge.PathIdentifier.from_target("test-branch") trunk = svnmerge.PathIdentifier.from_target("trunk") yy = svnmerge.PathIdentifier.from_target("%s/branches/testYYY-branch" % self.test_repo_url) branchurl = svnmerge.PathIdentifier.from_target("%s/branches/test-branch" % self.test_repo_url) trunkurl = svnmerge.PathIdentifier.from_target("%s/trunk" % self.test_repo_url) self.assertTrue(branchurl == branch) self.assertFalse(branchurl != branch) self.assertTrue(trunkurl == trunk) self.assertTrue(yy != trunk) def test_PathIdentifier_hint_url(self): os.chdir("..") # prime the cache with our repo URL svnmerge.PathIdentifier.hint(self.test_repo_url + '/trunk') expected = svnmerge.PathIdentifier.locobjs['/trunk'] # and then we should get the same pathid for all of these self.assertEqual(expected, svnmerge.PathIdentifier.from_target('trunk')) self.assertEqual(expected, svnmerge.PathIdentifier.from_target(self.test_repo_url + '/trunk')) def test_is_pathid(self): os.chdir("..") l = svnmerge.PathIdentifier.from_target('trunk') self.assertTrue(svnmerge.is_pathid(l)) def testOptionOrder(self): """Make sure you can intermix command name, arguments and options in any order.""" # this is under TestCase_TestRepo because it assumes that '.' is a svn working dir self.svnmerge("--log avail", error=True, match=r"no integration info") # accepted self.svnmerge("-l avail", error=True, match=r"no integration info") # accepted self.svnmerge("-r123 merge", error=True, match=r"no integration info") # accepted self.svnmerge("-s -v -r92481 merge", error=True, match=r"no integration info") # accepted self.svnmerge("--log merge", error=True, match=r"option --log not recognized") self.svnmerge("--diff foobar", error=True, match=r"foobar") # This requires gnu_getopt support to be parsed if hasattr(getopt, "gnu_getopt"): self.svnmerge("-r123 merge . 
--log", error=True, match=r"option --log not recognized") if __name__ == "__main__": # If an existing template repository and working copy for testing # exists, then always remove it. This prevents any problems if # this test suite is modified and there exists an older template # directory that may be used. This will also prevent problems if # in a previous run of this script, the template was being created # when the script was canceled, leaving it in an inconsistent # state. template_path = get_template_path() if os.path.exists(template_path): rmtree(template_path) unittest.main()
{ "content_hash": "4125743f2e095e71ee63e15bb3f878b1", "timestamp": "", "source": "github", "line_count": 1411, "max_line_length": 133, "avg_line_length": 39.30474840538625, "alnum_prop": 0.5797075316900774, "repo_name": "YueLinHo/Subversion", "id": "69c4dbccf8c1b82a52409e75f3aaf83c3857ad30", "size": "56274", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "contrib/client-side/svnmerge/svnmerge_test.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "2407" }, { "name": "C", "bytes": "23344368" }, { "name": "C++", "bytes": "1110396" }, { "name": "CSS", "bytes": "1216" }, { "name": "Emacs Lisp", "bytes": "401177" }, { "name": "HTML", "bytes": "404487" }, { "name": "Java", "bytes": "1698548" }, { "name": "M4", "bytes": "204671" }, { "name": "Makefile", "bytes": "50827" }, { "name": "Objective-C", "bytes": "534640" }, { "name": "PLSQL", "bytes": "1622" }, { "name": "PLpgSQL", "bytes": "4534" }, { "name": "Perl", "bytes": "395610" }, { "name": "Python", "bytes": "6205629" }, { "name": "Roff", "bytes": "33424" }, { "name": "Ruby", "bytes": "437540" }, { "name": "Shell", "bytes": "196621" }, { "name": "Vim script", "bytes": "1123" }, { "name": "XSLT", "bytes": "24950" } ], "symlink_target": "" }
try: from queue import Queue as queue except ImportError: from queue import queue as queue from queue import Full import traceback from graypy import GELFHandler from threading import Thread class AsyncGELFHandler(GELFHandler, Thread): def __init__(self, *args, **kwargs): max_queue_size = kwargs.pop('max_queue_size', 10000) super(AsyncGELFHandler, self).__init__(*args, **kwargs) Thread.__init__(self) self.output_queue = queue(maxsize=max_queue_size) # Start thread self.start() def send(self, s): try: self.output_queue.put(s, block=False) except Full: # cannot log here, our queue is full.. just drop the message pass def _process_queue_record(self, s): super(AsyncGELFHandler, self).send(s) def run(self): while True: try: record = self.output_queue.get() self._process_queue_record(record) self.output_queue.task_done() except Exception as ex: # Handle log sending exception in some way. eg. traceback.print_exc(): # Exception handling is mandatory, otherwise the thread will die traceback.print_exc()
{ "content_hash": "cbeaeca7b51bbdfc76c92f37eca962fd", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 86, "avg_line_length": 29.72093023255814, "alnum_prop": 0.6001564945226917, "repo_name": "listingmirror/async-gelf-handler", "id": "e66c28c7abeaa65060bbb60fa2be0719d71ca30d", "size": "1278", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "listingmirror/async_gelf_handler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "2109" } ], "symlink_target": "" }
from msrest.serialization import Model class LoadMetricReportInfo(Model): """Information about load reported by replica. :param name: The name of the metric. :type name: str :param value: The value of the load for the metric.. :type value: int :param last_reported_utc: The UTC time when the load is reported. :type last_reported_utc: datetime """ _attribute_map = { 'name': {'key': 'Name', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'int'}, 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } def __init__(self, name=None, value=None, last_reported_utc=None): super(LoadMetricReportInfo, self).__init__() self.name = name self.value = value self.last_reported_utc = last_reported_utc
{ "content_hash": "bf2fccc62c739e6f06ca1f72e0135add", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 76, "avg_line_length": 32.64, "alnum_prop": 0.6151960784313726, "repo_name": "lmazuel/azure-sdk-for-python", "id": "2bca2bbd8bf9918e701cf781db1cc21f4276414c", "size": "1290", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-servicefabric/azure/servicefabric/models/load_metric_report_info.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42572767" } ], "symlink_target": "" }
"""Function for interpolating formatted errors from the TensorFlow runtime. Exposes the function `interpolate` to interpolate messages with tags of the form {{type name}}. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import itertools import os import re import six from tensorflow.core.protobuf import graph_debug_info_pb2 _NAME_REGEX = r"[A-Za-z0-9_.][A-Za-z0-9_.\-/]*?" _TAG_REGEX = r"{{{{({name}) ({name})}}}}".format(name=_NAME_REGEX) _INTERPOLATION_REGEX = r"^(.*?)({tag})".format(tag=_TAG_REGEX) _INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX, re.DOTALL) _ParseTag = collections.namedtuple("_ParseTag", ["type", "name"]) # Remove the last three path components from this module's file (i.e. # python/framework/error_interpolation.py) so that we have an absolute path # prefix to the root of the installation. _FRAMEWORK_COMMON_PREFIX = os.path.dirname( os.path.dirname(os.path.dirname(__file__))) # Sub-directories under the common prefix that are considered part of the # framework. _FRAMEWORK_PATH_PREFIXES = [ os.path.join(_FRAMEWORK_COMMON_PREFIX, "python") + os.sep, os.path.join(_FRAMEWORK_COMMON_PREFIX, "contrib") + os.sep, ] # Patterns of filename patterns that should be considered internal to # the TensorFlow framework. _FRAMEWORK_FILENAME_PATTERNS = [ re.compile(r"<embedded"), ] # Patterns of filename patterns that should be considered external to # TensorFlow regardless of framework prefix match. _EXTERNAL_FILENAME_PATTERNS = [ # Explicitly treat test frames as not part of the framework. re.compile(r"_test\.py$"), ] def parse_message(message): """Parses the message. Splits the message into separators and tags. Tags are named tuples representing the string {{type name}} and they are separated by separators. For example, in "123{{node Foo}}456{{node Bar}}789", there are two tags and three separators. The separators are the numeric characters. 
Args: message: String to parse Returns: (list of separator strings, list of _ParseTags). For example, if message is "123{{node Foo}}456" then this function returns (["123", "456"], [_ParseTag("node", "Foo")]) """ seps = [] tags = [] pos = 0 while pos < len(message): match = re.match(_INTERPOLATION_PATTERN, message[pos:]) if match: seps.append(match.group(1)) tags.append(_ParseTag(match.group(3), match.group(4))) pos += match.end() else: break seps.append(message[pos:]) return seps, tags def _compute_device_summary_from_list(name, device_assignment_list, prefix=""): """Return a summary of an op's device function stack. Args: name: The name of the op. device_assignment_list: The op._device_assignments list. prefix: An optional string prefix used before each line of the multi- line string returned by this function. Returns: A multi-line string similar to: Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38> The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation. """ if not device_assignment_list: message = "No device assignments were active during op '%s' creation." 
message %= name return prefix + message str_list = [] str_list.append( "%sDevice assignments active during op '%s' creation:" % (prefix, name)) for traceable_obj in device_assignment_list: location_summary = "<{file}:{line}>".format( file=traceable_obj.filename, line=traceable_obj.lineno) subs = { "prefix": prefix, "indent": " ", "dev_name": traceable_obj.obj, "loc": location_summary, } str_list.append( "{prefix}{indent}with tf.device({dev_name}): {loc}".format(**subs)) return "\n".join(str_list) def _compute_device_assignment_summary_from_op(op, prefix=""): # pylint: disable=protected-access return _compute_device_summary_from_list(op.name, op._device_assignments, prefix) # pylint: enable=protected-access def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=""): """Return a summary of an op's colocation stack. Args: name: The op name. colocation_dict: The op._colocation_dict. prefix: An optional string prefix used before each line of the multi- line string returned by this function. Returns: A multi-line string similar to: Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27> with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38> The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation. """ if not colocation_dict: message = "No node-device colocations were active during op '%s' creation." 
message %= name return prefix + message str_list = [] str_list.append("%sNode-device colocations active during op '%s' creation:" % (prefix, name)) for coloc_name, location in colocation_dict.items(): location_summary = "<{file}:{line}>".format( file=location.filename, line=location.lineno) subs = { "prefix": prefix, "indent": " ", "name": coloc_name, "loc": location_summary, } str_list.append( "{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs)) return "\n".join(str_list) def _compute_colocation_summary_from_op(op, prefix=""): """Fetch colocation file, line, and nesting and return a summary string.""" # pylint: disable=protected-access return _compute_colocation_summary_from_dict(op.name, op._colocation_dict, prefix) # pylint: enable=protected-access def _is_framework_filename(filename): """Returns whether a filename should be considered a part of the framework. A file is part of the framework if it does not match a pattern in _EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in _FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix. Args: filename: A filename string. Returns: Whether the filename should be considered to be internal to the TensorFlow framework for the purposes of reporting errors. """ for pattern in _EXTERNAL_FILENAME_PATTERNS: if pattern.search(filename): return False for pattern in _FRAMEWORK_FILENAME_PATTERNS: if pattern.search(filename): return True for prefix in _FRAMEWORK_PATH_PREFIXES: if filename.startswith(prefix): return True return False def _find_index_of_defining_frame_for_op(op): """Return index in op.traceback with first 'useful' frame. This method reads through the stack stored in op.traceback looking for the innermost frame which (hopefully) belongs to the caller. It accomplishes this by rejecting frames deemed to be part of the TensorFlow framework (by pattern matching the filename). Args: op: the Operation object for which we would like to find the defining location. 
Returns: Integer index into op.traceback where the first non-TF file was found (innermost to outermost), or 0 (for the outermost stack frame) if all files came from TensorFlow. """ # Index 0 of tf_traceback is the outermost frame. tf_traceback = op.traceback size = len(tf_traceback) filenames = [frame.filename for frame in tf_traceback] # We process the filenames from the innermost frame to outermost. for idx, filename in enumerate(reversed(filenames)): is_framework = _is_framework_filename(filename) if not is_framework: # Consider this to be the defining frame. return size - idx - 1 return 0 def _get_defining_frame_from_op(op): """Find and return stack frame where op was defined.""" frame_index = _find_index_of_defining_frame_for_op(op) return op.traceback[frame_index] def _compute_useful_frames(op, num): """Return a list of frames, which form a 'useful' stack. Starting from the defining frame to the outermost one, this method computes the contiguous portion of the 'useful' stack trace and returns the selected frames. Args: op: op.Operation object having a _traceback member. num: total number of frames to return. Returns: A list of frames. """ defining_frame_index = _find_index_of_defining_frame_for_op(op) # The stack trace is collected from two lines before the defining frame in the # model file to the outermost with `num` frames at most. These two extra lines # are included from the TensorFlow library to give the context which node is # defined. innermost_excluded = min(defining_frame_index + 2 + 1, len(op.traceback)) outermost_included = max(innermost_excluded - num, 0) return op.traceback[outermost_included:innermost_excluded] def create_graph_debug_info_def(func_named_operations): """Construct and returns a `GraphDebugInfo` protocol buffer. Args: func_named_operations: An iterable of (func_name, op.Operation) tuples where the Operation instances have a _traceback members. The func_name should be the empty string for operations in the top-level Graph. 
Returns: GraphDebugInfo protocol buffer. Raises: TypeError: If the arguments are not of the correct proto buffer type. """ # Creates an empty GraphDebugInfoDef proto. graph_debug_info_def = graph_debug_info_pb2.GraphDebugInfo() # Gets the file names and line numbers for the exported node names. Also # collects the unique file names. all_file_names = set() node_to_trace = {} for func_name, op in func_named_operations: # Gets the stack trace of the operation and then the file location. node_name = op.name + "@" + func_name node_to_trace[node_name] = _compute_useful_frames(op, 10) for frame in node_to_trace[node_name]: all_file_names.add(frame.filename) # Sets the `files` field in the GraphDebugInfo proto graph_debug_info_def.files.extend(all_file_names) # Builds a mapping between file names and index of the `files` field, so we # only store the indexes for the nodes in the GraphDebugInfo. file_to_index = dict( [(y, x) for x, y in enumerate(graph_debug_info_def.files)]) # Creates the FileLineCol proto for each node and sets the value in the # GraphDebugInfo proto. We only store the file name index for each node to # save the storage space. for node_name, frames in node_to_trace.items(): trace_def = graph_debug_info_def.traces[node_name] for frame in reversed(frames): trace_def.file_line_cols.add( file_index=file_to_index[frame.filename], line=frame.lineno) return graph_debug_info_def def compute_field_dict(op, strip_file_prefix=""): """Return a dictionary mapping interpolation tokens to values. Args: op: op.Operation object having a _traceback member. strip_file_prefix: The common path in the stacktrace. We remove the prefix from the file names. Returns: A dictionary mapping string tokens to string values. The keys are shown below along with example values. 
{ "file": "tool_utils.py", "line": "124", "defined_at": " (defined at tool_utils.py:124)", "colocations": '''Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27> with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>''' "devices": '''Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38>''' "devs_and_colocs": A concatenation of colocations and devices, e.g. '''Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27> with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>''' Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38>''' } """ frame = _get_defining_frame_from_op(op) filename = frame.filename if filename.startswith(strip_file_prefix): filename = filename[len(strip_file_prefix):] lineno = frame.lineno defined_at = " (defined at %s:%d)" % (filename, lineno) colocation_summary = _compute_colocation_summary_from_op(op) device_summary = _compute_device_assignment_summary_from_op(op) combined_summary = "\n".join([colocation_summary, device_summary]) field_dict = { "file": filename, "line": lineno, "defined_at": defined_at, "colocations": colocation_summary, "devices": device_summary, "devs_and_colocs": combined_summary, } return field_dict def traceback_files_common_prefix(all_ops): """Determines the common prefix from the paths of the stacktrace of 'all_ops'. For example, if the paths are '/foo/bar/baz/' and '/foo/car', this would return '/foo'. Args: all_ops: All the input nodes in the form of a list of lists of ops. Returns: The common prefix. """ files = set() for ops in all_ops: if ops is None: continue for op in ops: # TODO(slebedev): switch to .filename once 2.X support is dropped. 
for filename, _, _, _ in op.traceback: if "<embedded" not in filename: files.add(filename) return os.path.split(os.path.commonprefix(list(files)))[0] def _sources_for_node(node, graph): """Gets the input op nodes for 'node'. Args: node: The node. graph: The graph containing the node. Returns: The unique input nodes. """ inputs = set() for name in node.node_def.input: if name.startswith("^"): name = name[1:] try: tensor = graph.get_tensor_by_name(name) op = tensor.op except (KeyError, ValueError): try: op = graph.get_operation_by_name(name) except KeyError: continue inputs.add(op) return list(inputs) def _build_error_message(op, input_ops, common_prefix): """Returns the formatted error message for the given op. Args: op: The node. input_ops: The input nodes to the 'op' node common_prefix: The prefix path common to the stacktrace of inputs. Returns: The formatted error message for the given op. The error message also includes the information about the input sources for the given op. """ field_dict = compute_field_dict(op, common_prefix) msg = "node %s%s " % (op.name, field_dict["defined_at"]) input_debug_info = [] # This stores the line numbers that we have already printed. done = set() done.add(field_dict["defined_at"]) for op_inp in input_ops: field_dict_inp = compute_field_dict(op_inp, common_prefix) if field_dict_inp["defined_at"] not in done: input_debug_info.append( " %s%s" % (op_inp.name, field_dict_inp["defined_at"])) done.add(field_dict_inp["defined_at"]) if input_debug_info: end_msg = ("\nInput Source operations connected to node %s:\n") % (op.name) end_msg += "\t\n".join(input_debug_info) else: end_msg = "" return msg, end_msg def interpolate(error_message, graph): """Interpolates an error message. The error message can contain tags of the form `{{type name}}` which will be replaced. For example: "{{node <name>}}" would get expanded to: "node <name>(defined at <path>)". Args: error_message: A string to interpolate. 
graph: ops.Graph object containing all nodes referenced in the error message. Returns: The string with tags of the form {{type name}} interpolated. """ seps, tags = parse_message(error_message) subs = [] end_msg = collections.defaultdict(list) tagged_ops = [] for t in tags: try: op = graph.get_operation_by_name(t.name) except KeyError: op = None if op is None: tagged_ops.append(None) else: tagged_ops.append([op] + _sources_for_node(op, graph)) common_prefix = traceback_files_common_prefix(tagged_ops) for tag, ops in zip(tags, tagged_ops): msg = "{{%s %s}}" % (tag.type, tag.name) if ops is not None: if tag.type == "node": msg, source_msg = _build_error_message(ops[0], ops[1:], common_prefix) if source_msg: end_msg["source_nodes"].append(source_msg) elif tag.type == "colocation_node": field_dict = compute_field_dict(ops[0], common_prefix) msg = "node %s%s placed on device %s " % ( ops[0].name, field_dict["defined_at"], field_dict["devices"]) end_msg["colocations"].append(field_dict["devs_and_colocs"]) if tag.type == "function_node": msg = "" subs.append(msg) if "source_nodes" in end_msg: subs.append("\n\nErrors may have originated from an input operation.") subs.append("\n".join(end_msg["source_nodes"])) end_msg.pop("source_nodes", None) for k, messages in end_msg.items(): subs.append("Additional information about %s:" % k) subs.append("\n".join(messages)) return "".join( itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
{ "content_hash": "89b689b316d4459e98a00de4cc32bee9", "timestamp": "", "source": "github", "line_count": 513, "max_line_length": 80, "avg_line_length": 34.278752436647174, "alnum_prop": 0.6718794427068524, "repo_name": "chemelnucfin/tensorflow", "id": "0c6feb64008a58000f55da729c33815f7d6b9a84", "size": "18274", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tensorflow/python/framework/error_interpolation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "4913" }, { "name": "Batchfile", "bytes": "16146" }, { "name": "C", "bytes": "825231" }, { "name": "C#", "bytes": "8562" }, { "name": "C++", "bytes": "75313939" }, { "name": "CMake", "bytes": "207856" }, { "name": "Dockerfile", "bytes": "80130" }, { "name": "Go", "bytes": "1670422" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "881711" }, { "name": "Jupyter Notebook", "bytes": "1113647" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "853297" }, { "name": "Makefile", "bytes": "109340" }, { "name": "Objective-C", "bytes": "105235" }, { "name": "Objective-C++", "bytes": "258793" }, { "name": "PHP", "bytes": "38007" }, { "name": "Pascal", "bytes": "3741" }, { "name": "Pawn", "bytes": "14380" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "50825074" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "4706" }, { "name": "Shell", "bytes": "532610" }, { "name": "Smarty", "bytes": "31460" }, { "name": "Swift", "bytes": "62814" } ], "symlink_target": "" }
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from connect.discover.forms import FilterMemberForm

User = get_user_model()


@login_required
def dashboard(request):
    """
    List all active members, optionally filtered by skill and role.

    A one-shot 'show_welcome' session flag (set elsewhere, on the user's
    first sign-up) selects a custom welcome message; it is read and then
    removed here so it only ever fires once.
    """
    # Read and clear the one-shot welcome flag.
    show_welcome = request.session.get('show_welcome')
    if show_welcome is not None:
        del request.session['show_welcome']

    # Active members in alphabetical order, with the related objects the
    # template renders prefetched in bulk.
    members = User.objects.filter(
        is_active=True
    ).order_by(
        'full_name'
    ).prefetch_related(
        'userskill_set',
        'userskill_set__skill',
        'roles',
        'links',
        'links__icon'
    )

    if request.method == 'GET':
        form = FilterMemberForm(request.GET)
        if form.is_valid():
            chosen_skills = form.cleaned_data['skills']
            chosen_roles = form.cleaned_data['roles']
            if chosen_skills:
                members = members.filter(skill__in=chosen_skills).distinct()
            if chosen_roles:
                members = members.filter(roles__in=chosen_roles).distinct()
    else:
        form = FilterMemberForm()

    return render(request, 'discover/list.html', {
        'logged_in_user': request.user,
        'listed_users': members,
        'form': form,
        'show_welcome': show_welcome,
    })


@login_required
def member_map(request):
    """
    Render the world map of members.
    """
    return render(request, 'discover/map.html')
{ "content_hash": "f4050ff41518af6d57e4269ac91e0b7f", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 69, "avg_line_length": 25.09722222222222, "alnum_prop": 0.5915882678472607, "repo_name": "f3r3nc/connect", "id": "91e5f02f7c5a9733ef701cbf55cb038b83c3edd3", "size": "1807", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "connect/discover/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "234365" }, { "name": "Cucumber", "bytes": "32509" }, { "name": "HTML", "bytes": "102248" }, { "name": "JavaScript", "bytes": "8734" }, { "name": "Makefile", "bytes": "906" }, { "name": "Python", "bytes": "259283" }, { "name": "Ruby", "bytes": "861" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals
from gaebusiness.gaeutil import SaveCommand, ModelSearchCommand
from gaeforms.ndb.form import ModelForm
from gaegraph.business_base import UpdateNode
from cartaz_app.model import Cartaz

# NOTE(review): the four forms below list (almost) the same Cartaz fields;
# only the Detail/Short forms additionally expose ``Cartaz.creation``.
# If a field is added to the model, every ``_include`` list here must be
# updated by hand -- keep them in sync.
# Field names are Portuguese: titulo=title, lancamento=release date,
# de=director ("by"), genero=genre, com=cast ("with").


class CartazPublicForm(ModelForm):
    """
    Form used to show properties on app's home
    """
    _model_class = Cartaz
    _include = [Cartaz.titulo, Cartaz.lancamento, Cartaz.de, Cartaz.genero, Cartaz.com]


class CartazForm(ModelForm):
    """
    Form used to save and update operations on app's admin page
    """
    _model_class = Cartaz
    _include = [Cartaz.titulo, Cartaz.lancamento, Cartaz.de, Cartaz.genero, Cartaz.com]


class CartazDetailForm(ModelForm):
    """
    Form used to show entity details on app's admin page
    """
    _model_class = Cartaz
    _include = [Cartaz.de, Cartaz.genero, Cartaz.creation, Cartaz.lancamento, Cartaz.titulo, Cartaz.com]


class CartazShortForm(ModelForm):
    """
    Form used to show entity short version on app's admin page, mainly for tables
    """
    _model_class = Cartaz
    _include = [Cartaz.de, Cartaz.genero, Cartaz.creation, Cartaz.lancamento, Cartaz.titulo, Cartaz.com]


class SaveCartazCommand(SaveCommand):
    # Persists a new Cartaz; validation/conversion is delegated to CartazForm.
    _model_form_class = CartazForm


class UpdateCartazCommand(UpdateNode):
    # Updates an existing Cartaz node using the same form as saving.
    _model_form_class = CartazForm


class ListCartazCommand(ModelSearchCommand):
    # Lists Cartaz entities ordered by creation (query built by the model).
    def __init__(self):
        super(ListCartazCommand, self).__init__(Cartaz.query_by_creation())
{ "content_hash": "1fabddd388ed3b1c228cedc4b0d11542", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 81, "avg_line_length": 26.352941176470587, "alnum_prop": 0.5970982142857143, "repo_name": "luuhfelix/ProjetoFilmes", "id": "5639e58f5f32cdc6e0021516dad7965d26190559", "size": "1816", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "backend/appengine/apps/cartaz_app/commands.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "222" }, { "name": "JavaScript", "bytes": "10174" }, { "name": "PowerShell", "bytes": "8104" }, { "name": "Python", "bytes": "124952" }, { "name": "Shell", "bytes": "2548" } ], "symlink_target": "" }
from __future__ import unicode_literals
from future.builtins import str

from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.template.response import TemplateResponse

from mezzanine.pages.models import Page, PageMoveException
from mezzanine.utils.urls import home_slug


@staff_member_required
def admin_page_ordering(request):
    """
    Updates the ordering of pages via AJAX from within the admin.

    Expects POST data: ``id`` (the moved page), ``parent_id`` (its new
    parent) and ``siblings[]`` (the new sibling order). Returns plain
    ``"ok"`` on success or ``"error"`` when the move is rejected.
    """
    def get_id(s):
        # IDs arrive as DOM-style strings like "page_123"; take the part
        # after the last underscore, or None when it is not numeric.
        s = s.split("_")[-1]
        return int(s) if s.isdigit() else None
    page = get_object_or_404(Page, id=get_id(request.POST['id']))
    old_parent_id = page.parent_id
    new_parent_id = get_id(request.POST['parent_id'])
    new_parent = Page.objects.get(id=new_parent_id) if new_parent_id else None
    # Let the page's content model veto the move (e.g. restricted trees).
    try:
        page.get_content_model().can_move(request, new_parent)
    except PageMoveException as e:
        messages.error(request, e)
        return HttpResponse('error')
    # Perform the page move
    if new_parent_id != page.parent_id:
        # Parent changed - set the new parent and re-order the
        # previous siblings.
        page.set_parent(new_parent)
        pages = Page.objects.filter(parent_id=old_parent_id)
        # NOTE(review): the loop variable deliberately (?) shadows the
        # outer ``page``; nothing below reads ``page`` again, but rename
        # it if this function grows.
        for i, page in enumerate(pages.order_by('_order')):
            Page.objects.filter(id=page.id).update(_order=i)
    # Set the new order for the moved page and its current siblings.
    for i, page_id in enumerate(request.POST.getlist('siblings[]')):
        Page.objects.filter(id=get_id(page_id)).update(_order=i)
    return HttpResponse("ok")


def page(request, slug, template=u"pages/page.html", extra_context=None):
    """
    Select a template for a page and render it. The request
    object should have a ``page`` attribute that's added via
    ``mezzanine.pages.middleware.PageMiddleware``. The page is loaded
    earlier via middleware to perform various other functions.
    The urlpattern that maps to this view is a catch-all pattern, in
    which case the page attribute won't exist, so raise a 404 then.

    For template selection, a list of possible templates is built up
    based on the current page. This list is ordered from most granular
    match, starting with a custom template for the exact page, then
    adding templates based on the page's parent page, that could be
    used for sections of a site (eg all children of the parent).
    Finally at the broadest level, a template for the page's content
    type (its model class) is checked for, and then if none of these
    templates match, the default pages/page.html is used.
    """
    from mezzanine.pages.middleware import PageMiddleware
    if not PageMiddleware.installed():
        raise ImproperlyConfigured("mezzanine.pages.middleware.PageMiddleware "
                                   "(or a subclass of it) is missing from " +
                                   "settings.MIDDLEWARE_CLASSES or " +
                                   "settings.MIDDLEWARE")
    if not hasattr(request, "page") or request.page.slug != slug:
        raise Http404

    # Check for a template name matching the page's slug. If the homepage
    # is configured as a page instance, the template "pages/index.html" is
    # used, since the slug "/" won't match a template name.
    template_name = str(slug) if slug != home_slug() else "index"
    templates = [u"pages/%s.html" % template_name]
    # A template explicitly chosen by the content model wins over all of
    # the slug-derived candidates below.
    method_template = request.page.get_content_model().get_template_name()
    if method_template:
        templates.insert(0, method_template)
    if request.page.content_model is not None:
        templates.append(u"pages/%s/%s.html" % (template_name,
                                                request.page.content_model))
    for parent in request.page.get_ascendants(for_user=request.user):
        parent_template_name = str(parent.slug)
        # Check for a section-wide template combining each ancestor's slug
        # with the page's content model.
        if request.page.content_model is not None:
            templates.append(u"pages/%s/%s.html" % (parent_template_name,
                                                    request.page.content_model))
    # Broadest specific match: a template named after the content model alone.
    if request.page.content_model is not None:
        templates.append(u"pages/%s.html" % request.page.content_model)
    # Final fallback is the ``template`` argument (pages/page.html by default).
    templates.append(template)
    return TemplateResponse(request, templates, extra_context or {})
{ "content_hash": "fcb1d77fdb6ebce8b88b46f8f847ce98", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 79, "avg_line_length": 45.02, "alnum_prop": 0.6850288760550867, "repo_name": "wbtuomela/mezzanine", "id": "86d166796915c0dbc1e9bf34763080888bfeed1e", "size": "4502", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "mezzanine/pages/views.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "60317" }, { "name": "HTML", "bytes": "79259" }, { "name": "JavaScript", "bytes": "453209" }, { "name": "Python", "bytes": "706912" } ], "symlink_target": "" }
'''
Control a salt cloud system
'''
from __future__ import absolute_import

# Import python libs
import json

# Import salt libs
import salt.utils

# True only when the optional ``saltcloud`` package imports cleanly; the
# module refuses to load without it (see ``__virtual__``).
HAS_CLOUD = False
try:
    import saltcloud  # pylint: disable=W0611
    HAS_CLOUD = True
except ImportError:
    pass

# Define the module's virtual name
__virtualname__ = 'saltcloud'


def __virtual__():
    '''
    Only load if salt cloud is installed
    '''
    if HAS_CLOUD:
        return __virtualname__
    return (False, 'The saltcloudmod execution module failed to load: requires the saltcloud library.')


def create(name, profile):
    '''
    Create the named vm

    name
        Name of the new VM.
    profile
        salt-cloud profile to build it from.

    Returns the parsed JSON emitted by ``salt-cloud``, or ``{}`` when the
    output is not valid JSON.

    CLI Example:

    .. code-block:: bash

        salt <minion-id> saltcloud.create webserver rackspace_centos_512
    '''
    # Shell out to the salt-cloud CLI with JSON output; python_shell=False
    # keeps the command from going through a shell.
    cmd = 'salt-cloud --out json -p {0} {1}'.format(profile, name)
    out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
    try:
        # decode_dict presumably coerces unicode keys/values to str --
        # confirm against salt.utils for this salt version.
        ret = json.loads(out, object_hook=salt.utils.decode_dict)
    except ValueError:
        # Non-JSON output (e.g. an error banner) degrades to an empty dict
        # rather than raising -- deliberate best-effort behaviour.
        ret = {}
    return ret
{ "content_hash": "a8fc4de11b0a5155c0d6811a11f81586", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 103, "avg_line_length": 21.46808510638298, "alnum_prop": 0.643211100099108, "repo_name": "stephane-martin/salt-debian-packaging", "id": "4f6ef9fa3da110a0f8690bd1a684363063a228a2", "size": "1033", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "salt-2016.3.3/salt/modules/saltcloudmod.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "13798" }, { "name": "C", "bytes": "986" }, { "name": "Groff", "bytes": "13634346" }, { "name": "HTML", "bytes": "39558" }, { "name": "Makefile", "bytes": "20902" }, { "name": "NSIS", "bytes": "22316" }, { "name": "PowerShell", "bytes": "38719" }, { "name": "Python", "bytes": "40857506" }, { "name": "SaltStack", "bytes": "58278" }, { "name": "Scheme", "bytes": "1790" }, { "name": "Shell", "bytes": "829927" }, { "name": "Tcl", "bytes": "6532" }, { "name": "TeX", "bytes": "11632" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals, print_function, division


class ValidationError(Exception):
    """Raised when input data fails a model's validation checks."""
    pass


class ChangingImmutableAttributeError(Exception):
    """Raised on an attempt to modify an attribute marked immutable."""
    pass


class ResolvedObjectNotFound(Exception):
    """Raised when an object reference cannot be resolved.

    The (capitalized) message is stored on ``self.message``.

    Fix: the original never called ``Exception.__init__``, so ``str(exc)``
    and ``exc.args`` were empty and the message was lost in tracebacks and
    logs; the message is now also passed to the base class.
    """

    def __init__(self, message=''):
        # str.capitalize() of '' is '' (falsy), so an empty/omitted message
        # falls back to the generic description.
        self.message = message.capitalize() or 'Could not find object to resolve to'
        super(ResolvedObjectNotFound, self).__init__(self.message)


class ConfigurationError(Exception):
    """Raised when the library is configured inconsistently."""
    pass
{ "content_hash": "f78d2af062e68bcae5d695fc3a76cf99", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 84, "avg_line_length": 22.22222222222222, "alnum_prop": 0.7425, "repo_name": "felixbr/nosql-rest-preprocessor", "id": "d1936e59b7f7f0c420f8dffb90bfc116f18cf3c8", "size": "400", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nosql_rest_preprocessor/exceptions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22962" } ], "symlink_target": "" }
from django import forms

from haystack.forms import SearchForm


class MySearchForm(SearchForm):
    """Haystack search form with a styled, optional query field.

    Results are always ordered newest-first by ``pub_date``; when the
    bound ``request`` carries a ``q`` parameter, it pre-fills the field.
    """

    # Override haystack's default ``q`` so it is optional and renders as a
    # Bootstrap-styled HTML5 search input.
    q = forms.CharField(required=False, label='Query', widget=forms.TextInput(attrs={
        'type': 'search',
        'autocorrect': 'off',
        'class': 'form-control',
        'placeholder': 'Search jobs or organizations',
    }))

    def search(self):
        # Let haystack build the base SearchQuerySet first.
        sqs = super(MySearchForm, self).search()

        if not self.is_valid():
            return self.no_query_found()

        # Newest results first.
        sqs = sqs.order_by('-pub_date')

        return sqs

    def __init__(self, *args, **kwargs):
        # ``request`` is a custom kwarg; pop it before the base form sees it.
        self.request = kwargs.pop('request', None)
        super(MySearchForm, self).__init__(*args, **kwargs)
        if self.request:
            if 'q' in self.request.GET:
                # NOTE(review): the QueryDict copy is only read once here,
                # so the .copy() looks unnecessary -- confirm before removing.
                copy = self.request.GET.copy()
                self.fields['q'].initial = copy['q'].strip()
{ "content_hash": "668172d2bd070e47774b754dc8e7fdcb", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 85, "avg_line_length": 32.074074074074076, "alnum_prop": 0.5762124711316398, "repo_name": "richardcornish/timgorin", "id": "9b7852b0599a1963ec6f0a9ac14e483695f3a9b8", "size": "866", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jobboardscraper/search/forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "132" }, { "name": "HTML", "bytes": "14930" }, { "name": "JavaScript", "bytes": "580" }, { "name": "Python", "bytes": "21071" } ], "symlink_target": "" }
import sys
import requests
import json
from docopt import docopt
import utils
import transactions
import rlp
from . import __version__
from . config import read_config

# NOTE: this module is Python 2 only (``print`` statement, str.decode('hex')).

# API location comes from the package config file; the path must be of the
# form "/segment" (leading slash, no trailing slash).
config = read_config()
api_path = config.get('api', 'api_path')
assert api_path.startswith('/') and not api_path.endswith('/')

DEFAULT_HOST = config.get('api', 'listen_host')
DEFAULT_PORT = config.getint('api', 'listen_port')
DEFAULT_GASPRICE = 10 ** 12
DEFAULT_STARTGAS = 10000


def sha3(x):
    # Hex-encode the digest produced by utils.sha3.
    return utils.sha3(x).encode('hex')


def privtoaddr(x):
    # A 64-character key is assumed to be hex-encoded and is decoded first.
    if len(x) == 64:
        x = x.decode('hex')
    return utils.privtoaddr(x)


def mktx(nonce, gasprice, startgas, to, value, data):
    # Build an unsigned transaction; the False flag to hex_serialize
    # presumably means "without signature" -- confirm in transactions.py.
    return transactions.Transaction(
        int(nonce), gasprice, startgas, to, int(value), data.decode('hex')
    ).hex_serialize(False)


def contract(nonce, gasprice, startgas, value, code):
    # Build an unsigned contract-creation transaction (no 'to' address).
    return transactions.contract(
        int(nonce), gasprice, startgas, int(value), code.decode('hex')
    ).hex_serialize(False)


def sign(txdata, key):
    # Deserialize, sign, and re-serialize; the True flag presumably
    # includes the signature -- confirm in transactions.py.
    return transactions.Transaction.hex_deserialize(txdata).sign(key).hex_serialize(True)


class APIClient(object):
    # Thin HTTP client for the node's JSON API rooted at api_path.

    def __init__(self, host, port):
        self.host = host
        self.port = port
        assert api_path.startswith('/') and not api_path.endswith('/')
        self.base_url = "http://%s:%d%s" % (host, port, api_path)

    def json_get_request(self, path):
        # GET base_url+path; on 200/201 return the parsed JSON body,
        # otherwise a small dict describing the HTTP failure.
        assert path.startswith('/')
        url = self.base_url + path
        # print 'GET', url
        r = requests.get(url)
        # print r.status_code, r.reason, r.url, r.headers
        if r.status_code in [200, 201]:
            return r.json()
        else:
            return dict((k, getattr(r, k)) for k in ('status_code', 'reason'))

    def account_to_dict(self, address):
        return self.json_get_request(path='/accounts/%s' % address)

    def getbalance(self, address):
        return int(self.account_to_dict(address)['balance'])

    def getcode(self, address):
        return self.account_to_dict(address)['code']

    def getnonce(self, address):
        # Look at pending transactions first so consecutive quicktx calls
        # do not reuse a nonce; fall back to the confirmed account nonce.
        ptxs = self.getpending()['transactions']
        nonce = max([0] + [int(tx['nonce']) for tx in ptxs if tx['sender'] == address])
        if nonce:
            return nonce + 1
        return int(self.account_to_dict(address)['nonce'])

    def getstate(self, address):
        return self.account_to_dict(address)['storage']

    def applytx(self, txdata):
        # Deserialized only to validate; the raw hex payload is what is PUT.
        tx = transactions.Transaction.hex_deserialize(txdata)
        url = self.base_url + '/transactions/'
        # print 'PUT', url, txdata
        r = requests.put(url, txdata)
        return dict(status_code=r.status_code, reason=r.reason, url=r.url)

    def quicktx(self, gasprice, startgas, to, value, data, pkey_hex):
        # Convenience: look up the nonce, build, sign and submit in one call.
        nonce = self.getnonce(privtoaddr(pkey_hex))
        tx = mktx(nonce, gasprice, startgas, to, value, data)
        return self.applytx(sign(tx, pkey_hex))

    def quickcontract(self, gasprice, startgas, value, code, pkey_hex):
        # Like quicktx for contract creation; also computes the address the
        # contract will live at (sha3(rlp([sender, nonce]))[12:]).
        sender = privtoaddr(pkey_hex)
        nonce = self.getnonce(sender)
        tx = contract(nonce, gasprice, startgas, value, code)
        formatted_rlp = [sender.decode('hex'), utils.int_to_big_endian(nonce)]
        addr = utils.sha3(rlp.encode(formatted_rlp))[12:].encode('hex')
        o = self.applytx(sign(tx, pkey_hex))
        o['addr'] = addr
        return o

    def getblock(self, id):
        return self.json_get_request(path='/blocks/%s' % id)

    def getchildren(self, id):
        return self.json_get_request(path='/blocks/%s/children' % id)

    def gettx(self, id):
        return self.json_get_request(path='/transactions/%s' % id)

    def getpending(self):
        return self.json_get_request(path='/pending/')

    def tracejson(self, id):
        # Raw trace as pretty-printed JSON.
        res = self.json_get_request(path='/trace/%s' % id)
        return json.dumps(res, indent=2)

    def trace(self, id):
        # Human-readable, line-per-event rendering of the trace; falls back
        # to the raw response when it has no 'trace' key.
        res = self.json_get_request(path='/trace/%s' % id)
        if 'trace' in res:
            out = []
            for l in res['trace']:
                name, data = l.items()[0]
                # Fixed display order: pc, op first; stackargs/data/code last.
                order = dict(pc=-2, op=-1, stackargs=1, data=2, code=3)
                items = sorted(data.items(), key=lambda x: order.get(x[0], 0))
                msg = ", ".join("%s=%s" % (k, v) for k, v in items)
                out.append("%s: %s" % (name.ljust(15), msg))
            return '\n'.join(out)
        return res

    def dump(self, id):
        res = self.json_get_request(path='/dump/%s' % id)
        return json.dumps(res, sort_keys=True, indent=2)


# docopt usage/options text; the [default: ...] values are substituted from
# the config-derived constants above. This string IS the CLI parser spec.
doc = \
    """ethclient

Usage:
  pyethclient getbalance [options] <address>
  pyethclient getcode [options] <address>
  pyethclient getstate [options] <address>
  pyethclient getnonce [options] <address>
  pyethclient quicktx [options] <to> <value> <data_hex> <pkey_hex>
  pyethclient mktx <nonce> <to> <value> <data_hex>
  pyethclient quicktx <to> <value> <data_hex> <pkey_hex>
  pyethclient mkcontract <nonce> <value> <code_hex>
  pyethclient quickcontract <value> <code_hex> <pkey_hex>
  pyethclient applytx [options] <tx_hex>
  pyethclient sign <tx_hex> <pkey_hex>
  pyethclient privtoaddr <pkey_hex>
  pyethclient sha3 <data>
  pyethclient getblock [options] <blockid_hex_or_num>
  pyethclient gettx [options] <txid_hex>
  pyethclient getpending [options]
  pyethclient trace [options] <txid_hex>
  pyethclient tracejson [options] <txid_hex>
  pyethclient dump [options] <tx_blk_id_hex>

Options:
  -h --help                  Show this screen
  -v --version               Show version
  -H --host=<host>           API server host [default: %s]
  -p --port=<port>           API server port [default: %d]
  -g --gasprice=<gasprice>   maximum gas price [default: %d]
  -G --startgas=<startgas>   gas provided [default: %d]
  -s --stdin                 take arguments from stdin
  -n --nonce                 by default the next nonce is looked up
""" % (DEFAULT_HOST, DEFAULT_PORT, DEFAULT_GASPRICE, DEFAULT_STARTGAS)


def main():
    # CLI entry point: parse argv with docopt and dispatch via cmd_map.
    # Take arguments from stdin with -s
    if len(sys.argv) > 1 and sys.argv[1] == '-s':
        sys.argv = [sys.argv[0], sys.argv[2]] + \
            sys.stdin.read().strip().split(' ') + sys.argv[3:]

    # Get command line arguments
    arguments = docopt(doc, version='pyethclient %s' % __version__)
    # print(arguments)

    host = arguments.get('--host') or DEFAULT_HOST
    port = int(arguments.get('--port') or DEFAULT_PORT)
    api = APIClient(host, port)

    gasprice = int(arguments.get('--gasprice') or DEFAULT_GASPRICE)
    startgas = int(arguments.get('--startgas') or DEFAULT_STARTGAS)

    # Map command name -> (callable, *already-resolved args). Missing
    # positional arguments are simply None for commands not invoked.
    cmd_map = dict(getbalance=(api.getbalance, arguments['<address>']),
                   getcode=(api.getcode, arguments['<address>']),
                   getstate=(api.getstate, arguments['<address>']),
                   getnonce=(api.getnonce, arguments['<address>']),
                   applytx=(api.applytx, arguments['<tx_hex>']),
                   sha3=(sha3, arguments['<data>']),
                   privtoaddr=(privtoaddr, arguments['<pkey_hex>']),
                   mkcontract=(contract, arguments['<nonce>'], gasprice, startgas,
                               arguments['<value>'], arguments['<code_hex>']),
                   mktx=(mktx, arguments['<nonce>'], gasprice, startgas,
                         arguments['<to>'], arguments['<value>'], arguments['<data_hex>']),
                   quicktx=(api.quicktx, gasprice, startgas, arguments['<to>'],
                            arguments['<value>'], arguments['<data_hex>'], arguments['<pkey_hex>']),
                   quickcontract=(api.quickcontract, gasprice, startgas,
                                  arguments['<value>'], arguments['<code_hex>'], arguments['<pkey_hex>']),
                   sign=(sign, arguments['<tx_hex>'], arguments['<pkey_hex>']),
                   getblock=(api.getblock, arguments['<blockid_hex_or_num>']),
                   gettx=(api.gettx, arguments['<txid_hex>']),
                   trace=(api.trace, arguments['<txid_hex>']),
                   tracejson=(api.tracejson, arguments['<txid_hex>']),
                   dump=(api.dump, arguments['<tx_blk_id_hex>']),
                   getpending=(api.getpending,)
                   )
    # docopt sets exactly one command flag True; run it and stop.
    for k in cmd_map:
        if arguments.get(k):
            cmd_args = cmd_map.get(k)
            out = cmd_args[0](*cmd_args[1:])
            print out
            break

if __name__ == '__main__':
    main()
{ "content_hash": "27a2f7e96c4885611fbe5270a3975117", "timestamp": "", "source": "github", "line_count": 225, "max_line_length": 93, "avg_line_length": 37.413333333333334, "alnum_prop": 0.589807555238774, "repo_name": "joelcan/tools-eth-contract-dev", "id": "9814d49fad2ee427cb675931960f5f9642fea1a6", "size": "8441", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyethereum/pyethereum/ethclient.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "1657" }, { "name": "Python", "bytes": "513392" }, { "name": "Shell", "bytes": "1202" } ], "symlink_target": "" }
""" Command helper. Copyright (c) 2013 learn_more See the file license.txt or http://opensource.org/licenses/MIT for copying permission. Lists all commands available to you (permission based). """ from commands import add, admin, commands as cmdlist, aliases as aliaslist import fnmatch def commands(connection, value = None): names = [] for command in cmdlist: command_func = cmdlist[command] if (hasattr(command_func, 'user_types') and command not in connection.rights): continue include = False if (value is None or fnmatch.fnmatch(command, value)): include = True aliases = [] for a in aliaslist: if aliaslist[a] == command: if (value is None or fnmatch.fnmatch(a, value)): include = True aliases.append(a) cmd = command if len(aliases) == 0 else ('%s (%s)' % (command, ', '.join(aliases))) if include: names.append(cmd) return 'Commands: %s' % (', '.join(names)) add(commands) def apply_script(protocol, connection, config): return protocol, connection
{ "content_hash": "a890d05015780325a9655b4d73ff7422", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 86, "avg_line_length": 28.8, "alnum_prop": 0.6984126984126984, "repo_name": "learn-more/pysnip-utils", "id": "d0ba624181bb1c149614c68aeba5daeb5275f51e", "size": "1008", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/commandhelp.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "14959" } ], "symlink_target": "" }
from django.conf import settings from django.contrib import messages from django.contrib import auth # from django.contrib.auth import get_user_model, login from django.contrib.auth import views as auth_views from django.contrib.auth.decorators import login_required from django.urls import reverse from django.http import Http404 from django.shortcuts import redirect, render from django.utils.translation import gettext, get_language from django.views.decorators.cache import never_cache from django.views.decorators.debug import sensitive_post_parameters from django.views.decorators.http import require_POST from django.template.loader import get_template, render_to_string from .decorators import login_forbidden from .forms import ( AuthenticationForm, PublicUserCreationForm, UserProfileUpdateForm, PasswordResetForm, SetPasswordForm, CocAgreementForm, ) from .models import CocRecord from reviews.context import proposals_state, reviews_state from lxml import etree import lxml.html User = auth.get_user_model() @sensitive_post_parameters() @never_cache @login_forbidden def user_signup(request): if request.method == 'POST': form = PublicUserCreationForm(data=request.POST) if form.is_valid(): user = form.save() user.send_verification_email(request) auth.login(request, user) messages.success(request, gettext( 'Sign up successful. You are now logged in.' 
)) return redirect('user_dashboard') else: form = PublicUserCreationForm() return render(request, 'registration/signup.html', {'form': form}) @sensitive_post_parameters() @never_cache def user_verify(request, verification_key): try: user = User.objects.get_with_verification_key(verification_key) except User.DoesNotExist: raise Http404 user.verified = True user.save() messages.success(request, gettext('Email verification successful.')) return redirect('user_dashboard') @never_cache @login_required @require_POST def request_verification(request): user = request.user user.send_verification_email(request) messages.success( request, gettext('A verification email has been sent to {email}').format( email=user.email, ), ) return redirect('user_dashboard') @login_required def user_dashboard(request): if not request.user.is_valid_speaker(): return redirect('user_profile_update') logout_next = reverse('login') return render(request, 'users/user_dashboard.html', { 'logout_next': logout_next, **proposals_state()._asdict(), **reviews_state()._asdict(), }) @login_required def user_profile_update(request): logout_next = reverse('index') if request.method == 'POST': form = UserProfileUpdateForm( data=request.POST, files=request.FILES, instance=request.user, ) if form.is_valid(): form.save() messages.success(request, gettext( 'Your profile has been updated successfully.', )) return redirect('user_dashboard') else: form = UserProfileUpdateForm(instance=request.user) return render(request, 'users/user_profile_update.html', { 'form': form, 'logout_next': logout_next, **reviews_state()._asdict(), }) def password_change_done(request): messages.success(request, gettext( 'Your new password has been applied successfully.' )) return redirect('user_dashboard') def password_reset_done(request): messages.success(request, gettext( 'An email is sent to your email account. Please check your inbox for ' 'furthur instructions to reset your password.' 
)) return redirect('login') def password_reset_complete(request): messages.success(request, gettext( 'Password reset successful. You can now login.' )) return redirect('login') def password_reset_confirm(request, uidb64, token): return base_password_reset_confirm( request, uidb64=uidb64, token=token, set_password_form=SetPasswordForm ) @login_required def coc_agree(request): if request.method == 'POST': form = CocAgreementForm(data=request.POST) if form.is_valid(): try: agreement = CocRecord.objects.get(user=request.user, coc_version=settings.COC_VERSION) except CocRecord.DoesNotExist: agreement = CocRecord(user=request.user, coc_version=settings.COC_VERSION) agreement.save() return redirect(request.GET.get('next')) else: form = CocAgreementForm() # Get code of conduct lang = get_language() content = render_to_string('contents/%s/about/code-of-conduct.html' % lang[:2], {}, request) tree = lxml.html.document_fromstring(content) main = tree.xpath('//main')[0] # Remove the title # Since the HTML structure has changed # need to find the direct child from main which contains h1 as its descendant # and remove it for h1 in main.xpath('//h1'): target = h1 parent = h1.getparent() while parent != main and parent != None: target = parent parent = parent.getparent() if parent == main: main.remove(target) coc = etree.tostring(main, encoding='utf-8').decode('utf-8') return render(request, 'users/coc_agreement.html', { 'form': form, 'coc': coc, **reviews_state()._asdict(), }) class PasswordChangeView(auth_views.PasswordChangeView): # cannot merely pass extra_context=reviews_state()._asdict() to # auth_views.PasswordChangeView because # we need to resolve reviews_state()._asdict() everytime when # reaching this view def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update(**reviews_state()._asdict()) return context login = auth_views.LoginView.as_view(authentication_form=AuthenticationForm) logout = auth_views.LogoutView.as_view() 
password_change = PasswordChangeView.as_view() password_reset = auth_views.PasswordResetView.as_view(form_class=PasswordResetForm, template_name='registration/password_reset.html', email_template_name='registration/password_reset_email.txt') password_reset_confirm = auth_views.PasswordResetConfirmView.as_view( form_class=SetPasswordForm )
{ "content_hash": "a491ae74e72a06a024e30e0e69c727d7", "timestamp": "", "source": "github", "line_count": 207, "max_line_length": 102, "avg_line_length": 31.555555555555557, "alnum_prop": 0.6736068585425597, "repo_name": "pycontw/pycontw2016", "id": "1d4993d0dfc81eef7852b5f963264db598806ca7", "size": "6532", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/users/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "236762" }, { "name": "HTML", "bytes": "605550" }, { "name": "JavaScript", "bytes": "24923" }, { "name": "Python", "bytes": "479686" }, { "name": "Shell", "bytes": "389" } ], "symlink_target": "" }
import subprocess
from unittest import mock

from stestr import test_processor
from stestr.tests import base


class TestTestProcessorFixture(base.TestCase):
    """Unit tests for TestProcessorFixture._start_process."""

    def setUp(self):
        super().setUp()
        # Sentinels stand in for every constructor argument; none of them
        # is inspected by the code paths exercised here.
        self._fixture = test_processor.TestProcessorFixture(
            mock.sentinel.test_ids,
            mock.sentinel.options,
            mock.sentinel.cmd_template,
            mock.sentinel.listopt,
            mock.sentinel.idoption,
            mock.sentinel.repository,
        )

    # Patch decorators apply bottom-up, so the mock for the decorator
    # closest to the function (test_processor.sys) is passed first:
    # argument order is (self, mock_sys, mock_Popen).
    @mock.patch.object(subprocess, "Popen")
    @mock.patch.object(test_processor, "sys")
    def _check_start_process(
        self, mock_sys, mock_Popen, platform="win32", expected_fn=None
    ):
        # Force the platform seen by the module under test, then verify the
        # Popen invocation (notably the preexec_fn chosen per platform).
        mock_sys.platform = platform
        self._fixture._start_process(mock.sentinel.cmd)
        mock_Popen.assert_called_once_with(
            mock.sentinel.cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=expected_fn,
        )

    def test_start_process_win32(self):
        # On Windows no preexec_fn is expected (defaults to None).
        self._check_start_process()

    def test_start_process_linux(self):
        # On Linux the fixture clears SIGPIPE handling in the child.
        self._check_start_process(
            platform="linux2", expected_fn=self._fixture._clear_SIGPIPE
        )
{ "content_hash": "dd74a628937887b8139fab109ff2640c", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 71, "avg_line_length": 28.813953488372093, "alnum_prop": 0.6182405165456013, "repo_name": "mtreinish/stestr", "id": "302932c75e36e4f1877d47608acc3b8c68ac6de8", "size": "1784", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "stestr/tests/test_test_processor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "326042" }, { "name": "Shell", "bytes": "93" } ], "symlink_target": "" }
import unittest from mopidy_gmusic import actor as backend_lib from tests.test_extension import ExtensionTest class LibraryTest(unittest.TestCase): def setUp(self): config = ExtensionTest.get_config() self.backend = backend_lib.GMusicBackend(config, None) def test_browse_radio_deactivated(self): config = ExtensionTest.get_config() config['gmusic']['show_radio_stations_browse'] = False self.backend = backend_lib.GMusicBackend(config, None) refs = self.backend.library.browse('gmusic:directory') for ref in refs: self.assertNotEqual(ref.uri, 'gmusic:radio') def test_browse_none(self): refs = self.backend.library.browse(None) self.assertEqual(refs, []) def test_browse_invalid(self): refs = self.backend.library.browse('gmusic:invalid_uri') self.assertEqual(refs, []) def test_browse_root(self): refs = self.backend.library.browse('gmusic:directory') found = False for ref in refs: if ref.uri == 'gmusic:album': found = True break self.assertTrue(found, 'ref \'gmusic:album\' not found') found = False for ref in refs: if ref.uri == 'gmusic:artist': found = True break self.assertTrue(found, 'ref \'gmusic:artist\' not found') found = False for ref in refs: if ref.uri == 'gmusic:radio': found = True break self.assertTrue(found, 'ref \'gmusic:radio\' not found') def test_browse_artist(self): refs = self.backend.library.browse('gmusic:artist') self.assertIsNotNone(refs) def test_browse_artist_id_invalid(self): refs = self.backend.library.browse('gmusic:artist:artist_id') self.assertIsNotNone(refs) self.assertEqual(refs, []) def test_browse_album(self): refs = self.backend.library.browse('gmusic:album') self.assertIsNotNone(refs) def test_browse_album_id_invalid(self): refs = self.backend.library.browse('gmusic:album:album_id') self.assertIsNotNone(refs) self.assertEqual(refs, []) def test_browse_radio(self): refs = self.backend.library.browse('gmusic:radio') # tests should be unable to fetch stations :( # at least IFL radio should be available self.assertEqual(len(refs), 1) found = 
False for ref in refs: if ref.uri == 'gmusic:radio:IFL': found = True break self.assertTrue(found, 'ref \'gmusic:radio:IFL\' not found') def test_browse_station(self): refs = self.backend.library.browse('gmusic:radio:invalid_stations_id') # tests should be unable to fetch stations :( self.assertEqual(refs, []) def test_lookup_invalid(self): refs = self.backend.library.lookup('gmusic:invalid_uri') # tests should be unable to fetch any content :( self.assertEqual(refs, []) def test_lookup_invalid_album(self): refs = self.backend.library.lookup('gmusic:album:invalid_uri') # tests should be unable to fetch any content :( self.assertEqual(refs, []) def test_lookup_invalid_artist(self): refs = self.backend.library.lookup('gmusic:artis:invalid_uri') # tests should be unable to fetch any content :( self.assertEqual(refs, []) def test_lookup_invalid_track(self): refs = self.backend.library.lookup('gmusic:track:invalid_uri') # tests should be unable to fetch any content :( self.assertEqual(refs, [])
{ "content_hash": "2286d59a2545d667198c4bfe099ea161", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 78, "avg_line_length": 35.20952380952381, "alnum_prop": 0.6159047876656749, "repo_name": "elrosti/mopidy-gmusic", "id": "911c965329dd0884dbea2d645148988192efb8df", "size": "3697", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/test_library.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "55755" } ], "symlink_target": "" }
""" General utilities module """ import os, time import numpy as np import matplotlib.pylab as plt import matplotlib.cm as cm import matplotlib.animation as animation from configuration import get_config config = get_config() def animate_evolution(states, pacemakers, fname='lattice.gif', truncate=True): """ Animation evolution of lattice over time """ plt.gca().get_xaxis().set_visible(False) plt.gca().get_yaxis().set_visible(False) if truncate: # make animation smaller and faster states = states[::10] im = plt.imshow( states[0], cmap=cm.gray, interpolation='nearest', vmin=np.amin(states), vmax=np.amax(states) ) plt.colorbar(im) if len(pacemakers) > 0: plt.scatter( *zip(*[reversed(p) for p in pacemakers]), marker='+', color='red' ) def update(t): plt.suptitle(r'$t = %d$' % t) im.set_data(states[t]) return im, ani = animation.FuncAnimation( plt.gcf(), update, frames=len(states) ) ani.save(fname, writer='imagemagick', fps=10)#, dpi=200) plt.close() def save_data(fname, data): """ Try to save `data` in `directory`. Create `directory` if it does not exist """ dname = os.path.dirname(os.path.abspath(fname)) if not os.path.isdir(dname): os.makedirs(dname) np.save(fname, data) def gen_run_identifier(): """ Extract config parameters which are likely to be distinctive. 
Note: this requires a properly setup configuration """ return ('data_%.4f_%d_%d_%.2f_%.3f__%s' % ( \ config.beta, config.grid_size, config.t_max, config.D, config.p, time.strftime('%Y%m%d%H%M%S'))).replace('.', 'p') def timed_run(title): """ Decorator which times the decorated function """ def tmp(func): def wrapper(*args, **kwargs): print(' > %s' % title, end=' ', flush=True) start_time = time.time() res = func(*args, **kwargs) run_dur = time.time() - start_time print('(%.2fs)' % run_dur) return res return wrapper return tmp # plot data from previous run if __name__ == '__main__': import sys if len(sys.argv) != 2: print('Usage: %s <data file/dir>' % sys.argv[0]) sys.exit(1) def get_savedir(arg): """ Figure out where to save images for this data file and make sure the directory actually exists """ img_dir = 'images' pure_fname = os.path.splitext(os.path.basename(arg))[0] save_dir = os.path.join(img_dir, pure_fname) if not os.path.isdir(save_dir): os.makedirs(save_dir) return save_dir arg = sys.argv[1] if os.path.isdir(arg): for fn in os.listdir(arg): fname = os.path.join(arg, fn) camp, pacemaker, used_config = np.load(fname) out_name = os.path.join(get_savedir(fname), 'lattice_evolution.gif') animate_evolution(camp, pacemaker, fname=out_name) else: camp, pacemaker, used_config = np.load(arg) out_name = os.path.join(get_savedir(arg), 'lattice_evolution.gif') animate_evolution(camp, pacemaker, fname=out_name)
{ "content_hash": "a745fa4e81c9f8bf0f0d5b5365d83859", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 82, "avg_line_length": 27.85593220338983, "alnum_prop": 0.5841192576817767, "repo_name": "kpj/PyWave", "id": "6de23ce67710c8025e32aba5f96e854c05132ed3", "size": "3287", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "25365" } ], "symlink_target": "" }
import json from datetime import date, timedelta from django.contrib import messages from django.contrib.contenttypes.models import ContentType from django.http import ( HttpResponse, HttpResponseRedirect ) from django.shortcuts import get_object_or_404, render from django.views.decorators.http import require_POST from elasticsearch import ElasticsearchException from elasticutils.contrib.django import F, es_required_or_50x from mobility.decorators import mobile_template from tower import ugettext as _ from fjord.analytics.utils import ( counts_to_options, zero_fill ) from fjord.base.helpers import locale_name from fjord.base.urlresolvers import reverse from fjord.base.utils import ( analyzer_required, check_new_user, smart_int, smart_date, Atom1FeedWithRelatedLinks, JSONDatetimeEncoder ) from fjord.feedback.models import Product, Response, ResponseMappingType from fjord.journal.models import Record from fjord.search.utils import es_error_statsd from fjord.translations.models import GengoJob, get_translation_systems from fjord.translations.tasks import create_translation_tasks @check_new_user @require_POST @analyzer_required def spot_translate(request, responseid): # FIXME: This is gengo-machine specific for now. resp = get_object_or_404(Response, id=responseid) system = request.POST.get('system', None) total_jobs = 0 if system and system in get_translation_systems(): total_jobs += len(create_translation_tasks(resp, system=system)) # FIXME: If there's no system specified, we should tell the user # something. I'm going to defer fixing that for now since the user # would have to be doing "sneaky" things to hit that situation. 
messages.success(request, '%s %s translation jobs added' % ( total_jobs, system)) return HttpResponseRedirect( reverse('response_view', args=(responseid,))) @check_new_user @mobile_template('analytics/{mobile/}response.html') def response_view(request, responseid, template): response = get_object_or_404(Response, id=responseid) mlt = None records = None errors = [] if (request.user.is_authenticated() and request.user.has_perm('analytics.can_view_dashboard')): try: # Convert it to a list to force it to execute right now. mlt = ResponseMappingType.reshape( ResponseMappingType.morelikethis(response)) except ElasticsearchException as exc: errors.append('Failed to do morelikethis: %s' % exc) records = [ (u'Response records', Record.objects.records(response)), ] jobs = GengoJob.objects.filter( object_id=response.id, content_type=ContentType.objects.get_for_model(response) ) for job in jobs: records.append( (u'Gengo job record {0}'.format(job.id), job.records) ) # We don't pass the response directly to the template and instead # do some data tweaks here to make it more palatable for viewing. 
return render(request, template, { 'errors': errors, 'response': response, 'mlt': mlt, 'records': records, }) def generate_json_feed(request, search): """Generates JSON feed for first 100 results""" search_query = request.GET.get('q', None) responses = search.values_dict()[:100] json_data = { 'total': len(responses), 'results': list(responses), 'query': search_query } return HttpResponse( json.dumps(json_data, cls=JSONDatetimeEncoder), content_type='application/json') def generate_atom_feed(request, search): """Generates ATOM feed for first 100 results""" search_query = request.GET.get('q', None) if search_query: title = _(u'Firefox Input: {query}').format(query=search_query) else: title = _(u'Firefox Input') # Build the non-atom dashboard url and maintain all the # querystring stuff we have dashboard_url = request.build_absolute_uri() dashboard_url = dashboard_url.replace('format=atom', '') dashboard_url = dashboard_url.replace('&&', '&') if dashboard_url.endswith(('?', '&')): dashboard_url = dashboard_url[:-1] feed = Atom1FeedWithRelatedLinks( title=title, link=dashboard_url, description=_('Search Results From Firefox Input'), author_name=_('Firefox Input'), ) for response in search[:100]: categories = { 'sentiment': _('Happy') if response.happy else _('Sad'), 'platform': response.platform, 'locale': response.locale } categories = (':'.join(item) for item in categories.items()) link_url = reverse('response_view', args=(response.id,)) link_url = request.build_absolute_uri(link_url) feed.add_item( title=_('Response id: {id}').format(id=response.id), description=response.description, link=link_url, pubdate=response.created, categories=categories, link_related=response.url_domain, ) return HttpResponse( feed.writeString('utf-8'), content_type='application/atom+xml') def generate_dashboard_url(request, output_format='atom', viewname='dashboard'): """For a given request, generates the dashboard url for the given format """ qd = request.GET.copy() # Remove anything from 
the querystring that isn't good for a feed: # page, start_date, end_date, etc. for mem in qd.keys(): if mem not in ('happy', 'locale', 'platform', 'product', 'version', 'q'): del qd[mem] qd['format'] = output_format return reverse(viewname) + '?' + qd.urlencode() @check_new_user @es_required_or_50x(error_template='analytics/es_down.html') @es_error_statsd def dashboard(request): template = 'analytics/dashboard.html' output_format = request.GET.get('format', None) page = smart_int(request.GET.get('page', 1), 1) # Note: If we add additional querystring fields, we need to add # them to generate_dashboard_url. search_happy = request.GET.get('happy', None) search_platform = request.GET.get('platform', None) search_locale = request.GET.get('locale', None) search_product = request.GET.get('product', None) search_version = request.GET.get('version', None) search_query = request.GET.get('q', None) search_date_start = smart_date( request.GET.get('date_start', None), fallback=None) search_date_end = smart_date( request.GET.get('date_end', None), fallback=None) search_bigram = request.GET.get('bigram', None) selected = request.GET.get('selected', None) filter_data = [] current_search = {'page': page} search = ResponseMappingType.search() f = F() # If search happy is '0' or '1', set it to False or True, respectively. 
search_happy = {'0': False, '1': True}.get(search_happy, None) if search_happy in [False, True]: f &= F(happy=search_happy) current_search['happy'] = int(search_happy) def unknown_to_empty(text): """Convert "Unknown" to "" to support old links""" return u'' if text.lower() == u'unknown' else text if search_platform is not None: f &= F(platform=unknown_to_empty(search_platform)) current_search['platform'] = search_platform if search_locale is not None: f &= F(locale=unknown_to_empty(search_locale)) current_search['locale'] = search_locale visible_products = [ prod.encode('utf-8') for prod in Product.objects.public().values_list('db_name', flat=True) ] # This covers the "unknown" product which is also visible. visible_products.append('') if search_product in visible_products: f &= F(product=unknown_to_empty(search_product)) current_search['product'] = search_product if search_version is not None: # Note: We only filter on version if we're filtering on # product. f &= F(version=unknown_to_empty(search_version)) current_search['version'] = search_version else: f &= F(product__in=visible_products) if search_date_start is None and search_date_end is None: selected = '7d' if search_date_end is None: search_date_end = date.today() if search_date_start is None: search_date_start = search_date_end - timedelta(days=7) # If the start and end dates are inverted, switch them into proper # chronoligcal order search_date_start, search_date_end = sorted( [search_date_start, search_date_end]) # Restrict the frontpage dashboard to only show the last 6 months # of data six_months_ago = date.today() - timedelta(days=180) search_date_start = max(six_months_ago, search_date_start) search_date_end = max(search_date_start, search_date_end) current_search['date_end'] = search_date_end.strftime('%Y-%m-%d') f &= F(created__lte=search_date_end) current_search['date_start'] = search_date_start.strftime('%Y-%m-%d') f &= F(created__gte=search_date_start) if search_query: current_search['q'] = 
search_query search = search.query(description__sqs=search_query) if search_bigram is not None: f &= F(description_bigrams=search_bigram) filter_data.append({ 'display': _('Bigram'), 'name': 'bigram', 'options': [{ 'count': 'all', 'name': search_bigram, 'display': search_bigram, 'value': search_bigram, 'checked': True }] }) search = search.filter(f).order_by('-created') # If the user asked for a feed, give him/her a feed! if output_format == 'atom': return generate_atom_feed(request, search) elif output_format == 'json': return generate_json_feed(request, search) # Search results and pagination if page < 1: page = 1 page_count = 20 start = page_count * (page - 1) end = start + page_count search_count = search.count() opinion_page = search[start:end] # Navigation facet data facets = search.facet( 'happy', 'platform', 'locale', 'product', 'version', size=1000, filtered=bool(search._process_filters(f.filters))) # This loop does two things. First it maps 'T' -> True and 'F' -> # False. This is probably something EU should be doing for # us. Second, it restructures the data into a more convenient # form. 
counts = { 'happy': {}, 'platform': {}, 'locale': {}, 'product': {}, 'version': {} } happy_sad_filter = request.GET.get('happy', None) if happy_sad_filter: if happy_sad_filter == '1': counts['happy'] = {True: 0} elif happy_sad_filter == '0': counts['happy'] = {False: 0} if search_platform: counts['platform'] = {search_platform: 0} if search_locale: counts['locale'] = {search_locale: 0} if search_product: counts['product'] = {search_product: 0} if search_version: counts['version'] = {search_version: 0} for param, terms in facets.facet_counts().items(): for term in terms: name = term['term'] if name.upper() == 'T': name = True elif name.upper() == 'F': name = False counts[param][name] = term['count'] def empty_to_unknown(text): return _('Unknown') if text == u'' else text filter_data.extend([ counts_to_options( counts['happy'].items(), name='happy', display=_('Sentiment'), display_map={True: _('Happy'), False: _('Sad')}, value_map={True: 1, False: 0}, checked=search_happy), counts_to_options( counts['product'].items(), name='product', display=_('Product'), display_map=empty_to_unknown, checked=search_product) ]) # Only show the version if we're showing a specific # product. 
if search_product: filter_data.append( counts_to_options( counts['version'].items(), name='version', display=_('Version'), display_map=empty_to_unknown, checked=search_version) ) else: filter_data.append({ 'display': _('Version'), 'note': _('Select product to see version breakdown') }) filter_data.extend( [ counts_to_options( counts['platform'].items(), name='platform', display=_('Platform'), display_map=empty_to_unknown, checked=search_platform), counts_to_options( counts['locale'].items(), name='locale', display=_('Locale'), checked=search_locale, display_map=locale_name), ] ) # Histogram data happy_data = [] sad_data = [] happy_f = f & F(happy=True) sad_f = f & F(happy=False) histograms = search.facet_raw( happy={ 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(happy_f.filters) }, sad={ 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(sad_f.filters) }, ).facet_counts() # p['time'] is number of milliseconds since the epoch. Which is # convenient, because that is what the front end wants. happy_data = dict((p['time'], p['count']) for p in histograms['happy']) sad_data = dict((p['time'], p['count']) for p in histograms['sad']) zero_fill(search_date_start, search_date_end, [happy_data, sad_data]) histogram = [ {'label': _('Happy'), 'name': 'happy', 'data': sorted(happy_data.items())}, {'label': _('Sad'), 'name': 'sad', 'data': sorted(sad_data.items())}, ] return render(request, template, { 'opinions': opinion_page, 'opinion_count': search_count, 'filter_data': filter_data, 'histogram': histogram, 'page': page, 'prev_page': page - 1 if start > 0 else None, 'next_page': page + 1 if end < search_count else None, 'current_search': current_search, 'selected': selected, 'atom_url': generate_dashboard_url(request), }) def generate_totals_histogram(search_date_start, search_date_end, search_query, prod): # Note: Not localized because it's ultra-alpha. 
search_date_start = search_date_start - timedelta(days=1) search = ResponseMappingType.search() if search_query: search = search.query(description__sqs=search_query) f = F() f &= F(product=prod.db_name) f &= F(created__gte=search_date_start) f &= F(created__lt=search_date_end) happy_f = f & F(happy=True) totals_histogram = search.facet_raw( total={ 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(f.filters) }, happy={ 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(happy_f.filters) }, ).facet_counts() totals_data = dict((p['time'], p['count']) for p in totals_histogram['total']) zero_fill(search_date_start, search_date_end, [totals_data]) totals_data = sorted(totals_data.items()) happy_data = dict((p['time'], p['count']) for p in totals_histogram['happy']) zero_fill(search_date_start, search_date_end, [happy_data]) happy_data = sorted(happy_data.items()) up_deltas = [] down_deltas = [] for i, hap in enumerate(happy_data): if i == 0: continue yesterday = 0 today = 0 # Figure out yesterday and today as a percent to one # significant digit. 
if happy_data[i-1][1] and totals_data[i-1][1]: yesterday = ( int(happy_data[i-1][1] * 1.0 / totals_data[i-1][1] * 1000) / 10.0 ) if happy_data[i][1] and totals_data[i][1]: today = ( int(happy_data[i][1] * 1.0 / totals_data[i][1] * 1000) / 10.0 ) if (today - yesterday) >= 0: up_deltas.append((happy_data[i][0], today - yesterday)) else: down_deltas.append((happy_data[i][0], today - yesterday)) # Nix the first total because it's not in our date range totals_data = totals_data[1:] histogram = [ { 'name': 'zero', 'data': [(totals_data[0][0], 0), (totals_data[-1][0], 0)], 'yaxis': 2, 'lines': {'show': True, 'fill': False, 'lineWidth': 1, 'shadowSize': 0}, 'color': '#dddddd', }, { 'name': 'total', 'label': 'Total # responses', 'data': totals_data, 'yaxis': 1, 'lines': {'show': True, 'fill': False}, 'points': {'show': True}, 'color': '#3E72BF', }, { 'name': 'updeltas', 'label': 'Percent change in sentiment upwards', 'data': up_deltas, 'yaxis': 2, 'bars': {'show': True, 'lineWidth': 3}, 'points': {'show': True}, 'color': '#55E744', }, { 'name': 'downdeltas', 'label': 'Percent change in sentiment downwards', 'data': down_deltas, 'yaxis': 2, 'bars': {'show': True, 'lineWidth': 3}, 'points': {'show': True}, 'color': '#E73E3E', } ] return histogram def product_dashboard_firefox(request, prod): # Note: Not localized because it's ultra-alpha. 
template = 'analytics/product_dashboard_firefox.html' current_search = {} search_query = request.GET.get('q', None) if search_query: current_search['q'] = search_query search_date_end = smart_date( request.GET.get('date_end', None), fallback=None) if search_date_end is None: search_date_end = date.today() current_search['date_end'] = search_date_end.strftime('%Y-%m-%d') search_date_start = smart_date( request.GET.get('date_start', None), fallback=None) if search_date_start is None: search_date_start = search_date_end - timedelta(days=7) current_search['date_start'] = search_date_start.strftime('%Y-%m-%d') histogram = generate_totals_histogram( search_date_start, search_date_end, search_query, prod) # FIXME: This is lame, but we need to make sure the item we're # looking at is the totals. assert histogram[1]['name'] == 'total' totals_sum = sum([p[1] for p in histogram[1]['data']]) search = ResponseMappingType.search() if search_query: search = search.query(description__sqs=search_query) base_f = F() base_f &= F(product=prod.db_name) base_f &= F(created__gte=search_date_start) base_f &= F(created__lt=search_date_end) search = search.filter(base_f) # Figure out the list of platforms and versions for this range. 
plats_and_vers = (search .facet('platform', 'version', size=50) .facet_counts()) # Figure out the "by platform" histogram platforms = [part['term'] for part in plats_and_vers['platform']] platform_facet = {} for plat in platforms: plat_f = base_f & F(platform=plat) platform_facet[plat if plat else 'unknown'] = { 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(plat_f.filters) } platform_counts = search.facet_raw(**platform_facet).facet_counts() platforms_histogram = [] for key in platform_counts.keys(): data = dict((p['time'], p['count']) for p in platform_counts[key]) sum_counts = sum([p['count'] for p in platform_counts[key]]) if sum_counts < (totals_sum * 0.02): # Skip platforms where the number of responses is less than # 2% of the total. continue zero_fill(search_date_start, search_date_end, [data]) platforms_histogram.append({ 'name': key, 'label': key, 'data': sorted(data.items()), 'lines': {'show': True, 'fill': False}, 'points': {'show': True}, }) # Figure out the "by version" histogram versions = [part['term'] for part in plats_and_vers['version']] version_facet = {} for vers in versions: vers_f = base_f & F(version=vers) version_facet['v' + vers if vers else 'unknown'] = { 'date_histogram': {'interval': 'day', 'field': 'created'}, 'facet_filter': search._process_filters(vers_f.filters) } version_counts = search.facet_raw(**version_facet).facet_counts() versions_histogram = [] for key in version_counts.keys(): data = dict((p['time'], p['count']) for p in version_counts[key]) sum_counts = sum([p['count'] for p in version_counts[key]]) if sum_counts < (totals_sum * 0.02): # Skip versions where the number of responses is less than # 2% of the total. 
continue zero_fill(search_date_start, search_date_end, [data]) versions_histogram.append({ 'name': key, 'label': key, 'data': sorted(data.items()), 'lines': {'show': True, 'fill': False}, 'points': {'show': True}, }) return render(request, template, { 'start_date': search_date_start, 'end_date': search_date_end, 'current_search': current_search, 'platforms_histogram': platforms_histogram, 'versions_histogram': versions_histogram, 'histogram': histogram, 'product': prod }) def product_dashboard_generic(request, prod): # Note: Not localized because it's ultra-alpha. template = 'analytics/product_dashboard.html' current_search = {} search_query = request.GET.get('q', None) if search_query: current_search['q'] = search_query search_date_end = smart_date( request.GET.get('date_end', None), fallback=None) if search_date_end is None: search_date_end = date.today() current_search['date_end'] = search_date_end.strftime('%Y-%m-%d') search_date_start = smart_date( request.GET.get('date_start', None), fallback=None) if search_date_start is None: search_date_start = search_date_end - timedelta(days=7) current_search['date_start'] = search_date_start.strftime('%Y-%m-%d') histogram = generate_totals_histogram( search_date_start, search_date_end, search_query, prod) return render(request, template, { 'start_date': search_date_start, 'end_date': search_date_end, 'current_search': current_search, 'histogram': histogram, 'product': prod }) PRODUCT_TO_DASHBOARD = { 'firefox': product_dashboard_firefox } @check_new_user @es_required_or_50x(error_template='analytics/es_down.html') def product_dashboard_router(request, productslug): prod = get_object_or_404(Product, slug=productslug) # FIXME - Some products should never have public dashboards. This # should handle that. fun = PRODUCT_TO_DASHBOARD.get(productslug, product_dashboard_generic) return fun(request, prod)
{ "content_hash": "74f52b9730e54f3a2d84b633834270fd", "timestamp": "", "source": "github", "line_count": 726, "max_line_length": 78, "avg_line_length": 33.268595041322314, "alnum_prop": 0.584192439862543, "repo_name": "DESHRAJ/fjord", "id": "68753b2210aec6809b4ba49babd2515b7566d52d", "size": "24275", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fjord/analytics/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "168457" }, { "name": "JavaScript", "bytes": "299449" }, { "name": "Makefile", "bytes": "4594" }, { "name": "Python", "bytes": "709245" }, { "name": "Shell", "bytes": "13991" } ], "symlink_target": "" }
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print __doc__ # Authors: Clay Woolam <clay@woolam.org> # Licence: BSD import numpy as np import pylab as pl from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} pl.set_cmap(pl.cm.Paired) for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will asign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. 
pl.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) pl.set_cmap(pl.cm.Paired) pl.contourf(xx, yy, Z) pl.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] pl.scatter(X[:, 0], X[:, 1], c=colors) pl.title(titles[i]) pl.text(.90, 0, "Unlabeled points are colored white") pl.show()
{ "content_hash": "7cd8d82a7e35675f445ea3806c25cf5c", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 73, "avg_line_length": 28.9390243902439, "alnum_prop": 0.594605983986515, "repo_name": "cdegroc/scikit-learn", "id": "d0d2e14ed342929c79b6492368813e0dfd057a77", "size": "2373", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/semi_supervised/plot_label_propagation_versus_svm_iris.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "6543221" }, { "name": "C++", "bytes": "245669" }, { "name": "Python", "bytes": "2667615" }, { "name": "Shell", "bytes": "3770" } ], "symlink_target": "" }
from ampy import * from async import *
{ "content_hash": "27b34678ea12f2f7cd2c740322828d6b", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 19, "avg_line_length": 19.5, "alnum_prop": 0.7435897435897436, "repo_name": "HuFlungDu/huflungdu-ampy", "id": "b2dc92d1b6375c7c832f1b3e91979a368e3ef09b", "size": "210", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ampy/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "125238" } ], "symlink_target": "" }
from .nominal_behavior import NominalBehavior from .store_flight_data import StoreFlightData
{ "content_hash": "5b7044c094531544d4935232fce8f1d7", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 46, "avg_line_length": 46.5, "alnum_prop": 0.8602150537634409, "repo_name": "interuss/dss", "id": "3cef39d35387bc281d6772fd1b0939dd54c0cb97", "size": "93", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "monitoring/uss_qualifier/scenarios/astm/netrid/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "518" }, { "name": "Dockerfile", "bytes": "6594" }, { "name": "Go", "bytes": "583387" }, { "name": "HTML", "bytes": "20494" }, { "name": "Jsonnet", "bytes": "601530" }, { "name": "Makefile", "bytes": "10609" }, { "name": "PLpgSQL", "bytes": "4759" }, { "name": "Python", "bytes": "948652" }, { "name": "Shell", "bytes": "76140" } ], "symlink_target": "" }
from datetime import timedelta import logging # DJANGO from django.conf import settings from django.utils.timezone import now # CELERY from celery import task # UTILS from utils.slices import get_index_slice # FBDATA from fbdata.participant import get_participants from fbdata.utils import padded_date_range # ADDATA from .activity import process_all_hourly_activity from .processing import ( get_earliest_raw_data, get_raw_data_slice, get_tldextract, process_raw_data, process_raw_data_set ) from .media import ( download_adrecord, download_fbadimage ) from .models import ( AdRecord, FBAdImage, RawData ) _RD_SLICE = 200 _AR_SLICE = 100 _FBAD_SLICE = 100 @task.task(ignore_result=False, name='addata.tasks.async_process_raw_data') def async_process_raw_data(): logger = logging.getLogger('vavs.tasks.analytics') earliest = get_earliest_raw_data() if not earliest: return start = earliest.pk end = start + _RD_SLICE start_time = now() tldextractor = get_tldextract() rdset = get_raw_data_slice(start, end) ok, error, unproc = process_raw_data_set(rdset, tldextractor=tldextractor) end_time = now() lines = ['async_process_raw_data: %s to %s' % (start, end)] lines.append('time: %s duration: %s' % (start_time, end_time-start_time)) lines.append('processed OK: %d ERRORS: %d' % (len(ok), len(error))) lines.append('unprocessed: %d' % len(unproc)) message = '\n'.join(lines) logger.info(message) @task.task(ignore_result=False, name='addata.tasks.async_purge_raw_data') def async_purge_raw_data(): logger = logging.getLogger('vavs.tasks.analytics') cutoff = now() - timedelta(days=2) RawData.objects.filter(created__lte=cutoff, status=RawData.STATUS_DONE).delete() logger.info('async_purge_raw_data') @task(ignore_result=True, name='addata.tasks.aysnc_download_adrecord_media') def aysnc_download_adrecord_media(): logger = logging.getLogger('vavs.tasks.media') try: earliest = AdRecord.objects.filter( status=FBAdImage.STATUS_NEW).earliest('id') except AdRecord.DoesNotExist: return index = 
earliest.id adrecords = AdRecord.objects.filter(id__range=(index, index+_AR_SLICE), status=AdRecord.STATUS_NEW) lines = ['aysnc_download_adrecord_media: %d %d, %s' % (index, adrecords.count(), now())] for adrecord in adrecords: download_adrecord(adrecord) lines.append("AdRecord %d %s %s" % (adrecord.id, adrecord.status_str(), adrecord.content_type)) message = '\n'.join(lines) logger.info(message) @task(ignore_result=True, name='addata.tasks.aysnc_download_fbadimage_media') def aysnc_download_fbadimage_media(): logger = logging.getLogger('vavs.tasks.media') try: earliest = FBAdImage.objects.filter( status=FBAdImage.STATUS_NEW).earliest('id') except FBAdImage.DoesNotExist: return index = earliest.id fbadimages = FBAdImage.objects.filter(id__range=(index, index+_FBAD_SLICE), status=FBAdImage.STATUS_NEW) lines = ['aysnc_download_fbadimage_media: %d %d, %s' % (index, fbadimages.count(), now())] for fbadimage in fbadimages: if len(fbadimage.url) > 1: download_fbadimage(fbadimage) lines.append("FBAdImage %d %s" % ( fbadimage.id, fbadimage.status_str())) else: lines.append("FBAdImage %d no URL, deleted" % fbadimage.id) fbadimage.delete() message = '\n'.join(lines) logger.info(message) @task.task(ignore_result=False, name='addata.tasks.aysnc_update_hourly_activity') def aysnc_update_hourly_activity(): logger = logging.getLogger('vavs.tasks.analytics') end = now() start = end - timedelta(days=1) lines = ['aysnc_update_hourly_activity: %s' % end] lines.append('Date: %s (%s - %s)' % (end.date(), start, end)) participants = get_participants() for user in participants: activities = process_all_hourly_activity(user, start, end) lines.append('%s activities' % user.username) lines.append('\tads:\t%d' % len(activities['ad_activities'])) lines.append('\tfbsp:\t%d' % len(activities['fbsp_activities'])) lines.append('\tfbad:\t%d' % len(activities['fbad_activities'])) message = '\n'.join(lines) logger.info(message)
{ "content_hash": "880efbacbc8ddd70e54acd2cba81543f", "timestamp": "", "source": "github", "line_count": 129, "max_line_length": 80, "avg_line_length": 36.4031007751938, "alnum_prop": 0.6213798977853492, "repo_name": "valuesandvalue/valuesandvalue", "id": "e3a75e7e03ab3f7cfed4e07d79bbce0e1b07ff68", "size": "4721", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vavs_project/addata/tasks.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "21480" }, { "name": "JavaScript", "bytes": "80469" }, { "name": "Python", "bytes": "315797" } ], "symlink_target": "" }
from distutils.core import setup setup( name = 'csv2kmz', packages = ['csv2kmz'], version = '0.1', description = 'Converts a parsed csv file to a kmz Google Earth overlay.', author = 'aguinane', author_email = 'alexguinane@gmail.com', url = 'https://github.com/aguinane/csv2kmz', keywords = ['kml', 'kmz'], classifiers = [], license = 'MIT', )
{ "content_hash": "9adfe42937c70782cfb22d21d5e4033b", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 78, "avg_line_length": 30.53846153846154, "alnum_prop": 0.5944584382871536, "repo_name": "aguinane/csvtokmz", "id": "6f1ffff5eaee3b86f2b982d8ceb7b3d6e01d27cf", "size": "397", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "8212" } ], "symlink_target": "" }
""" =========================================== Tractography Clustering - Available Metrics =========================================== This page lists available metrics that can be used by the tractography clustering framework. For every metric a brief description is provided explaining: what it does, when it's useful and how to use it. If you are not familiar with the tractography clustering framework, check this tutorial :ref:`clustering-framework`. .. contents:: Available Metrics :local: :depth: 1 **Note**: All examples assume a function `get_streamlines` exists. We defined here a simple function to do so. It imports the necessary modules and load a small streamline bundle. """ def get_streamlines(): from nibabel import trackvis as tv from dipy.data import get_data fname = get_data('fornix') streams, hdr = tv.read(fname) streamlines = [i[0] for i in streams] return streamlines """ .. _clustering-examples-AveragePointwiseEuclideanMetric: Average of Pointwise Euclidean Metric ===================================== **What:** Instances of `AveragePointwiseEuclideanMetric` first compute the pointwise Euclidean distance between two sequences *of same length* then return the average of those distances. This metric takes as inputs two features that are sequences containing the same number of elements. **When:** By default the `QuickBundles` clustering will resample your streamlines on-the-fly so they have 12 points. If for some reason you want to avoid this and you made sure all your streamlines have already the same number of points, you can manually provide an instance of `AveragePointwiseEuclideanMetric` to `QuickBundles`. Since the default `Feature` is the `IdentityFeature` the streamlines won't be resampled thus saving some computational time. **Note:** Inputs must be sequences of same length. """ from dipy.viz import fvtk from dipy.segment.clustering import QuickBundles from dipy.segment.metric import AveragePointwiseEuclideanMetric # Get some streamlines. 
streamlines = get_streamlines() # Previously defined. # Make sure our streamlines have the same number of points. from dipy.tracking.streamline import set_number_of_points streamlines = set_number_of_points(streamlines, nb_points=12) # Create the instance of `AveragePointwiseEuclideanMetric` to use. metric = AveragePointwiseEuclideanMetric() qb = QuickBundles(threshold=10., metric=metric) clusters = qb.cluster(streamlines) print("Nb. clusters:", len(clusters)) print("Cluster sizes:", map(len, clusters)) """ :: Nb. clusters: 4 Cluster sizes: [64, 191, 44, 1] .. _clustering-examples-SumPointwiseEuclideanMetric: Sum of Pointwise Euclidean Metric ================================= **What:** Instances of `SumPointwiseEuclideanMetric` first compute the pointwise Euclidean distance between two sequences *of same length* then return the sum of those distances. **When:** This metric mainly exists because it is used internally by `AveragePointwiseEuclideanMetric`. **Note:** Inputs must be sequences of same length. """ from dipy.segment.clustering import QuickBundles from dipy.segment.metric import SumPointwiseEuclideanMetric # Get some streamlines. streamlines = get_streamlines() # Previously defined. # Make sure our streamlines have the same number of points. from dipy.tracking.streamline import set_number_of_points nb_points = 12 streamlines = set_number_of_points(streamlines, nb_points=nb_points) # Create the instance of `SumPointwiseEuclideanMetric` to use. metric = SumPointwiseEuclideanMetric() qb = QuickBundles(threshold=10.*nb_points, metric=metric) clusters = qb.cluster(streamlines) print("Nb. clusters:", len(clusters)) print("Cluster sizes:", map(len, clusters)) """ :: Nb. clusters: 4 Cluster sizes: [64, 191, 44, 1] .. _clustering-examples-MinimumAverageDirectFlipMetric: Minimum Average Direct Flip Metric (MDF) ======================================== **What:** It is the metric used in the QuickBundles algorithm [Garyfallidis12]_. 
Instances of `MinimumAverageDirectFlipMetric` first compute the direct distance *d1* by taking the average of the pointwise Euclidean distances between two sequences *of same length*. Reverse one of the two sequences and compute the flip distance *d2* using the same approach as for *d1*. Then, return the minimum between *d1* and *d2*. **When:** This metric mainly exists because it is used internally by `AveragePointwiseEuclideanMetric`. **Note:** Inputs must be sequences of same length. """ from dipy.segment.metric import MinimumAverageDirectFlipMetric # Get some streamlines. streamlines = get_streamlines() # Previously defined. # Make sure our streamlines have the same number of points. from dipy.tracking.streamline import set_number_of_points streamlines = set_number_of_points(streamlines, nb_points=20) # Create the instance of `MinimumAverageDirectFlipMetric` to use. metric = MinimumAverageDirectFlipMetric() d = metric.dist(streamlines[0], streamlines[1]) print("MDF distance between the first two streamlines: ", d) """ :: MDF distance between the first two streamlines: 11.681308709622542 .. _clustering-examples-MinimumAverageDirectFlipMetric: Cosine Metric ============= **What:** Instances of `CosineMetric` compute the cosine distance between two vectors (for more information see the `wiki page <https://en.wikipedia.org/wiki/Cosine_similarity>`_). **When:** This metric can be useful when you *only* need information about the orientation of a streamline. **Note:** Inputs must be vectors (i.e. 1D array). """ import numpy as np from dipy.viz import fvtk from dipy.segment.clustering import QuickBundles from dipy.segment.metric import VectorOfEndpointsFeature from dipy.segment.metric import CosineMetric # Get some streamlines. streamlines = get_streamlines() # Previously defined. 
feature = VectorOfEndpointsFeature() metric = CosineMetric(feature) qb = QuickBundles(threshold=0.1, metric=metric) clusters = qb.cluster(streamlines) # Color each streamline according to the cluster they belong to. colormap = fvtk.create_colormap(np.arange(len(clusters))) colormap_full = np.ones((len(streamlines), 3)) for cluster, color in zip(clusters, colormap): colormap_full[cluster.indices] = color # Visualization ren = fvtk.ren() fvtk.clear(ren) ren.SetBackground(0, 0, 0) fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full)) fvtk.record(ren, n_frames=1, out_path='cosine_metric.png', size=(600, 600)) """ .. figure:: cosine_metric.png :align: center **Showing the streamlines colored according to their orientation**. .. include:: ../links_names.inc .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for tractography simplification, Frontiers in Neuroscience, vol 6, no 175, 2012. """
{ "content_hash": "62e987e4d3c5b954c735c4c94de559ff", "timestamp": "", "source": "github", "line_count": 211, "max_line_length": 80, "avg_line_length": 32.44075829383886, "alnum_prop": 0.7430241051862674, "repo_name": "matthieudumont/dipy", "id": "f4f01c45fa2235d8958939d9fc1a9d5806ce1bf8", "size": "6845", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "doc/examples/segment_clustering_metrics.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "2844" }, { "name": "Makefile", "bytes": "3639" }, { "name": "Python", "bytes": "2944439" } ], "symlink_target": "" }
""" Tests for services relating to emails.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from constants import constants from core.domain import email_services from core.platform import models from core.tests import test_utils import feconf import python_utils (email_models,) = models.Registry.import_models([models.NAMES.email]) platform_email_services = models.Registry.import_email_services() class EmailServicesTest(test_utils.EmailTestBase): """Tests for email_services functions.""" def test_reply_info_email_objects_are_created_and_queried_correctly(self): model = email_models.GeneralFeedbackEmailReplyToIdModel.create( 'user1', 'exploration.exp1.1') reply_to_id = model.reply_to_id queried_object = ( email_services.get_feedback_thread_reply_info_by_reply_to_id( reply_to_id)) self.assertEqual(queried_object.reply_to_id, reply_to_id) self.assertEqual(queried_object.id, 'user1.exploration.exp1.1') queried_object = ( email_services.get_feedback_thread_reply_info_by_reply_to_id( 'unknown.reply.to.id')) self.assertEqual(queried_object, None) queried_object = ( email_services .get_feedback_thread_reply_info_by_user_and_thread_ids( 'user1', 'exploration.exp1.1')) self.assertEqual(queried_object.reply_to_id, reply_to_id) self.assertEqual(queried_object.id, 'user1.exploration.exp1.1') queried_object = ( email_services .get_feedback_thread_reply_info_by_user_and_thread_ids( 'user_unknown', 'invalid_thread_id')) self.assertEqual(queried_object, None) def test_send_mail_raises_exception_for_invalid_permissions(self): """Tests the send_mail exception raised for invalid user permissions.""" send_email_exception = ( self.assertRaisesRegexp( Exception, 'This app cannot send emails to users.')) with send_email_exception, self.swap(constants, 'DEV_MODE', False): email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', 
bcc_admin=False) def test_send_mail_data_properly_sent(self): """Verifies that the data sent in send_mail is correct.""" allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) with allow_emailing: email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', bcc_admin=False) messages = self._get_sent_email_messages( feconf.ADMIN_EMAIL_ADDRESS) self.assertEqual(len(messages), 1) self.assertEqual(messages[0].subject, 'subject') self.assertEqual(messages[0].body, 'body') self.assertEqual(messages[0].html, 'html') def test_bcc_admin_flag(self): """Verifies that the bcc admin flag is working properly in send_mail. """ allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) with allow_emailing: email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', bcc_admin=True) messages = self._get_sent_email_messages( feconf.ADMIN_EMAIL_ADDRESS) self.assertEqual(len(messages), 1) self.assertEqual(messages[0].bcc, feconf.ADMIN_EMAIL_ADDRESS) def test_reply_to_id_flag(self): """Verifies that the reply_to_id flag is working properly.""" allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) reply_id = 123 with allow_emailing: email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', bcc_admin=False, reply_to_id=reply_id) messages = self._get_sent_email_messages( feconf.ADMIN_EMAIL_ADDRESS) self.assertEqual(len(messages), 1) self.assertEqual( messages[0].reply_to, 'reply+' + python_utils.UNICODE(reply_id) + '@' + feconf.INCOMING_EMAILS_DOMAIN_NAME) def test_send_bulk_mail_exception_for_invalid_permissions(self): """Tests the send_bulk_mail exception raised for invalid user permissions. 
""" send_email_exception = ( self.assertRaisesRegexp( Exception, 'This app cannot send emails to users.')) with send_email_exception, ( self.swap(constants, 'DEV_MODE', False)): email_services.send_bulk_mail( feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS], 'subject', 'body', 'html') def test_send_bulk_mail_data_properly_sent(self): """Verifies that the data sent in send_bulk_mail is correct for each user in the recipient list. """ allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) recipients = [feconf.ADMIN_EMAIL_ADDRESS] with allow_emailing: email_services.send_bulk_mail( feconf.SYSTEM_EMAIL_ADDRESS, recipients, 'subject', 'body', 'html') messages = self._get_sent_email_messages( feconf.ADMIN_EMAIL_ADDRESS) self.assertEqual(len(messages), 1) self.assertEqual(messages[0].to, recipients) def test_email_not_sent_if_email_addresses_are_malformed(self): """Tests that email is not sent if recipient email address is malformed. """ # Case when malformed_recipient_email is None when calling send_mail. malformed_recipient_email = None email_exception = self.assertRaisesRegexp( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_mail( 'sender@example.com', malformed_recipient_email, 'subject', 'body', 'html') # Case when malformed_recipient_email is an empty string when # calling send_mail. malformed_recipient_email = '' email_exception = self.assertRaisesRegexp( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_mail( 'sender@example.com', malformed_recipient_email, 'subject', 'body', 'html') # Case when sender is malformed for send_mail. 
malformed_sender_email = 'x@x@x' email_exception = self.assertRaisesRegexp( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_mail( malformed_sender_email, 'recipient@example.com', 'subject', 'body', 'html') # Case when the SENDER_EMAIL in brackets of 'SENDER NAME <SENDER_EMAIL> # is malformed when calling send_mail. malformed_sender_email = 'Name <malformed_email>' email_exception = self.assertRaisesRegexp( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_mail( malformed_sender_email, 'recipient@example.com', 'subject', 'body', 'html') # Case when sender is malformed when calling send_bulk_mail. malformed_sender_email = 'name email@email.com' email_exception = self.assertRaisesRegexp( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_bulk_mail( malformed_sender_email, ['recipient@example.com'], 'subject', 'body', 'html') # Case when sender is malformed when calling send_bulk_mail. malformed_recipient_emails = ['a@a.com', 'email.com'] email_exception = self.assertRaisesRegexp( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_emails[1]) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: email_services.send_bulk_mail( 'sender@example.com', malformed_recipient_emails, 'subject', 'body', 'html') def test_unsuccessful_status_codes_raises_exception(self): """Test that unsuccessful status codes returned raises an exception.""" email_exception = self.assertRaisesRegexp( Exception, 'Bulk email failed to send. 
Please try again later or' + ' contact us to report a bug at https://www.oppia.org/contact.') allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) swap_send_email_to_recipients = self.swap( platform_email_services, 'send_email_to_recipients', lambda *_: False) recipients = [feconf.ADMIN_EMAIL_ADDRESS] with allow_emailing, email_exception, swap_send_email_to_recipients: email_services.send_bulk_mail( feconf.SYSTEM_EMAIL_ADDRESS, recipients, 'subject', 'body', 'html') email_exception = self.assertRaisesRegexp( Exception, ( 'Email to %s failed to send. Please try again later or ' + 'contact us to report a bug at ' + 'https://www.oppia.org/contact.') % feconf.ADMIN_EMAIL_ADDRESS) allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) swap_send_email_to_recipients = self.swap( platform_email_services, 'send_email_to_recipients', lambda *_: False) with allow_emailing, email_exception, swap_send_email_to_recipients: email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', bcc_admin=True)
{ "content_hash": "22e34c3119963bfdb776d3cd5e81c1c4", "timestamp": "", "source": "github", "line_count": 235, "max_line_length": 80, "avg_line_length": 44.782978723404256, "alnum_prop": 0.612599771949829, "repo_name": "prasanna08/oppia", "id": "cbc7eb3e16108b5050061955b9187f16606be996", "size": "11129", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "core/domain/email_services_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "97795" }, { "name": "HTML", "bytes": "1128491" }, { "name": "JavaScript", "bytes": "733121" }, { "name": "Python", "bytes": "9362251" }, { "name": "Shell", "bytes": "10639" }, { "name": "TypeScript", "bytes": "6077851" } ], "symlink_target": "" }
""" This module contains the :class:`Aggregation` class and its various subclasses. Each of these classes processes a column's data and returns a single value. For instance, :class:`Mean`, when applied to a :class:`.NumberColumn`, returns a single :class:`decimal.Decimal` value which is the average of all values in that column. Aggregations are applied to instances of :class:`.Column` using the :meth:`.Column.aggregate` method. Typically, the column is first retrieved using the :attr:`.Table.columns` attribute. Aggregations can also be applied to instances of :class:`.TableSet` using the :meth:`.Tableset.aggregate` method, in which case the result will be a new :class:`.Table` with a column for each aggregation and a row for each table in the set. """ from collections import defaultdict import datetime from agate.column_types import BooleanType, NumberType from agate.columns import BooleanColumn, DateColumn, DateTimeColumn, NumberColumn, TextColumn from agate.exceptions import NullCalculationError, UnsupportedAggregationError class Aggregation(object): #pragma: no cover """ Base class defining an operation that can be performed on a column either to yield an individual value or as part of a :class:`.TableSet` aggregate. """ def get_aggregate_column_type(self, column): """ Get the correct column type for an new column based on this aggregation. """ raise NotImplementedError() def run(self, column): """ Execute this aggregation on a given column and return the result. """ raise NotImplementedError() class HasNulls(Aggregation): """ Returns :code:`True` if the column contains null values. """ def get_aggregate_column_type(self, column): return BooleanType() def run(self, column): """ :returns: :class:`bool` """ return column.has_nulls() class Any(Aggregation): """ Returns :code:`True` if any value in a column passes a truth test. The truth test may be omitted when testing a :class:`.BooleanColumn`. 
:param test: A function that takes a value and returns :code:`True` or :code:`False`. """ def __init__(self, test=None): self._test = test def get_aggregate_column_type(self, column): return BooleanType() def run(self, column): """ :returns: :class:`bool` """ data = column.get_data() if isinstance(column, BooleanColumn): return any(data) elif not self._test: raise ValueError('You must supply a test function for non-BooleanColumn.') return any(self._test(d) for d in data) class All(Aggregation): """ Returns :code:`True` if all values in a column pass a truth test. The truth test may be omitted when testing a :class:`.BooleanColumn`. :param test: A function that takes a value and returns :code:`True` or :code:`False`. """ def __init__(self, test=None): self._test = test def get_aggregate_column_type(self, column): return BooleanType() def run(self, column): """ :returns: :class:`bool` """ data = column.get_data() if isinstance(column, BooleanColumn): return all(data) elif not self._test: raise ValueError('You must supply a test function for non-BooleanColumn.') return all(self._test(d) for d in data) class Length(Aggregation): """ Count the total number of values in the column. Equivalent to Python's :func:`len` function. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`int` """ return len(column) class Count(Aggregation): """ Count the number of times a specific value occurs in a column. If you want to count the total number of values in a column use :class:`Length`. :param value: The value to be counted. """ def __init__(self, value): self._value = value def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`int` """ return column.get_data().count(self._value) class Min(Aggregation): """ Compute the minimum value in a column. May be applied to :class:`.DateColumn`, :class:`.DateTimeColumn` and :class:`.NumberColumn`. 
""" def get_aggregate_column_type(self, column): if isinstance(column, DateColumn): return DateType() elif isinstance(column, DateTimeColumn): return DateTimeType() elif isinstance(column, NumberColumn): return NumberType() raise UnsupportedAggregationError(self, column) def run(self, column): """ :returns: :class:`datetime.date` """ supported_columns = (DateColumn, DateTimeColumn, NumberColumn) if not any(isinstance(column, t) for t in supported_columns): raise UnsupportedAggregationError(self, column) return min(column.get_data_without_nulls()) class Max(Aggregation): """ Compute the maximum value in a column. May be applied to :class:`.DateColumn`, :class:`.DateTimeColumn` and :class:`.NumberColumn`. """ def get_aggregate_column_type(self, column): if isinstance(column, DateColumn): return DateType() elif isinstance(column, DateTimeColumn): return DateTimeType() elif isinstance(column, NumberColumn): return NumberType() def run(self, column): """ :returns: :class:`datetime.date` """ supported_columns = (DateColumn, DateTimeColumn, NumberColumn) if not any(isinstance(column, t) for t in supported_columns): raise UnsupportedAggregationError(self, column) return max(column.get_data_without_nulls()) class Sum(Aggregation): """ Compute the sum of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) return column.sum() class Mean(Aggregation): """ Compute the mean value of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError return column.mean() class Median(Aggregation): """ Compute the median value of a column. This is the 50th percentile. 
See :class:`Percentiles` for implementation details. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError return column.median() class Mode(Aggregation): """ Compute the mode value of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError data = column.get_data() state = defaultdict(int) for n in data: state[n] += 1 return max(state.keys(), key=lambda x: state[x]) class IQR(Aggregation): """ Compute the inter-quartile range of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError percentiles = column.percentiles() return percentiles[75] - percentiles[25] class Variance(Aggregation): """ Compute the variance of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError return column.variance() class StDev(Aggregation): """ Compute the standard of deviation of a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`decimal.Decimal`. 
""" if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError return column.variance().sqrt() class MAD(Aggregation): """ Compute the `median absolute deviation <http://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of a column. """ def get_aggregate_column_type(self, column): return NumberType() def _median(self, data_sorted): length = len(data_sorted) if length % 2 == 1: return data_sorted[((length + 1) // 2) - 1] half = length // 2 a = data_sorted[half - 1] b = data_sorted[half] return (a + b) / 2 def run(self, column): """ :returns: :class:`decimal.Decimal`. """ if not isinstance(column, NumberColumn): raise UnsupportedAggregationError(self, column) if column.has_nulls(): raise NullCalculationError data = column.get_data_sorted() m = column.percentiles()[50] return self._median(tuple(abs(n - m) for n in data)) class MaxLength(Aggregation): """ Calculates the longest string in a column. """ def get_aggregate_column_type(self, column): return NumberType() def run(self, column): """ :returns: :class:`int`. """ if not isinstance(column, TextColumn): raise UnsupportedAggregationError(self, column) return max([len(d) for d in column.get_data_without_nulls()])
{ "content_hash": "e95b8ca37aa947210939fcded320ef72", "timestamp": "", "source": "github", "line_count": 384, "max_line_length": 101, "avg_line_length": 28.380208333333332, "alnum_prop": 0.6165351440631308, "repo_name": "captainsafia/agate", "id": "0b5d0a9c7f739494693ecd03726c69d99d3db7f5", "size": "10921", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "agate/aggregations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "148944" } ], "symlink_target": "" }
''' Some functions to show tree graphs. Preconditions ------------- The folowing libraries should be installed `matplotlib, networkx, graphviz, pygraphviz` Please use conda or pip. Usage ----- ``` from showtree import show_binary_tree, show_tree_graph ``` ''' import matplotlib.pyplot as plt import matplotlib import networkx as nx from networkx.drawing.nx_agraph import write_dot, graphviz_layout def uid_gen(): '''node id generator''' n = 0 while True: n += 1 yield n uid = uid_gen() def show_tree_graph(G): """ Shows a tree graph. Parameters ---------- G : NetworkX tree graph A tree graph created with NetworkX Examples -------- >>> gg = nx.balanced_tree(3, 2) >>> show_tree_graph(gg) """ plt.rcParams["figure.figsize"] = [10., 7.] pos = graphviz_layout(G, prog='dot') # null_nodes = [x for x in G.nodes if G.node[x]['label'] == ''] not_null_nodes = [ x for x in G.nodes if G.node[x].get('label', str(x)) != ''] # null_edges = [e for e in G.edges if G.node[e[1]]['label'] == ''] # not_null_edges = [e for e in G.edges if G.node[e[1]]['label'] != ''] node_lbls = nx.get_node_attributes(G, 'label') edge_lbls = nx.get_edge_attributes(G, 'label') nx.draw(G, pos, with_labels=True, nodelist=not_null_nodes if len(not_null_nodes) > 0 else None, # edgelist=not_null_edges, labels=node_lbls if len(node_lbls) > 0 else None, width=1.0, linewidths=0.0, node_size=700, node_color="#1485CC", edge_color="#cccccc", font_size=12, label="BST", alpha=1.0 ) nx.draw_networkx_edge_labels(G, pos, font_size=8, edge_labels=edge_lbls ) # nx.draw_networkx_nodes(G, pos, node_size=700, alpha=1.0, # node_color="white", nodelist=null_nodes) # nx.draw_networkx_edges(G, pos, alpha=0.9, width=6, edge_color="orange", edgelist=[(1, 'Petya')]) # plt.figure(1) plt.show() def build_binary_tree_graph(nx_graph, parent_node_id, tree_node, label_attr='data', edge_label=None): if not tree_node: node_id = next(uid) nx_graph.add_node(node_id, label='') if parent_node_id != None: nx_graph.add_edge(parent_node_id, node_id, 
label=edge_label) return node_id = next(uid) nx_graph.add_node(node_id, label=getattr(tree_node, label_attr, '')) if parent_node_id != None: nx_graph.add_edge(parent_node_id, node_id, label=edge_label) if tree_node.left or tree_node.right: build_binary_tree_graph( nx_graph, node_id, tree_node.left, label_attr, 'L') build_binary_tree_graph( nx_graph, node_id, tree_node.right, label_attr, 'R') def show_binary_tree(root_node, label_attr='data'): """ Shows a tree of nodes similar to: ``` class Node: def __init__(self, val=''): self.data = val self.left = None self.right = None ``` The nodes on the chart will be labeled with `data` attribute. If you want to use a different attribute change `label_attr` parameter. Parameters ---------- root_node : the root node of a tree. A tree graph created with NetworkX label_attr: an attribute used for labeling nodes Examples -------- >>> show_binary_tree(root) """ G = nx.DiGraph() build_binary_tree_graph(G, None, root_node, label_attr) show_tree_graph(G) if __name__ == '__main__': from random import sample, seed class Node: def __init__(self, val=''): self.data = val self.left = None self.right = None def add_node(root, val): if not root: return Node(val) if val < root.data: root.left = add_node(root.left, val) elif val > root.data: root.right = add_node(root.right, val) return root def build_bst(lst): bst = None for v in lst: bst = add_node(bst, v) return bst seed(1) r = sample(range(11, 100), 20) show_binary_tree(build_bst(r)) gg = nx.balanced_tree(3, 2) show_tree_graph(gg)
{ "content_hash": "663c50d2b24d09617aa6c9b07482b575", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 102, "avg_line_length": 24.982758620689655, "alnum_prop": 0.5571658615136876, "repo_name": "vadim-ivlev/STUDY", "id": "30bbd955e539e155b7d9fc2abcd96dc576f4a1c2", "size": "4448", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "coding/showtree.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "256" }, { "name": "CSS", "bytes": "153975" }, { "name": "CoffeeScript", "bytes": "167" }, { "name": "HTML", "bytes": "2681792" }, { "name": "JavaScript", "bytes": "2696471" }, { "name": "Jupyter Notebook", "bytes": "2453649" }, { "name": "Kotlin", "bytes": "63" }, { "name": "Mathematica", "bytes": "349964" }, { "name": "PHP", "bytes": "1874" }, { "name": "Perl", "bytes": "716" }, { "name": "Prolog", "bytes": "2424" }, { "name": "Python", "bytes": "53057" }, { "name": "Ruby", "bytes": "2213" }, { "name": "Shell", "bytes": "1308" }, { "name": "Vue", "bytes": "1776" }, { "name": "XSLT", "bytes": "45170" } ], "symlink_target": "" }
''' ------------------------------------------------------------------------------ Copyright 2016 Esri Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------------ ================================================== VisibilityUtilitiesTestCase.py -------------------------------------------------- requirements: ArcGIS 10.3+, Python 2.7 or Python 3.4 author: ArcGIS Solutions contact: support@esri.com company: Esri ================================================== description: Unit tests for Visibility tools ================================================== history: 11/28/2016 - mf - original coding ================================================== ''' # IMPORTS ========================================== import os import unittest import arcpy # Add parent folder to python path if running test case standalone import sys sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))) import UnitTestUtilities import Configuration import arcpyAssert # Add scripts to path so can call methods directly Configuration.addScriptsPath() import VisibilityUtilities # LOCALS =========================================== deleteIntermediateData = [] # intermediate datasets to be deleted debug = True # extra messaging during development # FUNCTIONS ======================================== class VisibilityUtilitiesTestCase(unittest.TestCase, arcpyAssert.FeatureClassAssertMixin): ''' ''' def setUp(self): ''' Initialization needed if running 
Test Case standalone ''' Configuration.GetLogger() Configuration.GetPlatform() ''' End standalone initialization ''' Configuration.Logger.debug(".....VisibilityUtilityTestCase.setup") UnitTestUtilities.checkArcPy() arcpy.env.overwriteOutput = True if arcpy.CheckExtension("Spatial") == "Available": arcpy.CheckOutExtension("Spatial") else: raise Exception("Spatial license is not available.") if arcpy.CheckExtension("3D") == "Available": arcpy.CheckOutExtension("3D") else: raise Exception("3D license is not available.") self.srWGS84 = arcpy.SpatialReference(4326) # GCS_WGS_1984 self.srWAZED = arcpy.SpatialReference(54032) # World Azimuthal Equidistant self.inputArea = os.path.join(Configuration.militaryInputDataGDB, "AreaofInterest") self.inputSurface = os.path.join(Configuration.militaryInputDataGDB, "ElevationUTM_Zone10") self.inputSigActsTable = os.path.join(Configuration.militaryInputDataGDB, "SigActs") if not arcpy.Exists(Configuration.militaryScratchGDB): Configuration.militaryScratchGDB = UnitTestUtilities.createScratch(Configuration.currentPath) def tearDown(self): Configuration.Logger.debug(".....VisibilityUtilityTestCase.teardown") arcpy.CheckInExtension("Spatial") arcpy.CheckInExtension("3D") if len(deleteIntermediateData) > 0: for i in deleteIntermediateData: if arcpy.Exists(i): if debug: arcpy.AddMessage("Removing intermediate: {0}".format(i)) arcpy.Delete_management(i) # UnitTestUtilities.deleteScratch(Configuration.militaryScratchGDB) # Test internal methods def test_getFieldNameList(self): ''' Testing internal method _getFieldNameList() ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getFieldNameList") junkTable = os.path.join("in_memory","junkTable") #if arcpy.Exists(junkTable): arcpy.Delete_management(junkTable) arcpy.CreateTable_management(os.path.dirname(junkTable), os.path.basename(junkTable)) deleteIntermediateData.append(junkTable) expectedNames = [arcpy.Describe(junkTable).OIDFieldName.upper(), "D1", "T2"] 
arcpy.AddField_management(junkTable, expectedNames[1], "DOUBLE") arcpy.AddField_management(junkTable, expectedNames[2], "TEXT") # IMPORTANT: Fields names are returned in UPPERCASE for some reason resultNames = VisibilityUtilities._getFieldNameList(junkTable, []) self.assertEqual(expectedNames, resultNames, "Did not get expected field names. Got {0} instead.".format(str(resultNames))) def test_addDoubleField(self): ''' Testing internal method _addDoubleField() ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_addDoubleField") newFields = {"A1":[0.0, "A1 field"], "A2":[1.1, "A2 field"]} junkTable = os.path.join("in_memory","junkTable") #if arcpy.Exists(junkTable): arcpy.Delete_management(junkTable) arcpy.CreateTable_management(os.path.dirname(junkTable), os.path.basename(junkTable)) deleteIntermediateData.append(junkTable) VisibilityUtilities._addDoubleField(junkTable, newFields) resultFields = [] for f in arcpy.ListFields(junkTable): resultFields.append(f.name) expectedFields = list(["ObjectID"] + list(newFields.keys())) self.assertEqual(expectedFields, resultFields, "Expected fields {0} were not added. 
Got {1} instead.".format(expectedFields, resultFields)) def test_calculateFieldValue(self): ''' Testing internal method _calculateFieldValue() ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_calculateFieldValue") expectedNames = ["D1", "T2"] junkTable = os.path.join("in_memory","junkTable") #if arcpy.Exists(junkTable): arcpy.Delete_management(junkTable) arcpy.CreateTable_management(os.path.dirname(junkTable), os.path.basename(junkTable)) arcpy.AddField_management(junkTable, expectedNames[0], "DOUBLE") arcpy.AddField_management(junkTable, expectedNames[1], "TEXT") deleteIntermediateData.append(junkTable) with arcpy.da.InsertCursor(junkTable, [expectedNames[0]]) as iCursor: for i in range(0,4): iCursor.insertRow([float(i)]) del iCursor testValue = "'valueT2'" VisibilityUtilities._calculateFieldValue(junkTable, expectedNames[1], testValue) resultFieldValueSet = set([row[0] for row in arcpy.da.SearchCursor(junkTable, [expectedNames[1]])]) self.assertEqual(len(resultFieldValueSet),1,"_calculateFieldValue returned bad field values: {0}".format(str(resultFieldValueSet))) def test_getRasterMinMax(self): ''' test internal method _getRasterMinMax ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getRasterMinMax") resultMin, resultMax = VisibilityUtilities._getRasterMinMax(self.inputSurface) expectedMin = int(-41) self.assertEqual(expectedMin, resultMin, "Expected minimum of {0}, but got {1}".format(expectedMin, resultMin)) expectedMax = int(1785) self.assertEqual(expectedMax, resultMax, "Expected maximum of {0}, but got {1}".format(expectedMax, resultMax)) def test_clipRasterToArea(self): ''' Compare result of _clipRasterToArea result to known, good comparison dataset ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_clipRasterToArea") expectedOutput = os.path.join(Configuration.militaryResultsGDB, "ExpectedOutputclipRasterToArea") resultClippedRaster = os.path.join(Configuration.militaryScratchGDB, "resultClippedRaster") 
resultClippedRaster = VisibilityUtilities._clipRasterToArea(self.inputSurface, self.inputArea, resultClippedRaster) deleteIntermediateData.append(resultClippedRaster) result = arcpy.RasterCompare_management(expectedOutput, resultClippedRaster,"RASTER_DATASET","Columns And Rows;NoData;Pixel Value;Raster Attribute Table","","","All 1 Fraction","","").getOutput(1) self.assertEqual(result, "true", "Raster Compare failed: \n %s" % arcpy.GetMessages()) def test_getUniqueValuesFromField001(self): ''' Test _getUniqueValuesFromField with SigActs table's AttackScal field. ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getUniqueValuesFromField001") expectedAttackScal = ["Macro", "Micro"] resultAttackScal = VisibilityUtilities._getUniqueValuesFromField(self.inputSigActsTable, "AttackScal") self.assertEqual(len(expectedAttackScal), len(resultAttackScal), "Expected {0} unique values, but got {1}.".format(expectedAttackScal, resultAttackScal)) def test_getUniqueValuesFromField002(self): ''' Test _getUniqueValuesFromField with SigActs table's NoAttacks field. ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getUniqueValuesFromField002") expectedNoAttacks = [0, 1, 2, 3] resultNoAttacks = VisibilityUtilities._getUniqueValuesFromField(self.inputSigActsTable, "NoAttacks") self.assertEqual(len(expectedNoAttacks), len(resultNoAttacks), "Expected {0} unique values, but got {1}.".format(expectedNoAttacks, resultNoAttacks)) def test_getCentroid_FromPoints(self): ''' Testing _getCentroid from point feature class with 4 points. 
''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getCentroid") # make a featureclass of points pntArray = arcpy.Array([arcpy.Point(0,0), arcpy.Point(0,2), arcpy.Point(2,0), arcpy.Point(2,2)]) fc = arcpy.CreateFeatureclass_management("in_memory", "fc", "POINT", None, "DISABLED", "DISABLED", self.srWGS84)[0] with arcpy.da.InsertCursor(fc, ["SHAPE@"]) as cursor: for pnt in pntArray: cursor.insertRow([arcpy.PointGeometry(pnt)]) ##################################### #TODO: TEST FAILING when run with other test methods return ##################################### resultCentroid = VisibilityUtilities._getCentroid(fc) self.assertIsNotNone(resultCentroid) resultPoint = resultCentroid.firstPoint # determine centroid of X and Y coordinate sets pX, pY, count = 0, 0, 0 for p in pntArray: pX += p.X pY += p.Y count += 1 cX = float(pX)/float(count) cY = float(pY)/float(count) comparePoint = arcpy.Point(cX, cY) arcpy.AddMessage("comparePoint.X: {0}".format(comparePoint.X)) arcpy.AddMessage(comparePoint.X) arcpy.AddMessage("resultPoint.X: {0}".format(resultPoint.X)) arcpy.AddMessage(resultPoint.X) arcpy.AddMessage("comparePoint.X is resultPoint.X: {0}".format(comparePoint.X is resultPoint.X)) arcpy.AddMessage("comparePoint.X == resultPoint.X: {0}".format(comparePoint.X == resultPoint.X)) self.assertAlmostEqual(comparePoint.X, resultPoint.X, places=6, msg="Unexpected centroid X. Expected {0}, but got {1}".format(comparePoint.X, resultPoint.X)) self.assertAlmostEqual(comparePoint.Y, resultPoint.Y, places=6, msg="Unexpected centroid Y. 
Expected {0}, but got {1}".format(comparePoint.Y, resultPoint.Y)) def test_getLocalWAZED(self): ''' ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_getLocalWAZED") testInputPoint = arcpy.PointGeometry(arcpy.Point(-11.13, 14.87), self.srWGS84) resultSR = VisibilityUtilities._getLocalWAZED(testInputPoint) # arcpy.AddMessage("======================") # arcpy.AddMessage(resultSR.exportToString()) # arcpy.AddMessage("======================") # arcpy.AddMessage(self.srWAZED.exportToString()) # arcpy.AddMessage("======================") self.assertIsNotNone(resultSR) self.assertEqual(resultSR.name, self.srWAZED.name, \ "Compare expected Spatial Reference Name: {0} with result {1} failed.".format(self.srWAZED.name, resultSR.name)) self.assertEqual(resultSR.projectionName, self.srWAZED.projectionName, \ "Compare expected Spatial Reference Name: {0} with result {1} failed.".format(self.srWAZED.projectionName, resultSR.projectionName)) # factoryCode not set by _getLocalWAZED # self.assertEqual(resultSR.factoryCode, self.srWAZED.factoryCode, \ # "Compare expected Spatial Reference Code: {0} with result {1} failed.".format(self.srWAZED.factoryCode, resultSR.factoryCode)) def test_prepPointFromSurface(self): ''' ''' # Test external methods def test_hi_lowPointByArea_lowest(self): ''' test hi_lowPointByArea for MINIMUM (lowest) setting. 
''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_hi_lowPointByArea_lowest") hi_low_Switch = "MINIMUM" resultPoints = os.path.join(Configuration.militaryScratchGDB, "lowestPoints") VisibilityUtilities.hi_lowPointByArea(self.inputArea, self.inputSurface, hi_low_Switch, resultPoints) deleteIntermediateData.append(resultPoints) self.assertTrue(arcpy.Exists(resultPoints), "Output features do not exist or were not created") expectedLowest = os.path.join(Configuration.militaryResultsGDB, "ExpectedOutputLowestPt") # TODO: need to regenerate the expected feature class # self.assertFeatureClassEqualSimple(resultPoints, expectedLowest, \ # "OBJECTID", 0.0001) def test_hi_lowPointByArea_highest(self): ''' test hi_lowPointByArea for MAXIMUM (highest) setting. ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_hi_lowPointByArea_highest") hi_low_Switch = "MAXIMUM" resultPoints = os.path.join(Configuration.militaryScratchGDB, "highestPoints") resultPoints = VisibilityUtilities.hi_lowPointByArea(self.inputArea, self.inputSurface, hi_low_Switch, resultPoints) deleteIntermediateData.append(resultPoints) self.assertTrue(arcpy.Exists(resultPoints), "Output features do not exist or were not created") expectedHighest = os.path.join(Configuration.militaryResultsGDB, "ExpectedOutputHighestPt") # TODO: need to regenerate the expected feature class # self.assertFeatureClassEqualSimple(resultPoints, expectedHighest, \ # "OBJECTID", 0.0001) # Test tool methods def test_findLocalPeaks(self): ''' test_findLocalPeaks with input 10 peaks to find ''' Configuration.Logger.info(".....VisibilityUtilityTestCase.test_findLocalPeaks") resultPoints = os.path.join(Configuration.militaryScratchGDB, "findLocalPeaks") numPoints = 16 resultPoints = VisibilityUtilities.findLocalPeaks(self.inputArea, numPoints, self.inputSurface, resultPoints) deleteIntermediateData.append(resultPoints) expectedLocalPeaks = os.path.join(Configuration.militaryResultsGDB, 
"ExpectedOutputFindLocalPeaks") self.assertTrue(arcpy.Exists(resultPoints), "Output features do not exist or were not created") pointCount = int(arcpy.GetCount_management(resultPoints).getOutput(0)) expectedFeatureCount = int(16) self.assertGreaterEqual(pointCount, expectedFeatureCount, "Expected %s features, but got %s" % (str(expectedFeatureCount), str(pointCount))) # TODO: need to regenerate the expected feature class #self.assertFeatureClassEqualSimple(resultPoints, expectedLocalPeaks, \ # "OBJECTID", 0.0001) # def test_addLLOSFields001(self): # ''' # Test addLLOSFields with user-defined values # ''' # pass # # def test_addLLOSFields002(self): # ''' # Test addLLOSFields with default values # ''' # pass # # def test_addRLOSObserverFields001(self): # ''' # Test addRLOSObserverFields with user-defined values # ''' # pass # # def test_addRLOSObserverFields002(self): # ''' # Test addRLOSObserverFields with default values # ''' # pass if __name__ == "__main__": unittest.main()
{ "content_hash": "9acc7fc3b5fc44996c87276328afae64", "timestamp": "", "source": "github", "line_count": 393, "max_line_length": 204, "avg_line_length": 45.73791348600509, "alnum_prop": 0.6119610570236439, "repo_name": "Esri/military-tools-geoprocessing-toolbox", "id": "fdbd9654a843f6eecfc6d3b8719f5778afbc0d4f", "size": "17991", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "utils/test/visibility_tests/VisibilityUtilitiesTestCase.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "5466" }, { "name": "Python", "bytes": "708673" } ], "symlink_target": "" }
from models import BaseModel class ApplicationModel(BaseModel): table = "applications" db = "console" fields={ "project_url":True, "project_name":True, "storage_path":True, "app_name":True, "user_id":True, "status":True, "logs":True, "update_time":True, 'create_time':True }
{ "content_hash": "34b0ae30bf1a1d41bfbdd4769f7aa4f7", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 34, "avg_line_length": 21.647058823529413, "alnum_prop": 0.5380434782608695, "repo_name": "SimonWang2014/DockerConsoleApp", "id": "86eef429d4cd70c11340f6920fc0c467f34c3265", "size": "368", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "models/application.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "24139" }, { "name": "CSS", "bytes": "8038" }, { "name": "HTML", "bytes": "30868" }, { "name": "JavaScript", "bytes": "35496" }, { "name": "Python", "bytes": "203201" }, { "name": "Shell", "bytes": "100" } ], "symlink_target": "" }
""" This module provides access to LDAP servers, along with some basic functionality required for Hue and User Admin to work seamlessly with LDAP. """ import ldap import ldap.filter import logging import re from django.contrib.auth.models import User import desktop.conf from desktop.lib.python_util import CaseInsensitiveDict LOG = logging.getLogger(__name__) CACHED_LDAP_CONN = None def get_connection_from_server(server=None): ldap_config = desktop.conf.LDAP.LDAP_SERVERS.get()[server] if server else desktop.conf.LDAP return get_connection(ldap_config) def get_connection(ldap_config): global CACHED_LDAP_CONN if CACHED_LDAP_CONN is not None: return CACHED_LDAP_CONN ldap_url = ldap_config.LDAP_URL.get() username = ldap_config.BIND_DN.get() password = desktop.conf.get_ldap_password(ldap_config) ldap_cert = ldap_config.LDAP_CERT.get() search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get() if ldap_url is None: raise Exception('No LDAP URL was specified') if search_bind_authentication: return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert) else: return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert) def get_ldap_username(username, nt_domain): if nt_domain: return '%s@%s' % (username, nt_domain) else: return username def get_ldap_user_kwargs(username): if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get(): return { 'username__iexact': username } else: return { 'username': username } def get_ldap_user(username): username_kwargs = get_ldap_user_kwargs(username) return User.objects.get(**username_kwargs) def get_or_create_ldap_user(username): username_kwargs = get_ldap_user_kwargs(username) users = User.objects.filter(**username_kwargs) if users.exists(): return User.objects.get(**username_kwargs), False else: username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username return User.objects.create(username=username), True class 
LdapConnection(object): """ Constructor creates LDAP connection. Contains methods to easily query an LDAP server. """ def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None): """ Constructor initializes the LDAP connection """ self.ldap_config = ldap_config if cert_file is not None: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW) ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file) if self.ldap_config.FOLLOW_REFERRALS.get(): ldap.set_option(ldap.OPT_REFERRALS, 1) else: ldap.set_option(ldap.OPT_REFERRALS, 0) if ldap_config.DEBUG.get(): ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get()) self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get()) if bind_user is not None: try: self.ldap_handle.simple_bind_s(bind_user, bind_password) except: raise RuntimeError("Failed to bind to LDAP server as user %s" % bind_user) else: try: # Do anonymous bind self.ldap_handle.simple_bind_s('','') except: raise RuntimeError("Failed to bind to LDAP server anonymously") def _get_search_params(self, name, attr, find_by_dn=False): """ if we are to find this ldap object by full distinguished name, then search by setting search_dn to the 'name' rather than by filtering by 'attr'. """ base_dn = self._get_root_dn() if find_by_dn: search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name) if not search_dn.upper().endswith(base_dn.upper()): raise RuntimeError("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % { 'base_dn': base_dn, 'dn': search_dn }) return (search_dn, '') else: return (base_dn, '(' + attr + '=' + name + ')') def _transform_find_user_results(self, result_data, user_name_attr): """ :param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request. 
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary. :returns list of dictionaries that take on the following form: { 'dn': <distinguished name of entry>, 'username': <ldap attribute associated with user_name_attr> 'first': <first name> 'last': <last name> 'email': <email> 'groups': <list of DNs of groups that user is a member of> } """ user_info = [] if result_data: for dn, data in result_data: # Skip Active Directory # refldap entries. if dn is not None: # Case insensitivity data = CaseInsensitiveDict.from_dict(data) # Skip unnamed entries. if user_name_attr not in data: LOG.warn('Could not find %s in ldap attributes' % user_name_attr) continue ldap_info = { 'dn': dn, 'username': data[user_name_attr][0] } if 'givenName' in data: ldap_info['first'] = data['givenName'][0] if 'sn' in data: ldap_info['last'] = data['sn'][0] if 'mail' in data: ldap_info['email'] = data['mail'][0] # memberOf and isMemberOf should be the same if they both exist if 'memberOf' in data: ldap_info['groups'] = data['memberOf'] if 'isMemberOf' in data: ldap_info['groups'] = data['isMemberOf'] user_info.append(ldap_info) return user_info def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr): group_info = [] if result_data: for dn, data in result_data: # Skip Active Directory # refldap entries. if dn is not None: # Case insensitivity data = CaseInsensitiveDict.from_dict(data) # Skip unnamed entries. 
if group_name_attr not in data: LOG.warn('Could not find %s in ldap attributes' % group_name_attr) continue ldap_info = { 'dn': dn, 'name': data[group_name_attr][0] } if group_member_attr in data and 'posixGroup' not in data['objectClass']: ldap_info['members'] = data[group_member_attr] else: ldap_info['members'] = [] if 'posixGroup' in data['objectClass'] and 'memberUid' in data: ldap_info['posix_members'] = data['memberUid'] else: ldap_info['posix_members'] = [] group_info.append(ldap_info) return group_info def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE): """ LDAP search helper method finding users. This supports searching for users by distinguished name, or the configured username attribute. :param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none. :param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value. :param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary. :param find_by_dn: Search by distinguished name. :param scope: ldapsearch scope. 
:returns: List of dictionaries that take on the following form: { 'dn': <distinguished name of entry>, 'username': <ldap attribute associated with user_name_attr> 'first': <first name> 'last': <last name> 'email': <email> 'groups': <list of DNs of groups that user is a member of> } `` """ if not search_attr: search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get() if not user_name_attr: user_name_attr = search_attr if not user_filter: user_filter = self.ldap_config.USERS.USER_FILTER.get() if not user_filter.startswith('('): user_filter = '(' + user_filter + ')' # Allow wild cards on non distinguished names sanitized_name = ldap.filter.escape_filter_chars(username_pattern).replace(r'\2a', r'*') # Fix issue where \, is converted to \5c, sanitized_name = sanitized_name.replace(r'\5c,', r'\2c') search_dn, user_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn) ldap_filter = '(&' + user_filter + user_name_filter + ')' attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', user_name_attr] ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist) result_type, result_data = self.ldap_handle.result(ldap_result_id) if result_type == ldap.RES_SEARCH_RESULT: return self._transform_find_user_results(result_data, user_name_attr) else: return [] def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE): """ LDAP search helper method for finding groups :param groupname_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none. :param search_attr: The ldap attribute to search for ``groupname_pattern``. Defaults to LDAP -> GROUPS -> GROUP_NAME_ATTR config value. :param group_name_attr: The ldap attribute that is returned by the server to map to ``name`` in the return dictionary. :param find_by_dn: Search by distinguished name. :param scope: ldapsearch scope. 
:returns: List of dictionaries that take on the following form: { 'dn': <distinguished name of entry>, 'name': <ldap attribute associated with group_name_attr> 'first': <first name> 'last': <last name> 'email': <email> 'groups': <list of DNs of groups that user is a member of> } """ if not search_attr: search_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get() if not group_name_attr: group_name_attr = search_attr if not group_member_attr: group_member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get() if not group_filter: group_filter = self.ldap_config.GROUPS.GROUP_FILTER.get() if not group_filter.startswith('('): group_filter = '(' + group_filter + ')' # Allow wild cards on non distinguished names sanitized_name = ldap.filter.escape_filter_chars(groupname_pattern).replace(r'\2a', r'*') search_dn, group_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn) ldap_filter = '(&' + group_filter + group_name_filter + ')' attrlist = ['objectClass', 'dn', 'memberUid', group_member_attr, group_name_attr] ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist) result_type, result_data = self.ldap_handle.result(ldap_result_id) if result_type == ldap.RES_SEARCH_RESULT: return self._transform_find_group_results(result_data, group_name_attr, group_member_attr) else: return [] def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE): if ldap_filter and not ldap_filter.startswith('('): ldap_filter = '(' + ldap_filter + ')' # Allow wild cards on non distinguished names dn = ldap.filter.escape_filter_chars(dn).replace(r'\2a', r'*') # Fix issue where \, is converted to \5c, dn = dn.replace(r'\5c,', r'\2c') search_dn, _ = self._get_search_params(dn, search_attr) ldap_filter = '(&%(ldap_filter)s(|(isMemberOf=%(group_dn)s)(memberOf=%(group_dn)s)))' % {'group_dn': dn, 'ldap_filter': ldap_filter} attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', search_attr] 
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist) result_type, result_data = self.ldap_handle.result(ldap_result_id) if result_type == ldap.RES_SEARCH_RESULT: return result_data else: return [] def find_users_of_group(self, dn): ldap_filter = self.ldap_config.USERS.USER_FILTER.get() name_attr = self.ldap_config.USERS.USER_NAME_ATTR.get() result_data = self.find_members_of_group(dn, name_attr, ldap_filter) return self._transform_find_user_results(result_data, name_attr) def find_groups_of_group(self, dn): ldap_filter = self.ldap_config.GROUPS.GROUP_FILTER.get() name_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get() member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get() result_data = self.find_members_of_group(dn, name_attr, ldap_filter) return self._transform_find_group_results(result_data, name_attr, member_attr) def _get_root_dn(self): return self.ldap_config.BASE_DN.get()
{ "content_hash": "9a07400b27d9a5275e8155dff545be52", "timestamp": "", "source": "github", "line_count": 350, "max_line_length": 170, "avg_line_length": 37.30285714285714, "alnum_prop": 0.6546415441176471, "repo_name": "vitan/hue", "id": "1ec7a09aa39837ff82c4429f9b9a19f784ff62e3", "size": "13847", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "apps/useradmin/src/useradmin/ldap_access.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "207947" }, { "name": "C", "bytes": "2350097" }, { "name": "C++", "bytes": "178518" }, { "name": "CSS", "bytes": "502213" }, { "name": "Emacs Lisp", "bytes": "14875" }, { "name": "GAP", "bytes": "11337" }, { "name": "Genshi", "bytes": "946" }, { "name": "Groff", "bytes": "14877" }, { "name": "HTML", "bytes": "21550731" }, { "name": "Java", "bytes": "3080564" }, { "name": "JavaScript", "bytes": "2677283" }, { "name": "Makefile", "bytes": "86291" }, { "name": "Mako", "bytes": "2038826" }, { "name": "Myghty", "bytes": "936" }, { "name": "PLSQL", "bytes": "13774" }, { "name": "Perl", "bytes": "161801" }, { "name": "PigLatin", "bytes": "328" }, { "name": "Prolog", "bytes": "4590" }, { "name": "Python", "bytes": "31475669" }, { "name": "Scala", "bytes": "64604" }, { "name": "Shell", "bytes": "48346" }, { "name": "Smarty", "bytes": "130" }, { "name": "TeX", "bytes": "129526" }, { "name": "Thrift", "bytes": "100994" }, { "name": "XSLT", "bytes": "342237" } ], "symlink_target": "" }
print "Content-type: text/html" print print "<pre>" import os, sys from cgi import escape print "<strong>Python %s</strong>" % sys.version keys = os.environ.keys() keys.sort() for k in keys: print "%s\t%s" % (escape(k), escape(os.environ[k])) print "</pre>" ## end of http://code.activestate.com/recipes/52220/ }}}
{ "content_hash": "6c5de949c89a7e1c86974ca7e799b543", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 56, "avg_line_length": 26.583333333333332, "alnum_prop": 0.670846394984326, "repo_name": "bhavanaananda/DataStage", "id": "c853b437a45d434bf4ecd5eca69de33bd2048534", "size": "337", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "spike/applications/Admiral/ViewOrSubmitDatasets/cgi-bin/simplecgi.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "16" }, { "name": "JavaScript", "bytes": "4864312" }, { "name": "PHP", "bytes": "6124" }, { "name": "Python", "bytes": "1815044" }, { "name": "Shell", "bytes": "67509" } ], "symlink_target": "" }
import logging import sys from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.models.loading import get_model from haystack.backends import BaseSearchBackend, BaseSearchQuery, log_query, EmptyResults from haystack.constants import ID, DJANGO_CT, DJANGO_ID from haystack.exceptions import MissingDependency, MoreLikeThisError from haystack.models import SearchResult from haystack.utils import get_identifier try: from django.db.models.sql.query import get_proxied_model except ImportError: # Likely on Django 1.0 get_proxied_model = None try: from pysolr import Solr, SolrError except ImportError: raise MissingDependency("The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.") BACKEND_NAME = 'solr' class SearchBackend(BaseSearchBackend): # Word reserved by Solr for special use. RESERVED_WORDS = ( 'AND', 'NOT', 'OR', 'TO', ) # Characters reserved by Solr for special use. # The '\\' must come first, so as not to overwrite the other slash replacements. 
RESERVED_CHARACTERS = ( '\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*', '?', ':', ) def __init__(self, site=None): super(SearchBackend, self).__init__(site) if not hasattr(settings, 'HAYSTACK_SOLR_URL'): raise ImproperlyConfigured('You must specify a HAYSTACK_SOLR_URL in your settings.') timeout = getattr(settings, 'HAYSTACK_SOLR_TIMEOUT', 10) self.conn = Solr(settings.HAYSTACK_SOLR_URL, timeout=timeout) self.log = logging.getLogger('haystack') def update(self, index, iterable, commit=True): docs = [] try: for obj in iterable: docs.append(index.full_prepare(obj)) except UnicodeDecodeError: sys.stderr.write("Chunk failed.\n") if len(docs) > 0: try: self.conn.add(docs, commit=commit, boost=index.get_field_weights()) except (IOError, SolrError), e: self.log.error("Failed to add documents to Solr: %s", e) def remove(self, obj_or_string, commit=True): solr_id = get_identifier(obj_or_string) try: kwargs = { 'commit': commit, ID: solr_id } self.conn.delete(**kwargs) except (IOError, SolrError), e: self.log.error("Failed to remove document '%s' from Solr: %s", solr_id, e) def clear(self, models=[], commit=True): try: if not models: # *:* matches all docs in Solr self.conn.delete(q='*:*', commit=commit) else: models_to_delete = [] for model in models: models_to_delete.append("%s:%s.%s" % (DJANGO_CT, model._meta.app_label, model._meta.module_name)) self.conn.delete(q=" OR ".join(models_to_delete), commit=commit) # Run an optimize post-clear. 
http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99 self.conn.optimize() except (IOError, SolrError), e: if len(models): self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e) else: self.log.error("Failed to clear Solr index: %s", e) @log_query def search(self, query_string, sort_by=None, start_offset=0, end_offset=None, fields='', highlight=False, facets=None, date_facets=None, query_facets=None, narrow_queries=None, spelling_query=None, limit_to_registered_models=None, result_class=None, **kwargs): if len(query_string) == 0: return { 'results': [], 'hits': 0, } kwargs = { 'fl': '* score', } if fields: kwargs['fl'] = fields if sort_by is not None: kwargs['sort'] = sort_by if start_offset is not None: kwargs['start'] = start_offset if end_offset is not None: kwargs['rows'] = end_offset - start_offset if highlight is True: kwargs['hl'] = 'true' kwargs['hl.fragsize'] = '200' if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False) is True: kwargs['spellcheck'] = 'true' kwargs['spellcheck.collate'] = 'true' kwargs['spellcheck.count'] = 1 if spelling_query: kwargs['spellcheck.q'] = spelling_query if facets is not None: kwargs['facet'] = 'on' kwargs['facet.field'] = facets if date_facets is not None: kwargs['facet'] = 'on' kwargs['facet.date'] = date_facets.keys() kwargs['facet.date.other'] = 'none' for key, value in date_facets.items(): kwargs["f.%s.facet.date.start" % key] = self.conn._from_python(value.get('start_date')) kwargs["f.%s.facet.date.end" % key] = self.conn._from_python(value.get('end_date')) gap_by_string = value.get('gap_by').upper() gap_string = "%d%s" % (value.get('gap_amount'), gap_by_string) if value.get('gap_amount') != 1: gap_string += "S" kwargs["f.%s.facet.date.gap" % key] = '+%s/%s' % (gap_string, gap_by_string) if query_facets is not None: kwargs['facet'] = 'on' kwargs['facet.query'] = ["%s:%s" % (field, value) for field, value in query_facets] if limit_to_registered_models is 
None: limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True) if limit_to_registered_models: # Using narrow queries, limit the results to only models registered # with the current site. if narrow_queries is None: narrow_queries = set() registered_models = self.build_registered_models_list() if len(registered_models) > 0: narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(registered_models))) if narrow_queries is not None: kwargs['fq'] = list(narrow_queries) try: raw_results = self.conn.search(query_string, **kwargs) except (IOError, SolrError), e: self.log.error("Failed to query Solr using '%s': %s", query_string, e) raw_results = EmptyResults() return self._process_results(raw_results, highlight=highlight, result_class=result_class) def more_like_this(self, model_instance, additional_query_string=None, start_offset=0, end_offset=None, limit_to_registered_models=None, result_class=None, **kwargs): # Handle deferred models. if get_proxied_model and hasattr(model_instance, '_deferred') and model_instance._deferred: model_klass = get_proxied_model(model_instance._meta) else: model_klass = type(model_instance) index = self.site.get_index(model_klass) field_name = index.get_content_field() params = { 'fl': '*,score', } if start_offset is not None: params['start'] = start_offset if end_offset is not None: params['rows'] = end_offset narrow_queries = set() if limit_to_registered_models is None: limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True) if limit_to_registered_models: # Using narrow queries, limit the results to only models registered # with the current site. 
if narrow_queries is None: narrow_queries = set() registered_models = self.build_registered_models_list() if len(registered_models) > 0: narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(registered_models))) if additional_query_string: narrow_queries.add(additional_query_string) if narrow_queries: params['fq'] = list(narrow_queries) query = "%s:%s" % (ID, get_identifier(model_instance)) try: raw_results = self.conn.more_like_this(query, field_name, **params) except (IOError, SolrError), e: self.log.error("Failed to fetch More Like This from Solr for document '%s': %s", query, e) raw_results = EmptyResults() return self._process_results(raw_results, result_class=result_class) def _process_results(self, raw_results, highlight=False, result_class=None): if not self.site: from haystack import site else: site = self.site results = [] hits = raw_results.hits facets = {} spelling_suggestion = None if result_class is None: result_class = SearchResult if hasattr(raw_results, 'facets'): facets = { 'fields': raw_results.facets.get('facet_fields', {}), 'dates': raw_results.facets.get('facet_dates', {}), 'queries': raw_results.facets.get('facet_queries', {}), } for key in ['fields']: for facet_field in facets[key]: # Convert to a two-tuple, as Solr's json format returns a list of # pairs. facets[key][facet_field] = zip(facets[key][facet_field][::2], facets[key][facet_field][1::2]) if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False) is True: if hasattr(raw_results, 'spellcheck'): if len(raw_results.spellcheck.get('suggestions', [])): # For some reason, it's an array of pairs. Pull off the # collated result from the end. 
spelling_suggestion = raw_results.spellcheck.get('suggestions')[-1] indexed_models = site.get_indexed_models() for raw_result in raw_results.docs: app_label, model_name = raw_result[DJANGO_CT].split('.') additional_fields = {} model = get_model(app_label, model_name) if model and model in indexed_models: for key, value in raw_result.items(): index = site.get_index(model) string_key = str(key) if string_key in index.fields and hasattr(index.fields[string_key], 'convert'): additional_fields[string_key] = index.fields[string_key].convert(value) else: additional_fields[string_key] = self.conn._to_python(value) del(additional_fields[DJANGO_CT]) del(additional_fields[DJANGO_ID]) del(additional_fields['score']) if raw_result[ID] in getattr(raw_results, 'highlighting', {}): additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]] result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], searchsite=self.site, **additional_fields) results.append(result) else: hits -= 1 return { 'results': results, 'hits': hits, 'facets': facets, 'spelling_suggestion': spelling_suggestion, } def build_schema(self, fields): content_field_name = '' schema_fields = [] for field_name, field_class in fields.items(): field_data = { 'field_name': field_class.index_fieldname, 'type': 'text', 'indexed': 'true', 'stored': 'true', 'multi_valued': 'false', } if field_class.document is True: content_field_name = field_class.index_fieldname # DRL_FIXME: Perhaps move to something where, if none of these # checks succeed, call a custom method on the form that # returns, per-backend, the right type of storage? 
if field_class.field_type in ['date', 'datetime']: field_data['type'] = 'date' elif field_class.field_type == 'integer': field_data['type'] = 'slong' elif field_class.field_type == 'float': field_data['type'] = 'sfloat' elif field_class.field_type == 'boolean': field_data['type'] = 'boolean' elif field_class.field_type == 'ngram': field_data['type'] = 'ngram' elif field_class.field_type == 'edge_ngram': field_data['type'] = 'edge_ngram' if field_class.is_multivalued: field_data['multi_valued'] = 'true' if field_class.stored is False: field_data['stored'] = 'false' # Do this last to override `text` fields. if field_class.indexed is False: field_data['indexed'] = 'false' # If it's text and not being indexed, we probably don't want # to do the normal lowercase/tokenize/stemming/etc. dance. if field_data['type'] == 'text': field_data['type'] = 'string' # If it's a ``FacetField``, make sure we don't postprocess it. if hasattr(field_class, 'facet_for'): # If it's text, it ought to be a string. if field_data['type'] == 'text': field_data['type'] = 'string' schema_fields.append(field_data) return (content_field_name, schema_fields) class SearchQuery(BaseSearchQuery): def __init__(self, site=None, backend=None): super(SearchQuery, self).__init__(site, backend) if backend is not None: self.backend = backend else: self.backend = SearchBackend(site=site) def matching_all_fragment(self): return '*:*' def build_query_fragment(self, field, filter_type, value): result = '' # Handle when we've got a ``ValuesListQuerySet``... if hasattr(value, 'values_list'): value = list(value) if not isinstance(value, (set, list, tuple)): # Convert whatever we find to what pysolr wants. value = self.backend.conn._from_python(value) # Check to see if it's a phrase for an exact match. if ' ' in value: value = '"%s"' % value index_fieldname = self.backend.site.get_index_fieldname(field) # 'content' is a special reserved word, much like 'pk' in # Django's ORM layer. It indicates 'no special field'. 
if field == 'content': result = value else: filter_types = { 'exact': "%s:%s", 'gt': "%s:{%s TO *}", 'gte': "%s:[%s TO *]", 'lt': "%s:{* TO %s}", 'lte': "%s:[* TO %s]", 'startswith': "%s:%s*", } if filter_type == 'in': in_options = [] for possible_value in value: in_options.append('%s:"%s"' % (index_fieldname, self.backend.conn._from_python(possible_value))) result = "(%s)" % " OR ".join(in_options) elif filter_type == 'range': start = self.backend.conn._from_python(value[0]) end = self.backend.conn._from_python(value[1]) return "%s:[%s TO %s]" % (index_fieldname, start, end) else: result = filter_types[filter_type] % (index_fieldname, value) return result def run(self, spelling_query=None): """Builds and executes the query. Returns a list of search results.""" final_query = self.build_query() kwargs = { 'start_offset': self.start_offset, 'result_class': self.result_class, } if self.order_by: order_by_list = [] for order_by in self.order_by: if order_by.startswith('-'): order_by_list.append('%s desc' % order_by[1:]) else: order_by_list.append('%s asc' % order_by) kwargs['sort_by'] = ", ".join(order_by_list) if self.end_offset is not None: kwargs['end_offset'] = self.end_offset if self.highlight: kwargs['highlight'] = self.highlight if self.facets: kwargs['facets'] = list(self.facets) if self.date_facets: kwargs['date_facets'] = self.date_facets if self.query_facets: kwargs['query_facets'] = self.query_facets if self.narrow_queries: kwargs['narrow_queries'] = self.narrow_queries if spelling_query: kwargs['spelling_query'] = spelling_query results = self.backend.search(final_query, **kwargs) self._results = results.get('results', []) self._hit_count = results.get('hits', 0) self._facet_counts = self.post_process_facets(results) self._spelling_suggestion = results.get('spelling_suggestion', None) def run_mlt(self): """Builds and executes the query. 
Returns a list of search results.""" if self._more_like_this is False or self._mlt_instance is None: raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.") additional_query_string = self.build_query() kwargs = { 'start_offset': self.start_offset, 'result_class': self.result_class, } if self.end_offset is not None: kwargs['end_offset'] = self.end_offset - self.start_offset results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **kwargs) self._results = results.get('results', []) self._hit_count = results.get('hits', 0)
{ "content_hash": "d21861f20b5b3d34bbb1e8de62542a27", "timestamp": "", "source": "github", "line_count": 489, "max_line_length": 147, "avg_line_length": 39.259713701431494, "alnum_prop": 0.5218251901239712, "repo_name": "nathangeffen/tbonline-old", "id": "a93c5e0b73ec7fc31132bae044f0e8a33ca81be7", "size": "19198", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tbonlineproject/external/haystack/backends/solr_backend.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "64086" }, { "name": "JavaScript", "bytes": "125172" }, { "name": "Python", "bytes": "474845" } ], "symlink_target": "" }
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: lambda_info short_description: Gathers AWS Lambda function details description: - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and M(lambda_event) to manage lambda event source mappings. version_added: "2.9" options: query: description: - Specifies the resource type for which to gather information. Leave blank to retrieve all information. required: true choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] default: "all" function_name: description: - The name of the lambda function for which information is requested. aliases: [ "function", "name"] event_source_arn: description: - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. 
author: Pierre Jodouin (@pjodouin) requirements: - boto3 extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' --- # Simple example of listing all info for a function - name: List all for a specific function lambda_info: query: all function_name: myFunction register: my_function_details # List all versions of a function - name: List function versions lambda_info: query: versions function_name: myFunction register: my_function_versions # List all lambda function versions - name: List all function lambda_info: query: all max_items: 20 register: output - name: show Lambda information debug: msg: "{{ output['function'] }}" ''' RETURN = ''' --- function: description: lambda function list returned: success type: dict function.TheName: description: lambda function information, including event, mapping, and version information returned: success type: dict ''' from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_aws_connection_info, boto3_conn import json import datetime import sys import re try: from botocore.exceptions import ClientError except ImportError: pass # protected by AnsibleAWSModule def fix_return(node): """ fixup returned dictionary :param node: :return: """ if isinstance(node, datetime.datetime): node_value = str(node) elif isinstance(node, list): node_value = [fix_return(item) for item in node] elif isinstance(node, dict): node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) else: node_value = node return node_value def alias_details(client, module): """ Returns list of aliases for a specified function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_info = dict() function_name = module.params.get('function_name') if function_name: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(aliases=[]) else: module.fail_json_aws(e, msg="Trying to get aliases") else: module.fail_json(msg='Parameter function_name required for query=aliases.') return {function_name: camel_dict_to_snake_dict(lambda_info)} def all_details(client, module): """ Returns all lambda related facts. :param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ if module.params.get('max_items') or module.params.get('next_marker'): module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') lambda_info = dict() function_name = module.params.get('function_name') if function_name: lambda_info[function_name] = {} lambda_info[function_name].update(config_details(client, module)[function_name]) lambda_info[function_name].update(alias_details(client, module)[function_name]) lambda_info[function_name].update(policy_details(client, module)[function_name]) lambda_info[function_name].update(version_details(client, module)[function_name]) lambda_info[function_name].update(mapping_details(client, module)[function_name]) else: lambda_info.update(config_details(client, module)) return lambda_info def config_details(client, module): """ Returns configuration details for one or all lambda functions. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_info = dict() function_name = module.params.get('function_name') if function_name: try: lambda_info.update(client.get_function_configuration(FunctionName=function_name)) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(function={}) else: module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) else: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_info.update(function_list=client.list_functions(**params)['Functions']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(function_list=[]) else: module.fail_json_aws(e, msg="Trying to get function list") functions = dict() for func in lambda_info.pop('function_list', []): functions[func['FunctionName']] = camel_dict_to_snake_dict(func) return functions return {function_name: camel_dict_to_snake_dict(lambda_info)} def mapping_details(client, module): """ Returns all lambda event source mappings. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_info = dict() params = dict() function_name = module.params.get('function_name') if function_name: params['FunctionName'] = module.params.get('function_name') if module.params.get('event_source_arn'): params['EventSourceArn'] = module.params.get('event_source_arn') if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(mappings=[]) else: module.fail_json_aws(e, msg="Trying to get source event mappings") if function_name: return {function_name: camel_dict_to_snake_dict(lambda_info)} return camel_dict_to_snake_dict(lambda_info) def policy_details(client, module): """ Returns policy attached to a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ if module.params.get('max_items') or module.params.get('next_marker'): module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') lambda_info = dict() function_name = module.params.get('function_name') if function_name: try: # get_policy returns a JSON string so must convert to dict before reassigning to its key lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(policy={}) else: module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=policy.') return {function_name: camel_dict_to_snake_dict(lambda_info)} def version_details(client, module): """ Returns all lambda function versions. :param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_info = dict() function_name = module.params.get('function_name') if function_name: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_info.update(versions=[]) else: module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=versions.') return {function_name: camel_dict_to_snake_dict(lambda_info)} def main(): """ Main entry point. 
:return dict: ansible facts """ argument_spec = dict( function_name=dict(required=False, default=None, aliases=['function', 'name']), query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), event_source_arn=dict(required=False, default=None) ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[], required_together=[] ) # validate function_name if present function_name = module.params['function_name'] if function_name: if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) ) if len(function_name) > 64: module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) try: region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) aws_connect_kwargs.update(dict(region=region, endpoint=endpoint, conn_type='client', resource='lambda' )) client = boto3_conn(module, **aws_connect_kwargs) except ClientError as e: module.fail_json_aws(e, "trying to set up boto connection") this_module = sys.modules[__name__] invocations = dict( aliases='alias_details', all='all_details', config='config_details', mappings='mapping_details', policy='policy_details', versions='version_details', ) this_module_function = getattr(this_module, invocations[module.params['query']]) all_facts = fix_return(this_module_function(client, module)) results = dict(function=all_facts, changed=False) if module.check_mode: results['msg'] = 'Check mode set but ignored for fact gathering only.' module.exit_json(**results) if __name__ == '__main__': main()
{ "content_hash": "c71cd00af290e202f2e2f4efd16be5a1", "timestamp": "", "source": "github", "line_count": 382, "max_line_length": 134, "avg_line_length": 32.15706806282723, "alnum_prop": 0.6396125040703354, "repo_name": "thaim/ansible", "id": "d0ce6dafae4eef83f61d1106f741cccac4c40395", "size": "12955", "binary": false, "copies": "9", "ref": "refs/heads/fix-broken-link", "path": "lib/ansible/modules/cloud/amazon/lambda_info.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
import unittest from flask_testing import is_twill_available from .test_utils import TestSetup, TestSetupFailure, TestClientUtils, TestLiveServer, TestTeardownGraceful from .test_twill import TestTwill, TestTwillDeprecated def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestSetup)) suite.addTest(unittest.makeSuite(TestSetupFailure)) suite.addTest(unittest.makeSuite(TestTeardownGraceful)) suite.addTest(unittest.makeSuite(TestClientUtils)) suite.addTest(unittest.makeSuite(TestLiveServer)) if is_twill_available: suite.addTest(unittest.makeSuite(TestTwill)) suite.addTest(unittest.makeSuite(TestTwillDeprecated)) else: print("!!! Skipping tests of Twill components\n") return suite
{ "content_hash": "cf4a11269072251519093f7c4e6c2133", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 106, "avg_line_length": 36.904761904761905, "alnum_prop": 0.7690322580645161, "repo_name": "jmagnusson/flask-testing", "id": "38658485c6e4fb7caeee66f734ccd243a3806c25", "size": "775", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "265" }, { "name": "Python", "bytes": "26629" } ], "symlink_target": "" }
from itertools import product from random import choice, randint, sample from string import printable from typing import Dict, List from roboragi.data_controller.enums import Medium, Site __all__ = ['random_sites', 'random_mediums', 'random_str', 'random_dict', 'random_lookup_entries'] def __random_enum_members(enum) -> list: """ Get a list of unique random members from an enum. :param enum: the enum. :return: a list of unique random members from an enum """ return sample(list(enum), randint(1, len(enum))) def random_sites() -> List[Site]: """ Get a list of unique random sites. :return: a list of random sites. """ return __random_enum_members(Site) def random_mediums() -> List[Medium]: """ Get a list of unique random mediums. :return: a list of random mediums. """ return __random_enum_members(Medium) def random_str() -> str: """ Generate a random string. :return: the random string. """ length = randint(1, 15) return f'test_{"".join(choice(printable) for _ in range(length))}' def random_dict(depth=0): """ Generate a random dict. :return: the random dict. """ rand_lst = lambda: [random_str() for _ in range(randint(5, 10))] if depth >= 2: return choice([random_str(), rand_lst()]) res = {} for key in set(rand_lst()): type_ = choice([0, 1, 2]) if type_ == 0: res[key] = random_str() elif type_ == 1: res[key] = [random_dict(depth + 1) for _ in range(randint(5, 10))] else: res[key] = random_dict(depth + 1) return res def random_lookup_entries() -> Dict[str, Dict[Medium, Dict[Site, str]]]: """ Return a dict of random look up entries. :return: A dict of {lookup name: {(site, medium): id}} """ mediums = random_mediums() sites = random_sites() names = set(random_str() for _ in range(randint(5, 15))) res = {} for name in names: res[name] = {} for site, medium in product(sites, mediums): if medium not in res[name]: res[name][medium] = {} res[name][medium][site] = random_str() return res
{ "content_hash": "2c20eea8858903891e322744c2c81051", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 78, "avg_line_length": 27.493827160493826, "alnum_prop": 0.5855410866636731, "repo_name": "MaT1g3R/Roboragi", "id": "cb046ebf75d03edfc40cbd603c24d3f9fadf9e05", "size": "2227", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "272031" } ], "symlink_target": "" }
import json
import logging
import os
import sys
import tempfile
import time
import urlparse

from telemetry import test
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.page import page_measurement
from telemetry.page import page_measurement_results
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import profile_creator
from telemetry.page import test_expectations


class RecordPage(page_test.PageTest):
  """Page test that drives each page's actions while WPR records traffic.

  Instead of measuring anything, it replays every action named by the
  discovered measurements so the resulting Web Page Replay archive contains
  all resources those actions would fetch.
  """

  def __init__(self, measurements):
    # This class overwrites PageTest.Run, so that the test method name is not
    # really used (except for throwing an exception if it doesn't exist).
    super(RecordPage, self).__init__('Run')
    # Collect the distinct action names requested by any measurement.
    self._action_names = set(
        [measurement().action_name_to_run
         for measurement in measurements.values()
         if measurement().action_name_to_run])
    self.test = None

  def CanRunForPage(self, page):
    # Only http(s) pages produce network traffic worth recording.
    return page.url.startswith('http')

  def CustomizeBrowserOptionsForPageSet(self, pset, options):
    # Give every action on every page a chance to tweak browser options.
    for page in pset:
      for compound_action in self._CompoundActionsForPage(page, options):
        for action in compound_action:
          action.CustomizeBrowserOptionsForPageSet(options)

  def WillNavigateToPage(self, page, tab):
    """Override to ensure all resources are fetched from network."""
    tab.ClearCache(force=False)
    if self.test:
      self.test.WillNavigateToPage(page, tab)

  def DidNavigateToPage(self, page, tab):
    """Forward the call to the test."""
    if self.test:
      self.test.DidNavigateToPage(page, tab)

  def Run(self, options, page, tab, results):
    # When recording, sleep to catch any resources that load post-onload.
    tab.WaitForDocumentReadyStateToBeComplete()

    if self.test:
      # Drive the wrapped test's measurement into throwaway results so its
      # page interactions (and their resource loads) are recorded.
      dummy_results = page_measurement_results.PageMeasurementResults()
      dummy_results.WillMeasurePage(page)
      self.test.MeasurePage(page, tab, dummy_results)
      dummy_results.DidMeasurePage()
    else:
      # TODO(tonyg): This should probably monitor resource timing for activity
      # and sleep until 2s since the last network event with some timeout like
      # 20s. We could wrap this up as WaitForNetworkIdle() and share with the
      # speed index metric.
      time.sleep(3)

    # Run the actions for all measurements. Reload the page between
    # actions.
    should_reload = False
    for compound_action in self._CompoundActionsForPage(page, options):
      if should_reload:
        self.RunNavigateSteps(page, tab)
      self._RunCompoundAction(page, tab, compound_action)
      should_reload = True

  def _CompoundActionsForPage(self, page, options):
    """Returns the compound actions this page defines for the collected
    action names."""
    actions = []
    for action_name in self._action_names:
      if not hasattr(page, action_name):
        continue
      interactive = options and options.interactive
      actions.append(page_test.GetCompoundActionFromPage(
          page, action_name, interactive))
    return actions


def _CreatePageSetForUrl(url):
  """Creates (and writes to disk) a one-page page set for an ad-hoc URL."""
  ps_name = urlparse.urlparse(url).hostname + '.json'
  ps_path = os.path.join(util.GetBaseDir(), 'page_sets', ps_name)
  ps = {'archive_data_file': '../data/%s' % ps_name,
        'pages': [
          { 'url': url }
          ]
        }
  with open(ps_path, 'w') as f:
    f.write(json.dumps(ps))
  print 'Created new page set %s' % ps_path

  return page_set.PageSet.FromFile(ps_path)


def Main(base_dir):
  """Records a WPR archive for a page set, measurement, test, or raw URL.

  Returns the number of failed pages (capped at 255 so it fits an exit code).
  """
  measurements = {
      n: cls for n, cls in discover.DiscoverClasses(
          base_dir, base_dir, page_measurement.PageMeasurement).items()
      # Filter out unneeded ProfileCreators (crbug.com/319573).
      if not issubclass(cls, profile_creator.ProfileCreator)
      }
  tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
                                   index_by_class_name=True)
  options = browser_options.BrowserFinderOptions()
  parser = options.CreateParser('%prog <PageSet|Measurement|Test|URL>')
  page_runner.AddCommandLineOptions(parser)

  recorder = RecordPage(measurements)
  recorder.AddCommandLineOptions(parser)

  # The single non-flag argument selects what to record.
  quick_args = [a for a in sys.argv[1:] if not a.startswith('-')]
  if len(quick_args) != 1:
    parser.print_usage()
    sys.exit(1)
  target = quick_args[0]
  if target in tests:
    recorder.test = tests[target]().test()
    recorder.test.AddCommandLineOptions(parser)
    parser.parse_args()
    ps = tests[target]().CreatePageSet(options)
  elif target in measurements:
    recorder.test = measurements[target]()
    recorder.test.AddCommandLineOptions(parser)
    _, args = parser.parse_args()
    ps = recorder.test.CreatePageSet(args, options)
  elif target.endswith('.json'):
    parser.parse_args()
    ps = page_set.PageSet.FromFile(target)
  elif target.startswith('http'):
    parser.parse_args()
    ps = _CreatePageSetForUrl(target)
  else:
    parser.print_usage()
    sys.exit(1)

  expectations = test_expectations.TestExpectations()

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
  options.browser_options.no_proxy_server = True
  recorder.CustomizeBrowserOptions(options)
  results = page_runner.Run(recorder, ps, expectations, options)

  if results.errors or results.failures:
    logging.warning('Some pages failed. The recording has not been updated for '
                    'these pages.')
    logging.warning('Failed pages:\n%s',
                    '\n'.join(zip(*results.errors + results.failures)[0]))

  if results.skipped:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning('Skipped pages:\n%s',
                    '\n'.join(zip(*results.skipped)[0]))

  if results.successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(results.successes)
  else:
    # Nothing was recorded; drop the temporary archive.
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.failures))
{ "content_hash": "9d6139625c5156d5133d8f5c9ae5e137", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 80, "avg_line_length": 35.31428571428572, "alnum_prop": 0.7003236245954693, "repo_name": "ChromiumWebApps/chromium", "id": "6aaec3578280176c4a80b7cb371ba426f0d590e9", "size": "6368", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/telemetry/telemetry/page/record_wpr.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "52960" }, { "name": "Awk", "bytes": "8660" }, { "name": "C", "bytes": "42286199" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "198616766" }, { "name": "CSS", "bytes": "937333" }, { "name": "DOT", "bytes": "2984" }, { "name": "Java", "bytes": "5695686" }, { "name": "JavaScript", "bytes": "21967126" }, { "name": "M", "bytes": "2190" }, { "name": "Matlab", "bytes": "2262" }, { "name": "Objective-C", "bytes": "7602057" }, { "name": "PHP", "bytes": "97817" }, { "name": "Perl", "bytes": "1210885" }, { "name": "Python", "bytes": "10774996" }, { "name": "R", "bytes": "262" }, { "name": "Shell", "bytes": "1316721" }, { "name": "Tcl", "bytes": "277091" }, { "name": "TypeScript", "bytes": "1560024" }, { "name": "XSLT", "bytes": "13493" }, { "name": "nesC", "bytes": "15243" } ], "symlink_target": "" }
"""Test for the inverse strategy.""" import random import axelrod from .test_player import TestPlayer C, D = axelrod.Actions.C, axelrod.Actions.D class TestInverse(TestPlayer): name = "Inverse" player = axelrod.Inverse expected_classifier = { 'memory_depth': float('inf'), # Long memory 'stochastic': True, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """ Test that initial strategy cooperates. """ self.first_play_test(C) def test_that_cooperate_if_opponent_has_not_defected(self): """ Test that as long as the opponent has not defected the player will cooperate. """ self.responses_test([C] * 4, [C] * 4, [C]) self.responses_test([C] * 5, [C] * 5, [C]) def test_when_opponent_has_all_Ds(self): """ Tests that if opponent has played all D then player chooses D """ self.responses_test([C], [D], [D], random_seed=5) self.responses_test([C], [D, D], [D]) self.responses_test([C] * 8, [D] * 8, [D]) def test_when_opponent_som_Ds(self): """ Tests that if opponent has played all D then player chooses D """ random.seed(5) P1 = axelrod.Inverse() P2 = axelrod.Player() self.responses_test([C] * 4, [C, D, C, D], [C], random_seed=6) self.responses_test([C] * 6, [C, C, C, C, D, D], [C]) self.responses_test([C] * 9, [D] * 8 + [C], [D]) self.responses_test([C] * 9, [D] * 8 + [C], [D], random_seed=6)
{ "content_hash": "2d99aa03b4c1dae7618be3c264dfa4f3", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 85, "avg_line_length": 29.339285714285715, "alnum_prop": 0.5538648813146683, "repo_name": "bootandy/Axelrod", "id": "62da3e9eb6f0a4cf57659e37ba3aa7c4fff94ca2", "size": "1643", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "axelrod/tests/unit/test_inverse.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "400002" }, { "name": "Shell", "bytes": "959" } ], "symlink_target": "" }
import xlsxwriter
import tempfile
import datetime
import io
import sys
import traceback

from bottle import HTTPResponse

from os import path

import metrics_util


def generate_spreadsheet(config, timings, runs, url_results, all_urls, filterIDs, addTable, excludeTimeouts, fromTime, toTime):
    """Build an .xlsx workbook of per-site timing data and return it as a
    bottle HTTPResponse download attachment.

    One worksheet is created per monitored URL, containing a row per run
    (timestamp, time-to-data, total time, computed download time), summary
    rows (average / deviation / worst / best) and conditional formatting
    that highlights timeouts and slow/fast outliers.

    On any error, an HTML error page embedding the traceback is returned
    instead (best-effort behavior preserved from the original).
    """
    try:
        # make a dictionary of ids -> (url, nickname)
        urls = dict([(x[0], (x[1], x[2])) for x in url_results])
        # make a dictionary of run ids -> run job time
        run_dates = dict([(x[0], x[1]) for x in runs])
        # Per-URL rows, each sorted by run time:
        # (url, url_id, nickname, [(run time, time_to_data, total, timed_out), ...])
        per_site_timing_data = [
            (url[0], url_id, url[1],
             sorted([(run_dates[t[1]], t[3], t[4], t[5])
                     for t in timings if url_id == t[2]],
                    key=lambda x: x[0]))
            for (url_id, url) in urls.items()]

        output = io.BytesIO()
        workbook = xlsxwriter.Workbook(output, {'in_memory': True})

        workbook.set_properties({
            'title': 'Website metrics from %s to %s' % (str(fromTime), str(toTime)),
            'author': 'Jon Wolfe/Anibit Technology , LLC',
            'comments': 'created by Stupid Simple Website Metrics http://anibit.com',
        })

        date_format = workbook.add_format(
            {'num_format': 'mmm d yyyy hh:mm:ss AM/PM'})
        bad_timing_format = workbook.add_format({'font_color': '#FF0000'})
        timing_format = workbook.add_format({'num_format': '0.000'})
        slow_outlier_format = workbook.add_format({'bg_color': 'FFAAAA'})
        fast_outlier_format = workbook.add_format({'bg_color': 'AAFFAA'})

        # NOTE(review): 'or True' predates this change -- the table has always
        # been emitted regardless of addTable; preserved to avoid changing
        # behavior. Confirm whether addTable should actually gate this.
        if addTable or True:
            for (url_name, url_id, url_nickname, site_timings) in per_site_timing_data:
                persite_worksheet = workbook.add_worksheet(url_nickname)
                persite_worksheet.set_column(0, 0, 25)
                persite_worksheet.set_column(1, 1, 15)
                persite_worksheet.set_column(2, 2, 15)
                persite_worksheet.set_column(3, 3, 15)
                persite_worksheet.set_column(4, 4, 20)

                persite_worksheet.write('A1', 'Url:')
                persite_worksheet.write('B1', url_name)
                persite_worksheet.write('A2', "Time")
                persite_worksheet.write('B2', "Time to Data (seconds)")
                persite_worksheet.write('C2', "Total Time")
                persite_worksheet.write('D2', "Download Time")

                # Data rows start at spreadsheet row 7 (0-based index 6);
                # rows 3-6 hold the summary formulas written below.
                row_index = 6
                # Pre-initialize so the summary formulas below don't raise
                # NameError when a site has no recorded timings.
                row_str = str(row_index + 1)
                for site_timing in site_timings:
                    row_str = str(row_index + 1)
                    timestamp = metrics_util.get_time_struct(site_timing[0])
                    persite_worksheet.write_datetime(
                        row_index, 0, timestamp, date_format)
                    if int(site_timing[3]) == 0:
                        persite_worksheet.write_number(
                            row_index, 1, site_timing[1], timing_format)
                        persite_worksheet.write_number(
                            row_index, 2, site_timing[2], timing_format)
                        # Download time = total time - time to data.
                        persite_worksheet.write_formula(
                            row_index, 3,
                            "=C" + row_str + "-B" + row_str, timing_format)
                    else:
                        persite_worksheet.write_string(
                            row_index, 4, "TIMED OUT", bad_timing_format)
                    row_index = row_index + 1

                persite_worksheet.write('A3', "Average")
                persite_worksheet.write_formula('B3', '=AVERAGE(B7:B' + row_str + ')', timing_format)
                persite_worksheet.write_formula('C3', '=AVERAGE(C7:C' + row_str + ')', timing_format)
                persite_worksheet.write_formula('D3', '=AVERAGE(D7:D' + row_str + ')', timing_format)

                persite_worksheet.write('A4', "Deviation")
                persite_worksheet.write_formula('B4', '=STDEV(B7:B' + row_str + ')', timing_format)
                persite_worksheet.write_formula('C4', '=STDEV(C7:C' + row_str + ')', timing_format)
                persite_worksheet.write_formula('D4', '=STDEV(D7:D' + row_str + ')', timing_format)

                persite_worksheet.write('A5', "Worst")
                persite_worksheet.write_formula('B5', '=MAX(B7:B' + row_str + ')', timing_format)
                persite_worksheet.write_formula('C5', '=MAX(C7:C' + row_str + ')', timing_format)
                persite_worksheet.write_formula('D5', '=MAX(D7:D' + row_str + ')', timing_format)

                persite_worksheet.write('A6', "Best")
                persite_worksheet.write_formula('B6', '=MIN(B7:B' + row_str + ')', timing_format)
                persite_worksheet.write_formula('C6', '=MIN(C7:C' + row_str + ')', timing_format)
                persite_worksheet.write_formula('D6', '=MIN(D7:D' + row_str + ')', timing_format)

                # Fix: the range strings previously had a stray ')' appended
                # (e.g. 'B7:B13)'), producing invalid ranges.
                persite_worksheet.conditional_format('B7:B' + row_str, {'type': 'cell',
                                                                       'criteria': '==',
                                                                       'value': 0.0,
                                                                       'format': bad_timing_format})
                persite_worksheet.conditional_format('C7:C' + row_str, {'type': 'cell',
                                                                       'criteria': '==',
                                                                       'value': 0.0,
                                                                       'format': bad_timing_format})
                persite_worksheet.conditional_format('B7:D' + row_str, {'type': 'cell',
                                                                       'criteria': '>=',
                                                                       'value': 'B$3+B$4',
                                                                       'format': slow_outlier_format})
                persite_worksheet.conditional_format('B7:D' + row_str, {'type': 'cell',
                                                                       'criteria': '<=',
                                                                       'value': 'B$3-B$4',
                                                                       'format': fast_outlier_format})

                # Keep the data rows visible while scrolling past the summary.
                persite_worksheet.freeze_panes(6, 0)

        workbook.close()

        xlsx_data = output.getvalue()
        headers = dict()
        headers['Content-Disposition'] = 'attachment; filename="sswm_from_%s_to_%s.xlsx"' % (fromTime, toTime)
        headers['Accept-Ranges'] = 'bytes'
        headers['Content-Type'] = 'application/vnd.ms-excel'
        return HTTPResponse(xlsx_data, **headers)
    except Exception:
        # Fix: the previous bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit) bound the sys.exc_info() 3-tuple and
        # applied it to a single-%s format string, which itself raised.
        # Embed the formatted traceback instead.
        return '''
<html>
<head>
</head>
<body>
<p>something went wrong</p>
<p>Stack trace:</p>
<p>%s</p>
<script>
alert('There was a problem creating the spreadsheet');
</script>
</body>
</html>
''' % traceback.format_exc()
{ "content_hash": "e56e4b640229fc4039d62289a399aa2b", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 131, "avg_line_length": 49.60693641618497, "alnum_prop": 0.46900489396411094, "repo_name": "anibit/sswm", "id": "b6716e4c38afb4716ff127726788958f086e116f", "size": "8582", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "timing_result_spreadsheet.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "174891" }, { "name": "Smarty", "bytes": "4022" } ], "symlink_target": "" }
from eventlet import patcher
from eventlet.green import httplib
from eventlet.green import urllib

# Run the stdlib 'test.test_urllib' suite against eventlet's green
# (cooperative, non-blocking) httplib/urllib replacements.  patcher.inject()
# imports the named test module with the listed modules swapped in and copies
# its top-level names -- including test_main -- into globals().
patcher.inject('test.test_urllib',
    globals(),
    ('httplib', httplib),
    ('urllib', urllib))

if __name__ == "__main__":
    test_main()
{ "content_hash": "97bf6699a16fd71d391e026b6be26c81", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 34, "avg_line_length": 22.363636363636363, "alnum_prop": 0.6585365853658537, "repo_name": "JeremyGrosser/python-eventlet", "id": "41f9e6a40165615ac7af316859b8117b0bae79a7", "size": "246", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "tests/stdlib/test_urllib.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "707895" } ], "symlink_target": "" }
def integrate(func, lower_limit, upper_limit, step):
    """Numerically integrate ``func`` over [lower_limit, upper_limit] using
    the midpoint (rectangle) rule with a fixed step width.

    Each iteration evaluates ``func`` at the midpoint of the next interval
    of width ``step`` and accumulates ``func(midpoint) * step``.  If the
    range is not an exact multiple of ``step``, the final interval
    overshoots ``upper_limit`` (preserved from the original behavior).
    If ``lower_limit >= upper_limit`` the result is 0.

    :param func: a callable taking one numeric argument.
    :param lower_limit: start of the integration range.
    :param upper_limit: end of the integration range.
    :param step: interval width; must be positive.
    :return: the approximate integral.
    :raises ValueError: if ``step`` is not positive (previously this caused
        an infinite loop).
    """
    if step <= 0:
        raise ValueError("step must be positive")
    integral = 0
    half_step = step / 2
    cur_val = lower_limit
    while cur_val < upper_limit:
        cur_val += half_step
        integral += func(cur_val) * step
        cur_val += half_step
    return integral
{ "content_hash": "1e06e80f677328902631703a3ba89c9f", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 52, "avg_line_length": 16.466666666666665, "alnum_prop": 0.659919028340081, "repo_name": "felipecustodio/data_structures_C", "id": "15dc1c107391a065bfaf1f06c9ebdb44d7b12c3d", "size": "247", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "math/numerical_integration/python/numerical_integration.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import os


class ProgressBar:
    """Configurable, single-line console progress bar.

    The bar is redrawn in place (carriage return) as progress is added and
    ends the line with a newline once the task count is reached.
    """

    def __init__(self,
                 task_number,
                 bar_opening="[",
                 bar_ending="]",
                 empty_char="-",
                 filled_char="=",
                 update_rate=0,
                 percent_precision=1,
                 display_percent=True,
                 display_absolute_progress=True,
                 bar_length=0,
                 enable_front_char=False,
                 front_char=">"):
        """Create a progress bar for ``task_number`` units of work.

        :param task_number: total number of progress units.
        :param bar_opening: string drawn before the bar.
        :param bar_ending: string drawn after the bar.
        :param empty_char: character for the unfilled portion.
        :param filled_char: character for the filled portion.
        :param update_rate: redraw every N increments (0 = redraw whenever
            the rendered bar or percentage would visibly change).
        :param percent_precision: decimal places shown for the percentage.
        :param display_percent: show the percentage after the bar.
        :param display_absolute_progress: show "done/total" after the bar.
        :param bar_length: fixed bar width; 0 means fit the terminal width.
        :param enable_front_char: draw ``front_char`` at the bar's leading edge.
        :param front_char: the leading-edge character.
        """
        self.__task_number = task_number
        self.__bar_opening = bar_opening
        self.__bar_ending = bar_ending
        self.__empty_char = empty_char
        self.__filled_char = filled_char
        self.__update_rate = update_rate
        self.__percent_precision = str(percent_precision)
        self.__display_percent = display_percent
        self.__display_absolute_progress = display_absolute_progress
        if bar_length > 0:
            # Never exceed what fits in the terminal.
            self.__bar_length = min(bar_length, self.__compute_max_length())
        else:
            self.__bar_length = self.__compute_max_length()
        self.__enable_front_char = enable_front_char
        self.__front_char = front_char

    def begin(self):
        """Reset progress and draw the empty bar."""
        self.__update_count = 0
        self.__current_length = 0
        self.__current_progress = 0
        print(self.__get_bar_string(), end='\r')

    def add_progress(self, inc=1):
        """Advance progress by ``inc`` units (non-positive values count as 1)
        and redraw the bar if the display would change."""
        increment = inc if inc > 0 else 1
        if self.__current_progress < self.__task_number:
            prev_percent = self.__get_percent_progress()
            self.__current_progress = min(self.__task_number,
                                          self.__current_progress + increment)
            self.__update_count += increment
            new_length = int(self.__get_progress() * self.__bar_length)
            if self.__update_rate > 0:
                need_to_update = self.__update_count >= self.__update_rate
            else:
                # Redraw when either the filled length or the displayed
                # percentage actually changed.
                need_to_update = new_length > self.__current_length or prev_percent != self.__get_percent_progress()

            if need_to_update or self.__current_progress == self.__task_number:
                self.__update_count = 0
                self.__current_length = new_length
                # Finish the line once the task is complete.
                end_char = "\r" if self.__current_progress < self.__task_number else "\n"
                print(self.__get_bar_string(), end=end_char)

    def __get_progress(self):
        # Fraction complete in [0, 1].
        return float(float(self.__current_progress) / float(self.__task_number))

    def __get_percent_progress(self):
        # e.g. "42.0%" with the configured precision.
        format_string = "{0:." + self.__percent_precision + "f}"
        return format_string.format(self.__get_progress() * 100) + "%"

    def __get_progress_fraction(self):
        # e.g. "3/10".
        return str(self.__current_progress) + "/" + str(self.__task_number)

    def __get_bar_string(self):
        # Render the full bar line: opening, fill, optional front char,
        # padding, ending, then the optional percent / fraction suffix.
        diff = self.__bar_length - self.__current_length - (1 if self.__enable_front_char else 0)
        progresses = ""
        if self.__display_percent:
            progresses += " : " + self.__get_percent_progress()
            progresses += " (" + self.__get_progress_fraction() + ")" if self.__display_absolute_progress else ""
        elif self.__display_absolute_progress:
            progresses += " : " + self.__get_progress_fraction()
        front_char = self.__front_char if (
            self.__enable_front_char and self.__current_progress < self.__task_number) else ""
        return (
            self.__bar_opening +
            self.__current_length * self.__filled_char +
            front_char +
            diff * self.__empty_char +
            self.__bar_ending +
            progresses
        )

    def __compute_max_length(self):
        """Return the widest bar that fits the terminal after subtracting
        the decorations (opening/ending, percent, fraction, separators)."""
        sz = None
        try:
            sz = os.get_terminal_size().columns
        except (OSError, ValueError):
            # Fix: was a bare 'except: pass' (swallowed KeyboardInterrupt
            # too). Output is not a terminal -- fall back to 80 columns.
            pass
        if sz is None:
            sz = 80
        max_length = int(sz)
        max_length -= (len(str(self.__task_number)) * 2 + 1) if self.__display_absolute_progress else 0
        max_length -= len(self.__bar_opening)
        max_length -= len(self.__bar_ending)
        max_length -= (5 + int(self.__percent_precision)) if self.__display_percent else 0
        max_length -= 1 if int(self.__percent_precision) > 0 else 0
        max_length -= 3 if (self.__display_percent and self.__display_absolute_progress) else 0
        max_length -= 2 if (self.__display_percent or self.__display_absolute_progress) else 0
        return max_length - 1
{ "content_hash": "dd375f93ea357445c228e09fa4ac54cd", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 120, "avg_line_length": 44.634408602150536, "alnum_prop": 0.5776921223801493, "repo_name": "Rabyss/HideIt", "id": "60699243d4217c9498aeac2340dd4582a7337204", "size": "4173", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "progress_bar.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "6587" } ], "symlink_target": "" }
""" Core Identity models for atmosphere. Note: Multiple users can 'own' an identity (IdentityMembership - group.py) """ from datetime import timedelta from django.db import models from threepio import logger from uuid import uuid5, uuid4 from core.query import only_active_memberships, contains_credential from core.models.quota import Quota class Identity(models.Model): """ An Identity is the minimal set of credentials necessary to authenticate against a single provider """ uuid = models.UUIDField(default=uuid4, unique=True, editable=False) created_by = models.ForeignKey("AtmosphereUser") provider = models.ForeignKey("Provider") quota = models.ForeignKey(Quota) @classmethod def find_instance(cls, instance_id): """ Given an instance, find the identity that created it. """ return Identity.objects.filter(instance__provider_alias=instance_id).first() @classmethod def delete_identity(cls, username, provider_location): # Do not move up. ImportError. from core.models import AtmosphereUser, Group, Credential, Quota,\ Provider, AccountProvider,\ IdentityMembership provider = Provider.objects.get(location__iexact=provider_location) user = AtmosphereUser.objects.get(username=username) group = Group.objects.get(name=username) my_ids = Identity.objects.filter( created_by=user, provider=provider) for ident in my_ids: membership_set = ident.identity_memberships.all() membership_set.delete() ident.delete() group.delete() user.delete() return def export_to_file(self, filename=None): """ Depending on the ProviderType, appropriately generate 'export data' into an appropriate source-file """ provider_type = self.provider.type.name if provider_type.lower() == 'openstack': from service.accounts.openstack_manager import AccountDriver return AccountDriver.generate_openrc(self, filename) return None def export(self): """ Depending on the ProviderType, appropriately generate 'export data', a dict. 
""" provider_type = self.provider.type.name if provider_type.lower() == 'openstack': from service.accounts.openstack_manager import AccountDriver return AccountDriver.export_identity(self) return None def can_share(self, django_user): """ You CAN share an identity IFF: 0. You are a staff user 1. You are the original owner of the identity 2. You are the leader of a group who contains the owner of the identity """ # This person leads a group, may be able to share. # Check 0 if django_user.is_staff: return True # Check 1 original_owner = self.created_by if original_owner == django_user: return True # Check 2 shared = False leader_groups = django_user.group_set.get(leaders__in=[django_user]) for group in leader_groups: id_member = g.identity_memberships.get(identity=self) if not id_member: continue # ASSERT: You have SHARED access to the identity shared = True if original_owner in group.user_set.all(): return True # User can't share.. Log the attempt for record-keeping if shared: logger.info("FAILED SHARE ATTEMPT: User:%s Identity:%s " "Reason: You are not a leader of any group that " "contains the actual owner of the identity (%s)." % (django_user, self, original_owner)) else: logger.info("FAILED SHARE ATTEMPT: User:%s Identity:%s " % (django_user, self)) return False def share(self, core_group, allocation=None): """ """ from core.models import IdentityMembership, Quota, Allocation existing_membership = IdentityMembership.objects.filter( member=core_group, identity=self) if existing_membership: return existing_membership[0] # Ready to create new membership for this group if not allocation: allocation = Allocation.default_allocation() new_membership = IdentityMembership.objects.get_or_create( member=core_group, identity=self, allocation=allocation)[0] return new_membership def unshare(self, core_group): """ Potential problem: 1. User X creates/imports an openstack account (& is the owner), 2. User X shares identitymembership with User Y, 3. 
User X or Y tries to unshare IdentityMembership with the opposing user. Solution: ONLY unshare if this user is the original owner of the identity """ from core.models import IdentityMembership existing_membership = IdentityMembership.objects.filter( member=core_group, identity=self) return existing_membership[0].delete() def get_membership(self): identity_members = self.identity_memberships.all() group_names = [id_member.member for id_member in identity_members] # TODO: Add 'rules' if we want to hide specific users (staff, etc.) return group_names @classmethod def _kwargs_to_credentials(cls, kwarg_creds): """ Takes a dictionary of `cred_*` key/values and returns back `*` key/value dictionary. Ignores any 'other' kwargs that may be present. """ credentials = {} for (c_key, c_value) in kwarg_creds.items(): if 'cred_' not in c_key.lower(): continue c_key = c_key.replace('cred_', '') credentials[c_key] = c_value return credentials @classmethod def create_identity(cls, username, provider_location, quota=None, allocation=None, max_quota=False, account_admin=False, **kwarg_creds): """ DEPRECATED: POST to v2/identities API to create an identity. """ # Do not move up. ImportError. from core.models import Group, Quota,\ Provider, AccountProvider, Allocation,\ IdentityMembership provider = Provider.objects.get(location__iexact=provider_location) credentials = cls._kwargs_to_credentials(kwarg_creds) # DEV NOTE: 'New' identities are expected to have a router name directly assigned # upon creation. If the value is not passed in, we can ask the provider to select # the router with the least 'usage' to ensure an "eventually consistent" distribution # of users->routers. 
topologyClsName = provider.get_config('network', 'topology', raise_exc=False) if topologyClsName == 'External Router Topology' and 'router_name' not in credentials: credentials['router_name'] = provider.select_router() (user, group) = Group.create_usergroup(username) identity = cls._get_identity(user, group, provider, quota, credentials) # NOTE: This specific query will need to be modified if we want # 2+ Identities on a single provider id_membership = identity.share(group, allocation=allocation) # ID_Membership exists. # 3. Assign admin account, if requested if account_admin: AccountProvider.objects.get_or_create( provider=id_membership.identity.provider, identity=id_membership.identity)[0] # 4. Save the user to activate profile on first-time use # FIXME: only call .save() if 'no profile' test is True. # TODO: write a 'no profile' test f() user.save() # Return the identity return identity @classmethod def _get_identity(cls, user, group, provider, quota, credentials): """ # 1. Make sure that an Identity exists for the user/group+provider # 2. Make sure that all kwargs exist as credentials for the identity """ identity_qs = Identity.objects.filter( created_by=user, provider=provider) if identity_qs.count() > 1: raise Exception("Could not uniquely identify the identity") identity = identity_qs.first() if identity: # In the future, we will only update the credentials *once* # during self._create_identity(). for (c_key, c_value) in credentials.items(): Identity.update_credential(identity, c_key, c_value) else: identity = cls._create_identity(user, group, provider, quota, credentials) return identity @classmethod def _create_identity(cls, user, group, provider, quota, credentials): # FIXME: we shouldn't have to create the uuid.. default should do this? 
new_uuid = uuid4() if not quota: quota = Quota.default_quota() identity = Identity.objects.create( created_by=user, provider=provider, quota=quota, uuid=str(new_uuid)) for (c_key, c_value) in credentials.items(): Identity.update_credential(identity, c_key, c_value) return identity @classmethod def update_credential(cls, identity, c_key, c_value, replace=False): from core.models import Credential test_key_exists = Credential.objects.filter( identity=identity, key=c_key) if len(test_key_exists) > 1: if not replace: raise ValueError("Found multiple entries for Credential: %s on Identity: %s" % (c_key, identity)) test_key_exists.delete() elif test_key_exists: # Single selection test_key_exists = test_key_exists.get() logger.debug( "Conflicting Key Error: Key:%s Value:%s %s Value:%s" % (c_key, test_key_exists.value, "(to replace with new value, set replace=True) New" if not replace else "Replacement", c_value)) # No Dupes... But should we really throw an Exception here? if not replace: return test_key_exists test_key_exists.value = c_value test_key_exists.save() return test_key_exists return Credential.objects.get_or_create( identity=identity, key=c_key, value=c_value)[0] def provider_uuid(self): return self.provider.uuid def is_active(self, user=None): if user: return self.identity_memberships.filter( only_active_memberships(), member__user=user).count() > 0 return self.provider.is_active() def creator_name(self): return self.created_by.username def project_name(self): project_name = self.get_credential('ex_project_name') if not project_name: project_name = self.get_credential('ex_tenant_name') if not project_name: project_name = self.get_credential('project_name') if not project_name: project_name = self.get_credential('tenant_name') if not project_name: project_name = '' return project_name def get_credential(self, key): cred = self.credential_set.filter(key=key) return cred[0].value if cred else None def get_credentials(self): cred_dict = {} for cred in 
self.credential_set.all(): cred_dict[cred.key] = cred.value # Hotfix to avoid errors in rtwo+OpenStack if 'ex_tenant_name' not in cred_dict: cred_dict['ex_tenant_name'] = self.project_name() return cred_dict def get_all_credentials(self): cred_dict = {} for cred in self.provider.providercredential_set.all(): cred_dict[cred.key] = cred.value # Allow overriding in the identity for cred in self.credential_set.all(): cred_dict[cred.key] = cred.value return cred_dict def get_urls(self): return [] def get_allocation(self): id_member = self.identity_memberships.all()[0] return id_member.allocation def get_total_hours(self): from service.monitoring import _get_allocation_result limit_instances = self.instance_set.all().values_list( 'provider_alias', flat=True ).distinct() result = _get_allocation_result( self, limit_instances=limit_instances) total_hours = result.total_runtime().total_seconds() / 3600.0 hours = round(total_hours, 2) return hours def get_quota(self): return self.quota def total_usage(self, start_date, end_date): # Undoubtedly will cause circular dependencies from service.monitoring import _get_allocation_result allocation_result = _get_allocation_result(self, start_date, end_date) if not allocation_result: # Flag to the plugin you have an error in counting. 
return -1 total_au = allocation_result.total_runtime().total_seconds() / 3600.0 return total_au def get_allocation_usage(self): # Undoubtedly will cause circular dependencies from service.monitoring import _get_allocation_result allocation_result = _get_allocation_result(self) over_allocation, diff_amount = allocation_result.total_difference() burn_time = allocation_result.get_burn_rate() # Moving from seconds to hours hourly_credit = int(allocation_result .total_credit().total_seconds() / 3600.0) hourly_runtime = int(allocation_result .total_runtime().total_seconds() / 3600.0) hourly_difference = int(diff_amount.total_seconds() / 3600.0) zero_time = allocation_result.time_to_zero() return { "threshold": hourly_credit, # Total amount "current": hourly_runtime, # Total used "remaining": hourly_difference, "ttz": zero_time, # Time Til Zero } def get_allocation_dict(self): id_member = self.identity_memberships.all()[0] allocation_dict = id_member.get_allocation_dict() return allocation_dict def get_quota_dict(self): id_member = self.identity_memberships.all()[0] # See core/models/membership.py#IdentityMembership quota_dict = id_member.get_quota_dict() allocation_dict = self.get_allocation_dict() if allocation_dict: quota_dict.update({"allocation": allocation_dict}) return quota_dict def json(self): return { 'id': self.id, 'creator': self.created_by.username, 'provider': self.provider.json(), 'credentials': [cred.json() for cred in self.credential_set.order_by('key')], } def __unicode__(self): output = "%s %s" % (self.provider, self.project_name()) return output class Meta: db_table = "identity" app_label = "core" verbose_name_plural = "identities"
{ "content_hash": "517c63510f6f7ce992dea9693a27d167", "timestamp": "", "source": "github", "line_count": 413, "max_line_length": 113, "avg_line_length": 37.67554479418886, "alnum_prop": 0.6040488431876607, "repo_name": "CCI-MOC/GUI-Backend", "id": "83903680d872514ac9cbbd8d835af89910bb4626", "size": "15560", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/models/identity.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "11571" }, { "name": "Python", "bytes": "2565922" }, { "name": "Ruby", "bytes": "1345" }, { "name": "Shell", "bytes": "42018" } ], "symlink_target": "" }
import sys

import antlr3
import antlr3.tree
from SimpleCLexer import SimpleCLexer
from SimpleCParser import SimpleCParser
from SimpleCWalker import SimpleCWalker

# Driver script: lex and parse the C source file named on the command line,
# print the resulting AST, then run the generated tree walker over it.
source_text = open(sys.argv[1]).read()
char_stream = antlr3.StringStream(source_text)
token_stream = antlr3.CommonTokenStream(SimpleCLexer(char_stream))
parse_result = SimpleCParser(token_stream).program()
print("tree=" + parse_result.tree.toStringTree())

# Feed the parser's AST (plus the original tokens) to the tree walker.
ast_nodes = antlr3.tree.CommonTreeNodeStream(parse_result.tree)
ast_nodes.setTokenStream(token_stream)
SimpleCWalker(ast_nodes).program()
{ "content_hash": "6b2067759c928f0fbdee3049b0163daa", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 55, "avg_line_length": 26.842105263157894, "alnum_prop": 0.807843137254902, "repo_name": "sshrdp/mclab", "id": "bdfd46d8fea6dfe0821cdfe2ed89908d9c2276e3", "size": "510", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "lib/antlr-3.0.1/runtime/Python/examples/simplecTreeParser/simplec.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
"""This component provides HA switch support for Ring Door Bell/Chimes.""" from datetime import timedelta import logging import requests from homeassistant.components.light import ColorMode, LightEntity from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity_platform import AddEntitiesCallback import homeassistant.util.dt as dt_util from . import DOMAIN from .entity import RingEntityMixin _LOGGER = logging.getLogger(__name__) # It takes a few seconds for the API to correctly return an update indicating # that the changes have been made. Once we request a change (i.e. a light # being turned on) we simply wait for this time delta before we allow # updates to take place. SKIP_UPDATES_DELAY = timedelta(seconds=5) ON_STATE = "on" OFF_STATE = "off" async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Create the lights for the Ring devices.""" devices = hass.data[DOMAIN][config_entry.entry_id]["devices"] lights = [] for device in devices["stickup_cams"]: if device.has_capability("light"): lights.append(RingLight(config_entry.entry_id, device)) async_add_entities(lights) class RingLight(RingEntityMixin, LightEntity): """Creates a switch to turn the ring cameras light on and off.""" _attr_color_mode = ColorMode.ONOFF _attr_supported_color_modes = {ColorMode.ONOFF} def __init__(self, config_entry_id, device): """Initialize the light.""" super().__init__(config_entry_id, device) self._unique_id = device.id self._light_on = device.lights == ON_STATE self._no_updates_until = dt_util.utcnow() @callback def _update_callback(self): """Call update method.""" if self._no_updates_until > dt_util.utcnow(): return self._light_on = self._device.lights == ON_STATE self.async_write_ha_state() @property def name(self): """Name of the light.""" return f"{self._device.name} light" @property def unique_id(self): """Return a unique 
ID.""" return self._unique_id @property def is_on(self): """If the switch is currently on or off.""" return self._light_on def _set_light(self, new_state): """Update light state, and causes Home Assistant to correctly update.""" try: self._device.lights = new_state except requests.Timeout: _LOGGER.error("Time out setting %s light to %s", self.entity_id, new_state) return self._light_on = new_state == ON_STATE self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY self.async_write_ha_state() def turn_on(self, **kwargs): """Turn the light on for 30 seconds.""" self._set_light(ON_STATE) def turn_off(self, **kwargs): """Turn the light off.""" self._set_light(OFF_STATE)
{ "content_hash": "05d7cfb080e141728552ff5f353dae8b", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 87, "avg_line_length": 30.07843137254902, "alnum_prop": 0.6531942633637549, "repo_name": "toddeye/home-assistant", "id": "5b2cd1c54db113d8d52a0348e0ece51ccdde3e0d", "size": "3068", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/ring/light.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "3005" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "47414832" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
"""Simulate noisy training and target photometric catalogues from SED templates.

Reads a Delight parameter file (first command-line argument), builds a
flux-vs-redshift interpolator for every (template, band) pair, draws random
(redshift, template-type) pairs, perturbs the model fluxes with Gaussian
noise, and writes the resulting training and target catalogues to the files
named in the parameter file.
"""

import sys

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d

from delight.io import *
from delight.utils import *

if len(sys.argv) < 2:
    raise Exception('Please provide a parameter file')
params = parseParamFile(sys.argv[1], verbose=False, catFilesNeeded=False)

dir_seds = params['templates_directory']
sed_names = params['templates_names']

redshiftDistGrid, redshiftGrid, redshiftGridGP = createGrids(params)
numT = len(sed_names)
numB = len(params['bandNames'])
numObjects = params['numObjects']
noiseLevel = params['noiseLevel']

# One linear flux(z) interpolator per (template, band) pair, built from the
# precomputed "<sed>_fluxredshiftmod.txt" tables.
f_mod = np.zeros((numT, numB), dtype=object)
for it, sed_name in enumerate(sed_names):
    data = np.loadtxt(dir_seds + '/' + sed_name + '_fluxredshiftmod.txt')
    for jf in range(numB):
        f_mod[it, jf] = interp1d(redshiftGrid, data[:, jf], kind='linear')


def _simulate_catalogue(prefix, file_key, flux_scale):
    """Draw one random catalogue and write it to ``params[file_key]``.

    Parameters
    ----------
    prefix : str
        Column-layout prefix for readColumnPositions ("training_"/"target_").
    file_key : str
        Key into ``params`` naming the output file.
    flux_scale : float
        Multiplier applied to the model fluxes before adding noise.

    Uses the module-level ``params``, ``f_mod``, ``redshiftGrid``,
    ``numObjects``, ``numB`` and ``noiseLevel``. The order of RNG calls
    (uniform, randint, then one randn per object/band) is kept identical to
    the original duplicated code so seeded runs reproduce the same files.
    """
    redshifts = np.random.uniform(low=redshiftGrid[0],
                                  high=redshiftGrid[-1],
                                  size=numObjects)
    types = np.random.randint(0, high=numT, size=numObjects)
    fluxes = np.zeros((numObjects, numB))
    fluxesVar = np.zeros((numObjects, numB))
    for k in range(numObjects):
        for i in range(numB):
            trueFlux = flux_scale * f_mod[types[k], i](redshifts[k])
            noise = trueFlux * noiseLevel
            fluxes[k, i] = trueFlux + noise * np.random.randn()
            fluxesVar[k, i] = noise**2.

    # Scatter fluxes/variances into the column layout expected downstream,
    # then append redshift and template type.
    data = np.zeros((numObjects, 1 + len(params[prefix + 'bandOrder'])))
    bandIndices, bandNames, bandColumns, bandVarColumns, redshiftColumn,\
        refBandColumn = readColumnPositions(params, prefix=prefix)
    for ib, pf, pfv in zip(bandIndices, bandColumns, bandVarColumns):
        data[:, pf] = fluxes[:, ib]
        data[:, pfv] = fluxesVar[:, ib]
    data[:, redshiftColumn] = redshifts
    data[:, -1] = types
    np.savetxt(params[file_key], data)


# NOTE(review): the original script scaled training fluxes by ell = 1e6 but
# left target fluxes unscaled; that asymmetry is preserved here -- confirm
# it is intentional.
_simulate_catalogue("training_", 'trainingFile', 1e6)
_simulate_catalogue("target_", 'targetFile', 1.)
{ "content_hash": "e972bcd98949cdc7b6def4c7a3445b73", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 78, "avg_line_length": 40.8, "alnum_prop": 0.6698179271708683, "repo_name": "ixkael/Delight", "id": "5382de7f0840c27288a45974062eb3d700880b6b", "size": "2856", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/simulateWithSEDs.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "3398758" }, { "name": "Mathematica", "bytes": "108563" }, { "name": "Python", "bytes": "223027" }, { "name": "Shell", "bytes": "1172" }, { "name": "TeX", "bytes": "106385" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, unicode_literals import unittest from ..expr import parse_expr class ExprTest(unittest.TestCase): def test_equal(self): e = parse_expr("foo=bar") self.assertTrue(e.eval({"foo": "bar"})) self.assertFalse(e.eval({"foo": "not-bar"})) self.assertFalse(e.eval({"not-foo": "bar"})) def test_not_equal(self): e = parse_expr("not(foo=bar)") self.assertFalse(e.eval({"foo": "bar"})) self.assertTrue(e.eval({"foo": "not-bar"})) def test_bad_not(self): with self.assertRaises(Exception): parse_expr("foo=not(bar)") def test_all(self): e = parse_expr("all(foo = bar, baz = qux)") self.assertTrue(e.eval({"foo": "bar", "baz": "qux"})) self.assertFalse(e.eval({"foo": "bar", "baz": "nope"})) self.assertFalse(e.eval({"foo": "nope", "baz": "nope"})) def test_any(self): e = parse_expr("any(foo = bar, baz = qux)") self.assertTrue(e.eval({"foo": "bar", "baz": "qux"})) self.assertTrue(e.eval({"foo": "bar", "baz": "nope"})) self.assertFalse(e.eval({"foo": "nope", "baz": "nope"}))
{ "content_hash": "476ebcc1868551c424a35d02c49243e2", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 82, "avg_line_length": 35.26470588235294, "alnum_prop": 0.5638031693077564, "repo_name": "phoad/rsocket-cpp", "id": "66b7a51b9471fa1d08c862dd7bd82dcbc1f2e2df", "size": "1517", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "build/fbcode_builder/getdeps/test/expr_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1776" }, { "name": "C++", "bytes": "1167119" }, { "name": "CMake", "bytes": "42749" }, { "name": "Python", "bytes": "194459" }, { "name": "Ruby", "bytes": "704" }, { "name": "Shell", "bytes": "12981" } ], "symlink_target": "" }