text
stringlengths
4
1.02M
meta
dict
import itertools  # NOTE(review): unused in this module; kept in case downstream code imports it from here


class Timestepper(object):
    """Calculate the time-tendencies and timestepping of the equation

        dstate/dt = _dstate()

    Concrete users must provide ``self.state`` (array-like supporting
    in-place slice assignment and elementwise ``+``), ``self.dt``, and a
    ``_dstate()`` method returning the instantaneous tendency.
    Subclasses implement :meth:`dstate`, the per-step state increment.
    """

    t = 0.0   # model time (class default; becomes an instance attribute on first step)
    tc = 0    # timestep counter (class default, same pattern)

    def step(self):
        """Advance ``self.state`` by one timestep, in place."""
        self.state[:] = self.state + self.dstate()
        self._incr_timestep()

    def _incr_timestep(self):
        # Advance the clock and the step counter after a completed step.
        self.t = self.t + self.dt
        self.tc = self.tc + 1

    def dstate(self):
        """Return the increment to add to the state for one step.

        Implemented by subclasses.

        Raises:
            NotImplementedError: always, on the base class.
        """
        # BUG FIX: the original `raise NotImplemented()` raised a TypeError
        # ('NotImplementedType' object is not callable) instead of the
        # intended NotImplementedError.
        raise NotImplementedError()


class Euler(Timestepper):
    """First-order forward Euler timestepping."""

    def dstate(self):
        return self.dt * self._dstate()


class AdamsBashforth3(Timestepper):
    """Third-order Adams-Bashforth timestepping.

    Falls back to forward Euler on the first step and to second-order
    Adams-Bashforth on the second step, while the history of previous
    tendencies is built up.
    """

    # Cached tendencies from the previous and previous-previous steps.
    _pfstate, _ppfstate = 0.0, 0.0

    def dstate(self):
        dt = self.dt
        fstate = self._dstate()
        if self.tc == 0:
            # first step: Euler
            dstate = dt * fstate
        elif self.tc == 1:
            # second step: 2nd-order Adams-Bashforth
            dstate = 1.5 * dt * fstate - 0.5 * dt * self._pfstate
        else:
            # 3rd-order Adams-Bashforth (coefficients sum to 1)
            dstate = (23. / 12. * dt * fstate
                      - 16. / 12. * dt * self._pfstate
                      + 5. / 12. * dt * self._ppfstate)
        # update the cached previous tendency values
        self._ppfstate, self._pfstate = self._pfstate, fstate
        return dstate


def sync_step(*timesteppers):
    """Synchronize the stepping of several timesteppers.

    This is important if values of each at a given timestep depend on
    each other, e.g. if a tracer has a feedback onto other tracers or
    state variables.  All tendencies are evaluated before any state is
    updated, so each sees the others' pre-step state.
    """
    dstates = [obj.dstate() for obj in timesteppers]
    for obj, dstate in zip(timesteppers, dstates):
        obj.state = obj.state + dstate
        obj._incr_timestep()
{ "content_hash": "7ba44acdc9b153135557bfc3c7965f5b", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 81, "avg_line_length": 26.046875, "alnum_prop": 0.5704859028194361, "repo_name": "jamesp/shallowwater", "id": "3784f579c08fe5358e218c605d4e41e99a77156e", "size": "1667", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "beta_plane/timesteppers.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "533035" }, { "name": "Python", "bytes": "107483" } ], "symlink_target": "" }
import _plotly_utils.basevalidators


class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of ``area.hoverlabel.font``."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="area.hoverlabel.font", **kwargs
    ):
        # Pull the defaulted options out of kwargs before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
{ "content_hash": "79e9ffc11aa3f189bc6fe5ac970ea458", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 82, "avg_line_length": 33.42857142857143, "alnum_prop": 0.5961538461538461, "repo_name": "plotly/python-api", "id": "e821aa72d7c14d74456eebbe801d70620e5141f8", "size": "468", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packages/python/plotly/plotly/validators/area/hoverlabel/font/_colorsrc.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "6870" }, { "name": "Makefile", "bytes": "1708" }, { "name": "Python", "bytes": "823245" }, { "name": "Shell", "bytes": "3238" } ], "symlink_target": "" }
from azure.identity import DefaultAzureCredential

from azure.mgmt.cosmosdb import CosmosDBManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-cosmosdb
# USAGE
    python cosmos_db_notebook_workspace_regenerate_auth_token.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    # Authenticate via AAD (DefaultAzureCredential reads the env vars above)
    # and target the given subscription.
    client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # Start the long-running regenerate-auth-token operation and block until
    # it finishes, then show the new token payload.
    poller = client.notebook_workspaces.begin_regenerate_auth_token(
        resource_group_name="rg1",
        account_name="ddb1",
        notebook_workspace_name="default",
    )
    print(poller.result())


# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2022-08-15-preview/examples/CosmosDBNotebookWorkspaceRegenerateAuthToken.json
if __name__ == "__main__":
    main()
{ "content_hash": "540ee95431663f95ba985091cdcc06f7", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 169, "avg_line_length": 35.14705882352941, "alnum_prop": 0.7380753138075313, "repo_name": "Azure/azure-sdk-for-python", "id": "46037ff47d46ab6549d0e981d3ac2520e175f304", "size": "1663", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_notebook_workspace_regenerate_auth_token.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
from __future__ import print_function

from os import environ

from twisted.internet.defer import inlineCallbacks

from autobahn.wamp.types import PublishOptions
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner


class Component(ApplicationSession):
    """
    An application component that publishes an event every second.
    """

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")

        # Subscribe to our own topic so published events echo back
        # (exclude_me=False below makes the broker deliver them to us too).
        def on_event(i):
            print("Got event: {}".format(i))

        yield self.subscribe(on_event, u'com.myapp.topic1')

        counter = 0
        while True:
            print("publish: com.myapp.topic1", counter)
            # acknowledge=True makes publish() return a deferred that fires
            # with the broker-assigned publication.
            options = PublishOptions(acknowledge=True, exclude_me=False)
            pub = yield self.publish(
                u'com.myapp.topic1',
                counter,
                options=options,
            )
            print("Published with publication ID {}".format(pub.id))
            counter += 1
            yield sleep(1)


if __name__ == '__main__':
    router = environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws")
    runner = ApplicationRunner(router, u"crossbardemo")
    runner.run(Component)
{ "content_hash": "fb7d817e5e206a9bdeaf45e1d478d26e", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 76, "avg_line_length": 28.17391304347826, "alnum_prop": 0.6080246913580247, "repo_name": "Jenselme/AutobahnPython", "id": "94a97201ee4645f2ad5f490c514dedbef9b8665f", "size": "2573", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/twisted/wamp/pubsub/options/backend.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "3849" }, { "name": "Python", "bytes": "1065688" } ], "symlink_target": "" }
""" Logging package for Python. Based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved. To use, simply 'import logging' and log away! """ import sys, os, time, cStringIO, traceback, warnings, weakref, collections __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 'captureWarnings', 'critical', 'debug', 'disable', 'error', 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning'] try: import codecs except ImportError: codecs = None try: import thread import threading except ImportError: thread = None __author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" __status__ = "production" # Note: the attributes below are no longer maintained. __version__ = "0.5.1.2" __date__ = "07 February 2010" #--------------------------------------------------------------------------- # Miscellaneous module data #--------------------------------------------------------------------------- try: unicode _unicode = True except NameError: _unicode = False # # _srcfile is used when walking the stack to check when we've got the first # caller stack frame. # if hasattr(sys, 'frozen'): #support for py2exe _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:]) elif __file__[-4:].lower() in ['.pyc', '.pyo']: _srcfile = __file__[:-4] + '.py' else: _srcfile = __file__ _srcfile = os.path.normcase(_srcfile) # next bit filched from 1.5.2's inspect.py def currentframe(): """Return the frame object for the caller's stack frame.""" try: raise Exception except: return sys.exc_info()[2].tb_frame.f_back if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3) # done filching # _srcfile is only used in conjunction with sys._getframe(). 
# To provide compatibility with older versions of Python, set _srcfile # to None if _getframe() is not available; this value will prevent # findCaller() from being called. #if not hasattr(sys, "_getframe"): # _srcfile = None # #_startTime is used as the base when calculating the relative time of events # _startTime = time.time() # #raiseExceptions is used to see if exceptions during handling should be #propagated # raiseExceptions = 1 # # If you don't want threading information in the log, set this to zero # logThreads = 1 # # If you don't want multiprocessing information in the log, set this to zero # logMultiprocessing = 1 # # If you don't want process information in the log, set this to zero # logProcesses = 1 #--------------------------------------------------------------------------- # Level related stuff #--------------------------------------------------------------------------- # # Default levels and level names, these can be replaced with any positive set # of values having corresponding names. There is a pseudo-level, NOTSET, which # is only really there as a lower limit for user-defined levels. Handlers and # loggers are initialized with NOTSET so that they will log all messages, even # at user-defined levels. # CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 NOTSET = 0 # NOTE(flaper87): This is different from # python's stdlib module since pypy's # dicts are much faster when their # keys are all of the same type. # Introduced in commit 9de7b40c586f _levelToName = { CRITICAL: 'CRITICAL', ERROR: 'ERROR', WARNING: 'WARNING', INFO: 'INFO', DEBUG: 'DEBUG', NOTSET: 'NOTSET', } _nameToLevel = { 'CRITICAL': CRITICAL, 'ERROR': ERROR, 'WARN': WARNING, 'WARNING': WARNING, 'INFO': INFO, 'DEBUG': DEBUG, 'NOTSET': NOTSET, } _levelNames = dict(_levelToName) _levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ Return the textual representation of logging level 'level'. 
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) then you get the corresponding string. If you have associated levels with names using addLevelName then the name you have associated with 'level' is returned. If a numeric value corresponding to one of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string "Level %s" % level is returned. """ # NOTE(flaper87): Check also in _nameToLevel # if value is None. return (_levelToName.get(level) or _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ Associate 'levelName' with 'level'. This is used when converting levels to text during message formatting. """ _acquireLock() try: #unlikely to cause an exception, but you never know... _levelToName[level] = levelName _nameToLevel[levelName] = level finally: _releaseLock() def _checkLevel(level): if isinstance(level, (int, long)): rv = level elif str(level) == level: if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv #--------------------------------------------------------------------------- # Thread-related stuff #--------------------------------------------------------------------------- # #_lock is used to serialize access to shared data structures in this module. #This needs to be an RLock because fileConfig() creates and configures #Handlers, and so might arbitrary user threads. Since Handler code updates the #shared dictionary _handlers, it needs to acquire the lock. But if configuring, #the lock would already have been acquired - so we need an RLock. #The same argument applies to Loggers and Manager.loggerDict. # if thread: _lock = threading.RLock() else: _lock = None def _acquireLock(): """ Acquire the module-level lock for serializing access to shared data. This should be released with _releaseLock(). 
""" if _lock: _lock.acquire() def _releaseLock(): """ Release the module-level lock acquired by calling _acquireLock(). """ if _lock: _lock.release() #--------------------------------------------------------------------------- # The logging record #--------------------------------------------------------------------------- class LogRecord(object): """ A LogRecord instance represents an event being logged. LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged. """ def __init__(self, name, level, pathname, lineno, msg, args, exc_info, func=None): """ Initialize a logging record with interesting information. """ ct = time.time() self.name = name self.msg = msg # # The following statement allows passing of a dictionary as a sole # argument, so that you can do something like # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) # Suggested by Stefan Behnel. # Note that without the test for args[0], we get a problem because # during formatting, we test to see if the arg is present using # 'if self.args:'. If the event being logged is e.g. 'Value is %d' # and if the passed arg fails 'if self.args:' then no formatting # is done. For example, logger.warn('Value is %d', 0) would log # 'Value is %d' instead of 'Value is 0'. # For the use case of passing a dictionary, this should not be a # problem. # Issue #21172: a request was made to relax the isinstance check # to hasattr(args[0], '__getitem__'). However, the docs on string # formatting still seem to suggest a mapping object is required. 
# Thus, while not removing the isinstance check, it does now look # for collections.Mapping rather than, as before, dict. if (args and len(args) == 1 and isinstance(args[0], collections.Mapping) and args[0]): args = args[0] self.args = args self.levelname = getLevelName(level) self.levelno = level self.pathname = pathname try: self.filename = os.path.basename(pathname) self.module = os.path.splitext(self.filename)[0] except (TypeError, ValueError, AttributeError): self.filename = pathname self.module = "Unknown module" self.exc_info = exc_info self.exc_text = None # used to cache the traceback text self.lineno = lineno self.funcName = func self.created = ct self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() self.threadName = threading.current_thread().name else: self.thread = None self.threadName = None if not logMultiprocessing: self.processName = None else: self.processName = 'MainProcess' mp = sys.modules.get('multiprocessing') if mp is not None: # Errors may occur if multiprocessing has not finished loading # yet - e.g. if a custom import hook causes third-party code # to run when multiprocessing calls import. See issue 8200 # for an example try: self.processName = mp.current_process().name except StandardError: pass if logProcesses and hasattr(os, 'getpid'): self.process = os.getpid() else: self.process = None def __str__(self): return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, self.pathname, self.lineno, self.msg) def getMessage(self): """ Return the message for this LogRecord. Return the message for this LogRecord after merging any user-supplied arguments with the message. """ if not _unicode: #if no unicode support... 
msg = str(self.msg) else: msg = self.msg if not isinstance(msg, basestring): try: msg = str(self.msg) except UnicodeError: msg = self.msg #Defer encoding till later if self.args: msg = msg % self.args return msg def makeLogRecord(dict): """ Make a LogRecord whose attributes are defined by the specified dictionary, This function is useful for converting a logging event received over a socket connection (which is sent as a dictionary) into a LogRecord instance. """ rv = LogRecord(None, None, "", 0, "", (), None, None) rv.__dict__.update(dict) return rv #--------------------------------------------------------------------------- # Formatter classes and functions #--------------------------------------------------------------------------- class Formatter(object): """ Formatter instances are used to convert a LogRecord to text. Formatters need to know how a LogRecord is constructed. They are responsible for converting a LogRecord to (usually) a string which can be interpreted by either a human or an external system. The base Formatter allows a formatting string to be specified. If none is supplied, the default value of "%s(message)\\n" is used. The Formatter can be initialized with a format string which makes use of knowledge of the LogRecord attributes - e.g. the default value mentioned above makes use of the fact that the user's message and arguments are pre- formatted into a LogRecord's message attribute. 
Currently, the useful attributes in a LogRecord are described by: %(name)s Name of the logger (logging channel) %(levelno)s Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL) %(levelname)s Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") %(pathname)s Full pathname of the source file where the logging call was issued (if available) %(filename)s Filename portion of pathname %(module)s Module (name portion of filename) %(lineno)d Source line number where the logging call was issued (if available) %(funcName)s Function name %(created)f Time when the LogRecord was created (time.time() return value) %(asctime)s Textual time when the LogRecord was created %(msecs)d Millisecond portion of the creation time %(relativeCreated)d Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded (typically at application startup time) %(thread)d Thread ID (if available) %(threadName)s Thread name (if available) %(process)d Process ID (if available) %(message)s The result of record.getMessage(), computed just as the record is emitted """ converter = time.localtime def __init__(self, fmt=None, datefmt=None): """ Initialize the formatter with specified format strings. Initialize the formatter either with the specified format string, or a default as described above. Allow for specialized date formatting with the optional datefmt argument (if omitted, you get the ISO8601 format). """ if fmt: self._fmt = fmt else: self._fmt = "%(message)s" self.datefmt = datefmt def formatTime(self, record, datefmt=None): """ Return the creation time of the specified LogRecord as formatted text. This method should be called from format() by a formatter which wants to make use of a formatted time. 
This method can be overridden in formatters to provide for any specific requirement, but the basic behaviour is as follows: if datefmt (a string) is specified, it is used with time.strftime() to format the creation time of the record. Otherwise, the ISO8601 format is used. The resulting string is returned. This function uses a user-configurable function to convert the creation time to a tuple. By default, time.localtime() is used; to change this for a particular formatter instance, set the 'converter' attribute to a function with the same signature as time.localtime() or time.gmtime(). To change it for all formatters, for example if you want all logging times to be shown in GMT, set the 'converter' attribute in the Formatter class. """ ct = self.converter(record.created) if datefmt: s = time.strftime(datefmt, ct) else: t = time.strftime("%Y-%m-%d %H:%M:%S", ct) s = "%s,%03d" % (t, record.msecs) return s def formatException(self, ei): """ Format and return the specified exception information as a string. This default implementation just uses traceback.print_exception() """ sio = cStringIO.StringIO() traceback.print_exception(ei[0], ei[1], ei[2], None, sio) s = sio.getvalue() sio.close() if s[-1:] == "\n": s = s[:-1] return s def usesTime(self): """ Check if the format uses the creation time of the record. """ return self._fmt.find("%(asctime)") >= 0 def format(self, record): """ Format the specified record as text. The record's attribute dictionary is used as the operand to a string formatting operation which yields the returned string. Before formatting the dictionary, a couple of preparatory steps are carried out. The message attribute of the record is computed using LogRecord.getMessage(). If the formatting string uses the time (as determined by a call to usesTime(), formatTime() is called to format the event time. If there is exception information, it is formatted using formatException() and appended to the message. 
""" record.message = record.getMessage() if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) s = self._fmt % record.__dict__ if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: if s[-1:] != "\n": s = s + "\n" try: s = s + record.exc_text except UnicodeError: # Sometimes filenames have non-ASCII chars, which can lead # to errors when s is Unicode and record.exc_text is str # See issue 8924. # We also use replace for when there are multiple # encodings, e.g. UTF-8 for the filesystem and latin-1 # for a script. See issue 13232. s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace') return s # # The default formatter to use when no other is specified # _defaultFormatter = Formatter() class BufferingFormatter(object): """ A formatter suitable for formatting a number of records. """ def __init__(self, linefmt=None): """ Optionally specify a formatter which will be used to format each individual record. """ if linefmt: self.linefmt = linefmt else: self.linefmt = _defaultFormatter def formatHeader(self, records): """ Return the header string for the specified records. """ return "" def formatFooter(self, records): """ Return the footer string for the specified records. """ return "" def format(self, records): """ Format the specified records and return the result as a string. """ rv = "" if len(records) > 0: rv = rv + self.formatHeader(records) for record in records: rv = rv + self.linefmt.format(record) rv = rv + self.formatFooter(records) return rv #--------------------------------------------------------------------------- # Filter classes and functions #--------------------------------------------------------------------------- class Filter(object): """ Filter instances are used to perform arbitrary filtering of LogRecords. 
Loggers and Handlers can optionally use Filter instances to filter records as desired. The base filter class only allows events which are below a certain point in the logger hierarchy. For example, a filter initialized with "A.B" will allow events logged by loggers "A.B", "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If initialized with the empty string, all events are passed. """ def __init__(self, name=''): """ Initialize a filter. Initialize with the name of the logger which, together with its children, will have its events allowed through the filter. If no name is specified, allow every event. """ self.name = name self.nlen = len(name) def filter(self, record): """ Determine if the specified record is to be logged. Is the specified record to be logged? Returns 0 for no, nonzero for yes. If deemed appropriate, the record may be modified in-place. """ if self.nlen == 0: return 1 elif self.name == record.name: return 1 elif record.name.find(self.name, 0, self.nlen) != 0: return 0 return (record.name[self.nlen] == ".") class Filterer(object): """ A base class for loggers and handlers which allows them to share common code. """ def __init__(self): """ Initialize the list of filters to be an empty list. """ self.filters = [] def addFilter(self, filter): """ Add the specified filter to this handler. """ if not (filter in self.filters): self.filters.append(filter) def removeFilter(self, filter): """ Remove the specified filter from this handler. """ if filter in self.filters: self.filters.remove(filter) def filter(self, record): """ Determine if a record is loggable by consulting all the filters. The default is to allow the record to be logged; any filter can veto this and the record is then dropped. Returns a zero value if a record is to be dropped, else non-zero. 
""" rv = 1 for f in self.filters: if not f.filter(record): rv = 0 break return rv #--------------------------------------------------------------------------- # Handler classes and functions #--------------------------------------------------------------------------- _handlers = weakref.WeakValueDictionary() #map of handler names to handlers _handlerList = [] # added to allow handlers to be removed in reverse of order initialized def _removeHandlerRef(wr): """ Remove a handler reference from the internal cleanup list. """ # This function can be called during module teardown, when globals are # set to None. It can also be called from another thread. So we need to # pre-emptively grab the necessary globals and check if they're None, # to prevent race conditions and failures during interpreter shutdown. acquire, release, handlers = _acquireLock, _releaseLock, _handlerList if acquire and release and handlers: acquire() try: if wr in handlers: handlers.remove(wr) finally: release() def _addHandlerRef(handler): """ Add a handler to the internal cleanup list using a weak reference. """ _acquireLock() try: _handlerList.append(weakref.ref(handler, _removeHandlerRef)) finally: _releaseLock() class Handler(Filterer): """ Handler instances dispatch logging events to specific destinations. The base handler class. Acts as a placeholder which defines the Handler interface. Handlers can optionally use Formatter instances to format records as desired. By default, no formatter is specified; in this case, the 'raw' message as determined by record.message is logged. """ def __init__(self, level=NOTSET): """ Initializes the instance - basically setting the formatter to None and the filter list to empty. 
""" Filterer.__init__(self) self._name = None self.level = _checkLevel(level) self.formatter = None # Add the handler to the global _handlerList (for cleanup on shutdown) _addHandlerRef(self) self.createLock() def get_name(self): return self._name def set_name(self, name): _acquireLock() try: if self._name in _handlers: del _handlers[self._name] self._name = name if name: _handlers[name] = self finally: _releaseLock() name = property(get_name, set_name) def createLock(self): """ Acquire a thread lock for serializing access to the underlying I/O. """ if thread: self.lock = threading.RLock() else: self.lock = None def acquire(self): """ Acquire the I/O thread lock. """ if self.lock: self.lock.acquire() def release(self): """ Release the I/O thread lock. """ if self.lock: self.lock.release() def setLevel(self, level): """ Set the logging level of this handler. """ self.level = _checkLevel(level) def format(self, record): """ Format the specified record. If a formatter is set, use it. Otherwise, use the default formatter for the module. """ if self.formatter: fmt = self.formatter else: fmt = _defaultFormatter return fmt.format(record) def emit(self, record): """ Do whatever it takes to actually log the specified logging record. This version is intended to be implemented by subclasses and so raises a NotImplementedError. """ raise NotImplementedError('emit must be implemented ' 'by Handler subclasses') def handle(self, record): """ Conditionally emit the specified logging record. Emission depends on filters which may have been added to the handler. Wrap the actual emission of the record with acquisition/release of the I/O thread lock. Returns whether the filter passed the record for emission. """ rv = self.filter(record) if rv: self.acquire() try: self.emit(record) finally: self.release() return rv def setFormatter(self, fmt): """ Set the formatter for this handler. """ self.formatter = fmt def flush(self): """ Ensure all logging output has been flushed. 
This version does nothing and is intended to be implemented by subclasses. """ pass def close(self): """ Tidy up any resources used by the handler. This version removes the handler from an internal map of handlers, _handlers, which is used for handler lookup by name. Subclasses should ensure that this gets called from overridden close() methods. """ #get the module data lock, as we're updating a shared structure. _acquireLock() try: #unlikely to raise an exception, but you never know... if self._name and self._name in _handlers: del _handlers[self._name] finally: _releaseLock() def handleError(self, record): """ Handle errors which occur during an emit() call. This method should be called from handlers when an exception is encountered during an emit() call. If raiseExceptions is false, exceptions get silently ignored. This is what is mostly wanted for a logging system - most users will not care about errors in the logging system, they are more interested in application errors. You could, however, replace this with a custom handler if you wish. The record which was being processed is passed in to this method. """ if raiseExceptions and sys.stderr: # see issue 13807 ei = sys.exc_info() try: traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) sys.stderr.write('Logged from file %s, line %s\n' % ( record.filename, record.lineno)) except IOError: pass # see issue 5971 finally: del ei class StreamHandler(Handler): """ A handler class which writes logging records, appropriately formatted, to a stream. Note that this class does not close the stream, as sys.stdout or sys.stderr may be used. """ def __init__(self, stream=None): """ Initialize the handler. If stream is not specified, sys.stderr is used. """ Handler.__init__(self) if stream is None: stream = sys.stderr self.stream = stream def flush(self): """ Flushes the stream. 
""" self.acquire() try: if self.stream and hasattr(self.stream, "flush"): self.stream.flush() finally: self.release() def emit(self, record): """ Emit a record. If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an 'encoding' attribute, it is used to determine how to do the output to the stream. """ try: msg = self.format(record) stream = self.stream fs = "%s\n" if not _unicode: #if no unicode support... stream.write(fs % msg) else: try: if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)): ufs = u'%s\n' try: stream.write(ufs % msg) except UnicodeEncodeError: #Printing to terminals sometimes fails. For example, #with an encoding of 'cp1251', the above write will #work if written to a stream opened or wrapped by #the codecs module, but fail when writing to a #terminal even when the codepage is set to cp1251. #An extra encoding step seems to be needed. stream.write((ufs % msg).encode(stream.encoding)) else: stream.write(fs % msg) except UnicodeError: stream.write(fs % msg.encode("UTF-8")) self.flush() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class FileHandler(StreamHandler): """ A handler class which writes formatted logging records to disk files. """ def __init__(self, filename, mode='a', encoding=None, delay=0): """ Open the specified file and use it as the stream for logging. """ #keep the absolute path, otherwise derived classes which use this #may come a cropper when the current directory changes if codecs is None: encoding = None self.baseFilename = os.path.abspath(filename) self.mode = mode self.encoding = encoding self.delay = delay if delay: #We don't open the stream, but we still need to call the #Handler constructor to set level, formatter, lock etc. 
Handler.__init__(self) self.stream = None else: StreamHandler.__init__(self, self._open()) def close(self): """ Closes the stream. """ self.acquire() try: if self.stream: self.flush() if hasattr(self.stream, "close"): self.stream.close() self.stream = None # Issue #19523: call unconditionally to # prevent a handler leak when delay is set StreamHandler.close(self) finally: self.release() def _open(self): """ Open the current base file with the (original) mode and encoding. Return the resulting stream. """ if self.encoding is None: stream = open(self.baseFilename, self.mode) else: stream = codecs.open(self.baseFilename, self.mode, self.encoding) return stream def emit(self, record): """ Emit a record. If the stream was not opened because 'delay' was specified in the constructor, open it before calling the superclass's emit. """ if self.stream is None: self.stream = self._open() StreamHandler.emit(self, record) #--------------------------------------------------------------------------- # Manager classes and functions #--------------------------------------------------------------------------- class PlaceHolder(object): """ PlaceHolder instances are used in the Manager logger hierarchy to take the place of nodes for which no loggers have been defined. This class is intended for internal use only and not as part of the public API. """ def __init__(self, alogger): """ Initialize with the specified logger being a child of this placeholder. """ #self.loggers = [alogger] self.loggerMap = { alogger : None } def append(self, alogger): """ Add the specified logger as a child of this placeholder. """ #if alogger not in self.loggers: if alogger not in self.loggerMap: #self.loggers.append(alogger) self.loggerMap[alogger] = None # # Determine which class to use when instantiating loggers. # _loggerClass = None def setLoggerClass(klass): """ Set the class to be used when instantiating a logger. 
The class should define __init__() such that only a name argument is required, and the __init__() should call Logger.__init__() """ if klass != Logger: if not issubclass(klass, Logger): raise TypeError("logger not derived from logging.Logger: " + klass.__name__) global _loggerClass _loggerClass = klass def getLoggerClass(): """ Return the class to be used when instantiating a logger. """ return _loggerClass class Manager(object): """ There is [under normal circumstances] just one Manager instance, which holds the hierarchy of loggers. """ def __init__(self, rootnode): """ Initialize the manager with the root node of the logger hierarchy. """ self.root = rootnode self.disable = 0 self.emittedNoHandlerWarning = 0 self.loggerDict = {} self.loggerClass = None def getLogger(self, name): """ Get a logger with the specified name (channel name), creating it if it doesn't yet exist. This name is a dot-separated hierarchical name, such as "a", "a.b", "a.b.c" or similar. If a PlaceHolder existed for the specified name [i.e. the logger didn't exist but a child of it did], replace it with the created logger and fix up the parent/child references which pointed to the placeholder to now point to the logger. """ rv = None if not isinstance(name, basestring): raise TypeError('A logger name must be string or Unicode') if isinstance(name, unicode): name = name.encode('utf-8') _acquireLock() try: if name in self.loggerDict: rv = self.loggerDict[name] if isinstance(rv, PlaceHolder): ph = rv rv = (self.loggerClass or _loggerClass)(name) rv.manager = self self.loggerDict[name] = rv self._fixupChildren(ph, rv) self._fixupParents(rv) else: rv = (self.loggerClass or _loggerClass)(name) rv.manager = self self.loggerDict[name] = rv self._fixupParents(rv) finally: _releaseLock() return rv def setLoggerClass(self, klass): """ Set the class to be used when instantiating a logger with this Manager. 
""" if klass != Logger: if not issubclass(klass, Logger): raise TypeError("logger not derived from logging.Logger: " + klass.__name__) self.loggerClass = klass def _fixupParents(self, alogger): """ Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy. """ name = alogger.name i = name.rfind(".") rv = None while (i > 0) and not rv: substr = name[:i] if substr not in self.loggerDict: self.loggerDict[substr] = PlaceHolder(alogger) else: obj = self.loggerDict[substr] if isinstance(obj, Logger): rv = obj else: assert isinstance(obj, PlaceHolder) obj.append(alogger) i = name.rfind(".", 0, i - 1) if not rv: rv = self.root alogger.parent = rv def _fixupChildren(self, ph, alogger): """ Ensure that children of the placeholder ph are connected to the specified logger. """ name = alogger.name namelen = len(name) for c in ph.loggerMap.keys(): #The if means ... if not c.parent.name.startswith(nm) if c.parent.name[:namelen] != name: alogger.parent = c.parent c.parent = alogger #--------------------------------------------------------------------------- # Logger classes and functions #--------------------------------------------------------------------------- class Logger(Filterer): """ Instances of the Logger class represent a single logging channel. A "logging channel" indicates an area of an application. Exactly how an "area" is defined is up to the application developer. Since an application can have any number of areas, logging channels are identified by a unique string. Application areas can be nested (e.g. an area of "input processing" might include sub-areas "read CSV files", "read XLS files" and "read Gnumeric files"). To cater for this natural nesting, channel names are organized into a namespace hierarchy where levels are separated by periods, much like the Java or Python package namespace. 
So in the instance given above, channel names might be "input" for the upper level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. There is no arbitrary limit to the depth of nesting. """ def __init__(self, name, level=NOTSET): """ Initialize the logger with a name and an optional level. """ Filterer.__init__(self) self.name = name self.level = _checkLevel(level) self.parent = None self.propagate = 1 self.handlers = [] self.disabled = 0 def setLevel(self, level): """ Set the logging level of this logger. """ self.level = _checkLevel(level) def debug(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) """ if self.isEnabledFor(DEBUG): self._log(DEBUG, msg, args, **kwargs) def info(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'INFO'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.info("Houston, we have a %s", "interesting problem", exc_info=1) """ if self.isEnabledFor(INFO): self._log(INFO, msg, args, **kwargs) def warning(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'WARNING'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) """ if self.isEnabledFor(WARNING): self._log(WARNING, msg, args, **kwargs) warn = warning def error(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'ERROR'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.error("Houston, we have a %s", "major problem", exc_info=1) """ if self.isEnabledFor(ERROR): self._log(ERROR, msg, args, **kwargs) def exception(self, msg, *args, **kwargs): """ Convenience method for logging an ERROR with exception information. 
""" kwargs['exc_info'] = 1 self.error(msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'CRITICAL'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.critical("Houston, we have a %s", "major disaster", exc_info=1) """ if self.isEnabledFor(CRITICAL): self._log(CRITICAL, msg, args, **kwargs) fatal = critical def log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """ if not isinstance(level, int): if raiseExceptions: raise TypeError("level must be an integer") else: return if self.isEnabledFor(level): self._log(level, msg, args, **kwargs) def findCaller(self): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. """ f = currentframe() #On some versions of IronPython, currentframe() returns None if #IronPython isn't run with -X:Frames. if f is not None: f = f.f_back rv = "(unknown file)", 0, "(unknown function)" while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) if filename == _srcfile: f = f.f_back continue rv = (co.co_filename, f.f_lineno, co.co_name) break return rv def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None): """ A factory method which can be overridden in subclasses to create specialized LogRecords. 
""" rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv def _log(self, level, msg, args, exc_info=None, extra=None): """ Low-level logging routine which creates a LogRecord and then calls all the handlers of this logger to handle the record. """ if _srcfile: #IronPython doesn't track Python frames, so findCaller raises an #exception on some versions of IronPython. We trap it here so that #IronPython can use logging. try: fn, lno, func = self.findCaller() except ValueError: fn, lno, func = "(unknown file)", 0, "(unknown function)" else: fn, lno, func = "(unknown file)", 0, "(unknown function)" if exc_info: if not isinstance(exc_info, tuple): exc_info = sys.exc_info() record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra) self.handle(record) def handle(self, record): """ Call the handlers for the specified record. This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied. """ if (not self.disabled) and self.filter(record): self.callHandlers(record) def addHandler(self, hdlr): """ Add the specified handler to this logger. """ _acquireLock() try: if not (hdlr in self.handlers): self.handlers.append(hdlr) finally: _releaseLock() def removeHandler(self, hdlr): """ Remove the specified handler from this logger. """ _acquireLock() try: if hdlr in self.handlers: self.handlers.remove(hdlr) finally: _releaseLock() def callHandlers(self, record): """ Pass a record to all relevant handlers. Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. 
Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called. """ c = self found = 0 while c: for hdlr in c.handlers: found = found + 1 if record.levelno >= hdlr.level: hdlr.handle(record) if not c.propagate: c = None #break out else: c = c.parent if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning: sys.stderr.write("No handlers could be found for logger" " \"%s\"\n" % self.name) self.manager.emittedNoHandlerWarning = 1 def getEffectiveLevel(self): """ Get the effective level for this logger. Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found. """ logger = self while logger: if logger.level: return logger.level logger = logger.parent return NOTSET def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ if self.manager.disable >= level: return 0 return level >= self.getEffectiveLevel() def getChild(self, suffix): """ Get a logger which is a descendant to this one. This is a convenience method, such that logging.getLogger('abc').getChild('def.ghi') is the same as logging.getLogger('abc.def.ghi') It's useful, for example, when the parent logger is named using __name__ rather than a literal string. """ if self.root is not self: suffix = '.'.join((self.name, suffix)) return self.manager.getLogger(suffix) class RootLogger(Logger): """ A root logger is not that different to any other logger, except that it must have a logging level and there is only one instance of it in the hierarchy. """ def __init__(self, level): """ Initialize the logger with the name "root". """ Logger.__init__(self, "root", level) _loggerClass = Logger class LoggerAdapter(object): """ An adapter for loggers which makes it easier to specify contextual information in logging output. 
""" def __init__(self, logger, extra): """ Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired. You can effectively pass keyword arguments as shown in the following example: adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) """ self.logger = logger self.extra = extra def process(self, msg, kwargs): """ Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs. Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs. """ kwargs["extra"] = self.extra return msg, kwargs def debug(self, msg, *args, **kwargs): """ Delegate a debug call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.debug(msg, *args, **kwargs) def info(self, msg, *args, **kwargs): """ Delegate an info call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.info(msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): """ Delegate a warning call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.warning(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): """ Delegate an error call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.error(msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): """ Delegate an exception call to the underlying logger, after adding contextual information from this adapter instance. 
""" msg, kwargs = self.process(msg, kwargs) kwargs["exc_info"] = 1 self.logger.error(msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """ Delegate a critical call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.critical(msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): """ Delegate a log call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) def isEnabledFor(self, level): """ See if the underlying logger is enabled for the specified level. """ return self.logger.isEnabledFor(level) root = RootLogger(WARNING) Logger.root = root Logger.manager = Manager(Logger.root) #--------------------------------------------------------------------------- # Configuration classes and functions #--------------------------------------------------------------------------- BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" def basicConfig(**kwargs): """ Do basic configuration for the logging system. This function does nothing if the root logger already has handlers configured. It is a convenience method intended for use by simple scripts to do one-shot configuration of the logging package. The default behaviour is to create a StreamHandler which writes to sys.stderr, set a formatter using the BASIC_FORMAT format string, and add the handler to the root logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. filename Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. filemode Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to 'a'). format Use the specified format string for the handler. datefmt Use the specified date/time format. level Set the root logger level to the specified level. 
stream Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with 'filename' - if both are present, 'stream' is ignored. Note that you could specify a stream created using open(filename, mode) rather than passing the filename and mode in. However, it should be remembered that StreamHandler does not close its stream (since it may be using sys.stdout or sys.stderr), whereas FileHandler closes its stream when the handler is closed. """ # Add thread safety in case someone mistakenly calls # basicConfig() from multiple threads _acquireLock() try: if len(root.handlers) == 0: filename = kwargs.get("filename") if filename: mode = kwargs.get("filemode", 'a') hdlr = FileHandler(filename, mode) else: stream = kwargs.get("stream") hdlr = StreamHandler(stream) fs = kwargs.get("format", BASIC_FORMAT) dfs = kwargs.get("datefmt", None) fmt = Formatter(fs, dfs) hdlr.setFormatter(fmt) root.addHandler(hdlr) level = kwargs.get("level") if level is not None: root.setLevel(level) finally: _releaseLock() #--------------------------------------------------------------------------- # Utility functions at module level. # Basically delegate everything to the root logger. #--------------------------------------------------------------------------- def getLogger(name=None): """ Return a logger with the specified name, creating it if necessary. If no name is specified, return the root logger. """ if name: return Logger.manager.getLogger(name) else: return root #def getRootLogger(): # """ # Return the root logger. # # Note that getLogger('') now does the same thing, so this function is # deprecated and may disappear in the future. # """ # return root def critical(msg, *args, **kwargs): """ Log a message with severity 'CRITICAL' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.critical(msg, *args, **kwargs) fatal = critical def error(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger. 
""" if len(root.handlers) == 0: basicConfig() root.error(msg, *args, **kwargs) def exception(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger, with exception information. """ kwargs['exc_info'] = 1 error(msg, *args, **kwargs) def warning(msg, *args, **kwargs): """ Log a message with severity 'WARNING' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.warning(msg, *args, **kwargs) warn = warning def info(msg, *args, **kwargs): """ Log a message with severity 'INFO' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.info(msg, *args, **kwargs) def debug(msg, *args, **kwargs): """ Log a message with severity 'DEBUG' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.debug(msg, *args, **kwargs) def log(level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.log(level, msg, *args, **kwargs) def disable(level): """ Disable all logging calls of severity 'level' and below. """ root.manager.disable = level def shutdown(handlerList=_handlerList): """ Perform any cleanup actions in the logging system (e.g. flushing buffers). Should be called at application exit. """ for wr in reversed(handlerList[:]): #errors might occur, for example, if files are locked #we just ignore them if raiseExceptions is not set try: h = wr() if h: try: h.acquire() h.flush() h.close() except (IOError, ValueError): # Ignore errors which might be caused # because handlers have been closed but # references to them are still around at # application exit. pass finally: h.release() except: if raiseExceptions: raise #else, swallow #Let's try and shutdown automatically on application exit... import atexit atexit.register(shutdown) # Null handler class NullHandler(Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. 
This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. """ def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None # Warnings integration _warnings_showwarning = None def _showwarning(message, category, filename, lineno, file=None, line=None): """ Implementation of showwarnings which redirects to logging, which will first check to see if the file parameter is None. If a file is specified, it will delegate to the original warnings implementation of showwarning. Otherwise, it will call warnings.formatwarning and will log the resulting string to a warnings logger named "py.warnings" with level logging.WARNING. """ if file is not None: if _warnings_showwarning is not None: _warnings_showwarning(message, category, filename, lineno, file, line) else: s = warnings.formatwarning(message, category, filename, lineno, line) logger = getLogger("py.warnings") if not logger.handlers: logger.addHandler(NullHandler()) logger.warning("%s", s) def captureWarnings(capture): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. """ global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = _showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None
{ "content_hash": "9297ffb7f3616ce21b8b03fbb11a1bbe", "timestamp": "", "source": "github", "line_count": 1734, "max_line_length": 93, "avg_line_length": 34.825836216839676, "alnum_prop": 0.5804630058952109, "repo_name": "andela-earinde/bellatrix-py", "id": "f6498d24440ed8fe6d42e42ac50666005340380d", "size": "61375", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "app/js/lib/lib/modules/logging/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8833" }, { "name": "HTML", "bytes": "2381" }, { "name": "JavaScript", "bytes": "12775582" }, { "name": "Python", "bytes": "15057969" } ], "symlink_target": "" }
# Stress test: string (qstr) interning must be thread-safe.
# Several threads concurrently compile and run small code snippets whose
# string literals are brand new, forcing the interpreter to intern new
# strings in parallel while other threads do the same.

# MicroPython exposes time as utime; fall back to stdlib time under CPython.
try:
    import utime as time
except ImportError:
    import time

import _thread


# function to check the interned string: it must be a real str and its
# integer value must round-trip to the expected number
def check(s, val):
    assert type(s) == str
    assert int(s) == val


# main thread function: interns n distinct numeric strings starting at `base`
def th(base, n):
    for i in range(n):
        # Compiling this snippet interns the '%u' literal as a new qstr
        # (each thread gets a disjoint numeric range, so every literal is
        # previously unseen), then check() validates the interned value.
        exec("check('%u', %u)" % (base + i, base + i))
    # Completion counter is shared between threads, so update it under the lock.
    with lock:
        global n_finished
        n_finished += 1


lock = _thread.allocate_lock()
n_thread = 4  # number of worker threads
n_finished = 0  # how many workers have completed (guarded by `lock` on write)
n_qstr_per_thread = 100  # make 1000 for a more stressful test (uses more heap)

# spawn threads, each with its own non-overlapping block of numbers
for i in range(n_thread):
    _thread.start_new_thread(th, (i * n_qstr_per_thread, n_qstr_per_thread))

# wait for threads to finish
# NOTE(review): n_finished is read here without taking the lock; assumed safe
# because the main thread only polls a monotonically increasing counter —
# confirm this holds on ports without a GIL.
while n_finished < n_thread:
    time.sleep(1)

print("pass")
{ "content_hash": "6f2eed40c8855ecc919cb74ba6fb12df", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 79, "avg_line_length": 21.135135135135137, "alnum_prop": 0.6368286445012787, "repo_name": "adafruit/circuitpython", "id": "2099f94bdbf5c78dce8db9cc81951c88d4d31a3a", "size": "942", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/thread/thread_qstr1.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "10241" }, { "name": "C", "bytes": "18450191" }, { "name": "C++", "bytes": "476" }, { "name": "CMake", "bytes": "18203" }, { "name": "CSS", "bytes": "316" }, { "name": "HTML", "bytes": "10126" }, { "name": "JavaScript", "bytes": "13854" }, { "name": "Jinja", "bytes": "11034" }, { "name": "Makefile", "bytes": "330832" }, { "name": "Python", "bytes": "1423935" }, { "name": "Shell", "bytes": "18681" } ], "symlink_target": "" }
"""passlib.handlers.digests - plain hash digests """ #============================================================================= # imports #============================================================================= # core from base64 import b64encode, b64decode from hashlib import md5, sha1 import logging; log = logging.getLogger(__name__) import re # site # pkg from passlib.handlers.misc import plaintext from passlib.utils import unix_crypt_schemes, to_unicode from passlib.utils.compat import uascii_to_str, unicode, u from passlib.utils.decor import classproperty import passlib.utils.handlers as uh # local __all__ = [ "ldap_plaintext", "ldap_md5", "ldap_sha1", "ldap_salted_md5", "ldap_salted_sha1", ##"get_active_ldap_crypt_schemes", "ldap_des_crypt", "ldap_bsdi_crypt", "ldap_md5_crypt", "ldap_sha1_crypt" "ldap_bcrypt", "ldap_sha256_crypt", "ldap_sha512_crypt", ] #============================================================================= # ldap helpers #============================================================================= class _Base64DigestHelper(uh.StaticHandler): """helper for ldap_md5 / ldap_sha1""" # XXX: could combine this with hex digests in digests.py ident = None # required - prefix identifier _hash_func = None # required - hash function _hash_regex = None # required - regexp to recognize hash checksum_chars = uh.PADDED_BASE64_CHARS @classproperty def _hash_prefix(cls): """tell StaticHandler to strip ident from checksum""" return cls.ident def _calc_checksum(self, secret): if isinstance(secret, unicode): secret = secret.encode("utf-8") chk = self._hash_func(secret).digest() return b64encode(chk).decode("ascii") class _SaltedBase64DigestHelper(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler): """helper for ldap_salted_md5 / ldap_salted_sha1""" setting_kwds = ("salt", "salt_size") checksum_chars = uh.PADDED_BASE64_CHARS ident = None # required - prefix identifier _hash_func = None # required - hash function _hash_regex = None # required 
- regexp to recognize hash min_salt_size = max_salt_size = 4 # NOTE: openldap implementation uses 4 byte salt, # but it's been reported (issue 30) that some servers use larger salts. # the semi-related rfc3112 recommends support for up to 16 byte salts. min_salt_size = 4 default_salt_size = 4 max_salt_size = 16 @classmethod def from_string(cls, hash): hash = to_unicode(hash, "ascii", "hash") m = cls._hash_regex.match(hash) if not m: raise uh.exc.InvalidHashError(cls) try: data = b64decode(m.group("tmp").encode("ascii")) except TypeError: raise uh.exc.MalformedHashError(cls) cs = cls.checksum_size assert cs return cls(checksum=data[:cs], salt=data[cs:]) def to_string(self): data = self.checksum + self.salt hash = self.ident + b64encode(data).decode("ascii") return uascii_to_str(hash) def _calc_checksum(self, secret): if isinstance(secret, unicode): secret = secret.encode("utf-8") return self._hash_func(secret + self.salt).digest() #============================================================================= # implementations #============================================================================= class ldap_md5(_Base64DigestHelper): """This class stores passwords using LDAP's plain MD5 format, and follows the :ref:`password-hash-api`. The :meth:`~passlib.ifc.PasswordHash.hash` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods have no optional keywords. """ name = "ldap_md5" ident = u("{MD5}") _hash_func = md5 _hash_regex = re.compile(u(r"^\{MD5\}(?P<chk>[+/a-zA-Z0-9]{22}==)$")) class ldap_sha1(_Base64DigestHelper): """This class stores passwords using LDAP's plain SHA1 format, and follows the :ref:`password-hash-api`. The :meth:`~passlib.ifc.PasswordHash.hash` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods have no optional keywords. 
""" name = "ldap_sha1" ident = u("{SHA}") _hash_func = sha1 _hash_regex = re.compile(u(r"^\{SHA\}(?P<chk>[+/a-zA-Z0-9]{27}=)$")) class ldap_salted_md5(_SaltedBase64DigestHelper): """This class stores passwords using LDAP's salted MD5 format, and follows the :ref:`password-hash-api`. It supports a 4-16 byte salt. The :meth:`~passlib.ifc.PasswordHash.using` method accepts the following optional keywords: :type salt: bytes :param salt: Optional salt string. If not specified, one will be autogenerated (this is recommended). If specified, it may be any 4-16 byte string. :type salt_size: int :param salt_size: Optional number of bytes to use when autogenerating new salts. Defaults to 4 bytes for compatibility with the LDAP spec, but some systems use larger salts, and Passlib supports any value between 4-16. :type relaxed: bool :param relaxed: By default, providing an invalid value for one of the other keywords will result in a :exc:`ValueError`. If ``relaxed=True``, and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning` will be issued instead. Correctable errors include ``salt`` strings that are too long. .. versionadded:: 1.6 .. versionchanged:: 1.6 This format now supports variable length salts, instead of a fix 4 bytes. """ name = "ldap_salted_md5" ident = u("{SMD5}") checksum_size = 16 _hash_func = md5 _hash_regex = re.compile(u(r"^\{SMD5\}(?P<tmp>[+/a-zA-Z0-9]{27,}={0,2})$")) class ldap_salted_sha1(_SaltedBase64DigestHelper): """This class stores passwords using LDAP's salted SHA1 format, and follows the :ref:`password-hash-api`. It supports a 4-16 byte salt. The :meth:`~passlib.ifc.PasswordHash.using` method accepts the following optional keywords: :type salt: bytes :param salt: Optional salt string. If not specified, one will be autogenerated (this is recommended). If specified, it may be any 4-16 byte string. :type salt_size: int :param salt_size: Optional number of bytes to use when autogenerating new salts. 
Defaults to 4 bytes for compatibility with the LDAP spec, but some systems use larger salts, and Passlib supports any value between 4-16. :type relaxed: bool :param relaxed: By default, providing an invalid value for one of the other keywords will result in a :exc:`ValueError`. If ``relaxed=True``, and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning` will be issued instead. Correctable errors include ``salt`` strings that are too long. .. versionadded:: 1.6 .. versionchanged:: 1.6 This format now supports variable length salts, instead of a fix 4 bytes. """ name = "ldap_salted_sha1" ident = u("{SSHA}") checksum_size = 20 _hash_func = sha1 _hash_regex = re.compile(u(r"^\{SSHA\}(?P<tmp>[+/a-zA-Z0-9]{32,}={0,2})$")) class ldap_plaintext(plaintext): """This class stores passwords in plaintext, and follows the :ref:`password-hash-api`. This class acts much like the generic :class:`!passlib.hash.plaintext` handler, except that it will identify a hash only if it does NOT begin with the ``{XXX}`` identifier prefix used by RFC2307 passwords. The :meth:`~passlib.ifc.PasswordHash.hash`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods all require the following additional contextual keyword: :type encoding: str :param encoding: This controls the character encoding to use (defaults to ``utf-8``). This encoding will be used to encode :class:`!unicode` passwords under Python 2, and decode :class:`!bytes` hashes under Python 3. .. versionchanged:: 1.6 The ``encoding`` keyword was added. """ # NOTE: this subclasses plaintext, since all it does differently # is override identify() name = "ldap_plaintext" _2307_pat = re.compile(u(r"^\{\w+\}.*$")) @uh.deprecated_method(deprecated="1.7", removed="2.0") @classmethod def genconfig(cls): # Overridding plaintext.genconfig() since it returns "", # but have to return non-empty value due to identify() below return "!" 
@classmethod def identify(cls, hash): # NOTE: identifies all strings EXCEPT those with {XXX} prefix hash = uh.to_unicode_for_identify(hash) return bool(hash) and cls._2307_pat.match(hash) is None #============================================================================= # {CRYPT} wrappers # the following are wrappers around the base crypt algorithms, # which add the ldap required {CRYPT} prefix #============================================================================= ldap_crypt_schemes = [ 'ldap_' + name for name in unix_crypt_schemes ] def _init_ldap_crypt_handlers(): # NOTE: I don't like to implicitly modify globals() like this, # but don't want to write out all these handlers out either :) g = globals() for wname in unix_crypt_schemes: name = 'ldap_' + wname g[name] = uh.PrefixWrapper(name, wname, prefix=u("{CRYPT}"), lazy=True) del g _init_ldap_crypt_handlers() ##_lcn_host = None ##def get_host_ldap_crypt_schemes(): ## global _lcn_host ## if _lcn_host is None: ## from passlib.hosts import host_context ## schemes = host_context.schemes() ## _lcn_host = [ ## "ldap_" + name ## for name in unix_crypt_names ## if name in schemes ## ] ## return _lcn_host #============================================================================= # eof #=============================================================================
{ "content_hash": "e6e23c22fccd2dfc1b12914ecc10428a", "timestamp": "", "source": "github", "line_count": 272, "max_line_length": 159, "avg_line_length": 37.16544117647059, "alnum_prop": 0.6006528835690969, "repo_name": "williamfeng323/py-web", "id": "4356f071e83fb4e56254d0208fc207f73a3de852", "size": "10109", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "flask/lib/python3.6/site-packages/passlib/handlers/ldap_digests.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "39957" }, { "name": "CSS", "bytes": "6270" }, { "name": "HTML", "bytes": "6046" }, { "name": "JavaScript", "bytes": "6264" }, { "name": "Mako", "bytes": "10018" }, { "name": "Python", "bytes": "15554131" }, { "name": "Shell", "bytes": "6007" } ], "symlink_target": "" }
from azure.identity import DefaultAzureCredential from azure.mgmt.appcomplianceautomation import AppComplianceAutomationToolForMicrosoft365 """ # PREREQUISITES pip install azure-identity pip install azure-mgmt-appcomplianceautomation # USAGE python snapshot_download_compliance_detailed_pdf_report.py Before run the sample, please set the values of the client ID, tenant ID and client secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET. For more info about how to get the value, please see: https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal """ def main(): client = AppComplianceAutomationToolForMicrosoft365( credential=DefaultAzureCredential(), ) response = client.snapshot.begin_download( report_name="testReportName", snapshot_name="testSnapshotName", parameters={ "downloadType": "ComplianceDetailedPdfReport", "offerGuid": "00000000-0000-0000-0000-000000000000", "reportCreatorTenantId": "00000000-0000-0000-0000-000000000000", }, ).result() print(response) # x-ms-original-file: specification/appcomplianceautomation/resource-manager/Microsoft.AppComplianceAutomation/preview/2022-11-16-preview/examples/Snapshot_ComplianceDetailedPdfReport_Download.json if __name__ == "__main__": main()
{ "content_hash": "02f3fe0bc914faa0ab6a23b0bb5c18a8", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 197, "avg_line_length": 38.810810810810814, "alnum_prop": 0.7423398328690808, "repo_name": "Azure/azure-sdk-for-python", "id": "1570d71f23b2cf16d4dd5a6e7f426bc79835ce7e", "size": "1904", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/appcomplianceautomation/azure-mgmt-appcomplianceautomation/generated_samples/snapshot_download_compliance_detailed_pdf_report.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
import asyncio import iterm2 async def update(connection, theme): # Themes have space-delimited attributes, one of which will be light or dark. parts = theme.split(" ") if "dark" in parts: preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-dark-256") else: preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-light-256") # Update the list of all profiles and iterate over them. profiles=await iterm2.PartialProfile.async_query(connection) for partial in profiles: # Fetch the full profile and then set the color preset in it. profile = await partial.async_get_full_profile() await profile.async_set_color_preset(preset) async def main(connection): app = await iterm2.async_get_app(connection) await update(connection, await app.async_get_variable("effectiveTheme")) async with iterm2.VariableMonitor(connection, iterm2.VariableScopes.APP, "effectiveTheme", None) as mon: while True: # Block until theme changes theme = await mon.async_get() await update(connection, theme) iterm2.run_forever(main)
{ "content_hash": "f156f6525d359fa89928d3f22b4d1441", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 108, "avg_line_length": 41.964285714285715, "alnum_prop": 0.7038297872340425, "repo_name": "timriley/dotfiles", "id": "f71d54b96a3930c6779dc4acaf02a9055a09e4e1", "size": "1374", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Library/Application Support/iTerm2/Scripts/AutoLaunch/change_color_preset_on_theme_change.py", "mode": "33188", "license": "mit", "language": [ { "name": "Lua", "bytes": "15780" }, { "name": "Python", "bytes": "1374" }, { "name": "Shell", "bytes": "20068" } ], "symlink_target": "" }
from neon import NervanaObject from neon.transforms import CrossEntropyBinary, Logistic from neon.util.persist import load_obj from neon.layers import Merge, Activation class Model(NervanaObject): """ Basic model class which stores a list of layers describing the model. Can train the layer weights on a dataset, evaluate on a test set and serialize the mode. Additional functionality can be added to fit through callback functions. Arguments: layers (list): List of layers that compose a model. name (str): Model name. Defaults to "model" optimizer (Optimizer): Optimizer object which defines the learning rule for updating model parameters (ie DescentMomentum, AdaDelta) """ def __init__(self, layers=[], name="model", optimizer=None): super(Model, self).__init__(name) self.optimizer = optimizer self.params = None self.states = None self.epoch_index = 0 self.finished = False self.layers = [] self.layers_to_optimize = [] for layer in layers: if isinstance(layer, list): self.layers.extend(layer) else: self.layers.append(layer) for layer in self.layers: if layer.has_params: self.layers_to_optimize.append(layer) elif isinstance(layer, Merge): self.layers_to_optimize += layer.layers_to_optimize def set_shortcut(self): # infer whether bprop shortcut can be used on final activation # self.cost should be set to run this otherwise do nothing lastlayer = self.layers[-1] try: if self.cost.costfunc.__class__ is CrossEntropyBinary: if (lastlayer.__class__ is Activation and lastlayer.transform.__class__ is Logistic): lastlayer.transform.set_shortcut(True) except: # if any attributes are not set or any other exception # is thrown leave transform.shortcut as is (do nothing) pass def load_weights(self, weight_path): """ Loads the layer weights saved in weight_path from serialize(). Arguments: weight_path (str): File containing serialized python dict with layer weights and states. 
""" pdict = load_obj(weight_path) self.epoch_index = pdict['epoch_index'] param_layers = [l for l in self.layers_to_optimize] param_dict_list = pdict['layer_params_states'] for l, ps in zip(param_layers, param_dict_list): l.set_params(ps['params']) if 'states' in ps: l.set_states(ps['states']) def fit(self, dataset, cost, optimizer, num_epochs, callbacks): """ Trains the model parameters on a dataset by minimizing the cost function through gradient descent and updates the layer weights according to a learning rule defined in optimizer. Arguments: dataset (iterator): An iterable of minibatches where each element is a (x, y) tuple where x is the input data and y are the labels. x is of dimension (feature_size, batch_size) y is of dimension (label_size, batch_size) Length of the iterator is num_batches which is num_data / batch_size cost (Cost): Defines the function which the model is minimizing based on the output of the last layer and the input labels optimizer (Optimizer): Defines the learning rule for updating the model parameters num_epochs: Number of times to iterate over the dataset. """ self.cost = cost self.set_shortcut() # infer if bprop shortcut can be used self.optimizer = optimizer self.total_cost = self.be.empty((1, 1)) callbacks.on_train_begin(num_epochs) while self.epoch_index < num_epochs and not self.finished: callbacks.on_epoch_begin(self.epoch_index) self._epoch_fit(dataset, callbacks) callbacks.on_epoch_end(self.epoch_index) self.epoch_index += 1 callbacks.on_train_end() def _epoch_fit(self, dataset, callbacks): """ Helper function for fit which performs training on a dataset for one epoch. 
Arguments: dataset (iterable): Dataset iterator to perform fit on """ epoch = self.epoch_index self.total_cost[:] = 0 # iterate through minibatches of the dataset for mb_idx, (x, t) in enumerate(dataset): callbacks.on_minibatch_begin(epoch, mb_idx) x = self.fprop(x) self.total_cost[:] = self.total_cost + self.cost.get_cost(x, t) # deltas back propagate through layers # for every layer in reverse except the 0th one delta = self.cost.get_errors(x, t) self.bprop(delta) self.optimizer.optimize(self.layers_to_optimize, epoch=epoch) callbacks.on_minibatch_end(epoch, mb_idx) # now we divide total cost by the number of batches, # so it was never total cost, but sum of averages # across all the minibatches we trained on self.total_cost[:] = self.total_cost / dataset.nbatches def fprop(self, x, inference=False): """ Forward propagates a minibatch x through the model. Arguments: x (Tensor): Input minibatch data inference (bool): Flag for performing training or inference Only affects batch norm and dropout layers. Returns: Tensor: the output of the final layer in the model """ for l in self.layers: x = l.fprop(x, inference) return x def bprop(self, delta, do_acts=True): """ Back propagates the error of a minibatch through the model. Arguments: delta (Tensor): Derivative of cost with respect to the last layer's output do_acts (bool): Whether to compute the output deltas of layer. The first layer does not need to compute output deltas and so do_acts is set to False. """ for l in reversed(self.layers[1:]): delta = l.bprop(delta) return self.layers[0].bprop(delta, do_acts=False) def eval(self, dataset, metric): """ Evaluates a model on a dataset according to an input metric. Arguments: datasets (iterable): dataset to evaluate on. metric (Cost): what function to evaluate dataset on. 
""" running_error = 0.0 nprocessed = 0 dataset.reset() for x, t in dataset: x = self.fprop(x, inference=True) # This logic is for handling partial batch sizes at the end of the dataset bsz = min(dataset.ndata - nprocessed, self.be.bsz) metric(x, t) running_error += metric.outputs.get()[:, :bsz].sum() nprocessed += bsz running_error /= nprocessed return running_error def get_description(self): """ Gets a description of the model required to reconstruct the model with no weights like from a yaml file. Returns: dict: Description of each component of the model. """ pdict = dict() pdict['backend'] = 'gpu' pdict['cost'] = self.cost.costfunc.__class__.__name__ pdict['layers'] = [l.get_description() for l in self.layers] if self.optimizer: pdict['optimizer'] = self.optimizer.get_description() return pdict # serialize tells how to write out the parameters we've learned so # far and associate them with layers. it can ignore layers with no # learned parameters. the model stores states to pass to the # optimizers. if we're saving the model out for inference, we # don't need to remember states. def serialize(self, keep_states=True): """ Creates a dictionary storing the layer parameters and epochs complete. Arguments: keep_states (bool): Whether to save optimizer states. Returns: dict: Model data including layer parameters and epochs complete. """ pdict = dict() params_states = [l.get_params_serialize(keep_states) for l in self.layers_to_optimize] pdict['layer_params_states'] = params_states # start training again on the next epoch pdict['epoch_index'] = self.epoch_index + 1 return pdict
{ "content_hash": "314bdb2f53971dd24bbe7fb2b3f155c0", "timestamp": "", "source": "github", "line_count": 234, "max_line_length": 94, "avg_line_length": 37.24786324786325, "alnum_prop": 0.604290959155576, "repo_name": "zolegus/neon", "id": "d8f5e579aca19f96b0a21ef8b94854143715bb6e", "size": "9458", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "neon/models/model.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4620" }, { "name": "C++", "bytes": "13448" }, { "name": "CSS", "bytes": "810211" }, { "name": "Cuda", "bytes": "87750" }, { "name": "Makefile", "bytes": "8939" }, { "name": "Python", "bytes": "771791" } ], "symlink_target": "" }
""" EasyBuild support for GCC compiler toolchain. @author: Kenneth Hoste (Ghent University) """ from easybuild.toolchains.compiler.gcc import Gcc class GccToolchain(Gcc): """Simple toolchain with just the GCC compilers.""" NAME = 'GCC'
{ "content_hash": "da2774a814fe917117fcb5e8e1a4b00f", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 55, "avg_line_length": 20.666666666666668, "alnum_prop": 0.7258064516129032, "repo_name": "ULHPC/modules", "id": "60115094d76b9d21caca81e0758c48d2787a5492", "size": "1306", "binary": false, "copies": "5", "ref": "refs/heads/devel", "path": "easybuild/easybuild-framework/easybuild/toolchains/gcc.py", "mode": "33188", "license": "mit", "language": [ { "name": "Groff", "bytes": "36174" }, { "name": "Perl", "bytes": "34780" }, { "name": "Python", "bytes": "2711250" }, { "name": "Ruby", "bytes": "932" }, { "name": "Shell", "bytes": "51560" } ], "symlink_target": "" }
from faps.genotypeArray import genotypeArray from faps.alogsumexp import alogsumexp import numpy as np from warnings import warn class paternityArray(object): """ Likelihoods of that any of a set of candidate males is the true father of each offspring individual, assuming the mother is known. Call the wrapper function `paternity_array`. Parameters ---------- likelihood: array Array of log likelihoods of paternity. Rows index offspring and columns candidates. The ij^th element is the likelihood that candidate j is the sire of offspring i. lik_absent: array Vector of log likelihoods that the true sire of each offspring is missing from the sample of candidate males, and hence that offspring alleles are drawn from population allele frequencies. Should be the same length as the number of rows in `likelihood`. offspring: array-like Identifiers for each offspring individual. mothers: array-like Identifiers of the mother of each individual, if these are known. fathers: array-like Identifiers of the father of each individual, if these are known. candidates: array-like Identifiers of the candidate fathers. mu: float Point estimate of the genotyping error rate. Note that sibship clustering is unstable if mu_input is set to exactly zero. Any zero values will therefore be set to a very small number close to zero (10^-12). missing_parents : float between zero and one Input value for the proportion of adults who are missing from the sample. This is used to weight the probabilties of paternity for each father relative to the probability that a father was not sampled. Defaults to zero, but you should definitely change this. purge: vector of str, or float between zero and one, optional Individuals who can be removed from the paternity array a priori. If a float is given, that proportion of individuals is removed from the array at random. Alternatively a string or vector of strings containing names of specific candidate fathers can be supplied. 
selfing_rate: float between zero and one, optional Input value for the prior probability of self-fertilisation. clashes: array, optional Array of the numbers of opposing homozygous incompatibilities for all possible parent-offspring dyads. This should have a row for every offspring and a column for every candidate father, and hence be the same shape as the `likelihood` array. max_clashes: int, optional Maximum number of double-homozygous incompatibilities allowed between offspring and candidate fathers. Dyads with more incompatibilities than this will have their probability set to zero. Returns ------- prob_array: array Array of probabilities of paternity, accounting for the probability that a sire has not been sampled. This is the array `likelihood` with vector `lik_absent` appended, with rows normalised to sum to one. """ def __init__(self, likelihood, lik_absent, offspring, mothers, fathers, candidates, missing_parents=0, mu=None, purge=None, selfing_rate=None, clashes = None, max_clashes = None, covariate=0): self.mu = mu self.offspring = offspring self.mothers = mothers self.fathers = fathers self.candidates = candidates self.lik_array = likelihood self.lik_absent = lik_absent self.clashes = clashes self.covariate = covariate self.mu = mu self.purge = purge self.missing_parents = missing_parents self.selfing_rate = selfing_rate self.max_clashes = max_clashes def add_covariate(self, covariate): """ Include a vector of (log) probabilities associated with covariates that provide additional information about paternity beyond that provided by genetic information (e.g. geographic distances). Parameters ---------- covariate: 1-d array Vector of (log) probabilities of paternity based on non-genetic information, with one element for every candidate father. If this is a function of multiple sources they should be multiplied and included in this vector. If a list of offspring arrays have been supplied, this should be a list of vectors. 
Returns ------- No output is printed; the covariate is added to the paternityArray as the attribute 'covariate'. Any existing information is overwritten. The vector is appended with an additional zero to allow for the final column of a the prob_array item in a paternityArray that accounts for the probability of missing fathers. """ if isinstance(covariate, np.ndarray): if len(covariate.shape) > 1: raise ValueError("covariate should be a 1-d array, but has shape {}".format(covariate.shape)) if len(self.candidates) != covariate.shape[0]: raise ValueError("Length of vector of covariates ({}) does not match the number of fathers ({})".format(len(self.candidates), covariate.shape[0])) if not all(covariate <= 0): warn("Not all values in covariate are less or equal to zero. Is it possible probabilities have not been log transformed?") covariate = np.append(covariate, 0) self.covariate = covariate return None else: raise TypeError("covariate should be a 1-d NumPy array.") def prob_array(self): """ Construct an array of log posterior probabilities that each offspring is sired by each of the candidate males in the sample, or that the true father is not present in the sample. Rows are normalised to some to one. Additional arguments can specify the proportion of missing fathers, and the rate of self-fertilisation. Parameters ---------- None. Returns ------- An array with a row for each offspring individual and column for each candidate male, with an extra final column for the probability that the offspring is drawn from population allele frequencies. Each element is a log probability, and as such each row sums to one. Examples -------- from faps import * import numpy as np # Generate a population of adults allele_freqs = np.random.uniform(0.3,0.5,50) adults = make_parents(20, allele_freqs) # Mate the first adult to the next three. 
mother = adults.subset(0) progeny = make_sibships(adults, 0, [1,2,3], 5, 'x') # Create paternityArray patlik = paternity_array( progeny, mother, adults, mu=0.0013, missing_parents=0.2 ) # View posterior probabilities of paternity (G matrix in the FAPS paper) patlik.prob_array() # Change some parameters that will affect the G matrix. patlik.missing_parents = 0.3 patlik.selfing_rate = 0 # Explicitly set some candidates to have probabilities of paternity to zero patlik.purge = 'base_0' # Another way to remove the mother patlik.purge = ['base_15', 'base_16', 'base_17'] # remove a list of specific candidates patlik.purge = 0.2 # Throw two candidates out at random # Another way to do the previous steps in a direct call to paternity_array. paternity_array( progeny, mother, adults, mu=0.0013, missing_parents=0.2, selfing_rate=0, purge = 0.2 ) """ new_array = np.append(self.lik_array, self.lik_absent[:,np.newaxis], 1) # set log lik of individuals to be purged to -Inf if self.purge is not None: nc = len(self.candidates) # If a float is given, remove candidates at random. if isinstance(self.purge, float): if self.purge <= 0 or self.purge >= 1: raise ValueError(" Error: purge must be between zero and one.") # Random set of candidate indices to be purged. ix = np.random.choice(range(nc), np.round(self.purge*nc).astype('int'), replace=False) with np.errstate(divide='ignore'): new_array[:, ix] = np.log(0) # If one or more integers is given, remove candidates at those indices elif isinstance(self.purge, (list, np.ndarray, int, str)): if isinstance(self.purge, (int, str)): self.purge = [self.purge] # If all entries are strings, find the names of the candidates. 
if all([isinstance(x, str) for x in self.purge]): if not all(np.isin(self.purge, self.candidates)): raise ValueError("One or more names in paternityArray.purge are not found in paternityArray.candidates") self.purge = [np.where(x == self.candidates)[0][0] for x in self.purge] with np.errstate(divide='ignore'): new_array[:, self.purge] = np.log(0) else: raise TypeError("Error: purge should be a float or list of floats between zero and one.") # correct for input parameter for proportion of missing fathers. if not isinstance(self.missing_parents, (int, float)): raise TypeError("missing_parents should be between 0 and 1.") # apply correction for the prior on number of missing parents. if self.missing_parents < 0 or self.missing_parents >1: raise ValueError("missing_parents must be between 0 and 1!") # if missing_parents is between zero and one, correct the likelihoods. if self.missing_parents >= 0 and self.missing_parents <= 1: if self.missing_parents ==0: warn("Missing_parents set to 0. Only continue if you are sure you really have 100% of possible fathers.") if self.missing_parents ==1: warn("Missing_parents set to 100%. Are you sure this is what you mean?") with np.errstate(divide='ignore'): new_array[:, -1] = new_array[:, -1] + np.log( self.missing_parents) new_array[:,:-1] = new_array[:,:-1] + np.log(1-self.missing_parents) # if missing_parents is 0, set the term for unrelated fathers to zero. if self.missing_parents == 0: with np.errstate(divide='ignore'): new_array[:,-1] = np.log(0) # correct for selfing rate. if self.selfing_rate is not None: if not isinstance(self.selfing_rate, (int, float)): raise TypeError("selfing_rate should be between 0 and 1.") if self.selfing_rate < 0 or self.selfing_rate >1: raise ValueError("selfing_rate must be between 0 and 1.") if self.selfing_rate >=0 and self.selfing_rate <=1: if self.selfing_rate == 1: warn("selfing_rate set to 100%. 
Are you sure that is what you meant?") ix = range(len(self.offspring)) with np.errstate(divide='ignore'): maternal_pos = [np.where(np.array(self.candidates) == self.mothers[i])[0][0] for i in ix] # positions of the mothers new_array[ix, maternal_pos] += np.log(self.selfing_rate) # set the likelihood dyads with many incompatibilities to zero if self.max_clashes is not None: if not isinstance(self.max_clashes, int): raise TypeError("paternityArray.max_clashes should be a positive integer") if self.max_clashes <= 0: raise ValueError("paternityArray.max_clashes should be greater than zero.") if self.clashes is None: raise TypeError("Unable to adjust for number of incompatible homozygous loci because `paternityArray.clashes` is `None`.") elif self.clashes.shape != self.lik_array.shape: raise ValueError("Shape of the likelihood array does not match that of the array of clashes.") else: inc = np.append(self.clashes, np.zeros(self.lik_absent.shape)[:,np.newaxis], 1) # add an extra column so the shapes match with np.errstate(divide='ignore'): ix = np.log(inc <= self.max_clashes) # index elements to alter new_array = new_array + ix # normalise so rows sum to one. new_array = new_array - alogsumexp(new_array, axis=1)[:,np.newaxis] return new_array def subset(self, indices): """ Subset offspring in a paternity array. Parameters ---------- indices: List or array of integers Positions of individuals to subset. Returns ------- A paternityArray object for the individuals indexed by `indices`. Examples -------- from faps import * import numpy as np # Generate a population of adults allele_freqs = np.random.uniform(0.3,0.5,50) adults = make_parents(20, allele_freqs) # Mate the first adult to the next three. 
mother = adults.subset(0) progeny = make_sibships(adults, 0, [1,2,3], 5, 'x') # Create paternityArray patlik = paternity_array(progeny, mother, adults, mu=0.0013) # Pull out data for only the first family patlik.subset([0,1,2,3,4]) """ # If index is for a single individual, make it a list anyway. if isinstance(indices, int): individuals = [indices] # Subset original data. new_array = paternityArray( likelihood = self.lik_array[indices], lik_absent = self.lik_absent[indices], offspring = self.offspring[indices], mothers = self.mothers[indices], fathers = self.fathers[indices], candidates = self.candidates, missing_parents= self.missing_parents, purge = self.purge, mu = self.mu, selfing_rate = self.selfing_rate, max_clashes = self.max_clashes ) # Add additional attributes is these exist if self.mu is not None: new_array.mu = self.mu if self.clashes is not None: new_array.clashes = self.clashes if self.covariate is not None: new_array.covariate = self.covariate # Return substted paternityArray return new_array def split(self, by, return_dict=True): """ Split up a paternityArray into groups according to some grouping factor. For example, divide an array for multiple half-sibling arrays by the ID of their mothers. Parameters ---------- by: array-like Vector containing grouping labels for each individual. return_dict: logical If True, the output is returned as a dictionary of paternityArray objects indexed by entries in `by`. If False, a list is returned. Defaults to True. Returns ------- A dictionary of paternityArray objects. Examples -------- from faps import * import numpy as np # Generate a population of adults allele_freqs = np.random.uniform(0.3,0.5,50) adults = make_parents(20, allele_freqs) # Mate the first adult to the next three. 
mother = adults.subset(0) progeny = make_sibships(adults, 0, [1,2,3], 5, 'x') # Create paternityArray patlik = paternity_array(progeny, mother, adults, mu=0.0013) # Pull out data for only the first family patlik.split(by=progeny.fathers) """ groups = np.unique(by) ix = [np.where(by == i)[0] for i in groups] # Split into separate arrays. if return_dict: output = {k:self.subset(i) for k,i in zip(groups, ix)} else: output = [self.subset(i) for i in ix] #return output return output def write(self, path, decimals=3): """ Write a matrix of (unnormalised) likelihoods of paternity to disk. Parameters ---------- path: str Path to write to. decimals: int Number of decimal places to be saved to disk for likleihood values. Returns ------- A CSV file indexing offspring ID, mother ID, followed by a matrix of likelihoods that each candidate male is the true father of each individual. The final column is the likelihood that the paternal alleles are drawn from population allele frequencies. """ # append offspring and mother IDs onto the likelihood array. # append likelihoods of abset fathers on the back. newdata = np.append(self.offspring[:,np.newaxis], np.append(self.mothers[:,np.newaxis], np.append(np.around(self.lik_array, decimals), np.around(self.lik_absent[:,np.newaxis], decimals), 1),1),1) # headers cn = ','.join(self.candidates ) cn = 'offspringID,motherID,' + cn + ',missing_father' # write to disk np.savetxt(path, newdata, fmt='%s', delimiter=',', comments='', header=cn)
{ "content_hash": "3f22156475bf71dad667df945170eaba", "timestamp": "", "source": "github", "line_count": 392, "max_line_length": 196, "avg_line_length": 45.380102040816325, "alnum_prop": 0.6116701332283996, "repo_name": "ellisztamas/faps", "id": "9ad808f06882e4b4daa88f4beddfe87b720388c5", "size": "17789", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "faps/paternityArray.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "234542" } ], "symlink_target": "" }
''' program a script that prints the name of your current working directory, makes a new file in it and then put some arbitrary text in that new file '''
{ "content_hash": "47151d6104b435b24bac07e5cbd01794", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 73, "avg_line_length": 38.75, "alnum_prop": 0.7548387096774194, "repo_name": "noisebridge/PythonClass", "id": "39181593754106a0373ea2ea75238a6b1565029d", "size": "155", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "instructors/lessons/practical_utils/examples/3-prob-newfile.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3255" }, { "name": "HTML", "bytes": "524536" }, { "name": "Jupyter Notebook", "bytes": "493067" }, { "name": "Mako", "bytes": "824" }, { "name": "Perl", "bytes": "34109" }, { "name": "Python", "bytes": "474536" }, { "name": "Shell", "bytes": "263" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12);
{ "content_hash": "d6c7e267d079e29b0a154ecc1532088a", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 162, "avg_line_length": 37.42857142857143, "alnum_prop": 0.7022900763358778, "repo_name": "antoinecarme/pyaf", "id": "0ecbf43c5c7be1c0ef88e01996df3edf686468fc", "size": "262", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_0/ar_12/test_artificial_1024_Fisher_Lag1Trend_0_12_0.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
from boto.dynamodb2.table import Table
from contextlib import contextmanager
import pytest

from mycroft.models.redshift_clusters import RedshiftClusters
from tests.data.mock_config import MOCK_CONFIG
from tests.data.redshift_cluster import SAMPLE_REDSHIFT_ID
from tests.data.redshift_cluster import SAMPLE_CLUSTER_ITEMS
from tests.models.test_abstract_records import dynamodb_connection  # noqa
from mycroft.models.aws_connections import get_avro_schema
from tests.models.test_abstract_records import NAME_TO_SCHEMA


@contextmanager
def make_temp_redshift_clusters_table(
        connection,
        table_name=MOCK_CONFIG['aws_config']['redshift_clusters']):
    """Create a temporary DynamoDB redshift-clusters table for a test.

    Yields the created :class:`Table` and deletes it on exit, even when the
    body raises.  The ``assert`` ensures a failed delete fails the test
    rather than silently leaking the table.
    """
    table = Table.create(
        table_name,
        schema=NAME_TO_SCHEMA['redshift_clusters'],
        connection=connection)
    try:
        yield table
    finally:
        assert table.delete()


class FakeRedshiftCluster(object):

    def __init__(self, fake_input_record):
        """
        FakeRedshiftCluster supports a partial interface for RedshiftCluster
        namely, the __init__ and get methods
        """
        self._fake_record = fake_input_record

    def get(self, **kwargs):
        # For each requested key, return the fake record's value when
        # present, otherwise fall back to the caller-supplied default.
        # NOTE: ``iteritems`` is the Python 2 dict iterator.
        output_dict = dict((key, self._fake_record.get(key, value))
                           for key, value in kwargs.iteritems())
        return output_dict


class TestRedshiftClusters(object):

    @pytest.yield_fixture  # noqa
    def redshift_clusters(self, dynamodb_connection):
        """Fixture: a RedshiftClusters model backed by a temp table,
        pre-populated with SAMPLE_CLUSTER_ITEMS.  The temp-table context
        manager tears the table down after the test."""
        avro_schema = get_avro_schema('mycroft/avro/redshift_cluster.json')
        with make_temp_redshift_clusters_table(
                dynamodb_connection, 'RedshiftClusters') as table:
            redshift_clusters = RedshiftClusters(
                persistence_object=table,
                avro_schema_object=avro_schema)
            for redshift_cluster in SAMPLE_CLUSTER_ITEMS:
                assert redshift_clusters.put(**redshift_cluster)
            yield redshift_clusters

    @classmethod
    def create_fake_redshift_cluster(cls, keyword_dict):
        # Convenience wrapper so tests can build a stand-in cluster record.
        return FakeRedshiftCluster(keyword_dict)

    def _get_redshift_clusters(self, dynamodb_connection):  # noqa
        """Create a 'RedshiftClusters' table and model WITHOUT auto-cleanup.

        WARNING -- this method requires cleanup; the user must remember to
        delete the table once complete.

        For example:

        >>> NEW_JOB = {'redshift_id': 'rs_1', 'host': 'host', 'port': 5439}
        >>> def cool_test_fn(dynamodb_connection):
        >>>     trc = TestRedshiftClusters()
        >>>     table, redshift_clusters = trc._get_redshift_clusters(dynamodb_connection)
        >>>     assert redshift_clusters.put(**NEW_JOB)
        >>>     yield redshift_clusters
        >>>     assert table.delete()  # THIS IS THE KEY CLEANUP!!
        """
        avro_schema = get_avro_schema('mycroft/avro/redshift_cluster.json')
        table = Table.create(
            'RedshiftClusters',
            schema=NAME_TO_SCHEMA['redshift_clusters'],
            connection=dynamodb_connection)
        return table, RedshiftClusters(persistence_object=table,
                                       avro_schema_object=avro_schema)

    def test_get(self, redshift_clusters):
        # query by a value
        # make sure it includes only the value we are looking for
        cluster = redshift_clusters.get(redshift_id=SAMPLE_REDSHIFT_ID)
        # We have to convert the iterable to list because
        # there are multiple properties we want to verify
        assert cluster.get(redshift_id=None)['redshift_id'] == SAMPLE_REDSHIFT_ID
{ "content_hash": "791aad97145f06cb63284a82bf9ccc85", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 97, "avg_line_length": 38.7752808988764, "alnum_prop": 0.6638655462184874, "repo_name": "Yelp/mycroft", "id": "8d6eb1a0cabed50c252ed76199c55461fec6cb09", "size": "3475", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mycroft/tests/models/test_redshift_cluster.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "389" }, { "name": "Dockerfile", "bytes": "1402" }, { "name": "HTML", "bytes": "32973" }, { "name": "JavaScript", "bytes": "41476" }, { "name": "Makefile", "bytes": "2094" }, { "name": "Python", "bytes": "590882" }, { "name": "Shell", "bytes": "1011" } ], "symlink_target": "" }
from django.contrib import admin

# Register models of the ``tcc`` app with the Django admin site here, e.g.:
# from tcc.models import MyModel
# admin.site.register(MyModel, MyModelAdmin)
{ "content_hash": "90c69cabe9cbdae459e63856c558dd87", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 44, "avg_line_length": 28, "alnum_prop": 0.8035714285714286, "repo_name": "pterk/django-tcc-tmp", "id": "60a67e2708106b167a1a09b0c2da3cf2fc2bbdec", "size": "112", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tcc/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "8052" }, { "name": "Python", "bytes": "58622" }, { "name": "Shell", "bytes": "4511" } ], "symlink_target": "" }
import os
import unittest

from telemetry import page as page_module
from telemetry import value
from telemetry.page import page_set
from telemetry.value import none_values
from telemetry.value import scalar


class TestBase(unittest.TestCase):
  # Shared fixture: a three-page page set used by every test below.
  def setUp(self):
    ps = page_set.PageSet(file_path=os.path.dirname(__file__))
    ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
    ps.AddUserStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir))
    ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
    self.page_set = ps

  @property
  def pages(self):
    return self.page_set.pages


class ValueTest(TestBase):
  # Tests for telemetry.value.scalar.ScalarValue.

  def testBuildbotValueType(self):
    # important=True maps to the 'default' buildbot data type;
    # important=False maps to 'unimportant'.
    page0 = self.pages[0]
    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True)
    self.assertEquals('default', v.GetBuildbotDataType(
        value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
    self.assertEquals([3], v.GetBuildbotValue())
    self.assertEquals(('x', page0.display_name),
                      v.GetChartAndTraceNameForPerPageResult())

    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False)
    self.assertEquals(
        'unimportant',
        v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))

  def testScalarSamePageMerging(self):
    # Merging two scalars from the same page keeps the page and collects
    # both values.
    page0 = self.pages[0]
    v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
    v1 = scalar.ScalarValue(page0, 'x', 'unit', 2)
    self.assertTrue(v1.IsMergableWith(v0))

    vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
    self.assertEquals(page0, vM.page)
    self.assertEquals('x', vM.name)
    self.assertEquals('unit', vM.units)
    self.assertEquals(True, vM.important)
    self.assertEquals([1, 2], vM.values)

  def testScalarDifferentPageMerging(self):
    # Merging scalars from different pages drops the page (None).
    page0 = self.pages[0]
    page1 = self.pages[1]
    v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
    v1 = scalar.ScalarValue(page1, 'x', 'unit', 2)

    vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
    self.assertEquals(None, vM.page)
    self.assertEquals('x', vM.name)
    self.assertEquals('unit', vM.units)
    self.assertEquals(True, vM.important)
    self.assertEquals([1, 2], vM.values)

  def testScalarWithNoneValueMerging(self):
    # A None value in the inputs poisons the merge: result is None with a
    # standard merge-failure reason.
    page0 = self.pages[0]
    v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
    v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n')
    self.assertTrue(v1.IsMergableWith(v0))

    vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
    self.assertEquals(None, vM.values)
    self.assertEquals(none_values.MERGE_FAILURE_REASON, vM.none_value_reason)

  def testScalarWithNoneValueMustHaveNoneReason(self):
    page0 = self.pages[0]
    self.assertRaises(none_values.NoneValueMissingReason,
                      lambda: scalar.ScalarValue(page0, 'x', 'unit', None))

  def testScalarWithNoneReasonMustHaveNoneValue(self):
    page0 = self.pages[0]
    self.assertRaises(none_values.ValueMustHaveNoneValue,
                      lambda: scalar.ScalarValue(page0, 'x', 'unit', 1,
                                                 none_value_reason='n'))

  def testAsDict(self):
    v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False)
    d = v.AsDictWithoutBaseClassEntries()

    self.assertEquals(d, {
          'value': 42
        })

  def testNoneValueAsDict(self):
    # A None value serializes together with its reason.
    v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
                           none_value_reason='n')
    d = v.AsDictWithoutBaseClassEntries()

    self.assertEquals(d, {
          'value': None,
          'none_value_reason': 'n'
        })

  def testFromDictInt(self):
    d = {
      'type': 'scalar',
      'name': 'x',
      'units': 'unit',
      'value': 42
    }

    v = value.Value.FromDict(d, {})
    self.assertTrue(isinstance(v, scalar.ScalarValue))
    self.assertEquals(v.value, 42)

  def testFromDictFloat(self):
    d = {
      'type': 'scalar',
      'name': 'x',
      'units': 'unit',
      'value': 42.4
    }

    v = value.Value.FromDict(d, {})
    self.assertTrue(isinstance(v, scalar.ScalarValue))
    self.assertEquals(v.value, 42.4)

  def testFromDictNoneValue(self):
    d = {
      'type': 'scalar',
      'name': 'x',
      'units': 'unit',
      'value': None,
      'none_value_reason': 'n'
    }

    v = value.Value.FromDict(d, {})
    self.assertTrue(isinstance(v, scalar.ScalarValue))
    self.assertEquals(v.value, None)
    self.assertEquals(v.none_value_reason, 'n')
{ "content_hash": "3981cbbc73ad262e24c14f43fc711c0f", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 78, "avg_line_length": 31.615384615384617, "alnum_prop": 0.64145100641451, "repo_name": "mohamed--abdel-maksoud/chromium.src", "id": "14abd21818e742020bdae11b7b2ce3a98d72ae57", "size": "4683", "binary": false, "copies": "12", "ref": "refs/heads/nw12", "path": "tools/telemetry/telemetry/value/scalar_unittest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "34522" }, { "name": "Batchfile", "bytes": "8451" }, { "name": "C", "bytes": "4205474" }, { "name": "C++", "bytes": "227982243" }, { "name": "CSS", "bytes": "875874" }, { "name": "Dart", "bytes": "74976" }, { "name": "Go", "bytes": "18155" }, { "name": "HTML", "bytes": "27190037" }, { "name": "Java", "bytes": "7645280" }, { "name": "JavaScript", "bytes": "18828195" }, { "name": "Makefile", "bytes": "96270" }, { "name": "Objective-C", "bytes": "1226550" }, { "name": "Objective-C++", "bytes": "7575073" }, { "name": "PHP", "bytes": "97817" }, { "name": "PLpgSQL", "bytes": "248854" }, { "name": "Perl", "bytes": "63937" }, { "name": "Protocol Buffer", "bytes": "418340" }, { "name": "Python", "bytes": "8032766" }, { "name": "Shell", "bytes": "464218" }, { "name": "Standard ML", "bytes": "4965" }, { "name": "XSLT", "bytes": "418" }, { "name": "nesC", "bytes": "18335" } ], "symlink_target": "" }
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from nltk.stem.snowball import SnowballStemmer

stemmer = SnowballStemmer('english')

# Load the Home Depot search-relevance data from fixed local paths.
df_train = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/test.csv', encoding="ISO-8859-1")
df_pro_desc = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/product_descriptions.csv')

#Get number of rows in the train dataframe
#(used later to split the combined frame back into train/test)
num_train = df_train.shape[0]

df_train.head()

#function that takes a string s,
#splits s into words,
#lower-cases all words,
#iterates through the words getting each word's stem,
#and returns the stems joined by single spaces
def str_stemmer(s):
    return " ".join([stemmer.stem(word) for word in s.lower().split()])

str_stemmer("angle bracket")

#given two strings,
#count how many whole words of str1 occur as substrings of str2
def str_common_word(str1, str2):
    return sum(int(str2.find(word)>=0) for word in str1.split())

#bring train and test together
#then bring in description
#for 1 massive data frame
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
df_all = pd.merge(df_all, df_pro_desc, how='left', on='product_uid')

df_all.describe()
df_all.head()

#stem all of the different text fields
df_all['search_term'] = df_all['search_term'].map(lambda x:str_stemmer(x))
df_all['product_title'] = df_all['product_title'].map(lambda x:str_stemmer(x))
df_all['product_description'] = df_all['product_description'].map(lambda x:str_stemmer(x))

# Feature engineering: query length plus word-overlap counts between the
# query and the title / description.
df_all['len_of_query'] = df_all['search_term'].map(lambda x:len(x.split())).astype(np.int64)
df_all['product_info'] = df_all['search_term']+"\t"+df_all['product_title']+"\t"+df_all['product_description']
df_all['word_in_title'] = df_all['product_info'].map(lambda x:str_common_word(x.split('\t')[0],x.split('\t')[1]))
df_all['word_in_description'] = df_all['product_info'].map(lambda x:str_common_word(x.split('\t')[0],x.split('\t')[2]))

# Drop the raw text columns, keeping only the numeric features.
df_all = df_all.drop(['search_term','product_title','product_description','product_info'],axis=1)

# Split back into train/test using the saved row count.
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
id_test = df_test['id']

y_train = df_train['relevance'].values
X_train = df_train.drop(['id','relevance'],axis=1).values
X_test = df_test.drop(['id','relevance'],axis=1).values

# Bagged random forest regressor; fixed seeds keep the run reproducible.
rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.1, random_state=25)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

#C:\Users\DIXON15\Documents\Python Scripts
pd.DataFrame({"id": id_test, "relevance": y_pred}).to_csv('pythonSubmission.csv',index=False)
{ "content_hash": "ff60300153e9a2a8edcc06119628bd56", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 141, "avg_line_length": 41.35294117647059, "alnum_prop": 0.7272403982930299, "repo_name": "jamessdixon/Kaggle.HomeDepot", "id": "7e133122586fa8abea379c825901e325a97df9a7", "size": "2812", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "ProjectSearchRelevance.Python/ProductSearchRelevance/sklearn_random_forest.py", "mode": "33188", "license": "mit", "language": [ { "name": "F#", "bytes": "72772" }, { "name": "Jupyter Notebook", "bytes": "349724" }, { "name": "Python", "bytes": "2812" }, { "name": "R", "bytes": "6107" } ], "symlink_target": "" }
""" AWS SNS platform for notify component. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.aws_sns/ """ import logging import json import voluptuous as vol from homeassistant.const import ( CONF_PLATFORM, CONF_NAME) from homeassistant.components.notify import ( ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ["boto3==1.4.7"] CONF_REGION = 'region_name' CONF_ACCESS_KEY_ID = 'aws_access_key_id' CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key' CONF_PROFILE_NAME = 'profile_name' ATTR_CREDENTIALS = 'credentials' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_REGION, default='us-east-1'): cv.string, vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string, vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string, vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string, }) def get_service(hass, config, discovery_info=None): """Get the AWS SNS notification service.""" # pylint: disable=import-error import boto3 aws_config = config.copy() del aws_config[CONF_PLATFORM] del aws_config[CONF_NAME] profile = aws_config.get(CONF_PROFILE_NAME) if profile is not None: boto3.setup_default_session(profile_name=profile) del aws_config[CONF_PROFILE_NAME] sns_client = boto3.client("sns", **aws_config) return AWSSNS(sns_client) class AWSSNS(BaseNotificationService): """Implement the notification service for the AWS SNS service.""" def __init__(self, sns_client): """Initialize the service.""" self.client = sns_client def send_message(self, message="", **kwargs): """Send notification to specified SNS ARN.""" targets = kwargs.get(ATTR_TARGET) if not targets: _LOGGER.info("At least 1 target is required") return message_attributes = {k: {"StringValue": json.dumps(v), "DataType": "String"} for k, v in kwargs.items() if v} for target in targets: 
self.client.publish(TargetArn=target, Message=message, Subject=kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT), MessageAttributes=message_attributes)
{ "content_hash": "df8aefa58770a51ce17651276d4cf24d", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 74, "avg_line_length": 32.0253164556962, "alnum_prop": 0.6486166007905139, "repo_name": "stefan-jonasson/home-assistant", "id": "c94e3abaa96fcab02537ffc86886ea06da8e83c2", "size": "2530", "binary": false, "copies": "9", "ref": "refs/heads/dev", "path": "homeassistant/components/notify/aws_sns.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "4056" }, { "name": "Python", "bytes": "8360711" }, { "name": "Ruby", "bytes": "517" }, { "name": "Shell", "bytes": "12658" } ], "symlink_target": "" }
"""Test OKID""" import os from os.path import join import unittest import numpy as np from modred import OKID, parallel, util from modred.py2to3 import range # Useful for debugging, makes plots plot = False if plot: try: import matplotlib.pyplot as plt except: plot = False def diff(arr_measured, arr_true, normalize=False): err = np.mean((arr_measured - arr_true)**2) if normalize: return err / np.mean(arr_measured ** 2) else: return err @unittest.skipIf(parallel.is_distributed(), 'Only test OKID in serial') class TestOKID(unittest.TestCase): def setUp(self): self.test_dir = join(os.path.dirname(__file__), 'files_OKID') def tearDown(self): pass def test_OKID(self): rtol = 1e-8 atol = 1e-10 for case in ['SISO', 'SIMO', 'MISO', 'MIMO']: inputs = util.load_array_text( join(join(self.test_dir, case), 'inputs.txt')) outputs = util.load_array_text( join(join(self.test_dir, case), 'outputs.txt')) (num_inputs, nt) = inputs.shape (num_outputs, nt2) = outputs.shape assert(nt2 == nt) Markovs_true = np.zeros((nt, num_outputs, num_inputs)) tmp = util.load_array_text( join(join(self.test_dir, case), 'Markovs_Matlab_output1.txt')) tmp = tmp.reshape((num_inputs, -1)) num_Markovs_OKID = tmp.shape[1] Markovs_Matlab = np.zeros( (num_Markovs_OKID, num_outputs, num_inputs)) for i_out in range(num_outputs): data = util.load_array_text( join(join( self.test_dir, case), 'Markovs_Matlab_output%d.txt' % (i_out + 1))) if num_inputs > 1: data = np.swapaxes(data, 0, 1) Markovs_Matlab[:, i_out, :] = data data = util.load_array_text(join( join(self.test_dir, case), 'Markovs_true_output%d.txt' % (i_out + 1))) if num_inputs > 1: data = np.swapaxes(data, 0, 1) Markovs_true[:,i_out,:] = data Markovs_python = OKID(inputs, outputs, num_Markovs_OKID) if plot: plt.figure(figsize=(14,10)) for output_num in range(num_outputs): for input_num in range(num_inputs): plt.subplot(num_outputs, num_inputs, output_num*(num_inputs) + input_num + 1) plt.hold(True) plt.plot(Markovs_true[:,output_num,input_num],'k*-') 
plt.plot(Markovs_Matlab[:,output_num,input_num],'b--') plt.plot(Markovs_python[:,output_num,input_num],'r.') plt.legend(['True', 'Matlab OKID', 'Python OKID']) plt.title('Input %d to output %d'%(input_num+1, output_num+1)) plt.show() np.testing.assert_allclose( Markovs_python.squeeze(), Markovs_Matlab.squeeze(), rtol=rtol, atol=atol) np.testing.assert_allclose( Markovs_python.squeeze(), Markovs_true[:num_Markovs_OKID].squeeze(), rtol=rtol, atol=atol) if __name__ == '__main__': unittest.main()
{ "content_hash": "9a00685c99f903e3ff43df41806f5d99", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 78, "avg_line_length": 33.57281553398058, "alnum_prop": 0.5101214574898786, "repo_name": "belson17/modred", "id": "d7fcbb73118b5910dfb9852e3e86de9e63e3d592", "size": "3480", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "modred/tests/testokid.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "MATLAB", "bytes": "5666" }, { "name": "Python", "bytes": "525224" } ], "symlink_target": "" }
""" Covariance metric (baseline method) This method does not "learn" anything, rather it calculates the covariance matrix of the input data. This is a simple baseline method first introduced in On the Generalized Distance in Statistics, P.C.Mahalanobis, 1936 """ from __future__ import absolute_import import numpy as np from sklearn.base import TransformerMixin from .base_metric import MahalanobisMixin from ._util import transformer_from_metric class Covariance(MahalanobisMixin, TransformerMixin): """Covariance metric (baseline method) Attributes ---------- transformer_ : `numpy.ndarray`, shape=(num_dims, n_features) The linear transformation ``L`` deduced from the learned Mahalanobis metric (See function `transformer_from_metric`.) """ def __init__(self, preprocessor=None): super(Covariance, self).__init__(preprocessor) def fit(self, X, y=None): """ X : data matrix, (n x d) y : unused """ X = self._prepare_inputs(X, ensure_min_samples=2) M = np.cov(X, rowvar = False) if M.ndim == 0: M = 1./M else: M = np.linalg.inv(M) self.transformer_ = transformer_from_metric(np.atleast_2d(M)) return self
{ "content_hash": "de926397b14a941180126592f3fd8538", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 74, "avg_line_length": 26.666666666666668, "alnum_prop": 0.6875, "repo_name": "all-umass/metric-learn", "id": "7a04923da78c8477eba9109791888ca266611c59", "size": "1200", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metric_learn/covariance.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "81880" } ], "symlink_target": "" }
from shrubbery.conf import Settings, Setting


class settings(Settings):
    # Declarative settings for the polymorph app, resolved through
    # shrubbery.conf's Settings machinery.
    # Table for object-identity rows; None presumably means "use the
    # framework default" — TODO confirm against shrubbery.conf.
    OBJECT_IDENTITY_DB_TABLE = Setting(default=None)
    # Column name used for the identity foreign key.
    OBJECT_IDENTITY_DB_COLUMN = Setting(default='identity_id')
{ "content_hash": "e56096f8e9bdbf97089b2e62b3d41e23", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 62, "avg_line_length": 37.4, "alnum_prop": 0.7700534759358288, "repo_name": "emulbreh/shrubbery", "id": "250d78a9f29eff286d80175432f44b5b232ca6ac", "size": "187", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shrubbery/polymorph/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "186" }, { "name": "Python", "bytes": "170934" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, unicode_literals

# Referencing the imported name on the same line keeps "unused import"
# linters quiet while making pytest available module-wide.
import pytest ; pytest

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Standard library imports
from os.path import dirname, join
from shutil import copy
import warnings

# External imports
from six import string_types

# Bokeh imports
from bokeh._testing.util.api import verify_all
from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning

# Module under test
import bokeh as b

#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------

# The complete expected public API of the top-level ``bokeh`` package.
ALL = (
    '__version__',
    'license',
    'sampledata',
)

# Expected exact stdout of bokeh.license(); compared verbatim below.
_LICENSE = """\
Copyright (c) 2012 - 2018, Anaconda, Inc., and Bokeh Contributors
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

Neither the name of Anaconda nor the names of any contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

# Parameterized check that bokeh's public API matches ALL exactly.
Test___all__ = verify_all(b, ALL)

def test___version___type():
    assert isinstance(b.__version__, string_types)

def test___version___defined():
    assert b.__version__ != 'unknown'

def test_license(capsys):
    # This sucks, but when testing from source, there's no guarantee that
    # setup.py has been called to install the license file otherwise.
    copy(
        join(dirname(__file__), '..', '..', 'LICENSE.txt'),
        join(dirname(__file__), '..')
    )
    b.license()
    out, err = capsys.readouterr()
    assert out == _LICENSE

class TestWarnings(object):
    # Bokeh installs custom warning formatting and filters at import time;
    # these tests verify both.

    @pytest.mark.parametrize('cat', (BokehDeprecationWarning, BokehUserWarning))
    def test_bokeh_custom(self, cat):
        # Bokeh's own warning categories get a short "Category: message" form.
        r = warnings.formatwarning("message", cat, "line", "lineno")
        assert r == "%s: %s\n" %(cat.__name__, "message")

    def test_general_default(self):
        # Non-bokeh warnings keep the standard "file:lineno: Category" form.
        r = warnings.formatwarning("message", RuntimeWarning, "line", "lineno")
        assert r == "line:lineno: RuntimeWarning: message\n"

    def test_filters(self):
        # Bokeh registers 'always' filters for its own warning categories.
        assert ('always', None, BokehUserWarning, None, 0) in warnings.filters
        assert ('always', None, BokehDeprecationWarning, None, 0) in warnings.filters
{ "content_hash": "67afb691bd3f42d82f99047c74e71705", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 85, "avg_line_length": 36.490196078431374, "alnum_prop": 0.6413218699623858, "repo_name": "mindriot101/bokeh", "id": "6d5c9f6308600690575b1a7555e7004b397995e0", "size": "4242", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "bokeh/tests/test___init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5455" }, { "name": "CSS", "bytes": "413395" }, { "name": "CoffeeScript", "bytes": "1995470" }, { "name": "HTML", "bytes": "1545838" }, { "name": "JavaScript", "bytes": "4747" }, { "name": "Makefile", "bytes": "5785" }, { "name": "Python", "bytes": "1381168" }, { "name": "Shell", "bytes": "13857" } ], "symlink_target": "" }
import numpy as np
import pytest

from pandas import CategoricalIndex
import pandas._testing as tm


class TestFillNA:
    def test_fillna_categorical(self):
        # GH#11343
        ci = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x")

        # Filling with a value that already is a category keeps the dtype.
        expected = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x")
        tm.assert_index_equal(ci.fillna(1.0), expected)

        cat = ci._data

        # At the EA level, a fill value outside the categories raises ...
        msg = "Cannot setitem on a Categorical with a new category"
        with pytest.raises(TypeError, match=msg):
            cat.fillna(2.0)

        # ... while the CategoricalIndex method casts to object instead.
        result = ci.fillna(2.0)
        expected = ci.astype(object).fillna(2.0)
        tm.assert_index_equal(result, expected)

    def test_fillna_copies_with_no_nas(self):
        # With nothing to fill, the CategoricalIndex method may hand back a
        # view, but it must not return the very same object.
        ci = CategoricalIndex([0, 1, 1])
        filled = ci.fillna(0)
        assert filled is not ci
        assert tm.shares_memory(filled, ci)

        # The Categorical (EA) method, by contrast, always copies.
        cat = ci._data
        filled = cat.fillna(0)
        assert filled._ndarray is not cat._ndarray
        assert filled._ndarray.base is None
        assert not tm.shares_memory(filled, cat)

    def test_fillna_validates_with_no_nas(self):
        # The fill value is validated even when fillna has nothing to do.
        ci = CategoricalIndex([2, 3, 3])
        cat = ci._data

        msg = "Cannot setitem on a Categorical with a new category"

        # No NaNs present, so the index comes back unchanged (no cast).
        res = ci.fillna(False)
        tm.assert_index_equal(res, ci)

        # Directly on the Categorical, the invalid value still raises.
        with pytest.raises(TypeError, match=msg):
            cat.fillna(False)
{ "content_hash": "f4253bcd25c66d4108375afc56b68f3b", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 78, "avg_line_length": 34.25925925925926, "alnum_prop": 0.6183783783783784, "repo_name": "datapythonista/pandas", "id": "09de578f3c649e5a90278f11b1e3cd5b1d0646d5", "size": "1850", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "pandas/tests/indexes/categorical/test_fillna.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "131" }, { "name": "C", "bytes": "355524" }, { "name": "CSS", "bytes": "1662" }, { "name": "Cython", "bytes": "1178139" }, { "name": "Dockerfile", "bytes": "1933" }, { "name": "HTML", "bytes": "456449" }, { "name": "Makefile", "bytes": "505" }, { "name": "Python", "bytes": "19048364" }, { "name": "Shell", "bytes": "10511" }, { "name": "Smarty", "bytes": "8486" }, { "name": "XSLT", "bytes": "1196" } ], "symlink_target": "" }
"""This modules generates a string from a CFG""" import re import string import six class CNFGenerator(object): """Use Chomsky to transform to CNF""" def __init__(self, grammar_rules): """Parse input grammar rules""" parsed_grammar = [] for grammar_rule in grammar_rules: if isinstance(grammar_rule, six.string_types): rule = grammar_rule else: rule = grammar_rule[0] """Split the rules based on : character""" m = re.compile(":").search(rule) if not m: continue else: if m.start() == 0: return else: left_hand_side = rule[0:m.start()].strip() if m.end() == len(rule): return else: rest = string.strip(rule[m.end():]) if rest == "[]": right_hand_side = [] else: combined_rulesets = string.split(rest, "|") right_hand_side = [] for i in combined_rulesets: l = string.split(i) if len(l) > 1: l = tuple(string.split(i)) else: l = l[0] parsed_grammar.append((left_hand_side, l)) """Assing the parsed rules as the new grammar rules""" self.grammar_rules = parsed_grammar self.grammar_terminals_map = {} self.grammar_nonterminals_map = {} self.new_grammar_nonterminals = {} self.init_symbol = self.grammar_rules[0][0] """The grammar rules can be used to obtain the nonterminals""" self.grammar_nonterminals = {r[0] for r in self.grammar_rules} """The nonterminals can be used to distinguish the terminals""" self.grammar_terminals = set() for rule in self.grammar_rules: """ Each rule can be a symbol ot a set of symbols If rule is a non terminal symbol, add it to the grammar terminals If rule is a set of symbols, add each non terminal symbol to the grammar terminals""" if isinstance(rule[1], six.string_types): if rule[1] not in self.grammar_nonterminals: self.grammar_terminals.add(rule[1]) else: for rule_symbol in rule[1]: if rule_symbol not in self.grammar_nonterminals: self.grammar_terminals.add(rule_symbol) for rule_index in xrange(len(self.grammar_rules)): if self.grammar_rules[rule_index][0] not in self.grammar_nonterminals_map: 
self.grammar_nonterminals_map[self.grammar_rules[rule_index][0]] = {rule_index} else: self.grammar_nonterminals_map[self.grammar_rules[rule_index][0]].add(rule_index) """Create a reverse map from symbols to rules for both terminals and nonterminals""" for a in self.grammar_terminals: for rule_index in xrange(len(self.grammar_rules)): if self.grammar_rules[rule_index][1] == a: if a not in self.grammar_terminals_map: self.grammar_terminals_map[a] = {rule_index} else: self.grammar_terminals_map[a].add(rule_index) for rule_index in xrange(len(self.grammar_rules)): if self.grammar_rules[rule_index][0] not in self.grammar_nonterminals_map: self.grammar_nonterminals_map[self.grammar_rules[rule_index][0]] = {rule_index} else: self.grammar_nonterminals_map[self.grammar_rules[rule_index][0]].add(rule_index) """Remove all unitary rules""" self.counter = 0 self.unitary = {} f = 1 while f: f = 0 self.unitary = set([rule for rule in self.grammar_rules if (isinstance(rule[1], six.string_types) and rule[1] in self.grammar_nonterminals) or (len(rule[1]) == 1 and rule[1][0] in self.grammar_nonterminals)]) for u in self.unitary: if isinstance(u[1], six.string_types): ui = u[1] else: ui = u[1][0] if ui in self.grammar_nonterminals_map: for rule_index in self.grammar_nonterminals_map[ui]: if (u[0], self.grammar_rules[rule_index][1]) not in self.grammar_rules: f = 1 self.grammar_rules.append((u[0], self.grammar_rules[rule_index][1])) self.grammar_nonterminals_map[u[0]].add(len(self.grammar_rules) - 1) for u in self.unitary: self.grammar_rules.remove(u) for a in self.grammar_terminals: for rule_index in xrange(len(self.grammar_rules)): if not isinstance(self.grammar_rules[rule_index][1], six.string_types) and len( self.grammar_rules[rule_index][1]) >= 2 and a in self.grammar_rules[rule_index][1]: if a not in self.new_grammar_nonterminals: self.new_grammar_nonterminals[a] = "A@" + a self.grammar_nonterminals.add(self.new_grammar_nonterminals[a]) 
self.grammar_rules.append((self.new_grammar_nonterminals[a], a)) symbolslist = list(self.grammar_rules[rule_index][1]) for symbol_index in xrange(len(symbolslist)): if symbolslist[symbol_index] == a: symbolslist[symbol_index] = self.new_grammar_nonterminals[a] self.grammar_rules[rule_index] = (self.grammar_rules[rule_index][0], tuple(symbolslist)) total_rules = len(self.grammar_rules) for rule_index in xrange(total_rules): if not isinstance(self.grammar_rules[rule_index][1], six.string_types) and len(self.grammar_rules[rule_index][1]) > 2: self.next_rule(self.grammar_rules[rule_index][0], self.grammar_rules[rule_index][1], rule_index) def next_rule(self, left_hand_side, right_hand_side, rule_index): if not isinstance(right_hand_side, six.string_types) and len(right_hand_side) == 2: self.grammar_rules[rule_index] = (left_hand_side, right_hand_side) return nta = "A@" + "_" + str(self.counter) self.grammar_nonterminals.add(nta) self.counter += 1 self.grammar_rules.append((left_hand_side, (right_hand_side[0], nta))) self.next_rule(nta, right_hand_side[1:], rule_index) class CFGGenerator(object): """This class generates a string from a CFG""" grammar = None resolved = None bfs_queue = None maxstate = 0 def __init__(self, cfgr=None, optimized=1, splitstring=0, maxstate=0): """ Object initialization Args: cfgr (CNF): grammar for the random objects optimized (bool): mode of operation - if enabled not all CNF rules are included (mitigate O(n^3)) splitstring (bool): A boolean for enabling or disabling the splitting of symbols using a space maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. 
Returns: None """ self.grammar = None self.resolved = None self.bfs_queue = None self.maxstate = 0 self.grammar = cfgr self.optimized = optimized self.splitstring = splitstring # Because of the optimization, there are some non # existing terminals on the generated list if self.optimized: self._clean_terminals() self.resolved = {} self.bfs_queue = [] self.maxstate = maxstate def generate(self): """ Generates a new random string from the start symbol Args: None Returns: str: The generated string """ result = self._gen(self.optimized, self.splitstring) if self.splitstring and result is not None: result = result[1:] return result def _clean_terminals(self): """ Because of the optimization, there are some non existing terminals on the generated list. Remove them by checking for terms in form Ax,x """ new_terminals = [] for term in self.grammar.grammar_terminals: x_term = term.rfind('@') y_term = term.rfind('A') if y_term > x_term: x_term = y_term ids = term[x_term + 1:].split(',') if len(ids) < 2: """It'input_string a normal terminal, not a state""" new_terminals.append(term) self.grammar.grammar_terminals = new_terminals def _check_self_to_empty(self, stateid): """ Because of the optimization, the rule for empty states is missing A check takes place live Args: stateid (int): The state identifier Returns: bool: A true or false response """ x_term = stateid.rfind('@') y_term = stateid.rfind('A') if y_term > x_term: x_term = y_term ids = stateid[x_term + 1:].split(',') if len(ids) < 2: return 0 if ids[0] == ids[1]: # print 'empty' return 1 return 0 def _check_intemediate(self, myntr, maxstate): """ For each state Apq which is a known terminal, this function searches for rules Apr -> Apq Aqr and Arq -> Arp Apq where Aqr is also a known terminal or Arp is also a known terminal. It is mainly used as an optimization in order to avoid the O(n^3) for generating all the Apq -> Apr Arq rules during the PDA to CFG procedure. 
Args: myntr (str): The examined non terminal that was poped out of the queue maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. Returns: bool: Returns true if the algorithm was applied at least one time """ # print 'BFS Dictionary Update - Intermediate' x_term = myntr.rfind('@') y_term = myntr.rfind('A') if y_term > x_term: x_term = y_term ids = myntr[x_term + 1:].split(',') if len(ids) < 2: return 0 i = ids[0] j = ids[1] r = 0 find = 0 while r < maxstate: if r != i and r != j: if 'A' + i + ',' + \ repr(r) not in self.resolved \ and 'A' + j + ',' + repr(r) in self.resolved: self.resolved[ 'A' + i + ',' + repr(r)] = self.resolved[myntr] \ + self.resolved['A' + j + ',' + repr(r)] if self._checkfinal('A' + i + ',' + repr(r)): return self.resolved['A' + i + ',' + repr(r)] if 'A' + i + ',' + repr(r) not in self.bfs_queue: self.bfs_queue.append('A' + i + ',' + repr(r)) find = 1 if 'A' + repr(r) + ',' + j not in self.resolved and 'A' + \ repr(r) + ',' + i in self.resolved: self.resolved[ 'A' + repr(r) + ',' + j] = self.resolved['A' + repr(r) + ',' + i] \ + self.resolved[myntr] if self._checkfinal('A' + repr(r) + ',' + j): return self.resolved['A' + repr(r) + ',' + j] if 'A' + repr(r) + ',' + j not in self.bfs_queue: self.bfs_queue.append('A' + repr(r) + ',' + j) find = 1 r = r + 1 if find == 1: return 1 return 0 def _check_self_replicate(self, myntr): """ For each Rule B -> c where c is a known terminal, this function searches for B occurences in rules with the form A -> B and sets A -> c. 
""" # print 'BFS Dictionary Update - Self Replicate' find = 0 for nonterm in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nonterm]: if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance( self.grammar.grammar_rules[i][1], (set, tuple)) \ and self.grammar.grammar_rules[i][1] == myntr: self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr] if self._checkfinal(self.grammar.grammar_rules[i][0]): return self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) find = 1 if find == 1: return 1 return 0 def _check_self_nonterminals(self, optimized): """ For each Rule A -> BC where B and C are known terminals (B -> c1 and C -> c2), this function searches replaces A to the corresponding terminals A -> c1c2 """ # print 'BFS Dictionary Update - Self Non Terminals' find = 0 for nt in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nt]: if (self.grammar.grammar_rules[i][0] not in self.resolved\ or self.grammar.grammar_rules[i][0] == 'S') \ and isinstance(self.grammar.grammar_rules[i][1], (set, tuple)): # All rules are in CNF form, so first check the A -> BC rules part_a = None if optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1][0]): part_a = '' elif self.grammar.grammar_rules[i][1][0] in self.resolved: part_a = self.resolved[self.grammar.grammar_rules[i][1][0]] part_b = None if optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1][1]): part_b = '' elif self.grammar.grammar_rules[i][1][1] in self.resolved: part_b = self.resolved[self.grammar.grammar_rules[i][1][1]] if part_a is not None and part_b is not None: self.resolved[self.grammar.grammar_rules[i][0]] = part_a + part_b # print 'Non Terminals Resolving # '+self.g.Rules[i][0]+": "+ # self.Resolved[self.g.Rules[i][0]] if 
self._checkfinal(self.grammar.grammar_rules[i][0]): return self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) find = 1 if find == 1: return 1 return 0 def _checkfinal(self, nonterminal): if nonterminal == 'S': return 1 return 0 def _gen(self, optimized, splitstring): """Generates a new random object generated from the nonterminal Args: optimized (bool): mode of operation - if enabled not all CNF rules are included (mitigate O(n^3)) splitstring (bool): A boolean for enabling or disabling Returns: str: The generated string """ # Define Dictionary that holds resolved rules # (only in form A -> terminals sequence) self.resolved = {} # First update Resolved dictionary by adding rules # that contain only terminals (resolved rules) for nt in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nt]: if self.grammar.grammar_rules[i][0] not in self.resolved\ and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)): if self.grammar.grammar_rules[i][1] != '@empty_set' \ and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals: if splitstring: self.resolved[ self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1] else: if self.grammar.grammar_rules[i][1] == '&': self.resolved[self.grammar.grammar_rules[i][0]] = ' ' else: self.resolved[ self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1] # print 'ResolvingA '+self.g.Rules[i][0]+": "+ # self.g.Rules[i][1] if self._checkfinal(self.grammar.grammar_rules[i][0]): return self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) if self.grammar.grammar_rules[i][1] == '@empty_set': self.resolved[self.grammar.grammar_rules[i][0]] = '' # print 'ResolvingB '+self.g.Rules[i][0]+": " self.bfs_queue.append(self.grammar.grammar_rules[i][0]) if 
optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1]): self.resolved[self.grammar.grammar_rules[i][0]] = '' # print 'ResolvingC '+self.g.Rules[i][0]+": " if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) # Then try to use the rules from Resolved dictionary and check # if there is another rule that can be resolved. # This should be done in a while loop change = 1 while change: change = 0 if not change: ret = self._check_self_nonterminals(optimized) if ret == 1: change = 1 elif ret != 0: return ret if not change: while not change and len(self.bfs_queue) > 0: myntr = self.bfs_queue.pop() ret = self._check_self_replicate(myntr) if ret == 1: change = 1 elif ret != 0: return ret if optimized and self._check_intemediate( myntr, self.maxstate): change = 1 break
{ "content_hash": "531e4f9223d44b1369a72fb237e1aef1", "timestamp": "", "source": "github", "line_count": 425, "max_line_length": 130, "avg_line_length": 47.265882352941176, "alnum_prop": 0.5025388291517324, "repo_name": "GeorgeArgyros/symautomata", "id": "60fecad8ca551ce3b471f060c16d7f0cee388fe5", "size": "20088", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "symautomata/cfggenerator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "57" }, { "name": "Python", "bytes": "224672" } ], "symlink_target": "" }
'''
Tools for making nice plots

Uses matplotlib backend (currently wx or gtk) to provide tools for editing
matplotlib plots after they have been created

NOTE: I have imported all of pyplot into this module, so you may call it as
though you were calling pyplot directly.  The difference is that with some of
the functions, such as plot, a window will be brought up allowing you to
toggle the visibility of the artists
'''
import matplotlib

if __name__ != '__main__':
    from matplotlib.pyplot import *


def plot(*args, **kwargs):
    '''
    Works the same as matplotlib.pyplot.plot, with the addition of a panel
    being created which allows the user to toggle visibility of the artists
    in the axis.

    Returns whatever matplotlib.pyplot.plot returns (the list of Line2D
    artists), so callers can keep manipulating the created lines.
    '''
    # Keep pyplot's return value instead of discarding it, so this wrapper
    # stays a drop-in replacement for pyplot.plot.
    artists = matplotlib.pyplot.plot(*args, **kwargs)
    create_plot_browser(matplotlib.pyplot.gca())
    return artists
# end plot


def scatter(*args, **kwargs):
    '''
    Works the same as matplotlib.pyplot.scatter, with the addition of a panel
    being created which allows the user to toggle visibility of the artists
    in the axis.

    Returns whatever matplotlib.pyplot.scatter returns (the collection of
    points), so callers can keep manipulating it.
    '''
    collection = matplotlib.pyplot.scatter(*args, **kwargs)
    create_plot_browser(matplotlib.pyplot.gca())
    return collection
# end scatter


def create_plot_browser(ax):
    '''Creates a window for modifying how artists are displayed in an axis.

    Dispatches on the active matplotlib backend; only the wxagg and gtkagg
    backends have browser implementations. Other backends are reported with
    a warning instead of failing silently.
    '''
    # Call browser creation code appropriate to the current backend
    backend = matplotlib.get_backend().lower()
    if backend == 'wxagg':
        import plotting_tools_wx
        plotting_tools_wx.create_plot_browser(ax)
    elif backend == 'gtkagg':
        import plotting_tools_gtk
        plotting_tools_gtk.create_plot_browser(ax)
    else:
        # Previously unsupported backends were ignored silently, which made
        # the missing browser window hard to diagnose.
        import warnings
        warnings.warn('plotting_tools: no plot browser available for '
                      'backend %r (supported: wxagg, gtkagg)' % backend)
# end create_plot_browser


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('Test plotting_tools module')
    parser.add_argument('backend', help='Backend to use with matplotlib. '
                        'Currently supports wxagg and gtkagg')
    args = parser.parse_args()

    matplotlib.use(args.backend)

    import matplotlib.pyplot
    fig = matplotlib.pyplot.figure()
    ax = fig.add_subplot(111)
    ax.plot([1, 2, 3])
    ax.scatter([1, 2, 3], [3, 2, 1], color='g')
    ax.plot([2, 2, 1], 'r')
    fig.show()

    create_plot_browser(ax)

    matplotlib.pyplot.show()
{ "content_hash": "05a11c99a499d33026ce4a252386b50b", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 110, "avg_line_length": 30.410958904109588, "alnum_prop": 0.6792792792792792, "repo_name": "bthcode/cmake_scipy_ctypes_example", "id": "bb59d04ba5a0f94e5eae38fb5221a690a7b04c74", "size": "2242", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/python/plotting_tools.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "20334" }, { "name": "CMake", "bytes": "3725" }, { "name": "Jupyter Notebook", "bytes": "11797" }, { "name": "Python", "bytes": "86164" }, { "name": "Shell", "bytes": "1212" } ], "symlink_target": "" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from dreamer import control


def random_episodes(
    env_ctor, num_episodes, num_steps, outdir=None, isolate_envs='none'):
  """Collect episodes by acting with a uniform random policy.

  Args:
    env_ctor: Constructor for the environment.
    num_episodes: Minimum number of episodes to collect.
    num_steps: Minimum number of environment steps to collect.
    outdir: Optional directory; when set, the CollectDataset wrapper also
        writes finished episodes there.
    isolate_envs: Isolation mode forwarded to control.create_env.

  Returns:
    List of collected episode dicts (each contains at least a 'reward' key).
  """
  # If using environment processes or threads, we should also use them here to
  # avoid loading their dependencies into the global name space. This way,
  # their imports will be isolated from the main process and later created
  # envs do not inherit them via global state but import their own copies.
  env, _ = control.create_env(env_ctor, isolate_envs)
  env = control.wrappers.CollectDataset(env, outdir)
  # BUG FIX: this used to be `episodes = [] if outdir else None`, which made
  # the unconditional `episodes.append(...)` below raise AttributeError
  # whenever outdir was not provided. Always accumulate in memory; writing to
  # disk is handled independently by the CollectDataset wrapper.
  episodes = []
  while num_episodes > 0 or num_steps > 0:
    policy = lambda env, obs: env.action_space.sample()
    done = False
    obs = env.reset()
    while not done:
      action = policy(env, obs)
      obs, _, done, info = env.step(action)
    episode = env._get_episode()
    episodes.append(episode)
    num_episodes -= 1
    num_steps -= len(episode['reward'])
  try:
    env.close()
  except AttributeError:
    pass
  return episodes
{ "content_hash": "61b15a29726e773e788476e6b883d803", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 79, "avg_line_length": 34.84375, "alnum_prop": 0.6977578475336322, "repo_name": "google-research/dreamer", "id": "5c6507b3035338b49af50f3925db3e2f560c6b3e", "size": "1722", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dreamer/control/random_episodes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "267411" } ], "symlink_target": "" }
import copy

from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class DataFlowJavaOperator(BaseOperator):
    """
    Start a Java Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.

    It's a good practice to define dataflow_* parameters in the default_args
    of the dag like the project, zone and staging location.

    ```
    default_args = {
        'dataflow_default_options': {
            'project': 'my-gcp-project',
            'zone': 'europe-west1-d',
            'stagingLocation': 'gs://my-staging-bucket/staging/'
        }
    }
    ```

    You need to pass the path to your dataflow as a file reference with the
    ``jar`` parameter, the jar needs to be a self executing jar. Use
    ``options`` to pass on options to your job.

    ```
    t1 = DataFlowOperation(
        task_id='datapflow_example',
        jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
        options={
            'autoscalingAlgorithm': 'BASIC',
            'maxNumWorkers': '50',
            'start': '{{ds}}',
            'partitionType': 'DAY'
        },
        dag=my-dag)
    ```

    Both ``jar`` and ``options`` are templated so you can use variables in
    them.
    """
    template_fields = ['options', 'jar']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            jar,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new DataFlowJavaOperator.

        For more detail on about job submission have a look at the reference:

        https://cloud.google.com/dataflow/pipelines/specifying-exec-params

        :param jar: The reference to a self executing DataFlow jar.
        :type jar: string
        :param dataflow_default_options: Map of default job options.
        :type dataflow_default_options: dict
        :param options: Map of job specific options.
        :type options: dict
        :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have
            domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(DataFlowJavaOperator, self).__init__(*args, **kwargs)

        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.jar = jar
        # BUG FIX: the defaults used to be mutable dicts ({}), which Python
        # shares across every instance of the operator — mutating one task's
        # options would leak into all others. Use None sentinels and build a
        # fresh dict per instance instead.
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}

    def execute(self, context):
        """Submit the Java Dataflow job via the DataFlowHook.

        Task-level ``options`` override the DAG-wide
        ``dataflow_default_options``.
        """
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)

        # Copy so repeated executions never mutate the configured defaults.
        dataflow_options = copy.copy(self.dataflow_default_options)
        dataflow_options.update(self.options)

        hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
{ "content_hash": "37a05fea8db7769ca1ca7a009e5a5439", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 90, "avg_line_length": 34.20879120879121, "alnum_prop": 0.6190170253774494, "repo_name": "ty707/airflow", "id": "8f61e18ab5d6a61a18b1dfc296650253e833f727", "size": "3680", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "airflow/contrib/operators/dataflow_operator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "56952" }, { "name": "HTML", "bytes": "129811" }, { "name": "JavaScript", "bytes": "1370838" }, { "name": "Mako", "bytes": "1037" }, { "name": "Python", "bytes": "1242487" }, { "name": "Shell", "bytes": "17782" } ], "symlink_target": "" }
"""Parse @-directives (@test, @suite, @assert*...) embedded in Fortran test
sources and rewrite them into assertion calls plus #line markers.

NOTE(review): the generated text (``call assertTrue(...)``, ``anyExceptions()``,
``SourceLocation``) is Fortran; this module is a preprocessor for it.
"""
from __future__ import print_function
from os.path import *
import re

# from parseBrackets import parseBrackets
from parseDirectiveArgs import parseDirectiveArguments


class MyError(Exception):
    # Simple error carrying a message for malformed test declarations.
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


# Alternation of all supported assertion names; spliced into the @assert
# directive regexes below.
assertVariants = 'Fail|Equal|True|False|LessThan|LessThanOrEqual|GreaterThan|GreaterThanOrEqual'
assertVariants += '|IsMemberOf|Contains|Any|All|NotAll|None|IsPermutationOf'
assertVariants += '|ExceptionRaised|SameShape|IsNaN|IsFinite'


def cppSetLineAndFile(line, file):
    # Emit a C-preprocessor #line marker so compiler messages point at the
    # original source.
    return "#line " + str(line) + ' "' + file + '"\n'


def getSubroutineName(line):
    # Extract the subroutine name from a Fortran "subroutine name(...)" line.
    try:
        m = re.match('\s*subroutine\s+(\w*)\s*(\\([\w\s,]*\\))?\s*(!.*)*$',
                     line, re.IGNORECASE)
        return m.groups()[0]
    except:
        raise MyError('Improper format in declaration of test procedure.')


def parseArgsFirstRest(directiveName, line):
    """If the @-directive has more than one argument, parse into first and rest strings.
    Added for assertAssociated.
    """
    if directiveName != '':
        m = re.match('\s*'+directiveName+'\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
        if m:
            argStr = m.groups()[0]
        else:
            return None
    else:
        argStr = line
    args = parseDirectiveArguments(argStr)
    if args == []:
        returnArgs = None
    elif len(args) == 1:
        returnArgs = [args[0]]
    else:
        # Everything after the first argument is re-joined into one string.
        returnArgs = [args[0], ','.join(args[1:])]
    return returnArgs


def parseArgsFirstSecondRest(directiveName, line):
    """If the @-directive must have at least two arguments, parse into first,
    second, and rest strings.  Added for assertAssociated.
    """
    args1 = parseArgsFirstRest(directiveName, line)
    returnArgs = None
    if args1 != None:
        if len(args1) == 1:
            returnArgs = args1
        elif len(args1) == 2:
            # Split the "rest" once more to peel off the second argument.
            args2 = parseArgsFirstRest('', args1[1])
            returnArgs = [args1[0]] + args2
        elif len(args1) == 3:
            print(-999, 'parseArgsFirstSecondRest::error!')
            returnArgs = None
    return returnArgs


def getSelfObjectName(line):
    # First dummy argument of the subroutine, used as the "self" object name.
    m = re.match('\s*subroutine\s+\w*\s*\\(\s*(\w+)\s*(,\s*\w+\s*)*\\)\s*$',
                 line, re.IGNORECASE)
    if m:
        return m.groups()[0]
    else:
        return m


def getTypeName(line):
    # Extract the type name from a Fortran "type [, attrs ::] name" line.
    m = re.match('\s*type(.*::\s*|\s+)(\w*)\s*$', line, re.IGNORECASE)
    return m.groups()[1]


class Action():
    # Base class for directive handlers: apply() runs action() when the
    # directive's regex matches and reports whether it matched.
    def apply(self, line):
        m = self.match(line)
        if m:
            self.action(m, line)
        return m


class AtTest(Action):
    # Handles the @test directive and its optional (npes=..., ifdef=..., ...)
    # argument list.
    def __init__(self, parser):
        self.parser = parser
        self.keyword = '@test'

    def match(self, line):
        m = re.match('\s*'+self.keyword+'(\s*(\\(.*\\))?\s*$)', line, re.IGNORECASE)
        return m

    def action(self, m, line):
        options = re.match('\s*'+self.keyword+'\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
        method = {}
        if options:
            npesOption = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0],
                                   re.IGNORECASE)
            if npesOption:
                npesString = npesOption.groups()[0]
                npes = map(int, npesString.split(','))
                method['npRequests'] = npes
            #ifdef is optional
            matchIfdef = re.match('.*ifdef\s*=\s*(\w+)', options.groups()[0],
                                  re.IGNORECASE)
            if matchIfdef:
                ifdef = matchIfdef.groups()[0]
                method['ifdef'] = ifdef
            matchIfndef = re.match('.*ifndef\s*=\s*(\w+)', options.groups()[0],
                                   re.IGNORECASE)
            if matchIfndef:
                ifndef = matchIfndef.groups()[0]
                method['ifndef'] = ifndef
            matchType = re.match('.*type\s*=\s*(\w+)', options.groups()[0],
                                 re.IGNORECASE)
            if matchType:
                print ('Type', matchType.groups()[0])
                method['type'] = matchType.groups()[0]
            paramOption = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0],
                                    re.IGNORECASE)
            if paramOption:
                paramExpr = paramOption.groups()[0]
                method['testParameters'] = paramExpr
            casesOption = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0],
                                    re.IGNORECASE)
            if casesOption:
                method['cases'] = casesOption.groups()[0]
        # The line after @test is the subroutine declaration itself.
        nextLine = self.parser.nextLine()
        method['name'] = getSubroutineName(nextLine)
        # save "self" name for use with @mpiAssert
        self.parser.currentSelfObjectName = getSelfObjectName(nextLine)
        # save "self" name for use with @mpiAssert
        dummyArgument = getSelfObjectName(nextLine)
        if dummyArgument:
            method['selfObjectName'] = dummyArgument
        self.parser.userTestMethods.append(method)
        self.parser.commentLine(line)
        self.parser.outputFile.write(nextLine)


# deprecated - should now just use @test
class AtMpiTest(AtTest):
    def __init__(self, parser):
        self.parser = parser
        self.keyword = '@mpitest'


class AtTestCase(Action):
    # Handles the @testcase directive, recording constructor/npes/cases/
    # testParameters options and the type name on the following line.
    def __init__(self, parser):
        self.parser = parser

    def match(self, line):
        m = re.match('\s*@testcase\s*(|\\(.*\\))\s*$', line, re.IGNORECASE)
        return m

    def action(self, m, line):
        options = re.match('\s*@testcase\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
        if options:
            value = re.search('constructor\s*=\s*(\w*)', options.groups()[0],
                              re.IGNORECASE)
            if value:
                self.parser.userTestCase['constructor'] = value.groups()[0]
            value = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0],
                              re.IGNORECASE)
            if value:
                npesString = value.groups()[0]
                npes = map(int,npesString.split(','))
                self.parser.userTestCase['npRequests'] = npes
            value = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0],
                              re.IGNORECASE)
            if value:
                cases = value.groups()[0]
                self.parser.userTestCase['cases'] = cases
            value = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0],
                              re.IGNORECASE)
            if value:
                paramExpr = value.groups()[0]
                self.parser.userTestCase['testParameters'] = paramExpr
        nextLine = self.parser.nextLine()
        self.parser.userTestCase['type']=getTypeName(nextLine)
        self.parser.commentLine(line)
        self.parser.outputFile.write(nextLine)


class AtSuite(Action):
    # Handles @suite(name=...); the name may be quoted or bare.
    def __init__(self, parser):
        self.parser = parser

    def match(self, line):
        nameRe = "'\w+'|" + """\w+"""
        m = re.match("\s*@suite\s*\\(\s*name\s*=\s*("+nameRe+")\s*\\)\s*$",
                     line, re.IGNORECASE)
        return m

    def action(self, m, line):
        # [1:-1] strips the surrounding quotes from the captured name.
        self.parser.suiteName=m.groups()[0][1:-1]
        self.parser.wrapModuleName = 'Wrap' + self.parser.suiteName


class AtBegin(Action):
    # Reacts to a Fortran "module <name>" line and derives the wrap-module
    # and (if still unset) suite names from it.
    def __init__(self, parser):
        self.parser = parser

    def match(self, line):
        m = re.match('\s*module\s+(\w*)\s*$', line, re.IGNORECASE)
        return m

    def action(self, m, line):
        self.parser.userModuleName = m.groups()[0]
        self.parser.wrapModuleName = 'Wrap' + self.parser.userModuleName
        if not self.parser.suiteName:
            self.parser.suiteName = self.parser.userModuleName + "_suite"
        self.parser.outputFile.write(line)


class AtAssert(Action):
    # Rewrites @assert<Variant>(args) into a "call assert<Variant>(...)"
    # statement bracketed by #line markers.
    def __init__(self, parser):
        self.parser = parser

    def match(self, line):
        m = re.match('\s*@assert('+assertVariants+')\s*\\((.*\w.*)\\)\s*$',
                     line, re.IGNORECASE)
        return m

    def appendSourceLocation(self, fileHandle, fileName, lineNumber):
        # Emit the SourceLocation(...) continuation lines for the assertion.
        fileHandle.write(" & location=SourceLocation( &\n")
        fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
        fileHandle.write(" & " + str(lineNumber) + ")")

    def action(self, m, line):
        p = self.parser
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
        p.outputFile.write(" call assert"+m.groups()[0]+"(" + m.groups()[1] + ", &\n")
        self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
        p.outputFile.write(" )\n")
        p.outputFile.write(" if (anyExceptions()) return\n")
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))


class AtAssertAssociated(Action):
    # Rewrites @assertAssociated(...) into "call assertTrue(associated(...))";
    # supports one, two, and two-plus-message argument forms.
    def __init__(self,parser):
        self.parser = parser

    def match(self, line):
        m = re.match('\s*@assertassociated\s*\\((.*\w.*)\\)\s*$',
                     line, re.IGNORECASE)
        if not m:
            m = re.match( \
                '\s*@assertassociated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
                line, re.IGNORECASE)
        # How to get both (a,b) and (a,b,c) to match?
        if not m:
            m = re.match( \
                '\s*@assertassociated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
                line, re.IGNORECASE)
        return m

    def appendSourceLocation(self, fileHandle, fileName, lineNumber):
        fileHandle.write(" & location=SourceLocation( &\n")
        fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
        fileHandle.write(" & " + str(lineNumber) + ")")

    def action(self, m, line):
        p = self.parser
        # args = parseArgsFirstRest('@assertassociated',line)
        args = parseArgsFirstSecondRest('@assertassociated',line)
        # print(9000,line)
        # print(9001,args)
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
        if len(args) > 1:
            if re.match('.*message=.*',args[1],re.IGNORECASE):
                p.outputFile.write(" call assertTrue(associated(" + args[0] + "), " + args[1] + ", &\n")
            elif len(args) > 2:
                p.outputFile.write(" call assertTrue(associated(" + args[0] + "," + args[1] + "), " + args[2] + ", &\n")
            else:
                p.outputFile.write(" call assertTrue(associated(" + args[0] + "," + args[1] + "), &\n")
        else:
            p.outputFile.write(" call assertTrue(associated(" + args[0] + "), &\n")
        self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
        p.outputFile.write(" )\n")
        p.outputFile.write(" if (anyExceptions()) return\n")
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))


class AtAssertNotAssociated(Action):
    # Same as AtAssertAssociated but negated (assertFalse); also accepts the
    # @assertUnAssociated spelling and remembers which one matched.
    def __init__(self,parser):
        self.parser = parser
        self.name='@assertnotassociated'

    def match(self, line):
        m = re.match('\s*@assert(not|un)associated\s*\\((.*\w.*)\\)\s*$',
                     line, re.IGNORECASE)
        if m:
            self.name='@assert'+m.groups()[0]+'associated'
        else:
            self.name='@assertnotassociated'
        if not m:
            m = re.match( \
                '\s*@assert(not|un)associated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
                line, re.IGNORECASE)
        # How to get both (a,b) and (a,b,c) to match?
        if not m:
            m = re.match( \
                '\s*@assert(not|un)associated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
                line, re.IGNORECASE)
            if m:
                self.name='@assert'+m.groups()[0]+'associated'
            else:
                self.name='@assertnotassociated'
        return m

    def appendSourceLocation(self, fileHandle, fileName, lineNumber):
        fileHandle.write(" & location=SourceLocation( &\n")
        fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
        fileHandle.write(" & " + str(lineNumber) + ")")

    def action(self, m, line):
        p = self.parser
        #-- args = parseArgsFirstRest('@assertassociated',line)
        #ok args = parseArgsFirstSecondRest('@assertassociated',line)
        args = parseArgsFirstSecondRest(self.name,line)
        # print(9000,line)
        # print(9001,args)
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
        if len(args) > 1:
            if re.match('.*message=.*',args[1],re.IGNORECASE):
                p.outputFile.write(" call assertFalse(associated(" + args[0] + "), " + args[1] + ", &\n")
            elif len(args) > 2:
                p.outputFile.write(" call assertFalse(associated(" + args[0] + "," + args[1] + "), " + args[2] + ", &\n")
            else:
                p.outputFile.write(" call assertFalse(associated(" + args[0] + "," + args[1] + "), &\n")
        else:
            p.outputFile.write(" call assertFalse(associated(" + args[0] + "), &\n")
        self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
        p.outputFile.write(" )\n")
        p.outputFile.write(" if (anyExceptions()) return\n")
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))


class AtAssertEqualUserDefined(Action):
    """Convenience directive replacing (a,b) with a call to
    assertTrue(a==b) and an error message, if none is provided
    when invoked.
    """

    def __init__(self,parser):
        self.parser = parser

    def match(self, line):
        m = re.match( \
            '\s*@assertequaluserdefined\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
            line, re.IGNORECASE)
        # How to get both (a,b) and (a,b,c) to match?
        if not m:
            m = re.match( \
                '\s*@assertequaluserdefined\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
                line, re.IGNORECASE)
        return m

    def appendSourceLocation(self, fileHandle, fileName, lineNumber):
        fileHandle.write(" & location=SourceLocation( &\n")
        fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
        fileHandle.write(" & " + str(lineNumber) + ")")

    def action(self, m, line):
        p = self.parser
        args = parseArgsFirstSecondRest('@assertequaluserdefined',line)
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
        if len(args) > 2:
            p.outputFile.write(" call assertTrue(" \
                + args[0] + "==" + args[1] + ", " + args[2] + ", &\n")
        else:
            p.outputFile.write(" call assertTrue(" \
                + args[0] + "==" + args[1] + ", &\n")
        # Supply a default message only when the user did not give one.
        if not re.match('.*message=.*',line,re.IGNORECASE):
            p.outputFile.write(" & message='<" + args[0] + "> not equal to <" + args[1] + ">', &\n")
        self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
        p.outputFile.write(" )\n")
        p.outputFile.write(" if (anyExceptions()) return\n")
        p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))


class AtAssertEquivalent(Action):
    """Convenience directive replacing (a,b) with a call to
    assertTrue(a.eqv.b) and an error message, if none is provided
    when invoked.
    """

    def __init__(self,parser):
        self.parser = parser

    def match(self, line):
        m = re.match( \
            '\s*@assertequivalent\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
            line, re.IGNORECASE)
        # How to get both (a,b) and (a,b,c) to match?
if not m: m = re.match( \ '\s*@assertequivalent\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \ line, re.IGNORECASE) return m def appendSourceLocation(self, fileHandle, fileName, lineNumber): fileHandle.write(" & location=SourceLocation( &\n") fileHandle.write(" & '" + str(basename(fileName)) + "', &\n") fileHandle.write(" & " + str(lineNumber) + ")") def action(self, m, line): p = self.parser args = parseArgsFirstSecondRest('@assertequivalent',line) p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName)) if len(args) > 2: p.outputFile.write(" call assertTrue(" \ + args[0] + ".eqv." + args[1] + ", " + args[2] + ", &\n") else: p.outputFile.write(" call assertTrue(" \ + args[0] + ".eqv." + args[1] + ", &\n") if not re.match('.*message=.*',line,re.IGNORECASE): p.outputFile.write(" & message='<" + args[0] + "> not equal to <" + args[1] + ">', &\n") self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber) p.outputFile.write(" )\n") p.outputFile.write(" if (anyExceptions()) return\n") p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName)) class AtMpiAssert(Action): def __init__(self, parser): self.parser = parser def match(self, line): m = re.match('\s*@mpiassert('+assertVariants+')\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE) return m def appendSourceLocation(self, fileHandle, fileName, lineNumber): fileHandle.write(" & location=SourceLocation( &\n") fileHandle.write(" & '" + str(basename(fileName)) + "', &\n") fileHandle.write(" & " + str(lineNumber) + ")") def action(self, m, line): p = self.parser p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName)) p.outputFile.write(" call assert"+m.groups()[0]+"(" + m.groups()[1] + ", &\n") self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber) p.outputFile.write(" )\n") # 'this' object may not exist if test is commented out. 
if hasattr(p,'currentSelfObjectName'): p.outputFile.write(" if (anyExceptions("+p.currentSelfObjectName+"%context)) return\n") p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName)) class AtBefore(Action): def __init__(self, parser): self.parser = parser def match(self, line): m = re.match('\s*@before\s*$', line, re.IGNORECASE) return m def action(self, m, line): nextLine = self.parser.nextLine() self.parser.userTestCase['setUp'] = getSubroutineName(nextLine) self.parser.commentLine(line) self.parser.outputFile.write(nextLine) class AtAfter(Action): def __init__(self, parser): self.parser = parser def match(self, line): m = re.match('\s*@after\s*$', line, re.IGNORECASE) return m def action(self, m, line): nextLine = self.parser.nextLine() self.parser.userTestCase['tearDown'] = getSubroutineName(nextLine) self.parser.commentLine(line) self.parser.outputFile.write(nextLine) class AtTestParameter(Action): def __init__(self, parser): self.parser = parser def match(self, line): m = re.match('\s*@testParameter\s*(|.*)$', line, re.IGNORECASE) return m def action(self, m, line): options = re.match('\s*@testParameter\s*\\((.*)\\)\s*$', line, re.IGNORECASE) self.parser.commentLine(line) nextLine = self.parser.nextLine() if not 'testParameterType' in self.parser.userTestCase: self.parser.userTestCase['testParameterType'] = getTypeName(nextLine) self.parser.outputFile.write(nextLine) if options: value = re.search('constructor\s*=\s*(\w*)', options.groups()[0], re.IGNORECASE) if value: self.parser.userTestCase['testParameterConstructor'] = value.groups()[0] else: self.parser.userTestCase['testParameterConstructor'] = self.parser.userTestCase['testParameterType'] class Parser(): def __init__(self, inputFileName, outputFileName): def getBaseName(fileName): from os.path import basename, splitext base = basename(fileName) return splitext(base)[0] self.fileName = inputFileName self.inputFile = open(inputFileName, 'r') self.outputFile = open(outputFileName, 'w') 
self.defaultSuiteName = getBaseName(inputFileName) + "_suite" self.suiteName = '' self.currentLineNumber = 0 self.userModuleName = '' # if any self.userTestCase = {} self.userTestCase['setUpMethod'] = '' self.userTestCase['tearDownMethod'] = '' self.userTestCase['defaultTestParameterNpes'] = [] # is MPI if not empty self.userTestCase['defaultTestParametersExpr'] = '' self.userTestCase['defaultTestParameterCases'] = [] self.userTestMethods = [] # each entry is a dictionary self.wrapModuleName = "Wrap" + getBaseName(inputFileName) self.currentLineNumber = 0 self.actions=[] self.actions.append(AtTest(self)) self.actions.append(AtMpiTest(self)) self.actions.append(AtTestCase(self)) self.actions.append(AtSuite(self)) self.actions.append(AtBegin(self)) self.actions.append(AtAssert(self)) self.actions.append(AtAssertAssociated(self)) # self.actions.append(AtAssertAssociatedWith(self)) self.actions.append(AtAssertNotAssociated(self)) # self.actions.append(AtAssertNotAssociatedWith(self)) self.actions.append(AtAssertEqualUserDefined(self)) self.actions.append(AtAssertEquivalent(self)) self.actions.append(AtMpiAssert(self)) self.actions.append(AtBefore(self)) self.actions.append(AtAfter(self)) self.actions.append(AtTestParameter(self)) def commentLine(self, line): self.outputFile.write(re.sub('@','!@',line)) def run(self): def parse(line): for action in self.actions: if (action.apply(line)): return self.outputFile.write(line) while True: line = self.nextLine() if not line: break parse(line) if (not self.suiteName): self.suiteName = self.defaultSuiteName if ('testParameterType' in self.userTestCase and (not 'constructor' in self.userTestCase)): self.userTestCase['constructor'] = self.userTestCase['testParameterType'] self.makeWrapperModule() def isComment(self, line): return re.match('\s*(!.*|)$', line) def nextLine(self): while True: self.currentLineNumber += 1 line = self.inputFile.readline() if not line: break if (self.isComment(line)): self.outputFile.write(line) pass 
else: break return line def printHeader(self): self.outputFile.write('\n') self.outputFile.write('module ' + self.wrapModuleName + '\n') self.outputFile.write(' use pFUnit_mod\n') if (self.userModuleName): self.outputFile.write(' use ' + self.userModuleName + '\n') self.outputFile.write(' implicit none\n') self.outputFile.write(' private\n\n') def printTail(self): self.outputFile.write('\n') self.outputFile.write('end module ' + self.wrapModuleName + '\n\n') def printWrapUserTestCase(self): self.outputFile.write(' public :: WrapUserTestCase\n') self.outputFile.write(' public :: makeCustomTest\n') self.outputFile.write(' type, extends(' + self.userTestCase['type'] + ') :: WrapUserTestCase\n') self.outputFile.write(' procedure(userTestMethod), nopass, pointer :: testMethodPtr\n') self.outputFile.write(' contains\n') self.outputFile.write(' procedure :: runMethod\n') self.outputFile.write(' end type WrapUserTestCase\n\n') self.outputFile.write(' abstract interface\n') self.outputFile.write(' subroutine userTestMethod(this)\n') if self.userModuleName: self.outputFile.write(' use ' + self.userModuleName + '\n') if 'type' in self.userTestCase: self.outputFile.write(' class (' + self.userTestCase['type'] + '), intent(inout) :: this\n') self.outputFile.write(' end subroutine userTestMethod\n') self.outputFile.write(' end interface\n\n') def printRunMethod(self): self.outputFile.write(' subroutine runMethod(this)\n') self.outputFile.write(' class (WrapUserTestCase), intent(inout) :: this\n\n') self.outputFile.write(' call this%testMethodPtr(this)\n') self.outputFile.write(' end subroutine runMethod\n\n') def printParameterHeader(self, type): self.outputFile.write(' type (' + type + '), allocatable :: testParameters(:)\n') self.outputFile.write(' type (' + type + ') :: testParameter\n') self.outputFile.write(' integer :: iParam \n') self.outputFile.write(' integer, allocatable :: cases(:) \n') self.outputFile.write(' \n') def printMakeSuite(self): 
self.outputFile.write('function ' + self.suiteName + '() result(suite)\n') self.outputFile.write(' use pFUnit_mod\n') if (self.userModuleName): self.outputFile.write(' use ' + self.userModuleName + '\n') self.outputFile.write(' use '+ self.wrapModuleName + '\n') self.outputFile.write(' type (TestSuite) :: suite\n\n') if not self.userModuleName: for testMethod in self.userTestMethods: if ('ifdef' in testMethod): self.outputFile.write('#ifdef ' + testMethod['ifdef'] + '\n') elif ('ifndef' in testMethod): self.outputFile.write('#ifndef ' + testMethod['ifndef'] + '\n') self.outputFile.write(' external ' + testMethod['name'] + '\n') if ('ifdef' in testMethod or 'ifndef' in testMethod): self.outputFile.write('#endif\n') self.outputFile.write('\n') if 'setUp' in self.userTestCase: self.outputFile.write(' external ' + self.userTestCase['setUp'] + '\n') if 'tearDown' in self.userTestCase: self.outputFile.write(' external ' + self.userTestCase['tearDown'] + '\n') self.outputFile.write('\n') if 'testParameterType' in self.userTestCase: type = self.userTestCase['testParameterType'] self.printParameterHeader(type) self.outputFile.write(" suite = newTestSuite('" + self.suiteName + "')\n\n") for testMethod in self.userTestMethods: if ('ifdef' in testMethod): self.outputFile.write('#ifdef ' + testMethod['ifdef'] + '\n') elif ('ifndef' in testMethod): self.outputFile.write('#ifndef ' + testMethod['ifndef'] + '\n') if 'type' in self.userTestCase: self.addUserTestMethod(testMethod) else: if 'npRequests' in testMethod: self.addMpiTestMethod(testMethod) else: # vanilla self.addSimpleTestMethod(testMethod) self.outputFile.write('\n') if ('ifdef' in testMethod or 'ifndef' in testMethod): self.outputFile.write('#endif\n') self.outputFile.write('\nend function ' + self.suiteName + '\n\n') def addSimpleTestMethod(self, testMethod): args = "'" + testMethod['name'] + "', " + testMethod['name'] if 'setUp' in testMethod: args += ', ' + testMethod['setUp'] elif 'setUp' in self.userTestCase: args 
+= ', ' + self.userTestCase['setUp'] if 'tearDown' in testMethod: args += ', ' + testMethod['tearDown'] elif 'tearDown' in self.userTestCase: args += ', ' + self.userTestCase['tearDown'] if 'type' in testMethod: type = testMethod['type'] else: type = 'newTestMethod' self.outputFile.write(' call suite%addTest(' + type + '(' + args + '))\n') def addMpiTestMethod(self, testMethod): for npes in testMethod['npRequests']: args = "'" + testMethod['name'] + "', " + testMethod['name'] + ", " + str(npes) if 'setUp' in testMethod: args += ', ' + testMethod['setUp'] elif 'setUp' in self.userTestCase: args += ', ' + self.userTestCase['setUp'] if 'tearDown' in testMethod: args += ', ' + testMethod['tearDown'] elif 'tearDown' in self.userTestCase: args += ', ' + self.userTestCase['tearDown'] if 'type' in testMethod: type = testMethod['type'] else: type = 'newMpiTestMethod' self.outputFile.write(' call suite%addTest(' + type + '(' + args + '))\n') def addUserTestMethod(self, testMethod): args = "'" + testMethod['name'] + "', " + testMethod['name'] if 'npRequests' in testMethod: npRequests = testMethod['npRequests'] else: if 'npRequests' in self.userTestCase: npRequests = self.userTestCase['npRequests'] else: npRequests = [1] if 'cases' in testMethod: cases = testMethod['cases'] elif 'cases' in self.userTestCase: cases = self.userTestCase['cases'] testParameterArg = '' # unless if 'cases' in locals(): testParameterArg = ', testParameter' self.outputFile.write(' cases = ' + testMethod['cases'] + '\n') self.outputFile.write(' testParameters = [(' + self.userTestCase['testParameterConstructor'] + '(cases(iCase)), iCase = 1, size(cases))]\n\n') if 'testParameterType' in self.userTestCase: if 'testParameters' in testMethod: testParameters = testMethod['testParameters'] elif 'testParameters' in self.userTestCase: testParameters = self.userTestCase['testParameters'] isMpiTestCase = 'npRequests' in self.userTestCase isMpiTestCase = isMpiTestCase or any('npRequests' in testMethod for 
testMethod in self.userTestMethods) if 'testParameters' in locals(): testParameterArg = ', testParameter' self.outputFile.write(' testParameters = ' + testParameters + '\n\n') elif isMpiTestCase: testParameterArg = ', testParameter' for npes in npRequests: if 'testParameters' in locals() or 'cases' in locals(): self.outputFile.write(' do iParam = 1, size(testParameters)\n') self.outputFile.write(' testParameter = testParameters(iParam)\n') if isMpiTestCase: self.outputFile.write(' call testParameter%setNumProcessesRequested(' + str(npes) + ')\n') self.outputFile.write(' call suite%addTest(makeCustomTest(' + args + testParameterArg + '))\n') if 'cases' in locals() or 'testParameters' in locals(): self.outputFile.write(' end do\n') def printMakeCustomTest(self, isMpiTestCase): args = 'methodName, testMethod' declareArgs = '#ifdef INTEL_13\n' declareArgs += ' use pfunit_mod, only: testCase\n' declareArgs += '#endif\n' declareArgs += ' type (WrapUserTestCase) :: aTest\n' declareArgs += '#ifdef INTEL_13\n' declareArgs += ' target :: aTest\n' declareArgs += ' class (WrapUserTestCase), pointer :: p\n' declareArgs += '#endif\n' declareArgs += ' character(len=*), intent(in) :: methodName\n' declareArgs += ' procedure(userTestMethod) :: testMethod\n' if 'testParameterType' in self.userTestCase: args += ', testParameter' declareArgs += ' type (' + self.userTestCase['testParameterType'] + '), intent(in) :: testParameter\n' self.outputFile.write(' function makeCustomTest(' + args + ') result(aTest)\n') self.outputFile.write(declareArgs) if 'constructor' in self.userTestCase: if 'testParameterType' in self.userTestCase: constructor = self.userTestCase['constructor'] + '(testParameter)' else: constructor = self.userTestCase['constructor'] + '()' self.outputFile.write(' aTest%' + self.userTestCase['type'] + ' = ' + constructor + '\n\n') self.outputFile.write(' aTest%testMethodPtr => testMethod\n') self.outputFile.write('#ifdef INTEL_13\n') self.outputFile.write(' p => aTest\n') 
self.outputFile.write(' call p%setName(methodName)\n') self.outputFile.write('#else\n') self.outputFile.write(' call aTest%setName(methodName)\n') self.outputFile.write('#endif\n') if 'testParameterType' in self.userTestCase: self.outputFile.write(' call aTest%setTestParameter(testParameter)\n') self.outputFile.write(' end function makeCustomTest\n') def makeWrapperModule(self): # ! Start here self.printHeader() if 'type' in self.userTestCase: self.printWrapUserTestCase() self.outputFile.write('contains\n\n') if 'type' in self.userTestCase: self.printRunMethod() if 'type' in self.userTestCase: isMpiTestCase = 'npRequests' in self.userTestCase isMpiTestCase = isMpiTestCase or any('npRequests' in testMethod for testMethod in self.userTestMethods) if isMpiTestCase and not 'testParameterType' in self.userTestCase: self.userTestCase['testParameterType'] = 'MpiTestParameter' self.printMakeCustomTest(isMpiTestCase) self.printTail() self.printMakeSuite() def final(self): self.inputFile.close() self.outputFile.close() if __name__ == "__main__": import sys print("Processing file", sys.argv[1]) p = Parser(sys.argv[1], sys.argv[2]) p.run() p.final() print(" ... Done. Results in", sys.argv[2])
{ "content_hash": "11cac19123bca6d0c1177098fccc582f", "timestamp": "", "source": "github", "line_count": 894, "max_line_length": 122, "avg_line_length": 38.45973154362416, "alnum_prop": 0.5648721752028619, "repo_name": "LungNoodle/lungsim", "id": "65e858adebeb4a8c9f7d03eef2b4f65180383969", "size": "34426", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/pFUnit-3.2.9/bin/pFUnitParser.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "140076" }, { "name": "CMake", "bytes": "73863" }, { "name": "Fortran", "bytes": "1010258" }, { "name": "Makefile", "bytes": "6206" }, { "name": "Python", "bytes": "120926" }, { "name": "SWIG", "bytes": "5076" }, { "name": "Shell", "bytes": "2174" } ], "symlink_target": "" }
# Flickr user-location scraper (Python 2).
# Reads user ids from users.txt, queries each user's profile via the Flickr
# API, and appends "user";"country";"city" rows to userlocations.csv.
# A progress file (prog.txt) records already-processed ids so the script can
# be safely restarted after an interruption.
import flickrapi
import json
import time
import os

#Flickrapi documentation: https://stuvel.eu/flickrapi-doc/2-calling.html
#FIRST: get your own API-keys!
api_key = u"YOUR_API_KEY_HERE" #Request your own key and place the key inside the quotes.
api_secret = u"YOUR_API_SECRET_HERE" #Request your own key and place the secret inside the quotes.

flickr = flickrapi.FlickrAPI(api_key, api_secret, format='json')
# Opens a browser window for OAuth the first time it runs on a machine.
flickr.authenticate_via_browser(perms='read') #Requires read authentification: https://www.flickr.com/services/api/flickr.photos.getWithGeoData.html (Needs to be done once per Computer running this)

# NOTE(review): userfile is never closed; consider a with-block.
userfile = open("users.txt", "r")
users = userfile.readlines()

print str(len(users)) + " users will be looked up with the flickr API"
print "This may take some time, please ensure a decent internet connection."
print "A progress-file will be created to keep track of all done users"
print "This progress-file will come in handy if you interrupt the script, or if the connection gets interrupted"

# Resume support: load previously processed user ids, or create fresh files.
state = os.path.isfile("prog.txt")
if state == True:
    prog = open("prog.txt", "r")
    progr = prog.readlines()
    progress = []
    for item in progr:
        item = item.strip()
        progress.append(item)
    prog.close()
else:
    saveto= open("userlocations.csv", "w")
    prog = open("prog.txt", "w")
    saveto.close()
    prog.close()
    progress = []

# Reopen both files in append mode for the scraping loop.
# NOTE(review): these handles are never closed/flushed at the end; the final
# rows rely on interpreter shutdown to be written — consider explicit close().
prog = open("prog.txt", "a")
saveto = open("userlocations.csv", "a")
print "All preparations done."
time.sleep(5)

for user in users:
    user = user.strip()
    if user not in progress:
        location_var = flickr.profile.getProfile(api_key= api_key, user_id = user)
        parsedloc = json.loads(location_var.decode('utf-8'))
        # The profile payload nests the location fields inside a sub-dict;
        # find it by checking value types.
        # NOTE(review): the str(type(...)) comparison is a Python-2-only
        # idiom; isinstance(parsedloc[key], dict) would be portable.
        for key in parsedloc:
            a = type(parsedloc[key])
            if str(a) == "<type 'dict'>":
                b = parsedloc[key]
                for key in b:
                    try:
                        country = str(b["country"].encode("utf-8"))
                        city = str(b["city"].encode("utf-8"))
                    except:
                        #This will write 'null' for every user who has no location set!
                        country = "null"
                        city = "null"
        # Normalize empty strings to "null" as well.
        if len(country) ==0:
            country = "null"
        if len(city) ==0:
            city = "null"
        print city
        print country
        # Semicolon-separated, quoted CSV row for this user.
        saveto.write('"' + user+ '";"' + country + '";"' + city + '";"' + "\n")
        # Record the user as done so a restart skips it.
        prog.write(user+ "\n")
{ "content_hash": "e8e53d10f57d38eb967bcc668186bdbc", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 220, "avg_line_length": 33.782051282051285, "alnum_prop": 0.5707779886148008, "repo_name": "Frederic-P/flickr-API-Scraper", "id": "1f0d9ace21d8c5cc0ed39eb199c3684d89245a35", "size": "2678", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "User Scraper.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "12076" } ], "symlink_target": "" }
'''
evaluate result

Loads a trained NER model for a given epoch, runs it over the CoNLL
eng.testb set, converts BIOES predictions back to BIO, writes them in
conlleval format, and invokes the conlleval scorer.
Usage: python <script>.py <best_epoch>
'''
from keras.models import load_model
from keras.utils import np_utils

import numpy as np
import os
import sys

# add path
sys.path.append('../')
sys.path.append('../tools')

from tools import conf
from tools import load_data
from tools import prepare

# input sentence dimensions
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.BIOES_gazetteer_length

# Mapping from class index to BIOES tag string.
IOB = conf.ner_BIOES_decode

test_data = load_data.load_ner(dataset='eng.testb', form='BIOES')

# Epoch number of the best checkpoint, passed on the command line.
best_epoch = sys.argv[1]

# Model folder is derived from this script's file name (a fixed 9-char
# prefix and the '.py' suffix are stripped).
model_name = os.path.basename(__file__)[9:-3]
folder_path = './model/%s'%model_name
model_path = '%s/model_epoch_%s.h5'%(folder_path, best_epoch)
result = open('%s/predict.txt'%folder_path, 'w')

def convert(chunktags):
    """Convert a list of BIOES chunk tags to BIO, in place.

    E-/S- tags become I- (except an S- repeating the previous entity type,
    which becomes B- to keep adjacent entities separate); B- is demoted to
    I- unless it follows a same-type tag. Returns the mutated list.
    """
    # convert BIOES to BIO
    for p, q in enumerate(chunktags):
        if q.startswith("E-"):
            chunktags[p] = "I-" + q[2:]
        elif q.startswith("S-"):
            if p==0:
                chunktags[p] = "I-" + q[2:]
            elif q[2:]==chunktags[p-1][2:]:
                # Same entity type as the previous token: keep a boundary.
                chunktags[p] = "B-" + q[2:]
            elif q[2:]!=chunktags[p-1][2:]:
                chunktags[p] = "I-" + q[2:]
        elif q.startswith("B-"):
            if p==0:
                chunktags[p] = "I-" + q[2:]
            else:
                if q[2:]!=chunktags[p-1][2:]:
                    chunktags[p] = "I-" + q[2:]
    return chunktags

print('loading model...')
model = load_model(model_path)
print('loading model finished.')

for each in test_data:
    embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=[each], gram='bi', form='BIOES')
    # One-hot encode POS/chunk features and zero-pad each sequence out to
    # the fixed step_length.
    pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
    chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
    gazetteer, length_2 = prepare.prepare_gazetteer_BIOES(batch=[each], gazetteer='conll')
    gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
    prob = model.predict_on_batch([embed_index, hash_index, pos, chunk, gazetteer])

    for i, l in enumerate(length):
        predict_label = np_utils.categorical_probas_to_classes(prob[i])
        # Truncate predictions back to the true (unpadded) sentence length.
        chunktags = [IOB[j] for j in predict_label][:l]
        word_pos_chunk = list(zip(*each))

        # convert
        # Transpose to per-column lists so the gold chunk column (index 3)
        # can be converted from BIOES to BIO, then transpose back.
        word_pos_chunk = list(zip(*word_pos_chunk))
        word_pos_chunk = [list(x) for x in word_pos_chunk]
        word_pos_chunk[3] = convert(word_pos_chunk[3])
        word_pos_chunk = list(zip(*word_pos_chunk))

        #convert
        chunktags = convert(chunktags)

        # conlleval format: one token per line, predicted tag last; blank
        # line between sentences.
        for ind, chunktag in enumerate(chunktags):
            result.write(' '.join(word_pos_chunk[ind])+' '+chunktag+'\n')
        result.write('\n')

result.close()
print('epoch %s predict over !'%best_epoch)
os.system('../tools/conlleval < %s/predict.txt'%folder_path)
{ "content_hash": "377d3673f9e82954fe4d8e57b48daecf", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 161, "avg_line_length": 32.11578947368421, "alnum_prop": 0.6224188790560472, "repo_name": "danche354/Sequence-Labeling", "id": "91e7d60b99146c08069b815781230bcac5a92706", "size": "3051", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ner_BIOES/evaluate-senna-hash-2-pos-chunk-conll-gazetteer-BIOES-128-64.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "702241" } ], "symlink_target": "" }
# Self-tests for the wlgen RTA (rt-app) workload generator: JSON generation
# for profile/custom workloads, task composition, and calibration handling.
from collections import OrderedDict, namedtuple

import json
import os

from perf_analysis import PerfAnalysis
from wlgen import RTA, Periodic, Ramp

from test_wlgen import WlgenSelfBase

class RTABase(WlgenSelfBase):
    """
    Common functionality for testing RTA

    Doesn't have "Test" in the name so that nosetests doesn't try to run it
    directly
    """

    tools = ['rt-app']

    def get_expected_command(self, rta_wload):
        """Return the rt-app command we should execute when `run` is called"""
        rta_path = os.path.join(self.target.executables_directory, 'rt-app')
        json_path = os.path.join(rta_wload.run_dir, rta_wload.json)
        return '{} {} 2>&1'.format(rta_path, json_path)

    def setUp(self):
        super(RTABase, self).setUp()

        # Can't calibrate rt-app because:
        # - Need to set performance governor
        # - Need to use SCHED_FIFO + high priority
        # We probably don't have permissions so use a dummy calibration.
        self.calibration = {c: 100 for c in range(len(self.target.cpuinfo.cpu_names))}

        os.makedirs(self.host_out_dir)

    def assert_output_file_exists(self, path):
        """Assert that a file was created in host_out_dir"""
        path = os.path.join(self.host_out_dir, path)
        self.assertTrue(os.path.isfile(path),
                        'No output file {} from rt-app'.format(path))

    def assert_can_read_logfile(self, exp_tasks):
        """Assert that the perf_analysis module understands the log output"""
        pa = PerfAnalysis(self.host_out_dir)
        self.assertSetEqual(set(exp_tasks), set(pa.tasks()))

class TestRTAProfile(RTABase):
    def test_profile_periodic_smoke(self):
        """
        Smoketest Periodic rt-app workload

        Creates a workload using Periodic, tests that the JSON has the expected
        content, then tests that it can be run.
        """

        rtapp = RTA(self.target, name='test', calibration=self.calibration)

        rtapp.conf(
            kind = 'profile',
            params = {
                'task_p20': Periodic(
                    period_ms = 100,
                    duty_cycle_pct = 20,
                    duration_s = 1,
                ).get(),
            },
            run_dir=self.target_run_dir
        )

        with open(rtapp.json) as f:
            conf = json.load(f)

        # 20% duty cycle over a 100ms period for 1s => 10 loops of 20ms
        # run time in a 100ms (100000us) timer period.
        [phase] = conf['tasks']['task_p20']['phases'].values()
        self.assertDictEqual(phase, {
            'loop': 10,
            'run': 20000,
            'timer': {
                'period': 100000,
                'ref': 'task_p20'
            }
        })

        rtapp.run(out_dir=self.host_out_dir)

        rtapp_cmds = [c for c in self.target.executed_commands if 'rt-app' in c]
        self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)])

        self.assert_output_file_exists('output.log')
        self.assert_output_file_exists('test_00.json')
        self.assert_output_file_exists('rt-app-task_p20-0.log')
        self.assert_can_read_logfile(exp_tasks=['task_p20'])

class TestRTAComposition(RTABase):
    def test_composition(self):
        """
        Test RTA task composition with __add__

        Creates a composed workload by +-ing RTATask objects, tests that the
        JSON has the expected content, then tests running the workload
        """

        rtapp = RTA(self.target, name='test', calibration=self.calibration)

        light  = Periodic(duty_cycle_pct=10, duration_s=1.0, period_ms=10)

        start_pct = 10
        end_pct = 90
        delta_pct = 20
        num_ramp_phases = ((end_pct - start_pct) / delta_pct) + 1
        ramp = Ramp(start_pct=start_pct, end_pct=end_pct, delta_pct=delta_pct,
                    time_s=1, period_ms=50)

        heavy  = Periodic(duty_cycle_pct=90, duration_s=0.1, period_ms=100)

        lrh_task = light + ramp + heavy

        rtapp.conf(
            kind = 'profile',
            params = {
                'task_ramp': lrh_task.get()
            },
            run_dir=self.target_run_dir
        )

        with open(rtapp.json) as f:
            conf = json.load(f, object_pairs_hook=OrderedDict)

        phases = conf['tasks']['task_ramp']['phases'].values()

        exp_phases = [
            # Light phase:
            {
                "loop": 100,
                "run": 1000,
                "timer": {
                    "period": 10000,
                    "ref": "task_ramp"
                }
            },
            # Ramp phases:
            {
                "loop": 20,
                "run": 5000,
                "timer": {
                    "period": 50000,
                    "ref": "task_ramp"
                }
            },
            {
                "loop": 20,
                "run": 15000,
                "timer": {
                    "period": 50000,
                    "ref": "task_ramp"
                }
            },
            {
                "loop": 20,
                "run": 25000,
                "timer": {
                    "period": 50000,
                    "ref": "task_ramp"
                }
            },
            {
                "loop": 20,
                "run": 35000,
                "timer": {
                    "period": 50000,
                    "ref": "task_ramp"
                }
            },
            {
                "loop": 20,
                "run": 45000,
                "timer": {
                    "period": 50000,
                    "ref": "task_ramp"
                }
            },
            # Heavy phase:
            {
                "loop": 1,
                "run": 90000,
                "timer": {
                    "period": 100000,
                    "ref": "task_ramp"
                }
            }]

        self.assertListEqual(phases, exp_phases)

        rtapp.run(out_dir=self.host_out_dir)

        rtapp_cmds = [c for c in self.target.executed_commands if 'rt-app' in c]
        self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)])

        self.assert_output_file_exists('output.log')
        self.assert_output_file_exists('test_00.json')
        self.assert_output_file_exists('rt-app-task_ramp-0.log')
        self.assert_can_read_logfile(exp_tasks=['task_ramp'])

    def test_invalid_composition(self):
        """Test that you can't compose tasks with a delay in the second task"""
        t1 = Periodic()
        t2 = Periodic(delay_s=1)

        # Should work fine if delayed task is the first one
        try:
            t3 = t2 + t1
        except Exception as e:
            raise AssertionError("Couldn't compose tasks: {}".format(e))

        # But not the other way around
        with self.assertRaises(ValueError):
            t3 = t1 + t2

class TestRTACustom(RTABase):
    def _test_custom_smoke(self, calibration):
        """
        Test RTA custom workload

        Creates an rt-app workload using 'custom' and checks that the json
        roughly matches the file we provided. If we have root, attempts to run
        the workload.
        """

        json_path = os.path.join(os.getenv('LISA_HOME'),
                                 'assets', 'mp3-short.json')

        rtapp = RTA(self.target, name='test', calibration=calibration)

        # Configure this RTApp instance to:
        rtapp.conf(kind='custom', params=json_path, duration=5,
                   run_dir=self.target_run_dir)

        with open(rtapp.json) as f:
            conf = json.load(f)

        # Convert k to str because the json loader gives us unicode strings
        tasks = set([str(k) for k in conf['tasks'].keys()])
        self.assertSetEqual(
            tasks,
            set(['AudioTick', 'AudioOut', 'AudioTrack',
                 'mp3.decoder', 'OMXCall']))

        # Would like to try running the workload but mp3-short.json has nonzero
        # 'priority' fields, and we probably don't have permission for that
        # unless we're root.
        if self.target.is_rooted:
            rtapp.run(out_dir=self.host_out_dir)

            rtapp_cmds = [c for c in self.target.executed_commands
                          if 'rt-app' in c]
            self.assertListEqual(rtapp_cmds, [self.get_expected_command(rtapp)])

            self.assert_output_file_exists('output.log')
            self.assert_output_file_exists('test_00.json')

    def test_custom_smoke_calib(self):
        """Test RTA custom workload (providing calibration)"""
        self._test_custom_smoke(self.calibration)

    def test_custom_smoke_no_calib(self):
        """Test RTA custom workload (providing no calibration)"""
        self._test_custom_smoke(None)

# Stand-in for devlib's big.LITTLE module: only the 'bigs' attribute is read.
DummyBlModule = namedtuple('bl', ['bigs'])

class TestRTACalibrationConf(RTABase):
    """Test setting the "calibration" field of rt-app config"""
    def _get_calib_conf(self, calibration):
        # Build a minimal workload and return the resulting 'calibration'
        # value from the generated JSON.
        rtapp = RTA(self.target, name='test', calibration=calibration)
        rtapp.conf(
            kind = 'profile',
            params = {'t1': Periodic().get()},
            run_dir=self.target_run_dir
        )

        with open(rtapp.json) as f:
            return json.load(f)['global']['calibration']

    def test_calibration_conf_pload(self):
        """Test that the smallest pload value is used, if provided"""
        cpus = range(self.target.number_of_cpus)
        conf = self._get_calib_conf(dict(zip(cpus, [c + 100 for c in cpus])))
        self.assertEqual(conf, 100,
                         'Calibration not set to minimum pload value')

    def test_calibration_conf_bl(self):
        """Test that a big CPU is used if big.LITTLE data is available"""
        self.target.modules.append('bl')
        self.target.bl = DummyBlModule([1, 2])
        conf = self._get_calib_conf(None)
        self.assertIn(conf, ['CPU{}'.format(c) for c in self.target.bl.bigs],
                      'Calibration not set to use a big CPU')

    def test_calibration_conf_nodata(self):
        """Test that the last CPU is used if no data is available"""
        conf = self._get_calib_conf(None)
        cpu = self.target.number_of_cpus - 1
        self.assertEqual(conf, 'CPU{}'.format(cpu),
                         'Calibration not set to highest numbered CPU')
{ "content_hash": "3f4c92857c537a1896b9e0ef4ded8cb7", "timestamp": "", "source": "github", "line_count": 304, "max_line_length": 80, "avg_line_length": 33.75328947368421, "alnum_prop": 0.5300652957801384, "repo_name": "arnoldlu/lisa", "id": "12aa21a4e8a07fd13d38cd3245667c941031fc48", "size": "10898", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/lisa/test_wlgen_rtapp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "40598164" }, { "name": "Python", "bytes": "609335" }, { "name": "Shell", "bytes": "2066" } ], "symlink_target": "" }
from __future__ import unicode_literals from moto.core.exceptions import RESTError class ELBClientError(RESTError): code = 400 class DuplicateTagKeysError(ELBClientError): def __init__(self, cidr): super(DuplicateTagKeysError, self).__init__( "DuplicateTagKeys", "Tag key was specified more than once: {0}" .format(cidr)) class LoadBalancerNotFoundError(ELBClientError): def __init__(self, cidr): super(LoadBalancerNotFoundError, self).__init__( "LoadBalancerNotFound", "The specified load balancer does not exist: {0}" .format(cidr)) class TooManyTagsError(ELBClientError): def __init__(self): super(TooManyTagsError, self).__init__( "LoadBalancerNotFound", "The quota for the number of tags that can be assigned to a load balancer has been reached") class BadHealthCheckDefinition(ELBClientError): def __init__(self): super(BadHealthCheckDefinition, self).__init__( "ValidationError", "HealthCheck Target must begin with one of HTTP, TCP, HTTPS, SSL") class DuplicateLoadBalancerName(ELBClientError): def __init__(self, name): super(DuplicateLoadBalancerName, self).__init__( "DuplicateLoadBalancerName", "The specified load balancer name already exists for this account: {0}" .format(name))
{ "content_hash": "60bac2abaaa71ec9c58561844284d579", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 104, "avg_line_length": 29.20408163265306, "alnum_prop": 0.6457023060796646, "repo_name": "heddle317/moto", "id": "897bd6dd1dccdb0aa40dd72e4a8775e23ff598f2", "size": "1431", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "moto/elb/exceptions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "5848" }, { "name": "Java", "bytes": "1688" }, { "name": "JavaScript", "bytes": "756" }, { "name": "Makefile", "bytes": "630" }, { "name": "Python", "bytes": "2633276" }, { "name": "Ruby", "bytes": "188" } ], "symlink_target": "" }
""" Management command to update all snippet cached values. """ from django.core.management.base import BaseCommand from ...models import set_cached_snippet, Snippet class Command(BaseCommand): help = "Updates the cache values for all snippets" def handle(self, *args, **kwargs): # Already iterating, skip the extra count query count = 0 for snippet in Snippet.objects.all(): set_cached_snippet(snippet.key) count += 1 self.stdout.write( "Refreshed the cache for {0} snippets.".format(count))
{ "content_hash": "15a0cdc91f91eef7ecb050df0cf25503", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 70, "avg_line_length": 28.9, "alnum_prop": 0.6505190311418685, "repo_name": "adw0rd/django-addendum-inline", "id": "36d45965e9bcc6ae1e265fb1603bfdf1e927ee7e", "size": "578", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "addendum/management/commands/refresh_snippet_cache.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "5523" }, { "name": "Python", "bytes": "36743" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('note', '0002_myuser'), ] operations = [ migrations.AlterModelOptions( name='myuser', options={'verbose_name_plural': 'users', 'verbose_name': 'user'}, ), migrations.AddField( model_name='myuser', name='date_joined', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined'), preserve_default=True, ), migrations.AddField( model_name='myuser', name='email', field=models.EmailField(verbose_name='email address', max_length=75, blank=True), preserve_default=True, ), migrations.AddField( model_name='myuser', name='first_name', field=models.CharField(verbose_name='first name', max_length=30, blank=True), preserve_default=True, ), migrations.AddField( model_name='myuser', name='is_active', field=models.BooleanField(default=True, verbose_name='active', help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'), preserve_default=True, ), migrations.AddField( model_name='myuser', name='is_staff', field=models.BooleanField(default=False, verbose_name='staff status', help_text='Designates whether the user can log into this admin site.'), preserve_default=True, ), migrations.AddField( model_name='myuser', name='last_name', field=models.CharField(verbose_name='last name', max_length=30, blank=True), preserve_default=True, ), migrations.AddField( model_name='myuser', name='username', field=models.CharField(default='dany', max_length=30, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'), preserve_default=False, ), ]
{ "content_hash": "093bf8e068d349f6ea68ae2b7ce88303", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 190, "avg_line_length": 37.766666666666666, "alnum_prop": 0.5856134157105031, "repo_name": "LeMeteore/boomer2", "id": "9dca92fa3d6e92cd41282d3bd384329f49bca787", "size": "2290", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "note/migrations/0003_auto_20150305_0955.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "15242" } ], "symlink_target": "" }
"""Implementation of tf.metrics module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import sets from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export def metric_variable(shape, dtype, validate_shape=True, name=None): """Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections. If running in a `DistributionStrategy` context, the variable will be "replica local". This means: * The returned object will be a container with separate variables per replica of the model. * When writing to the variable, e.g. using `assign_add` in a metric update, the update will be applied to the variable local to the replica. * To get a metric's result value, we need to sum the variable values across the replicas before computing the final answer. Furthermore, the final answer should be computed once instead of in every replica. Both of these are accomplished by running the computation of the final result value inside `distribution_strategy_context.get_replica_context().merge_call(fn)`. 
Inside the `merge_call()`, ops are only added to the graph once and access to a replica-local variable in a computation returns the sum across all replicas. Args: shape: Shape of the created variable. dtype: Type of the created variable. validate_shape: (Optional) Whether shape validation is enabled for the created variable. name: (Optional) String name of the created variable. Returns: A (non-trainable) variable initialized to zero, or if inside a `DistributionStrategy` scope a replica-local variable container. """ # Note that synchronization "ON_READ" implies trainable=False. return variable_scope.variable( lambda: array_ops.zeros(shape, dtype), collections=[ ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES ], validate_shape=validate_shape, synchronization=variable_scope.VariableSynchronization.ON_READ, aggregation=variable_scope.VariableAggregation.SUM, name=name) def _remove_squeezable_dimensions(predictions, labels, weights): """Squeeze or expand last dim if needed. Squeezes last dim of `predictions` or `labels` if their rank differs by 1 (using confusion_matrix.remove_squeezable_dimensions). Squeezes or expands last dim of `weights` if its rank differs by 1 from the new rank of `predictions`. If `weights` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: predictions: Predicted values, a `Tensor` of arbitrary dimensions. labels: Optional label `Tensor` whose dimensions match `predictions`. weights: Optional weight scalar or `Tensor` whose dimensions match `predictions`. Returns: Tuple of `predictions`, `labels` and `weights`. Each of them possibly has the last dimension squeezed, `weights` could be extended by one dimension. 
""" predictions = ops.convert_to_tensor(predictions) if labels is not None: labels, predictions = confusion_matrix.remove_squeezable_dimensions( labels, predictions) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) if weights is None: return predictions, labels, None weights = ops.convert_to_tensor(weights) weights_shape = weights.get_shape() weights_rank = weights_shape.ndims if weights_rank == 0: return predictions, labels, weights predictions_shape = predictions.get_shape() predictions_rank = predictions_shape.ndims if (predictions_rank is not None) and (weights_rank is not None): # Use static rank. if weights_rank - predictions_rank == 1: weights = array_ops.squeeze(weights, [-1]) elif predictions_rank - weights_rank == 1: weights = array_ops.expand_dims(weights, [-1]) else: # Use dynamic rank. weights_rank_tensor = array_ops.rank(weights) rank_diff = weights_rank_tensor - array_ops.rank(predictions) def _maybe_expand_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, -1), lambda: array_ops.expand_dims(weights, [-1]), lambda: weights) # Don't attempt squeeze if it will fail based on static check. if ((weights_rank is not None) and (not weights_shape.dims[-1].is_compatible_with(1))): maybe_squeeze_weights = lambda: weights else: maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1]) def _maybe_adjust_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights) # If weights are scalar, do nothing. Otherwise, try to add or remove a # dimension to match predictions. weights = control_flow_ops.cond( math_ops.equal(weights_rank_tensor, 0), lambda: weights, _maybe_adjust_weights) return predictions, labels, weights def _maybe_expand_labels(labels, predictions): """If necessary, expand `labels` along last dimension to match `predictions`. Args: labels: `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN]. 
The latter implies num_labels=1, in which case the result is an expanded `labels` with shape [D1, ... DN, 1]. predictions: `Tensor` with shape [D1, ... DN, num_classes]. Returns: `labels` with the same rank as `predictions`. Raises: ValueError: if `labels` has invalid shape. """ with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope: labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels) # If sparse, expand sparse shape. if isinstance(labels, sparse_tensor.SparseTensor): return control_flow_ops.cond( math_ops.equal( array_ops.rank(predictions), array_ops.size(labels.dense_shape) + 1), lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda labels, shape=array_ops.concat((labels.dense_shape, (1,)), 0), name=scope), lambda: labels) # Otherwise, try to use static shape. labels_rank = labels.get_shape().ndims if labels_rank is not None: predictions_rank = predictions.get_shape().ndims if predictions_rank is not None: if predictions_rank == labels_rank: return labels if predictions_rank == labels_rank + 1: return array_ops.expand_dims(labels, -1, name=scope) raise ValueError( 'Unexpected labels shape %s for predictions shape %s.' % (labels.get_shape(), predictions.get_shape())) # Otherwise, use dynamic shape. return control_flow_ops.cond( math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1), lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels) def _safe_scalar_div(numerator, denominator, name): """Divides two values, returning 0 if the denominator is 0. Args: numerator: A scalar `float64` `Tensor`. denominator: A scalar `float64` `Tensor`. name: Name for the returned op. 
Returns: 0 if `denominator` == 0, else `numerator` / `denominator` """ numerator.get_shape().with_rank_at_most(1) denominator.get_shape().with_rank_at_most(1) return math_ops.div_no_nan(numerator, denominator, name=name) def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None): """Calculate a streaming confusion matrix. Calculates a confusion matrix. For estimation over a stream of data, the function creates an `update_op` operation. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: total_cm: A `Tensor` representing the confusion matrix. update_op: An operation that increments the confusion matrix. """ # Local variable to accumulate the predictions in the confusion matrix. total_cm = metric_variable( [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix') # Cast the type to int64 required by confusion_matrix_ops. predictions = math_ops.to_int64(predictions) labels = math_ops.to_int64(labels) num_classes = math_ops.to_int64(num_classes) # Flatten the input if its rank > 1. 
if predictions.get_shape().ndims > 1: predictions = array_ops.reshape(predictions, [-1]) if labels.get_shape().ndims > 1: labels = array_ops.reshape(labels, [-1]) if (weights is not None) and (weights.get_shape().ndims > 1): weights = array_ops.reshape(weights, [-1]) # Accumulate the prediction to current confusion matrix. current_cm = confusion_matrix.confusion_matrix( labels, predictions, num_classes, weights=weights, dtype=dtypes.float64) update_op = state_ops.assign_add(total_cm, current_cm) return total_cm, update_op def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args): """Aggregate metric value across replicas.""" def fn(distribution, *a): """Call `metric_value_fn` in the correct control flow context.""" if hasattr(distribution.extended, '_outer_control_flow_context'): # If there was an outer context captured before this method was called, # then we enter that context to create the metric value op. If the # caputred context is `None`, ops.control_dependencies(None) gives the # desired behavior. Else we use `Enter` and `Exit` to enter and exit the # captured context. # This special handling is needed because sometimes the metric is created # inside a while_loop (and perhaps a TPU rewrite context). But we don't # want the value op to be evaluated every step or on the TPU. So we # create it outside so that it can be evaluated at the end on the host, # once the update ops have been evaluted. 
# pylint: disable=protected-access if distribution.extended._outer_control_flow_context is None: with ops.control_dependencies(None): metric_value = metric_value_fn(distribution, *a) else: distribution.extended._outer_control_flow_context.Enter() metric_value = metric_value_fn(distribution, *a) distribution.extended._outer_control_flow_context.Exit() # pylint: enable=protected-access else: metric_value = metric_value_fn(distribution, *a) if metrics_collections: ops.add_to_collections(metrics_collections, metric_value) return metric_value return distribution_strategy_context.get_replica_context().merge_call( fn, args=args) @tf_export(v1=['metrics.mean']) def mean(values, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the (weighted) mean of the given values. The `mean` function creates two local variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. 
Returns: mean: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean is not supported when eager execution ' 'is enabled.') with variable_scope.variable_scope(name, 'mean', (values, weights)): values = math_ops.to_float(values) total = metric_variable([], dtypes.float32, name='total') count = metric_variable([], dtypes.float32, name='count') if weights is None: num_values = math_ops.to_float(array_ops.size(values)) else: values, _, weights = _remove_squeezable_dimensions( predictions=values, labels=None, weights=weights) weights = weights_broadcast_ops.broadcast_weights( math_ops.to_float(weights), values) values = math_ops.multiply(values, weights) num_values = math_ops.reduce_sum(weights) update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) with ops.control_dependencies([values]): update_count_op = state_ops.assign_add(count, num_values) def compute_mean(_, t, c): return math_ops.div_no_nan(t, math_ops.maximum(c, 0), name='value') mean_t = _aggregate_across_replicas( metrics_collections, compute_mean, total, count) update_op = math_ops.div_no_nan( update_total_op, math_ops.maximum(update_count_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_t, update_op @tf_export(v1=['metrics.accuracy']) def accuracy(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculates how often `predictions` matches `labels`. 
The `accuracy` function creates two local variables, `total` and `count` that are used to compute the frequency with which `predictions` matches `labels`. This frequency is ultimately returned as `accuracy`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `accuracy`. Internally, an `is_correct` operation computes a `Tensor` with elements 1.0 where the corresponding elements of `predictions` and `labels` match and 0.0 otherwise. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `is_correct`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose shape matches `predictions`. predictions: The predicted values, a `Tensor` of any shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `accuracy` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: accuracy: A `Tensor` representing the accuracy, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `accuracy`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.accuracy is not supported when eager ' 'execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) if labels.dtype != predictions.dtype: predictions = math_ops.cast(predictions, labels.dtype) is_correct = math_ops.to_float(math_ops.equal(predictions, labels)) return mean(is_correct, weights, metrics_collections, updates_collections, name or 'accuracy') def _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=None, includes=None): """Computes true_positives, false_negatives, true_negatives, false_positives. This function creates up to four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives`. `true_positive[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`. `false_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `True`. `true_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `False`. `false_positives[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `False`. For estimation of these metrics over a stream of data, for each metric the function respectively creates an `update_op` operation that updates the variable and returns its value. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`, default to all four. Returns: values: Dict of variables of shape `[len(thresholds)]`. Keys are from `includes`. update_ops: Dict of operations that increments the `values`. Keys are from `includes`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if `includes` contains invalid keys. """ all_includes = ('tp', 'fn', 'tn', 'fp') if includes is None: includes = all_includes else: for include in includes: if include not in all_includes: raise ValueError('Invalid key: %s.' % include) with ops.control_dependencies([ check_ops.assert_greater_equal( predictions, math_ops.cast(0.0, dtype=predictions.dtype), message='predictions must be in [0, 1]'), check_ops.assert_less_equal( predictions, math_ops.cast(1.0, dtype=predictions.dtype), message='predictions must be in [0, 1]') ]): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.to_float(predictions), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) num_thresholds = len(thresholds) # Reshape predictions and labels. predictions_2d = array_ops.reshape(predictions, [-1, 1]) labels_2d = array_ops.reshape( math_ops.cast(labels, dtype=dtypes.bool), [1, -1]) # Use static shape if known. num_predictions = predictions_2d.get_shape().as_list()[0] # Otherwise use dynamic shape. if num_predictions is None: num_predictions = array_ops.shape(predictions_2d)[0] thresh_tiled = array_ops.tile( array_ops.expand_dims(array_ops.constant(thresholds), [1]), array_ops.stack([1, num_predictions])) # Tile the predictions after thresholding them across different thresholds. 
pred_is_pos = math_ops.greater( array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]), thresh_tiled) if ('fn' in includes) or ('tn' in includes): pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) if ('fp' in includes) or ('tn' in includes): label_is_neg = math_ops.logical_not(label_is_pos) if weights is not None: weights = weights_broadcast_ops.broadcast_weights( math_ops.to_float(weights), predictions) weights_tiled = array_ops.tile( array_ops.reshape(weights, [1, -1]), [num_thresholds, 1]) thresh_tiled.get_shape().assert_is_compatible_with( weights_tiled.get_shape()) else: weights_tiled = None values = {} update_ops = {} if 'tp' in includes: true_p = metric_variable( [num_thresholds], dtypes.float32, name='true_positives') is_true_positive = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_pos)) if weights_tiled is not None: is_true_positive *= weights_tiled update_ops['tp'] = state_ops.assign_add(true_p, math_ops.reduce_sum( is_true_positive, 1)) values['tp'] = true_p if 'fn' in includes: false_n = metric_variable( [num_thresholds], dtypes.float32, name='false_negatives') is_false_negative = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_neg)) if weights_tiled is not None: is_false_negative *= weights_tiled update_ops['fn'] = state_ops.assign_add(false_n, math_ops.reduce_sum( is_false_negative, 1)) values['fn'] = false_n if 'tn' in includes: true_n = metric_variable( [num_thresholds], dtypes.float32, name='true_negatives') is_true_negative = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_neg)) if weights_tiled is not None: is_true_negative *= weights_tiled update_ops['tn'] = state_ops.assign_add(true_n, math_ops.reduce_sum( is_true_negative, 1)) values['tn'] = true_n if 'fp' in includes: false_p = metric_variable( [num_thresholds], dtypes.float32, name='false_positives') is_false_positive = 
math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_pos)) if weights_tiled is not None: is_false_positive *= weights_tiled update_ops['fp'] = state_ops.assign_add(false_p, math_ops.reduce_sum( is_false_positive, 1)) values['fp'] = false_p return values, update_ops def _aggregate_variable(v, collections): f = lambda distribution, value: distribution.read_var(value) return _aggregate_across_replicas(collections, f, v) @tf_export(v1=['metrics.auc']) def auc(labels, predictions, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, curve='ROC', name=None, summation_method='trapezoidal'): """Computes the approximate AUC via a Riemann sum. The `auc` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is the computed using the height of the precision values by the recall. This value is ultimately returned as `auc`, an idempotent operation that computes the area under a discretized curve of precision versus recall values (computed using the aforementioned variables). The `num_thresholds` variable controls the degree of discretization with larger numbers of thresholds more closely approximating the true AUC. The quality of the approximation may vary dramatically depending on `num_thresholds`. For best results, `predictions` should be distributed approximately uniformly in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor if this is not the case. Setting `summation_method` to 'minoring' or 'majoring' can help quantify the error in the approximation by providing lower or upper bound estimate of the AUC. 
For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `auc`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use when discretizing the roc curve. metrics_collections: An optional list of collections that `auc` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. curve: Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. name: An optional variable_scope name. summation_method: Specifies the Riemann summation method used (https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that applies the trapezoidal rule; 'careful_interpolation', a variant of it differing only by a more correct interpolation scheme for PR-AUC - interpolating (true/false) positives but not the ratio that is precision; 'minoring' that applies left summation for increasing intervals and right summation for decreasing intervals; 'majoring' that does the opposite. Note that 'careful_interpolation' is strictly preferred to 'trapezoidal' (to be deprecated soon) as it applies the same method for ROC, and a better one (see Davis & Goadrich 2006 for details) for the PR curve. Returns: auc: A scalar `Tensor` representing the current area-under-curve. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `auc`. 
Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.auc is not supported when eager execution ' 'is enabled.') with variable_scope.variable_scope(name, 'auc', (labels, predictions, weights)): if curve != 'ROC' and curve != 'PR': raise ValueError('curve must be either ROC or PR, %s unknown' % (curve)) kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [ (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2) ] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) # Add epsilons to avoid dividing by 0. epsilon = 1.0e-6 def interpolate_pr_auc(tp, fp, fn): """Interpolation formula inspired by section 4 of Davis & Goadrich 2006. Note here we derive & use a closed formula not present in the paper - as follows: Modeling all of TP (true positive weight), FP (false positive weight) and their sum P = TP + FP (positive weight) as varying linearly within each interval [A, B] between successive thresholds, we get Precision = (TP_A + slope * (P - P_A)) / P with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A). The area within the interval is thus (slope / total_pos_weight) times int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P} int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P} where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A) Bringing back the factor (slope / total_pos_weight) we'd put aside, we get slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight where dTP == TP_B - TP_A. 
Note that when P_A == 0 the above calculation simplifies into int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A) which is really equivalent to imputing constant precision throughout the first bucket having >0 true positives. Args: tp: true positive counts fp: false positive counts fn: false negative counts Returns: pr_auc: an approximation of the area under the P-R curve. """ dtp = tp[:num_thresholds - 1] - tp[1:] p = tp + fp prec_slope = math_ops.div_no_nan( dtp, math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0), name='prec_slope') intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:]) safe_p_ratio = array_ops.where( math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0), math_ops.div_no_nan( p[:num_thresholds - 1], math_ops.maximum(p[1:], 0), name='recall_relative_ratio'), array_ops.ones_like(p[1:])) return math_ops.reduce_sum( math_ops.div_no_nan( prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)), math_ops.maximum(tp[1:] + fn[1:], 0), name='pr_auc_increment'), name='interpolate_pr_auc') def compute_auc(tp, fn, tn, fp, name): """Computes the roc-auc or pr-auc based on confusion counts.""" if curve == 'PR': if summation_method == 'trapezoidal': logging.warning( 'Trapezoidal rule is known to produce incorrect PR-AUCs; ' 'please switch to "careful_interpolation" instead.') elif summation_method == 'careful_interpolation': # This one is a bit tricky and is handled separately. return interpolate_pr_auc(tp, fp, fn) rec = math_ops.div(tp + epsilon, tp + fn + epsilon) if curve == 'ROC': fp_rate = math_ops.div(fp, fp + tn + epsilon) x = fp_rate y = rec else: # curve == 'PR'. prec = math_ops.div(tp + epsilon, tp + fp + epsilon) x = rec y = prec if summation_method in ('trapezoidal', 'careful_interpolation'): # Note that the case ('PR', 'careful_interpolation') has been handled # above. 
return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], (y[:num_thresholds - 1] + y[1:]) / 2.), name=name) elif summation_method == 'minoring': return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], math_ops.minimum(y[:num_thresholds - 1], y[1:])), name=name) elif summation_method == 'majoring': return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], math_ops.maximum(y[:num_thresholds - 1], y[1:])), name=name) else: raise ValueError('Invalid summation_method: %s' % summation_method) # sum up the areas of all the trapeziums def compute_auc_value(_, values): return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'], 'value') auc_value = _aggregate_across_replicas( metrics_collections, compute_auc_value, values) update_op = compute_auc(update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return auc_value, update_op @tf_export(v1=['metrics.mean_absolute_error']) def mean_absolute_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean absolute error between the labels and predictions. The `mean_absolute_error` function creates two local variables, `total` and `count` that are used to compute the mean absolute error. This average is weighted by `weights`, and it is ultimately returned as `mean_absolute_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_absolute_error`. Internally, an `absolute_errors` operation computes the absolute value of the differences between `predictions` and `labels`. 
Then `update_op` increments `total` with the reduced sum of the product of `weights` and `absolute_errors`, and it increments `count` with the reduced sum of `weights` If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_absolute_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_absolute_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_absolute_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_absolute_error is not supported ' 'when eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) absolute_errors = math_ops.abs(predictions - labels) return mean(absolute_errors, weights, metrics_collections, updates_collections, name or 'mean_absolute_error') @tf_export(v1=['metrics.mean_cosine_distance']) def mean_cosine_distance(labels, predictions, dim, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the cosine distance between the labels and predictions. 
The `mean_cosine_distance` function creates two local variables, `total` and `count` that are used to compute the average cosine distance between `predictions` and `labels`. This average is weighted by `weights`, and it is ultimately returned as `mean_distance`, which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_distance`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of arbitrary shape. predictions: A `Tensor` of the same shape as `labels`. dim: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Also, dimension `dim` must be `1`. metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: mean_distance: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) radial_diffs = math_ops.multiply(predictions, labels) radial_diffs = math_ops.reduce_sum( radial_diffs, axis=[ dim, ], keepdims=True) mean_distance, update_op = mean(radial_diffs, weights, None, None, name or 'mean_cosine_distance') mean_distance = math_ops.subtract(1.0, mean_distance) update_op = math_ops.subtract(1.0, update_op) if metrics_collections: ops.add_to_collections(metrics_collections, mean_distance) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_distance, update_op @tf_export(v1=['metrics.mean_per_class_accuracy']) def mean_per_class_accuracy(labels, predictions, num_classes, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculates the mean of the per-class accuracies. Calculates the accuracy for each class, then takes the mean of that. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates the accuracy of each class and returns them. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since two variables with shape = [num_classes] will be allocated. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_per_class_accuracy' should be added to. updates_collections: An optional list of collections `update_op` should be added to. name: An optional variable_scope name. Returns: mean_accuracy: A `Tensor` representing the mean per class accuracy. update_op: An operation that updates the accuracy tensor. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported ' 'when eager execution is enabled.') with variable_scope.variable_scope(name, 'mean_accuracy', (predictions, labels, weights)): labels = math_ops.to_int64(labels) # Flatten the input if its rank > 1. if labels.get_shape().ndims > 1: labels = array_ops.reshape(labels, [-1]) if predictions.get_shape().ndims > 1: predictions = array_ops.reshape(predictions, [-1]) # Check if shape is compatible. 
predictions.get_shape().assert_is_compatible_with(labels.get_shape()) total = metric_variable([num_classes], dtypes.float32, name='total') count = metric_variable([num_classes], dtypes.float32, name='count') ones = array_ops.ones([array_ops.size(labels)], dtypes.float32) if labels.dtype != predictions.dtype: predictions = math_ops.cast(predictions, labels.dtype) is_correct = math_ops.to_float(math_ops.equal(predictions, labels)) if weights is not None: if weights.get_shape().ndims > 1: weights = array_ops.reshape(weights, [-1]) weights = math_ops.to_float(weights) is_correct *= weights ones *= weights update_total_op = state_ops.scatter_add(total, labels, ones) update_count_op = state_ops.scatter_add(count, labels, is_correct) def compute_mean_accuracy(_, count, total): per_class_accuracy = math_ops.div_no_nan( count, math_ops.maximum(total, 0), name=None) mean_accuracy_v = math_ops.reduce_mean( per_class_accuracy, name='mean_accuracy') return mean_accuracy_v mean_accuracy_v = _aggregate_across_replicas( metrics_collections, compute_mean_accuracy, count, total) update_op = math_ops.div_no_nan( update_count_op, math_ops.maximum(update_total_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_accuracy_v, update_op @tf_export(v1=['metrics.mean_iou']) def mean_iou(labels, predictions, num_classes, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculate per-step mean Intersection-Over-Union (mIOU). Mean Intersection-Over-Union is a common evaluation metric for semantic image segmentation, which first computes the IOU for each semantic class and then computes the average over classes. IOU is defined as follows: IOU = true_positive / (true_positive + false_positive + false_negative). The predictions are accumulated in a confusion matrix, weighted by `weights`, and mIOU is then calculated from it. 
For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_iou`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_iou` should be added to. updates_collections: An optional list of collections `update_op` should be added to. name: An optional variable_scope name. Returns: mean_iou: A `Tensor` representing the mean intersection-over-union. update_op: An operation that increments the confusion matrix. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_iou is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'mean_iou', (predictions, labels, weights)): # Check if shape is compatible. 
predictions.get_shape().assert_is_compatible_with(labels.get_shape()) total_cm, update_op = _streaming_confusion_matrix(labels, predictions, num_classes, weights) def compute_mean_iou(_, total_cm): """Compute the mean intersection-over-union via the confusion matrix.""" sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0)) sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1)) cm_diag = math_ops.to_float(array_ops.diag_part(total_cm)) denominator = sum_over_row + sum_over_col - cm_diag # The mean is only computed over classes that appear in the # label or prediction tensor. If the denominator is 0, we need to # ignore the class. num_valid_entries = math_ops.reduce_sum( math_ops.cast( math_ops.not_equal(denominator, 0), dtype=dtypes.float32)) # If the value of the denominator is 0, set it to 1 to avoid # zero division. denominator = array_ops.where( math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator)) iou = math_ops.div(cm_diag, denominator) # If the number of valid entries is 0 (no classes) we return 0. result = array_ops.where( math_ops.greater(num_valid_entries, 0), math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0) return result # TODO(priyag): Use outside_compilation if in TPU context. mean_iou_v = _aggregate_across_replicas( metrics_collections, compute_mean_iou, total_cm) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_iou_v, update_op @tf_export(v1=['metrics.mean_relative_error']) def mean_relative_error(labels, predictions, normalizer, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean relative error by normalizing with the given values. The `mean_relative_error` function creates two local variables, `total` and `count` that are used to compute the mean relative absolute error. 
  This average is weighted by `weights`, and it is ultimately returned as
  `mean_relative_error`: an idempotent operation that simply divides `total` by
  `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
  absolute value of the differences between `predictions` and `labels` by the
  `normalizer`. Then `update_op` increments `total` with the reduced sum of the
  product of `weights` and `relative_errors`, and it increments `count` with the
  reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_relative_error is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) predictions, normalizer = confusion_matrix.remove_squeezable_dimensions( predictions, normalizer) predictions.get_shape().assert_is_compatible_with(normalizer.get_shape()) relative_errors = array_ops.where( math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels), math_ops.div(math_ops.abs(labels - predictions), normalizer)) return mean(relative_errors, weights, metrics_collections, updates_collections, name or 'mean_relative_error') @tf_export(v1=['metrics.mean_squared_error']) def mean_squared_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean squared error between the labels and predictions. The `mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the mean squared error. This average is weighted by `weights`, and it is ultimately returned as `mean_squared_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_squared_error is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) squared_error = math_ops.square(labels - predictions) return mean(squared_error, weights, metrics_collections, updates_collections, name or 'mean_squared_error') @tf_export(v1=['metrics.mean_tensor']) def mean_tensor(values, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the element-wise (weighted) mean of the given tensors. In contrast to the `mean` function which returns a scalar with the mean, this function returns an average tensor with the same shape as the input tensors. The `mean_tensor` function creates two local variables, `total_tensor` and `count_tensor` that are used to compute the average of `values`. 
This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean: A float `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_tensor is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'mean', (values, weights)): values = math_ops.to_float(values) total = metric_variable( values.get_shape(), dtypes.float32, name='total_tensor') count = metric_variable( values.get_shape(), dtypes.float32, name='count_tensor') num_values = array_ops.ones_like(values) if weights is not None: values, _, weights = _remove_squeezable_dimensions( predictions=values, labels=None, weights=weights) weights = weights_broadcast_ops.broadcast_weights( math_ops.to_float(weights), values) values = math_ops.multiply(values, weights) num_values = math_ops.multiply(num_values, weights) update_total_op = state_ops.assign_add(total, values) with ops.control_dependencies([values]): update_count_op = state_ops.assign_add(count, num_values) compute_mean = lambda _, t, c: math_ops.div_no_nan( # pylint: disable=g-long-lambda t, math_ops.maximum(c, 0), name='value') mean_t = _aggregate_across_replicas( metrics_collections, compute_mean, total, count) update_op = math_ops.div_no_nan( update_total_op, math_ops.maximum(update_count_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_t, update_op @tf_export(v1=['metrics.percentage_below']) def percentage_below(values, threshold, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the percentage of values less than the given threshold. The `percentage_below` function creates two local variables, `total` and `count` that are used to compute the percentage of `values` that fall below `threshold`. This rate is weighted by `weights`, and it is ultimately returned as `percentage` which is an idempotent operation that simply divides `total` by `count`. 
For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `percentage`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A numeric `Tensor` of arbitrary size. threshold: A scalar threshold. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: percentage: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.percentage_below is not supported when ' 'eager execution is enabled.') is_below_threshold = math_ops.to_float(math_ops.less(values, threshold)) return mean(is_below_threshold, weights, metrics_collections, updates_collections, name or 'percentage_below_threshold') def _count_condition(values, weights=None, metrics_collections=None, updates_collections=None): """Sums the weights of cases where the given values are True. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `bool` `Tensor` of arbitrary size. 
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)

  # A single scalar accumulator holds the running (weighted) count.
  count = metric_variable([], dtypes.float32, name='count')

  values = math_ops.to_float(values)
  if weights is not None:
    # The rank check is an in-graph assertion: weights must be a scalar or
    # match the rank of `values`; the multiply then relies on broadcasting.
    with ops.control_dependencies((check_ops.assert_rank_in(
        weights, (0, array_ops.rank(values))),)):
      weights = math_ops.to_float(weights)
      values = math_ops.multiply(values, weights)

  # `value_tensor` reads the accumulator (aggregated across replicas if
  # needed); `update_op` folds the current batch into it.
  value_tensor = _aggregate_variable(count, metrics_collections)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op


@tf_export(v1=['metrics.false_negatives'])
def false_negatives(labels,
                    predictions,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_negatives is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) is_false_negative = math_ops.logical_and( math_ops.equal(labels, True), math_ops.equal(predictions, False)) return _count_condition(is_false_negative, weights, metrics_collections, updates_collections) @tf_export(v1=['metrics.false_negatives_at_thresholds']) def false_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes false negatives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. 
thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `false_negatives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: false_negatives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `false_negatives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights=weights, includes=('fn',)) fn_value = _aggregate_variable(values['fn'], metrics_collections) if updates_collections: ops.add_to_collections(updates_collections, update_ops['fn']) return fn_value, update_ops['fn'] @tf_export(v1=['metrics.false_positives']) def false_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of false positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric value
      variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives is not supported when '
                       'eager execution is enabled.')

  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A false positive is a False label that was predicted True.
    is_false_positive = math_ops.logical_and(
        math_ops.equal(labels, False), math_ops.equal(predictions, True))
    return _count_condition(is_false_positive, weights, metrics_collections,
                            updates_collections)


@tf_export(v1=['metrics.false_positives_at_thresholds'])
def false_positives_at_thresholds(labels,
                                  predictions,
                                  thresholds,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `false_positives`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    # Only the 'fp' entry of the confusion matrix is tracked per threshold.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fp',))

    fp_value = _aggregate_variable(values['fp'], metrics_collections)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_ops['fp'])

    return fp_value, update_ops['fp']


@tf_export(v1=['metrics.true_negatives'])
def true_negatives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_negatives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`.
      Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric value
      variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A true negative is a False label that was also predicted False.
    is_true_negative = math_ops.logical_and(
        math_ops.equal(labels, False), math_ops.equal(predictions, False))
    return _count_condition(is_true_negative, weights, metrics_collections,
                            updates_collections)


@tf_export(v1=['metrics.true_negatives_at_thresholds'])
def true_negatives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true negatives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `true_negatives`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    # Only the 'tn' entry of the confusion matrix is tracked per threshold.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tn',))

    tn_value = _aggregate_variable(values['tn'], metrics_collections)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_ops['tn'])

    return tn_value, update_ops['tn']


@tf_export(v1=['metrics.true_positives'])
def true_positives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_positives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric value
      variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A true positive is a True label that was also predicted True.
    is_true_positive = math_ops.logical_and(
        math_ops.equal(labels, True), math_ops.equal(predictions, True))
    return _count_condition(is_true_positive, weights, metrics_collections,
                            updates_collections)


@tf_export(v1=['metrics.true_positives_at_thresholds'])
def true_positives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `true_positives`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    # Only the 'tp' entry of the confusion matrix is tracked per threshold.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tp',))

    tp_value = _aggregate_variable(values['tp'], metrics_collections)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_ops['tp'])

    return tp_value, update_ops['tp']


@tf_export(v1=['metrics.precision'])
def precision(labels,
              predictions,
              weights=None,
              metrics_collections=None,
              updates_collections=None,
              name=None):
  """Computes the precision of the predictions with respect to the labels.
  The `precision` function creates two local variables,
  `true_positives` and `false_positives`, that are used to compute the
  precision. This value is ultimately returned as `precision`, an idempotent
  operation that simply divides `true_positives` by the sum of
  `true_positives` and `false_positives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`. `update_op` weights each prediction by the corresponding value
  in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'precision',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    true_p, true_positives_update_op = true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    false_p, false_positives_update_op = false_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_precision(tp, fp, name):
      # Returns 0 when tp + fp == 0, avoiding a division by zero.
      return array_ops.where(
          math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)

    def once_across_replicas(_, true_p, false_p):
      return compute_precision(true_p, false_p, 'value')

    p = _aggregate_across_replicas(metrics_collections, once_across_replicas,
                                   true_p, false_p)

    update_op = compute_precision(true_positives_update_op,
                                  false_positives_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return p, update_op


@tf_export(v1=['metrics.precision_at_thresholds'])
def precision_at_thresholds(labels,
                            predictions,
                            thresholds,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes precision values for different `thresholds` on `predictions`.

  The `precision_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `precision[i]` is defined as the total
  weight of values in `predictions` above `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of values in
  `predictions` above `thresholds[i]`
  (`true_positives[i] / (true_positives[i] + false_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_thresholds is not '
                       'supported when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'precision_at_thresholds',
                                     (predictions, labels, weights)):
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fp'))

    # Avoid division by zero.
    epsilon = 1e-7

    def compute_precision(tp, fp, name):
      return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)

    def precision_across_replicas(_, values):
      return compute_precision(values['tp'], values['fp'], 'value')

    prec = _aggregate_across_replicas(
        metrics_collections, precision_across_replicas, values)

    update_op = compute_precision(update_ops['tp'], update_ops['fp'],
                                  'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return prec, update_op


@tf_export(v1=['metrics.recall'])
def recall(labels,
           predictions,
           weights=None,
           metrics_collections=None,
           updates_collections=None,
           name=None):
  """Computes the recall of the predictions with respect to the labels.

  The `recall` function creates two local variables, `true_positives` and
  `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` that updates these variables and returns the `recall`.
  `update_op` weights each prediction by the corresponding value in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
Returns: recall: Scalar float `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately and whose value matches `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.recall is not supported is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'recall', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) true_p, true_positives_update_op = true_positives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) false_n, false_negatives_update_op = false_negatives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) def compute_recall(true_p, false_n, name): return array_ops.where( math_ops.greater(true_p + false_n, 0), math_ops.div(true_p, true_p + false_n), 0, name) def once_across_replicas(_, true_p, false_n): return compute_recall(true_p, false_n, 'value') rec = _aggregate_across_replicas( metrics_collections, once_across_replicas, true_p, false_n) update_op = compute_recall(true_positives_update_op, false_negatives_update_op, 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return rec, update_op def _at_k_name(name, k=None, class_id=None): if k is not None: name = '%s_at_%d' % (name, k) else: name = '%s_at_k' % (name) if class_id is not None: name = '%s_class%d' % (name, class_id) return name def 
_select_class_id(ids, selected_id): """Filter all but `selected_id` out of `ids`. Args: ids: `int64` `Tensor` or `SparseTensor` of IDs. selected_id: Int id to select. Returns: `SparseTensor` of same dimensions as `ids`. This contains only the entries equal to `selected_id`. """ ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids) if isinstance(ids, sparse_tensor.SparseTensor): return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values, selected_id)) # TODO(ptucker): Make this more efficient, maybe add a sparse version of # tf.equal and tf.reduce_any? # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1. ids_shape = array_ops.shape(ids, out_type=dtypes.int64) ids_last_dim = array_ops.size(ids_shape) - 1 filled_selected_id_shape = math_ops.reduced_shape(ids_shape, array_ops.reshape( ids_last_dim, [1])) # Intersect `ids` with the selected ID. filled_selected_id = array_ops.fill(filled_selected_id_shape, math_ops.to_int64(selected_id)) result = sets.set_intersection(filled_selected_id, ids) return sparse_tensor.SparseTensor( indices=result.indices, values=result.values, dense_shape=ids_shape) def _maybe_select_class_id(labels, predictions_idx, selected_id=None): """If class ID is specified, filter all other classes. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. selected_id: Int id to select. Returns: Tuple of `labels` and `predictions_idx`, possibly with classes removed. 
""" if selected_id is None: return labels, predictions_idx return (_select_class_id(labels, selected_id), _select_class_id(predictions_idx, selected_id)) def _sparse_true_positive_at_k(labels, predictions_idx, class_id=None, weights=None, name=None): """Calculates true positives for recall@k and precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of operation. Returns: A [D1, ... DN] `Tensor` of true positive counts. """ with ops.name_scope(name, 'true_positives', (predictions_idx, labels, weights)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) tp = sets.set_size(sets.set_intersection(predictions_idx, labels)) tp = math_ops.to_double(tp) if weights is not None: with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable( weights, tp),)): weights = math_ops.to_double(weights) tp = math_ops.multiply(tp, weights) return tp def _streaming_sparse_true_positive_at_k(labels, predictions_idx, k=None, class_id=None, weights=None, name=None): """Calculates weighted per step true positives for recall@k and precision@k. 
If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape. """ with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: tp = _sparse_true_positive_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp)) var = metric_variable([], dtypes.float64, name=scope) return var, state_ops.assign_add(var, batch_total_tp, name='update') def _sparse_false_negative_at_k(labels, predictions_idx, class_id=None, weights=None): """Calculates false negatives for recall@k. If `class_id` is specified, calculate binary true positives for `class_id` only. 
  If `class_id` is not specified, calculate metrics for `k` predicted vs `n`
  label classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(None, 'false_negatives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # aminusb=False: per-example count of label classes missing from the
    # top-k predictions.
    fn = sets.set_size(
        sets.set_difference(predictions_idx, labels, aminusb=False))
    fn = math_ops.to_double(fn)
    if weights is not None:
      with ops.control_dependencies((
          weights_broadcast_ops.assert_broadcastable(weights, fn),)):
        weights = math_ops.to_double(weights)
        fn = math_ops.multiply(fn, weights)
    return fn


def _streaming_sparse_false_negative_at_k(labels,
                                          predictions_idx,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false negatives for recall@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    fn = _sparse_false_negative_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))

    var = metric_variable([], dtypes.float64, name=scope)
    return var, state_ops.assign_add(var, batch_total_fn, name='update')


@tf_export(v1=['metrics.recall_at_k'])
def recall_at_k(labels,
                predictions,
                k,
                class_id=None,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  If `class_id` is specified, we calculate recall by considering only the
  entries in the batch for which `class_id` is in the label, and computing the
  fraction of them for which `class_id` is in the top-k `predictions`.
  If `class_id` is not specified, we'll calculate recall as how often on
  average a class among the labels of a batch entry is in the top-k
  `predictions`.
  `sparse_recall_at_k` creates two local variables, `true_positive_at_<k>` and
  `false_negative_at_<k>`, that are used to compute the recall_at_k frequency.
  This frequency is ultimately returned as `recall_at_<k>`: an idempotent
  operation that simply divides `true_positive_at_<k>` by total
  (`true_positive_at_<k>` + `false_negative_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false negatives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_negative_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall_at_k is not '
                       'supported when eager execution is enabled.')

  with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Convert logits to top-k class indices and delegate to recall_at_top_k.
    _, top_k_idx = nn.top_k(predictions, k)
    return recall_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)


@tf_export(v1=['metrics.recall_at_top_k'])
def recall_at_top_k(labels,
                    predictions_idx,
                    k=None,
                    class_id=None,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.

  Differs from `recall_at_k` in that predictions must be in the form of top
  `k` class indices, whereas `recall_at_k` expects logits. Refer to
  `recall_at_k` for more details.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and predictions has shape [batch size, k]. The final
      dimension contains the top `k` predicted class indices. [D1, ... DN]
      must match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
""" with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: labels = _maybe_expand_labels(labels, predictions_idx) top_k_idx = math_ops.to_int64(predictions_idx) tp, tp_update = _streaming_sparse_true_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) fn, fn_update = _streaming_sparse_false_negative_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) def compute_recall(_, tp, fn): return math_ops.div(tp, math_ops.add(tp, fn), name=scope) metric = _aggregate_across_replicas( metrics_collections, compute_recall, tp, fn) update = math_ops.div( tp_update, math_ops.add(tp_update, fn_update), name='update') if updates_collections: ops.add_to_collections(updates_collections, update) return metric, update @tf_export(v1=['metrics.recall_at_thresholds']) def recall_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes various recall values for different `thresholds` on `predictions`. The `recall_at_thresholds` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` for various values of thresholds. `recall[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by the total weight of `True` values in `labels` (`true_positives[i] / (true_positives[i] + false_negatives[i])`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `recall`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. 
predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `recall` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: recall: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables that are used in the computation of `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.recall_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'recall_at_thresholds', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights, includes=('tp', 'fn')) # Avoid division by zero. 
epsilon = 1e-7 def compute_recall(tp, fn, name): return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name) def recall_across_replicas(_, values): return compute_recall(values['tp'], values['fn'], 'value') rec = _aggregate_across_replicas( metrics_collections, recall_across_replicas, values) update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return rec, update_op @tf_export(v1=['metrics.root_mean_squared_error']) def root_mean_squared_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the root mean squared error between the labels and predictions. The `root_mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the root mean squared error. This average is weighted by `weights`, and it is ultimately returned as `root_mean_squared_error`: an idempotent operation that takes the square root of the division of `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `root_mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). 
metrics_collections: An optional list of collections that `root_mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: root_mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `root_mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.root_mean_squared_error is not ' 'supported when eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) mse, update_mse_op = mean_squared_error(labels, predictions, weights, None, None, name or 'root_mean_squared_error') once_across_replicas = lambda _, mse: math_ops.sqrt(mse) rmse = _aggregate_across_replicas( metrics_collections, once_across_replicas, mse) update_rmse_op = math_ops.sqrt(update_mse_op) if updates_collections: ops.add_to_collections(updates_collections, update_rmse_op) return rmse, update_rmse_op @tf_export(v1=['metrics.sensitivity_at_specificity']) def sensitivity_at_specificity(labels, predictions, specificity, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, name=None): """Computes the specificity at a given sensitivity. The `sensitivity_at_specificity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the sensitivity at the given specificity value. 
The threshold for the given specificity value is computed and used to evaluate the corresponding sensitivity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. specificity: A scalar value in range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given specificity. metrics_collections: An optional list of collections that `sensitivity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: sensitivity: A scalar `Tensor` representing the sensitivity at the given `specificity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `sensitivity`. 
Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `specificity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.sensitivity_at_specificity is not ' 'supported when eager execution is enabled.') if specificity < 0 or specificity > 1: raise ValueError('`specificity` must be in the range [0, 1].') with variable_scope.variable_scope(name, 'sensitivity_at_specificity', (predictions, labels, weights)): kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [ (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2) ] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) def compute_sensitivity_at_specificity(tp, tn, fp, fn, name): specificities = math_ops.div(tn, tn + fp + kepsilon) tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the sensitivity: return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon, name) def sensitivity_across_replicas(_, values): return compute_sensitivity_at_specificity( values['tp'], values['tn'], values['fp'], values['fn'], 'value') sensitivity = _aggregate_across_replicas( metrics_collections, sensitivity_across_replicas, values) update_op = compute_sensitivity_at_specificity( update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return sensitivity, update_op def _expand_and_tile(tensor, multiple, dim=0, name=None): """Slice `tensor` shape in 2, then tile along the sliced dimension. 
A new dimension is inserted in shape of `tensor` before `dim`, then values are tiled `multiple` times along the new dimension. Args: tensor: Input `Tensor` or `SparseTensor`. multiple: Integer, number of times to tile. dim: Integer, dimension along which to tile. name: Name of operation. Returns: `Tensor` result of expanding and tiling `tensor`. Raises: ValueError: if `multiple` is less than 1, or `dim` is not in `[-rank(tensor), rank(tensor)]`. """ if multiple < 1: raise ValueError('Invalid multiple %s, must be > 0.' % multiple) with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor) if isinstance(tensor, sparse_tensor.SparseTensor): if dim < 0: expand_dims = array_ops.reshape( array_ops.size(tensor.dense_shape) + dim, [1]) else: expand_dims = [dim] expanded_shape = array_ops.concat( (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape') expanded = sparse_ops.sparse_reshape( tensor, shape=expanded_shape, name='expand') if multiple == 1: return expanded return sparse_ops.sparse_concat( dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope) # Dense. expanded = array_ops.expand_dims( tensor, dim if (dim >= 0) else (dim - 1), name='expand') if multiple == 1: return expanded ones = array_ops.ones_like(array_ops.shape(tensor)) tile_multiples = array_ops.concat( (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples') return array_ops.tile(expanded, tile_multiples, name=scope) def _num_relevant(labels, k): """Computes number of relevant values for each row in labels. For labels with shape [D1, ... DN, num_labels], this is the minimum of `num_labels` and `k`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. 
Commonly, N=1 and `labels` has shape [batch_size, num_labels]. k: Integer, k for @k metric. Returns: Integer `Tensor` of shape [D1, ... DN], where each value is the number of relevant values for that row. Raises: ValueError: if inputs have invalid dtypes or values. """ if k < 1: raise ValueError('Invalid k=%s.' % k) with ops.name_scope(None, 'num_relevant', (labels,)) as scope: # For SparseTensor, calculate separate count for each row. labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels) if isinstance(labels, sparse_tensor.SparseTensor): return math_ops.minimum(sets.set_size(labels), k, name=scope) # For dense Tensor, calculate scalar count based on last dimension, and # tile across labels shape. labels_shape = array_ops.shape(labels) labels_size = labels_shape[-1] num_relevant_scalar = math_ops.minimum(labels_size, k) return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope) def _sparse_average_precision_at_top_k(labels, predictions_idx): """Computes average precision@k of predictions with respect to sparse labels. From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula for each row is: AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`, `labels`, and the result `Tensors`. In the common case, this is [batch_size]. Each row of the results contains the average precision for that row. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be in range [0, num_classes). predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. 
The final dimension must be set and contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). Returns: `float64` `Tensor` of shape [D1, ... DN], where each value is the average precision for that row. Raises: ValueError: if the last dimension of predictions_idx is not set. """ with ops.name_scope(None, 'average_precision', (predictions_idx, labels)) as scope: predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx') if predictions_idx.get_shape().ndims == 0: raise ValueError('The rank of predictions_idx must be at least 1.') k = predictions_idx.get_shape().as_list()[-1] if k is None: raise ValueError('The last dimension of predictions_idx must be set.') labels = _maybe_expand_labels(labels, predictions_idx) # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate # prediction for each k, so we can calculate separate true positive values # for each k. predictions_idx_per_k = array_ops.expand_dims( predictions_idx, -1, name='predictions_idx_per_k') # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor. labels_per_k = _expand_and_tile( labels, multiple=k, dim=-1, name='labels_per_k') # The following tensors are all of shape [D1, ... DN, k], containing values # per row, per k value. # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at # that k value is correct, 0 otherwise. This is the "rel_{i}" term from # the formula above. # `tp_per_k` (int32) - True positive counts. # `retrieved_per_k` (int32) - Number of predicted values at each k. This is # the precision denominator. # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}" # term from the formula above. # `relevant_precision_per_k` (float64) - Relevant precisions; i.e., # precisions at all k for which relevance indicator is true. 
relevant_per_k = _sparse_true_positive_at_k( labels_per_k, predictions_idx_per_k, name='relevant_per_k') tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k') retrieved_per_k = math_ops.cumsum( array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k') precision_per_k = math_ops.div( math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k), name='precision_per_k') relevant_precision_per_k = math_ops.multiply( precision_per_k, math_ops.to_double(relevant_per_k), name='relevant_precision_per_k') # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor. precision_sum = math_ops.reduce_sum( relevant_precision_per_k, axis=(-1,), name='precision_sum') # Divide by number of relevant items to get average precision. These are # the "num_relevant_items" and "AveP" terms from the formula above. num_relevant_items = math_ops.to_double(_num_relevant(labels, k)) return math_ops.div(precision_sum, num_relevant_items, name=scope) def _streaming_sparse_average_precision_at_top_k(labels, predictions_idx, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes average precision@k of predictions with respect to sparse labels. `sparse_average_precision_at_top_k` creates two local variables, `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_<k>`: an idempotent operation that simply divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be in range [0, num_classes). predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. """ with ops.name_scope(name, 'average_precision_at_top_k', (predictions_idx, labels, weights)) as scope: # Calculate per-example average precision, and apply weights. average_precision = _sparse_average_precision_at_top_k( predictions_idx=predictions_idx, labels=labels) if weights is not None: weights = weights_broadcast_ops.broadcast_weights( math_ops.to_double(weights), average_precision) average_precision = math_ops.multiply(average_precision, weights) # Create accumulation variables and update ops for max average precision and # total average precision. 
with ops.name_scope(None, 'max', (average_precision,)) as max_scope: # `max` is the max possible precision. Since max for any row is 1.0: # - For the unweighted case, this is just the number of rows. # - For the weighted case, it's the sum of the weights broadcast across # `average_precision` rows. max_var = metric_variable([], dtypes.float64, name=max_scope) if weights is None: batch_max = math_ops.to_double( array_ops.size(average_precision, name='batch_max')) else: batch_max = math_ops.reduce_sum(weights, name='batch_max') max_update = state_ops.assign_add(max_var, batch_max, name='update') with ops.name_scope(None, 'total', (average_precision,)) as total_scope: total_var = metric_variable([], dtypes.float64, name=total_scope) batch_total = math_ops.reduce_sum(average_precision, name='batch_total') total_update = state_ops.assign_add(total_var, batch_total, name='update') # Divide total by max to get mean, for both vars and the update ops. def precision_across_replicas(_, total_var, max_var): return _safe_scalar_div(total_var, max_var, name='mean') mean_average_precision = _aggregate_across_replicas( metrics_collections, precision_across_replicas, total_var, max_var) update = _safe_scalar_div(total_update, max_update, name=scope) if updates_collections: ops.add_to_collections(updates_collections, update) return mean_average_precision, update @tf_export(v1=['metrics.sparse_average_precision_at_k']) @deprecated(None, 'Use average_precision_at_k instead') def sparse_average_precision_at_k(labels, predictions, k, weights=None, metrics_collections=None, updates_collections=None, name=None): """Renamed to `average_precision_at_k`, please use that method instead.""" return average_precision_at_k( labels=labels, predictions=predictions, k=k, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name) @tf_export(v1=['metrics.average_precision_at_k']) def average_precision_at_k(labels, predictions, k, weights=None, 
metrics_collections=None, updates_collections=None, name=None): """Computes average precision@k of predictions with respect to sparse labels. `average_precision_at_k` creates two local variables, `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_<k>`: an idempotent operation that simply divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and `predictions` has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. This will calculate an average precision for range `[1,k]`, as documented above. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. 
If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. Raises: ValueError: if k is invalid. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.sparse_average_precision_at_k is not ' 'supported when eager execution is enabled.') if k < 1: raise ValueError('Invalid k=%s.' % k) with ops.name_scope(name, _at_k_name('average_precision', k), (predictions, labels, weights)) as scope: # Calculate top k indices to produce [D1, ... DN, k] tensor. _, predictions_idx = nn.top_k(predictions, k) return _streaming_sparse_average_precision_at_top_k( labels=labels, predictions_idx=predictions_idx, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=scope) def _sparse_false_positive_at_k(labels, predictions_idx, class_id=None, weights=None): """Calculates false positives for precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. 
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: A [D1, ... DN] `Tensor` of false positive counts. """ with ops.name_scope(None, 'false_positives', (predictions_idx, labels, weights)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) fp = sets.set_size( sets.set_difference(predictions_idx, labels, aminusb=True)) fp = math_ops.to_double(fp) if weights is not None: with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable( weights, fp),)): weights = math_ops.to_double(weights) fp = math_ops.multiply(fp, weights) return fp def _streaming_sparse_false_positive_at_k(labels, predictions_idx, k=None, class_id=None, weights=None, name=None): """Calculates weighted per step false positives for precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. 
class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape. """ with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: fp = _sparse_false_positive_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp)) var = metric_variable([], dtypes.float64, name=scope) return var, state_ops.assign_add(var, batch_total_fp, name='update') @tf_export(v1=['metrics.precision_at_top_k']) def precision_at_top_k(labels, predictions_idx, k=None, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision@k of the predictions with respect to sparse labels. Differs from `sparse_precision_at_k` in that predictions must be in the form of top `k` class indices, whereas `sparse_precision_at_k` expects logits. Refer to `sparse_precision_at_k` for more details. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. 
The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. Only used for the default op name. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes], where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.precision_at_top_k is not ' 'supported when eager execution is enabled.') with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: labels = _maybe_expand_labels(labels, predictions_idx) top_k_idx = math_ops.to_int64(predictions_idx) tp, tp_update = _streaming_sparse_true_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) fp, fp_update = _streaming_sparse_false_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) def precision_across_replicas(_, tp, fp): return math_ops.div(tp, math_ops.add(tp, fp), name=scope) metric = _aggregate_across_replicas( metrics_collections, precision_across_replicas, tp, fp) update = math_ops.div( tp_update, math_ops.add(tp_update, fp_update), name='update') if updates_collections: ops.add_to_collections(updates_collections, update) return metric, update @tf_export(v1=['metrics.sparse_precision_at_k']) @deprecated(None, 'Use precision_at_k instead') def sparse_precision_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Renamed to `precision_at_k`, please use that method instead.""" return precision_at_k( labels=labels, predictions=predictions, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name) @tf_export(v1=['metrics.precision_at_k']) def precision_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision@k of the predictions with respect to sparse labels. 
If `class_id` is specified, we calculate precision by considering only the entries in the batch for which `class_id` is in the top-k highest `predictions`, and computing the fraction of them for which `class_id` is indeed a correct label. If `class_id` is not specified, we'll calculate precision as how often on average a class among the top-k classes with the highest predicted values of a batch entry is correct and can be found in the label for that entry. `precision_at_k` creates two local variables, `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute the precision@k frequency. This frequency is ultimately returned as `precision_at_<k>`: an idempotent operation that simply divides `true_positive_at_<k>` by total (`true_positive_at_<k>` + `false_positive_at_<k>`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes]. 
The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes], where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.sparse_precision_at_k is not ' 'supported when eager execution is enabled.') with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id), (predictions, labels, weights)) as scope: _, top_k_idx = nn.top_k(predictions, k) return precision_at_top_k( labels=labels, predictions_idx=top_k_idx, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=scope) @tf_export(v1=['metrics.specificity_at_sensitivity']) def specificity_at_sensitivity(labels, predictions, sensitivity, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, name=None): """Computes the specificity at a given sensitivity. The `specificity_at_sensitivity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the specificity at the given sensitivity value. The threshold for the given sensitivity value is computed and used to evaluate the corresponding specificity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `specificity`. `update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. sensitivity: A scalar value in range `[0, 1]`. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given sensitivity. metrics_collections: An optional list of collections that `specificity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: specificity: A scalar `Tensor` representing the specificity at the given `specificity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `specificity`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `sensitivity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.specificity_at_sensitivity is not ' 'supported when eager execution is enabled.') if sensitivity < 0 or sensitivity > 1: raise ValueError('`sensitivity` must be in the range [0, 1].') with variable_scope.variable_scope(name, 'specificity_at_sensitivity', (predictions, labels, weights)): kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [ (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2) ] thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) def compute_specificity_at_sensitivity(tp, tn, fp, fn, name): """Computes the specificity at the given sensitivity. Args: tp: True positives. tn: True negatives. fp: False positives. fn: False negatives. 
        name: The name of the operation.

      Returns:
        The specificity using the aggregated values.
      """
      # Per-threshold sensitivity (recall) = tp / (tp + fn); kepsilon guards
      # against division by zero when a threshold sees no positives.
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)

      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      # cumsum over the tie mask is maximal at the LAST tied index, and
      # argmax returns the first occurrence of that maximum, so together
      # they select the last threshold whose sensitivity is closest to the
      # requested value.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)

      # Now, we have the implicit threshold, so compute the specificity:
      # specificity = tn / (tn + fp) at the selected threshold.
      return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,
                          name)

    def specificity_across_replicas(_, values):
      # Aggregation hook: compute specificity from the cross-replica counts.
      return compute_specificity_at_sensitivity(
          values['tp'], values['tn'], values['fp'], values['fn'], 'value')

    specificity = _aggregate_across_replicas(
        metrics_collections, specificity_across_replicas, values)

    # The update op advances the confusion-matrix counters and returns the
    # specificity implied by the freshly-updated counts.
    update_op = compute_specificity_at_sensitivity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return specificity, update_op
{ "content_hash": "9b6bd0bb640215455eee9de8c18d186e", "timestamp": "", "source": "github", "line_count": 3653, "max_line_length": 88, "avg_line_length": 44.11716397481522, "alnum_prop": 0.658866964507322, "repo_name": "asimshankar/tensorflow", "id": "ec39b1790e340a0d194dea8ab3419ca78fc9d126", "size": "161849", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/ops/metrics_impl.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "4882" }, { "name": "Batchfile", "bytes": "10132" }, { "name": "C", "bytes": "490070" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "52677142" }, { "name": "CMake", "bytes": "207176" }, { "name": "Dockerfile", "bytes": "39454" }, { "name": "Go", "bytes": "1290930" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "890529" }, { "name": "Jupyter Notebook", "bytes": "2618412" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "68402" }, { "name": "Objective-C", "bytes": "16140" }, { "name": "Objective-C++", "bytes": "102518" }, { "name": "PHP", "bytes": "5172" }, { "name": "Pascal", "bytes": "221" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "43038983" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "838" }, { "name": "Shell", "bytes": "497659" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
from sys import platform from functools import wraps, partial from itertools import count from weakref import WeakValueDictionary from errno import errorcode from six import text_type as _text_type from six import integer_types as integer_types from OpenSSL._util import ( ffi as _ffi, lib as _lib, exception_from_error_queue as _exception_from_error_queue, native as _native) from OpenSSL.crypto import ( FILETYPE_PEM, _PassphraseHelper, PKey, X509Name, X509, X509Store) _unspecified = object() try: _memoryview = memoryview except NameError: class _memoryview(object): pass OPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER SSLEAY_VERSION = _lib.SSLEAY_VERSION SSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS SSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM SSLEAY_DIR = _lib.SSLEAY_DIR SSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON SENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN RECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN SSLv2_METHOD = 1 SSLv3_METHOD = 2 SSLv23_METHOD = 3 TLSv1_METHOD = 4 TLSv1_1_METHOD = 5 TLSv1_2_METHOD = 6 OP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1 OP_NO_TLSv1_1 = getattr(_lib, "SSL_OP_NO_TLSv1_1", 0) OP_NO_TLSv1_2 = getattr(_lib, "SSL_OP_NO_TLSv1_2", 0) try: MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS except AttributeError: pass OP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE OP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA OP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG OP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = _lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG OP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG OP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER try: OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING except AttributeError: pass OP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG OP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG OP_TLS_BLOCK_PADDING_BUG = 
_lib.SSL_OP_TLS_BLOCK_PADDING_BUG OP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS OP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE OP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG OP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1 OP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2 OP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG= _lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG try: OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION except AttributeError: pass OP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU OP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE OP_NO_TICKET = _lib.SSL_OP_NO_TICKET OP_ALL = _lib.SSL_OP_ALL VERIFY_PEER = _lib.SSL_VERIFY_PEER VERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT VERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE VERIFY_NONE = _lib.SSL_VERIFY_NONE SESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF SESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT SESS_CACHE_SERVER = _lib.SSL_SESS_CACHE_SERVER SESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH SESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR SESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP SESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE SESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL SSL_ST_CONNECT = _lib.SSL_ST_CONNECT SSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT SSL_ST_MASK = _lib.SSL_ST_MASK SSL_ST_INIT = _lib.SSL_ST_INIT SSL_ST_BEFORE = _lib.SSL_ST_BEFORE SSL_ST_OK = _lib.SSL_ST_OK SSL_ST_RENEGOTIATE = _lib.SSL_ST_RENEGOTIATE SSL_CB_LOOP = _lib.SSL_CB_LOOP SSL_CB_EXIT = _lib.SSL_CB_EXIT SSL_CB_READ = _lib.SSL_CB_READ SSL_CB_WRITE = _lib.SSL_CB_WRITE SSL_CB_ALERT = _lib.SSL_CB_ALERT SSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT SSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT SSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP SSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT SSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP SSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT 
SSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START
SSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE


class Error(Exception):
    """
    An error occurred in an `OpenSSL.SSL` API.
    """


_raise_current_error = partial(_exception_from_error_queue, Error)


class WantReadError(Error):
    # Raised for SSL_ERROR_WANT_READ: the operation needs more data from the
    # transport before it can make progress (see Connection._raise_ssl_error).
    pass


class WantWriteError(Error):
    # Raised for SSL_ERROR_WANT_WRITE: the operation could not flush output
    # to the transport and should be retried.
    pass


class WantX509LookupError(Error):
    # Raised for SSL_ERROR_WANT_X509_LOOKUP.
    pass


class ZeroReturnError(Error):
    # Raised for SSL_ERROR_ZERO_RETURN: the remote end closed the connection
    # cleanly at the TLS layer.
    pass


class SysCallError(Error):
    # Raised for SSL_ERROR_SYSCALL; carries (errno, message) or
    # (-1, "Unexpected EOF").
    pass


class _VerifyHelper(object):
    """
    Adapt a Python certificate-verification callback to the C callback
    signature OpenSSL expects, capturing any Python exception for later
    re-raising (exceptions cannot propagate through the C stack).
    """

    def __init__(self, connection, callback):
        # Exceptions raised inside `callback` are stashed here and re-raised
        # by raise_if_problem() once control returns to Python.
        self._problems = []

        @wraps(callback)
        def wrapper(ok, store_ctx):
            # Wrap the raw X509* currently under verification without
            # copying it.
            cert = X509.__new__(X509)
            cert._x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)
            error_number = _lib.X509_STORE_CTX_get_error(store_ctx)
            error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)

            try:
                result = callback(connection, cert, error_number, error_depth,
                                  ok)
            except Exception as e:
                self._problems.append(e)
                return 0
            else:
                if result:
                    # The callback accepted the certificate; clear any
                    # pending verification error.
                    _lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)
                    return 1
                else:
                    return 0

        # Keep a reference so the cffi callback is not garbage collected
        # while OpenSSL still holds a pointer to it.
        self.callback = _ffi.callback(
            "int (*)(int, X509_STORE_CTX *)", wrapper)

    def raise_if_problem(self):
        # Re-raise the first exception captured by the verify callback,
        # discarding OpenSSL's own queued error for the failed handshake.
        if self._problems:
            try:
                _raise_current_error()
            except Error:
                pass
            raise self._problems.pop(0)


def _asFileDescriptor(obj):
    """
    Return the file descriptor for *obj*: either *obj* itself if it is an
    integer, or the result of its ``fileno()`` method.

    :raises TypeError: if no descriptor can be derived.
    :raises ValueError: if the descriptor is negative.
    """
    fd = None
    if not isinstance(obj, integer_types):
        meth = getattr(obj, "fileno", None)
        if meth is not None:
            obj = meth()

    if isinstance(obj, integer_types):
        fd = obj

    if not isinstance(fd, integer_types):
        raise TypeError("argument must be an int, or have a fileno() method.")
    elif fd < 0:
        raise ValueError(
            "file descriptor cannot be a negative integer (%i)" % (fd,))

    return fd


def SSLeay_version(type):
    """
    Return a string describing the version of OpenSSL in use.

    :param type: One of the SSLEAY_ constants defined in this module.
    """
    return _ffi.string(_lib.SSLeay_version(type))


class Session(object):
    # NOTE(review): placeholder only — no session functionality is
    # implemented in this class here.
    pass


class Context(object):
    """
    :py:obj:`OpenSSL.SSL.Context` instances define the parameters for setting up
    new SSL connections.
""" _methods = { SSLv3_METHOD: "SSLv3_method", SSLv23_METHOD: "SSLv23_method", TLSv1_METHOD: "TLSv1_method", TLSv1_1_METHOD: "TLSv1_1_method", TLSv1_2_METHOD: "TLSv1_2_method", } _methods = dict( (identifier, getattr(_lib, name)) for (identifier, name) in _methods.items() if getattr(_lib, name, None) is not None) def __init__(self, method): """ :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or TLSv1_METHOD. """ if not isinstance(method, integer_types): raise TypeError("method must be an integer") try: method_func = self._methods[method] except KeyError: raise ValueError("No such protocol") method_obj = method_func() if method_obj == _ffi.NULL: # TODO: This is untested. _raise_current_error() context = _lib.SSL_CTX_new(method_obj) if context == _ffi.NULL: # TODO: This is untested. _raise_current_error() context = _ffi.gc(context, _lib.SSL_CTX_free) self._context = context self._passphrase_helper = None self._passphrase_callback = None self._passphrase_userdata = None self._verify_helper = None self._verify_callback = None self._info_callback = None self._tlsext_servername_callback = None self._app_data = None # SSL_CTX_set_app_data(self->ctx, self); # SSL_CTX_set_mode(self->ctx, SSL_MODE_ENABLE_PARTIAL_WRITE | # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER | # SSL_MODE_AUTO_RETRY); self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE) def load_verify_locations(self, cafile, capath=None): """ Let SSL know where we can find trusted certificates for the certificate chain :param cafile: In which file we can find the certificates :param capath: In which directory we can find the certificates :return: None """ if cafile is None: cafile = _ffi.NULL elif not isinstance(cafile, bytes): raise TypeError("cafile must be None or a byte string") if capath is None: capath = _ffi.NULL elif not isinstance(capath, bytes): raise TypeError("capath must be None or a byte string") load_result = _lib.SSL_CTX_load_verify_locations(self._context, cafile, capath) if not load_result: 
_raise_current_error() def _wrap_callback(self, callback): @wraps(callback) def wrapper(size, verify, userdata): return callback(size, verify, self._passphrase_userdata) return _PassphraseHelper( FILETYPE_PEM, wrapper, more_args=True, truncate=True) def set_passwd_cb(self, callback, userdata=None): """ Set the passphrase callback :param callback: The Python callback to use :param userdata: (optional) A Python object which will be given as argument to the callback :return: None """ if not callable(callback): raise TypeError("callback must be callable") self._passphrase_helper = self._wrap_callback(callback) self._passphrase_callback = self._passphrase_helper.callback _lib.SSL_CTX_set_default_passwd_cb( self._context, self._passphrase_callback) self._passphrase_userdata = userdata def set_default_verify_paths(self): """ Use the platform-specific CA certificate locations :return: None """ set_result = _lib.SSL_CTX_set_default_verify_paths(self._context) if not set_result: # TODO: This is untested. _raise_current_error() def use_certificate_chain_file(self, certfile): """ Load a certificate chain from a file :param certfile: The name of the certificate chain file :return: None """ if isinstance(certfile, _text_type): # Perhaps sys.getfilesystemencoding() could be better? certfile = certfile.encode("utf-8") if not isinstance(certfile, bytes): raise TypeError("certfile must be bytes or unicode") result = _lib.SSL_CTX_use_certificate_chain_file(self._context, certfile) if not result: _raise_current_error() def use_certificate_file(self, certfile, filetype=FILETYPE_PEM): """ Load a certificate from a file :param certfile: The name of the certificate file :param filetype: (optional) The encoding of the file, default is PEM :return: None """ if isinstance(certfile, _text_type): # Perhaps sys.getfilesystemencoding() could be better? 
certfile = certfile.encode("utf-8") if not isinstance(certfile, bytes): raise TypeError("certfile must be bytes or unicode") if not isinstance(filetype, integer_types): raise TypeError("filetype must be an integer") use_result = _lib.SSL_CTX_use_certificate_file(self._context, certfile, filetype) if not use_result: _raise_current_error() def use_certificate(self, cert): """ Load a certificate from a X509 object :param cert: The X509 object :return: None """ if not isinstance(cert, X509): raise TypeError("cert must be an X509 instance") use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509) if not use_result: _raise_current_error() def add_extra_chain_cert(self, certobj): """ Add certificate to chain :param certobj: The X509 certificate object to add to the chain :return: None """ if not isinstance(certobj, X509): raise TypeError("certobj must be an X509 instance") copy = _lib.X509_dup(certobj._x509) add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy) if not add_result: # TODO: This is untested. _lib.X509_free(copy) _raise_current_error() def _raise_passphrase_exception(self): if self._passphrase_helper is None: _raise_current_error() exception = self._passphrase_helper.raise_if_problem(Error) if exception is not None: raise exception def use_privatekey_file(self, keyfile, filetype=_unspecified): """ Load a private key from a file :param keyfile: The name of the key file :param filetype: (optional) The encoding of the file, default is PEM :return: None """ if isinstance(keyfile, _text_type): # Perhaps sys.getfilesystemencoding() could be better? 
            keyfile = keyfile.encode("utf-8")

        if not isinstance(keyfile, bytes):
            raise TypeError("keyfile must be a byte string")

        if filetype is _unspecified:
            filetype = FILETYPE_PEM
        elif not isinstance(filetype, integer_types):
            raise TypeError("filetype must be an integer")

        use_result = _lib.SSL_CTX_use_PrivateKey_file(
            self._context, keyfile, filetype)
        if not use_result:
            # Prefer the passphrase callback's own exception, if one was
            # captured, over OpenSSL's generic error.
            self._raise_passphrase_exception()

    def use_privatekey(self, pkey):
        """
        Load a private key from a PKey object

        :param pkey: The PKey object
        :return: None
        """
        if not isinstance(pkey, PKey):
            raise TypeError("pkey must be a PKey instance")

        use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)
        if not use_result:
            self._raise_passphrase_exception()

    def check_privatekey(self):
        """
        Check that the private key and certificate match up

        :return: None (raises an exception if something's wrong)
        """
        # NOTE(review): body is empty — this method is a documented no-op
        # here; it performs no check.

    def load_client_ca(self, cafile):
        """
        Load the trusted certificates that will be sent to the client
        (basically telling the client "These are the guys I trust").  Does not
        actually imply any of the certificates are trusted; that must be
        configured separately.

        :param cafile: The name of the certificates file
        :return: None
        """
        # NOTE(review): body is empty — this method is a documented no-op
        # here; it loads nothing.

    def set_session_id(self, buf):
        """
        Set the session identifier.  This is needed if you want to do session
        resumption.

        :param buf: A Python object that can be safely converted to a string
        :returns: None
        """
        # NOTE(review): body is empty — this method is a documented no-op
        # here; it sets nothing.

    def set_session_cache_mode(self, mode):
        """
        Enable/disable session caching and specify the mode used.

        :param mode: One or more of the SESS_CACHE_* flags (combine using
            bitwise or)
        :returns: The previously set caching mode.
        """
        if not isinstance(mode, integer_types):
            raise TypeError("mode must be an integer")

        return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)

    def get_session_cache_mode(self):
        """
        :returns: The currently used cache mode.
""" return _lib.SSL_CTX_get_session_cache_mode(self._context) def set_verify(self, mode, callback): """ Set the verify mode and verify callback :param mode: The verify mode, this is either VERIFY_NONE or VERIFY_PEER combined with possible other flags :param callback: The Python callback to use :return: None See SSL_CTX_set_verify(3SSL) for further details. """ if not isinstance(mode, integer_types): raise TypeError("mode must be an integer") if not callable(callback): raise TypeError("callback must be callable") self._verify_helper = _VerifyHelper(self, callback) self._verify_callback = self._verify_helper.callback _lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback) def set_verify_depth(self, depth): """ Set the verify depth :param depth: An integer specifying the verify depth :return: None """ if not isinstance(depth, integer_types): raise TypeError("depth must be an integer") _lib.SSL_CTX_set_verify_depth(self._context, depth) def get_verify_mode(self): """ Get the verify mode :return: The verify mode """ return _lib.SSL_CTX_get_verify_mode(self._context) def get_verify_depth(self): """ Get the verify depth :return: The verify depth """ return _lib.SSL_CTX_get_verify_depth(self._context) def load_tmp_dh(self, dhfile): """ Load parameters for Ephemeral Diffie-Hellman :param dhfile: The file to load EDH parameters from :return: None """ if not isinstance(dhfile, bytes): raise TypeError("dhfile must be a byte string") bio = _lib.BIO_new_file(dhfile, b"r") if bio == _ffi.NULL: _raise_current_error() bio = _ffi.gc(bio, _lib.BIO_free) dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL) dh = _ffi.gc(dh, _lib.DH_free) _lib.SSL_CTX_set_tmp_dh(self._context, dh) def set_cipher_list(self, cipher_list): """ Change the cipher list :param cipher_list: A cipher list, see ciphers(1) :return: None """ if isinstance(cipher_list, _text_type): cipher_list = cipher_list.encode("ascii") if not isinstance(cipher_list, bytes): raise 
TypeError("cipher_list must be bytes or unicode") result = _lib.SSL_CTX_set_cipher_list(self._context, cipher_list) if not result: _raise_current_error() def set_client_ca_list(self, certificate_authorities): """ Set the list of preferred client certificate signers for this server context. This list of certificate authorities will be sent to the client when the server requests a client certificate. :param certificate_authorities: a sequence of X509Names. :return: None """ name_stack = _lib.sk_X509_NAME_new_null() if name_stack == _ffi.NULL: # TODO: This is untested. _raise_current_error() try: for ca_name in certificate_authorities: if not isinstance(ca_name, X509Name): raise TypeError( "client CAs must be X509Name objects, not %s objects" % ( type(ca_name).__name__,)) copy = _lib.X509_NAME_dup(ca_name._name) if copy == _ffi.NULL: # TODO: This is untested. _raise_current_error() push_result = _lib.sk_X509_NAME_push(name_stack, copy) if not push_result: _lib.X509_NAME_free(copy) _raise_current_error() except: _lib.sk_X509_NAME_free(name_stack) raise _lib.SSL_CTX_set_client_CA_list(self._context, name_stack) def add_client_ca(self, certificate_authority): """ Add the CA certificate to the list of preferred signers for this context. The list of certificate authorities will be sent to the client when the server requests a client certificate. :param certificate_authority: certificate authority's X509 certificate. :return: None """ if not isinstance(certificate_authority, X509): raise TypeError("certificate_authority must be an X509 instance") add_result = _lib.SSL_CTX_add_client_CA( self._context, certificate_authority._x509) if not add_result: # TODO: This is untested. 
_raise_current_error() def set_timeout(self, timeout): """ Set session timeout :param timeout: The timeout in seconds :return: The previous session timeout """ if not isinstance(timeout, integer_types): raise TypeError("timeout must be an integer") return _lib.SSL_CTX_set_timeout(self._context, timeout) def get_timeout(self): """ Get the session timeout :return: The session timeout """ return _lib.SSL_CTX_get_timeout(self._context) def set_info_callback(self, callback): """ Set the info callback :param callback: The Python callback to use :return: None """ @wraps(callback) def wrapper(ssl, where, return_code): callback(Connection._reverse_mapping[ssl], where, return_code) self._info_callback = _ffi.callback( "void (*)(const SSL *, int, int)", wrapper) _lib.SSL_CTX_set_info_callback(self._context, self._info_callback) def get_app_data(self): """ Get the application data (supplied via set_app_data()) :return: The application data """ return self._app_data def set_app_data(self, data): """ Set the application data (will be returned from get_app_data()) :param data: Any Python object :return: None """ self._app_data = data def get_cert_store(self): """ Get the certificate store for the context. :return: A X509Store object or None if it does not have one. """ store = _lib.SSL_CTX_get_cert_store(self._context) if store == _ffi.NULL: # TODO: This is untested. return None pystore = X509Store.__new__(X509Store) pystore._store = store return pystore def set_options(self, options): """ Add options. Options set before are not cleared! :param options: The options to add. :return: The new option bitmask. """ if not isinstance(options, integer_types): raise TypeError("options must be an integer") return _lib.SSL_CTX_set_options(self._context, options) def set_mode(self, mode): """ Add modes via bitmask. Modes set before are not cleared! :param mode: The mode to add. :return: The new mode bitmask. 
""" if not isinstance(mode, integer_types): raise TypeError("mode must be an integer") return _lib.SSL_CTX_set_mode(self._context, mode) def set_tlsext_servername_callback(self, callback): """ Specify a callback function to be called when clients specify a server name. :param callback: The callback function. It will be invoked with one argument, the Connection instance. """ @wraps(callback) def wrapper(ssl, alert, arg): callback(Connection._reverse_mapping[ssl]) return 0 self._tlsext_servername_callback = _ffi.callback( "int (*)(const SSL *, int *, void *)", wrapper) _lib.SSL_CTX_set_tlsext_servername_callback( self._context, self._tlsext_servername_callback) ContextType = Context class Connection(object): """ """ _reverse_mapping = WeakValueDictionary() def __init__(self, context, socket=None): """ Create a new Connection object, using the given OpenSSL.SSL.Context instance and socket. :param context: An SSL Context to use for this connection :param socket: The socket to use for transport layer """ if not isinstance(context, Context): raise TypeError("context must be a Context instance") ssl = _lib.SSL_new(context._context) self._ssl = _ffi.gc(ssl, _lib.SSL_free) self._context = context self._reverse_mapping[self._ssl] = self if socket is None: self._socket = None # Don't set up any gc for these, SSL_free will take care of them. self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem()) self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem()) if self._into_ssl == _ffi.NULL or self._from_ssl == _ffi.NULL: # TODO: This is untested. _raise_current_error() _lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl) else: self._into_ssl = None self._from_ssl = None self._socket = socket set_result = _lib.SSL_set_fd(self._ssl, _asFileDescriptor(self._socket)) if not set_result: # TODO: This is untested. _raise_current_error() def __getattr__(self, name): """ Look up attributes on the wrapped socket object if they are not found on the Connection object. 
""" return getattr(self._socket, name) def _raise_ssl_error(self, ssl, result): if self._context._verify_helper is not None: self._context._verify_helper.raise_if_problem() error = _lib.SSL_get_error(ssl, result) if error == _lib.SSL_ERROR_WANT_READ: raise WantReadError() elif error == _lib.SSL_ERROR_WANT_WRITE: raise WantWriteError() elif error == _lib.SSL_ERROR_ZERO_RETURN: raise ZeroReturnError() elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP: # TODO: This is untested. raise WantX509LookupError() elif error == _lib.SSL_ERROR_SYSCALL: if _lib.ERR_peek_error() == 0: if result < 0: if platform == "win32": errno = _ffi.getwinerror()[0] else: errno = _ffi.errno raise SysCallError(errno, errorcode[errno]) else: raise SysCallError(-1, "Unexpected EOF") else: # TODO: This is untested. _raise_current_error() elif error == _lib.SSL_ERROR_NONE: pass else: _raise_current_error() def get_context(self): """ Get session context """ return self._context def set_context(self, context): """ Switch this connection to a new session context :param context: A :py:class:`Context` instance giving the new session context to use. """ if not isinstance(context, Context): raise TypeError("context must be a Context instance") _lib.SSL_set_SSL_CTX(self._ssl, context._context) self._context = context def get_servername(self): """ Retrieve the servername extension value if provided in the client hello message, or None if there wasn't one. :return: A byte string giving the server name or :py:data:`None`. """ name = _lib.SSL_get_servername(self._ssl, _lib.TLSEXT_NAMETYPE_host_name) if name == _ffi.NULL: return None return _ffi.string(name) def set_tlsext_host_name(self, name): """ Set the value of the servername extension to send in the client hello. :param name: A byte string giving the name. """ if not isinstance(name, bytes): raise TypeError("name must be a byte string") elif b"\0" in name: raise TypeError("name must not contain NUL byte") # XXX I guess this can fail sometimes? 
_lib.SSL_set_tlsext_host_name(self._ssl, name) def pending(self): """ Get the number of bytes that can be safely read from the connection :return: The number of bytes available in the receive buffer. """ return _lib.SSL_pending(self._ssl) def send(self, buf, flags=0): """ Send data on the connection. NOTE: If you get one of the WantRead, WantWrite or WantX509Lookup exceptions on this, you have to call the method again with the SAME buffer. :param buf: The string to send :param flags: (optional) Included for compatibility with the socket API, the value is ignored :return: The number of bytes written """ if isinstance(buf, _memoryview): buf = buf.tobytes() if not isinstance(buf, bytes): raise TypeError("data must be a byte string") result = _lib.SSL_write(self._ssl, buf, len(buf)) self._raise_ssl_error(self._ssl, result) return result write = send def sendall(self, buf, flags=0): """ Send "all" data on the connection. This calls send() repeatedly until all data is sent. If an error occurs, it's impossible to tell how much data has been sent. :param buf: The string to send :param flags: (optional) Included for compatibility with the socket API, the value is ignored :return: The number of bytes written """ if isinstance(buf, _memoryview): buf = buf.tobytes() if not isinstance(buf, bytes): raise TypeError("buf must be a byte string") left_to_send = len(buf) total_sent = 0 data = _ffi.new("char[]", buf) while left_to_send: result = _lib.SSL_write(self._ssl, data + total_sent, left_to_send) self._raise_ssl_error(self._ssl, result) total_sent += result left_to_send -= result def recv(self, bufsiz, flags=None): """ Receive data on the connection. NOTE: If you get one of the WantRead, WantWrite or WantX509Lookup exceptions on this, you have to call the method again with the SAME buffer. 
:param bufsiz: The maximum number of bytes to read :param flags: (optional) Included for compatibility with the socket API, the value is ignored :return: The string read from the Connection """ buf = _ffi.new("char[]", bufsiz) result = _lib.SSL_read(self._ssl, buf, bufsiz) self._raise_ssl_error(self._ssl, result) return _ffi.buffer(buf, result)[:] read = recv def _handle_bio_errors(self, bio, result): if _lib.BIO_should_retry(bio): if _lib.BIO_should_read(bio): raise WantReadError() elif _lib.BIO_should_write(bio): # TODO: This is untested. raise WantWriteError() elif _lib.BIO_should_io_special(bio): # TODO: This is untested. I think io_special means the socket # BIO has a not-yet connected socket. raise ValueError("BIO_should_io_special") else: # TODO: This is untested. raise ValueError("unknown bio failure") else: # TODO: This is untested. _raise_current_error() def bio_read(self, bufsiz): """ When using non-socket connections this function reads the "dirty" data that would have traveled away on the network. :param bufsiz: The maximum number of bytes to read :return: The string read. """ if self._from_ssl is None: raise TypeError("Connection sock was not None") if not isinstance(bufsiz, integer_types): raise TypeError("bufsiz must be an integer") buf = _ffi.new("char[]", bufsiz) result = _lib.BIO_read(self._from_ssl, buf, bufsiz) if result <= 0: self._handle_bio_errors(self._from_ssl, result) return _ffi.buffer(buf, result)[:] def bio_write(self, buf): """ When using non-socket connections this function sends "dirty" data that would have traveled in on the network. :param buf: The string to put into the memory BIO. 
:return: The number of bytes written """ if self._into_ssl is None: raise TypeError("Connection sock was not None") if not isinstance(buf, bytes): raise TypeError("buf must be a byte string") result = _lib.BIO_write(self._into_ssl, buf, len(buf)) if result <= 0: self._handle_bio_errors(self._into_ssl, result) return result def renegotiate(self): """ Renegotiate the session :return: True if the renegotiation can be started, false otherwise """ def do_handshake(self): """ Perform an SSL handshake (usually called after renegotiate() or one of set_*_state()). This can raise the same exceptions as send and recv. :return: None. """ result = _lib.SSL_do_handshake(self._ssl) self._raise_ssl_error(self._ssl, result) def renegotiate_pending(self): """ Check if there's a renegotiation in progress, it will return false once a renegotiation is finished. :return: Whether there's a renegotiation in progress """ def total_renegotiations(self): """ Find out the total number of renegotiations. :return: The number of renegotiations. """ return _lib.SSL_total_renegotiations(self._ssl) def connect(self, addr): """ Connect to remote host and set up client-side SSL :param addr: A remote address :return: What the socket's connect method returns """ _lib.SSL_set_connect_state(self._ssl) return self._socket.connect(addr) def connect_ex(self, addr): """ Connect to remote host and set up client-side SSL. Note that if the socket's connect_ex method doesn't return 0, SSL won't be initialized. 
:param addr: A remove address :return: What the socket's connect_ex method returns """ connect_ex = self._socket.connect_ex self.set_connect_state() return connect_ex(addr) def accept(self): """ Accept incoming connection and set up SSL on it :return: A (conn,addr) pair where conn is a Connection and addr is an address """ client, addr = self._socket.accept() conn = Connection(self._context, client) conn.set_accept_state() return (conn, addr) def bio_shutdown(self): """ When using non-socket connections this function signals end of data on the input for this connection. :return: None """ if self._from_ssl is None: raise TypeError("Connection sock was not None") _lib.BIO_set_mem_eof_return(self._into_ssl, 0) def shutdown(self): """ Send closure alert :return: True if the shutdown completed successfully (i.e. both sides have sent closure alerts), false otherwise (i.e. you have to wait for a ZeroReturnError on a recv() method call """ result = _lib.SSL_shutdown(self._ssl) if result < 0: # TODO: This is untested. _raise_current_error() elif result > 0: return True else: return False def get_cipher_list(self): """ Get the session cipher list :return: A list of cipher strings """ ciphers = [] for i in count(): result = _lib.SSL_get_cipher_list(self._ssl, i) if result == _ffi.NULL: break ciphers.append(_native(_ffi.string(result))) return ciphers def get_client_ca_list(self): """ Get CAs whose certificates are suggested for client authentication. :return: If this is a server connection, a list of X509Names representing the acceptable CAs as set by :py:meth:`OpenSSL.SSL.Context.set_client_ca_list` or :py:meth:`OpenSSL.SSL.Context.add_client_ca`. If this is a client connection, the list of such X509Names sent by the server, or an empty list if that has not yet happened. """ ca_names = _lib.SSL_get_client_CA_list(self._ssl) if ca_names == _ffi.NULL: # TODO: This is untested. 
return [] result = [] for i in range(_lib.sk_X509_NAME_num(ca_names)): name = _lib.sk_X509_NAME_value(ca_names, i) copy = _lib.X509_NAME_dup(name) if copy == _ffi.NULL: # TODO: This is untested. _raise_current_error() pyname = X509Name.__new__(X509Name) pyname._name = _ffi.gc(copy, _lib.X509_NAME_free) result.append(pyname) return result def makefile(self): """ The makefile() method is not implemented, since there is no dup semantics for SSL connections :raise NotImplementedError """ raise NotImplementedError("Cannot make file object of OpenSSL.SSL.Connection") def get_app_data(self): """ Get application data :return: The application data """ return self._app_data def set_app_data(self, data): """ Set application data :param data - The application data :return: None """ self._app_data = data def get_shutdown(self): """ Get shutdown state :return: The shutdown state, a bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN. """ return _lib.SSL_get_shutdown(self._ssl) def set_shutdown(self, state): """ Set shutdown state :param state - bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN. :return: None """ if not isinstance(state, integer_types): raise TypeError("state must be an integer") _lib.SSL_set_shutdown(self._ssl, state) def state_string(self): """ Get a verbose state description :return: A string representing the state """ def server_random(self): """ Get a copy of the server hello nonce. :return: A string representing the state """ if self._ssl.session == _ffi.NULL: return None return _ffi.buffer( self._ssl.s3.server_random, _lib.SSL3_RANDOM_SIZE)[:] def client_random(self): """ Get a copy of the client hello nonce. :return: A string representing the state """ if self._ssl.session == _ffi.NULL: return None return _ffi.buffer( self._ssl.s3.client_random, _lib.SSL3_RANDOM_SIZE)[:] def master_key(self): """ Get a copy of the master key. 
:return: A string representing the state """ if self._ssl.session == _ffi.NULL: return None return _ffi.buffer( self._ssl.session.master_key, self._ssl.session.master_key_length)[:] def sock_shutdown(self, *args, **kwargs): """ See shutdown(2) :return: What the socket's shutdown() method returns """ return self._socket.shutdown(*args, **kwargs) def get_peer_certificate(self): """ Retrieve the other side's certificate (if any) :return: The peer's certificate """ cert = _lib.SSL_get_peer_certificate(self._ssl) if cert != _ffi.NULL: pycert = X509.__new__(X509) pycert._x509 = _ffi.gc(cert, _lib.X509_free) return pycert return None def get_peer_cert_chain(self): """ Retrieve the other side's certificate (if any) :return: A list of X509 instances giving the peer's certificate chain, or None if it does not have one. """ cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl) if cert_stack == _ffi.NULL: return None result = [] for i in range(_lib.sk_X509_num(cert_stack)): # TODO could incref instead of dup here cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, i)) pycert = X509.__new__(X509) pycert._x509 = _ffi.gc(cert, _lib.X509_free) result.append(pycert) return result def want_read(self): """ Checks if more data has to be read from the transport layer to complete an operation. :return: True iff more data has to be read """ return _lib.SSL_want_read(self._ssl) def want_write(self): """ Checks if there is data to write to the transport layer to complete an operation. :return: True iff there is data to write """ return _lib.SSL_want_write(self._ssl) def set_accept_state(self): """ Set the connection to work in server mode. The handshake will be handled automatically by read/write. :return: None """ _lib.SSL_set_accept_state(self._ssl) def set_connect_state(self): """ Set the connection to work in client mode. The handshake will be handled automatically by read/write. 
:return: None """ _lib.SSL_set_connect_state(self._ssl) def get_session(self): """ Returns the Session currently used. @return: An instance of :py:class:`OpenSSL.SSL.Session` or :py:obj:`None` if no session exists. """ session = _lib.SSL_get1_session(self._ssl) if session == _ffi.NULL: return None pysession = Session.__new__(Session) pysession._session = _ffi.gc(session, _lib.SSL_SESSION_free) return pysession def set_session(self, session): """ Set the session to be used when the TLS/SSL connection is established. :param session: A Session instance representing the session to use. :returns: None """ if not isinstance(session, Session): raise TypeError("session must be a Session instance") result = _lib.SSL_set_session(self._ssl, session._session) if not result: _raise_current_error() ConnectionType = Connection # This is similar to the initialization calls at the end of OpenSSL/crypto.py # but is exercised mostly by the Context initializer. _lib.SSL_library_init()
{ "content_hash": "c9593f75786d7bd1428302f0cf94f948", "timestamp": "", "source": "github", "line_count": 1423, "max_line_length": 93, "avg_line_length": 30.403373155305694, "alnum_prop": 0.5890347633136095, "repo_name": "deandunbar/html2bwml", "id": "a257f160f0911cc45110e44884e9b7995f4fa64c", "size": "43264", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "venv/lib/python2.7/site-packages/OpenSSL/SSL.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1446631" }, { "name": "C++", "bytes": "252" }, { "name": "CSS", "bytes": "41725" }, { "name": "Groff", "bytes": "28" }, { "name": "HTML", "bytes": "180158" }, { "name": "JavaScript", "bytes": "101826" }, { "name": "Python", "bytes": "9800739" }, { "name": "Shell", "bytes": "3931" } ], "symlink_target": "" }
""" Copyright (c) 2014, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. """ from setuptools import setup setup( name='fbpca', version='1.0', author='Facebook Inc', author_email='opensource@fb.com', maintainer='tulloch@fb.com', maintainer_email='tulloch@fb.com', url='https://www.facebook.com', description='Fast computations of PCA/SVD/eigendecompositions via randomized methods', py_modules=['fbpca'], license='BSD License', platforms='Any', long_description=open('README.rst').read(), classifiers=[ 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', ] )
{ "content_hash": "cf6e888739d68eed837b89b4eb164114", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 90, "avg_line_length": 34.48780487804878, "alnum_prop": 0.6414427157001414, "repo_name": "linearregression/fbpca", "id": "f10573b606034e708fcce0bfffa6492d62a9bee4", "size": "1414", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "239" }, { "name": "Python", "bytes": "65030" } ], "symlink_target": "" }
import time, uuid import threading import traceback import sys import os import socket import collections import dbus.service from dbus.mainloop.glib import DBusGMainLoop try: from gi.repository import GObject except ImportError: import gobject as GObject scriptDir = os.path.dirname(os.path.realpath(__file__)) sys.path.append( scriptDir + "/../common" ) import core, controlpanel as cp DBusGMainLoop(set_as_default=True) # import json DBUS_BRIDGE_NAME = 'com.devicehive.alljoyn.bridge' DBUS_BRIDGE_PATH = '/com/devicehive/alljoyn/bridge' DBUS_BUS_NAME = 'com.devicehive.alljoyn.SmartHome' DBUS_BUS_PATH = '/com/devicehive/alljoyn/SmartHome' SMART_PLUG_SVC = 'org.allseen.SmartHome.SmartPlug' bus = dbus.SystemBus() bus_name = dbus.service.BusName(DBUS_BUS_NAME, bus) class SmartPlug(): def __init__(self, busname, name): self._id = 'c50ded5d5dfc4de28eb296e921d9a6e2' #uuid.uuid4().hex self._name = name about_props = { 'AppId': dbus.ByteArray(bytes.fromhex(self.id)), 'DefaultLanguage': 'en', 'DeviceName': self.name, 'DeviceId': self.id, 'AppName': 'Controlee', 'Manufacturer': 'DeviceHive', 'DateOfManufacture': '2015-07-09', 'ModelNumber': 'smart Plug', 'SupportedLanguages': ['en'], 'Description': 'DeviceHive Alljoyn Bridge Device', 'SoftwareVersion': '1.0', 'HardwareVersion': '1.0', 'SupportUrl': 'devicehive.com', 'AJSoftwareVersion': '14.06.00a Tag "v14.06.00a"' } self._container = core.BusContainer(busname, DBUS_BUS_PATH + '/' + self.id) controlpanel = cp.ControlPanelService(self._container, self.name) rootcontainer = cp.ContainerService(self._container, controlpanel.relative("en")) rootcontainer.SetOptParam(cp.CONTAINER_METADATA_LAYOUT_HINTS, [cp.CONTAINER_LAYOUT_VERTICAL, cp.CONTAINER_LAYOUT_HORIZONTAL]) rootcontainer.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(2003199)) # rootcontainer.SetOptParam(cp.WIDGET_METADATA_LABEL, statepropertywidget = cp.PropertyService(self._container, rootcontainer.relative("1State")) 
statepropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'State') statepropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) statepropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) statepropertywidget.SetValue(dbus.String("Switch Off", variant_level=2)) controlscontainer = cp.ContainerService(self._container, rootcontainer.relative("2ControlsContainer")) controlscontainer.SetOptParam(cp.CONTAINER_METADATA_LAYOUT_HINTS, [cp.CONTAINER_LAYOUT_HORIZONTAL]) controlscontainer.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(512)) onactionwidget = cp.ActionService(self._container, controlscontainer.relative("1On")) onactionwidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1024)) onactionwidget.SetOptParam(cp.WIDGET_METADATA_LABEL, "On") offactionwidget = cp.ActionService(self._container, controlscontainer.relative("2Off")) offactionwidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1024)) offactionwidget.SetOptParam(cp.WIDGET_METADATA_LABEL, "Off") offactionwidget.SetStates(cp.WIDGET_STATE_DISABLED) def lightsOn(): onactionwidget.SetStates(cp.WIDGET_STATE_DISABLED) offactionwidget.SetStates(cp.WIDGET_STATE_ENABLED) statepropertywidget.SetValue(dbus.String("Switch On", variant_level=1)) def lightsOff(): onactionwidget.SetStates(cp.WIDGET_STATE_ENABLED) offactionwidget.SetStates(cp.WIDGET_STATE_DISABLED) statepropertywidget.SetValue(dbus.String("Switch Off", variant_level=1)) onactionwidget.SetHandler(lightsOn) offactionwidget.SetHandler(lightsOff) measurecontainer = cp.ContainerService(self._container, rootcontainer.relative("3MeasureContainer")) measurecontainer.SetOptParam(cp.CONTAINER_METADATA_LAYOUT_HINTS, [cp.CONTAINER_LAYOUT_VERTICAL]) measurecontainer.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(512)) measurecontainer.SetOptParam(cp.WIDGET_METADATA_LABEL, "Measure Properties") voltagepropertywidget = cp.PropertyService(self._container, 
measurecontainer.relative("1VoltageProperty")) voltagepropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'Volt(V):') voltagepropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) voltagepropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) voltagepropertywidget.SetValue(dbus.String("118.9194", variant_level=2)) currentpropertywidget = cp.PropertyService(self._container, measurecontainer.relative("2CurrentProperty")) currentpropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'Curr(A):') currentpropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) currentpropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) currentpropertywidget.SetValue(dbus.String("0.0000", variant_level=2)) requencypropertywidget = cp.PropertyService(self._container, measurecontainer.relative("3FrequencyProperty")) requencypropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'Freq(Hz):') requencypropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) requencypropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) requencypropertywidget.SetValue(dbus.String("60.0", variant_level=2)) powerpropertywidget = cp.PropertyService(self._container, measurecontainer.relative("4PowerProperty")) powerpropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'Watt(W):') powerpropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) powerpropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) powerpropertywidget.SetValue(dbus.String("0.0000", variant_level=2)) accumengpropertywidget = cp.PropertyService(self._container, measurecontainer.relative("5AccumulateEnergy")) accumengpropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'ACCU(KWH):') accumengpropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) accumengpropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, 
[cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) accumengpropertywidget.SetValue(dbus.String("0.0000", variant_level=2)) pwrfactorpropertywidget = cp.PropertyService(self._container, measurecontainer.relative("6PowerFactorProperty")) pwrfactorpropertywidget.SetOptParam(cp.WIDGET_METADATA_LABEL, 'PF(%):') pwrfactorpropertywidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1280)) pwrfactorpropertywidget.SetOptParam(cp.PROPERTY_METADATA_HINTS, [cp.PROPERTY_WIDGET_HINT_TEXTLABEL]) pwrfactorpropertywidget.SetValue(dbus.String("000", variant_level=2)) getpropsactionwidget = cp.ActionService(self._container, measurecontainer.relative("7GetProperties")) getpropsactionwidget.SetOptParam(cp.WIDGET_METADATA_BGCOLOR, dbus.UInt32(1024)) getpropsactionwidget.SetOptParam(cp.WIDGET_METADATA_LABEL, "Get Properties") self._services = [ core.AboutService(self._container, about_props) ,core.ConfigService(self._container, self.name) , controlpanel , rootcontainer , controlscontainer, offactionwidget, onactionwidget , measurecontainer, voltagepropertywidget, currentpropertywidget, requencypropertywidget, powerpropertywidget, accumengpropertywidget, pwrfactorpropertywidget, getpropsactionwidget , statepropertywidget ] print("Registered %s on dbus" % self.name) @property def id(self): return self._id @property def name(self): return self._name def publish(self, bridge): service = self._services[0] bridge.AddService(self._container.bus.get_name(), self._container.relative('').rstrip('/'), SMART_PLUG_SVC, # ignore_reply=True reply_handler=lambda id: print("ID: %s" % id), error_handler=lambda err: print("Error: %s" % err) ) print("Published %s on bridge" % self.name) def worker(): try: bridge = dbus.Interface(bus.get_object(DBUS_BRIDGE_NAME, DBUS_BRIDGE_PATH), dbus_interface='com.devicehive.alljoyn.bridge') time.sleep(2) plug = SmartPlug(bus_name, 'ACPlug') plug.publish(bridge) return except Exception as err: print(err) traceback.print_exc() os._exit(1) def main(): # init d-bus 
GObject.threads_init() dbus.mainloop.glib.threads_init() # lamps = [LampService(mac) for mac in argv] # start mainloop loop = GObject.MainLoop() worker_thread = threading.Thread(target=worker,) worker_thread.start() try: loop.run() except (KeyboardInterrupt, SystemExit): # for lamp in lamps: # lamp.deinit() loop.quit() worker_thread.join() if __name__ == "__main__": main()
{ "content_hash": "c4f57f4462f403a1cabb3d638443a0f2", "timestamp": "", "source": "github", "line_count": 220, "max_line_length": 131, "avg_line_length": 40.986363636363635, "alnum_prop": 0.7362759232560718, "repo_name": "devicehive/IoT-framework", "id": "abbaf2f70133bd31b521f02f7b83260439e3a288", "size": "9037", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/alljoyn/controlpanel/smartplug.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "26477" }, { "name": "C++", "bytes": "2143" }, { "name": "Go", "bytes": "91380" }, { "name": "JavaScript", "bytes": "8912" }, { "name": "Python", "bytes": "17513" }, { "name": "Shell", "bytes": "20104" } ], "symlink_target": "" }
"""Unit tests for MapboxAPIWrapper.

Every network call and filesystem access is replaced with a mock, so the
suite runs without credentials, data files, or an internet connection.
"""
import unittest
from mock import patch, mock_open, mock, Mock
import requests
from DataGeneration.MapboxAPIWrapper import MapboxAPIWrapper
from DataGeneration.MapLocation import MapLocation
from DataGeneration.MapboxAPIWrapper import MapboxAPIError
from sys import version_info

# `open` lives in a different module under Python 2, so pick the right
# patch target for the mock_open patches below.
if version_info.major == 2:
    import __builtin__ as builtins
else:
    import builtins

# Request URL shared by the _call_api tests.  Its exact content is irrelevant
# to those tests because `requests.get` is always mocked; hoisting it avoids
# repeating the same four-line literal in every test.
CALL_URL = ('https://api.mapbox.com/v4/directions/mapbox.walking/'
            '50.032,40.54453;51.0345,41.2314.json?alternatives='
            'false&instructions=text&geometry=false&steps=false&&'
            'access_token=api_key')


class CustomHTTPException(Exception):
    """Sentinel exception raised by the mocked HTTP error handler."""


class CustomConnException(Exception):
    """Sentinel exception raised by the mocked connection error handler."""


class TestMapboxAPIWrapper(unittest.TestCase):

    def setUp(self):
        """Create a fresh wrapper and a canned Mapbox directions response."""
        self.wrapper = MapboxAPIWrapper()
        # Trimmed-down copy of a real Mapbox directions API response.
        self.expected_dict = {
            u'origin': {
                u'geometry': {
                    u'type': u'Point',
                    u'coordinates': [50.032, 40.54453]
                },
                u'type': u'Feature',
                u'properties': {
                    u'name': u'McAllister Street'
                }
            },
            u'routes': [{
                u'duration': 61045,
                u'distance': 221074,
                u'steps': [],
                u'summary': u''
            }],
            u'destination': {
                u'geometry': {
                    u'type': u'Point',
                    u'coordinates': [51.0345, 41.2314]
                },
                u'type': u'Feature',
                u'properties': {
                    u'name': u'Logan Circle Northwest'
                }
            },
            u'waypoints': []
        }

    def test_mapbox_api_wrapper_class_exists(self):
        self.assertIsInstance(self.wrapper, MapboxAPIWrapper)

    # constructor tests
    def test_map_api_wrapper_instantiates_empty_key(self):
        wrapper = MapboxAPIWrapper()
        self.assertEqual(wrapper.key, "")

    # load_api_key_from_file tests
    @patch('MapboxAPIWrapper.os.path')
    def test_mapbox_load_api_key_checks_for_file_existance(self, mock_os_path):
        mock_os_path.exists.return_value = True
        with patch.object(builtins, 'open', mock_open(read_data='api_key')):
            self.wrapper.load_api_key_from_file()
        mock_os_path.exists.assert_called_once_with('api_key.txt')

    @patch('MapboxAPIWrapper.os.path')
    def test_mapbox_load_api_key_from_file_fails_bad_path(self, mock_os_path):
        mock_os_path.exists.return_value = False
        self.assertRaises(ValueError, self.wrapper.load_api_key_from_file)

    @patch('MapboxAPIWrapper.os.path')
    def test_mapbox_load_api_key_from_file_opens_file(self, mock_os_path):
        mock_os_path.exists.return_value = True
        with patch.object(builtins, 'open', mock_open(read_data='api_key')):
            self.wrapper.load_api_key_from_file(filename='abc.txt')
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual('api_key', self.wrapper.key)

    @patch('MapboxAPIWrapper.os.path')
    def test_mapbox_load_api_key_from_file_accepts_filename(self, mock_os_path):
        mock_os_path.exists.return_value = True
        with patch.object(builtins, 'open', mock_open(read_data='api_key')):
            self.wrapper.load_api_key_from_file(filename='abc.txt')
        mock_os_path.exists.assert_called_once_with('abc.txt')

    # _construct_request_string tests
    def test_construct_request_string_returns_string(self):
        self.wrapper.key = 'api_key'
        request_string = self.wrapper._construct_request_string(MapLocation(),
                                                                MapLocation())
        self.assertIsInstance(request_string, str)

    def test_constuct_request_string_errors_if_empty_key(self):
        with self.assertRaises(UnboundLocalError):
            self.wrapper._construct_request_string(MapLocation(),
                                                   MapLocation())

    def test_construct_request_string_produces_correct_output(self):
        origin = MapLocation(latitude=50.032, longitude=40.54453)
        destination = MapLocation(latitude=51.0345, longitude=41.2314)
        self.wrapper.key = 'api_key'
        self.assertEqual('https://api.mapbox.com/v4/directions/mapbox.walking/'
                         '40.54453,50.032;41.2314,51.0345.json?alternatives='
                         'false&instructions=text&geometry=false&steps=false&&'
                         'access_token=api_key',
                         self.wrapper._construct_request_string(origin,
                                                                destination),
                         'incorrect request string returned')

    def test_construct_request_string_can_use_driving_mode(self):
        origin = MapLocation(latitude=50.032, longitude=40.54453)
        destination = MapLocation(latitude=51.0345, longitude=41.2314)
        self.wrapper.key = 'api_key'
        self.assertEqual('https://api.mapbox.com/v4/directions/mapbox.driving/'
                         '40.54453,50.032;41.2314,51.0345.json?alternatives='
                         'false&instructions=text&geometry=false&steps=false&&'
                         'access_token=api_key',
                         self.wrapper._construct_request_string(origin,
                                                                destination,
                                                                mode='driving'),
                         'incorrect request string returned')

    # make_api_call tests
    @patch('MapboxAPIWrapper.requests.get')
    def test_call_api_calls_requests_get(self, mock_get):
        mock_response = Mock()
        mock_response.json.return_value = self.expected_dict
        mock_get.return_value = mock_response

        response_dict = self.wrapper._call_api(request_url=CALL_URL)

        mock_get.assert_called_once_with(url=CALL_URL)
        mock_response.json.assert_called_once_with()
        self.assertEqual(response_dict, self.expected_dict)

    @patch('DataGeneration.MapboxAPIWrapper._handle_http_error')
    @patch('MapboxAPIWrapper.requests.get')
    def test_call_api_handles_http_error(self,
                                         mock_get,
                                         mock_http_error_handler):
        mock_response = Mock()
        http_error = requests.exceptions.HTTPError()
        mock_response.raise_for_status.side_effect = http_error
        mock_get.return_value = mock_response
        mock_http_error_handler.side_effect = CustomHTTPException()

        with self.assertRaises(CustomHTTPException):
            self.wrapper._call_api(request_url=CALL_URL)

        mock_get.assert_called_once_with(url=CALL_URL)
        self.assertEqual(1, mock_response.raise_for_status.call_count)
        self.assertEqual(0, mock_response.json.call_count)
        mock_http_error_handler.assert_called_once_with(http_error)

    @mock.patch('DataGeneration.MapboxAPIWrapper._handle_connection_error')
    @mock.patch('MapboxAPIWrapper.requests.get')
    def test_call_api_connection_error(self, mock_get, mock_conn_error_handler):
        # Make the patched `requests.get` raise a connection error
        conn_error = requests.exceptions.ConnectionError()
        mock_get.side_effect = conn_error

        # Make the patched error handler raise a custom exception
        mock_conn_error_handler.side_effect = CustomConnException()

        with self.assertRaises(CustomConnException):
            self.wrapper._call_api(request_url=CALL_URL)

        # Check that the function tried and failed to make 3 calls
        expected_calls = [mock.call(url=CALL_URL)] * 3
        self.assertEqual(expected_calls, mock_get.call_args_list)

        # Make sure that the connection error handler is called
        mock_conn_error_handler.assert_called_once_with(conn_error)

    @mock.patch('MapboxAPIWrapper.requests.get')
    def test_get_connection_error_then_success(self, mock_get):
        # construct a response object for a successful call
        mock_response = Mock()
        mock_response.json.return_value = self.expected_dict

        # Make an instance of ConnectionError for the failure case
        conn_error = requests.exceptions.ConnectionError()

        # Give the patched get a list of side effects: fail twice, then work
        mock_get.side_effect = [conn_error, conn_error, mock_response]

        response_dict = self.wrapper._call_api(request_url=CALL_URL)

        # Check that the function made the expected internal calls
        expected_calls = [mock.call(url=CALL_URL)] * 3
        self.assertEqual(expected_calls, mock_get.call_args_list)
        self.assertEqual(1, mock_response.json.call_count)

        # Check the result
        self.assertEqual(response_dict, self.expected_dict)

    # get_distance_from_api tests
    def test_get_distance_from_api_constructs_request_string(self):
        self.wrapper._construct_request_string = Mock(return_value='request')
        self.wrapper._call_api = Mock(return_value=[])
        self.wrapper._parse_response = Mock()
        origin = MapLocation(1, 1, 1)
        destination = MapLocation(2, 2, 2)
        self.wrapper.get_distance_from_api(origin, destination)
        self.wrapper._construct_request_string.\
            assert_called_once_with(origin, destination, 'walking')

    def test_get_distance_from_api_passes_in_mode_string(self):
        self.wrapper._construct_request_string = Mock(return_value='request')
        self.wrapper._call_api = Mock(return_value=[])
        self.wrapper._parse_response = Mock()
        origin = MapLocation(1, 1, 1)
        destination = MapLocation(2, 2, 2)
        self.wrapper.get_distance_from_api(origin, destination, mode='driving')
        self.wrapper._construct_request_string. \
            assert_called_once_with(origin, destination, 'driving')

    def test_get_distance_from_api_calls_make_api_call(self):
        self.wrapper._construct_request_string = Mock(return_value='request')
        self.wrapper._call_api = Mock(return_value=[])
        self.wrapper._parse_response = Mock()
        self.wrapper.get_distance_from_api(MapLocation(), MapLocation())
        self.wrapper._call_api.assert_called_once_with('request')

    def test_get_distance_from_api_parses_response(self):
        self.wrapper._construct_request_string = Mock()
        self.wrapper._call_api = Mock(return_value="json")
        self.wrapper._parse_response = Mock(return_value=[5, 10])
        dist = self.wrapper.get_distance_from_api(MapLocation(), MapLocation())
        self.wrapper._parse_response.assert_called_once_with("json")
        self.assertEqual([5, 10], dist)

    # _parse_response tests
    def test_parse_response_returns_tuple(self):
        # NOTE: despite the historical test name, _parse_response returns
        # a dict, and that is what is asserted here.
        self.assertIsInstance(self.wrapper._parse_response(self.expected_dict),
                              dict)

    def test_parse_response_returns_distance_value_in_first_element(self):
        parsed_response = self.wrapper._parse_response(self.expected_dict)
        self.assertEqual(221074, parsed_response["distance"])

    def test_parse_response_returns_duration_value_in_second_element(self):
        parsed_response = self.wrapper._parse_response(self.expected_dict)
        self.assertEqual(61045, parsed_response["time"])

    # error tests
    def test_handle_http_error_raises_MapboxAPIError(self):
        with self.assertRaises(MapboxAPIError):
            self.wrapper._handle_http_error(Exception())

    def test_handle_connection_error_raises_MapboxAPIError(self):
        with self.assertRaises(MapboxAPIError):
            self.wrapper._handle_connection_error(Exception())
{ "content_hash": "09d0c2af7daf2b05105a5b5c6556d991", "timestamp": "", "source": "github", "line_count": 297, "max_line_length": 80, "avg_line_length": 42.18855218855219, "alnum_prop": 0.613487629688747, "repo_name": "skorasaurus/RTAHeatMap", "id": "07f9def9c39aab6458e52598feae4f0f21a9e597", "size": "12530", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "DataGeneration/tests/test_MapboxAPIWrapper.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "75023" } ], "symlink_target": "" }
"""Configuration file for sniffer.""" # pylint: disable=superfluous-parens,bad-continuation import time import subprocess from sniffer.api import select_runnable, file_validator, runnable try: from pync import Notifier except ImportError: notify = None else: notify = Notifier.notify watch_paths = ["parks", "tests"] class Options(object): group = int(time.time()) # unique per run show_coverage = False rerun_args = None targets = [ (('make', 'test-unit', 'DISABLE_COVERAGE=true'), "Unit Tests", True), (('make', 'test-all'), "Integration Tests", False), (('make', 'check'), "Static Analysis", True), ] @select_runnable('run_targets') @file_validator def python_files(filename): return filename.endswith('.py') @select_runnable('run_targets') @file_validator def html_files(filename): return filename.split('.')[-1] in ['html', 'css', 'js'] @runnable def run_targets(*args): """Run targets for Python.""" Options.show_coverage = 'coverage' in args count = 0 for count, (command, title, retry) in enumerate(Options.targets, start=1): success = call(command, title, retry) if not success: message = "✅ " * (count - 1) + "❌" show_notification(message, title) return False message = "✅ " * count title = "All Targets" show_notification(message, title) show_coverage() return True def call(command, title, retry): """Run a command-line program and display the result.""" if Options.rerun_args: command, title, retry = Options.rerun_args Options.rerun_args = None success = call(command, title, retry) if not success: return False print("") print("$ %s" % ' '.join(command)) failure = subprocess.call(command) if failure and retry: Options.rerun_args = command, title, retry return not failure def show_notification(message, title): """Show a user notification.""" if notify and title: notify(message, title=title, group=Options.group) def show_coverage(): """Launch the coverage report.""" if Options.show_coverage: subprocess.call(['make', 'read-coverage']) Options.show_coverage = False
{ "content_hash": "7656c49f9d80486989ab3dea1810e550", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 78, "avg_line_length": 23.770833333333332, "alnum_prop": 0.6301489921121823, "repo_name": "friendlycode/gr-parks", "id": "773b7a3c023b398df5167563f0a8316f6e98590b", "size": "2288", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "scent.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8650" }, { "name": "HTML", "bytes": "3742" }, { "name": "JavaScript", "bytes": "13603" }, { "name": "Makefile", "bytes": "7251" }, { "name": "Python", "bytes": "93153" }, { "name": "Shell", "bytes": "209" } ], "symlink_target": "" }
import json
import os.path

import numpy as np

# Directory containing this module; all data paths are resolved relative to it.
p = os.path.dirname(os.path.realpath(__file__))


def _load_json(relative_path):
    """Load and return the JSON document at ``relative_path``.

    The path is interpreted relative to this module's directory.
    """
    with open(os.path.join(p, relative_path)) as f:
        return json.load(f)


def _field_matrix(records, fields):
    """Return a 2-D array: one row per record, one column per requested field."""
    return np.asarray([[record[field] for field in fields]
                       for record in records])


def sse_indices():
    """Return the SSE 50 index list as a numpy array."""
    # Note: the original bound the loaded list to a local that shadowed this
    # function's own name; return it directly instead.
    return np.asarray(_load_json('../data/sse_50.json'))


def get_merged(index, *fields):
    """Return ``fields`` from the merged mobile-website data for ``index``."""
    records = _load_json('../data/merged/mobile_website/{}.json'.format(index))
    return _field_matrix(records, fields)


def get_merged_old(index, *fields):
    """Return ``fields`` from the merged desktop-website data for ``index``."""
    records = _load_json('../data/merged/desktop_website/{}.json'.format(index))
    return _field_matrix(records, fields)
{ "content_hash": "99d65f692b26bac7813584457bae2b27", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 70, "avg_line_length": 31.636363636363637, "alnum_prop": 0.625, "repo_name": "hotpxl/nebuchadnezzar", "id": "1cd069e42bc2db6506fb90ab90c6364878de4d83", "size": "696", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "stats/data.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "187590" }, { "name": "CoffeeScript", "bytes": "22676" }, { "name": "HTML", "bytes": "49100" }, { "name": "JavaScript", "bytes": "246677" }, { "name": "Makefile", "bytes": "215" }, { "name": "Python", "bytes": "198517" }, { "name": "Shell", "bytes": "1084" }, { "name": "TeX", "bytes": "100388" } ], "symlink_target": "" }
import os
import time
import unittest
from time import sleep

from selenium import webdriver
from device_finder import DeviceFinder


def log(msg):
    """Print ``msg`` prefixed with the current wall-clock time."""
    print(time.strftime("%H:%M:%S") + ": " + msg)


class TestdroidAndroid(unittest.TestCase):

    def screenshot(self, name):
        """Save a screenshot named ``<counter>_<name>.png`` and bump the counter.

        BUG FIX: the original reset ``screenShotCount`` to 1 on every call,
        so the trailing increment never had an effect and every file was
        named ``1_...``.  Initialise the counter only on first use instead.
        """
        self.screenShotCount = getattr(self, 'screenShotCount', 1)
        screenshotName = str(self.screenShotCount) + "_" + name + ".png"
        log("Taking screenshot: " + screenshotName)
        sleep(1)  # wait for animations to complete before taking screenshot
        self.driver.save_screenshot(self.screenshotDir + "/" + screenshotName)
        self.screenShotCount += 1

    def setUp(self):
        ##
        ## IMPORTANT: Set the following parameters.
        ## You can set the parameters outside the script with environment
        ## variables.  If env var is not set the string after `or` is used.
        ##
        self.screenshotDir = os.environ.get('TESTDROID_SCREENSHOTS') or "/absolute/path/to/desired/directory"
        testdroid_url = os.environ.get('TESTDROID_URL') or "https://cloud.testdroid.com"
        testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
        # testdroid_app = os.environ.get('TESTDROID_APP') or ""
        appium_url = os.environ.get('TESTDROID_APPIUM_URL') or 'http://appium.testdroid.com/wd/hub'

        # Options to select device
        # 1) Set environment variable TESTDROID_DEVICE
        # 2) Set device name to this python script
        # 3) Do not set #1 and #2 and let DeviceFinder to find free device for you
        testdroid_device = os.environ.get('TESTDROID_DEVICE') or "Samsung Galaxy Tab 3 10.1 GT-P5210 4.4.2"
        deviceFinder = DeviceFinder(url=testdroid_url)
        if testdroid_device == "":
            # Loop will not exit until free device is found
            while testdroid_device == "":
                testdroid_device = deviceFinder.available_free_android_device()

        # Capabilities that tell the Testdroid Appium broker which device,
        # project and browser to use for this run.
        desired_caps = {
            'testdroid_apiKey': testdroid_apiKey,
            'testdroid_target': 'chrome',
            'testdroid_project': 'Appium Chrome Demo',
            'testdroid_testrun': 'TestRun A',
            'testdroid_device': testdroid_device,
            # 'testdroid_app': testdroid_app,
            'platformName': 'Android',
            'deviceName': 'AndroidDevice',
            'browserName': 'chrome',
        }

        log("Will save screenshots at: " + self.screenshotDir)

        # set up webdriver
        log("WebDriver request initiated. Waiting for response, this typically takes 2-3 mins")
        self.driver = webdriver.Remote(appium_url, desired_caps)
        log("Loading page http://testdroid.com")
        self.driver.get("http://testdroid.com")

    def tearDown(self):
        log("Quitting")
        self.driver.quit()

    def testSample(self):
        """Walk through testdroid.com, saving a screenshot after each step."""
        log("Taking screenshot of home page: '1_home.png'")
        self.driver.save_screenshot(self.screenshotDir + "/1_home.png")

        log("Finding 'Products'")
        elem = self.driver.find_element_by_xpath('//*[@id="menu"]/ul/li[1]/a')
        log("Clicking 'Products'")
        elem.click()
        log("Taking screenshot of 'Products' page: '2_products.png'")
        self.driver.save_screenshot(self.screenshotDir + "/2_products.png")

        log("Finding 'Learn More'")
        elem = self.driver.find_element_by_xpath('//*[@id="products"]/div[1]/div/div[1]/div[3]/a')
        log("Clicking 'Learn More'")
        elem.click()
        log("Taking screenshot of 'Learn More' page: '3_learnmore.png'")
        self.driver.save_screenshot(self.screenshotDir + "/3_learnmore.png")

        log("Finding 'Supported Frameworks'")
        elem = self.driver.find_element_by_xpath('//*[@id="topBox"]/div[1]/div/a[2]')
        log("Clicking 'Supported Frameworks'")
        elem.click()
        log("Taking screenshot of 'Supported Framworks' page: '4_supportedframeworks.png'")
        self.driver.save_screenshot(self.screenshotDir + "/4_supportedframeworks.png")

        log("quitting")


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidAndroid)
    unittest.TextTestRunner(verbosity=2).run(suite)
{ "content_hash": "f7853e1946346456852df7251147804e", "timestamp": "", "source": "github", "line_count": 109, "max_line_length": 109, "avg_line_length": 41.26605504587156, "alnum_prop": 0.6438417074255225, "repo_name": "teppomalinen/testdroid-samples", "id": "751042718aa6ac07d473a2548efe5555654eb4b4", "size": "4657", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "appium/sample-scripts/python/testdroid_chrome.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C#", "bytes": "2816" }, { "name": "CSS", "bytes": "6444" }, { "name": "Cucumber", "bytes": "1721" }, { "name": "HTML", "bytes": "2399" }, { "name": "Java", "bytes": "140770" }, { "name": "JavaScript", "bytes": "102904" }, { "name": "Python", "bytes": "37006" }, { "name": "Ruby", "bytes": "29406" }, { "name": "Shell", "bytes": "2267" } ], "symlink_target": "" }
""" Implicit differentiation of ridge regression. ============================================= """ from absl import app import jax import jax.numpy as jnp from jaxopt import implicit_diff from jaxopt import linear_solve from jaxopt import OptaxSolver import optax from sklearn import datasets from sklearn import model_selection from sklearn import preprocessing def ridge_objective(params, l2reg, data): """Ridge objective function.""" X_tr, y_tr = data residuals = jnp.dot(X_tr, params) - y_tr return 0.5 * jnp.mean(residuals ** 2) + 0.5 * l2reg * jnp.sum(params ** 2) @implicit_diff.custom_root(jax.grad(ridge_objective)) def ridge_solver(init_params, l2reg, data): """Solve ridge regression by conjugate gradient.""" X_tr, y_tr = data def matvec(u): return jnp.dot(X_tr.T, jnp.dot(X_tr, u)) return linear_solve.solve_cg(matvec=matvec, b=jnp.dot(X_tr.T, y_tr), ridge=len(y_tr) * l2reg, init=init_params, maxiter=20) # Perhaps confusingly, theta is a parameter of the outer objective, # but l2reg = jnp.exp(theta) is an hyper-parameter of the inner objective. def outer_objective(theta, init_inner, data): """Validation loss.""" X_tr, X_val, y_tr, y_val = data # We use the bijective mapping l2reg = jnp.exp(theta) # both to optimize in log-space and to ensure positivity. l2reg = jnp.exp(theta) w_fit = ridge_solver(init_inner, l2reg, (X_tr, y_tr)) y_pred = jnp.dot(X_val, w_fit) loss_value = jnp.mean((y_pred - y_val) ** 2) # We return w_fit as auxiliary data. # Auxiliary data is stored in the optimizer state (see below). return loss_value, w_fit def main(argv): del argv # Prepare data. X, y = datasets.load_boston(return_X_y=True) X = preprocessing.normalize(X) # data = (X_tr, X_val, y_tr, y_val) data = model_selection.train_test_split(X, y, test_size=0.33, random_state=0) # Initialize solver. 
solver = OptaxSolver(opt=optax.adam(1e-2), fun=outer_objective, has_aux=True) theta = 1.0 init_w = jnp.zeros(X.shape[1]) state = solver.init_state(theta, init_inner=init_w, data=data) # Run outer loop. for _ in range(50): theta, state = solver.update(params=theta, state=state, init_inner=init_w, data=data) # The auxiliary data returned by the outer loss is stored in the state. init_w = state.aux print(f"[Step {state.iter_num}] Validation loss: {state.value:.3f}.") if __name__ == "__main__": app.run(main)
{ "content_hash": "ea9333a24074341f38fbf67a30a018ee", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 79, "avg_line_length": 32.175, "alnum_prop": 0.635975135975136, "repo_name": "google/jaxopt", "id": "36ae112f70ce6db64cdd03b7f77a618e365430a1", "size": "3150", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "examples/implicit_diff/ridge_reg_implicit_diff.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "670734" } ], "symlink_target": "" }
import shlex
import subprocess


def main(log_lines=None):
    """Print each contributor ordered by date of first commit, with counts.

    :param log_lines: optional iterable of ``"Name <email>"`` strings,
        newest first (the order ``git log`` prints them).  When ``None``,
        the lines are read from ``git log`` in the current repository.
    """
    if log_lines is None:
        args = shlex.split('git log --pretty=format:"%an <%ae>"')
        # universal_newlines (text) mode: without it, communicate() returns
        # bytes under Python 3 and the str split below raises TypeError.
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             universal_newlines=True)
        log_lines = p.communicate()[0].split('\n')

    unique = []   # author names, ordered by first appearance in history
    counts = {}   # name -> [commit count, email, email, ...]
    # git log prints newest first, so walk the lines in reverse to see each
    # author's oldest commit first.
    for line in reversed(list(log_lines)):
        line = line.strip()
        if not line:
            continue
        name, _, email = line.partition(' <')
        name = name.strip()
        email = '<' + email.strip()
        if name not in counts:
            unique.append(name)
        record = counts.setdefault(name, [0, email])
        record[0] += 1
        if email not in record[1:]:
            record.append(email)

    print('List of contributors ordered by date of the first commit,'
          ' with commit counts:')
    for name in unique:
        record = counts[name]
        print('%6d %s %s' % (record[0], name, ', '.join(record[1:])))


if __name__ == '__main__':
    main()
{ "content_hash": "7efea9acf9c090b467a7f7d92eea3822", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 69, "avg_line_length": 25.13157894736842, "alnum_prop": 0.5361256544502618, "repo_name": "RexFuzzle/sfepy", "id": "41a7461ea7ae3e2c3e1a870ecef56441ab8f5b38", "size": "977", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "script/show_authors.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "443017" }, { "name": "C++", "bytes": "2619" }, { "name": "GLSL", "bytes": "6058" }, { "name": "Makefile", "bytes": "184" }, { "name": "Python", "bytes": "2420488" }, { "name": "Shell", "bytes": "71" } ], "symlink_target": "" }
"""Ops for boosted_trees.""" from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_boosted_trees_ops from tensorflow.python.ops import resources # Re-exporting ops used by other modules. # pylint: disable=unused-import from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split as calculate_best_feature_split from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split_v2 as calculate_best_feature_split_v2 from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_deserialize as quantile_resource_deserialize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush from tensorflow.python.ops.gen_boosted_trees_ops import 
boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as quantile_resource_handle_op from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_calculate_best_feature_split as sparse_calculate_best_feature_split from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble_v2 as update_ensemble_v2 from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as is_quantile_resource_initialized # pylint: enable=unused-import from tensorflow.python.training import saver class PruningMode: """Class for working with Pruning modes.""" NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3) _map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING} @classmethod def from_str(cls, mode): if mode in cls._map: return cls._map[mode] else: raise ValueError( 'pruning_mode mode must be one of: {}. 
Found: {}'.format(', '.join( sorted(cls._map)), mode)) class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for QuantileAccumulator.""" def __init__(self, resource_handle, create_op, num_streams, name): self.resource_handle = resource_handle self._num_streams = num_streams self._create_op = create_op bucket_boundaries = get_bucket_boundaries(self.resource_handle, self._num_streams) slice_spec = '' specs = [] def make_save_spec(tensor, suffix): return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix) for i in range(self._num_streams): specs += [ make_save_spec(bucket_boundaries[i], '_bucket_boundaries_' + str(i)) ] super(QuantileAccumulatorSaveable, self).__init__(self.resource_handle, specs, name) def restore(self, restored_tensors, unused_tensor_shapes): bucket_boundaries = restored_tensors with ops.control_dependencies([self._create_op]): return quantile_resource_deserialize( self.resource_handle, bucket_boundaries=bucket_boundaries) class QuantileAccumulator(): """SaveableObject implementation for QuantileAccumulator. The bucket boundaries are serialized and deserialized from checkpointing. 
""" def __init__(self, epsilon, num_streams, num_quantiles, name=None, max_elements=None): self._eps = epsilon self._num_streams = num_streams self._num_quantiles = num_quantiles with ops.name_scope(name, 'QuantileAccumulator') as name: self._name = name self.resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() resources.register_resource(self.resource_handle, self._init_op, is_initialized_op) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, QuantileAccumulatorSaveable( self.resource_handle, self._init_op, self._num_streams, self.resource_handle.name)) def _create_resource(self): return quantile_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return create_quantile_stream_resource(self.resource_handle, self._eps, self._num_streams) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return is_quantile_resource_initialized(self.resource_handle) def _serialize_to_tensors(self): raise NotImplementedError('When the need arises, TF2 compatibility can be ' 'added by implementing this method, along with ' '_restore_from_tensors below.') def _restore_from_tensors(self, restored_tensors): raise NotImplementedError('When the need arises, TF2 compatibility can be ' 'added by implementing this method, along with ' '_serialize_to_tensors above.') def add_summaries(self, float_columns, example_weights): summaries = make_quantile_summaries(float_columns, example_weights, self._eps) summary_op = quantile_add_summaries(self.resource_handle, summaries) return summary_op def flush(self): return quantile_flush(self.resource_handle, self._num_quantiles) def get_bucket_boundaries(self): return get_bucket_boundaries(self.resource_handle, self._num_streams) class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for TreeEnsemble.""" def 
__init__(self, resource_handle, create_op, name): """Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under. """ stamp_token, serialized = ( gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)) # slice_spec is useful for saving a slice from a variable. # It's not meaningful the tree ensemble variable. So we just pass an empty # value. slice_spec = '' specs = [ saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized'), ] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self.resource_handle = resource_handle self._create_op = create_op def restore(self, restored_tensors, unused_restored_shapes): """Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable. """ with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1]) class TreeEnsemble(): """Creates TreeEnsemble resource.""" def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''): self._stamp_token = stamp_token self._serialized_proto = serialized_proto self._is_local = is_local with ops.name_scope(name, 'TreeEnsemble') as name: self._name = name self.resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() # Adds the variable to the savable list. 
if not is_local: ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, _TreeEnsembleSavable( self.resource_handle, self.initializer, self.resource_handle.name)) resources.register_resource( self.resource_handle, self.initializer, is_initialized_op, is_shared=not is_local) def _create_resource(self): return gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return gen_boosted_trees_ops.boosted_trees_create_ensemble( self.resource_handle, self._stamp_token, tree_ensemble_serialized=self._serialized_proto) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized( self.resource_handle) def _serialize_to_tensors(self): raise NotImplementedError('When the need arises, TF2 compatibility can be ' 'added by implementing this method, along with ' '_restore_from_tensors below.') def _restore_from_tensors(self, restored_tensors): raise NotImplementedError('When the need arises, TF2 compatibility can be ' 'added by implementing this method, along with ' '_serialize_to_tensors above.') def get_stamp_token(self): """Returns the current stamp token of the resource.""" stamp_token, _, _, _, _ = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) return stamp_token def get_states(self): """Returns states of the tree ensemble. Returns: stamp_token, num_trees, num_finalized_trees, num_attempted_layers and range of the nodes in the latest layer. """ (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range) = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) # Use identity to give names. 
return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity( num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range')) def serialize(self): """Serializes the ensemble into proto and returns the serialized proto. Returns: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. """ return gen_boosted_trees_ops.boosted_trees_serialize_ensemble( self.resource_handle) def deserialize(self, stamp_token, serialized_proto): """Deserialize the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies). """ return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self.resource_handle, stamp_token, serialized_proto)
{ "content_hash": "04f852351cfb7861cc59197c15846684", "timestamp": "", "source": "github", "line_count": 295, "max_line_length": 145, "avg_line_length": 44.23050847457627, "alnum_prop": 0.6861587982832618, "repo_name": "tensorflow/tensorflow-pywrap_saved_model", "id": "74f529a8dec1aaffc96761b7f50009a71c9f4e7a", "size": "13737", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/ops/boosted_trees_ops.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "36962" }, { "name": "C", "bytes": "1392153" }, { "name": "C#", "bytes": "13584" }, { "name": "C++", "bytes": "125860957" }, { "name": "CMake", "bytes": "182324" }, { "name": "Cython", "bytes": "5003" }, { "name": "Dockerfile", "bytes": "416133" }, { "name": "Go", "bytes": "2123155" }, { "name": "HTML", "bytes": "4686483" }, { "name": "Java", "bytes": "1074438" }, { "name": "Jupyter Notebook", "bytes": "792906" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "11347297" }, { "name": "Makefile", "bytes": "2760" }, { "name": "Objective-C", "bytes": "172666" }, { "name": "Objective-C++", "bytes": "300208" }, { "name": "Pawn", "bytes": "5552" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "42738981" }, { "name": "Roff", "bytes": "5034" }, { "name": "Ruby", "bytes": "9214" }, { "name": "Shell", "bytes": "621427" }, { "name": "Smarty", "bytes": "89545" }, { "name": "SourcePawn", "bytes": "14625" }, { "name": "Starlark", "bytes": "7720442" }, { "name": "Swift", "bytes": "78435" }, { "name": "Vim Snippet", "bytes": "58" } ], "symlink_target": "" }
""" A Shell Script """ __version__ = '0.0.0' #============================================================================= def main( argv ): """ Script execution entry point @param argv Arguments passed to the script @return Exit code (0 = success) """ # imports when using this as a script import argparse # create and configure an argument parser parser = argparse.ArgumentParser( description = 'A Shell Script' ) parser.add_argument( '-v', '--version', default = False, help = 'Display script version.', action = 'store_true' ) # parse the arguments args = parser.parse_args( argv[ 1 : ] ) # check for version request if args.version == True: print 'Version', __version__ # return success return 0 #============================================================================= if __name__ == "__main__": import sys sys.exit( main( sys.argv ) )
{ "content_hash": "b6f24077cc19c19b3e3c05474c91e7c4", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 78, "avg_line_length": 22.152173913043477, "alnum_prop": 0.4661432777232581, "repo_name": "zhester/hzpy", "id": "5a8a7b36f0c35af951b2457491c179bb09d0a5b3", "size": "1043", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "development/script.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "ApacheConf", "bytes": "37" }, { "name": "CSS", "bytes": "55" }, { "name": "HTML", "bytes": "309" }, { "name": "JavaScript", "bytes": "109" }, { "name": "Python", "bytes": "484663" } ], "symlink_target": "" }
import subprocess import os import threading import time import pytest from ffmpy import FFmpeg, FFRuntimeError, FFExecutableNotFoundError def test_invalid_executable_path(): ff = FFmpeg(executable='/tmp/foo/bar/ffmpeg') with pytest.raises(FFExecutableNotFoundError) as exc_info: ff.run() assert str(exc_info.value) == "Executable '/tmp/foo/bar/ffmpeg' not found" def test_no_redirection(): global_options = '--stdin none --stdout oneline --stderr multiline --exit-code 0' ff = FFmpeg(global_options=global_options) stdout, stderr = ff.run() assert stdout is None assert stderr is None def test_redirect_to_devnull(): global_options = '--stdin none --stdout oneline --stderr multiline --exit-code 0' ff = FFmpeg(global_options=global_options) devnull = open(os.devnull, 'wb') stdout, stderr = ff.run(stdout=devnull, stderr=devnull) assert stdout is None assert stderr is None def test_redirect_to_pipe(): global_options = '--stdin none --stdout oneline --stderr multiline --exit-code 0' ff = FFmpeg(global_options=global_options) stdout, stderr = ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert stdout == b'This is printed to stdout' assert stderr == b'These are\nmultiple lines\nprinted to stderr' def test_input(): global_options = '--stdin pipe --stdout oneline --stderr multiline --exit-code 0' ff = FFmpeg(global_options=global_options) stdout, stderr = ff.run( input_data=b'my input data', stdout=subprocess.PIPE, stderr=subprocess.PIPE ) assert stdout == b'my input data\nThis is printed to stdout' assert stderr == b'These are\nmultiple lines\nprinted to stderr' def test_non_zero_exitcode(): global_options = '--stdin none --stdout multiline --stderr multiline --exit-code 42' ff = FFmpeg(global_options=global_options) with pytest.raises(FFRuntimeError) as exc_info: ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert exc_info.value.cmd == ( "ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42" ) assert exc_info.value.exit_code == 42 assert 
exc_info.value.stdout == b'These are\nmultiple lines\nprinted to stdout' assert exc_info.value.stderr == b'These are\nmultiple lines\nprinted to stderr' assert str(exc_info.value) == ( "`ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42` " 'exited with status 42\n\n' 'STDOUT:\n' 'These are\n' 'multiple lines\n' 'printed to stdout\n\n' 'STDERR:\n' 'These are\n' 'multiple lines\n' 'printed to stderr' ) def test_non_zero_exitcode_no_stderr(): global_options = '--stdin none --stdout multiline --stderr none --exit-code 42' ff = FFmpeg(global_options=global_options) with pytest.raises(FFRuntimeError) as exc_info: ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert exc_info.value.cmd == ( "ffmpeg --stdin none --stdout multiline --stderr none --exit-code 42" ) assert exc_info.value.exit_code == 42 assert exc_info.value.stdout == b'These are\nmultiple lines\nprinted to stdout' assert exc_info.value.stderr == b'' assert str(exc_info.value) == ( "`ffmpeg --stdin none --stdout multiline --stderr none --exit-code 42` " 'exited with status 42\n\n' 'STDOUT:\n' 'These are\n' 'multiple lines\n' 'printed to stdout\n\n' 'STDERR:\n' ) def test_non_zero_exitcode_no_stdout(): global_options = '--stdin none --stdout none --stderr multiline --exit-code 42' ff = FFmpeg(global_options=global_options) with pytest.raises(FFRuntimeError) as exc_info: ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert exc_info.value.cmd == ( "ffmpeg --stdin none --stdout none --stderr multiline --exit-code 42" ) assert exc_info.value.exit_code == 42 assert exc_info.value.stdout == b'' assert exc_info.value.stderr == b'These are\nmultiple lines\nprinted to stderr' assert str(exc_info.value) == ( "`ffmpeg --stdin none --stdout none --stderr multiline --exit-code 42` " 'exited with status 42\n\n' 'STDOUT:\n' '\n\n' 'STDERR:\n' 'These are\n' 'multiple lines\n' 'printed to stderr' ) def test_non_zero_exitcode_no_stdout_and_stderr(): global_options = '--stdin none --stdout 
none --stderr none --exit-code 42' ff = FFmpeg(global_options=global_options) with pytest.raises(FFRuntimeError) as exc_info: ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert exc_info.value.cmd == ( "ffmpeg --stdin none --stdout none --stderr none --exit-code 42" ) assert exc_info.value.exit_code == 42 assert exc_info.value.stdout == b'' assert exc_info.value.stderr == b'' assert str(exc_info.value) == ( "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` " 'exited with status 42\n\n' 'STDOUT:\n' '\n\n' 'STDERR:\n' ) def test_raise_exception_with_stdout_stderr_none(): global_options = '--stdin none --stdout none --stderr none --exit-code 42' ff = FFmpeg(global_options=global_options) with pytest.raises(FFRuntimeError) as exc_info: ff.run() assert str(exc_info.value) == ( "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` " 'exited with status 42\n\n' 'STDOUT:\n' '\n\n' 'STDERR:\n' ) def test_terminate_process(): global_options = '--long-run' ff = FFmpeg(global_options=global_options) thread_1 = threading.Thread(target=ff.run) thread_1.start() while not ff.process: time.sleep(0.05) print(ff.process.returncode) ff.process.terminate() thread_1.join() assert ff.process.returncode == -15
{ "content_hash": "20734ada6ab70f2ea45f31cc82b13a36", "timestamp": "", "source": "github", "line_count": 179, "max_line_length": 88, "avg_line_length": 33.43016759776536, "alnum_prop": 0.6457219251336899, "repo_name": "wchill/ffmpy3", "id": "f155495347a40177b312b6626575c147c791bd11", "size": "5984", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/test_cmd_execution.py", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "1271" }, { "name": "Python", "bytes": "21961" } ], "symlink_target": "" }
import horizon from horizon.dashboards.nova import dashboard class Images(horizon.Panel): name = "Images" slug = 'images' dashboard.Nova.register(Images)
{ "content_hash": "a7d18c947a80a5abb5a4e6ce9ad5b67d", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 45, "avg_line_length": 16.6, "alnum_prop": 0.7409638554216867, "repo_name": "asomya/test", "id": "13302efc6745d56028120bf7001a979744ea17aa", "size": "975", "binary": false, "copies": "9", "ref": "refs/heads/quantum-integration", "path": "horizon/dashboards/nova/images_and_snapshots/images/panel.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "33751" }, { "name": "JavaScript", "bytes": "140887" }, { "name": "Python", "bytes": "839260" }, { "name": "Shell", "bytes": "11581" } ], "symlink_target": "" }
import pika import time connection = pika.BlockingConnection( pika.ConnectionParameters( "localhost")) channel = connection.channel() channel.queue_declare(queue="hello", durable=True) print(" [*] Waiting for messages. To exit press CTRL+C") def callback(ch, method, properties, body): print(" [x] Received %r" % (body,)) time.sleep( body.count(".") ) print(" [x] Done") ch.basic_ack(delivery_tag = method.delivery_tag) channel.basic_qos(prefetch_count=1) channel.basic_consume(callback, queue="hello") channel.start_consuming()
{ "content_hash": "62d63dd20ca30e7811b9cac87aaf91d5", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 56, "avg_line_length": 25.17391304347826, "alnum_prop": 0.6787564766839378, "repo_name": "tongxindao/shiyanlou", "id": "ddb26cb072d921a903d06f580fd1a0dc9d0db5c1", "size": "601", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shiyanlou_cs630/chapter_02/worker.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "265212" }, { "name": "C++", "bytes": "686" }, { "name": "CSS", "bytes": "261341" }, { "name": "HTML", "bytes": "945024" }, { "name": "Java", "bytes": "115" }, { "name": "JavaScript", "bytes": "475129" }, { "name": "Makefile", "bytes": "750" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "529824" }, { "name": "Shell", "bytes": "384" } ], "symlink_target": "" }
"""The tests for the automation component.""" import asyncio import logging from unittest.mock import Mock, patch import pytest from homeassistant.components import logbook import homeassistant.components.automation as automation from homeassistant.components.automation import ( ATTR_SOURCE, DOMAIN, EVENT_AUTOMATION_RELOADED, EVENT_AUTOMATION_TRIGGERED, SERVICE_TRIGGER, ) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_NAME, EVENT_HOMEASSISTANT_STARTED, SERVICE_RELOAD, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, ) from homeassistant.core import Context, CoreState, State, callback from homeassistant.exceptions import HomeAssistantError, Unauthorized from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.common import ( assert_setup_component, async_capture_events, async_mock_service, mock_restore_cache, ) from tests.components.logbook.test_init import MockLazyEventPartialState @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_service_data_not_a_dict(hass, calls): """Test service data not dict.""" with assert_setup_component(0, automation.DOMAIN): assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "data": 100}, } }, ) async def test_service_specify_data(hass, calls): """Test service data.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data_template": { "some": "{{ trigger.platform }} - " "{{ trigger.event.event_type }}" }, }, } }, ) time = dt_util.utcnow() with patch("homeassistant.helpers.script.utcnow", return_value=time): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert 
len(calls) == 1 assert calls[0].data["some"] == "event - test_event" state = hass.states.get("automation.hello") assert state is not None assert state.attributes.get("last_triggered") == time async def test_service_specify_entity_id(hass, calls): """Test service data.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID) async def test_service_specify_entity_id_list(hass, calls): """Test service data.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "entity_id": ["hello.world", "hello.world2"], }, } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert ["hello.world", "hello.world2"] == calls[0].data.get(ATTR_ENTITY_ID) async def test_two_triggers(hass, calls): """Test triggers.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": [ {"platform": "event", "event_type": "test_event"}, {"platform": "state", "entity_id": "test.entity"}, ], "action": {"service": "test.automation"}, } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set("test.entity", "hello") await hass.async_block_till_done() assert len(calls) == 2 async def test_trigger_service_ignoring_condition(hass, caplog, calls): """Test triggers.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "test", "trigger": [{"platform": "event", "event_type": "test_event"}], "condition": { "condition": "numeric_state", "entity_id": "non.existing", "above": "1", 
}, "action": {"service": "test.automation"}, } }, ) caplog.clear() caplog.set_level(logging.WARNING) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 assert len(caplog.record_tuples) == 1 assert caplog.record_tuples[0][1] == logging.WARNING await hass.services.async_call( "automation", "trigger", {"entity_id": "automation.test"}, blocking=True ) assert len(calls) == 1 await hass.services.async_call( "automation", "trigger", {"entity_id": "automation.test", "skip_condition": True}, blocking=True, ) assert len(calls) == 2 await hass.services.async_call( "automation", "trigger", {"entity_id": "automation.test", "skip_condition": False}, blocking=True, ) assert len(calls) == 2 async def test_two_conditions_with_and(hass, calls): """Test two and conditions.""" entity_id = "test.entity" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": [{"platform": "event", "event_type": "test_event"}], "condition": [ {"condition": "state", "entity_id": entity_id, "state": "100"}, { "condition": "numeric_state", "entity_id": entity_id, "below": 150, }, ], "action": {"service": "test.automation"}, } }, ) hass.states.async_set(entity_id, 100) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set(entity_id, 101) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set(entity_id, 151) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_shorthand_conditions_template(hass, calls): """Test shorthand nation form in conditions.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": [{"platform": "event", "event_type": "test_event"}], "condition": "{{ is_state('test.entity', 'hello') }}", "action": {"service": "test.automation"}, } }, ) hass.states.async_set("test.entity", "hello") 
hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set("test.entity", "goodbye") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_automation_list_setting(hass, calls): """Event is not a valid condition.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation"}, }, { "trigger": {"platform": "event", "event_type": "test_event_2"}, "action": {"service": "test.automation"}, }, ] }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.bus.async_fire("test_event_2") await hass.async_block_till_done() assert len(calls) == 2 async def test_automation_calling_two_actions(hass, calls): """Test if we can call two actions from automation async definition.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "action": [ {"service": "test.automation", "data": {"position": 0}}, {"service": "test.automation", "data": {"position": 1}}, ], } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 assert calls[0].data["position"] == 0 assert calls[1].data["position"] == 1 async def test_shared_context(hass, calls): """Test that the shared context is passed down the chain.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"event": "test_event2"}, }, { "alias": "bye", "trigger": {"platform": "event", "event_type": "test_event2"}, "action": {"service": "test.automation"}, }, ] }, ) context = Context() first_automation_listener = Mock() event_mock = Mock() hass.bus.async_listen("test_event2", first_automation_listener) 
hass.bus.async_listen(EVENT_AUTOMATION_TRIGGERED, event_mock) hass.bus.async_fire("test_event", context=context) await hass.async_block_till_done() # Ensure events was fired assert first_automation_listener.call_count == 1 assert event_mock.call_count == 2 # Verify automation triggered evenet for 'hello' automation args, _ = event_mock.call_args_list[0] first_trigger_context = args[0].context assert first_trigger_context.parent_id == context.id # Ensure event data has all attributes set assert args[0].data.get(ATTR_NAME) is not None assert args[0].data.get(ATTR_ENTITY_ID) is not None assert args[0].data.get(ATTR_SOURCE) is not None # Ensure context set correctly for event fired by 'hello' automation args, _ = first_automation_listener.call_args assert args[0].context is first_trigger_context # Ensure the 'hello' automation state has the right context state = hass.states.get("automation.hello") assert state is not None assert state.context is first_trigger_context # Verify automation triggered evenet for 'bye' automation args, _ = event_mock.call_args_list[1] second_trigger_context = args[0].context assert second_trigger_context.parent_id == first_trigger_context.id # Ensure event data has all attributes set assert args[0].data.get(ATTR_NAME) is not None assert args[0].data.get(ATTR_ENTITY_ID) is not None assert args[0].data.get(ATTR_SOURCE) is not None # Ensure the service call from the second automation # shares the same context assert len(calls) == 1 assert calls[0].context is second_trigger_context async def test_services(hass, calls): """Test the automation services for turning entities on/off.""" entity_id = "automation.hello" assert hass.states.get(entity_id) is None assert not automation.is_on(hass, entity_id) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation"}, } }, ) assert hass.states.get(entity_id) is 
not None assert automation.is_on(hass, entity_id) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, { ATTR_ENTITY_ID: entity_id, }, blocking=True, ) assert not automation.is_on(hass, entity_id) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 await hass.services.async_call( automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True ) assert automation.is_on(hass, entity_id) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 await hass.services.async_call( automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True, ) assert not automation.is_on(hass, entity_id) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 await hass.services.async_call( automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True ) await hass.services.async_call( automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True ) assert len(calls) == 3 await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id}, blocking=True ) await hass.services.async_call( automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True ) assert len(calls) == 4 await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id}, blocking=True ) assert automation.is_on(hass, entity_id) async def test_reload_config_service(hass, calls, hass_admin_user, hass_read_only_user): """Test the reload config service.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data_template": {"event": "{{ trigger.event.event_type }}"}, }, } }, ) assert hass.states.get("automation.hello") is not 
None assert hass.states.get("automation.bye") is None listeners = hass.bus.async_listeners() assert listeners.get("test_event") == 1 assert listeners.get("test_event2") is None hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data.get("event") == "test_event" test_reload_event = async_capture_events(hass, EVENT_AUTOMATION_RELOADED) with patch( "homeassistant.config.load_yaml_config_file", autospec=True, return_value={ automation.DOMAIN: { "alias": "bye", "trigger": {"platform": "event", "event_type": "test_event2"}, "action": { "service": "test.automation", "data_template": {"event": "{{ trigger.event.event_type }}"}, }, } }, ): with pytest.raises(Unauthorized): await hass.services.async_call( automation.DOMAIN, SERVICE_RELOAD, context=Context(user_id=hass_read_only_user.id), blocking=True, ) await hass.services.async_call( automation.DOMAIN, SERVICE_RELOAD, context=Context(user_id=hass_admin_user.id), blocking=True, ) # De-flake ?! 
await hass.async_block_till_done() assert len(test_reload_event) == 1 assert hass.states.get("automation.hello") is None assert hass.states.get("automation.bye") is not None listeners = hass.bus.async_listeners() assert listeners.get("test_event") is None assert listeners.get("test_event2") == 1 hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 hass.bus.async_fire("test_event2") await hass.async_block_till_done() assert len(calls) == 2 assert calls[1].data.get("event") == "test_event2" async def test_reload_config_when_invalid_config(hass, calls): """Test the reload config service handling invalid config.""" with assert_setup_component(1, automation.DOMAIN): assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data_template": {"event": "{{ trigger.event.event_type }}"}, }, } }, ) assert hass.states.get("automation.hello") is not None hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data.get("event") == "test_event" with patch( "homeassistant.config.load_yaml_config_file", autospec=True, return_value={automation.DOMAIN: "not valid"}, ): await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True) assert hass.states.get("automation.hello") is None hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_reload_config_handles_load_fails(hass, calls): """Test the reload config service.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data_template": {"event": "{{ trigger.event.event_type }}"}, }, } }, ) assert hass.states.get("automation.hello") is not None hass.bus.async_fire("test_event") 
await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data.get("event") == "test_event" with patch( "homeassistant.config.load_yaml_config_file", side_effect=HomeAssistantError("bla"), ): await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True) assert hass.states.get("automation.hello") is not None hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 @pytest.mark.parametrize("service", ["turn_off_stop", "turn_off_no_stop", "reload"]) async def test_automation_stops(hass, calls, service): """Test that turning off / reloading stops any running actions as appropriate.""" entity_id = "automation.hello" test_entity = "test.entity" config = { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": [ {"event": "running"}, {"wait_template": "{{ is_state('test.entity', 'goodbye') }}"}, {"service": "test.automation"}, ], } } assert await async_setup_component(hass, automation.DOMAIN, config) running = asyncio.Event() @callback def running_cb(event): running.set() hass.bus.async_listen_once("running", running_cb) hass.states.async_set(test_entity, "hello") hass.bus.async_fire("test_event") await running.wait() if service == "turn_off_stop": await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id}, blocking=True, ) elif service == "turn_off_no_stop": await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id, automation.CONF_STOP_ACTIONS: False}, blocking=True, ) else: with patch( "homeassistant.config.load_yaml_config_file", autospec=True, return_value=config, ): await hass.services.async_call( automation.DOMAIN, SERVICE_RELOAD, blocking=True ) hass.states.async_set(test_entity, "goodbye") await hass.async_block_till_done() assert len(calls) == (1 if service == "turn_off_no_stop" else 0) async def test_automation_restore_state(hass): """Ensure states are restored 
on startup.""" time = dt_util.utcnow() mock_restore_cache( hass, ( State("automation.hello", STATE_ON), State("automation.bye", STATE_OFF, {"last_triggered": time}), ), ) config = { automation.DOMAIN: [ { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event_hello"}, "action": {"service": "test.automation"}, }, { "alias": "bye", "trigger": {"platform": "event", "event_type": "test_event_bye"}, "action": {"service": "test.automation"}, }, ] } assert await async_setup_component(hass, automation.DOMAIN, config) state = hass.states.get("automation.hello") assert state assert state.state == STATE_ON assert state.attributes["last_triggered"] is None state = hass.states.get("automation.bye") assert state assert state.state == STATE_OFF assert state.attributes["last_triggered"] == time calls = async_mock_service(hass, "test", "automation") assert automation.is_on(hass, "automation.bye") is False hass.bus.async_fire("test_event_bye") await hass.async_block_till_done() assert len(calls) == 0 assert automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event_hello") await hass.async_block_till_done() assert len(calls) == 1 async def test_initial_value_off(hass): """Test initial value off.""" calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "initial_state": "off", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert not automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 async def test_initial_value_on(hass): """Test initial value on.""" hass.state = CoreState.not_running calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "initial_state": "on", "trigger": 
{"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "entity_id": ["hello.world", "hello.world2"], }, } }, ) assert automation.is_on(hass, "automation.hello") await hass.async_start() await hass.async_block_till_done() hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_initial_value_off_but_restore_on(hass): """Test initial value off and restored state is turned on.""" hass.state = CoreState.not_running calls = async_mock_service(hass, "test", "automation") mock_restore_cache(hass, (State("automation.hello", STATE_ON),)) await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "initial_state": "off", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert not automation.is_on(hass, "automation.hello") await hass.async_start() hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 async def test_initial_value_on_but_restore_off(hass): """Test initial value on and restored state is turned off.""" calls = async_mock_service(hass, "test", "automation") mock_restore_cache(hass, (State("automation.hello", STATE_OFF),)) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "initial_state": "on", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_no_initial_value_and_restore_off(hass): """Test initial value off and restored state is turned on.""" calls = async_mock_service(hass, "test", "automation") mock_restore_cache(hass, (State("automation.hello", STATE_OFF),)) assert await async_setup_component( hass, automation.DOMAIN, { 
automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert not automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 async def test_automation_is_on_if_no_initial_state_or_restore(hass): """Test initial value is on when no initial state or restored state.""" calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 async def test_automation_not_trigger_on_bootstrap(hass): """Test if automation is not trigger on bootstrap.""" hass.state = CoreState.not_running calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() assert automation.is_on(hass, "automation.hello") hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID) async def test_automation_bad_trigger(hass, caplog): """Test bad trigger configuration.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": 
"automation"}, "action": [], } }, ) assert "Integration 'automation' does not provide trigger support." in caplog.text async def test_automation_with_error_in_script(hass, caplog): """Test automation with an error in script.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert "Service not found" in caplog.text assert "Traceback" not in caplog.text async def test_automation_with_error_in_script_2(hass, caplog): """Test automation with an error in script.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": None, "entity_id": "hello.world"}, } }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert "string value is None" in caplog.text async def test_automation_restore_last_triggered_with_initial_state(hass): """Ensure last_triggered is restored, even when initial state is set.""" time = dt_util.utcnow() mock_restore_cache( hass, ( State("automation.hello", STATE_ON), State("automation.bye", STATE_ON, {"last_triggered": time}), State("automation.solong", STATE_OFF, {"last_triggered": time}), ), ) config = { automation.DOMAIN: [ { "alias": "hello", "initial_state": "off", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation"}, }, { "alias": "bye", "initial_state": "off", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation"}, }, { "alias": "solong", "initial_state": "on", "trigger": {"platform": "event", "event_type": "test_event"}, "action": {"service": "test.automation"}, }, ] } await async_setup_component(hass, automation.DOMAIN, config) state = 
hass.states.get("automation.hello") assert state assert state.state == STATE_OFF assert state.attributes["last_triggered"] is None state = hass.states.get("automation.bye") assert state assert state.state == STATE_OFF assert state.attributes["last_triggered"] == time state = hass.states.get("automation.solong") assert state assert state.state == STATE_ON assert state.attributes["last_triggered"] == time async def test_extraction_functions(hass): """Test extraction functions.""" assert await async_setup_component( hass, DOMAIN, { DOMAIN: [ { "alias": "test1", "trigger": {"platform": "state", "entity_id": "sensor.trigger_1"}, "condition": { "condition": "state", "entity_id": "light.condition_state", "state": "on", }, "action": [ { "service": "test.script", "data": {"entity_id": "light.in_both"}, }, { "service": "test.script", "data": {"entity_id": "light.in_first"}, }, { "domain": "light", "device_id": "device-in-both", "entity_id": "light.bla", "type": "turn_on", }, ], }, { "alias": "test2", "trigger": { "platform": "device", "domain": "light", "type": "turned_on", "entity_id": "light.trigger_2", "device_id": "trigger-device-2", }, "condition": { "condition": "device", "device_id": "condition-device", "domain": "light", "type": "is_on", "entity_id": "light.bla", }, "action": [ { "service": "test.script", "data": {"entity_id": "light.in_both"}, }, { "condition": "state", "entity_id": "sensor.condition", "state": "100", }, {"scene": "scene.hello"}, { "domain": "light", "device_id": "device-in-both", "entity_id": "light.bla", "type": "turn_on", }, { "domain": "light", "device_id": "device-in-last", "entity_id": "light.bla", "type": "turn_on", }, ], }, ] }, ) assert set(automation.automations_with_entity(hass, "light.in_both")) == { "automation.test1", "automation.test2", } assert set(automation.entities_in_automation(hass, "automation.test1")) == { "sensor.trigger_1", "light.condition_state", "light.in_both", "light.in_first", } assert 
set(automation.automations_with_device(hass, "device-in-both")) == { "automation.test1", "automation.test2", } assert set(automation.devices_in_automation(hass, "automation.test2")) == { "trigger-device-2", "condition-device", "device-in-both", "device-in-last", } async def test_logbook_humanify_automation_triggered_event(hass): """Test humanifying Automation Trigger event.""" hass.config.components.add("recorder") await async_setup_component(hass, automation.DOMAIN, {}) await async_setup_component(hass, "logbook", {}) entity_attr_cache = logbook.EntityAttributeCache(hass) event1, event2 = list( logbook.humanify( hass, [ MockLazyEventPartialState( EVENT_AUTOMATION_TRIGGERED, {ATTR_ENTITY_ID: "automation.hello", ATTR_NAME: "Hello Automation"}, ), MockLazyEventPartialState( EVENT_AUTOMATION_TRIGGERED, { ATTR_ENTITY_ID: "automation.bye", ATTR_NAME: "Bye Automation", ATTR_SOURCE: "source of trigger", }, ), ], entity_attr_cache, {}, ) ) assert event1["name"] == "Hello Automation" assert event1["domain"] == "automation" assert event1["message"] == "has been triggered" assert event1["entity_id"] == "automation.hello" assert event2["name"] == "Bye Automation" assert event2["domain"] == "automation" assert event2["message"] == "has been triggered by source of trigger" assert event2["entity_id"] == "automation.bye" async def test_automation_variables(hass, caplog): """Test automation variables.""" calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "variables": { "test_var": "defined_in_config", "event_type": "{{ trigger.event.event_type }}", "this_variables": "{{this.entity_id}}", }, "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data": { "value": "{{ test_var }}", "event_type": "{{ event_type }}", "this_template": "{{this.entity_id}}", "this_variables": "{{this_variables}}", }, }, }, { "variables": { "test_var": 
"defined_in_config", }, "trigger": {"platform": "event", "event_type": "test_event_2"}, "condition": { "condition": "template", "value_template": "{{ trigger.event.data.pass_condition }}", }, "action": { "service": "test.automation", }, }, { "variables": { "test_var": "{{ trigger.event.data.break + 1 }}", }, "trigger": {"platform": "event", "event_type": "test_event_3"}, "action": { "service": "test.automation", }, }, ] }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["value"] == "defined_in_config" assert calls[0].data["event_type"] == "test_event" # Verify this available to all templates assert calls[0].data.get("this_template") == "automation.automation_0" # Verify this available during variables rendering assert calls[0].data.get("this_variables") == "automation.automation_0" assert "Error rendering variables" not in caplog.text hass.bus.async_fire("test_event_2") await hass.async_block_till_done() assert len(calls) == 1 hass.bus.async_fire("test_event_2", {"pass_condition": True}) await hass.async_block_till_done() assert len(calls) == 2 assert "Error rendering variables" not in caplog.text hass.bus.async_fire("test_event_3") await hass.async_block_till_done() assert len(calls) == 2 assert "Error rendering variables" in caplog.text hass.bus.async_fire("test_event_3", {"break": 0}) await hass.async_block_till_done() assert len(calls) == 3 async def test_automation_trigger_variables(hass, caplog): """Test automation trigger variables.""" calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "variables": { "event_type": "{{ trigger.event.event_type }}", }, "trigger_variables": { "test_var": "defined_in_config", }, "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data": { "value": "{{ test_var }}", "event_type": "{{ event_type }}", }, }, }, { "variables": 
{ "event_type": "{{ trigger.event.event_type }}", "test_var": "overridden_in_config", }, "trigger_variables": { "test_var": "defined_in_config", "this_trigger_variables": "{{this.entity_id}}", }, "trigger": {"platform": "event", "event_type": "test_event_2"}, "action": { "service": "test.automation", "data": { "value": "{{ test_var }}", "event_type": "{{ event_type }}", "this_template": "{{this.entity_id}}", "this_trigger_variables": "{{this_trigger_variables}}", }, }, }, ] }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["value"] == "defined_in_config" assert calls[0].data["event_type"] == "test_event" hass.bus.async_fire("test_event_2") await hass.async_block_till_done() assert len(calls) == 2 assert calls[1].data["value"] == "overridden_in_config" assert calls[1].data["event_type"] == "test_event_2" # Verify this available to all templates assert calls[1].data.get("this_template") == "automation.automation_1" # Verify this available during trigger variables rendering assert calls[1].data.get("this_trigger_variables") == "automation.automation_1" assert "Error rendering variables" not in caplog.text async def test_automation_bad_trigger_variables(hass, caplog): """Test automation trigger variables accessing hass is rejected.""" calls = async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger_variables": { "test_var": "{{ states('foo.bar') }}", }, "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", }, }, ] }, ) hass.bus.async_fire("test_event") assert "Use of 'states' is not supported in limited templates" in caplog.text await hass.async_block_till_done() assert len(calls) == 0 async def test_automation_this_var_always(hass, caplog): """Test automation always has reference to this, even with no variable or trigger variables configured.""" calls = 
async_mock_service(hass, "test", "automation") assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data": { "this_template": "{{this.entity_id}}", }, }, }, ] }, ) hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # Verify this available to all templates assert calls[0].data.get("this_template") == "automation.automation_0" assert "Error rendering variables" not in caplog.text async def test_blueprint_automation(hass, calls): """Test blueprint automation.""" assert await async_setup_component( hass, "automation", { "automation": { "use_blueprint": { "path": "test_event_service.yaml", "input": { "trigger_event": "blueprint_event", "service_to_call": "test.automation", }, } } }, ) hass.bus.async_fire("blueprint_event") await hass.async_block_till_done() assert len(calls) == 1 assert automation.entities_in_automation(hass, "automation.automation_0") == [ "light.kitchen" ] async def test_blueprint_automation_bad_config(hass, caplog): """Test blueprint automation with bad inputs.""" assert await async_setup_component( hass, "automation", { "automation": { "use_blueprint": { "path": "test_event_service.yaml", "input": { "trigger_event": "blueprint_event", "service_to_call": {"dict": "not allowed"}, }, } } }, ) assert "generated invalid automation" in caplog.text async def test_trigger_service(hass, calls): """Test the automation trigger service.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event"}, "action": { "service": "test.automation", "data_template": {"trigger": "{{ trigger }}"}, }, } }, ) context = Context() await hass.services.async_call( "automation", "trigger", {"entity_id": "automation.hello"}, blocking=True, context=context, ) assert len(calls) == 1 assert 
calls[0].data.get("trigger") == {"platform": None} assert calls[0].context.parent_id is context.id async def test_trigger_condition_implicit_id(hass, calls): """Test triggers.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": [ {"platform": "event", "event_type": "test_event1"}, {"platform": "event", "event_type": "test_event2"}, {"platform": "event", "event_type": "test_event3"}, ], "action": { "choose": [ { "conditions": {"condition": "trigger", "id": [0, "2"]}, "sequence": { "service": "test.automation", "data": {"param": "one"}, }, }, { "conditions": {"condition": "trigger", "id": "1"}, "sequence": { "service": "test.automation", "data": {"param": "two"}, }, }, ] }, } }, ) hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 1 assert calls[-1].data.get("param") == "one" hass.bus.async_fire("test_event2") await hass.async_block_till_done() assert len(calls) == 2 assert calls[-1].data.get("param") == "two" hass.bus.async_fire("test_event3") await hass.async_block_till_done() assert len(calls) == 3 assert calls[-1].data.get("param") == "one" async def test_trigger_condition_explicit_id(hass, calls): """Test triggers.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": [ {"platform": "event", "event_type": "test_event1", "id": "one"}, {"platform": "event", "event_type": "test_event2", "id": "two"}, ], "action": { "choose": [ { "conditions": {"condition": "trigger", "id": "one"}, "sequence": { "service": "test.automation", "data": {"param": "one"}, }, }, { "conditions": {"condition": "trigger", "id": "two"}, "sequence": { "service": "test.automation", "data": {"param": "two"}, }, }, ] }, } }, ) hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 1 assert calls[-1].data.get("param") == "one" hass.bus.async_fire("test_event2") await hass.async_block_till_done() assert len(calls) == 2 assert 
calls[-1].data.get("param") == "two"
{ "content_hash": "fa87024a7a6329a04f23f65c9eaa555b", "timestamp": "", "source": "github", "line_count": 1544, "max_line_length": 110, "avg_line_length": 32.729922279792746, "alnum_prop": 0.5140991392104483, "repo_name": "FreekingDean/home-assistant", "id": "214b2ea20e8a0d2fef0a1363397a20a81078493e", "size": "50535", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "tests/components/automation/test_init.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2335" }, { "name": "Python", "bytes": "36746639" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
"""Test exporting functions.""" # Authors: MNE Developers # # License: BSD-3-Clause from datetime import datetime, timezone from mne.io import RawArray from mne.io.meas_info import create_info from pathlib import Path import os.path as op import pytest import numpy as np from numpy.testing import (assert_allclose, assert_array_almost_equal, assert_array_equal) from mne import (read_epochs_eeglab, Epochs, read_evokeds, read_evokeds_mff, Annotations) from mne.datasets import testing, misc from mne.export import export_evokeds, export_evokeds_mff from mne.io import read_raw_fif, read_raw_eeglab, read_raw_edf from mne.utils import (_check_eeglabio_installed, requires_version, object_diff, _check_edflib_installed, _resource_path) from mne.tests.test_epochs import _get_data base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') fname_evoked = op.join(base_dir, 'test-ave.fif') data_path = testing.data_path(download=False) egi_evoked_fname = op.join(data_path, 'EGI', 'test_egi_evoked.mff') misc_path = misc.data_path(download=False) @requires_version('pymatreader') @pytest.mark.skipif(not _check_eeglabio_installed(strict=False), reason='eeglabio not installed') def test_export_raw_eeglab(tmp_path): """Test saving a Raw instance to EEGLAB's set format.""" fname = (Path(__file__).parent.parent.parent / "io" / "tests" / "data" / "test_raw.fif") raw = read_raw_fif(fname, preload=True) raw.apply_proj() temp_fname = op.join(str(tmp_path), 'test.set') raw.export(temp_fname) raw.drop_channels([ch for ch in ['epoc'] if ch in raw.ch_names]) raw_read = read_raw_eeglab(temp_fname, preload=True) assert raw.ch_names == raw_read.ch_names cart_coords = np.array([d['loc'][:3] for d in raw.info['chs']]) # just xyz cart_coords_read = np.array([d['loc'][:3] for d in raw_read.info['chs']]) assert_allclose(cart_coords, cart_coords_read) assert_allclose(raw.times, raw_read.times) assert_allclose(raw.get_data(), raw_read.get_data()) # test overwrite with 
pytest.raises(FileExistsError, match='Destination file exists'): raw.export(temp_fname, overwrite=False) raw.export(temp_fname, overwrite=True) # test pathlib.Path files raw.export(Path(temp_fname), overwrite=True) # test warning with unapplied projectors raw = read_raw_fif(fname, preload=True) with pytest.warns(RuntimeWarning, match='Raw instance has unapplied projectors.'): raw.export(temp_fname, overwrite=True) @pytest.mark.skipif(not _check_edflib_installed(strict=False), reason='edflib-python not installed') def test_double_export_edf(tmp_path): """Test exporting an EDF file multiple times.""" rng = np.random.RandomState(123456) format = 'edf' ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', 'eog', 'ecg', 'emg', 'dbs', 'bio'] info = create_info(len(ch_types), sfreq=1000, ch_types=ch_types) data = rng.random(size=(len(ch_types), 1000)) * 1e-5 # include subject info and measurement date info['subject_info'] = dict(first_name='mne', last_name='python', birthday=(1992, 1, 20), sex=1, hand=3) raw = RawArray(data, info) # export once temp_fname = tmp_path / f'test.{format}' raw.export(temp_fname, add_ch_type=True) raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) # export again raw_read.load_data() raw_read.export(temp_fname, add_ch_type=True, overwrite=True) raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) # stim channel should be dropped raw.drop_channels('2') assert raw.ch_names == raw_read.ch_names # only compare the original length, since extra zeros are appended orig_raw_len = len(raw) assert_array_almost_equal( raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) assert_allclose( raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) # check channel types except for 'bio', which loses its type orig_ch_types = raw.get_channel_types() read_ch_types = raw_read.get_channel_types() assert_array_equal(orig_ch_types, read_ch_types) @pytest.mark.skipif(not _check_edflib_installed(strict=False), 
reason='edflib-python not installed') def test_export_edf_annotations(tmp_path): """Test that exporting EDF preserves annotations.""" rng = np.random.RandomState(123456) format = 'edf' ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', 'eog', 'ecg', 'emg', 'dbs', 'bio'] ch_names = np.arange(len(ch_types)).astype(str).tolist() info = create_info(ch_names, sfreq=1000, ch_types=ch_types) data = rng.random(size=(len(ch_names), 2000)) * 1.e-5 raw = RawArray(data, info) annotations = Annotations( onset=[0.01, 0.05, 0.90, 1.05], duration=[0, 1, 0, 0], description=['test1', 'test2', 'test3', 'test4']) raw.set_annotations(annotations) # export temp_fname = op.join(str(tmp_path), f'test.{format}') raw.export(temp_fname) # read in the file raw_read = read_raw_edf(temp_fname, preload=True) assert_array_equal(raw.annotations.onset, raw_read.annotations.onset) assert_array_equal(raw.annotations.duration, raw_read.annotations.duration) assert_array_equal(raw.annotations.description, raw_read.annotations.description) @pytest.mark.skipif(not _check_edflib_installed(strict=False), reason='edflib-python not installed') def test_rawarray_edf(tmp_path): """Test saving a Raw array with integer sfreq to EDF.""" rng = np.random.RandomState(12345) format = 'edf' ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'seeg', 'eog', 'ecg', 'emg', 'dbs', 'bio'] ch_names = np.arange(len(ch_types)).astype(str).tolist() info = create_info(ch_names, sfreq=1000, ch_types=ch_types) data = rng.random(size=(len(ch_names), 1000)) * 1e-5 # include subject info and measurement date subject_info = dict(first_name='mne', last_name='python', birthday=(1992, 1, 20), sex=1, hand=3) info['subject_info'] = subject_info raw = RawArray(data, info) time_now = datetime.now() meas_date = datetime(year=time_now.year, month=time_now.month, day=time_now.day, hour=time_now.hour, minute=time_now.minute, second=time_now.second, tzinfo=timezone.utc) raw.set_meas_date(meas_date) temp_fname = op.join(str(tmp_path), 
f'test.{format}') raw.export(temp_fname, add_ch_type=True) raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) # stim channel should be dropped raw.drop_channels('2') assert raw.ch_names == raw_read.ch_names # only compare the original length, since extra zeros are appended orig_raw_len = len(raw) assert_array_almost_equal( raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) assert_allclose( raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) # check channel types except for 'bio', which loses its type orig_ch_types = raw.get_channel_types() read_ch_types = raw_read.get_channel_types() assert_array_equal(orig_ch_types, read_ch_types) assert raw.info['meas_date'] == raw_read.info['meas_date'] # channel name can't be longer than 16 characters with the type added raw_bad = raw.copy() raw_bad.rename_channels({'1': 'abcdefghijklmnopqrstuvwxyz'}) with pytest.raises(RuntimeError, match='Signal label'), \ pytest.warns(RuntimeWarning, match='Data has a non-integer'): raw_bad.export(temp_fname, overwrite=True) # include bad birthday that is non-EDF compliant bad_info = info.copy() bad_info['subject_info']['birthday'] = (1700, 1, 20) raw = RawArray(data, bad_info) with pytest.raises(RuntimeError, match='Setting patient birth date'): raw.export(temp_fname, overwrite=True) # include bad measurement date that is non-EDF compliant raw = RawArray(data, info) meas_date = datetime(year=1984, month=1, day=1, tzinfo=timezone.utc) raw.set_meas_date(meas_date) with pytest.raises(RuntimeError, match='Setting start date time'): raw.export(temp_fname, overwrite=True) # test that warning is raised if there are non-voltage based channels raw = RawArray(data, info) with pytest.warns(RuntimeWarning, match='The unit'): raw.set_channel_types({'9': 'hbr'}) with pytest.warns(RuntimeWarning, match='Non-voltage channels'): raw.export(temp_fname, overwrite=True) # data should match up to the non-accepted channel raw_read = read_raw_edf(temp_fname, preload=True) 
orig_raw_len = len(raw) assert_array_almost_equal( raw.get_data()[:-1, :], raw_read.get_data()[:, :orig_raw_len], decimal=4) assert_allclose( raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) # the data should still match though raw_read = read_raw_edf(temp_fname, preload=True) raw.drop_channels('2') assert raw.ch_names == raw_read.ch_names orig_raw_len = len(raw) assert_array_almost_equal( raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) assert_allclose( raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) @pytest.mark.skipif(not _check_edflib_installed(strict=False), reason='edflib-python not installed') @pytest.mark.parametrize( ['dataset', 'format'], [ ['test', 'edf'], pytest.param('misc', 'edf', marks=[pytest.mark.slowtest, misc._pytest_mark()]), ]) def test_export_raw_edf(tmp_path, dataset, format): """Test saving a Raw instance to EDF format.""" if dataset == 'test': fname = _resource_path('mne.io.tests.data', 'test_raw.fif') raw = read_raw_fif(fname) elif dataset == 'misc': fname = op.join(misc_path, 'ecog', 'sample_ecog_ieeg.fif') raw = read_raw_fif(fname) # only test with EEG channels raw.pick_types(eeg=True, ecog=True, seeg=True) raw.load_data() orig_ch_names = raw.ch_names temp_fname = op.join(str(tmp_path), f'test.{format}') # test runtime errors with pytest.raises(RuntimeError, match='The maximum'), \ pytest.warns(RuntimeWarning, match='Data has a non-integer'): raw.export(temp_fname, physical_range=(-1e6, 0)) with pytest.raises(RuntimeError, match='The minimum'), \ pytest.warns(RuntimeWarning, match='Data has a non-integer'): raw.export(temp_fname, physical_range=(0, 1e6)) if dataset == 'test': with pytest.warns(RuntimeWarning, match='Data has a non-integer'): raw.export(temp_fname) elif dataset == 'misc': with pytest.warns(RuntimeWarning, match='EDF format requires'): raw.export(temp_fname) if 'epoc' in raw.ch_names: raw.drop_channels(['epoc']) raw_read = read_raw_edf(temp_fname, preload=True) assert orig_ch_names 
== raw_read.ch_names # only compare the original length, since extra zeros are appended orig_raw_len = len(raw) # assert data and times are not different # Due to the physical range of the data, reading and writing is # not lossless. For example, a physical min/max of -/+ 3200 uV # will result in a resolution of 0.09 uV. This resolution # though is acceptable for most EEG manufacturers. assert_array_almost_equal( raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) # Due to the data record duration limitations of EDF files, one # cannot store arbitrary float sampling rate exactly. Usually this # results in two sampling rates that are off by very low number of # decimal points. This for practical purposes does not matter # but will result in an error when say the number of time points # is very very large. assert_allclose( raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) @requires_version('pymatreader') @pytest.mark.skipif(not _check_eeglabio_installed(strict=False), reason='eeglabio not installed') @pytest.mark.parametrize('preload', (True, False)) def test_export_epochs_eeglab(tmp_path, preload): """Test saving an Epochs instance to EEGLAB's set format.""" raw, events = _get_data()[:2] raw.load_data() epochs = Epochs(raw, events, preload=preload) temp_fname = op.join(str(tmp_path), 'test.set') epochs.export(temp_fname) epochs.drop_channels([ch for ch in ['epoc', 'STI 014'] if ch in epochs.ch_names]) epochs_read = read_epochs_eeglab(temp_fname) assert epochs.ch_names == epochs_read.ch_names cart_coords = np.array([d['loc'][:3] for d in epochs.info['chs']]) # just xyz cart_coords_read = np.array([d['loc'][:3] for d in epochs_read.info['chs']]) assert_allclose(cart_coords, cart_coords_read) assert_array_equal(epochs.events[:, 0], epochs_read.events[:, 0]) # latency assert epochs.event_id.keys() == epochs_read.event_id.keys() # just keys assert_allclose(epochs.times, epochs_read.times) assert_allclose(epochs.get_data(), epochs_read.get_data()) 
# test overwrite with pytest.raises(FileExistsError, match='Destination file exists'): epochs.export(temp_fname, overwrite=False) epochs.export(temp_fname, overwrite=True) # test pathlib.Path files epochs.export(Path(temp_fname), overwrite=True) # test warning with unapplied projectors epochs = Epochs(raw, events, preload=preload, proj=False) with pytest.warns(RuntimeWarning, match='Epochs instance has unapplied projectors.'): epochs.export(Path(temp_fname), overwrite=True) @requires_version('mffpy', '0.5.7') @testing.requires_testing_data @pytest.mark.parametrize('fmt', ('auto', 'mff')) @pytest.mark.parametrize('do_history', (True, False)) def test_export_evokeds_to_mff(tmp_path, fmt, do_history): """Test exporting evoked dataset to MFF.""" evoked = read_evokeds_mff(egi_evoked_fname) export_fname = op.join(str(tmp_path), 'evoked.mff') history = [ { 'name': 'Test Segmentation', 'method': 'Segmentation', 'settings': ['Setting 1', 'Setting 2'], 'results': ['Result 1', 'Result 2'] }, { 'name': 'Test Averaging', 'method': 'Averaging', 'settings': ['Setting 1', 'Setting 2'], 'results': ['Result 1', 'Result 2'] } ] if do_history: export_evokeds_mff(export_fname, evoked, history=history) else: export_evokeds(export_fname, evoked) # Drop non-EEG channels evoked = [ave.drop_channels(['ECG', 'EMG']) for ave in evoked] evoked_exported = read_evokeds_mff(export_fname) assert len(evoked) == len(evoked_exported) for ave, ave_exported in zip(evoked, evoked_exported): # Compare infos assert object_diff(ave_exported.info, ave.info) == '' # Compare data assert_allclose(ave_exported.data, ave.data) # Compare properties assert ave_exported.nave == ave.nave assert ave_exported.kind == ave.kind assert ave_exported.comment == ave.comment assert_allclose(ave_exported.times, ave.times) # test overwrite with pytest.raises(FileExistsError, match='Destination file exists'): if do_history: export_evokeds_mff(export_fname, evoked, history=history, overwrite=False) else: 
export_evokeds(export_fname, evoked, overwrite=False) if do_history: export_evokeds_mff(export_fname, evoked, history=history, overwrite=True) else: export_evokeds(export_fname, evoked, overwrite=True) @requires_version('mffpy', '0.5.7') @testing.requires_testing_data def test_export_to_mff_no_device(): """Test no device type throws ValueError.""" evoked = read_evokeds_mff(egi_evoked_fname, condition='Category 1') evoked.info['device_info'] = None with pytest.raises(ValueError, match='No device type.'): export_evokeds('output.mff', evoked) @requires_version('mffpy', '0.5.7') def test_export_to_mff_incompatible_sfreq(): """Test non-whole number sampling frequency throws ValueError.""" evoked = read_evokeds(fname_evoked) with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info["sfreq"]}'): export_evokeds('output.mff', evoked) @pytest.mark.parametrize('fmt,ext', [ ('EEGLAB', 'set'), ('EDF', 'edf'), ('BrainVision', 'eeg') ]) def test_export_evokeds_unsupported_format(fmt, ext): """Test exporting evoked dataset to non-supported formats.""" evoked = read_evokeds(fname_evoked) with pytest.raises(NotImplementedError, match=f'Export to {fmt} not imp'): export_evokeds(f'output.{ext}', evoked)
{ "content_hash": "f8b10a843fcd4ca5f3c097175a4f66db", "timestamp": "", "source": "github", "line_count": 426, "max_line_length": 79, "avg_line_length": 40.83098591549296, "alnum_prop": 0.6381510865815798, "repo_name": "Eric89GXL/mne-python", "id": "2cca928914030c91bf5e59f8894052eda4859177", "size": "17418", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "mne/export/tests/test_export.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Csound Document", "bytes": "69806" }, { "name": "Makefile", "bytes": "3928" }, { "name": "Python", "bytes": "6164915" }, { "name": "Shell", "bytes": "936" } ], "symlink_target": "" }
""" jenkins-hash lookup3 algorithm implementation """ from asyncio import Lock from ..._buffered_producer._partition_resolver import ( generate_hash_code, ) # pylint: disable=protected-access class PartitionResolver: def __init__(self, partitions): self._idx = -1 self._partitions = partitions self._partitions_cnt = len(self._partitions) self._lock = Lock() async def get_next_partition_id(self): """ round-robin partition assignment """ async with self._lock: self._idx += 1 self._idx %= self._partitions_cnt return self._partitions[self._idx] async def get_partition_id_by_partition_key(self, partition_key): hash_code = generate_hash_code(partition_key) return self._partitions[abs(hash_code % self._partitions_cnt)]
{ "content_hash": "9f192fefb9c52a73f9b13d1a6f22aef5", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 70, "avg_line_length": 29.620689655172413, "alnum_prop": 0.629802095459837, "repo_name": "Azure/azure-sdk-for-python", "id": "b741029e9bcb44918e061780165fce94c79cb9fb", "size": "1204", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/eventhub/azure-eventhub/azure/eventhub/aio/_buffered_producer/_partition_resolver_async.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
from sparse.core import GaussSeidel from sparse.util import parse_information from time import time #URL = "http://profs.info.uaic.ro/~ancai/CN/lab/4/m_rar_2014_1.txt" URL = "http://profs.info.uaic.ro/~ancai/CN/lab/4/m_rar_2014_2.txt" def main(): """Test sparse module""" matrix, values = parse_information(URL) solver = GaussSeidel(matrix, values) matrix.sort() processing = time() print "[i] Matrix size: {}".format(matrix.size) print solver.solve() print solver.norm() print "[i] Done in {}".format(time() - processing) print "[i] Without printing" processing = time() solver.solve() solver.norm() print "[i] Done in {}".format(time() - processing) if __name__ == "__main__": # print "The module has not been designed to be used in this way." main()
{ "content_hash": "6d96a76cdeb2420c52c73a7c218609e1", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 70, "avg_line_length": 30.40740740740741, "alnum_prop": 0.6431181485992692, "repo_name": "c-square/homework", "id": "b74c609b94067b28d281dcf34c7c14e327751292", "size": "844", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Licență/Anul III/CN/sparse/sparse/gui.py", "mode": "33188", "license": "mit", "language": [ { "name": "Apex", "bytes": "12340" }, { "name": "C", "bytes": "1547" }, { "name": "C++", "bytes": "269593" }, { "name": "Haskell", "bytes": "14450" }, { "name": "Python", "bytes": "151009" }, { "name": "R", "bytes": "16961" }, { "name": "TeX", "bytes": "84806" } ], "symlink_target": "" }
"""Sentry Integration""" import logging from functools import wraps from airflow.configuration import conf from airflow.utils.session import find_session_idx, provide_session from airflow.utils.state import State log = logging.getLogger(__name__) class DummySentry: """Blank class for Sentry.""" @classmethod def add_tagging(cls, task_instance): """Blank function for tagging.""" @classmethod def add_breadcrumbs(cls, task_instance, session=None): """Blank function for breadcrumbs.""" @classmethod def enrich_errors(cls, run): """Blank function for formatting a TaskInstance._run_raw_task.""" return run def flush(self): """Blank function for flushing errors.""" Sentry: DummySentry = DummySentry() if conf.getboolean("sentry", 'sentry_on', fallback=False): import sentry_sdk # Verify blinker installation from blinker import signal # noqa: F401 from sentry_sdk.integrations.flask import FlaskIntegration from sentry_sdk.integrations.logging import ignore_logger class ConfiguredSentry(DummySentry): """Configure Sentry SDK.""" SCOPE_DAG_RUN_TAGS = frozenset(("data_interval_end", "data_interval_start", "execution_date")) SCOPE_TASK_TAGS = frozenset(("operator",)) SCOPE_TASK_INSTANCE_TAGS = frozenset(("task_id", "dag_id", "try_number")) SCOPE_TAGS = SCOPE_DAG_RUN_TAGS | SCOPE_TASK_TAGS | SCOPE_TASK_INSTANCE_TAGS SCOPE_CRUMBS = frozenset(("task_id", "state", "operator", "duration")) UNSUPPORTED_SENTRY_OPTIONS = frozenset( ( "integrations", "in_app_include", "in_app_exclude", "ignore_errors", "before_breadcrumb", "transport", ) ) def __init__(self): """Initialize the Sentry SDK.""" ignore_logger("airflow.task") ignore_logger("airflow.jobs.backfill_job.BackfillJob") executor_name = conf.get("core", "EXECUTOR") sentry_flask = FlaskIntegration() # LoggingIntegration is set by default. 
integrations = [sentry_flask] if executor_name == "CeleryExecutor": from sentry_sdk.integrations.celery import CeleryIntegration sentry_celery = CeleryIntegration() integrations.append(sentry_celery) dsn = None sentry_config_opts = conf.getsection("sentry") or {} if sentry_config_opts: sentry_config_opts.pop("sentry_on") old_way_dsn = sentry_config_opts.pop("sentry_dsn", None) new_way_dsn = sentry_config_opts.pop("dsn", None) # supported backward compatibility with old way dsn option dsn = old_way_dsn or new_way_dsn unsupported_options = self.UNSUPPORTED_SENTRY_OPTIONS.intersection(sentry_config_opts.keys()) if unsupported_options: log.warning( "There are unsupported options in [sentry] section: %s", ", ".join(unsupported_options), ) sentry_config_opts['before_send'] = conf.getimport('sentry', 'before_send', fallback=None) if dsn: sentry_sdk.init(dsn=dsn, integrations=integrations, **sentry_config_opts) else: # Setting up Sentry using environment variables. log.debug("Defaulting to SENTRY_DSN in environment.") sentry_sdk.init(integrations=integrations, **sentry_config_opts) def add_tagging(self, task_instance): """Function to add tagging for a task_instance.""" dag_run = task_instance.dag_run task = task_instance.task with sentry_sdk.configure_scope() as scope: for tag_name in self.SCOPE_TASK_INSTANCE_TAGS: attribute = getattr(task_instance, tag_name) scope.set_tag(tag_name, attribute) for tag_name in self.SCOPE_DAG_RUN_TAGS: attribute = getattr(dag_run, tag_name) scope.set_tag(tag_name, attribute) scope.set_tag("operator", task.__class__.__name__) @provide_session def add_breadcrumbs(self, task_instance, session=None): """Function to add breadcrumbs inside of a task_instance.""" if session is None: return dr = task_instance.get_dagrun(session) task_instances = dr.get_task_instances( state={State.SUCCESS, State.FAILED}, session=session, ) for ti in task_instances: data = {} for crumb_tag in self.SCOPE_CRUMBS: data[crumb_tag] = getattr(ti, crumb_tag) 
sentry_sdk.add_breadcrumb(category="completed_tasks", data=data, level="info") def enrich_errors(self, func): """ Wrap TaskInstance._run_raw_task and LocalTaskJob._run_mini_scheduler_on_child_tasks to support task specific tags and breadcrumbs. """ session_args_idx = find_session_idx(func) @wraps(func) def wrapper(_self, *args, **kwargs): # Wrapping the _run_raw_task function with push_scope to contain # tags and breadcrumbs to a specific Task Instance try: session = kwargs.get('session', args[session_args_idx]) except IndexError: session = None with sentry_sdk.push_scope(): try: return func(_self, *args, **kwargs) except Exception as e: # Is a LocalTaskJob get the task instance if hasattr(_self, 'task_instance'): task_instance = _self.task_instance else: task_instance = _self self.add_tagging(task_instance) self.add_breadcrumbs(task_instance, session=session) sentry_sdk.capture_exception(e) raise return wrapper def flush(self): sentry_sdk.flush() Sentry = ConfiguredSentry()
{ "content_hash": "d941bbb9758caad9017c55cce3cce5a6", "timestamp": "", "source": "github", "line_count": 172, "max_line_length": 109, "avg_line_length": 37.80813953488372, "alnum_prop": 0.5571274796247886, "repo_name": "bolkedebruin/airflow", "id": "948ecb86292b78e15ed18067f5c74ef1b267afd1", "size": "7291", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "airflow/sentry.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25286" }, { "name": "Dockerfile", "bytes": "40459" }, { "name": "HCL", "bytes": "3786" }, { "name": "HTML", "bytes": "157840" }, { "name": "JavaScript", "bytes": "167972" }, { "name": "Jinja", "bytes": "33382" }, { "name": "Jupyter Notebook", "bytes": "2933" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "19287942" }, { "name": "Shell", "bytes": "645244" }, { "name": "TypeScript", "bytes": "173854" } ], "symlink_target": "" }
"""Test that FakeFilesystem calls work identically to a real filesystem.""" import os import os.path import shutil import sys import tempfile import time import unittest import fake_filesystem class FakeFilesystemVsRealTest(unittest.TestCase): _FAKE_FS_BASE = '/fakefs' def _Paths(self, path): """For a given path, return paths in the real and fake filesystems.""" if not path: return (None, None) return (os.path.join(self.real_base, path), os.path.join(self.fake_base, path)) def _CreateTestFile(self, file_type, path, contents=None): """Create a dir, file, or link in both the real fs and the fake.""" self._created_files.append([file_type, path, contents]) real_path, fake_path = self._Paths(path) if file_type == 'd': os.mkdir(real_path) self.fake_os.mkdir(fake_path) if file_type == 'f': fh = open(real_path, 'w') fh.write(contents or '') fh.close() fh = self.fake_open(fake_path, 'w') fh.write(contents or '') fh.close() # l for symlink, h for hard link if file_type in ('l', 'h'): real_target, fake_target = (contents, contents) # If it begins with '/', make it relative to the base. You can't go # creating files in / for the real file system. if contents.startswith('/'): real_target, fake_target = self._Paths(contents[1:]) if file_type == 'l': os.symlink(real_target, real_path) self.fake_os.symlink(fake_target, fake_path) elif file_type == 'h': os.link(real_target, real_path) self.fake_os.link(fake_target, fake_path) def setUp(self): # Base paths in the real and test file systems. We keep them different # so that missing features in the fake don't fall through to the base # operations and magically succeed. tsname = 'fakefs.%s' % time.time() # Fully expand the base_path - required on OS X. self.real_base = os.path.realpath( os.path.join(tempfile.gettempdir(), tsname)) os.mkdir(self.real_base) self.fake_base = self._FAKE_FS_BASE # Make sure we can write to the physical testing temp directory. 
self.assertTrue(self.real_base.startswith('/')) self.assertTrue(os.access(self.real_base, os.W_OK)) self.fake_filesystem = fake_filesystem.FakeFilesystem() self.fake_filesystem.CreateDirectory(self.fake_base) self.fake_os = fake_filesystem.FakeOsModule(self.fake_filesystem) self.fake_open = fake_filesystem.FakeFileOpen(self.fake_filesystem) self._created_files = [] os.chdir(self.real_base) self.fake_os.chdir(self.fake_base) def tearDown(self): # We have to remove all the files from the real FS. Doing the same for the # fake FS is optional, but doing it is an extra sanity check. try: rev_files = self._created_files[:] rev_files.reverse() for info in rev_files: real_path, fake_path = self._Paths(info[1]) if info[0] == 'd': try: os.rmdir(real_path) except OSError as e: if 'Directory not empty' in e: self.fail('Real path %s not empty: %s : %s' % ( real_path, e, os.listdir(real_path))) else: raise self.fake_os.rmdir(fake_path) if info[0] == 'f' or info[0] == 'l': os.remove(real_path) self.fake_os.remove(fake_path) finally: shutil.rmtree(self.real_base) def _GetErrno(self, raised_error): try: return (raised_error and raised_error.errno) or None except AttributeError: return None def _CompareBehaviors(self, method_name, path, real, fake, method_returns_path=False): """Invoke an os method in both real and fake contexts and compare results. Invoke a real filesystem method with a path to a real file and invoke a fake filesystem method with a path to a fake file and compare the results. We expect some calls to throw Exceptions, so we catch those and compare them. Args: method_name: Name of method being tested, for use in error messages. path: potential path to a file in the real and fake file systems, passing an empty tuple indicates that no arguments to pass to method. real: built-in system library or method from the built-in system library which takes a path as an arg and returns some value. 
fake: fake_filsystem object or method from a fake_filesystem class which takes a path as an arg and returns some value. method_returns_path: True if the method returns a path, and thus we must compensate for expected difference between real and fake. Returns: A description of the difference in behavior, or None. """ # pylint: disable-msg=C6403 def _ErrorClass(e): return (e and e.__class__.__name__) or 'None' errs = 0 real_value = None fake_value = None real_err = None fake_err = None method_call = '%s' % method_name method_call += '()' if path == () else '(%s)' % path # Catching Exception below gives a lint warning, but it's what we need. try: args = [] if path == () else [path] real_method = real if not callable(real): real_method = getattr(real, method_name) real_value = str(real_method(*args)) except Exception as e: # pylint: disable-msg=W0703 real_err = e errs += 1 try: fake_method = fake if not callable(fake): fake_method = getattr(fake, method_name) args = [] if path == () else [path] fake_value = str(fake_method(*args)) except Exception as e: # pylint: disable-msg=W0703 fake_err = e errs += 1 # We only compare on the error class because the acutal error contents # is almost always different because of the file paths. 
if _ErrorClass(real_err) != _ErrorClass(fake_err): if real_err is None: return '%s: real version returned %s, fake raised %s' % ( method_call, real_value, _ErrorClass(fake_err)) if fake_err is None: return '%s: real version raised %s, fake returned %s' % ( method_call, _ErrorClass(real_err), fake_value) return '%s: real version raised %s, fake raised %s' % ( method_call, _ErrorClass(real_err), _ErrorClass(fake_err)) real_errno = self._GetErrno(real_err) fake_errno = self._GetErrno(fake_err) if real_errno != fake_errno: return '%s(%s): both raised %s, real errno %s, fake errno %s' % ( method_name, path, _ErrorClass(real_err), real_errno, fake_errno) # If the method is supposed to return a full path AND both values # begin with the expected full path, then trim it off. if method_returns_path: if (real_value and fake_value and real_value.startswith(self.real_base) and fake_value.startswith(self.fake_base)): real_value = real_value[len(self.real_base):] fake_value = fake_value[len(self.fake_base):] if real_value != fake_value: return '%s: real return %s, fake returned %s' % ( method_call, real_value, fake_value) return None def assertOsMethodBehaviorMatches(self, method_name, path, method_returns_path=False): """Invoke an os method in both real and fake contexts and compare. For a given method name (from the os module) and a path, compare the behavior of the system provided module against the fake_filesytem module. We expect results and/or Exceptions raised to be identical. Args: method_name: Name of method being tested. path: potential path to a file in the real and fake file systems. method_returns_path: True if the method returns a path, and thus we must compensate for expected difference between real and fake. Returns: A description of the difference in behavior, or None. 
""" return self._CompareBehaviors(method_name, path, os, self.fake_os, method_returns_path) def DiffOpenMethodBehavior(self, method_name, path, mode, data, method_returns_data=True): """Invoke an open method in both real and fkae contexts and compare. Args: method_name: Name of method being tested. path: potential path to a file in the real and fake file systems. mode: how to open the file. data: any data to pass to the method. method_returns_data: True if a method returns some sort of data. For a given method name (from builtin open) and a path, compare the behavior of the system provided module against the fake_filesytem module. We expect results and/or Exceptions raised to be identical. Returns: A description of the difference in behavior, or None. """ with open(path, mode) as real_fh: with self.fake_open(path, mode) as fake_fh: return self._CompareBehaviors(method_name, data, real_fh, fake_fh, method_returns_data) def DiffOsPathMethodBehavior(self, method_name, path, method_returns_path=False): """Invoke an os.path method in both real and fake contexts and compare. For a given method name (from the os.path module) and a path, compare the behavior of the system provided module against the fake_filesytem module. We expect results and/or Exceptions raised to be identical. Args: method_name: Name of method being tested. path: potential path to a file in the real and fake file systems. method_returns_path: True if the method returns a path, and thus we must compensate for expected difference between real and fake. Returns: A description of the difference in behavior, or None. """ return self._CompareBehaviors(method_name, path, os.path, self.fake_os.path, method_returns_path) def assertOsPathMethodBehaviorMatches(self, method_name, path, method_returns_path=False): """Assert that an os.path behaves the same in both real and fake contexts. Wraps DiffOsPathMethodBehavior, raising AssertionError if any differences are reported. 
Args: method_name: Name of method being tested. path: potential path to a file in the real and fake file systems. method_returns_path: True if the method returns a path, and thus we must compensate for expected difference between real and fake. Raises: AssertionError if there is any difference in behavior. """ diff = self.DiffOsPathMethodBehavior(method_name, path, method_returns_path) if diff: self.fail(diff) def assertAllOsBehaviorsMatch(self, path): os_method_names = ['readlink'] os_method_names_no_args = ['getcwd'] if sys.version_info < (3, 0): os_method_names_no_args.append('getcwdu') os_path_method_names = ['isabs', 'isdir', 'isfile', 'islink', 'exists', 'lexists', ] wrapped_methods = [['access', self._AccessReal, self._AccessFake], ['stat.size', self._StatSizeReal, self._StatSizeFake], ['lstat.size', self._LstatSizeReal, self._LstatSizeFake] ] differences = [] for method_name in os_method_names: diff = self.assertOsMethodBehaviorMatches(method_name, path) if diff: differences.append(diff) for method_name in os_method_names_no_args: diff = self.assertOsMethodBehaviorMatches(method_name, (), method_returns_path=True) if diff: differences.append(diff) for method_name in os_path_method_names: diff = self.DiffOsPathMethodBehavior(method_name, path) if diff: differences.append(diff) for m in wrapped_methods: diff = self._CompareBehaviors(m[0], path, m[1], m[2]) if diff: differences.append(diff) if differences: self.fail('Behaviors do not match for %s:\n %s' % (path, '\n '.join(differences))) def assertFileHandleBehaviorsMatch(self, path, mode, data): write_method_names = ['write', 'writelines'] read_method_names = ['read', 'readlines'] other_method_names = ['truncate', 'flush', 'close'] differences = [] for method_name in write_method_names: diff = self.DiffOpenMethodBehavior(method_name, path, mode, data) if diff: differences.append(diff) for method_name in read_method_names + other_method_names: diff = self.DiffOpenMethodBehavior(method_name, path, mode, ()) if 
diff: differences.append(diff) if differences: self.fail('Behaviors do not match for %s:\n %s' % (path, '\n '.join(differences))) # Helpers for checks which are not straight method calls. def _AccessReal(self, path): return os.access(path, 0o777777) def _AccessFake(self, path): return self.fake_os.access(path, 0o777777) def _StatSizeReal(self, path): real_path, unused_fake_path = self._Paths(path) # fake_filesystem.py does not implement stat().st_size for directories if os.path.isdir(real_path): return None return os.stat(real_path).st_size def _StatSizeFake(self, path): unused_real_path, fake_path = self._Paths(path) # fake_filesystem.py does not implement stat().st_size for directories if self.fake_os.path.isdir(fake_path): return None return self.fake_os.stat(fake_path).st_size def _LstatSizeReal(self, path): real_path, unused_fake_path = self._Paths(path) if os.path.isdir(real_path): return None size = os.lstat(real_path).st_size # Account for the difference in the lengths of the absolute paths. if os.path.islink(real_path): if os.readlink(real_path).startswith('/'): size -= len(self.real_base) return size def _LstatSizeFake(self, path): unused_real_path, fake_path = self._Paths(path) size = 0 if self.fake_os.path.isdir(fake_path): return None size = self.fake_os.lstat(fake_path).st_size # Account for the difference in the lengths of the absolute paths. if self.fake_os.path.islink(fake_path): if self.fake_os.readlink(fake_path).startswith('/'): size -= len(self.fake_base) return size def testIsabs(self): # We do not have to create any files for isabs. 
self.assertOsPathMethodBehaviorMatches('isabs', None) self.assertOsPathMethodBehaviorMatches('isabs', '') self.assertOsPathMethodBehaviorMatches('isabs', '/') self.assertOsPathMethodBehaviorMatches('isabs', '/a') self.assertOsPathMethodBehaviorMatches('isabs', 'a') def testNonePath(self): self.assertAllOsBehaviorsMatch(None) def testEmptyPath(self): self.assertAllOsBehaviorsMatch('') def testRootPath(self): self.assertAllOsBehaviorsMatch('/') def testNonExistantFile(self): self.assertAllOsBehaviorsMatch('foo') def testEmptyFile(self): self._CreateTestFile('f', 'aFile') self.assertAllOsBehaviorsMatch('aFile') def testFileWithContents(self): self._CreateTestFile('f', 'aFile', 'some contents') self.assertAllOsBehaviorsMatch('aFile') def testSymLinkToEmptyFile(self): self._CreateTestFile('f', 'aFile') self._CreateTestFile('l', 'link_to_empty', 'aFile') self.assertAllOsBehaviorsMatch('link_to_empty') def TBD_testHardLinkToEmptyFile(self): self._CreateTestFile('f', 'aFile') self._CreateTestFile('h', 'link_to_empty', 'aFile') self.assertAllOsBehaviorsMatch('link_to_empty') def testSymLinkToRealFile(self): self._CreateTestFile('f', 'aFile', 'some contents') self._CreateTestFile('l', 'link_to_file', 'aFile') self.assertAllOsBehaviorsMatch('link_to_file') def TBD_testHardLinkToRealFile(self): self._CreateTestFile('f', 'aFile', 'some contents') self._CreateTestFile('h', 'link_to_file', 'aFile') self.assertAllOsBehaviorsMatch('link_to_file') def testBrokenSymLink(self): self._CreateTestFile('l', 'broken_link', 'broken') self._CreateTestFile('l', 'loop', '/a/loop') self.assertAllOsBehaviorsMatch('broken_link') def testFileInAFolder(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('f', 'a/b/file', 'contents') self.assertAllOsBehaviorsMatch('a/b/file') def testAbsoluteSymLinkToFolder(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('f', 'a/b/file', 'contents') self._CreateTestFile('l', 'a/link', 
'/a/b') self.assertAllOsBehaviorsMatch('a/link/file') def testLinkToFolderAfterChdir(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('f', 'a/b/file', 'contents') self._CreateTestFile('l', 'a/link', '/a/b') real_dir, fake_dir = self._Paths('a/b') os.chdir(real_dir) self.fake_os.chdir(fake_dir) self.assertAllOsBehaviorsMatch('file') def testRelativeSymLinkToFolder(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('f', 'a/b/file', 'contents') self._CreateTestFile('l', 'a/link', 'b') self.assertAllOsBehaviorsMatch('a/link/file') def testSymLinkToParent(self): # Soft links on HFS+ / OS X behave differently. if os.uname()[0] != 'Darwin': self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('l', 'a/b/c', '..') self.assertAllOsBehaviorsMatch('a/b/c') def testPathThroughSymLinkToParent(self): self._CreateTestFile('d', 'a') self._CreateTestFile('f', 'a/target', 'contents') self._CreateTestFile('d', 'a/b') self._CreateTestFile('l', 'a/b/c', '..') self.assertAllOsBehaviorsMatch('a/b/c/target') def testSymLinkToSiblingDirectory(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') self._CreateTestFile('l', 'a/b/c', '../sibling_of_b') self.assertAllOsBehaviorsMatch('a/b/c/target') def testSymLinkToSiblingDirectoryNonExistantFile(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') self._CreateTestFile('l', 'a/b/c', '../sibling_of_b') self.assertAllOsBehaviorsMatch('a/b/c/file_does_not_exist') def testBrokenSymLinkToSiblingDirectory(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') 
self._CreateTestFile('l', 'a/b/c', '../broken_sibling_of_b') self.assertAllOsBehaviorsMatch('a/b/c/target') def testRelativePath(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') self.assertAllOsBehaviorsMatch('a/b/../sibling_of_b/target') def testBrokenRelativePath(self): self._CreateTestFile('d', 'a') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') self.assertAllOsBehaviorsMatch('a/b/../broken/target') def testBadRelativePath(self): self._CreateTestFile('d', 'a') self._CreateTestFile('f', 'a/target', 'contents') self._CreateTestFile('d', 'a/b') self._CreateTestFile('d', 'a/sibling_of_b') self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents') self.assertAllOsBehaviorsMatch('a/b/../broken/../target') def testGetmtimeNonexistantPath(self): self.assertOsPathMethodBehaviorMatches('getmtime', 'no/such/path') def testBuiltinOpenModes(self): self._CreateTestFile('f', 'read', 'some contents') self._CreateTestFile('f', 'write', 'some contents') self._CreateTestFile('f', 'append', 'some contents') self.assertFileHandleBehaviorsMatch('read', 'r', 'other contents') self.assertFileHandleBehaviorsMatch('write', 'w', 'other contents') self.assertFileHandleBehaviorsMatch('append', 'a', 'other contents') self._CreateTestFile('f', 'readplus', 'some contents') self._CreateTestFile('f', 'writeplus', 'some contents') self.assertFileHandleBehaviorsMatch('readplus', 'r+', 'other contents') self.assertFileHandleBehaviorsMatch('writeplus', 'w+', 'other contents') def main(unused_argv): unittest.main() if __name__ == '__main__': unittest.main()
{ "content_hash": "ff411bd7cb82761048bdf4981cd538d1", "timestamp": "", "source": "github", "line_count": 538, "max_line_length": 80, "avg_line_length": 38.888475836431226, "alnum_prop": 0.646257527960998, "repo_name": "rec/echomesh", "id": "993e0d3ae1486a5eac90e16c069fc26b056c14b3", "size": "21543", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "code/python/external/fake/fake_filesystem_vs_real_test.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "5723427" }, { "name": "C++", "bytes": "10326191" }, { "name": "CSS", "bytes": "2048" }, { "name": "HTML", "bytes": "22355" }, { "name": "Java", "bytes": "25383" }, { "name": "M4", "bytes": "32321" }, { "name": "Makefile", "bytes": "215120" }, { "name": "Objective-C", "bytes": "93003" }, { "name": "Objective-C++", "bytes": "394207" }, { "name": "Python", "bytes": "1117634" }, { "name": "Shell", "bytes": "735767" } ], "symlink_target": "" }
"""The volume type access extension.""" from oslo_utils import uuidutils import six import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder.volume import volume_types soft_authorize = extensions.soft_extension_authorizer('volume', 'volume_type_access') authorize = extensions.extension_authorizer('volume', 'volume_type_access') def make_volume_type(elem): elem.set('{%s}is_public' % Volume_type_access.namespace, '%s:is_public' % Volume_type_access.alias) def make_volume_type_access(elem): elem.set('volume_type_id') elem.set('project_id') class VolumeTypeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type', selector='volume_type') make_volume_type(root) alias = Volume_type_access.alias namespace = Volume_type_access.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeTypesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_types') elem = xmlutil.SubTemplateElement( root, 'volume_type', selector='volume_types') make_volume_type(elem) alias = Volume_type_access.alias namespace = Volume_type_access.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeTypeAccessTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type_access') elem = xmlutil.SubTemplateElement(root, 'access', selector='volume_type_access') make_volume_type_access(elem) return xmlutil.MasterTemplate(root, 1) def _marshall_volume_type_access(vol_type): rval = [] for project_id in vol_type['projects']: rval.append({'volume_type_id': vol_type['id'], 'project_id': project_id}) return {'volume_type_access': rval} class VolumeTypeAccessController(object): """The volume type access API controller for the OpenStack API.""" def __init__(self): super(VolumeTypeAccessController, self).__init__() 
@wsgi.serializers(xml=VolumeTypeAccessTemplate) def index(self, req, type_id): context = req.environ['cinder.context'] authorize(context) try: vol_type = volume_types.get_volume_type( context, type_id, expected_fields=['projects']) except exception.VolumeTypeNotFound: explanation = _("Volume type not found.") raise webob.exc.HTTPNotFound(explanation=explanation) if vol_type['is_public']: expl = _("Access list not available for public volume types.") raise webob.exc.HTTPNotFound(explanation=expl) return _marshall_volume_type_access(vol_type) class VolumeTypeActionController(wsgi.Controller): """The volume type access API controller for the OpenStack API.""" def _check_body(self, body, action_name): if not self.is_valid_body(body, action_name): raise webob.exc.HTTPBadRequest() access = body[action_name] project = access.get('project') if not uuidutils.is_uuid_like(project): msg = _("Bad project format: " "project is not in proper format (%s)") % project raise webob.exc.HTTPBadRequest(explanation=msg) def _extend_vol_type(self, vol_type_rval, vol_type_ref): if vol_type_ref: key = "%s:is_public" % (Volume_type_access.alias) vol_type_rval[key] = vol_type_ref.get('is_public', True) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypeTemplate()) vol_type = req.cached_resource_by_id(id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.extends def index(self, req, resp_obj): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypesTemplate()) for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) @wsgi.extends def detail(self, req, resp_obj): context = 
req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypesTemplate()) for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) @wsgi.extends(action='create') def create(self, req, body, resp_obj): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypeTemplate()) type_id = resp_obj.obj['volume_type']['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.action('addProjectAccess') def _addProjectAccess(self, req, id, body): context = req.environ['cinder.context'] authorize(context, action="addProjectAccess") self._check_body(body, 'addProjectAccess') project = body['addProjectAccess']['project'] try: volume_types.add_volume_type_access(context, id, project) except exception.VolumeTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.VolumeTypeNotFound as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) @wsgi.action('removeProjectAccess') def _removeProjectAccess(self, req, id, body): context = req.environ['cinder.context'] authorize(context, action="removeProjectAccess") self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] try: volume_types.remove_volume_type_access(context, id, project) except (exception.VolumeTypeNotFound, exception.VolumeTypeAccessNotFound) as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) class Volume_type_access(extensions.ExtensionDescriptor): """Volume type access support.""" name = "VolumeTypeAccess" alias = "os-volume-type-access" namespace = 
("http://docs.openstack.org/volume/" "ext/os-volume-type-access/api/v1") updated = "2014-06-26T00:00:00Z" def get_resources(self): resources = [] res = extensions.ResourceExtension( Volume_type_access.alias, VolumeTypeAccessController(), parent=dict(member_name='type', collection_name='types')) resources.append(res) return resources def get_controller_extensions(self): controller = VolumeTypeActionController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension]
{ "content_hash": "444d1e5cdfa9361400e0af7928a193c2", "timestamp": "", "source": "github", "line_count": 203, "max_line_length": 77, "avg_line_length": 38.49753694581281, "alnum_prop": 0.6381317978246961, "repo_name": "abusse/cinder", "id": "b91672f396a22d86b5e301b544fa9ea27c986783", "size": "8390", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "cinder/api/contrib/volume_type_access.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "3555" }, { "name": "Python", "bytes": "10554152" }, { "name": "Shell", "bytes": "8415" } ], "symlink_target": "" }
import enum import logging import phonenumbers from flask_wtf import FlaskForm from phonenumbers.phonenumberutil import NumberParseException from structlog import wrap_logger from wtforms import ( HiddenField, PasswordField, RadioField, StringField, SubmitField, TextAreaField, ) from wtforms.validators import Email, EqualTo, Length, ValidationError from frontstage import app from frontstage.common.validators import DataRequired, InputRequired from frontstage.form import Form from frontstage.i18n.translations import Translate translations = Translate("form_messages") _ = translations.translate logger = wrap_logger(logging.getLogger(__name__)) class EnrolmentCodeForm(FlaskForm): enrolment_code = StringField( _("Enrolment Code"), [InputRequired(), Length(min=12, max=12, message=_("Re-enter the code and " "try again"))] ) class RegistrationForm(FlaskForm): first_name = StringField( _("First name"), validators=[ InputRequired(_("First name is required")), Length(max=254, message=_("Your first name must be less than 254 " "characters")), ], ) last_name = StringField( _("Last name"), validators=[ InputRequired(_("Last name is required")), Length(max=254, message=_("Your last name must be less than 254 characters")), ], ) email_address = StringField( _("Enter your email address"), validators=[ InputRequired(_("Email address is required")), Email(message=_("Invalid email address")), Length(max=254, message=_("Your email must be less than 254 characters")), EqualTo("email_address_confirm", message=app.config["EMAIL_MATCH_ERROR_TEXT"]), ], ) email_address_confirm = StringField(_("Re-type your email address")) password = PasswordField( _("Create a password"), validators=[ DataRequired(_("Password is required")), EqualTo("password_confirm", message=app.config["PASSWORD_MATCH_ERROR_TEXT"]), Length( min=app.config["PASSWORD_MIN_LENGTH"], max=app.config["PASSWORD_MAX_LENGTH"], message=app.config["PASSWORD_CRITERIA_ERROR_TEXT"], ), ], ) password_confirm = PasswordField(_("Re-type 
your password")) phone_number = StringField( _("Telephone number"), validators=[ DataRequired(_("Phone number is required")), Length(min=9, max=15, message=_("This should be a valid phone number between 9 and 15 " "digits")), ], default=None, ) enrolment_code = HiddenField("Enrolment Code") @staticmethod def validate_phone_number(form, field): try: logger.info("Checking this is a valid phone number") input_number = phonenumbers.parse( field.data, "GB" ) # Default region GB (44), unless country code added by user if not phonenumbers.is_possible_number(input_number): raise ValidationError(_("This should be a valid telephone number between 9 and 15 digits")) if not phonenumbers.is_valid_number(input_number): raise ValidationError(_("Please use a valid telephone number e.g. 01632 496 0018.")) except NumberParseException: logger.info("There is a number parse exception in the phonenumber field") raise ValidationError(_("This should be a valid telephone number e.g. 01632 496 0018. ")) @staticmethod def validate_email_address(_, field): email = field.data return _validate_email_address(email) @staticmethod def validate_password(_, field): password = field.data if ( password.isalnum() or not any(char.isupper() for char in password) or not any(char.isdigit() for char in password) ): raise ValidationError(app.config["PASSWORD_CRITERIA_ERROR_TEXT"]) class LoginForm(FlaskForm): username = StringField( _("Email Address"), [InputRequired(_("Email Address is required")), Email(_("Invalid email address"))] ) password = PasswordField(_("Password"), [InputRequired(_("Password is required"))]) @staticmethod def validate_username(form, field): email = field.data return _validate_email_address(email) def _validate_email_address(email): """ Validates an email address, using regex to conform to GDS standards. :param field: Field containing email address for validation. 
""" if "@" not in email: logger.info("No @ in email address") raise ValidationError(_("Invalid email address")) local_part, domain_part = email.rsplit("@", 1) logger.info("Checking if the email address contains a space or quotes in the local part") # this extends the email validator to check if there is whitespace in the email or quotes surrounding local part if " " in email: logger.info("Space found in email address") raise ValidationError(_("Invalid email address")) if local_part.startswith('"') and local_part.endswith('"'): logger.info("Quotes found in local part of email") raise ValidationError(_("Invalid email address")) class ForgotPasswordForm(FlaskForm): email_address = StringField( _("Enter your email address"), validators=[ InputRequired(_("Email address is required")), Email(message=_("Invalid email address")), Length(max=254, message=_("Your email must be less than 254 characters")), ], ) @staticmethod def validate_email_address(_, field): email = field.data return _validate_email_address(email) class ResetPasswordForm(FlaskForm): password = PasswordField( _("New password"), validators=[ DataRequired(_("Password is required")), EqualTo("password_confirm", message=app.config["PASSWORD_MATCH_ERROR_TEXT"]), Length( min=app.config["PASSWORD_MIN_LENGTH"], max=app.config["PASSWORD_MAX_LENGTH"], message=app.config["PASSWORD_CRITERIA_ERROR_TEXT"], ), ], ) password_confirm = PasswordField(_("Re-type new password")) @staticmethod def validate_password(form, field): password = field.data if ( password.isalnum() or not any(char.isupper() for char in password) or not any(char.isdigit() for char in password) ): raise ValidationError(app.config["PASSWORD_CRITERIA_ERROR_TEXT"]) class SecureMessagingForm(FlaskForm): send = SubmitField(label=_("Send"), id="send-message-btn") subject = StringField(_("Subject")) body = TextAreaField( _("Message"), validators=[ DataRequired(_("Message is required")), Length(max=50000, message=_("Message must be less than 50000 " 
"characters")), ], ) msg_id = HiddenField("Message id") thread_id = HiddenField("Thread id") hidden_subject = HiddenField("Hidden Subject") class RespondentStatus(enum.IntEnum): CREATED = 0 ACTIVE = 1 SUSPENDED = 2 class OptionsForm(Form): option = RadioField( "Label", choices=[ ("value", "contact_details"), ("value", "change_password"), ("value", "share_surveys"), ("value", "transfer_surveys"), ], ) class HelpOptionsForm(Form): option = RadioField( "Label", choices=[("value", "help-completing-this-survey"), ("value", "info-about-this-survey")] ) class AccountSurveySelectBusinessForm(Form): option = RadioField("Label") class AccountSurveySelectSurveyForm(Form): option = RadioField("Label") class HelpInfoAboutThisSurveyForm(Form): option = RadioField( "Label", choices=[ ("value", "exemption-completing-survey"), ("value", "why-selected"), ("value", "time-to-complete"), ("value", "how-long-selected-for"), ("value", "penalties"), ("value", "info-something-else"), ], ) class HelpCompletingThisSurveyForm(Form): option = RadioField( "Label", choices=[ ("value", "answer-survey-question"), ("value", "do-not-have-specific-figures"), ("value", "unable-to-return-by-deadline"), ("value", "completing-this-survey-something-else"), ], ) class HelpInfoAboutTheONSForm(Form): option = RadioField( "Label", choices=[ ("value", "who-is-the-ons"), ("value", "how-safe-is-my-data"), ("value", "info-ons-something-else"), ], ) class HelpSomethingElseForm(Form): option = RadioField( "Label", choices=[ ("value", "my-survey-is-not-listed"), ("value", "something-else"), ], ) class ContactDetailsChangeForm(FlaskForm): first_name = StringField( _("First name"), validators=[ DataRequired(_("First name is required")), Length(max=254, message=_("Your first name must be less than 254 " "characters")), ], ) last_name = StringField( _("Last name"), validators=[ DataRequired(_("Last name is required")), Length(max=254, message=_("Your last name must be less than 254 characters")), ], ) phone_number = 
StringField( _("Telephone number"), validators=[ DataRequired(_("Phone number is required")), Length(min=9, max=15, message=_("This should be a valid phone number between 9 and 15 " "digits")), ], default=None, ) email_address = StringField( _("Email address"), validators=[ DataRequired(_("Email address is required")), Email(message=_("Invalid email address")), Length(max=254, message=_("Your email must have fewer than 254 characters")), ], ) class ConfirmEmailChangeForm(FlaskForm): email_address = HiddenField("Email address") class ChangePasswordFrom(FlaskForm): password = PasswordField(_("type your password"), validators=[DataRequired(_("Your current password is required"))]) new_password = PasswordField( _("Create a new password"), validators=[ DataRequired(_("Your new password is required")), EqualTo("new_password_confirm", message=app.config["PASSWORD_MATCH_ERROR_TEXT"]), Length( min=app.config["PASSWORD_MIN_LENGTH"], max=app.config["PASSWORD_MAX_LENGTH"], message=app.config["PASSWORD_CRITERIA_ERROR_TEXT"], ), ], ) new_password_confirm = PasswordField(_("Re-type your new password")) @staticmethod def validate_new_password(form, field): new_password = field.data if ( new_password.isalnum() or not any(char.isupper() for char in new_password) or not any(char.isdigit() for char in new_password) ): raise ValidationError(app.config["PASSWORD_CRITERIA_ERROR_TEXT"]) class AccountSurveyShareRecipientEmailForm(FlaskForm): email_address = StringField( _("Enter recipient email address"), validators=[ InputRequired(_("You need to enter an email address")), Email(message=_("Invalid email address")), Length(max=254, message=_("Your email must be less than 254 characters")), ], ) @staticmethod def validate_email_address(_, field): email = field.data return _validate_email_address(email) class HelpForm(Form): option = RadioField("Label", choices=[("value", "info-ons"), ("value", "password"), ("value", "something-else")]) class HelpInfoOnsForm(Form): option = 
RadioField("Label", choices=[("value", "ons"), ("value", "data"), ("value", "info-something-else")]) class HelpPasswordForm(Form): option = RadioField( "Label", choices=[ ("value", "reset-email"), ("value", "password-not-accept"), ("value", "reset-password"), ("value", "password-something-else"), ], ) class PendingSurveyRegistrationForm(FlaskForm): first_name = StringField( _("First name"), validators=[ InputRequired(_("First name is required")), Length(max=254, message=_("Your first name must be less than 254 " "characters")), ], ) last_name = StringField( _("Last name"), validators=[ InputRequired(_("Last name is required")), Length(max=254, message=_("Your last name must be less than 254 characters")), ], ) email = HiddenField("Enter your email address") password = PasswordField( _("Create a password"), validators=[ DataRequired(_("Password is required")), EqualTo("password_confirm", message=app.config["PASSWORD_MATCH_ERROR_TEXT"]), Length( min=app.config["PASSWORD_MIN_LENGTH"], max=app.config["PASSWORD_MAX_LENGTH"], message=app.config["PASSWORD_CRITERIA_ERROR_TEXT"], ), ], ) password_confirm = PasswordField(_("Re-type your password")) phone_number = StringField( _("Telephone number"), validators=[ DataRequired(_("Phone number is required")), Length(min=9, max=15, message=_("This should be a valid phone number between 9 and 15 " "digits")), ], default=None, ) batch_no = HiddenField("Batch number") @staticmethod def validate_phone_number(form, field): try: logger.info("Checking this is a valid phone number") input_number = phonenumbers.parse( field.data, "GB" ) # Default region GB (44), unless country code added by user if not phonenumbers.is_possible_number(input_number): raise ValidationError(_("This should be a valid telephone number between 9 and 15 digits")) if not phonenumbers.is_valid_number(input_number): raise ValidationError(_("Please use a valid telephone number e.g. 
01632 496 0018.")) except NumberParseException: logger.info("There is a number parse exception in the phonenumber field") raise ValidationError(_("This should be a valid telephone number e.g. 01632 496 0018. ")) @staticmethod def validate_password(_, field): password = field.data if ( password.isalnum() or not any(char.isupper() for char in password) or not any(char.isdigit() for char in password) ): raise ValidationError(app.config["PASSWORD_CRITERIA_ERROR_TEXT"])
{ "content_hash": "1b919cd326d49a41a30e110a15869a01", "timestamp": "", "source": "github", "line_count": 456, "max_line_length": 120, "avg_line_length": 33.19298245614035, "alnum_prop": 0.5985068710359408, "repo_name": "ONSdigital/ras-frontstage", "id": "bbbf255d9bd23778a2c8d1a238ff24e793b83779", "size": "15136", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "frontstage/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "592" }, { "name": "Dockerfile", "bytes": "621" }, { "name": "HTML", "bytes": "269090" }, { "name": "Makefile", "bytes": "824" }, { "name": "Python", "bytes": "705890" }, { "name": "Shell", "bytes": "2874" } ], "symlink_target": "" }
import re import subprocess from setup_product.setup_cleanup import ( create_bq_dataset, create_bq_table, delete_bq_table, upload_data_to_bq_table, ) def test_import_products_bq(table_id_prefix): dataset = "products" valid_products_table = f"{table_id_prefix}products" product_schema = "../resources/product_schema.json" valid_products_source_file = "../resources/products.json" create_bq_dataset(dataset) create_bq_table(dataset, valid_products_table, product_schema) upload_data_to_bq_table( dataset, valid_products_table, valid_products_source_file, product_schema ) output = str( subprocess.check_output( f"python import_products_big_query_table.py {dataset} {valid_products_table}", shell=True, ) ) delete_bq_table(dataset, valid_products_table) assert re.match(".*import products from big query table request.*", output) assert re.match(".*the operation was started.*", output) assert re.match( ".*projects/.*/locations/global/catalogs/default_catalog/branches/0/operations/import-products.*", output, ) assert re.match(".*number of successfully imported products.*?316.*", output) assert re.match(".*number of failures during the importing.*?0.*", output)
{ "content_hash": "083560afbebe4b34aabad2afd580b422", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 106, "avg_line_length": 32.170731707317074, "alnum_prop": 0.6747536012130402, "repo_name": "googleapis/python-retail", "id": "a388365be30c8b189eeea68e72d5d938a59b0f23", "size": "1917", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "samples/interactive-tutorials/product/import_products_bq_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "7420556" }, { "name": "Shell", "bytes": "30660" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import datetime import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('wishlist_app', '0022_auto_20151001_1829'), ] operations = [ migrations.AddField( model_name='invite', name='created_date', field=models.DateField(default=django.utils.timezone.now), ), migrations.AddField( model_name='invite', name='inviter', field=models.ForeignKey(related_name='invited_by', default=1, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AddField( model_name='invite', name='used', field=models.BooleanField(default=False), ), migrations.AlterField( model_name='role', name='created_date', field=models.DateTimeField(default=datetime.datetime.now), ), migrations.AlterField( model_name='role', name='modified_date', field=models.DateTimeField(default=datetime.datetime.now), ), ]
{ "content_hash": "69e36a9aa37a5930518b8d60f947212b", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 103, "avg_line_length": 30.232558139534884, "alnum_prop": 0.5953846153846154, "repo_name": "pclements12/PyWishlist", "id": "220ea56be29da0ca6f2f837de0cad595cb1dc36d", "size": "1324", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wishlist_app/migrations/0023_auto_20151021_1654.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "953" }, { "name": "HTML", "bytes": "65001" }, { "name": "JavaScript", "bytes": "55206" }, { "name": "Python", "bytes": "110685" } ], "symlink_target": "" }
from nose.tools import * # flake8: noqa from urlparse import urlparse from api.base.settings.defaults import API_BASE from tests.base import ApiTestCase from tests.factories import ( ProjectFactory, RegistrationFactory, AuthUserFactory ) node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id) class TestNodeRegistrationList(ApiTestCase): def setUp(self): super(TestNodeRegistrationList, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory(is_public=False, creator=self.user) self.registration_project = RegistrationFactory(creator=self.user, project=self.project) self.project.save() self.private_url = '/{}nodes/{}/registrations/'.format(API_BASE, self.project._id) self.public_project = ProjectFactory(is_public=True, creator=self.user) self.public_registration_project = RegistrationFactory(creator=self.user, project=self.public_project) self.public_project.save() self.public_url = '/{}nodes/{}/registrations/'.format(API_BASE, self.public_project._id) self.user_two = AuthUserFactory() def test_return_public_registrations_logged_out(self): res = self.app.get(self.public_url) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data'][0]['attributes']['registration'], True) url = res.json['data'][0]['relationships']['registered_from']['links']['related']['href'] assert_equal(urlparse(url).path, '/{}nodes/{}/'.format(API_BASE, self.public_project._id)) assert_equal(res.json['data'][0]['type'], 'registrations') def test_return_public_registrations_logged_in(self): res = self.app.get(self.public_url, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.json['data'][0]['attributes']['registration'], True) url = res.json['data'][0]['relationships']['registered_from']['links']['related']['href'] assert_equal(urlparse(url).path, '/{}nodes/{}/'.format(API_BASE, self.public_project._id)) assert_equal(res.content_type, 'application/vnd.api+json') 
assert_equal(res.json['data'][0]['type'], 'registrations') def test_return_private_registrations_logged_out(self): res = self.app.get(self.private_url, expect_errors=True) assert_equal(res.status_code, 401) assert 'detail' in res.json['errors'][0] def test_return_private_registrations_logged_in_contributor(self): res = self.app.get(self.private_url, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.json['data'][0]['attributes']['registration'], True) url = res.json['data'][0]['relationships']['registered_from']['links']['related']['href'] assert_equal(urlparse(url).path, '/{}nodes/{}/'.format(API_BASE, self.project._id)) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data'][0]['type'], 'registrations') def test_return_private_registrations_logged_in_non_contributor(self): res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert 'detail' in res.json['errors'][0]
{ "content_hash": "71392b3df8ffbcc9714812b92923d39f", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 110, "avg_line_length": 48.955882352941174, "alnum_prop": 0.6683688795434064, "repo_name": "haoyuchen1992/osf.io", "id": "13ea69097cd3e4f62a365e3fc3f038908d67a219", "size": "3353", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "api_tests/nodes/views/test_node_registrations_list.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "119433" }, { "name": "HTML", "bytes": "34310" }, { "name": "JavaScript", "bytes": "1214045" }, { "name": "Mako", "bytes": "542037" }, { "name": "Python", "bytes": "3730523" }, { "name": "Shell", "bytes": "1927" } ], "symlink_target": "" }
""" Turn a catalog of photometry from PS1 into an HDF5 file """ from __future__ import division, print_function __author__ = "adrn <adrn@astro.columbia.edu>" # Third-party from astropy import log as logger from astropy.io import ascii import h5py import numpy.lib.recfunctions as nprf # Project from globber.core import ps1_isoc_to_XCov # HACK: name should be customizable from globber.ngc5897 import mixing_matrix def main(iso_filename, XCov_filename, interpolate=True, overwrite=False): # FOR PARSEC ISOCHRONE (reversing it for interpolation) iso = ascii.read(iso_filename, header_start=13)[:114][::-1] iso = nprf.stack_arrays((iso[:25], iso[27:]),usemask=False) # because of stupid red clump turnaround # FOR DARTMOUTH ISOCHRONE (reversing it for interpolation) # iso = ascii.read(iso_filename, header_start=8)[::-1] # output hdf5 file with h5py.File(XCov_filename, mode='r+') as f: # feature and covariance matrices for all stars X = ps1_isoc_to_XCov(iso, W=mixing_matrix, interpolate=interpolate) if 'isochrone' in f and overwrite: f.__delitem__('isochrone') logger.debug("Overwriting isochrone data") if 'isochrone' not in f: g = f.create_group('isochrone') else: g = f['isochrone'] if 'X' not in f['isochrone']: g.create_dataset('X', X.shape, dtype='f', data=X) f.flush() logger.debug("Saved isochrone to {}".format(XCov_filename)) if __name__ == "__main__": from argparse import ArgumentParser import logging # Define parser object parser = ArgumentParser(description="") parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Be chatty! (default = False)") parser.add_argument("-q", "--quiet", action="store_true", dest="quiet", default=False, help="Be quiet! 
(default = False)") parser.add_argument("-o", "--overwrite", action="store_true", dest="overwrite", default=False, help="DESTROY.") parser.add_argument("-i", "--iso-file", dest="iso_filename", required=True, type=str, help="Path to isochrone file.") parser.add_argument("-x", "--xcov-file", dest="XCov_filename", required=True, type=str, help="Path to XCov file.") args = parser.parse_args() # Set logger level based on verbose flags if args.verbose: logger.setLevel(logging.DEBUG) elif args.quiet: logger.setLevel(logging.ERROR) else: logger.setLevel(logging.INFO) main(iso_filename=args.iso_filename, XCov_filename=args.XCov_filename, overwrite=args.overwrite)
{ "content_hash": "ce7346d8a6b823d397df25708361d490", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 104, "avg_line_length": 35.037974683544306, "alnum_prop": 0.6296965317919075, "repo_name": "adrn/globber", "id": "970ae8728ededadfc543da62158478a5106a726a", "size": "2768", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/isochrone-to-xcov.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "194975" }, { "name": "Python", "bytes": "99814" }, { "name": "Shell", "bytes": "2459" } ], "symlink_target": "" }
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor class ConfigVrfCommandProcessor(BaseCommandProcessor): def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args): super(ConfigVrfCommandProcessor, self).init(switch_configuration, terminal_controller, logger, piping_processor) self.vrf = args[0] def get_prompt(self): return "SSH@%s(config-vrf-%s)#" % (self.switch_configuration.name, self.vrf.name) def do_exit(self): self.is_done = True
{ "content_hash": "b0e6a6b3bc55f14a66551d2440553b8b", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 120, "avg_line_length": 43, "alnum_prop": 0.7352415026833632, "repo_name": "internaphosting/fake-switches", "id": "a156f92c15fec395f2d9596e62dc9631ad41036a", "size": "1133", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "fake_switches/brocade/command_processor/config_vrf.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "587681" } ], "symlink_target": "" }
"""depotexample URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^$', views.index) ]
{ "content_hash": "984abbc1c1c1360a181e2051a98127dd", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 79, "avg_line_length": 33.791666666666664, "alnum_prop": 0.6966707768187423, "repo_name": "amol-/depot", "id": "043de6bb2e78afa32c9765d2aba7577464b56c24", "size": "811", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/django/depotexample/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "161671" } ], "symlink_target": "" }
import sys sys.path.append('.') sys.path.append('..') from tasks import execute_comment_task if __name__ == '__main__': execute_comment_task()
{ "content_hash": "8d35babf84adc98c3d0d08bd7d402571", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 38, "avg_line_length": 15, "alnum_prop": 0.64, "repo_name": "ResolveWang/WeiboSpider", "id": "9d51df8e38565afd5ed3cbbb99d88eeb7d87d9c8", "size": "150", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "first_task_execution/comment_first.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2197379" }, { "name": "Shell", "bytes": "623" } ], "symlink_target": "" }
import demistomock as demisto from CommonServerPython import * from JSONFeedApiModule import * # noqa: E402 def main(): params = {k: v for k, v in demisto.params().items() if v is not None} params['feed_name_to_config'] = { 'CIDR': { 'url': 'https://api.fastly.com/public-ip-list', 'extractor': "addresses[].{ip:@}", 'indicator': 'ip', 'indicator_type': FeedIndicatorType.CIDR, }, 'IPv6CIDR': { 'url': 'https://api.fastly.com/public-ip-list', 'extractor': "ipv6_addresses[].{ip:@}", 'indicator': 'ip', 'indicator_type': FeedIndicatorType.IPv6CIDR, }, } feed_main(params, 'Fastly Feed', 'fastly') if __name__ in ('__main__', '__builtin__', 'builtins'): main()
{ "content_hash": "7cbf11703c26aef0909a8f5279fb3ccb", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 73, "avg_line_length": 27.2, "alnum_prop": 0.5367647058823529, "repo_name": "demisto/content", "id": "00c2b3d1d6ed51aa9cf6e526db59088e6254ee39", "size": "816", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Packs/FeedFastly/Integrations/FeedFastly/FeedFastly.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "2146" }, { "name": "HTML", "bytes": "205901" }, { "name": "JavaScript", "bytes": "1584075" }, { "name": "PowerShell", "bytes": "442288" }, { "name": "Python", "bytes": "47881712" }, { "name": "Rich Text Format", "bytes": "480911" }, { "name": "Shell", "bytes": "108066" }, { "name": "YARA", "bytes": "1185" } ], "symlink_target": "" }
"""Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "great_expectations-" cfg.versionfile_source = "great_expectations/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen( [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), ) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print(f"unable to run {dispcmd}") print(e) return None, 
None else: if verbose: print(f"unable to find command, tried {commands}") return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print(f"unable to run {dispcmd} (error)") print(f"stdout was {stdout}") return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. 
By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print(f"discarding '{','.join(refs - tags)}', no digits") if verbose: print(f"likely tags: {','.join(sorted(tags))}") for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print(f"picking {r}") return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print(f"Directory {root} not under git control") raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}*", ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ 0 ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += f"g{pieces['short']}" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += f"+g{pieces['short']}" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError(f"unknown style '{style}'") return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, }
{ "content_hash": "e4b02b74bb21f976a418584a76d5ab63", "timestamp": "", "source": "github", "line_count": 547, "max_line_length": 87, "avg_line_length": 32.80438756855576, "alnum_prop": 0.5701627284886313, "repo_name": "great-expectations/great_expectations", "id": "330c3b32c15e08f092c687cdb550cfbe2e010def", "size": "18418", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "great_expectations/_version.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "23771" }, { "name": "Dockerfile", "bytes": "2388" }, { "name": "HTML", "bytes": "27311" }, { "name": "JavaScript", "bytes": "45960" }, { "name": "Jinja", "bytes": "66650" }, { "name": "Jupyter Notebook", "bytes": "816323" }, { "name": "Lua", "bytes": "3489" }, { "name": "Makefile", "bytes": "657" }, { "name": "Python", "bytes": "15728777" }, { "name": "Shell", "bytes": "2930" } ], "symlink_target": "" }
from object_recognition_msgs.msg import ObjectRecognitionAction, ObjectRecognitionGoal import actionlib import rospy import unittest def on_result(status, result): rospy.loginfo('Received result from ORK.') class TestActionlib(unittest.TestCase): def test_actionlib(self): rospy.init_node('ork_client') client = actionlib.SimpleActionClient('recognize_objects', ObjectRecognitionAction) client.wait_for_server() start = rospy.Time.now() # for checking the round trip time. goal = ObjectRecognitionGoal() # Sample region of interest for object detection (disabled by default) # goal.use_roi = True # goal.filter_limits = [-0.4, 0.4, -1.0, 0.2, 0.01, 1.5] client.send_goal(goal, done_cb=on_result) client.wait_for_result() # wait indefinitely for a result # print out the round trip time. rospy.loginfo('Time for 1 detection: %s', (rospy.Time.now() - start).to_sec()) if __name__ == '__main__': import rosunit rosunit.unitrun('ecto_object_recognition_ros', 'test_actionlib', TestActionlib)
{ "content_hash": "06d31f087bfeb5d192eff694252419b1", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 91, "avg_line_length": 32.73529411764706, "alnum_prop": 0.6738544474393531, "repo_name": "WalkingMachine/sara_commun", "id": "97077d25b91a8943ea8ce8cb6779e3d9962125a3", "size": "1135", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wm_ork/object_recognition_ros/test/test_client.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CMake", "bytes": "6113" } ], "symlink_target": "" }
from __future__ import absolute_import import os from launchpad import settings import tornado.web class MainHandler(tornado.web.RequestHandler): """ Return files from app and node_modules otherwise return index.html""" def initialize(self): self.mimetypes = settings.CONF_OPTIONS["mimetypes"] self.default_extension = settings.CONF_OPTIONS["defaultextension"] self.dir_base = "www/" @staticmethod def get_file_path(path): path_segment = path.split('?',1)[0] return os.path.join(settings.doc_dir, path_segment) def get(self, path): filetype = path.split('.')[-1] if filetype not in self.mimetypes: path = path + '.' + self.default_extension filetype = self.default_extension try: with open(self.get_file_path(self.dir_base + path), 'rb') as request_page: self.write(request_page.read()) self.set_status(200) if filetype in list(self.mimetypes.keys()): self.set_header("Content-Type", self.mimetypes[filetype]) except: with open(self.get_file_path(self.dir_base + 'index.html'), 'rb') as request_page: self.write(request_page.read()) self.set_status(200) def default(self, path): self.set_status(405) self.write({"error":{"title":"Method Not Allowed"}})
{ "content_hash": "568bf5b691f58c2f7c40738ca852cce9", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 94, "avg_line_length": 34.68292682926829, "alnum_prop": 0.6047819971870605, "repo_name": "totalknowledge/angular2-launchpad", "id": "16c46e2cd0cea13c0dcef640ee9793da784a82e8", "size": "1422", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "launchpad/handlers/mainhandler.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "80" }, { "name": "HTML", "bytes": "2385" }, { "name": "JavaScript", "bytes": "1646" }, { "name": "Python", "bytes": "18352" }, { "name": "TypeScript", "bytes": "6431" } ], "symlink_target": "" }
import argparse import os import sys from osa_toolkit import tools def args(arg_list): parser = argparse.ArgumentParser( usage='%(prog)s', description='OpenStack Ansible Configuration Generator', epilog='Licensed "Apache2.0"', ) parser.add_argument( '--base', '-b', help="Base file to be used.", ) parser.add_argument( '--conf_dir', '-c', help=("Directory of service-specific configuration files.\n" "Only files ending in *.aio will be processed"), ) parser.add_argument( '--output', '-o', help=("Path to combined output file, defaults to " "./openstack_user_config.yml"), default=os.path.join(os.getcwd(), 'openstack_user_config.yml') ) return vars(parser.parse_args(arg_list)) if __name__ == "__main__": script_args = args(sys.argv[1:]) config = tools.make_example_config( script_args['base'], script_args['conf_dir'] ) tools.write_example_config(script_args['output'], config)
{ "content_hash": "469599b30050f3d0159b19117a8a9b83", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 70, "avg_line_length": 24.2, "alnum_prop": 0.5775941230486685, "repo_name": "yanyao/openstack-ansible", "id": "64f47dbce2c522f67a83d825c3d666ab40153f53", "size": "1756", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "scripts/gen-config.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "206590" }, { "name": "Shell", "bytes": "93254" } ], "symlink_target": "" }
import os DEBUG = True TEMPLATE_DEBUG = True TEMPLATE_DIRS = (os.getcwd(),) SECRET_KEY = "not-so-secret" STATIC_URL = "/static/" INSTALLED_APPS = ('django.contrib.staticfiles',)
{ "content_hash": "19edb2ab0033b74c8b433e792b39740a", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 48, "avg_line_length": 20, "alnum_prop": 0.7, "repo_name": "pcx/shoot", "id": "e7cc6bf9d6bd7f592af92ee204c841a1d1997664", "size": "180", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shoot/django_settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "4765" } ], "symlink_target": "" }
import sys import os if __name__ == '__main__': bp_ = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) if bp_ not in [os.path.abspath(x) for x in sys.path]: sys.path.insert(0, bp_) pref = "../../../../../../src" sys.path.insert(0, pref) sys.path.insert(0, pref + "/delegate/geni/v3/rspecs/") sys.path.insert(0, pref + "/delegate/geni/v3/rspecs/tnrm") from commons import validate from advertisement_parser import TNRMv3AdvertisementParser def main(argv=None): if not argv: argv = sys.argv print 'Start the test environment' print '=== TNRMv3AdvertisementParser ===' rspec = TNRMv3AdvertisementParser("adv_rspec_example.xml") (result, error) = validate(rspec.get_rspec()) if not result: print "Validation failure: %s" % error else: print "Validation success!" print 'Bye Bye...' return True if __name__ == '__main__': sys.exit(main())
{ "content_hash": "c9dc6a3e0c7c8cc033d09543a03e2ef2", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 72, "avg_line_length": 27.65714285714286, "alnum_prop": 0.6115702479338843, "repo_name": "ict-felix/stack", "id": "31bc939e3e29a9acc9371f8b02d72ff2bd7fe7aa", "size": "1055", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "modules/resource/orchestrator/test/delegate/geni/v3/rspecs/tnrm/adv_examples.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "337811" }, { "name": "Elixir", "bytes": "17243" }, { "name": "Emacs Lisp", "bytes": "1098" }, { "name": "Groff", "bytes": "1735" }, { "name": "HTML", "bytes": "660363" }, { "name": "Java", "bytes": "18362" }, { "name": "JavaScript", "bytes": "838960" }, { "name": "Makefile", "bytes": "11581" }, { "name": "Perl", "bytes": "5416" }, { "name": "Python", "bytes": "8073455" }, { "name": "Shell", "bytes": "259720" } ], "symlink_target": "" }
def max_cropping(width, height, image_width, image_height, free_crop=False): if free_crop: return [0, 0, image_width, image_height] ratio = width / float(height) if image_width < image_height * ratio: # width fits fully, height needs to be cropped offset = int(round((image_height - (image_width / ratio)) / 2)) return [0, offset, image_width, image_height - offset] # height fits fully, width needs to be cropped offset = int(round((image_width - (image_height * ratio)) / 2)) return [offset, 0, image_width - offset, image_height]
{ "content_hash": "7d425918ac7ea9a40134a7b996f9babf", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 76, "avg_line_length": 45.46153846153846, "alnum_prop": 0.6446700507614214, "repo_name": "pgferretti/wable_test", "id": "e06f58723b09f98d64fd1e779c774b695b84a73e", "size": "591", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "image_cropping/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "119198" }, { "name": "HTML", "bytes": "39396" }, { "name": "JavaScript", "bytes": "120213" }, { "name": "Python", "bytes": "84516" }, { "name": "Shell", "bytes": "1564" } ], "symlink_target": "" }
import lldb from lldbsuite.test.lldbtest import * from lldbsuite.test.decorators import * import os class TestTargetSourceMap(TestBase): mydir = TestBase.compute_mydir(__file__) @no_debug_info_test def test_source_map(self): """Test target.source-map' functionality.""" def assertBreakpointWithSourceMap(src_path): # Set a breakpoint after we remap source and verify that it succeeds bp = target.BreakpointCreateByLocation(src_path, 2) self.assertEquals(bp.GetNumLocations(), 1, "make sure breakpoint was resolved with map") # Now make sure that we can actually FIND the source file using this # remapping: retval = lldb.SBCommandReturnObject() self.dbg.GetCommandInterpreter().HandleCommand("source list -f main.c -l 2", retval) self.assertTrue(retval.Succeeded(), "source list didn't succeed.") self.assertNotEqual(retval.GetOutput(), None, "We got no ouput from source list") self.assertTrue("return" in retval.GetOutput(), "We didn't find the source file...") # Set the target soure map to map "./" to the current test directory src_dir = self.getSourceDir() src_path = os.path.join(src_dir, "main.c") yaml_path = os.path.join(src_dir, "a.yaml") yaml_base, ext = os.path.splitext(yaml_path) obj_path = self.getBuildArtifact("main.o") self.yaml2obj(yaml_path, obj_path) # Create a target with the object file we just created from YAML target = self.dbg.CreateTarget(obj_path) # Set a breakpoint before we remap source and verify that it fails bp = target.BreakpointCreateByLocation(src_path, 2) self.assertEquals(bp.GetNumLocations(), 0, "make sure no breakpoints were resolved without map") valid_path = os.path.dirname(src_dir) valid_path2 = os.path.dirname(valid_path) invalid_path = src_dir + "invalid_path" invalid_path2 = src_dir + "invalid_path2" # We make sure the error message contains all the invalid paths self.expect( 'settings set target.source-map . "%s" . "%s" . "%s" . 
"%s' \ % (invalid_path, src_dir, invalid_path2, valid_path), substrs=[ 'error: the replacement path doesn\'t exist: "%s"' % (invalid_path), 'the replacement path doesn\'t exist: "%s"' % (invalid_path2), ], error=True, ) self.expect( 'settings show target.source-map', substrs=[ '[0] "." -> "%s"' % (src_dir), '[1] "." -> "%s"' % (valid_path), ], ) assertBreakpointWithSourceMap(src_path) # Attempts to replace an index to an invalid mapping should have no effect. # Modifications to valid mappings should work. self.expect( 'settings replace target.source-map 0 . "%s" . "%s"' % (invalid_path, valid_path2), substrs=[ 'error: the replacement path doesn\'t exist: "%s"' % (invalid_path), ], error=True, ) self.expect( 'settings show target.source-map', substrs=[ '[0] "." -> "%s"' % (src_dir), '[1] "." -> "%s"' % (valid_path2), ] ) assertBreakpointWithSourceMap(src_path) # Let's clear and add the mapping back with insert-after self.runCmd('settings remove target.source-map 0') self.expect( 'settings show target.source-map', substrs=['[0] "." -> "%s"' % (valid_path2)], ) self.expect( 'settings insert-after target.source-map 0 . "%s" . "%s" . "%s"' \ % (invalid_path, invalid_path2, src_dir), substrs=[ 'error: the replacement path doesn\'t exist: "%s"' % (invalid_path), 'the replacement path doesn\'t exist: "%s"' % (invalid_path2), ], error=True, ) self.expect( 'settings show target.source-map', substrs=[ '[0] "." -> "%s"' % (valid_path2), '[1] "." -> "%s"' % (src_dir), ] ) # Let's clear using remove and add the mapping in with append self.runCmd('settings remove target.source-map 1') self.expect( 'settings show target.source-map', substrs=[ '[0] "." -> "%s"' % (valid_path2), ] ) self.runCmd('settings clear target.source-map') self.expect( 'settings append target.source-map . "%s" . "%s" . 
"%s"' % (invalid_path, src_dir, invalid_path2), substrs=[ 'error: the replacement path doesn\'t exist: "%s"' % (invalid_path), 'the replacement path doesn\'t exist: "%s"' % (invalid_path2), ], error=True, ) self.expect( 'settings show target.source-map', substrs=[ '[0] "." -> "%s"' % (src_dir), ] ) assertBreakpointWithSourceMap(src_path)
{ "content_hash": "394786060f68dad5ba84c5705dfb5193", "timestamp": "", "source": "github", "line_count": 134, "max_line_length": 110, "avg_line_length": 39.507462686567166, "alnum_prop": 0.5345674348318852, "repo_name": "endlessm/chromium-browser", "id": "6457c766813eb7b4520ea862205bc850d77a7510", "size": "5294", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "third_party/llvm/lldb/test/API/functionalities/source-map/TestTargetSourceMap.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
"""Utilities for the reinforcement trainer.""" import sys sys.path.insert(0, '.') # nopep8 import asyncio import logging import os import multiprocessing import subprocess import fcntl from absl import flags from utils import * def expand_cmd_str(cmd): result = ' '.join(flags.FlagValues().read_flags_from_files(cmd)) if cmd[0] == 'mpiexec' or cmd[0] == 'mpirun': result = ' \\\n-host '.join(result.split(' -host ')) # avoid buffer too big to block I/O return result[:8192] def get_cmd_name(cmd): if cmd[0] == 'python' or cmd[0] == 'python3': path = cmd[1] for index in range(len(cmd)): if cmd[index] == 'bazel-bin/cc/selfplay': path = cmd[index] break if cmd[index] == 'bazel-bin/cc/eval': path = cmd[index] break elif cmd[0] == 'mpirun' or cmd[0] == 'mpiexec': for index in range(len(cmd)): if cmd[index] == 'train.py': path = cmd[index] break if cmd[index] == 'bazel-bin/cc/selfplay': path = cmd[index] break if cmd[index] == 'bazel-bin/cc/eval': path = cmd[index] break if cmd[index] == 'python' or cmd[index] == 'python3': path = cmd[index+1] else: path = cmd[0] return os.path.splitext(os.path.basename(path))[0] async def checked_run(*cmd): """Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout. Raises: RuntimeError: if the command returns a non-zero result. """ # Start the subprocess. logging.info('Running: %s', expand_cmd_str(cmd)) with logged_timer('{} finished'.format(get_cmd_name(cmd))): p = await asyncio.create_subprocess_exec( *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT) # Stream output from the process stdout. chunks = [] while True: chunk = await p.stdout.read(16 * 1024) if not chunk: break chunks.append(chunk) # Wait for the process to finish, check it was successful & build stdout. 
await p.wait() stdout = b''.join(chunks).decode()[:-1] if p.returncode: raise RuntimeError('Return code {} from process: {}\n{}'.format( p.returncode, expand_cmd_str(cmd), stdout)) return stdout async def checked_run_distributed(genvs, num_instance, hosts, proclists, numa_nodes, seed, log_path, *cmd): mpi_cmd = ['mpiexec', '-outfile-pattern', '{}/out-{}-{}-%r.txt'.format(log_path, get_cmd_name(cmd), seed)] for genv in genvs: mpi_cmd = mpi_cmd + ['-genv', genv] num_nodes = len(hosts) instance_per_node = num_instance // num_nodes instance_remaining = num_instance - num_nodes * instance_per_node for index in range(num_nodes): if index < instance_remaining: instance_to_launch = instance_per_node + 1 else: instance_to_launch = instance_per_node if index > 0: mpi_cmd = mpi_cmd + [':'] mpi_cmd = mpi_cmd + ['-host', hosts[index]] if proclists != None: mpi_cmd = mpi_cmd + ['-env', 'KMP_AFFINITY=granularity=fine,compact,1,{}'.format(proclists[index])] if numa_nodes != None: mpi_cmd = mpi_cmd + ['numactl', '-l', '-N', numa_nodes[index]] if num_instance > 1: mpi_cmd = mpi_cmd + ['python3', 'ml_perf/execute.py', '--num_instance={}'.format(instance_to_launch), '--'] mpi_cmd = mpi_cmd + [*cmd] if seed != None: # ensure different seed for different node mpi_cmd = mpi_cmd + ['--seed={}'.format(seed + index*1023779831)] result = await checked_run(*mpi_cmd) for index in range(num_nodes): filename = '{}/out-{}-{}-{}.txt'.format(log_path, get_cmd_name(cmd), seed, index) outfile = open(filename, 'r') result += outfile.read() outfile.close() return result def checked_run_mi(num_instance, *cmd): name = get_cmd_name(cmd) logging.debug('Running %s*%d: %s', name, num_instance, expand_cmd_str(cmd)) num_parallel_instance = int(multiprocessing.cpu_count()) procs=[None]*num_parallel_instance results = [""]*num_parallel_instance result_list = [] cur_instance = 0 # add new proc into procs while cur_instance < num_instance or not all ( proc is None for proc in procs): if None in procs and 
cur_instance < num_instance: index = procs.index(None) subproc_cmd = [ 'OMP_NUM_THREADS=1', 'KMP_AFFINITY=granularity=fine,proclist=[{}],explicit'.format( ','.join(str(i) for i in list(range( index, index+1)))), *cmd, '--instance_id={}'.format(cur_instance), ] subproc_cmd = ' '.join(subproc_cmd) if (cur_instance == 0): logging.debug("subproc_cmd = {}".format(subproc_cmd)) procs[index] = subprocess.Popen(subproc_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) proc_count = 0 for i in range(num_parallel_instance): if procs[i] != None: proc_count += 1 logging.debug('started instance {} in proc {}. proc count = {}'.format( cur_instance, index, proc_count)) # change stdout of the process to non-blocking # this is for collect output in a single thread flags = fcntl.fcntl(procs[index].stdout, fcntl.F_GETFL) fcntl.fcntl(procs[index].stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK) cur_instance += 1 for index in range(num_parallel_instance): if procs[index] != None: # collect proc output while True: try: line = procs[index].stdout.readline() if line == b'': break results[index] = results[index] + line.decode() except IOError: break ret_val = procs[index].poll() if ret_val == None: continue elif ret_val != 0: logging.info(results[index]) raise RuntimeError( 'Non-zero return code (%d) executing %s' % ( ret_val, subproc_cmd)) if index == 0: logging.debug(results[index]) result_list.append(results[index]) results[index] = "" procs[index] = None proc_count = 0 for i in range(num_parallel_instance): if procs[i] != None: proc_count += 1 logging.debug('proc {} finished. proc count = {}'.format( index, proc_count)) time.sleep(0.001) # avoid busy loop return result_list def wait(aws): """Waits for all of the awaitable objects (e.g. coroutines) in aws to finish. All the awaitable objects are waited for, even if one of them raises an exception. 
When one or more awaitable raises an exception, the exception from the awaitable with the lowest index in the aws list will be reraised. Args: aws: a single awaitable, or list awaitables. Returns: If aws is a single awaitable, its result. If aws is a list of awaitables, a list containing the of each awaitable in the list. Raises: Exception: if any of the awaitables raises. """ aws_list = aws if isinstance(aws, list) else [aws] results = asyncio.get_event_loop().run_until_complete(asyncio.gather( *aws_list, return_exceptions=True)) # If any of the cmds failed, re-raise the error. for result in results: if isinstance(result, Exception): raise result return results if isinstance(aws, list) else results[0]
{ "content_hash": "aa144cb931311a28d06c4a82825088e4", "timestamp": "", "source": "github", "line_count": 237, "max_line_length": 107, "avg_line_length": 32.270042194092824, "alnum_prop": 0.5954497907949791, "repo_name": "mlperf/training_results_v0.6", "id": "bf30f7528969a52978354e850c63ffac85906840", "size": "8224", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Intel/benchmarks/minigo/implementations/tensorflow/ml_perf/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ANTLR", "bytes": "1731" }, { "name": "Batchfile", "bytes": "13941" }, { "name": "C", "bytes": "208630" }, { "name": "C++", "bytes": "10999411" }, { "name": "CMake", "bytes": "129712" }, { "name": "CSS", "bytes": "64767" }, { "name": "Clojure", "bytes": "396764" }, { "name": "Cuda", "bytes": "2272433" }, { "name": "Dockerfile", "bytes": "67820" }, { "name": "Groovy", "bytes": "62557" }, { "name": "HTML", "bytes": "19753082" }, { "name": "Java", "bytes": "166294" }, { "name": "JavaScript", "bytes": "71846" }, { "name": "Julia", "bytes": "408765" }, { "name": "Jupyter Notebook", "bytes": "2713169" }, { "name": "Lua", "bytes": "4430" }, { "name": "MATLAB", "bytes": "34903" }, { "name": "Makefile", "bytes": "115694" }, { "name": "Perl", "bytes": "1535873" }, { "name": "Perl 6", "bytes": "7280" }, { "name": "PowerShell", "bytes": "6150" }, { "name": "Python", "bytes": "24905683" }, { "name": "R", "bytes": "351865" }, { "name": "Roff", "bytes": "293052" }, { "name": "Scala", "bytes": "1189019" }, { "name": "Shell", "bytes": "794096" }, { "name": "Smalltalk", "bytes": "3497" }, { "name": "TypeScript", "bytes": "361164" } ], "symlink_target": "" }
"""A collection of sphinx docstrings from the wild.""" import ast FunctionDef = ast.FunctionDef if hasattr(ast, 'AsyncFunctionDef'): FunctionDef = (ast.FunctionDef, ast.AsyncFunctionDef) def publish_msgstr(app, source, source_path, source_line, config, settings): # From https://github.com/sphinx-doc/sphinx # File: sphinx/transforms/il8n.py """Publish msgstr (single line) into docutils document :param sphinx.application.Sphinx app: sphinx application :param unicode source: source text :param unicode source_path: source path for warning indication :param source_line: source line for warning indication :param sphinx.config.Config config: sphinx config :param docutils.frontend.Values settings: docutils settings :return: document :rtype: docutils.nodes.document """ ... # Expected item head to end with TokenType.COLON but was TokenType.WORD 'app' # noqa def _strip_basic_auth(url): # From https://github.com/sphinx-doc/sphinx # File: sphinx/ext/intersphinx.py """Returns *url* with basic auth credentials removed. Also returns the basic auth username and password if they're present in *url*. E.g.: https://user:pass@example.com => https://example.com *url* need not include basic auth credentials. :param url: url which may or may not contain basic auth credentials :type url: ``str`` :return: *url* with any basic auth creds removed :rtype: ``str`` """ ... def extract_original_messages(self): # From https://github.com/sphinx-doc/sphinx # File: sphinx/addnodes.py """Extract translation messages. :returns: list of extracted messages or messages generator """ ... def read_requirements(fh, resolve=False): # From https://github.com/pypa/pipenv # File: pipenv/patched/safety/util.py """ Reads requirements from a file like object and (optionally) from referenced files. :param fh: file like object to read from :param resolve: boolean. resolves referenced files. :return: generator """ # noqa ... 
def copytree(self, destination, symlinks=False): # File: sphinx/testing/path.py """ Recursively copy a directory to the given `destination`. If the given `destination` does not exist it will be created. :param symlinks: If ``True`` symbolic links in the source tree result in symbolic links in the destination tree otherwise the contents of the files pointed to by the symbolic links are copied. """ # Expected item to start with TokenType.COLON but was TokenType.INDENT def rmtree(self, ignore_errors=False, onerror=None): # File: sphinx/testing/path.py """ Removes the file or directory and any files or directories it may contain. :param ignore_errors: If ``True`` errors are silently ignored, otherwise an exception is raised in case an error occurs. :param onerror: A callback which gets called with the arguments `func`, `path` and `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove` or :func:`os.rmdir`. `path` is the argument to the function which caused it to fail and `exc_info` is a tuple as returned by :func:`sys.exc_info`. """ # Expected item to start with TokenType.COLON but was TokenType.INDENT def test_params(request): # File: sphinx/testing/fixtures.py """ test parameters that is specified by 'pytest.mark.test_params' :param Union[str] shared_result: If the value is provided, app._status and app._warning objects will be shared in the parametrized test functions and/or test functions that have same 'shared_result' value. **NOTE**: You can not specify shared_result and srcdir in same time. """ # Expected item head to end with TokenType.COLON but was TokenType.WORD 'shared_result' def add_uids(doctree, condition): # File: sphinx/versioning.py """Add a unique id to every node in the `doctree` which matches the condition and yield the nodes. :param doctree: A :class:`docutils.nodes.document` instance. :param condition: A callable which returns either ``True`` or ``False`` for a given node. 
""" # Expected item to start with TokenType.COLON but was TokenType.INDENT def merge_doctrees(old, new, condition): # File: sphinx/versioning.py """Merge the `old` doctree with the `new` one while looking at nodes matching the `condition`. Each node which replaces another one or has been added to the `new` doctree will be yielded. :param condition: A callable which returns either ``True`` or ``False`` for a given node. """ # Expected item to start with TokenType.COLON but was TokenType.INDENT def _read_from_url(url, config=None): # File: sphinx/ext/intersphinx.py """Reads data from *url* with an HTTP *GET*. This function supports fetching from resources which use basic HTTP auth as laid out by RFC1738 § 3.1. See § 5 for grammar definitions for URLs. .. seealso: https://www.ietf.org/rfc/rfc1738.txt :param url: URL of an HTTP resource :type url: ``str`` :return: data read from resource described by *url* :rtype: ``file``-like object """ # Expected item to start with TokenType.COLON but was TokenType.NEWLINE def _get_safe_url(url): # File: sphinx/ext/intersphinx.py """Gets version of *url* with basic auth passwords obscured. This function returns results suitable for printing and logging. E.g.: https://user:12345@example.com => https://user@example.com :param url: a url :type url: ``str`` :return: *url* with password removed :rtype: ``str`` """ # Expected item to start with TokenType.COLON but was TokenType.NEWLINE def find_catalog_source_files(*args): # File: sphinx/util/i18n.py """ :param list locale_dirs: list of path as `['locale_dir1', 'locale_dir2', ...]` to find translation catalogs. Each path contains a structure such as `<locale>/LC_MESSAGES/domain.po`. :param str locale: a language as `'en'` :param list domains: list of domain names to get. If empty list or None is specified, get all domain names. default is None. :param boolean gettext_compact: * False: keep domains directory structure (default). 
* True: domains in the sub directory will be merged into 1 file. :param boolean force_all: Set True if you want to get all catalogs rather than updated catalogs. default is False. :return: [CatalogInfo(), ...] """ # Expected item head to end with TokenType.COLON but was TokenType.WORD 'locale' def get_full_module_name(node): # File: sphinx/util/nodes.py """ return full module dotted path like: 'docutils.nodes.paragraph' :param nodes.Node node: target node :return: full module dotted path """ # Expected item head to end with TokenType.COLON but was TokenType.WORD 'node' def set_application(self, app): # File: sphinx/parsers.py """set_application will be called from Sphinx to set app and other instance variables :param sphinx.application.Sphinx app: Sphinx application object """ # Expected item head to end with TokenType.COLON but was TokenType.WORD 'app' def write_bytes(sef, bytes, append=False): # File: sphinx/testing/path.py """ Writes the given `bytes` to the file. :param append: If ``True`` given `bytes` are added at the end of the file. """ # Expected item to start with TokenType.COLON but was TokenType.INDENT def repr_domxml(node, length=80): # File: sphinx/util/nodes.py """ return DOM XML representation of the specified node like: '<paragraph translatable="False"><inline classes="versionmodified">New in version...' :param nodes.Node node: target node :param int length: length of return value to be striped. if false-value is specified, repr_domxml returns full of DOM XML representation. :return: DOM XML representation """ # Expected item head to end with TokenType.COLON but was TokenType.WORD 'node' def docstrings(): """Get all of the docstrings in this file (including this one.) :return: The docstrings in this file. :rtype: List[str] """ with open(__file__, 'r') as fin: data = fin.read() this_script = ast.parse(data) functions = [x for x in this_script.body if isinstance(x, FunctionDef)] return list(map(ast.get_docstring, functions))
{ "content_hash": "afc99115f17a0aae7cb8245638530197", "timestamp": "", "source": "github", "line_count": 260, "max_line_length": 91, "avg_line_length": 33.26538461538462, "alnum_prop": 0.6762631518094577, "repo_name": "terrencepreilly/darglint", "id": "e16b22526ed8f9c784b3998ceae3e9b559452e77", "size": "8651", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/sphinx_docstrings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1960" }, { "name": "Elm", "bytes": "25621" }, { "name": "HTML", "bytes": "391" }, { "name": "Makefile", "bytes": "1145" }, { "name": "Python", "bytes": "678190" } ], "symlink_target": "" }
# Auto-generated Django migration (2019-05-16) for the accounts app:
# re-attaches the project's custom ``DiventiUserManager`` as the default
# ``objects`` manager on the DiventiUser model.
import diventi.accounts.models
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        # Applies on top of the previous accounts migration.
        ('accounts', '0146_auto_20190516_0901'),
    ]

    operations = [
        # Replace the model's manager list so ``DiventiUser.objects``
        # resolves to the custom manager.
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
{ "content_hash": "1624b64da535c266ed1dd955921de969", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 74, "avg_line_length": 22.38888888888889, "alnum_prop": 0.5831265508684863, "repo_name": "flavoi/diventi", "id": "747eb84cda00f23b3e6582757d7c5850de64c7dd", "size": "452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "diventi/accounts/migrations/0147_auto_20190516_0907.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "385265" }, { "name": "Procfile", "bytes": "46" }, { "name": "Python", "bytes": "826530" } ], "symlink_target": "" }
"""Read a DHT temperature/humidity sensor and publish the reading to AWS IoT."""
import json
import logging

import awsiot
import Adafruit_DHT

# Supported DHT sensor types (DHT22 and AM2302 share a protocol).
DHT11 = 11
DHT22 = 22
AM2302 = 22
SENSORS = [DHT11, DHT22, AM2302]


def pub(temp, humid):
    """Publish a reading to the optional user topics and to the thing shadow.

    Relies on the module-level ``args`` and ``publisher`` created in
    ``__main__`` before this is called.
    """
    # Optional free-form topics supplied on the command line.
    if args.topic is not None and len(args.topic) > 0:
        for t in args.topic:
            publisher.publish(t, json.dumps({"temperature": temp,
                                             "humidity": humid,
                                             awsiot.MESSAGE: "temperature: {} humidity: {}".format(temp, humid)}))
    # Always update the device shadow's reported state.
    publisher.publish(awsiot.iot_thing_topic(args.thing),
                      awsiot.iot_payload(awsiot.REPORTED, {'temperature': temp, 'humidity': humid}))


if __name__ == "__main__":
    parser = awsiot.iot_arg_parser()
    parser.add_argument("-p", "--pin", help="gpio pin (using BCM numbering)", type=int, required=True)
    parser.add_argument("-y", "--dht_type", help="DHT sensor type %s" % SENSORS, type=int,
                        default=Adafruit_DHT.DHT22)
    args = parser.parse_args()
    logging.basicConfig(filename=awsiot.LOG_FILE, level=args.log_level, format=awsiot.LOG_FORMAT)
    publisher = awsiot.MQTT(args.endpoint, args.rootCA, args.cert, args.key)
    # read_retry retries the (flaky) one-wire read; returns (None, None) on failure.
    humidity, temperature = Adafruit_DHT.read_retry(args.dht_type, args.pin)
    if humidity is not None and temperature is not None:
        logging.info("DHT {} temperature {} humidity {}".format(args.pin, temperature, humidity))
        pub(temperature, humidity)
    else:
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Can't read temperature/humidity from DHT {}".format(args.pin))
    publisher.disconnect()
{ "content_hash": "4d7529b51314e080f92d78a4ef0e594a", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 118, "avg_line_length": 39.68421052631579, "alnum_prop": 0.6412466843501327, "repo_name": "stevewoolley/IoT", "id": "50f13ba00a29797d092d66eb82fba94831baeed6", "size": "1531", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dht_pub.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "61090" }, { "name": "Shell", "bytes": "1719" } ], "symlink_target": "" }
from django import template
from django.conf import settings
from django.db import models

from news.models import Article, Section

import re

register = template.Library()


@register.inclusion_tag('news/tags/section_snippet.html')
def render_section_list():
    """Render the full list of news sections."""
    return {'section_list': Section.objects.all()}


@register.inclusion_tag('news/tags/monthly_archive_snippet.html')
def render_month_links():
    """Render archive links for every month that has articles."""
    return {
        'dates': Article.objects.dates('published', 'month'),
    }


# NOTE(fix): this function was also named ``render_month_links``, so it
# silently replaced the monthly tag in the library. Renamed to match intent.
@register.inclusion_tag('news/tags/yearly_archive_snippet.html')
def render_year_links():
    """Render archive links for every year that has articles."""
    return {
        'dates': Article.objects.dates('published', 'year'),
    }


class LatestArticlesNode(template.Node):
    """Template node that stores the latest article(s) in a context variable."""

    def __init__(self, limit, var_name):
        self.limit = limit
        self.var_name = var_name

    def render(self, context):
        article_list = Article.objects.current()
        if article_list and (int(self.limit) == 1):
            # With a limit of one, store the single article itself, not a list.
            context[self.var_name] = article_list[0]
        else:
            context[self.var_name] = article_list[:int(self.limit)]
        return ''


@register.tag
def latest_news_articles(parser, token):
    """
    Gets any number of latest articles and stores them in a variable.

    Syntax::

        latest_news_articles [limit] as [var_name]

    Example usage::

        latest_news_articles 10 as latest_articles_list
    """
    bits = token.contents.split()
    return LatestArticlesNode(bits[1], bits[-1])


class NewsSectionsNode(template.Node):
    """Template node that stores all news sections in a context variable."""

    def __init__(self, var_name):
        self.var_name = var_name

    def render(self, context):
        # Fix: was ``Sections.objects.all()`` (NameError); the model is Section.
        section_list = Section.objects.all()
        context[self.var_name] = section_list
        return ''


@register.tag
def get_news_sections(parser, token):
    """
    Gets all news sections.

    Syntax::

        get_news_sections as [var_name]

    Example usage::

        get_news_sections as section_list
    """
    # Fix: was ``token.split.contents()`` (AttributeError at parse time).
    bits = token.split_contents()
    return NewsSectionsNode(bits[-1])


class LatestSectionArticlesNode(template.Node):
    """Template node that stores a section's most recent article."""

    def __init__(self, section, context_var):
        self.section = template.Variable(section)
        self.context_var = context_var

    def render(self, context):
        section = self.section.resolve(context)
        try:
            article = Article.objects.filter(
                section__slug=section
            ).latest("published")
        except Article.DoesNotExist:
            # No article in this section yet; expose None to the template.
            article = None
        context[self.context_var] = article
        return u""


@register.tag
def latest_section_articles(parser, token):
    """
    Store the latest article of the given section in a context variable.

    Example usage::

        latest_section_articles "articles" as latest_section_articles
    """
    bits = token.split_contents()
    return LatestSectionArticlesNode(bits[1], bits[-1])
{ "content_hash": "669d8c9aa4c2ee8cd6669bf52631467a", "timestamp": "", "source": "github", "line_count": 113, "max_line_length": 68, "avg_line_length": 24.761061946902654, "alnum_prop": 0.6411722659042173, "repo_name": "ilendl2/chrisdev-cookiecutter", "id": "4ca20092342b2349b9f30c460667ccf490a1a30a", "size": "2798", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/news/templatetags/news_tags.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "21545" }, { "name": "JavaScript", "bytes": "8393" }, { "name": "PHP", "bytes": "238" }, { "name": "Perl", "bytes": "84" }, { "name": "Python", "bytes": "168424" }, { "name": "Shell", "bytes": "5146" } ], "symlink_target": "" }
def nn(ns=None, name=None):
    """Build a namespace-aware entity name in Clark notation.

    Call forms:
        nn(ns, name)   -> "{ns}name"
        nn(name)       -> "name"  (a single positional argument is treated
                                   as a local name without a namespace)
        nn(name=name)  -> "name"

    Raises:
        ValueError: if no name is provided at all.
    """
    # namespace + name given
    if ns is not None and name is not None:
        return "{%s}%s" % (ns, name)

    # Single positional input -> assume it is a name without a namespace.
    # Fix: the original did ``name = ns; ns = name``, which left ``ns`` set
    # and fell through to the ValueError below for nn("foo").
    if ns is not None and name is None:
        name, ns = ns, None

    # name without name-space
    if ns is None and name is not None:
        return "%s" % (name)

    # invalid input (also fixes the 'requred' typo in the message)
    raise ValueError("Missing the required input name!")
{ "content_hash": "a7d2730df3a08eb1c75fef63e84b01c1", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 66, "avg_line_length": 29.36842105263158, "alnum_prop": 0.5519713261648745, "repo_name": "DREAM-ODA-OS/tools", "id": "e8aa2f38fbf9ac0beefc79460ba41f37365f387a", "size": "2078", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metadata/xml_utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "629815" } ], "symlink_target": "" }
"""Discovery and identification of Python interpreters on the host."""

try:
  from numbers import Integral
except ImportError:
  # Very old Python 2 fallback where ``numbers`` is unavailable.
  Integral = (int, long)

import os
import re
import subprocess
import sys

from collections import defaultdict

if not hasattr(__builtins__, 'any'):
  # Fallback implementation of the ``any`` builtin (pre-2.5 interpreters).
  def any(genexpr):
    for expr in genexpr:
      if expr:
        return True
    return False


# Probe script fed to a candidate binary's stdin; prints
# "<flavor> <major> <minor> <patch>" for from_id_string() to parse.
ID_PY = b"""
import sys

if hasattr(sys, 'subversion'):
  subversion = sys.subversion[0]
else:
  subversion = 'CPython'

print("%s %s %s %s" % (
  subversion,
  sys.version_info[0],
  sys.version_info[1],
  sys.version_info[2]))
"""


class PythonIdentity(object):
  """An interpreter flavor (e.g. 'CPython') plus a (major, minor, patch) version."""

  class InvalidError(Exception):
    """Raised when an identity string cannot be parsed."""
    pass

  @staticmethod
  def get():
    """Return the identity of the currently running interpreter."""
    if hasattr(sys, 'subversion'):
      subversion = sys.subversion[0]
    else:
      subversion = 'CPython'
    return PythonIdentity(subversion, sys.version_info[0], sys.version_info[1],
        sys.version_info[2])

  @staticmethod
  def from_id_string(id_string):
    """Parse the 'flavor major minor patch' line produced by ID_PY.

    Raises:
      PythonIdentity.InvalidError: if the string does not have four fields.
    """
    values = id_string.split()
    if len(values) != 4:
      raise PythonIdentity.InvalidError("Invalid id string: %s" % id_string)
    return PythonIdentity(str(values[0]), int(values[1]), int(values[2]), int(values[3]))

  def __init__(self, interpreter, major, minor, subminor):
    for var in (major, minor, subminor):
      assert isinstance(var, Integral)
    self._interpreter = interpreter
    self._version = (major, minor, subminor)

  @property
  def interpreter(self):
    return self._interpreter

  @property
  def version(self):
    return self._version

  def hashbang(self):
    """Return a '#!/usr/bin/env pythonX.Y' line for this identity."""
    # TODO(wickman) Must be a better way.
    return '#!/usr/bin/env python%s.%s' % self._version[0:2]

  def __str__(self):
    return '%s-%s.%s.%s' % (self._interpreter,
      self._version[0], self._version[1], self._version[2])

  def __repr__(self):
    return 'PythonIdentity("%s", %s, %s, %s)' % (
      self._interpreter,
      self._version[0],
      self._version[1],
      self._version[2])


class PythonInterpreter(object):
  """A python binary on disk together with its PythonIdentity."""

  # Basenames that look like python interpreters.
  REGEXEN = (
    re.compile(r'python$'), re.compile(r'python[23].[0-9]$'),
    re.compile(r'pypy$'), re.compile(r'pypy-1.[0-9]$'),
  )

  @staticmethod
  def get():
    """Return the interpreter currently running this code."""
    return PythonInterpreter(sys.executable, interpreter=PythonIdentity.get())

  @staticmethod
  def all(paths=None):
    """Find and filter interpreters on `paths` (defaults to $PATH entries)."""
    if paths is None:
      # Evaluated at call time. The original used a def-time default of
      # os.getenv('PATH').split(':'), which crashed at *import* time when
      # PATH was unset.
      paths = os.getenv('PATH', '').split(':')
    return PythonInterpreter.filter(PythonInterpreter.find(paths))

  @staticmethod
  def from_binary(binary):
    """Identify `binary` by piping the ID_PY probe into it."""
    po = subprocess.Popen([binary], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    so, _ = po.communicate(ID_PY)
    return PythonInterpreter(binary, PythonIdentity.from_id_string(so.decode('utf8')))

  def __init__(self, binary=sys.executable, interpreter=None):
    """
    :binary => binary of python interpreter
    (if None, default to sys.executable)
    """
    self._binary = binary
    if binary == sys.executable and interpreter is None:
      self._identity = PythonIdentity.get()
    else:
      # Probe the binary only when the caller did not supply an identity.
      self._identity = interpreter or PythonInterpreter.from_binary(binary).identity()

  def binary(self):
    return self._binary

  def identity(self):
    return self._identity

  def __repr__(self):
    return 'PythonInterpreter(%r, %r)' % (self._binary, self._identity)

  @staticmethod
  def find(paths):
    """
    Given a list of files or directories, try to detect python interpreters
    amongst them.  Returns a list of PythonInterpreter objects.
    """
    def expand_path(path):
      # A file stands for itself; a directory yields its entries.
      if os.path.isfile(path):
        return [path]
      elif os.path.isdir(path):
        return (os.path.join(path, fn) for fn in os.listdir(path))
      return []

    pythons = []
    for path in paths:
      for fn in expand_path(path):
        basefile = os.path.basename(fn)
        if any(matcher.match(basefile) is not None for matcher in PythonInterpreter.REGEXEN):
          try:
            pythons.append(PythonInterpreter.from_binary(fn))
          except Exception:
            # The name matched but the binary could not be executed or
            # identified; skip it. (Fix: was a bare ``except:`` that also
            # swallowed KeyboardInterrupt/SystemExit.)
            continue
    return pythons

  @staticmethod
  def filter(pythons):
    """
    Given a map of python interpreters in the format provided by
    PythonInterpreter.find(), filter out duplicate versions and versions we
    would prefer not to use (CPython < 2.6 / < 3.2).

    Returns a map in the same format as find.
    """
    good = []

    MAJOR, MINOR, SUBMINOR = range(3)
    def version_filter(version):
      return (version[MAJOR] == 2 and version[MINOR] >= 6 or
              version[MAJOR] == 3 and version[MINOR] >= 2)

    all_versions = set(interpreter.identity().version for interpreter in pythons)
    good_versions = filter(version_filter, all_versions)

    for version in good_versions:
      # For each candidate, use the latest version we find on the filesystem.
      candidates = defaultdict(list)
      for interp in pythons:
        if interp.identity().version == version:
          candidates[interp.identity().interpreter].append(interp)
      for interp_class in candidates:
        candidates[interp_class].sort(
            key=lambda interp: os.path.getmtime(interp.binary()), reverse=True)
        good.append(candidates[interp_class].pop(0))

    return good
{ "content_hash": "209a57e35a59cdf0ed6e54ffeea04a76", "timestamp": "", "source": "github", "line_count": 176, "max_line_length": 100, "avg_line_length": 28.556818181818183, "alnum_prop": 0.6508157580580979, "repo_name": "foursquare/commons-old", "id": "5433bc09cfe00bbbd02703f46955039f77ca63c6", "size": "5026", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/python/twitter/common/python/interpreter.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "2164475" }, { "name": "Python", "bytes": "1285839" }, { "name": "Scala", "bytes": "24999" }, { "name": "Shell", "bytes": "6233" }, { "name": "Smalltalk", "bytes": "10614" } ], "symlink_target": "" }
""" P1 tests for Account """ # Import Local Modules from marvin.cloudstackTestCase import cloudstackTestCase from marvin.lib.utils import (random_gen, cleanup_resources) from marvin.lib.base import (Domain, Account, ServiceOffering, VirtualMachine, Network, User, NATRule, Template, PublicIPAddress) from marvin.lib.common import (get_domain, get_zone, get_template, list_accounts, list_virtual_machines, list_service_offering, list_templates, list_users, get_builtin_template_info, wait_for_cleanup) from nose.plugins.attrib import attr from marvin.cloudstackException import CloudstackAPIException import time class Services: """Test Account Services """ def __init__(self): self.services = { "domain": { "name": "Domain", }, "account": { "email": "test@test.com", "firstname": "Test", "lastname": "User", "username": "test", # Random characters are appended for unique # username "password": "fr3sca", }, "user": { "email": "user@test.com", "firstname": "User", "lastname": "User", "username": "User", # Random characters are appended for unique # username "password": "fr3sca", }, "service_offering": { "name": "Tiny Instance", "displaytext": "Tiny Instance", "cpunumber": 1, "cpuspeed": 100, # in MHz "memory": 128, # In MBs }, "virtual_machine": { "displayname": "Test VM", "username": "root", "password": "password", "ssh_port": 22, "hypervisor": 'XenServer', # Hypervisor type should be same as # hypervisor type of cluster "privateport": 22, "publicport": 22, "protocol": 'TCP', }, "template": { "displaytext": "Public Template", "name": "Public template", "ostype": 'CentOS 5.3 (64-bit)', "url": "", "hypervisor": '', "format": '', "isfeatured": True, "ispublic": True, "isextractable": True, "templatefilter": "self" }, "natrule": { "publicport": 22, "privateport": 22, "protocol": 'TCP', }, "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, } class TestAccounts(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestAccounts, 
cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.template = get_template( cls.api_client, cls.zone.id, cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"] ) cls._cleanup = [cls.service_offering] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created accounts, domains etc cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_create_account(self): """Test Create Account and user for that account """ # Validate the following # 1. Create an Account. Verify the account is created. # 2. Create User associated with that account. 
Verify the created user # Create an account account = Account.create( self.apiclient, self.services["account"] ) self.debug("Created account: %s" % account.name) self.cleanup.append(account) list_accounts_response = list_accounts( self.apiclient, id=account.id ) self.assertEqual( isinstance(list_accounts_response, list), True, "Check list accounts for valid data" ) self.assertNotEqual( len(list_accounts_response), 0, "Check List Account response" ) account_response = list_accounts_response[0] self.assertEqual( account.accounttype, account_response.accounttype, "Check Account Type of Created account" ) self.assertEqual( account.name, account_response.name, "Check Account Name of Created account" ) # Create an User associated with account user = User.create( self.apiclient, self.services["user"], account=account.name, domainid=account.domainid ) self.debug("Created user: %s" % user.id) list_users_response = list_users( self.apiclient, id=user.id ) self.assertEqual( isinstance(list_users_response, list), True, "Check list users for valid data" ) self.assertNotEqual( len(list_users_response), 0, "Check List User response" ) user_response = list_users_response[0] self.assertEqual( user.username, user_response.username, "Check username of Created user" ) self.assertEqual( user.state, user_response.state, "Check state of created user" ) return class TestRemoveUserFromAccount(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super( TestRemoveUserFromAccount, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.template = get_template( cls.api_client, cls.zone.id, cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id cls.service_offering = ServiceOffering.create( cls.api_client, 
cls.services["service_offering"] ) # Create an account cls.account = Account.create( cls.api_client, cls.services["account"] ) cls._cleanup = [cls.account, cls.service_offering, ] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created instance, users etc cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_user_remove_VM_running(self): """Test Remove one user from the account """ # Validate the following # 1. Create an account with 2 users. # 2. Start 2 VMs; one for each user of the account # 3. Remove one user from the account. Verify that account # still exists. # 4. 
Verify that VM started by the removed user are still running # Create an User associated with account and VMs user_1 = User.create( self.apiclient, self.services["user"], account=self.account.name, domainid=self.account.domainid ) self.debug("Created user: %s" % user_1.id) user_2 = User.create( self.apiclient, self.services["user"], account=self.account.name, domainid=self.account.domainid ) self.debug("Created user: %s" % user_2.id) self.cleanup.append(user_2) vm_1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deployed VM in account: %s, ID: %s" % ( self.account.name, vm_1.id )) self.cleanup.append(vm_1) vm_2 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deployed VM in account: %s, ID: %s" % ( self.account.name, vm_2.id )) self.cleanup.append(vm_2) # Remove one of the user self.debug("Deleting user: %s" % user_1.id) user_1.delete(self.apiclient) # Account should exist after deleting user accounts_response = list_accounts( self.apiclient, id=self.account.id ) self.assertEqual( isinstance(accounts_response, list), True, "Check for valid list accounts response" ) self.assertNotEqual( len(accounts_response), 0, "Check List Account response" ) vm_response = list_virtual_machines( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(vm_response, list), True, "Check for valid list VM response" ) self.assertNotEqual( len(vm_response), 0, "Check List VM response" ) # VMs associated with that account should be running for vm in vm_response: self.assertEqual( vm.state, 'Running', "Check state of VMs associated with account" ) return class TestNonRootAdminsPrivileges(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient 
= super( TestNonRootAdminsPrivileges, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype # Create an account, domain etc cls.domain = Domain.create( cls.api_client, cls.services["domain"], ) cls.account = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain.id ) cls._cleanup = [ cls.account, cls.domain ] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created accounts cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_non_root_admin_Privileges(self): """Test to verify Non Root admin previleges""" # Validate the following # 1. Create few accounts/users in ROOT domain # 2. Verify listAccounts API gives only accounts associated with new # domain. 
# Create accounts for ROOT domain account_1 = Account.create( self.apiclient, self.services["account"] ) self.debug("Created account: %s" % account_1.name) self.cleanup.append(account_1) account_2 = Account.create( self.apiclient, self.services["account"] ) self.debug("Created account: %s" % account_2.name) self.cleanup.append(account_2) accounts_response = list_accounts( self.apiclient, domainid=self.domain.id, listall=True ) self.assertEqual( isinstance(accounts_response, list), True, "Check list accounts response for valid data" ) self.assertEqual( len(accounts_response), 1, "Check List accounts response" ) # Verify only account associated with domain is listed for account in accounts_response: self.assertEqual( account.domainid, self.domain.id, "Check domain ID of account" ) return class TestServiceOfferingSiblings(cloudstackTestCase): @classmethod def setUpClass(cls): cls.api_client = super( TestServiceOfferingSiblings, cls ).getClsTestClient().getApiClient() cls.services = Services().services # Create Domains, accounts etc cls.domain_1 = Domain.create( cls.api_client, cls.services["domain"] ) cls.domain_2 = Domain.create( cls.api_client, cls.services["domain"] ) cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"], domainid=cls.domain_1.id ) # Create account for doamin_1 cls.account_1 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_1.id ) # Create an account for domain_2 cls.account_2 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_2.id ) cls._cleanup = [ cls.account_1, cls.account_2, cls.service_offering, cls.domain_1, cls.domain_2, ] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = 
self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created domains, accounts cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_service_offering_siblings(self): """Test to verify service offerings at same level in hierarchy""" # Validate the following # 1. Verify service offering is visible for domain_1 # 2. Verify service offering is not visible for domain_2 service_offerings = list_service_offering( self.apiclient, domainid=self.domain_1.id ) self.assertEqual( isinstance(service_offerings, list), True, "Check if valid list service offerings response" ) self.assertNotEqual( len(service_offerings), 0, "Check List Service Offerings response" ) for service_offering in service_offerings: self.debug("Validating service offering: %s" % service_offering.id) self.assertEqual( service_offering.id, self.service_offering.id, "Check Service offering ID for domain" + str(self.domain_1.name) ) # Verify private service offering is not visible to other domain service_offerings = list_service_offering( self.apiclient, domainid=self.domain_2.id ) self.assertEqual( service_offerings, None, "Check List Service Offerings response for other domain" ) return class TestServiceOfferingHierarchy(cloudstackTestCase): @classmethod def setUpClass(cls): cls.api_client = super( TestServiceOfferingHierarchy, cls ).getClsTestClient().getApiClient() cls.services = Services().services # Create domain, service offerings etc cls.domain_1 = Domain.create( cls.api_client, cls.services["domain"] ) cls.domain_2 = Domain.create( cls.api_client, cls.services["domain"], parentdomainid=cls.domain_1.id ) cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"], domainid=cls.domain_1.id ) # Create account for doamin_1 
cls.account_1 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_1.id ) # Create an account for domain_2 cls.account_2 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_2.id ) cls._cleanup = [ cls.account_2, cls.domain_2, cls.service_offering, cls.account_1, cls.domain_1, ] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created instance, volumes and snapshots cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_service_offering_hierarchy(self): """Test to verify service offerings at same level in hierarchy""" # Validate the following # 1. Verify service offering is visible for domain_1 # 2. 
Verify service offering is also visible for domain_2 service_offerings = list_service_offering( self.apiclient, domainid=self.domain_1.id ) self.assertEqual( isinstance(service_offerings, list), True, "Check List Service Offerings for a valid response" ) self.assertNotEqual( len(service_offerings), 0, "Check List Service Offerings response" ) for service_offering in service_offerings: self.assertEqual( service_offering.id, self.service_offering.id, "Check Service offering ID for domain" + str(self.domain_1.name) ) # Verify private service offering is not visible to other domain service_offerings = list_service_offering( self.apiclient, domainid=self.domain_2.id ) self.assertEqual( service_offerings, None, "Check List Service Offerings for a valid response" ) return class TestTemplateHierarchy(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestTemplateHierarchy, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.hypervisor = cls.testClient.getHypervisorInfo() cls.services = Services().services cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype # Create domains, accounts and template cls.domain_1 = Domain.create( cls.api_client, cls.services["domain"] ) cls.domain_2 = Domain.create( cls.api_client, cls.services["domain"], parentdomainid=cls.domain_1.id ) # Create account for doamin_1 cls.account_1 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_1.id ) # Create an account for domain_2 cls.account_2 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain_2.id ) cls._cleanup = [ cls.account_2, cls.domain_2, cls.account_1, cls.domain_1, ] builtin_info = get_builtin_template_info(cls.api_client, cls.zone.id) cls.services["template"]["url"] = builtin_info[0] cls.services["template"]["hypervisor"] = builtin_info[1] cls.services["template"]["format"] = builtin_info[2] # Register new 
template cls.template = Template.register( cls.api_client, cls.services["template"], zoneid=cls.zone.id, account=cls.account_1.name, domainid=cls.domain_1.id, hypervisor=cls.hypervisor ) # Wait for template to download cls.template.download(cls.api_client) # Wait for template status to be changed across time.sleep(60) return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created instance, volumes and snapshots cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="true") def test_01_template_hierarchy(self): """Test to verify template at same level in hierarchy""" # Validate the following # 1. Verify template is visible for domain_1 # 2. 
Verify template is also visible for domain_2 # Sleep to ensure that template state is reflected across templates = list_templates( self.apiclient, templatefilter='self', account=self.account_1.name, domainid=self.domain_1.id ) self.assertEqual( isinstance(templates, list), True, "Template response %s is not a list" % templates ) self.assertNotEqual( len(templates), 0, "No templates found" ) for template in templates: self.assertEqual( template.id, self.template.id, "Check Template ID for domain" + str(self.domain_1.name) ) # Verify private service offering is not visible to other domain templates = list_templates( self.apiclient, id=self.template.id, templatefilter='all', account=self.account_2.name, domainid=self.domain_2.id ) self.assertEqual( isinstance(templates, list), True, "Template response %s is not a list" % templates ) self.assertNotEqual( len(templates), 0, "No templates found" ) for template in templates: self.assertEqual( template.id, self.template.id, "Check Template ID for domain" + str(self.domain_2.name) ) return class TestAddVmToSubDomain(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestAddVmToSubDomain, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.sub_domain = Domain.create( cls.api_client, cls.services["domain"], parentdomainid=cls.domain.id ) # Create account for doamin_1 cls.account_1 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.domain.id ) # Create an account for domain_2 cls.account_2 = Account.create( cls.api_client, cls.services["account"], admin=True, domainid=cls.sub_domain.id ) cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"], domainid=cls.domain.id ) cls._cleanup = [ cls.account_2, cls.account_1, 
cls.sub_domain, cls.service_offering ] cls.template = get_template( cls.api_client, cls.zone.id, cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.vm_1 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], templateid=cls.template.id, accountid=cls.account_1.name, domainid=cls.account_1.domainid, serviceofferingid=cls.service_offering.id ) cls.vm_2 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], templateid=cls.template.id, accountid=cls.account_2.name, domainid=cls.account_2.domainid, serviceofferingid=cls.service_offering.id ) return @classmethod def tearDownClass(cls): try: # Clean up, terminate the created resources cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created resources cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false") def test_01_add_vm_to_subdomain(self): """ Test Sub domain allowed to launch VM when a Domain level zone is created""" # Validate the following # 1. Verify VM created by Account_1 is in Running state # 2. 
Verify VM created by Account_2 is in Running state vm_response = list_virtual_machines( self.apiclient, id=self.vm_1.id ) self.assertEqual( isinstance(vm_response, list), True, "Check List VM for a valid response" ) self.assertNotEqual( len(vm_response), 0, "Check List Template response" ) for vm in vm_response: self.debug("VM ID: %s and state: %s" % (vm.id, vm.state)) self.assertEqual( vm.state, 'Running', "Check State of Virtual machine" ) vm_response = list_virtual_machines( self.apiclient, id=self.vm_2.id ) self.assertNotEqual( len(vm_response), 0, "Check List Template response" ) for vm in vm_response: self.debug("VM ID: %s and state: %s" % (vm.id, vm.state)) self.assertEqual( vm.state, 'Running', "Check State of Virtual machine" ) return class TestUserDetails(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestUserDetails, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls._cleanup = [] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created network offerings cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr(tags=[ "role", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg" ]) def test_updateUserDetails(self): """Test user update API """ # Steps for test scenario # 1. create a user account # 2. 
update the user details (firstname, lastname, user) with # updateUser API # 3. listUsers in the account # 4. delete the account # Validate the following # 1. listAccounts should show account created successfully # 2. updateUser API should return valid response # 3. user should be updated with new details self.debug("Creating an user account..") self.account = Account.create( self.apiclient, self.services["account"], domainid=self.domain.id ) self.cleanup.append(self.account) # Fetching the user details of account self.debug( "Fetching user details for account: %s" % self.account.name) users = User.list( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.debug("Updating the details of user: %s" % user_1.name) firstname = random_gen() lastname = random_gen() self.debug("New firstname: %s, lastname: %s" % (firstname, lastname)) User.update( self.apiclient, user_1.id, firstname=firstname, lastname=lastname ) # Fetching the user details of account self.debug( "Fetching user details for user: %s" % user_1.name) users = User.list( self.apiclient, id=user_1.id, listall=True ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.assertEqual( user_1.firstname, firstname, "User's first name should be updated with new one" ) self.assertEqual( user_1.lastname, lastname, "User's last name should be updated with new one" ) return @attr(tags=[ "role", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg" ]) def test_updateAdminDetails(self): """Test update admin details """ # Steps for test scenario # 1. create a admin account # 2. update the user details (firstname, lastname, user) with # updateUser API # 3. listUsers in the account # 4. delete the account # Validate the following # 1. 
listAccounts should show account created successfully # 2. updateUser API should return valid response # 3. user should be updated with new details self.debug("Creating a ROOT admin account") self.account = Account.create( self.apiclient, self.services["account"], admin=True, ) self.cleanup.append(self.account) # Fetching the user details of account self.debug( "Fetching user details for account: %s" % self.account.name) users = User.list( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.debug("Updating the details of user: %s" % user_1.name) firstname = random_gen() lastname = random_gen() self.debug("New firstname: %s, lastname: %s" % (firstname, lastname)) User.update( self.apiclient, user_1.id, firstname=firstname, lastname=lastname ) # Fetching the user details of account self.debug( "Fetching user details for user: %s" % user_1.name) users = User.list( self.apiclient, id=user_1.id, listall=True ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.assertEqual( user_1.firstname, firstname, "User's first name should be updated with new one" ) self.assertEqual( user_1.lastname, lastname, "User's last name should be updated with new one" ) return @attr(tags=[ "role", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg" ]) def test_updateDomainAdminDetails(self): """Test update domain admin details """ # Steps for test scenario # 2. update the user details (firstname, lastname, user) with # updateUser API # 3. listUsers in the account # 4. delete the account # Validate the following # 1. listAccounts should show account created successfully # 2. updateUser API should return valid response # 3. 
user should be updated with new details self.debug("Creating a domain admin account") self.account = Account.create( self.apiclient, self.services["account"], admin=True, domainid=self.domain.id ) self.cleanup.append(self.account) # Fetching the user details of account self.debug( "Fetching user details for account: %s" % self.account.name) users = User.list( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.debug("Updating the details of user: %s" % user_1.name) firstname = random_gen() lastname = random_gen() self.debug("New firstname: %s, lastname: %s" % (firstname, lastname)) User.update( self.apiclient, user_1.id, firstname=firstname, lastname=lastname ) # Fetching the user details of account self.debug( "Fetching user details for user: %s" % user_1.name) users = User.list( self.apiclient, id=user_1.id, listall=True ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.assertEqual( user_1.firstname, firstname, "User's first name should be updated with new one" ) self.assertEqual( user_1.lastname, lastname, "User's last name should be updated with new one" ) return class TestUserLogin(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestUserLogin, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls._cleanup = [] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = 
self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created network offerings cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr(tags=["login", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg"]) def test_LoginApiUuidResponse(self): """Test if Login API does not return UUID's """ # Steps for test scenario # 1. create a user account # 2. login to the user account with given credentials (loginCmd) # 3. delete the user account # Validate the following # 1. listAccounts should return account created # 2. loginResponse should have UUID only is response. Assert by # checking database id is not same as response id # Login also succeeds with non NULL sessionId in response self.debug("Creating an user account..") self.account = Account.create( self.apiclient, self.services["account"], domainid=self.domain.id ) self.cleanup.append(self.account) self.debug("Logging into the cloudstack with login API") respose = User.login( self.apiclient, username=self.account.name, password=self.services["account"]["password"] ) self.debug("Login API response: %s" % respose) self.assertNotEqual( respose.sessionkey, None, "Login to the CloudStack should be successful" + "response shall have non Null key" ) return @attr(tags=["login", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg"]) def test_LoginApiDomain(self): """Test login API with domain """ # Steps for test scenario # 1. create a domain # 2. create user in the domain # 3. login to the user account above using UUID domain/user # 4. delete the user account # Validate the following # 1. listDomains returns created domain # 2. listAccounts returns created user # 3. 
loginResponse should have UUID only in responses # Login also succeeds with non NULL sessionId in response self.debug("Creating a domain for login with API domain test") domain = Domain.create( self.apiclient, self.services["domain"], parentdomainid=self.domain.id ) self.debug("Domain: %s is created succesfully." % domain.name) self.debug( "Checking if the created domain is listed in list domains API") domains = Domain.list(self.apiclient, id=domain.id, listall=True) self.assertEqual( isinstance(domains, list), True, "List domains shall return a valid response" ) self.debug("Creating an user account in domain: %s" % domain.name) self.account = Account.create( self.apiclient, self.services["account"], domainid=domain.id ) self.cleanup.append(self.account) accounts = Account.list( self.apiclient, name=self.account.name, domainid=self.account.domainid, listall=True ) self.assertEqual( isinstance(accounts, list), True, "List accounts should return a valid response" ) self.debug("Logging into the cloudstack with login API") respose = User.login( self.apiclient, username=self.account.name, password=self.services["account"]["password"], domainid=domain.id) self.debug("Login API response: %s" % respose) self.assertNotEqual( respose.sessionkey, None, "Login to the CloudStack should be successful" + "response shall have non Null key" ) return class TestUserAPIKeys(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestUserAPIKeys, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype # Create an account, domain etc cls.domain = Domain.create( cls.api_client, cls.services["domain"], ) cls.account = Account.create( cls.api_client, cls.services["account"], admin=False, domainid=cls.domain.id ) cls.domain_2 = Domain.create( cls.api_client, 
cls.services["domain"], ) cls.account_2 = Account.create( cls.api_client, cls.services["account"], admin=False, domainid=cls.domain_2.id ) cls._cleanup = [ cls.account, cls.domain, cls.account_2, cls.domain_2 ] return @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created network offerings cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr(tags=[ "role", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg" ]) def test_user_key_renew_same_account(self): # Create an User associated with the account user_1 = User.create( self.apiclient, self.services["user"], account=self.account.name, domainid=self.domain.id ) self.cleanup.append(user_1) account_response = list_accounts( self.apiclient, id=self.account.id )[0] self.assertEqual( hasattr(account_response, 'user'), True, "Users are included in account response") account_users = account_response.user self.assertEqual( isinstance(account_users, list), True, "Check account for valid data" ) self.assertNotEqual( len(account_users), 0, "Check number of User in Account") [user] = [u for u in account_users if u.username == user_1.username] self.assertEqual( user.apikey, None, "Check that the user don't have an API key yet") self.debug("Register API keys for user") userkeys = User.registerUserKeys(self.apiclient, user_1.id) users = list_accounts( self.apiclient, id=self.account.id )[0].user [user] = [u for u in users if u.id == user_1.id] self.assertEqual( user.apikey, userkeys.apikey, "Check User api key") self.assertEqual( user.secretkey, userkeys.secretkey, "Check User 
having secret key") self.debug("Get test client with user keys") cs_api = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) self.debug("Renew API keys for user using current keys") new_keys = User.registerUserKeys(cs_api, user_1.id) self.assertNotEqual( userkeys.apikey, new_keys.apikey, "Check API key is different") self.assertNotEqual( userkeys.secretkey, new_keys.secretkey, "Check secret key is different") @attr(tags=[ "role", "accounts", "simulator", "advanced", "advancedns", "basic", "eip", "sg" ]) def test_user_cannot_renew_other_keys(self): cs_api = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) self.debug("Try to change API key of an account in another domain") users = list_accounts( self.apiclient, id=self.account_2.id )[0].user with self.assertRaises(CloudstackAPIException) as e: User.registerUserKeys(cs_api, users[0].id) class TestDomainForceRemove(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestDomainForceRemove, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.template = get_template( cls.api_client, cls.zone.id, cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls._cleanup = [] return @classmethod def tearDownClass(cls): try: # Clean up, terminate the created resources cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] return def tearDown(self): try: # Clean up, terminate the created resources cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise 
Exception("Warning: Exception during cleanup : %s" % e) return @attr( tags=[ "domains", "advanced", "advancedns", "simulator", "dvs"], required_hardware="false") def test_forceDeleteDomain(self): """ Test delete domain with force option""" # Steps for validations # 1. create a domain DOM # 2. create 2 users under this domain # 3. deploy 1 VM into each of these user accounts # 4. create PF / FW rules for port 22 on these VMs for their # respective accounts # 5. delete the domain with force=true option # Validate the following # 1. listDomains should list the created domain # 2. listAccounts should list the created accounts # 3. listvirtualmachines should show the Running VMs # 4. PF and FW rules should be shown in listFirewallRules # 5. domain should delete successfully and above three list calls # should show all the resources now deleted. listRouters should # not return any routers in the deleted accounts/domains self.debug("Creating a domain for login with API domain test") domain = Domain.create( self.apiclient, self.services["domain"], parentdomainid=self.domain.id ) self.debug("Domain is created succesfully.") self.debug( "Checking if the created domain is listed in list domains API") domains = Domain.list(self.apiclient, id=domain.id, listall=True) self.assertEqual( isinstance(domains, list), True, "List domains shall return a valid response" ) self.debug("Creating 2 user accounts in domain: %s" % domain.name) self.account_1 = Account.create( self.apiclient, self.services["account"], domainid=domain.id ) self.account_2 = Account.create( self.apiclient, self.services["account"], domainid=domain.id ) try: self.debug("Creating a tiny service offering for VM deployment") self.service_offering = ServiceOffering.create( self.apiclient, self.services["service_offering"], domainid=self.domain.id ) self.debug("Deploying virtual machine in account 1: %s" % self.account_1.name) vm_1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], 
templateid=self.template.id, accountid=self.account_1.name, domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deploying virtual machine in account 2: %s" % self.account_2.name) VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, accountid=self.account_2.name, domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) networks = Network.list( self.apiclient, account=self.account_1.name, domainid=self.account_1.domainid, listall=True ) self.assertEqual( isinstance(networks, list), True, "List networks should return a valid response" ) network_1 = networks[0] self.debug("Default network in account 1: %s is %s" % ( self.account_1.name, network_1.name)) src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=network_1.id, account=self.account_1.name, domainid=self.account_1.domainid, listall=True, issourcenat=True, ) self.assertEqual( isinstance(src_nat_list, list), True, "List Public IP should return a valid source NAT" ) self.assertNotEqual( len(src_nat_list), 0, "Length of response from listPublicIp should not be 0" ) src_nat = src_nat_list[0] self.debug( "Trying to create a port forwarding rule in source NAT: %s" % src_nat.ipaddress) # Create NAT rule nat_rule = NATRule.create( self.apiclient, vm_1, self.services["natrule"], ipaddressid=src_nat.id ) self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress) nat_rules = NATRule.list(self.apiclient, id=nat_rule.id) self.assertEqual( isinstance(nat_rules, list), True, "List NAT should return a valid port forwarding rules" ) self.assertNotEqual( len(nat_rules), 0, "Length of response from listLbRules should not be 0" ) except Exception as e: self.clenaup.append(self.account_1) self.cleanup.append(self.account_2) self.fail(e) self.debug("Deleting domain with force option") try: domain.delete(self.apiclient, cleanup=True) except Exception as e: self.debug("Waiting for 
account.cleanup.interval" + " to cleanup any remaining resouces") # Sleep 3*account.gc to ensure that all resources are deleted wait_for_cleanup(self.apiclient, ["account.cleanup.interval"] * 3) with self.assertRaises(CloudstackAPIException): Domain.list( self.apiclient, id=domain.id, listall=True ) self.debug("Checking if the resources in domain are deleted") with self.assertRaises(CloudstackAPIException): Account.list( self.apiclient, name=self.account_1.name, domainid=self.account_1.domainid, listall=True ) return @attr( tags=[ "domains", "advanced", "advancedns", "simulator"], required_hardware="false") def test_DeleteDomain(self): """ Test delete domain without force option""" # Steps for validations # 1. create a domain DOM # 2. create 2 users under this domain # 3. deploy 1 VM into each of these user accounts # 4. create PF / FW rules for port 22 on these VMs for their # respective accounts # 5. delete the domain with force=false option # Validate the following # 1. listDomains should list the created domain # 2. listAccounts should list the created accounts # 3. listvirtualmachines should show the Running VMs # 4. PF and FW rules should be shown in listFirewallRules # 5. domain deletion should fail saying there are resources under use self.debug("Creating a domain for login with API domain test") domain = Domain.create( self.apiclient, self.services["domain"], parentdomainid=self.domain.id ) self.debug("Domain: %s is created successfully." 
% domain.name) self.debug( "Checking if the created domain is listed in list domains API") domains = Domain.list(self.apiclient, id=domain.id, listall=True) self.assertEqual( isinstance(domains, list), True, "List domains shall return a valid response" ) self.debug("Creating 2 user accounts in domain: %s" % domain.name) self.account_1 = Account.create( self.apiclient, self.services["account"], domainid=domain.id ) self.cleanup.append(self.account_1) self.account_2 = Account.create( self.apiclient, self.services["account"], domainid=domain.id ) self.cleanup.append(self.account_2) self.debug("Creating a tiny service offering for VM deployment") self.service_offering = ServiceOffering.create( self.apiclient, self.services["service_offering"], domainid=self.domain.id ) self.cleanup.append(self.service_offering) self.debug("Deploying virtual machine in account 1: %s" % self.account_1.name) vm_1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, accountid=self.account_1.name, domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deploying virtual machine in account 2: %s" % self.account_2.name) VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, accountid=self.account_2.name, domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) networks = Network.list( self.apiclient, account=self.account_1.name, domainid=self.account_1.domainid, listall=True ) self.assertEqual( isinstance(networks, list), True, "List networks should return a valid response" ) network_1 = networks[0] self.debug("Default network in account 1: %s is %s" % ( self.account_1.name, network_1.name)) src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=network_1.id, account=self.account_1.name, domainid=self.account_1.domainid, listall=True, issourcenat=True, ) self.assertEqual( isinstance(src_nat_list, list), True, "List Public 
IP should return a valid source NAT" ) self.assertNotEqual( len(src_nat_list), 0, "Length of response from listPublicIp should not be 0" ) src_nat = src_nat_list[0] self.debug( "Trying to create a port forwarding rule in source NAT: %s" % src_nat.ipaddress) # Create NAT rule nat_rule = NATRule.create( self.apiclient, vm_1, self.services["natrule"], ipaddressid=src_nat.id ) self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress) nat_rules = NATRule.list(self.apiclient, id=nat_rule.id) self.assertEqual( isinstance(nat_rules, list), True, "List NAT should return a valid port forwarding rules" ) self.assertNotEqual( len(nat_rules), 0, "Length of response from listLbRules should not be 0" ) self.debug("Deleting domain without force option") with self.assertRaises(Exception): domain.delete(self.apiclient, cleanup=False) return
{ "content_hash": "eb8640db6caad1eec19f816467e63eb7", "timestamp": "", "source": "github", "line_count": 2052, "max_line_length": 90, "avg_line_length": 31.341130604288498, "alnum_prop": 0.5401946759547207, "repo_name": "resmo/cloudstack", "id": "60421d9eab7b929e68d771e14dfe3ea7b5652336", "size": "65097", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/integration/component/test_accounts.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "1451" }, { "name": "Batchfile", "bytes": "11926" }, { "name": "C#", "bytes": "2356211" }, { "name": "CSS", "bytes": "335738" }, { "name": "FreeMarker", "bytes": "4917" }, { "name": "Groovy", "bytes": "153137" }, { "name": "HTML", "bytes": "151164" }, { "name": "Java", "bytes": "33712712" }, { "name": "JavaScript", "bytes": "7719277" }, { "name": "Python", "bytes": "11019815" }, { "name": "Ruby", "bytes": "896" }, { "name": "Shell", "bytes": "770039" } ], "symlink_target": "" }
from recipe_engine import recipe_api
from recipe_engine import config_types


class CheckoutApi(recipe_api.RecipeApi):
  """Recipe module for obtaining a source checkout.

  Supports two strategies: a plain git checkout of the primary repo
  (`git`) and a DEPS-managed checkout via bot_update/gclient
  (`bot_update`), with special handling for Flutter and Chromium.
  """

  @property
  def default_checkout_root(self):
    """The default location for cached persistent checkouts."""
    return self.m.vars.cache_dir.join('work')

  def assert_git_is_from_cipd(self):
    """Fail if git is not obtained from CIPD.

    Runs an inline Python step that locates `git` on PATH and fails the
    step unless its path contains 'cipd_bin_packages', i.e. unless the
    git binary was provisioned through CIPD.
    """
    self.m.run(self.m.python.inline, 'Assert that Git is from CIPD',
               program='''
from __future__ import print_function
import subprocess
import sys

which = 'where' if sys.platform == 'win32' else 'which'
git = subprocess.check_output([which, 'git']).decode('utf-8')
print('git was found at %s' % git)
if 'cipd_bin_packages' not in git:
  print('Git must be obtained through CIPD.', file=sys.stderr)
  sys.exit(1)
''')

  def git(self, checkout_root):
    """Run the steps to perform a pure-git checkout without DEPS.

    Checks out the 'repository' property at the 'revision' property into
    <checkout_root>/skia (no submodules). For trybots, fetches the
    'patch_ref' and rebases it on top of the requested revision so the
    patch is applied against that revision.

    Returns:
      The 'revision' property that was checked out.
    """
    self.assert_git_is_from_cipd()
    skia_dir = checkout_root.join('skia')
    self.m.git.checkout(
        self.m.properties['repository'], dir_path=skia_dir,
        ref=self.m.properties['revision'], submodules=False)
    if self.m.vars.is_trybot:
      self.m.git('fetch', 'origin', self.m.properties['patch_ref'])
      self.m.git('checkout', 'FETCH_HEAD')
      self.m.git('rebase', self.m.properties['revision'])
    return self.m.properties['revision']

  def bot_update(self, checkout_root, gclient_cache=None,
                 checkout_chromium=False, checkout_flutter=False,
                 extra_gclient_env=None, flutter_android=False):
    """Run the steps to obtain a checkout using bot_update.

    Args:
      checkout_root: Root directory where the code will be synced.
      gclient_cache: Optional, directory of the gclient cache. Defaults to
          the 'git' subdirectory of the module's cache dir.
      checkout_chromium: If True, will check out chromium/src.git in
          addition to the primary repo.
      checkout_flutter: If True, will checkout flutter in addition to the
          primary repo.
      extra_gclient_env: Map of extra environment variable names to their
          values to supply while running gclient.
      flutter_android: Indicates that we're checking out flutter for
          Android.

    Returns:
      The 'got_revision' property reported by bot_update.
    """
    self.assert_git_is_from_cipd()
    if not gclient_cache:
      gclient_cache = self.m.vars.cache_dir.join('git')
    if not extra_gclient_env:
      extra_gclient_env = {}

    cfg_kwargs = {}

    # Use a persistent gclient cache for Swarming.
    cfg_kwargs['CACHE_DIR'] = gclient_cache

    if checkout_flutter:
      # Delete the flutter cache to start from scratch every time.
      # See skbug.com/9994.
      self.m.run.rmtree(checkout_root)

    # Create the checkout path if necessary.
    # TODO(borenet): 'makedirs checkout_root'
    self.m.file.ensure_directory('makedirs checkout_path', checkout_root)

    # Initial cleanup.
    gclient_cfg = self.m.gclient.make_config(**cfg_kwargs)

    # Determine the primary solution: either the 'repository' property or,
    # for Flutter builds, the Flutter engine repo.
    main_repo = self.m.properties['repository']
    if checkout_flutter:
      main_repo = 'https://github.com/flutter/engine.git'
    main_name = self.m.path.basename(main_repo)
    if main_name.endswith('.git'):
      main_name = main_name[:-len('.git')]
      # Special case for flutter because it seems to need a very specific
      # directory structure to successfully build.
      if checkout_flutter and main_name == 'engine':
        main_name = 'src/flutter'
    main = gclient_cfg.solutions.add()
    main.name = main_name
    main.managed = False
    main.url = main_repo
    main.revision = self.m.properties.get('revision') or 'origin/main'
    m = gclient_cfg.got_revision_mapping
    m[main_name] = 'got_revision'

    # By default patches apply to the main solution; if 'patch_repo' is
    # set, derive the patch root from that repo's basename instead.
    patch_root = main_name
    patch_repo = main.url
    if self.m.properties.get('patch_repo'):
      patch_repo = self.m.properties['patch_repo']
      patch_root = patch_repo.split('/')[-1]
      if patch_root.endswith('.git'):
        patch_root = patch_root[:-4]

    if checkout_flutter:
      # Skia is a DEP of Flutter; the 'revision' property is a Skia
      # revision, and any patch should be applied to Skia, not Flutter.
      main.revision = 'origin/master'
      main.managed = True
      m[main_name] = 'got_flutter_revision'
      if flutter_android:
        gclient_cfg.target_os.add('android')

      # Pin the Skia DEP to the requested revision and make it the patch
      # root so trybot patches land in Skia.
      skia_dep_path = 'src/third_party/skia'
      gclient_cfg.repo_path_map['https://skia.googlesource.com/skia'] = (
          skia_dep_path, 'HEAD')
      gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision']
      m[skia_dep_path] = 'got_revision'
      patch_root = skia_dep_path

    if checkout_chromium:
      main.custom_vars['checkout_chromium'] = True
      extra_gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0'

    # TODO(rmistry): Remove the below block after there is a solution for
    # crbug.com/616443
    entries_file = checkout_root.join('.gclient_entries')
    if self.m.path.exists(entries_file) or self._test_data.enabled:
      self.m.file.remove('remove %s' % entries_file, entries_file)

    # Run bot_update. A patch ref, when present, is expressed as
    # "<repo>@<base revision>:<ref>" per bot_update's patch_refs format.
    patch_refs = None
    patch_ref = self.m.properties.get('patch_ref')
    if patch_ref:
      patch_refs = ['%s@%s:%s' % (self.m.properties['patch_repo'],
                                  self.m.properties['revision'],
                                  patch_ref)]

    self.m.gclient.c = gclient_cfg
    with self.m.context(cwd=checkout_root):
      update_step = self.m.bot_update.ensure_checkout(
          patch_root=patch_root,
          # The logic in ensure_checkout for this arg is fairly naive, so if
          # patch=False, we'll see "... (without patch)" in the step names,
          # even for non-trybot runs, which is misleading and confusing.
          # Therefore, always specify patch=True.
          patch=True,
          patch_refs=patch_refs,
      )
    if checkout_chromium or checkout_flutter:
      # Hooks are skipped by default above; run them now with
      # DEPOT_TOOLS_UPDATE disabled plus any caller-supplied env.
      gclient_env = {'DEPOT_TOOLS_UPDATE': '0'}
      if extra_gclient_env:
        gclient_env.update(extra_gclient_env)
      with self.m.context(cwd=checkout_root, env=gclient_env):
        self.m.gclient.runhooks()
    return update_step.presentation.properties['got_revision']
{ "content_hash": "0b90ad8bb6183e338bc44463eaf20803", "timestamp": "", "source": "github", "line_count": 158, "max_line_length": 80, "avg_line_length": 38.70253164556962, "alnum_prop": 0.6454619787408014, "repo_name": "aosp-mirror/platform_external_skia", "id": "5822642681eaf9761a37464c2c97a6127219226a", "size": "6305", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "infra/bots/recipe_modules/checkout/api.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Assembly", "bytes": "12716940" }, { "name": "Batchfile", "bytes": "904" }, { "name": "C", "bytes": "620774" }, { "name": "C#", "bytes": "4683" }, { "name": "C++", "bytes": "27394853" }, { "name": "GLSL", "bytes": "67013" }, { "name": "Go", "bytes": "80137" }, { "name": "HTML", "bytes": "1002516" }, { "name": "Java", "bytes": "32794" }, { "name": "JavaScript", "bytes": "51666" }, { "name": "Lex", "bytes": "4372" }, { "name": "Lua", "bytes": "70974" }, { "name": "Makefile", "bytes": "2295" }, { "name": "Objective-C", "bytes": "35223" }, { "name": "Objective-C++", "bytes": "34410" }, { "name": "PHP", "bytes": "120845" }, { "name": "Python", "bytes": "1002226" }, { "name": "Shell", "bytes": "49974" } ], "symlink_target": "" }
"""Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import os import socket import sys import gflags from nova.common import cfg class FlagValues(object): class Flag: def __init__(self, name, value, update_default=None): self.name = name self.value = value self._update_default = update_default def SetDefault(self, default): if self._update_default: self._update_default(self.name, default) class ErrorCatcher: def __init__(self, orig_error): self.orig_error = orig_error self.reset() def reset(self): self._error_msg = None def catch(self, msg): if ": --" in msg: self._error_msg = msg else: self.orig_error(msg) def get_unknown_arg(self, args): if not self._error_msg: return None # Error message is e.g. "no such option: --runtime_answer" a = self._error_msg[self._error_msg.rindex(": --") + 2:] return filter(lambda i: i == a or i.startswith(a + "="), args)[0] def __init__(self): self._conf = cfg.ConfigOpts() self._conf._oparser.disable_interspersed_args() self._opts = {} self.Reset() def _parse(self): if self._extra is not None: return args = gflags.FlagValues().ReadFlagsFromFiles(self._args) extra = None # # This horrendous hack allows us to stop optparse # exiting when it encounters an unknown option # error_catcher = self.ErrorCatcher(self._conf._oparser.error) self._conf._oparser.error = error_catcher.catch try: while True: error_catcher.reset() extra = self._conf(args) unknown = error_catcher.get_unknown_arg(args) if not unknown: break args.remove(unknown) finally: self._conf._oparser.error = error_catcher.orig_error self._extra = extra def __call__(self, argv): self.Reset() self._args = argv[1:] self._parse() return [argv[0]] + self._extra def __getattr__(self, name): self._parse() return getattr(self._conf, name) def get(self, name, default): value = getattr(self, name) if value is not None: # value might be '0' or "" return value else: return default def 
__contains__(self, name): self._parse() return hasattr(self._conf, name) def _update_default(self, name, default): self._conf.set_default(name, default) def __iter__(self): return self.FlagValuesDict().iterkeys() def __getitem__(self, name): self._parse() if not self.__contains__(name): return None return self.Flag(name, getattr(self, name), self._update_default) def Reset(self): self._conf.reset() self._args = [] self._extra = None def ParseNewFlags(self): pass def FlagValuesDict(self): self._parse() ret = {} for opt in self._opts.values(): ret[opt.dest] = getattr(self, opt.dest) return ret def _add_option(self, opt): if opt.dest in self._opts: return self._opts[opt.dest] = opt try: self._conf.register_cli_opts(self._opts.values()) except cfg.ArgsAlreadyParsedError: self._conf.reset() self._conf.register_cli_opts(self._opts.values()) self._extra = None def define_string(self, name, default, help): self._add_option(cfg.StrOpt(name, default=default, help=help)) def define_integer(self, name, default, help): self._add_option(cfg.IntOpt(name, default=default, help=help)) def define_float(self, name, default, help): self._add_option(cfg.FloatOpt(name, default=default, help=help)) def define_bool(self, name, default, help): self._add_option(cfg.BoolOpt(name, default=default, help=help)) def define_list(self, name, default, help): self._add_option(cfg.ListOpt(name, default=default, help=help)) def define_multistring(self, name, default, help): self._add_option(cfg.MultiStrOpt(name, default=default, help=help)) FLAGS = FlagValues() def DEFINE_string(name, default, help, flag_values=FLAGS): flag_values.define_string(name, default, help) def DEFINE_integer(name, default, help, lower_bound=None, flag_values=FLAGS): # FIXME(markmc): ignoring lower_bound flag_values.define_integer(name, default, help) def DEFINE_bool(name, default, help, flag_values=FLAGS): flag_values.define_bool(name, default, help) def DEFINE_boolean(name, default, help, flag_values=FLAGS): 
DEFINE_bool(name, default, help, flag_values) def DEFINE_list(name, default, help, flag_values=FLAGS): flag_values.define_list(name, default, help) def DEFINE_float(name, default, help, flag_values=FLAGS): flag_values.define_float(name, default, help) def DEFINE_multistring(name, default, help, flag_values=FLAGS): flag_values.define_multistring(name, default, help) class UnrecognizedFlag(Exception): pass def DECLARE(name, module_string, flag_values=FLAGS): if module_string not in sys.modules: __import__(module_string, globals(), locals()) if name not in flag_values: raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) def DEFINE_flag(flag): pass class HelpFlag: pass class HelpshortFlag: pass class HelpXMLFlag: pass def _get_my_ip(): """Returns the actual ip of the local machine.""" try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) (addr, port) = csock.getsockname() csock.close() return addr except socket.error as ex: return "127.0.0.1" # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9 DEFINE_string('my_ip', _get_my_ip(), 'host ip address') DEFINE_list('region_list', [], 'list of region=fqdn pairs separated by commas') DEFINE_string('connection_type', None, 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') # NOTE(sirp): my_ip interpolation doesn't work within nested structures DEFINE_string('glance_host', _get_my_ip(), 'default glance host') DEFINE_integer('glance_port', 9292, 'default glance port') DEFINE_list('glance_api_servers', ['%s:%d' % (FLAGS.glance_host, FLAGS.glance_port)], 'list of glance api servers available to nova (host:port)') DEFINE_integer('glance_num_retries', 0, 'The number of times to retry downloading an image from glance') DEFINE_integer('s3_port', 3333, 's3 port') 
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)') DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)') DEFINE_string('cert_topic', 'cert', 'the topic cert nodes listen on') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('console_topic', 'console', 'the topic console proxy nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy', 'the topic ajax proxy nodes listen on') DEFINE_string('ajax_console_proxy_url', 'http://127.0.0.1:8000', 'location of ajax console proxy, \ in the form "http://127.0.0.1:8000"') DEFINE_integer('ajax_console_proxy_port', 8000, 'port that ajax_console_proxy binds') DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 
'osapi_compute', 'osapi_volume', 'metadata'], 'list of APIs to enable by default') DEFINE_string('ec2_host', '$my_ip', 'ip of api server') DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') DEFINE_integer('ec2_port', 8773, 'cloud controller port') DEFINE_string('ec2_scheme', 'http', 'prefix for ec2') DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2') DEFINE_multistring('osapi_compute_extension', ['nova.api.openstack.compute.contrib.standard_extensions'], 'osapi compute extension to load') DEFINE_multistring('osapi_volume_extension', ['nova.api.openstack.volume.contrib.standard_extensions'], 'osapi volume extension to load') DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') DEFINE_string('osapi_path', '/v1.1/', 'suffix for openstack') DEFINE_integer('osapi_max_limit', 1000, 'max number of items returned in a collection response') DEFINE_string('metadata_host', '$my_ip', 'ip of metadata server') DEFINE_integer('metadata_port', 8775, 'Metadata API port') DEFINE_string('default_project', 'openstack', 'default project for openstack') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'nokernel', 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', '0', 'image id for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', '-vpn', 'Suffix to add to project name for vpn key and secgroups') DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'), 'Directory for lock files') DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') DEFINE_string('logfile_mode', 
'0644', 'Default file mode of the logs.') DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite') DEFINE_bool('sqlite_synchronous', True, 'Synchronous mode for sqlite') DEFINE_string('sql_connection', 'sqlite:///$state_path/$sqlite_db', 'connection string for sql database') DEFINE_integer('sql_idle_timeout', 3600, 'timeout for idle sql database connections') DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager', 'Manager for console proxy') DEFINE_string('cert_manager', 'nova.cert.manager.CertManager', 'Manager for cert') DEFINE_string('instance_dns_manager', 'nova.network.dns_driver.DNSDriver', 'DNS Manager for instance IPs') DEFINE_string('instance_dns_domain', '', 'DNS Zone for instance IPs') DEFINE_string('floating_ip_dns_manager', 'nova.network.dns_driver.DNSDriver', 'DNS Manager for floating IPs') DEFINE_string('network_manager', 'nova.network.manager.VlanManager', 'Manager for network') DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') # VSA constants and enums DEFINE_string('default_vsa_instance_type', 'm1.small', 'default instance type for VSA instances') DEFINE_integer('max_vcs_in_vsa', 32, 'maxinum VCs in a VSA') DEFINE_integer('vsa_part_size_gb', 100, 'default partition size for shared capacity') # Default firewall driver for security groups and provider firewall DEFINE_string('firewall_driver', 'nova.virt.libvirt.firewall.IptablesFirewallDriver', 'Firewall driver 
(defaults to iptables)') # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('host', socket.gethostname(), 'Name of this node. This can be an opaque identifier. It is ' 'not necessarily a hostname, FQDN, or IP address.') DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') DEFINE_string('notification_driver', 'nova.notifier.no_op_notifier', 'Default driver for sending notifications') DEFINE_list('memcached_servers', None, 'Memcached servers or None for in process cache.') DEFINE_string('zone_name', 'nova', 'name of this zone') DEFINE_list('zone_capabilities', ['hypervisor=xenserver;kvm', 'os=linux;windows'], 'Key/Multi-value list representng capabilities of this zone') DEFINE_string('build_plan_encryption_key', None, '128bit (hex) encryption key for scheduler build plans.') DEFINE_string('instance_usage_audit_period', 'month', 'time period to generate instance usages for.') DEFINE_integer('bandwith_poll_interval', 600, 'interval to pull bandwidth usage info') DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') DEFINE_bool('resume_guests_state_on_host_boot', False, 'Whether to start guests, that was running before the host reboot') DEFINE_string('default_ephemeral_format', None, 'The default format a ephemeral_volume will be formatted ' 'with on creation.') DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') DEFINE_string('network_driver', 'nova.network.linux_net', 'Driver to use for network creation') DEFINE_bool('use_ipv6', False, 'use ipv6') DEFINE_integer('password_length', 12, 'Length of generated instance admin passwords') DEFINE_bool('monkey_patch', False, 'Whether to log monkey patching') DEFINE_list('monkey_patch_modules', ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', 
'nova.compute.api:nova.notifier.api.notify_decorator'], 'Module list representing monkey ' 'patched module and decorator') DEFINE_bool('allow_resize_to_same_host', False, 'Allow destination machine to match source for resize. Useful' ' when testing in environments with only one host machine.') DEFINE_string('stub_network', False, 'Stub network related code') DEFINE_integer('reclaim_instance_interval', 0, 'Interval in seconds for reclaiming deleted instances') DEFINE_integer('zombie_instance_updated_at_window', 172800, 'Limit in seconds that a zombie instance can exist before ' 'being cleaned up.') DEFINE_boolean('allow_ec2_admin_api', False, 'Enable/Disable EC2 Admin API') DEFINE_integer('service_down_time', 60, 'maximum time since last check-in for up service') DEFINE_string('default_schedule_zone', None, 'zone to use when user doesnt specify one') DEFINE_list('isolated_images', [], 'Images to run on isolated host') DEFINE_list('isolated_hosts', [], 'Host reserved for specific images')
{ "content_hash": "bd0e4f09d218286c91d2102029163648", "timestamp": "", "source": "github", "line_count": 461, "max_line_length": 79, "avg_line_length": 37.668112798264644, "alnum_prop": 0.6400230348401958, "repo_name": "KarimAllah/nova", "id": "d535f783f92a78adc22a5b707bd926eedcaf3d61", "size": "18173", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nova/flags.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "7412" }, { "name": "Python", "bytes": "5419134" }, { "name": "Shell", "bytes": "24506" } ], "symlink_target": "" }
from __future__ import with_statement import os import time import traceback from datetime import datetime from gettext import gettext as _ from xml.etree.cElementTree import Element, SubElement, tostring from eventlet import Timeout import swift.common.db from swift.common.db import ContainerBroker from swift.common.request_helpers import get_param from swift.common.utils import get_logger, hash_path, public, \ normalize_timestamp, storage_directory, validate_sync_to, \ config_true_value, validate_device_partition, json, timing_stats, \ replication, parse_content_type from swift.common.constraints import CONTAINER_LISTING_LIMIT, \ check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout from swift.common.db_replicator import ReplicatorRpc from swift.common.http import HTTP_NOT_FOUND, is_success from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \ HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \ HTTPInsufficientStorage, HTTPNotAcceptable, HTTPException, HeaderKeyDict DATADIR = 'containers' class ContainerController(object): """WSGI Controller for the container server.""" # Ensure these are all lowercase save_headers = ['x-container-read', 'x-container-write', 'x-container-sync-key', 'x-container-sync-to'] def __init__(self, conf): self.logger = get_logger(conf, log_route='container-server') self.root = conf.get('devices', '/srv/node/') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.node_timeout = int(conf.get('node_timeout', 3)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) replication_server = conf.get('replication_server', None) if replication_server is not None: replication_server = config_true_value(replication_server) self.replication_server = replication_server self.allowed_sync_hosts = [ h.strip() for h in 
conf.get('allowed_sync_hosts', '127.0.0.1').split(',') if h.strip()] self.replicator_rpc = ReplicatorRpc( self.root, DATADIR, ContainerBroker, self.mount_check, logger=self.logger) self.auto_create_account_prefix = \ conf.get('auto_create_account_prefix') or '.' if config_true_value(conf.get('allow_versions', 'f')): self.save_headers.append('x-versions-location') swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) def _get_container_broker(self, drive, part, account, container, **kwargs): """ Get a DB broker for the container. :param drive: drive that holds the container :param part: partition the container is in :param account: account name :param container: container name :returns: ContainerBroker object """ hsh = hash_path(account, container) db_dir = storage_directory(DATADIR, part, hsh) db_path = os.path.join(self.root, drive, db_dir, hsh + '.db') kwargs.setdefault('account', account) kwargs.setdefault('container', container) kwargs.setdefault('logger', self.logger) return ContainerBroker(db_path, **kwargs) def account_update(self, req, account, container, broker): """ Update the account server(s) with latest container info. :param req: swob.Request object :param account: account name :param container: container name :param broker: container DB broker object :returns: if all the account requests return a 404 error code, HTTPNotFound response object, if the account cannot be updated due to a malformed header, an HTTPBadRequest response object, otherwise None. """ account_hosts = [h.strip() for h in req.headers.get('X-Account-Host', '').split(',')] account_devices = [d.strip() for d in req.headers.get('X-Account-Device', '').split(',')] account_partition = req.headers.get('X-Account-Partition', '') if len(account_hosts) != len(account_devices): # This shouldn't happen unless there's a bug in the proxy, # but if there is, we want to know about it. 
self.logger.error(_('ERROR Account update failed: different ' 'numbers of hosts and devices in request: ' '"%s" vs "%s"' % (req.headers.get('X-Account-Host', ''), req.headers.get('X-Account-Device', '')))) return HTTPBadRequest(req=req) if account_partition: updates = zip(account_hosts, account_devices) else: updates = [] account_404s = 0 for account_host, account_device in updates: account_ip, account_port = account_host.rsplit(':', 1) new_path = '/' + '/'.join([account, container]) info = broker.get_info() account_headers = HeaderKeyDict({ 'x-put-timestamp': info['put_timestamp'], 'x-delete-timestamp': info['delete_timestamp'], 'x-object-count': info['object_count'], 'x-bytes-used': info['bytes_used'], 'x-trans-id': req.headers.get('x-trans-id', '-'), 'user-agent': 'container-server %s' % os.getpid(), 'referer': req.as_referer()}) if req.headers.get('x-account-override-deleted', 'no').lower() == \ 'yes': account_headers['x-account-override-deleted'] = 'yes' try: with ConnectionTimeout(self.conn_timeout): conn = http_connect( account_ip, account_port, account_device, account_partition, 'PUT', new_path, account_headers) with Timeout(self.node_timeout): account_response = conn.getresponse() account_response.read() if account_response.status == HTTP_NOT_FOUND: account_404s += 1 elif not is_success(account_response.status): self.logger.error(_( 'ERROR Account update failed ' 'with %(ip)s:%(port)s/%(device)s (will retry ' 'later): Response %(status)s %(reason)s'), {'ip': account_ip, 'port': account_port, 'device': account_device, 'status': account_response.status, 'reason': account_response.reason}) except (Exception, Timeout): self.logger.exception(_( 'ERROR account update failed with ' '%(ip)s:%(port)s/%(device)s (will retry later)'), {'ip': account_ip, 'port': account_port, 'device': account_device}) if updates and account_404s == len(updates): return HTTPNotFound(req=req) else: return None @public @timing_stats() def DELETE(self, req): """Handle HTTP DELETE 
request.""" try: drive, part, account, container, obj = req.split_path(4, 5, True) validate_device_partition(drive, part) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) if 'x-timestamp' not in req.headers or \ not check_float(req.headers['x-timestamp']): return HTTPBadRequest(body='Missing timestamp', request=req, content_type='text/plain') if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container) if account.startswith(self.auto_create_account_prefix) and obj and \ not os.path.exists(broker.db_file): try: broker.initialize(normalize_timestamp( req.headers.get('x-timestamp') or time.time())) except swift.common.db.DatabaseAlreadyExists: pass if not os.path.exists(broker.db_file): return HTTPNotFound() if obj: # delete object broker.delete_object(obj, req.headers.get('x-timestamp')) return HTTPNoContent(request=req) else: # delete container if not broker.empty(): return HTTPConflict(request=req) existed = float(broker.get_info()['put_timestamp']) and \ not broker.is_deleted() broker.delete_db(req.headers['X-Timestamp']) if not broker.is_deleted(): return HTTPConflict(request=req) resp = self.account_update(req, account, container, broker) if resp: return resp if existed: return HTTPNoContent(request=req) return HTTPNotFound() @public @timing_stats() def PUT(self, req): """Handle HTTP PUT request.""" try: drive, part, account, container, obj = req.split_path(4, 5, True) validate_device_partition(drive, part) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) if 'x-timestamp' not in req.headers or \ not check_float(req.headers['x-timestamp']): return HTTPBadRequest(body='Missing timestamp', request=req, content_type='text/plain') if 'x-container-sync-to' in req.headers: err = validate_sync_to(req.headers['x-container-sync-to'], 
self.allowed_sync_hosts) if err: return HTTPBadRequest(err) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) timestamp = normalize_timestamp(req.headers['x-timestamp']) broker = self._get_container_broker(drive, part, account, container) if obj: # put container object if account.startswith(self.auto_create_account_prefix) and \ not os.path.exists(broker.db_file): try: broker.initialize(timestamp) except swift.common.db.DatabaseAlreadyExists: pass if not os.path.exists(broker.db_file): return HTTPNotFound() broker.put_object(obj, timestamp, int(req.headers['x-size']), req.headers['x-content-type'], req.headers['x-etag']) return HTTPCreated(request=req) else: # put container if not os.path.exists(broker.db_file): try: broker.initialize(timestamp) created = True except swift.common.db.DatabaseAlreadyExists: pass else: created = broker.is_deleted() broker.update_put_timestamp(timestamp) if broker.is_deleted(): return HTTPConflict(request=req) metadata = {} metadata.update( (key, (value, timestamp)) for key, value in req.headers.iteritems() if key.lower() in self.save_headers or key.lower().startswith('x-container-meta-')) if metadata: if 'X-Container-Sync-To' in metadata: if 'X-Container-Sync-To' not in broker.metadata or \ metadata['X-Container-Sync-To'][0] != \ broker.metadata['X-Container-Sync-To'][0]: broker.set_x_container_sync_points(-1, -1) broker.update_metadata(metadata) resp = self.account_update(req, account, container, broker) if resp: return resp if created: return HTTPCreated(request=req) else: return HTTPAccepted(request=req) @public @timing_stats(sample_rate=0.1) def HEAD(self, req): """Handle HTTP HEAD request.""" try: drive, part, account, container, obj = req.split_path(4, 5, True) validate_device_partition(drive, part) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) query_format = get_param(req, 'format') if query_format: 
req.accept = FORMAT2CONTENT_TYPE.get( query_format.lower(), FORMAT2CONTENT_TYPE['plain']) out_content_type = req.accept.best_match( ['text/plain', 'application/json', 'application/xml', 'text/xml']) if not out_content_type: return HTTPNotAcceptable(request=req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, pending_timeout=0.1, stale_reads_ok=True) if broker.is_deleted(): return HTTPNotFound(request=req) info = broker.get_info() headers = { 'X-Container-Object-Count': info['object_count'], 'X-Container-Bytes-Used': info['bytes_used'], 'X-Timestamp': info['created_at'], 'X-PUT-Timestamp': info['put_timestamp'], } headers.update( (key, value) for key, (value, timestamp) in broker.metadata.iteritems() if value != '' and (key.lower() in self.save_headers or key.lower().startswith('x-container-meta-'))) headers['Content-Type'] = out_content_type return HTTPNoContent(request=req, headers=headers, charset='utf-8') def update_data_record(self, record): """ Perform any mutations to container listing records that are common to all serialization formats, and returns it as a dict. Converts created time to iso timestamp. Replaces size with 'swift_bytes' content type parameter. 
:params record: object entry record :returns: modified record """ (name, created, size, content_type, etag) = record if content_type is None: return {'subdir': name} response = {'bytes': size, 'hash': etag, 'name': name} last_modified = datetime.utcfromtimestamp(float(created)).isoformat() # python isoformat() doesn't include msecs when zero if len(last_modified) < len("1970-01-01T00:00:00.000000"): last_modified += ".000000" response['last_modified'] = last_modified + 'Z' content_type, params = parse_content_type(content_type) for key, value in params: if key == 'swift_bytes': try: response['bytes'] = int(value) except ValueError: self.logger.exception("Invalid swift_bytes") else: content_type += ';%s=%s' % (key, value) response['content_type'] = content_type return response @public @timing_stats() def GET(self, req): """Handle HTTP GET request.""" try: drive, part, account, container, obj = req.split_path(4, 5, True) validate_device_partition(drive, part) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) path = get_param(req, 'path') prefix = get_param(req, 'prefix') delimiter = get_param(req, 'delimiter') if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254): # delimiters can be made more flexible later return HTTPPreconditionFailed(body='Bad delimiter') marker = get_param(req, 'marker', '') end_marker = get_param(req, 'end_marker') limit = CONTAINER_LISTING_LIMIT given_limit = get_param(req, 'limit') if given_limit and given_limit.isdigit(): limit = int(given_limit) if limit > CONTAINER_LISTING_LIMIT: return HTTPPreconditionFailed( request=req, body='Maximum limit is %d' % CONTAINER_LISTING_LIMIT) query_format = get_param(req, 'format') if query_format: req.accept = FORMAT2CONTENT_TYPE.get(query_format.lower(), FORMAT2CONTENT_TYPE['plain']) out_content_type = req.accept.best_match( ['text/plain', 'application/json', 'application/xml', 'text/xml']) if not out_content_type: return 
HTTPNotAcceptable(request=req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, pending_timeout=0.1, stale_reads_ok=True) if broker.is_deleted(): return HTTPNotFound(request=req) info = broker.get_info() resp_headers = { 'X-Container-Object-Count': info['object_count'], 'X-Container-Bytes-Used': info['bytes_used'], 'X-Timestamp': info['created_at'], 'X-PUT-Timestamp': info['put_timestamp'], } for key, (value, timestamp) in broker.metadata.iteritems(): if value and (key.lower() in self.save_headers or key.lower().startswith('x-container-meta-')): resp_headers[key] = value ret = Response(request=req, headers=resp_headers, content_type=out_content_type, charset='utf-8') container_list = broker.list_objects_iter(limit, marker, end_marker, prefix, delimiter, path) if out_content_type == 'application/json': ret.body = json.dumps([self.update_data_record(record) for record in container_list]) elif out_content_type.endswith('/xml'): doc = Element('container', name=container.decode('utf-8')) for obj in container_list: record = self.update_data_record(obj) if 'subdir' in record: name = record['subdir'].decode('utf-8') sub = SubElement(doc, 'subdir', name=name) SubElement(sub, 'name').text = name else: obj_element = SubElement(doc, 'object') for field in ["name", "hash", "bytes", "content_type", "last_modified"]: SubElement(obj_element, field).text = str( record.pop(field)).decode('utf-8') for field in sorted(record.keys()): SubElement(obj_element, field).text = str( record[field]).decode('utf-8') ret.body = tostring(doc, encoding='UTF-8') else: if not container_list: return HTTPNoContent(request=req, headers=resp_headers) ret.body = '\n'.join(rec[0] for rec in container_list) + '\n' return ret @public @replication @timing_stats(sample_rate=0.01) def REPLICATE(self, req): """ Handle HTTP REPLICATE request (json-encoded RPC calls for 
replication.) """ try: post_args = req.split_path(3) drive, partition, hash = post_args validate_device_partition(drive, partition) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) try: args = json.load(req.environ['wsgi.input']) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain') ret = self.replicator_rpc.dispatch(post_args, args) ret.request = req return ret @public @timing_stats() def POST(self, req): """Handle HTTP POST request.""" try: drive, part, account, container = req.split_path(4) validate_device_partition(drive, part) except ValueError, err: return HTTPBadRequest(body=str(err), content_type='text/plain', request=req) if 'x-timestamp' not in req.headers or \ not check_float(req.headers['x-timestamp']): return HTTPBadRequest(body='Missing or bad timestamp', request=req, content_type='text/plain') if 'x-container-sync-to' in req.headers: err = validate_sync_to(req.headers['x-container-sync-to'], self.allowed_sync_hosts) if err: return HTTPBadRequest(err) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container) if broker.is_deleted(): return HTTPNotFound(request=req) timestamp = normalize_timestamp(req.headers['x-timestamp']) metadata = {} metadata.update( (key, (value, timestamp)) for key, value in req.headers.iteritems() if key.lower() in self.save_headers or key.lower().startswith('x-container-meta-')) if metadata: if 'X-Container-Sync-To' in metadata: if 'X-Container-Sync-To' not in broker.metadata or \ metadata['X-Container-Sync-To'][0] != \ broker.metadata['X-Container-Sync-To'][0]: broker.set_x_container_sync_points(-1, -1) broker.update_metadata(metadata) return HTTPNoContent(request=req) def __call__(self, env, 
start_response): start_time = time.time() req = Request(env) self.logger.txn_id = req.headers.get('x-trans-id', None) if not check_utf8(req.path_info): res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL') else: try: # disallow methods which have not been marked 'public' try: method = getattr(self, req.method) getattr(method, 'publicly_accessible') replication_method = getattr(method, 'replication', False) if (self.replication_server is not None and self.replication_server != replication_method): raise AttributeError('Not allowed method.') except AttributeError: res = HTTPMethodNotAllowed() else: res = method(req) except HTTPException as error_response: res = error_response except (Exception, Timeout): self.logger.exception(_( 'ERROR __call__ error with %(method)s %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) trans_time = '%.4f' % (time.time() - start_time) log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % ( req.remote_addr, time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()), req.method, req.path, res.status.split()[0], res.content_length or '-', req.headers.get('x-trans-id', '-'), req.referer or '-', req.user_agent or '-', trans_time) if req.method.upper() == 'REPLICATE': self.logger.debug(log_message) else: self.logger.info(log_message) return res(env, start_response) def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI container server apps""" conf = global_conf.copy() conf.update(local_conf) return ContainerController(conf)
{ "content_hash": "390806025efa8fc295d9aaac07ae5ae1", "timestamp": "", "source": "github", "line_count": 544, "max_line_length": 79, "avg_line_length": 46.59742647058823, "alnum_prop": 0.5524083790287585, "repo_name": "orion/swift-config", "id": "0da4a0c4d3ee73edc7b4be2fdbdfa2e935b76877", "size": "25939", "binary": false, "copies": "3", "ref": "refs/heads/dynamic-pipeline", "path": "swift/container/server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "15048" }, { "name": "Python", "bytes": "3063773" }, { "name": "Shell", "bytes": "685" } ], "symlink_target": "" }
"""Base class for all storage backends""" import logging from oslo_config import cfg from oslo_utils import importutils from oslo_utils import units from glance_store import capabilities from glance_store.common import utils from glance_store import exceptions from glance_store import i18n _ = i18n._ LOG = logging.getLogger(__name__) class Store(capabilities.StoreCapability): OPTIONS = None READ_CHUNKSIZE = 4 * units.Mi # 4M WRITE_CHUNKSIZE = READ_CHUNKSIZE def __init__(self, conf): """ Initialize the Store """ super(Store, self).__init__() self.conf = conf self.store_location_class = None try: if self.OPTIONS is not None: # NOTE(flaper87): To be removed in k-2. This should # give deployers enough time to migrate their systems # and move configs under the new section. for opt in self.OPTIONS: opt.deprecated_opts = [cfg.DeprecatedOpt(opt.name, group='DEFAULT')] self.conf.register_opts(self.OPTIONS, group='glance_store') except cfg.DuplicateOptError: pass def configure(self, re_raise_bsc=False): """ Configure the store to use the stored configuration options and initialize capabilities based on current configuration. Any store that needs special configuration should implement this method. """ try: self.configure_add() except exceptions.BadStoreConfiguration as e: self.unset_capabilities(capabilities.BitMasks.WRITE_ACCESS) msg = (_(u"Failed to configure store correctly: %s " "Disabling add method.") % utils.exception_to_str(e)) LOG.warn(msg) if re_raise_bsc: raise finally: self.update_capabilities() def get_schemes(self): """ Returns a tuple of schemes which this store can handle. """ raise NotImplementedError def get_store_location_class(self): """ Returns the store location class that is used by this store. 
""" if not self.store_location_class: class_name = "%s.StoreLocation" % (self.__module__) LOG.debug("Late loading location class %s", class_name) self.store_location_class = importutils.import_class(class_name) return self.store_location_class def configure_add(self): """ This is like `configure` except that it's specifically for configuring the store to accept objects. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration`. """ # NOTE(flaper87): This should probably go away @capabilities.check def get(self, location, offset=0, chunk_size=None, context=None): """ Takes a `glance_store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance_store.location.Location` object, supplied from glance_store.location.get_location_from_uri() :raises `glance.exceptions.NotFound` if image does not exist """ raise NotImplementedError def get_size(self, location, context=None): """ Takes a `glance_store.location.Location` object that indicates where to find the image file, and returns the size :param location `glance_store.location.Location` object, supplied from glance_store.location.get_location_from_uri() :raises `glance_store.exceptions.NotFound` if image does not exist """ raise NotImplementedError @capabilities.check def add(self, image_id, image_file, image_size, context=None): """ Stores an image file with supplied identifier to the backend storage system and returns a tuple containing information about the stored image. 
:param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval tuple of URL in backing store, bytes written, checksum and a dictionary with storage system specific information :raises `glance_store.exceptions.Duplicate` if the image already existed """ raise NotImplementedError @capabilities.check def delete(self, location, context=None): """ Takes a `glance_store.location.Location` object that indicates where to find the image file to delete :location `glance_store.location.Location` object, supplied from glance_store.location.get_location_from_uri() :raises `glance_store.exceptions.NotFound` if image does not exist """ raise NotImplementedError def set_acls(self, location, public=False, read_tenants=[], write_tenants=[], context=None): """ Sets the read and write access control list for an image in the backend store. :location `glance_store.location.Location` object, supplied from glance_store.location.get_location_from_uri() :public A boolean indicating whether the image should be public. :read_tenants A list of tenant strings which should be granted read access for an image. :write_tenants A list of tenant strings which should be granted write access for an image. """ raise NotImplementedError
{ "content_hash": "8284118b48d3a08fa723f90a75302cc3", "timestamp": "", "source": "github", "line_count": 161, "max_line_length": 78, "avg_line_length": 36.546583850931675, "alnum_prop": 0.6227056424201224, "repo_name": "cpallares/glance_store", "id": "c55848f900c9c5da8be383baaf0d9aa4017e55ac", "size": "6549", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "glance_store/driver.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "442283" }, { "name": "Shell", "bytes": "7239" } ], "symlink_target": "" }
import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'memcache_client' copyright = u'2012, Mixpanel' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. 
#default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'memcache_clientdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'memcache_client.tex', u'memcache\\_client Documentation', u'Mixpanel', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'memcache_client', u'memcache_client Documentation', [u'Mixpanel'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'memcache_client', u'memcache_client Documentation', u'Mixpanel', 'memcache_client', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' autodoc_member_order = 'bysource' autoclass_content = 'both'
{ "content_hash": "c21092e1cd07d890bcfdc36950e0d1be", "timestamp": "", "source": "github", "line_count": 233, "max_line_length": 80, "avg_line_length": 32.227467811158796, "alnum_prop": 0.7026235184445332, "repo_name": "crask/redisproxy", "id": "0c22d5f6ffb529fc9eef5ae5810d86fc441fdc8c", "size": "7935", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/memcache/doc/conf.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "601036" }, { "name": "Python", "bytes": "159191" }, { "name": "Ragel in Ruby Host", "bytes": "6341" }, { "name": "Shell", "bytes": "67078" } ], "symlink_target": "" }
from nose import tools as nose
import unittest

from shiva import exceptions as exc


class ExceptionsTestCase(unittest.TestCase):
    """Unit tests for shiva's custom exception classes."""

    def test_invalid_mimetype_error(self):
        # The offending mimetype must be echoed back in the error message.
        mimetype = 'audio/mp3'
        error = exc.InvalidMimeTypeError(mimetype)
        nose.eq_(str(error), "Invalid mimetype %r" % mimetype)

    def test_no_config_found_error(self):
        # The error must render as a non-empty, human-readable message.
        error = exc.NoConfigFoundError()
        nose.assert_not_equal(str(error), '')
{ "content_hash": "113afe16ef532c0c609ac0a9ad765ae7", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 65, "avg_line_length": 28.666666666666668, "alnum_prop": 0.6813953488372093, "repo_name": "maurodelazeri/shiva-server", "id": "741adb5b684a5f4b2b8567a73038eeabbc1680c5", "size": "454", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/unit/exceptions-test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "6758" }, { "name": "Python", "bytes": "183318" }, { "name": "Shell", "bytes": "47" } ], "symlink_target": "" }
import os
import datetime
# NOTE(review): `os` and `datetime` appear unused in this module — candidates
# for cleanup.

from app import app, db


class Hint(db.Model):
    """Database model for a hint attached to a problem.

    A hint carries free-form text (``description``) and a visibility flag
    (``is_open``); each hint belongs to one problem via ``problem_id``.
    """

    __tablename__ = 'hints'
    id = db.Column(db.Integer, primary_key=True)
    # Free-form hint text shown to users.
    description = db.Column(db.Text)
    # Whether the hint is currently visible/unlocked.
    is_open = db.Column(db.Boolean)
    # FK to the owning problem row.
    problem_id = db.Column(db.Integer, db.ForeignKey('problems.id'))

    def __repr__(self):
        # Debug representation based on the hint text.
        return '<Hint %r>' % (self.description)

    def __init__(self, description='', is_open=False, problem=None):
        # `problem` is presumably a Problem instance; the `problem`
        # relationship attribute is likely defined via a backref on the
        # Problem model — TODO confirm against app/problem/models.py.
        self.description = description
        self.is_open = is_open
        self.problem = problem
{ "content_hash": "abdad13f932b5a13d49b25fe08bb0807", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 68, "avg_line_length": 26.85, "alnum_prop": 0.633147113594041, "repo_name": "vigov5/oshougatsu2015", "id": "f41a1ce9bbfb9a3f65c33e9986100ab487ba7015", "size": "537", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/hint/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "68421" }, { "name": "HTML", "bytes": "42941" }, { "name": "JavaScript", "bytes": "87207" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "69624" }, { "name": "Shell", "bytes": "3662" } ], "symlink_target": "" }
import os
import datetime
# NOTE(review): `datetime`, `models`, `Indexer` and `Field` appear unused in
# this module — candidates for cleanup.

from django.db import models
from django.core.management import call_command

from djapian import Indexer, Field, space
from djapian.tests.utils import BaseTestCase, BaseIndexerTest, Entry, Person, MultipleIndexerEntry


class IndexerUpdateTest(BaseIndexerTest, BaseTestCase):
    """Checks the index database created by BaseIndexerTest's fixtures."""

    def test_database_exists(self):
        # The Xapian database directory must exist on disk after indexing.
        self.assert_(os.path.exists(Entry.indexer._db._path))

    def test_document_count(self):
        # BaseIndexerTest creates three indexable entries.
        self.assertEqual(Entry.indexer.document_count(), 3)


class IndexCommandTest(BaseTestCase):
    """Checks that the `index` management command skips inactive entries."""

    def setUp(self):
        p = Person.objects.create(name="Alex")

        # NOTE(review): entry1/entry2 are unused names; kept for readability.
        entry1 = Entry.objects.create(
            author=p,
            title="Test entry",
            text="Not large text field"
        )
        # Inactive entry: should not be indexed.
        entry2 = Entry.objects.create(
            author=p,
            title="Another test entry",
            is_active=False
        )

        call_command("index", daemonize=False)

    def test_database(self):
        # Only the active entry ends up in the index.
        self.assertEqual(Entry.indexer.document_count(), 1)


class IndexCommandMultipleIndexersTest(BaseTestCase):
    """Checks create/change/delete propagation for a model with two indexers."""

    def setUp(self):
        MultipleIndexerEntry.objects.create(
            title="Test entry",
            text="Not large text field which helps us to test Djapian"
        )

        call_command("index", daemonize=False)

    def tearDown(self):
        # Wipe both indexes so tests stay independent.
        for indexer in space.get_indexers_for_model(MultipleIndexerEntry):
            indexer.clear()

    def test_create_multiple_indexers(self):
        for indexer in space.get_indexers_for_model(MultipleIndexerEntry):
            self.assertEqual(indexer.document_count(), 1)
            self.assertEqual(indexer.search("test").count(), 1)

    def test_change_multiple_indexers(self):
        for entry in MultipleIndexerEntry.objects.all():
            entry.title = "Test entry: updated"
            entry.save()

        call_command("index", daemonize=False)

        # Only the title indexer should see the updated token.
        self.assertEqual(MultipleIndexerEntry.indexer_title.search("updated").count(), 1)
        self.assertEqual(MultipleIndexerEntry.indexer_text.search("updated").count(), 0)

    def test_delete_multiple_indexers(self):
        MultipleIndexerEntry.objects.all().delete()

        call_command("index", daemonize=False)

        # Deletions must be reflected in every indexer.
        for indexer in space.get_indexers_for_model(MultipleIndexerEntry):
            self.assertEqual(indexer.document_count(), 0)
{ "content_hash": "9c9f372df0b442467717c7761fb4af96", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 98, "avg_line_length": 33.92857142857143, "alnum_prop": 0.6656842105263158, "repo_name": "adalekin/djapian", "id": "279cc31f45458ed3ae0b917208ebe8992a51bf78", "size": "2375", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "djapian/tests/index.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "82217" } ], "symlink_target": "" }
import unittest

import requests_mock

from frontstage import app
from tests.integration.mocked_services import url_banner_api


class TestSignOutHelp(unittest.TestCase):
    """Integration tests for the signed-out /help flow.

    Each test mocks the banner API (404 => no banner shown), drives the
    Flask test client through a help page, and asserts on rendered page
    content.
    """

    def setUp(self):
        # Fresh test client with a fake auth cookie for every test.
        self.app = app.test_client()
        self.app.set_cookie("localhost", "authorization", "session_key")

    @requests_mock.mock()
    def test_sign_out_help_get(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        response = self.app.get("/help")
        self.assertEqual(response.status_code, 200)
        self.assertIn("Help".encode(), response.data)
        self.assertIn("Choose an option".encode(), response.data)
        self.assertIn("Information about the Office for National Statistics (ONS)".encode(), response.data)
        self.assertIn("Continue".encode(), response.data)
        self.assertIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "info-ons"}
        response = self.app.post("/help", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Information about the Office for National Statistics (ONS)".encode(), response.data)
        self.assertIn("Choose an option".encode(), response.data)
        self.assertIn("Who is the Office for National Statistics (ONS)?".encode(), response.data)
        self.assertIn("How do you keep my data safe?".encode(), response.data)
        self.assertIn("Something else".encode(), response.data)
        self.assertIn("Continue".encode(), response.data)
        self.assertIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post_select_option(self, mock_request):
        # Submitting the form with no option selected shows a validation error.
        mock_request.get(url_banner_api, status_code=404)
        form = {}
        response = self.app.post("/help", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("At least one option should be selected.".encode(), response.data)
        self.assertIn("You need to choose an option".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post_ons_info_option_needs_to_be_selected(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {}
        response = self.app.post("/help/info-ons", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("At least one option should be selected.".encode(), response.data)
        self.assertIn("You need to choose an option".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post_who_is_ons(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "ons"}
        response = self.app.post("/help/info-ons", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Who is the Office for National Statistics (ONS)?".encode(), response.data)
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post_ons_info_data_safe(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "data"}
        response = self.app.post("/help/info-ons", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("How do you keep my data safe?".encode(), response.data)
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_post_ons_info_something_else(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "info-something-else"}
        response = self.app.post("/help/info-ons", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Information about the Office for National Statistics (ONS)".encode(), response.data)
        self.assertIn("Need more information?".encode(), response.data)
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_my_password(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "password"}
        response = self.app.post("/help", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Help with my password".encode(), response.data)
        self.assertIn("Choose an option".encode(), response.data)
        self.assertIn("I have not received the password reset email".encode(), response.data)
        self.assertIn("I cannot reset my password using the link".encode(), response.data)
        self.assertIn("My new password is not being accepted".encode(), response.data)
        self.assertIn("Something else".encode(), response.data)
        self.assertIn("Continue".encode(), response.data)
        self.assertIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_password_post_ons_info_option_needs_to_be_selected(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {}
        response = self.app.post("/help/help-with-my-password", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("At least one option should be selected.".encode(), response.data)
        self.assertIn("You need to choose an option".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_password_post_not_received_password_reset_email(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "reset-email"}
        response = self.app.post("/help/help-with-my-password", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("I have not received the password reset email".encode(), response.data)
        self.assertIn(
            "that the email address ons.surveys@notifications.service.gov.uk is added to your list of "
            "approved senders".encode(),
            response.data,
        )
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_password_is_not_being_accepted(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "password-not-accept"}
        response = self.app.post("/help/help-with-my-password", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("My new password isn't being accepted".encode(), response.data)
        self.assertIn(
            "We recommend that you enter your password directly into the password box rather than copy and "
            "paste it in. This will prevent you pasting any hidden or special characters.".encode(),
            response.data,
        )
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_password_reset(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "reset-password"}
        response = self.app.post("/help/help-with-my-password", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("I cannot reset my password using the link".encode(), response.data)
        self.assertIn(
            "If 72 hours have passed since you reset, you should reset your password again.".encode(), response.data
        )
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_with_password_something_else(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "password-something-else"}
        response = self.app.post("/help/help-with-my-password", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Help with my password".encode(), response.data)
        self.assertIn("If you are having problems signing in, please".encode(), response.data)
        self.assertNotIn("Continue".encode(), response.data)
        self.assertNotIn("Cancel".encode(), response.data)

    @requests_mock.mock()
    def test_sign_out_help_something_else(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        form = {"option": "something-else"}
        response = self.app.post("/help", data=form, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Further help".encode(), response.data)
        self.assertIn("You can find help for common issues if you".encode(), response.data)
        self.assertIn("If you are having problems signing in, please".encode(), response.data)
{ "content_hash": "d8ccaea09968c1e8b69b997b7a1f2fd3", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 116, "avg_line_length": 53.88505747126437, "alnum_prop": 0.6697952218430034, "repo_name": "ONSdigital/ras-frontstage", "id": "82429f9db98f6c0c8d09dcb581552c7f03aae88d", "size": "9376", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/integration/test_sign_out_help.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "592" }, { "name": "Dockerfile", "bytes": "621" }, { "name": "HTML", "bytes": "269090" }, { "name": "Makefile", "bytes": "824" }, { "name": "Python", "bytes": "705890" }, { "name": "Shell", "bytes": "2874" } ], "symlink_target": "" }
"""Setuptools packaging script for the Geeklist console client."""
from setuptools import setup, Extension, Command, find_packages
import sys,os,platform

# Lowercased OS name (e.g. 'linux', 'darwin').
# NOTE(review): currently unused; presumably kept for future
# platform-specific switches.
osname=platform.uname()[0].lower()

VERSION = '0.1'
DESCRIPTION = "It is a console based client written in python to access content of https://geekli.st website."
LONG_DESCRIPTION = """
It is a console based client written in python to access content of https://geekli.st website. You can view micros, post micros etc in a geeky way now. Go on and make an account on https://geekli.st and join an amazing community of geeks.
"""

# Strip each line and drop the empty ones.  A list comprehension (rather than
# the previous filter(None, map(str.strip, ...))) materialises a real list on
# both Python 2 and Python 3 -- under Python 3 the old expression produced a
# one-shot iterator, which is unsafe to hand to setuptools metadata.
# NOTE(review): these are not valid PyPI trove classifiers; harmless for a
# local install but a PyPI upload would reject them.
CLASSIFIERS = [line.strip() for line in """
Intended Audience :: Geeks!
License :: Apache License version 2.0
Programming Language :: Python
Operating System :: Linux :: MacOS X
Topic :: Command Line Tool
""".splitlines() if line.strip()]

setup(
    name="Geeklist Console",
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    author="Bhavyanshu Parasher",
    author_email="bhavyanshu.spl@gmail.com",
    url="https://github.com/bhavyanshu/Geeklist_console",
    license="Apache License, Version 2.0",
    packages = find_packages(),
    platforms=['any'],
    zip_safe=True,
    install_requires = [
        'setuptools',
        'docutils',
        'rauth',
        'simplejson'
    ],
)
{ "content_hash": "caa12ab00b9df3f264d0a55b33132eca", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 151, "avg_line_length": 29.27906976744186, "alnum_prop": 0.7029388403494837, "repo_name": "bhavyanshu/Geeklist_console", "id": "31303c56e1c36dabf25b149101119d86de395e03", "size": "1282", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "4832" } ], "symlink_target": "" }
"""Python bindings generation for HCTSA.""" from itertools import chain from whatami import whatable from pyopy.hctsa.hctsa_config import HCTSA_BINDINGS_FILE, HCTSA_BINDINGS_DIR from pyopy.hctsa.hctsa_catalog import HCTSACatalog from pyopy.hctsa.hctsa_data import hctsa_sine from pyopy.base import PyopyEngines from pyopy.hctsa.hctsa_transformers import hctsa_prepare_input from pyopy.misc import ensure_python_package @whatable class HCTSASuper(object): TAGS = () def __init__(self, outnames=None, eng=None): self._outnames = outnames self._eng = eng def _infer_eng(self, eng): if eng is None: eng = self._eng if self._eng is not None else PyopyEngines.default() return eng def output_names(self, eng=None, x=None, force=False): """ Returns ------- A string list of the outputs in HCTSA order """ eng = self._infer_eng(eng) if x is None: x = hctsa_sine()[:40] if force or self._outnames is None: # N.B. this is not deterministic inference, as sometimes the number of outputs depend on the T.S. # Is there an "upper bound on the amount of outputs"? 
x = eng.put('outputs_for_ts_87010877aawer98', x) # FIXME: put this ts in the engine on prepare out = self.transform(x, eng=eng) if isinstance(out, dict): self._outnames = sorted(out.keys()) # No easy way to get here the order in the struct else: self._outnames = None return self._outnames def transform(self, x, eng=None): eng = self._infer_eng(eng) return self._eval_hook(eng, x) def compute(self, x, eng=None): return self.transform(x, eng=eng) def _eval_hook(self, eng, x): raise NotImplementedError() def has_tag(self, tag='shit'): """Returns True iff this operation has the specified tag.""" return tag in self.TAGS def use_eng(self, eng): self._eng = eng @whatable(force_flag_as_whatami=True) class HCTSAOperation(object): def __init__(self, operation_name, matlab_call, operation): super(HCTSAOperation, self).__init__() self.name = operation_name self.matlab_call = matlab_call self.operation = operation self._must_standardize = None def what(self): return self.operation.what() def must_standardize(self): if self._must_standardize is None: from hctsa_catalog import HCTSACatalog self._must_standardize = HCTSACatalog.must_standardize(self.name) return self._must_standardize def compute(self, x, y=None, eng=None): if self.must_standardize() and y is None: x = hctsa_prepare_input(x, z_scored=True) if y is None else y else: x = hctsa_prepare_input(x, z_scored=False) return self.operation.compute(x, eng=eng) def __call__(self, x, y=None, eng=None): return self.compute(x, y=y, eng=eng) def _as_tuple(self): return self.name, self.matlab_call, self.operation def __getitem__(self, item): return self._as_tuple()[item] def __iter__(self): return iter(self._as_tuple()) def __repr__(self): return self.__class__.__name__ + self._as_tuple().__repr__() def gen_bindings(hctsa_catalog=None, write_function_too=False): """Generates the python bindings to the library operators.""" def gen_function(funcname, parameters, doc, defaults=None, prefix='HCTSA', varargin_as_args=False): 
"""Generates python bindings for calling the feature extractors.""" pyfuncname = '%s_%s' % (prefix, funcname) # varargin support (http://www.mathworks.com/help/matlab/ref/varargin.html) if len(parameters) > 0 and parameters[-1] == 'varargin' and varargin_as_args: parameters[-1] = '*args' raise NotImplementedError() # this would require to support default values longer than the number of parameters # this would need further work on calling to support pure numeric args via the cell-trick # avoid shadowing buitins with parameters... parameters = [{'ord': 'ord_'}.get(param, param)for param in parameters] # build the function signature if defaults is None: defaults = [None] * len(parameters) else: defaults.extend([None] * (len(parameters) - len(defaults))) defaults = map(lambda x: float(x) if isinstance(x, int) else x, defaults) # all double parameter_string = '' if len(parameters) == 0 else \ ', ' + ', '.join(['%s=%r' % (param, default) for param, default in zip(parameters, defaults)]) # ...better tuples than lists... 
parameter_string = parameter_string.replace('[', '(').replace(']', ')') # def line defline = 'def %s(eng, x%s):' % (pyfuncname, parameter_string) # ...cosmetic for long lines if len(defline) > 120: one_middle_comma = defline.find(',', 80, 100) + 1 visual_indent = ' ' * len('def %s(' % pyfuncname) defline = defline[:one_middle_comma] + '\n' + visual_indent + defline[one_middle_comma:].strip() # documentation indented_doc = '\n'.join([' %s' % line for line in doc.splitlines()[1:]]) doc_line = '\n'.join([' """', ' Matlab doc:', ' %s' % ('-' * 40), '%s' % indented_doc, ' %s' % ('-' * 40), ' """']) # function body body = [] for i, parameter in enumerate(parameters): body.append(' %s %s is None:' % ('if' if i == 0 else 'elif', parameter)) body.append(' return eng.run_function(1, \'%s\', x, %s)' % (funcname, ', '.join(parameters[0:i]))) body.append(' return eng.run_function(1, \'%s\', x, %s)' % (funcname, ', '.join(parameters))) body_line = '\n'.join(body) # assemble all together return pyfuncname, '\n'.join([defline, doc_line, body_line]) def gen_class_from_function_string(hctsa_function, func_params, function_prefix='HCTSA_', catalog=None): if catalog is None: catalog = HCTSACatalog.catalog() # Our indentation levels indent1 = ' ' * 4 indent2 = ' ' * 8 # Avoid shadowing + work well with whatami func_params = [{'ord': 'ordd'}.get(param, param) for param in func_params] hctsa_function = hctsa_function.replace('ord_', 'ordd') # Split the generated function code... 
# All this nastiness out of lazyness (did first function generation and do not feel like redoing right now) deflines, _, body = hctsa_function.partition('):\n') name = deflines[4:].partition('(')[0][len(function_prefix):] args_string = deflines.partition('(eng, x, ')[2] docstring, _, body = body.rpartition('"""') docstring += '"""' # parameter read -> class member read for param in func_params: body = body.replace('if %s is' % param, 'if self.%s is' % param) if 'varargin' == param: # dirty hack to force varagint to be transferred as cells # can fail in many instances (e.g. tuples should be passed as ((1,2),) instead of just (1,2)) # works for HCTSA as it is at the moment, but varargin is not generally supported # we need to fully implement it by translating to python *args body = body.replace( ', varargin', ", self.varargin + ('_celltrick_',) if isinstance(self.varargin, tuple) " "else (self.varargin, '_celltrick_')") else: body = body.replace(', %s' % param, ', self.%s' % param) body = body.strip() # known outputs and tags outputs_string = indent1 + 'KNOWN_OUTPUTS_SIZES = %r' % ( tuple(len(k) for k in catalog.functions_dict[name].known_outputs()),) tags_string = indent1 + 'TAGS = %r' % (tuple(catalog.functions_dict[name].tags()), ) if len(tags_string) > 120: comma = tags_string.find(', ', 90) tags_string = tags_string[:comma] + '\n' + ' ' * len(indent1 + 'TAGS = (') + tags_string[comma+2:] # Constructor body if args_string: lines = args_string.splitlines() if len(lines) == 2: # cosmetics constructor_string = indent1 + 'def __init__(self, %s\n%s' % ( lines[0], ' ' * len(' def __init__(') + lines[1].strip() + '):\n') else: constructor_string = indent1 + 'def __init__(self, %s):\n' % args_string else: constructor_string = indent1 + 'def __init__(self):\n' constructor_string += indent2 + 'super(%s, self).__init__()\n' % name if len(func_params) > 0: constructor_string += indent2 + indent2.join('self.%s = %s\n' % (param, param) for param in func_params) # eval method 
eval_method = indent1 + 'def _eval_hook(self, eng, x):\n' eval_method += indent1 + '\n'.join(indent1 + line for line in body.splitlines()) # cosmetics def break_long(line): if len(line) < 120: return [line] comma = line.find(', ', 90) return [line[:comma+1], ' ' * len(line.partition('run_function(')[0] + 'run_function(') + line[comma + 2:]] eval_method = '\n'.join(chain.from_iterable(map(break_long, eval_method.splitlines()))) # put all together code = [ 'class %s(HCTSASuper):' % name, docstring + '\n', outputs_string + '\n', tags_string + '\n', constructor_string, eval_method, ] return name, '\n'.join(code) def gen_operations_class(catalog=None, add_commented_out=False): """Returns text with all the metaops in the current HCTSA release under a class namespace.""" if catalog is None: catalog = HCTSACatalog() lines = [] for fname, func in sorted(catalog.functions_dict.items()): for operation in func.operations: if not add_commented_out and operation.is_commented: continue # generate the call string # avoid shadowing buitins with parameters... 
params = [{'ord': 'ordd'}.get(param, param) for param in func.params] values = operation.param_values params_string = '' if not values else \ ', '.join('%s=%r' % (name, value) for name, value in zip(params, values)) def chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i+n] for outs in chunks(operation.known_outputs(), 5): lines.append('# outs: %s' % ','.join(map(str, outs) if operation.known_outputs() else '')) lines.append('# tags: %s' % ','.join(map(str, operation.tags()) if operation.tags() else '')) lines.append('%s = HCTSAOperation(' % operation.opname) lines.append(' \'%s\',' % operation.opname) lines.append(' %r,' % operation.opcall) instline = ' %s(%s))\n' % (fname, params_string) if len(instline) < 110: lines.append(instline) else: first_comma = instline.find(',', 90) lines.append(instline[:first_comma + 1]) lines.append(' ' * len(' %s(' % fname) + instline[first_comma+2:]) lines = [' %s' % line for line in lines] return 'class HCTSAOperations(object):\n' \ ' """Namespace for HCTSA selected operations."""' \ '\n\n%s' % '\n'.join(lines) # Read-in the catalog if hctsa_catalog is None: hctsa_catalog = HCTSACatalog() # Ensure that the destination python package exists ensure_python_package(HCTSA_BINDINGS_DIR) with open(HCTSA_BINDINGS_FILE, 'w') as writer: # Bindings imports binding_imports = ( 'from pyopy.base import MatlabSequence', 'from pyopy.hctsa.hctsa_bindings_gen import HCTSASuper, HCTSAOperation') exec '\n'.join(binding_imports) in globals() # We are using nasty execs around that need these imports # Write the header writer.write('# coding=utf-8\n') writer.write('\n'.join(binding_imports) + '\n\n\n') # Write the functions and classes funcnames = [] classnames = [] exclusions = {'PP_PreProcess', } # PP_PreProcess does not extract features for func in sorted(hctsa_catalog.functions_dict.values(), key=lambda f: f.funcname): if func.funcname in exclusions: continue funcname, funcdef = gen_function(func.funcname, func.params, func.doc, 
hctsa_catalog.default_parameters(func.funcname)) if write_function_too: writer.write(funcdef) writer.write('\n\n\n') classname, classdef = gen_class_from_function_string(funcdef, func.params, catalog=hctsa_catalog) writer.write(classdef) writer.write('\n\n\n') funcnames.append(funcname) classnames.append(classname) # Write "all operations" tuples if write_function_too: writer.write('HCTSA_ALL_FUNCS = (\n %s)' % ' '.join('%s,\n' % f for f in funcnames)) writer.write('\n\n') writer.write('HCTSA_ALL_CLASSES = (\n %s)' % ' '.join('%s,\n' % f for f in classnames)) writer.write('\n\n\n') writer.write(gen_operations_class()) if __name__ == '__main__': gen_bindings()
{ "content_hash": "27d5069dbf85f67094a570663d7b37c9", "timestamp": "", "source": "github", "line_count": 331, "max_line_length": 117, "avg_line_length": 43.17522658610272, "alnum_prop": 0.5504863200615772, "repo_name": "strawlab/pyopy", "id": "0cf38255c3b54dba564e38f993bde74069f51fd0", "size": "14306", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyopy/hctsa/hctsa_bindings_gen.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Java", "bytes": "13490" }, { "name": "Matlab", "bytes": "306" }, { "name": "Python", "bytes": "1003257" }, { "name": "Shell", "bytes": "592" } ], "symlink_target": "" }
class Client(object):
    """Thin wrapper that instantiates a concrete driver for this client.

    A ``Driver`` attribute is expected to be provided (e.g. by a subclass);
    all keyword arguments are forwarded verbatim to its constructor and the
    resulting instance is stored as ``self.driver``.
    """

    def __init__(self, **kwargs):
        driver_cls = self.Driver
        self.driver = driver_cls(**kwargs)
{ "content_hash": "048792c4b26e31512e8a0a5862ea6e4c", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 43, "avg_line_length": 25.25, "alnum_prop": 0.594059405940594, "repo_name": "Jc2k/libcloudcore", "id": "98f4d6062a998f303f520118af41a04ed9afdf38", "size": "884", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "libcloudcore/client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "130567" } ], "symlink_target": "" }
from django.urls import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import tabs from gbpui import client from gbpui import column_filters as gfilters from gbpui.panels.network_policy import tables class L3PolicyDetailsTab(tabs.Tab): name = _("L3 Policy Details") slug = "l3_policy_details" template_name = "project/endpoint_groups/_l3_policy_details.html" failure_url = reverse_lazy('horizon:project:network_policy:index') def get_context_data(self, request): l3policy_id = self.tab_group.kwargs['l3policy_id'] try: l3policy = client.l3policy_get(request, l3policy_id) except Exception: exceptions.handle( request, _('Unable to retrieve l3 policy details.'), redirect=self.failure_url) return {'l3policy': l3policy} class L3PolicyTab(tabs.TableTab): table_classes = (tables.L3PolicyTable,) name = _("L3 Policy") slug = "l3policy" template_name = "horizon/common/_detail_table.html" def get_l3policy_table_data(self): policies = [] try: policies = client.l3policy_list(self.request, tenant_id=self.request.user.tenant_id) update = lambda x: gfilters.update_l3_policy_attributes( self.request, x) policies = [update(item) for item in policies] except Exception: policies = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve l3 policy list.')) return policies class L2PolicyTab(tabs.TableTab): table_classes = (tables.L2PolicyTable,) name = _("L2 Policies") slug = "l2policy" template_name = "horizon/common/_detail_table.html" def get_l2policy_table_data(self): policies = [] try: policies = client.l2policy_list(self.request, tenant_id=self.request.user.tenant_id) except Exception: policies = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve l2 policy list.')) return policies class ServicePolicyTab(tabs.TableTab): table_classes = (tables.ServicePolicyTable,) name = _("Service Policy") slug = "service_policy" template_name = "horizon/common/_detail_table.html" def 
get_service_policy_table_data(self): policies = [] try: policies = client.networkservicepolicy_list(self.request, tenant_id=self.request.user.tenant_id) update = lambda x: gfilters.update_service_policy_attributes(x) policies = [update(item) for item in policies] except Exception: exceptions.handle(self.tab_group.request, _('Unable to retrieve network service policy list.')) return policies class ServicePolicyDetailsTab(tabs.Tab): name = _("Service Policy Details") slug = "service_policy_details" template_name = "project/network_policy/_service_policy_details.html" failure_url = reverse_lazy('horizon:project:network_policy:index') def get_context_data(self, request): policy_id = self.tab_group.kwargs['service_policy_id'] try: policy = client.get_networkservice_policy(request, policy_id) except Exception: exceptions.handle( request, _('Unable to retrieve service policy details.'), redirect=self.failure_url) return {'policy': policy} class ExternalConnectivityTab(tabs.TableTab): table_classes = (tables.ExternalConnectivityTable,) name = _("External Connectivity") slug = "external_connectivity" template_name = "horizon/common/_detail_table.html" def get_external_connectivity_table_data(self): external_segment_list = [] try: external_segment_list = \ client.externalconnectivity_list(self.request, self.request.user.tenant_id) except Exception: exceptions.handle(self.tab_group.request, _('Unable to retrieve network service policy list.')) return external_segment_list class ExternalConnectivityDetailsTab(tabs.Tab): name = _("External Connectivity Details") slug = "external_connectivity_details" template_name = \ "project/network_policy/_external_connectivity_details.html" failure_url = reverse_lazy('horizon:project:network_policy:index') def get_context_data(self, request): external_connectivity_id = \ self.tab_group.kwargs['external_connectivity_id'] try: external_connectivity = client.get_externalconnectivity(request, external_connectivity_id) except Exception: 
exceptions.handle( request, _('Unable to retrieve service policy details.'), redirect=self.failure_url) return {'external_connectivity': external_connectivity} class NATPoolTab(tabs.TableTab): table_classes = (tables.NATPoolTable,) name = _("NAT Pool") slug = "nat_pool" template_name = "horizon/common/_detail_table.html" def get_nat_pool_table_data(self): nat_pool_list = [] try: nat_pools = \ client.natpool_list(self.request, self.request.user.tenant_id) update = lambda x: gfilters.update_nat_pool_attributes( self.request, x) nat_pool_list = [update(nat_pool) for nat_pool in nat_pools] except Exception: exceptions.handle(self.tab_group.request, _('Unable to retrieve nat pool list.')) return nat_pool_list class NATPoolDetailsTab(tabs.Tab): name = _("NAT Pool Details") slug = "nat_pool_details" template_name = \ "project/network_policy/_nat_pool_details.html" failure_url = reverse_lazy('horizon:project:network_policy:index') def get_context_data(self, request): nat_pool_id = \ self.tab_group.kwargs['nat_pool_id'] try: nat_pool = client.get_natpool(request, nat_pool_id) except Exception: exceptions.handle( request, _('Unable to retrieve nat pool details.'), redirect=self.failure_url) return {'nat_pool': nat_pool} class ServicePolicyDetailsTabs(tabs.TabGroup): slug = "service_policy_details_tab" tabs = (ServicePolicyDetailsTab,) sticky = True class ExternalConnectivityDetailsTabs(tabs.TabGroup): slug = "external_connectivity_details_tab" tabs = (ExternalConnectivityDetailsTab,) sticky = True class NATPoolDetailsTabs(tabs.TabGroup): slug = "nat_pool_details_tab" tabs = (NATPoolDetailsTab,) sticky = True class L3PolicyTabs(tabs.TabGroup): slug = "l3policy_tab" tabs = (L3PolicyTab, ServicePolicyTab, ExternalConnectivityTab, NATPoolTab) sticky = True class L2PolicyDetailsTab(tabs.Tab): name = _("L2 Policy Details") slug = "l2_policy_details" template_name = "project/network_policy/_l2_policy_details.html" failure_url = reverse_lazy('horizon:project:endpoint_group:index') 
    def get_context_data(self, request):
        """Fetch the L2 policy plus its member policy target groups.

        The fetched policy target groups are attached to the policy object
        as a ``ptgs`` attribute for the template to consume.
        """
        l2policy_id = self.tab_group.kwargs['l2policy_id']
        try:
            l2policy = client.l2policy_get(request, l2policy_id)
            ptgs = []
            for item in l2policy.policy_target_groups:
                ptgs.append(client.policy_target_get(request, item))
            setattr(l2policy, 'ptgs', ptgs)
        except Exception:
            exceptions.handle(
                request, _('Unable to retrieve l2 policy details.'),
                redirect=self.failure_url)
        return {'l2policy': l2policy}


class L2PolicyDetailsTabs(tabs.TabGroup):
    slug = "l2policy_tabs"
    tabs = (L2PolicyDetailsTab,)


class L3PolicyDetailsTabs(tabs.TabGroup):
    slug = "l3policy_tabs"
    tabs = (L3PolicyDetailsTab, L2PolicyTab,)
{ "content_hash": "fff4bb3902f9b99ddf6b663f162c8ca4", "timestamp": "", "source": "github", "line_count": 236, "max_line_length": 79, "avg_line_length": 34.3771186440678, "alnum_prop": 0.629976580796253, "repo_name": "noironetworks/group-based-policy-ui", "id": "ee960890aeb9df639a610a7a7eb086473dff14b1", "size": "8686", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gbpui/panels/network_policy/tabs.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "31546" }, { "name": "HTML", "bytes": "26683" }, { "name": "JavaScript", "bytes": "37600" }, { "name": "Python", "bytes": "277035" }, { "name": "SCSS", "bytes": "174" }, { "name": "Shell", "bytes": "16825" } ], "symlink_target": "" }
# Django command-line entry point for the madapp project.
import os
import sys

if __name__ == "__main__":
    # Select this project's settings module unless the caller has already
    # chosen one via the environment; must happen before importing Django's
    # management machinery.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "madapp.settings")

    from django.core.management import execute_from_command_line

    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
{ "content_hash": "531c40bad52364eb58a30997c3ac90bf", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 70, "avg_line_length": 25.22222222222222, "alnum_prop": 0.7092511013215859, "repo_name": "gilneidp/FinalProject", "id": "8d4ee19f8c7d80bd60da8e1208af2610416f17d9", "size": "249", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "manage.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "762" }, { "name": "C++", "bytes": "37788" }, { "name": "CSS", "bytes": "4096" }, { "name": "HTML", "bytes": "20080" }, { "name": "JavaScript", "bytes": "11126" }, { "name": "Python", "bytes": "1723412" }, { "name": "Shell", "bytes": "20419" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function import unittest import dreal import dreal._odr_test_module_py as odr_test_module class TestODR(unittest.TestCase): def test_variable(self): x1 = dreal.Variable('x') x2 = odr_test_module.new_variable('x') self.assertNotEqual(x1.get_id(), x2.get_id()) if __name__ == '__main__': unittest.main(verbosity=0)
{ "content_hash": "0ef2c3628aad519b6cc0c61f062195a9", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 64, "avg_line_length": 23.941176470588236, "alnum_prop": 0.6658476658476659, "repo_name": "soonho-tri/dreal4", "id": "372115a9889690bd3180bdf913bab25dfd4ad68c", "size": "407", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dreal/test/python/odr_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "250" }, { "name": "C++", "bytes": "638067" }, { "name": "Dockerfile", "bytes": "1139" }, { "name": "Jupyter Notebook", "bytes": "1745" }, { "name": "LLVM", "bytes": "16721" }, { "name": "Python", "bytes": "71679" }, { "name": "SMT", "bytes": "1233609" }, { "name": "Shell", "bytes": "12043" }, { "name": "Starlark", "bytes": "68448" }, { "name": "Yacc", "bytes": "29216" } ], "symlink_target": "" }
"""Simple script to read from serial ports for the purpose of logging timestamped data from serial-based sensors. Current implementation reads from three serial ports hard-coded into the open_serial() function, which must be enabled/disabled based on setting the port_flags as arguments. IMUs were Sparkfun Razor IMUs and the GPS was a NovAtel. Author: Adam Werries, awerries@cmu.edu Usage: python3 serialread.py <port0 on/off> <port1 on/off> <port2 on/off> python3 serialread.py 1 0 0 python3 serialread.py 0 1 1 Default is 0 0 1 if no arguments are given. """ import sys import serial import io from time import sleep, time from datetime import datetime start = b'4' def open_serial(port_flags): imu1, imu2, gps = None, None, None if port_flags[0]: imu1 = serial.Serial(port='/dev/ttyUSB1', baudrate=57600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=0) if port_flags[1]: imu2 = serial.Serial(port='/dev/ttyUSB2', baudrate=115200, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=0) if port_flags[2]: gps = serial.Serial(port='/dev/ttyUSB0', baudrate=115200, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=0) return imu1,imu2,gps def log_imu(ser, outputfile, line, hasStart): output = ser.read().decode('utf-8') if output == '$': line = [output] hasStart = True elif hasStart: line.append(output) if output == '\n': save_output(outputfile, ''.join(line)) hasStart = False return line, hasStart def log_gps(ser, outputfile, line, hasStart): output = ser.read().decode('utf-8') if output == '#': line = [output] hasStart = True elif hasStart: line.append(output) if output == '\n': save_output(outputfile, ''.join(line)) hasStart = False return line, hasStart def save_output(outputfile, output): if output: outputfile.write(str(time())) outputfile.write(' ') outputfile.write(output) print(output,end='') def main(argv): # default is to only log GPS if len(argv) 
< 4: port_flags = [0, 0, 1] port_flags = [int(argv[1]), int(argv[2]), int(argv[3])] imu1, imu2, gps = None, None, None imu1_log, imu2_log, gps_log = None, None, None imu1_start, imu2_start, gps_start = False, False, False line1 = list() line2 = list() line3 = list() now = datetime.now().replace(microsecond=0).isoformat() try: print('Opening serial...') (imu1, imu2, gps) = open_serial(port_flags) sleep(2) if imu1: imu1_log = open('imu1_log_{0}.log'.format(now), 'w') imu1.readline() imu1.write(start) imu1.flush() if imu2: imu2_log = open('imu2_log_{0}.log'.format(now), 'w') imu2.flush() if gps: gps_log = open('gps_log_{0}.log'.format(now), 'w') gps.readline() gps.flush() print('Starting logging...') while True: if imu1: line1,imu1_start = log_imu(imu1, imu1_log, line1, imu1_start) if imu2: line2,imu2_start = log_imu(imu2, imu2_log, line2, imu2_start) if gps: line3,gps_start = log_gps(gps, gps_log, line3, gps_start) finally: if imu1: imu1.close() imu1_log.close() if imu2: imu2.close() imu2_log.close() if gps: gps.close() gps_log.close() if __name__=='__main__': main(sys.argv)
{ "content_hash": "21f1fe764c16c6106260308270db7984", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 119, "avg_line_length": 31.333333333333332, "alnum_prop": 0.5786196159833938, "repo_name": "awerries/lowcost-sensors", "id": "d1b5d31636659dcb5dd449715abb86d3bfe53d71", "size": "3876", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Python_Tools/serialread.py", "mode": "33261", "license": "mit", "language": [ { "name": "Arduino", "bytes": "6014" }, { "name": "C++", "bytes": "215683" }, { "name": "Makefile", "bytes": "1783" }, { "name": "Python", "bytes": "11737" } ], "symlink_target": "" }
import os


class LocalPathInfo(object):
    """Tracks groups of candidate local file paths, in priority order.

    Each entry of ``_path_priority_groups`` is a list of paths; earlier
    groups take priority over later ones when resolving a local path.
    """

    def __init__(self, path_priority_groups):
        # Elements of path_priority_groups may be a single path string or a
        # list of path strings; None/empty yields no groups.
        self._path_priority_groups = self._ParseLocalPaths(
            path_priority_groups)

    def GetLocalPath(self):
        """Return the most recently modified existing path from the first
        priority group containing any existing path, or None."""
        for priority_group in self._path_priority_groups:
            # BUG FIX: materialize the filtering into a list. On Python 3,
            # filter() returns a lazy (always-truthy) iterator, so the
            # emptiness test below never skipped the group and max() raised
            # ValueError when no path existed.
            existing = [p for p in priority_group if os.path.exists(p)]
            if not existing:
                continue
            return max(existing, key=lambda path: os.stat(path).st_mtime)
        return None

    def IsPathInLocalPaths(self, path):
        """Return True if ``path`` appears in any priority group."""
        return any(
            path in priority_group
            for priority_group in self._path_priority_groups)

    def Update(self, local_path_info):
        """Append paths from ``local_path_info`` that this instance does not
        already know; each other group keeps its own priority slot."""
        if not local_path_info:
            return
        for priority_group in local_path_info._path_priority_groups:
            group_list = [path for path in priority_group
                          if not self.IsPathInLocalPaths(path)]
            if group_list:
                self._path_priority_groups.append(group_list)

    @staticmethod
    def _ParseLocalPaths(local_paths):
        """Normalize ``local_paths`` into a list of lists of path strings."""
        if not local_paths:
            return []
        # BUG FIX: basestring only exists on Python 2; fall back to str so
        # the same code runs on Python 3 without changing Py2 behavior.
        try:
            string_types = basestring  # noqa: F821 (Python 2)
        except NameError:
            string_types = str  # Python 3
        return [[e] if isinstance(e, string_types) else e
                for e in local_paths]
{ "content_hash": "5321ca360f9369151b775f146e11b662", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 80, "avg_line_length": 30.583333333333332, "alnum_prop": 0.6793823796548593, "repo_name": "XiaosongWei/chromium-crosswalk", "id": "0103e8f7590c313491a3f3d8ef1d276134e91d39", "size": "1264", "binary": false, "copies": "17", "ref": "refs/heads/master", "path": "tools/telemetry/catapult_base/dependency_manager/local_path_info.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from setuptools import setup, find_packages

# Use the README as the long description shown on PyPI; a context manager
# guarantees the handle is closed even if reading fails.
with open('README.rst') as f:
    readme = f.read()

setup(
    name='django-taggit',
    version='0.12',
    description='django-taggit is a reusable Django application for simple tagging.',
    long_description=readme,
    author='Alex Gaynor',
    author_email='alex.gaynor@gmail.com',
    url='http://github.com/alex/django-taggit/tree/master',
    packages=find_packages(exclude=('tests*',)),
    package_data={
        'taggit': [
            'locale/*/LC_MESSAGES/*',
        ],
    },
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        # The bare "Programming Language :: Python" classifier previously
        # appeared twice; keep a single copy alongside the per-version ones.
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Framework :: Django',
    ],
    include_package_data=True,
    zip_safe=False,
)
{ "content_hash": "c972f1fe2aad19723ca6619cc305ed05", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 85, "avg_line_length": 30.358974358974358, "alnum_prop": 0.597972972972973, "repo_name": "laborautonomo/django-taggit", "id": "8f301dcb7c2bb7e1e793cac4de706f368c954e1c", "size": "1184", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }