text
stringlengths
4
1.02M
meta
dict
""" This runs the mail throttler as a window service Base code for the service is taken from http://code.activestate.com/recipes/551780/ """ #Internal Modules from mailThrottler import MTDaemon from core import _config #Python BulitIns from os.path import splitext, abspath from sys import modules, argv import logging from logging.handlers import TimedRotatingFileHandler #External Modules import win32serviceutil import win32service import win32event import win32api class Service(win32serviceutil.ServiceFramework): _svc_name_ = '_unNamed' _svc_display_name_ = '_Service Template' def __init__(self, *args): win32serviceutil.ServiceFramework.__init__(self, *args) self.log('init') self.stop_event = win32event.CreateEvent(None, 0, 0, None) def log(self, msg): import servicemanager servicemanager.LogInfoMsg(str(msg)) def sleep(self, sec): win32api.Sleep(sec*1000, True) def SvcDoRun(self): self.ReportServiceStatus(win32service.SERVICE_START_PENDING) try: self.ReportServiceStatus(win32service.SERVICE_RUNNING) self.log('start') self.start() self.log('wait') win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE) self.log('done') except Exception, x: self.log('Exception : %s' % x) self.SvcStop() def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) self.log('stopping') self.stop() self.log('stopped') win32event.SetEvent(self.stop_event) self.ReportServiceStatus(win32service.SERVICE_STOPPED) # to be overridden def start(self): pass # to be overridden def stop(self): pass def instart(cls, name, display_name=None, stay_alive=True): ''' Install and Start (auto) a Service cls : the class (derived from Service) that implement the Service name : Service name display_name : the name displayed in the service manager stay_alive : Service will stop on logout if False ''' cls._svc_name_ = name cls._svc_display_name_ = display_name or name try: module_path=modules[cls.__module__].__file__ except AttributeError: # maybe py2exe went by from sys import 
executable module_path=executable module_file=splitext(abspath(module_path))[0] cls._svc_reg_class_ = '%s.%s' % (module_file, cls.__name__) if stay_alive: win32api.SetConsoleCtrlHandler(lambda x: True, True) try: win32serviceutil.InstallService( cls._svc_reg_class_, cls._svc_name_, cls._svc_display_name_, startType=win32service.SERVICE_AUTO_START ) print "Install ok" win32serviceutil.StartService( cls._svc_name_ ) print 'Start ok' except Exception, x: print str(x) class WindowsMT(Service): defaultValues = { "logFilePath": "logfiles/mailthrotter.log", "logBackupCount" : "5" } _config.importDefaults("WindowsMT", defaultValues) def start(self): filepath = "" filepath = splitext(modules[MTDaemon.__module__].__file__)[0] + ".ini" self.daemon = MTDaemon(filepath) #Attach rolling file logger log = TimedRotatingFileHandler( _config.get("WindowsMT", "logFilePath") , when='midnight', backupCount=_config.getint("WindowsMT", "logBackupCount")) log.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") log.setFormatter(formatter) logging.getLogger("").addHandler(log) self.daemon.start() def stop(self): self.daemon.stop() del(self.daemon) instart(WindowsMT, 'MailThrotter', 'MailThrotter python service')
{ "content_hash": "938726d63544768b31d0039e53d5ab37", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 157, "avg_line_length": 34.09322033898305, "alnum_prop": 0.6221725080785483, "repo_name": "bobbynewmark/mailthrottler", "id": "b6f2b0a639ba43946d6643bab8dab5a963721ce5", "size": "4111", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mtWindowsServer.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1771" }, { "name": "HTML", "bytes": "7248" }, { "name": "Python", "bytes": "54627" } ], "symlink_target": "" }
from collections import OrderedDict from random import randint from time import time d = OrderedDict() players = list("ABCDEFGH") start = time() for i in xrange(8): raw_input() p = players.pop(randint(0,7 - i)) end = time() print i+1, p ,end - start d[p] = (i+i,end - start) print print '-' * 40 for item in d: print item,d[item]
{ "content_hash": "6d8a3409d5480b73afa8f166d1dc6e6b", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 37, "avg_line_length": 16.40909090909091, "alnum_prop": 0.6260387811634349, "repo_name": "liuhll/BlogAndNotes", "id": "5cff065db0b4513de2483ba2aba51486a4489990", "size": "385", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Notes/Python/src/exercise/orderdict_demo.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "841" }, { "name": "Python", "bytes": "17571" }, { "name": "Shell", "bytes": "7304" }, { "name": "VimL", "bytes": "10983" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals import copy import datetime import os import re import sys import time import warnings from io import BytesIO from pprint import pformat try: from urllib.parse import quote, parse_qsl, urlencode, urljoin, urlparse except ImportError: # Python 2 from urllib import quote, urlencode from urlparse import parse_qsl, urljoin, urlparse from django.utils import six from django.utils.six.moves import http_cookies # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = http_cookies.SimpleCookie() try: _tc.load(str('foo:bar=1')) _cookie_allows_colon_in_names = True except http_cookies.CookieError: _cookie_allows_colon_in_names = False if _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = http_cookies.SimpleCookie else: Morsel = http_cookies.Morsel class SimpleCookie(http_cookies.SimpleCookie): if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names: def load(self, rawdata): self.bad_cookies = set() super(SimpleCookie, self).load(rawdata) for key in self.bad_cookies: del self[key] # override private __set() method: # (needed for using our Morsel, and for laxness with CookieError def _BaseCookie__set(self, key, real_value, coded_value): try: M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) except http_cookies.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, http_cookies.Morsel()) from django.conf import settings from django.core import signing from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser from django.http.utils import * from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import smart_str, smart_unicode, iri_to_uri, force_unicode from django.utils.http import cookie_date from django.utils import six from django.utils import timezone RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass RAISE_ERROR = object() def build_request_repr(request, path_override=None, GET_override=None, POST_override=None, COOKIES_override=None, META_override=None): """ Builds and returns the request's representation string. The request's attributes may be overridden by pre-processed values. """ # Since this is called as part of error handling, we need to be very # robust against potentially malformed input. 
try: get = (pformat(GET_override) if GET_override is not None else pformat(request.GET)) except: get = '<could not parse>' if request._post_parse_error: post = '<could not parse>' else: try: post = (pformat(POST_override) if POST_override is not None else pformat(request.POST)) except: post = '<could not parse>' try: cookies = (pformat(COOKIES_override) if COOKIES_override is not None else pformat(request.COOKIES)) except: cookies = '<could not parse>' try: meta = (pformat(META_override) if META_override is not None else pformat(request.META)) except: meta = '<could not parse>' path = path_override if path_override is not None else request.path return smart_unicode('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % (request.__class__.__name__, path, six.text_type(get), six.text_type(post), six.text_type(cookies), six.text_type(meta))) class UnreadablePostError(IOError): pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None self._post_parse_error = False def __repr__(self): return build_request_repr(self) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != (self.is_secure() and '443' or '80'): host = '%s:%s' % (host, server_port) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. 
# Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '') def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempts to return a signed cookie. If the signature fails or the cookie has expired, raises an exception... unless you provide the default argument in which case that value will be returned instead. """ try: cookie_value = self.COOKIES[key].encode('utf-8') except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. """ if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def _is_secure(self): return os.environ.get("HTTPS") == "on" def is_secure(self): # First, check the SECURE_PROXY_SSL_HEADER setting. if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.') if self.META.get(header, None) == value: return True # Failing that, fall back to _is_secure(), which is a hook for # subclasses to implement. return self._is_secure() def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' def _set_encoding(self, val): """ Sets the encoding used for GET/POST accesses. 
If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _get_encoding(self): return self._encoding encoding = property(_get_encoding, _set_encoding) def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] def _set_upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def _get_upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers upload_handlers = property(_get_upload_handlers, _set_upload_handlers) def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning = "You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise Exception("You cannot access body after reading from request's data stream") try: self._body = self.read() except IOError: e = sys.exc_info() six.reraise(UnreadablePostError, UnreadablePostError(*e[1].args), e[2]) self._stream = BytesIO(self._body) return self._body @property def raw_post_data(self): warnings.warn('HttpRequest.raw_post_data has been deprecated. 
Use HttpRequest.body instead.', DeprecationWarning) return self.body def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. ## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (e.g. WSGIRequest). ## Also when request data has already been read by request.POST or ## request.body, self._stream points to a StringIO instance ## containing that data. def read(self, *args, **kwargs): self._read_started = True return self._stream.read(*args, **kwargs) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. 
This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): super(QueryDict, self).__init__() if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True self.appendlist(force_unicode(key, encoding, errors='replace'), force_unicode(value, encoding, errors='replace')) self._mutable = mutable def _get_encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding def _set_encoding(self, value): self._encoding = value encoding = property(_get_encoding, _set_encoding) def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) super(QueryDict, self).__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in self.iterlists(): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in self.iterlists(): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = str_to_unicode(key, self.encoding) list_ = [str_to_unicode(elt, self.encoding) for elt in list_] super(QueryDict, self).setlist(key, list_) def setlistdefault(self, key, default_list=None): 
self._assert_mutable() return super(QueryDict, self).setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) super(QueryDict, self).appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super(QueryDict, self).pop(key, *args) def popitem(self): self._assert_mutable() return super(QueryDict, self).popitem() def clear(self): self._assert_mutable() super(QueryDict, self).clear() def setdefault(self, key, default=None): self._assert_mutable() key = str_to_unicode(key, self.encoding) default = str_to_unicode(default, self.encoding) return super(QueryDict, self).setdefault(key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. :arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = six.n(safe) encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = smart_str(k, self.encoding) output.extend([encode(k, smart_str(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, http_cookies.BaseCookie): try: c = SimpleCookie() c.load(six.n(cookie)) except http_cookies.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed headers.""" status_code = 200 def __init__(self, content='', content_type=None, status=None, 
mimetype=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: warnings.warn("Using mimetype keyword argument is deprecated, use" " content_type instead", PendingDeprecationWarning) content_type = mimetype if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) self.content = content self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def __str__(self): """Full HTTP message, including headers.""" return '\n'.join(['%s: %s' % (key, value) for key, value in self._headers.values()]) \ + '\n\n' + self.content.decode(self._charset) def _convert_to_ascii(self, *values): """Converts all values to ascii strings.""" for value in values: if isinstance(value, six.text_type): try: if not six.PY3: value = value.encode('us-ascii') else: # In 3k, still use Unicode strings in headers value.encode('us-ascii') except UnicodeError as e: e.reason += ', HTTP response headers must be in US-ASCII format' raise elif six.PY3 and isinstance(value, bytes): # str(<bytes>) would result in "b'...'" value = value.decode() else: value = str(value) if '\n' in value or '\r' in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % (value)) yield value def __setitem__(self, header, value): header, value = self._convert_to_ascii(header, value) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def __getstate__(self): # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we # serialise to a string instead state = self.__dict__.copy() state['cookies'] = str(state['cookies']) return 
state def __setstate__(self, state): self.__dict__.update(state) self.cookies = SimpleCookie(self.cookies) def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return six.dictvalues(self._headers) def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then ``max_age`` will be calculated. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. 
if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') def _process_chunk(self, chunk): # django3: added to simplify handling of non-character content # like integers (why we need this, I have no idea ;-) if isinstance(chunk, six.text_type): chunk = chunk.encode(self._charset) if not isinstance(chunk, bytes): # django3: chunk could be any random object # (see test_iter_content), so if it's not # bytes at this point, we need to convert # it to text using a str operation chunk = str(chunk).encode(self._charset) # Python 3.2 insists that the type is bytes, # a subclass is not accepted. 
See Python issues #5800, #10935 return bytes(chunk) def _get_content(self): if self.has_header('Content-Encoding'): process = self._process_chunk return b''.join([process(e) for e in self._container]) return b''.join([smart_str(e, self._charset) for e in self._container]) def _set_content(self, value): # in 3.x, bytes and unicode has __iter__, but they shouldn't be considered # collections here if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.text_type)): self._container = value self._base_content_is_iter = True else: self._container = [value] self._base_content_is_iter = False content = property(_get_content, _set_content) def __iter__(self): self._iterator = iter(self._container) return self def next(self): return self._process_chunk(next(self._iterator)) __next__ = next def close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. # See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if self._base_content_is_iter: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if self._base_content_is_iter: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self]) class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to): parsed = urlparse(redirect_to) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise SuspiciousOperation("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) super(HttpResponseRedirectBase, self).__init__() self['Location'] = iri_to_uri(redirect_to) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 class 
HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods): super(HttpResponseNotAllowed, self).__init__() self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 # A backwards compatible alias for HttpRequest.get_host. def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus, # this slightly more restricted function. def str_to_unicode(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. """ if isinstance(s, bytes): return six.text_type(s, encoding, 'replace') else: return s
{ "content_hash": "9e2c9d151d86006faa6a609bd3d478ea", "timestamp": "", "source": "github", "line_count": 777, "max_line_length": 134, "avg_line_length": 37.17760617760618, "alnum_prop": 0.5900924291203655, "repo_name": "vsajip/django", "id": "1a9f359ab342a7ec6f2557a427bf4f36f3a8f46a", "size": "28887", "binary": false, "copies": "1", "ref": "refs/heads/django3", "path": "django/http/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "89078" }, { "name": "Python", "bytes": "8200429" }, { "name": "Shell", "bytes": "4241" } ], "symlink_target": "" }
from . import constants from . import decorators from . import exceptions from . import helpers from . import objects from . import photodb from . import searchhelpers from . import tag_export __all__ = [ 'constants', 'decorators', 'exceptions', 'helpers', 'objects', 'photodb', 'searchhelpers', 'tag_export', ]
{ "content_hash": "809d9e304c78b5515b6ac7094a0656f9", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 27, "avg_line_length": 18.157894736842106, "alnum_prop": 0.6579710144927536, "repo_name": "voussoir/etiquette", "id": "498b1a6bc70f852431fb5e1c0b11e55a095f1b0a", "size": "345", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "etiquette/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "16663" }, { "name": "HTML", "bytes": "138395" }, { "name": "JavaScript", "bytes": "85676" }, { "name": "Python", "bytes": "350327" } ], "symlink_target": "" }
import sys import json import urllib import urllib2 import argparse def translate(target, text): api = "https://www.googleapis.com/language/translate/v2?" api_key = "AIzaSyBgBlJCogk_1Hd_7WaLQgLVbQss0_dvNUc" parameters = urllib.urlencode({ 'target': target, 'key': api_key, 'q': text }) response = urllib2.urlopen(api + parameters) translations = json.loads(response.read()) translated_text = translations['data']['translations'][0]['translatedText'] return translated_text.encode('utf-8') def main(): parser = argparse.ArgumentParser() parser.add_argument('-t', '--target', default="en", \ help="the language to translate into (default: en)") parser.add_argument('text', nargs='*', help="the text to translate") args = parser.parse_args() if args.text: text = ' '.join(args.text) else: text = sys.stdin.read() print translate(args.target, text) if __name__ == "__main__": main()
{ "content_hash": "bf611c50e7844dbd342f5259eef01ab5", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 79, "avg_line_length": 27.513513513513512, "alnum_prop": 0.6218074656188605, "repo_name": "voidabhi/python-scripts", "id": "76200e343cdc9e166ed8014c287918d49dfce880", "size": "1040", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "translate.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "446" }, { "name": "Go", "bytes": "330" }, { "name": "JavaScript", "bytes": "1728" }, { "name": "Python", "bytes": "282732" }, { "name": "Shell", "bytes": "794" } ], "symlink_target": "" }
"""CyPhy/OpenModelica QR test-bench runner (Python 2, Windows-oriented).

Runs PARC's qualitative-reasoning (QRM) envisionment on the Modelica
verification model named in model_config.json, then merges the resulting
QRM JSON into testbench_manifest.json.
"""
import sys, os, json
import PARC_QRM_Toolkit

# All relative paths below are resolved against the sibling CyPhy directory.
this_file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(os.path.join(this_file_path, '..', 'CyPhy'))
print 'current dir {0}'.format(os.getcwd())

# Point the toolkit at the OpenModelica compiler and the QRM backend.
PARC_QRM_Toolkit.OMC = os.path.join(os.getenv("OPENMODELICAHOME"), 'bin', 'omc')
PARC_QRM_Toolkit.QRM = 'QRM'

if os.name == 'nt':
    # Windows
    import _winreg

    def query_analysis_tools():
        """Read the META AnalysisTools registry tree into a nested dict.

        Returns {sub_key_name: {value_name: value}} for every sub-key of
        HKLM\\SOFTWARE\\Wow6432Node\\META\\AnalysisTools (32-bit view).
        """
        analysis_tools_key_name = r'SOFTWARE\Wow6432Node\META\AnalysisTools'
        analysis_tools_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                             analysis_tools_key_name,
                                             0,
                                             _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY)
        number_of_keys = _winreg.QueryInfoKey(analysis_tools_key)[0]  # 0 means number of sub_keys
        info = {}
        for sub_key_id in range(0, number_of_keys):
            sub_key_name = _winreg.EnumKey(analysis_tools_key, sub_key_id)
            sub_key = _winreg.OpenKey(analysis_tools_key, sub_key_name)
            info[sub_key_name] = {}
            number_of_values = _winreg.QueryInfoKey(sub_key)[1]
            for value_id in range(0, number_of_values):
                value_tuple = _winreg.EnumValue(sub_key, value_id)
                value_name = value_tuple[0]
                value = value_tuple[1]
                info[sub_key_name][value_name] = value
        return info

    tools = query_analysis_tools()
    # add QR.exe location to the path
    os.environ['PATH'] += str(os.pathsep + os.path.join(tools['QR']['InstallLocation'], 'bin'))

# Collect the Modelica library paths referenced by the model configuration.
model_config = json.load(open('model_config.json'))
for directory in model_config.get("lib_package_paths"):
    # NOTE(review): `libraries` is re-initialized on every pass of this
    # loop, so only the last directory's contents survive — confirm this
    # is intended when lib_package_paths has more than one entry.
    libraries = [os.getcwd() + '\\']
    for file in os.listdir(directory):
        # Accept loose .mo files and directories that contain a package.mo.
        if not file.startswith(".") and (file.endswith(".mo") or (os.path.isdir(file) and os.path.isdir(os.path.join(file, "package.mo")))):
            libraries.append(os.path.join(directory, file))

model_name = model_config.get('verification_model_name')
print libraries

# Run the envisionment; the toolkit writes its result to a temporary JSON
# file whose contents we copy next to the configured result file.
jsonfile = PARC_QRM_Toolkit.do_envisionment_from_modelica(model_name, libraries, CyPhy_output=True)
json_filename = os.path.join("..", model_config.get("result_file")) + ".qrm.json"
open(json_filename, "w").write(open(jsonfile).read())
os.unlink(jsonfile)

# update summary.testresults.json file
qrm_results = {}
with open(json_filename, 'r') as f_p_qrm_results:
    qrm_results = json.load(f_p_qrm_results)

results_filename = os.path.join('..', 'testbench_manifest.json')
results = {}
with open(results_filename, 'r') as f_p_results:
    results = json.load(f_p_results)

# Merge the QRM results into the manifest and mark the run as successful.
results.update(qrm_results)
results['Status'] = 'OK'

with open(results_filename, 'w') as f_p_results:
    json.dump(results, f_p_results, indent=4)
{ "content_hash": "584cad23ef5480a48c4d036fa42d31b8", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 140, "avg_line_length": 39.375, "alnum_prop": 0.617636684303351, "repo_name": "pombredanne/metamorphosys-desktop", "id": "9b0721fed3713d97b52b18fb7e7654471a081cc0", "size": "2835", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metamorphosys/META/analysis_tools/QR/output/scripts/QR_runner.py", "mode": "33261", "license": "mit", "language": [ { "name": "Arduino", "bytes": "10683" }, { "name": "Assembly", "bytes": "117345" }, { "name": "Awk", "bytes": "3591" }, { "name": "Batchfile", "bytes": "228118" }, { "name": "BitBake", "bytes": "4526" }, { "name": "C", "bytes": "3613212" }, { "name": "C#", "bytes": "11617773" }, { "name": "C++", "bytes": "51448188" }, { "name": "CMake", "bytes": "3055" }, { "name": "CSS", "bytes": "109563" }, { "name": "Clojure", "bytes": "37831" }, { "name": "Eagle", "bytes": "3782687" }, { "name": "Emacs Lisp", "bytes": "8514" }, { "name": "GAP", "bytes": "49124" }, { "name": "Groff", "bytes": "2178" }, { "name": "Groovy", "bytes": "7686" }, { "name": "HTML", "bytes": "4025250" }, { "name": "Inno Setup", "bytes": "35715" }, { "name": "Java", "bytes": "489537" }, { "name": "JavaScript", "bytes": "167454" }, { "name": "Lua", "bytes": "1660" }, { "name": "Makefile", "bytes": "97209" }, { "name": "Mathematica", "bytes": "26" }, { "name": "Matlab", "bytes": "80874" }, { "name": "Max", "bytes": "78198" }, { "name": "Modelica", "bytes": "44541139" }, { "name": "Objective-C", "bytes": "34004" }, { "name": "Perl", "bytes": "19285" }, { "name": "PostScript", "bytes": "400254" }, { "name": "PowerShell", "bytes": "19749" }, { "name": "Processing", "bytes": "1477" }, { "name": "Prolog", "bytes": "3121" }, { "name": "Protocol Buffer", "bytes": "58995" }, { "name": "Python", "bytes": "5517835" }, { "name": "Ruby", "bytes": "4483" }, { "name": "Shell", "bytes": "956773" }, { "name": "Smarty", "bytes": "37892" 
}, { "name": "TeX", "bytes": "4183594" }, { "name": "Visual Basic", "bytes": "22546" }, { "name": "XSLT", "bytes": "332312" } ], "symlink_target": "" }
"""
Scrapes bill text available in PDF.
"""
from django.core.management.base import BaseCommand
from legislative.models import BillText
from tqdm import tqdm
import requests
import re
from time import sleep
import PyPDF2
import pdfquery


class Command(BaseCommand):
    """
    Scrapes bill text available in PDF.
    """
    help = 'Scrapes bill text available in PDF.'

    def handle(self, *args, **options):
        """
        Download every PDF-format bill text that has not yet been ingested,
        extract its text via pdfquery's XML dump, clean it up and store the
        result on the BillText record.
        """

        def ingest_bill_text(bill_text):
            """Fetch one PDF, convert it to cleaned plain text and save."""
            result = requests.get(bill_text.state_url)
            c = result.content
            pdf_path = "scrape_cache/b_txt_{}.pdf".format(bill_text.id)
            # Clean-up passes: strip XML tags, remove stand-alone and
            # in-line line numbers, then collapse the leftover blank lines.
            tag_regex = re.compile("<.+?>")
            sep_line_num_regex = re.compile("\n\d+([\s\d]+)?\n")
            in_line_num_regex = re.compile("\n(\s+)?\d+\s(?=.+?\n)")
            tighten_spacing_regex = re.compile("\n\n")
            second_tighten_spacing_regex = re.compile("\n\n\s+")
            with open(pdf_path, "wb") as pdf:
                pdf.write(c)
            scraper = pdfquery.PDFQuery(pdf_path)
            scraper.load()
            # Dump the parsed PDF as XML, then treat that XML as text.
            scraper.tree.write("{}.xml".format(pdf_path), pretty_print=True)
            # NOTE(review): the XML file is opened in binary mode but joined
            # with a str separator — this assumes Python 2 (bytes == str);
            # confirm before running under Python 3.
            with open("{}.xml".format(pdf_path), "rb") as xml:
                strn = ' '.join(xml.readlines())
            untagged = tag_regex.sub('\n', strn)
            num_fix = sep_line_num_regex.sub('\n', untagged)
            inline_num_fix = in_line_num_regex.sub('\n', num_fix)
            tightened = tighten_spacing_regex.sub('\n', inline_num_fix)
            second_tighten = second_tighten_spacing_regex.sub('\n\n', tightened)
            bill_text.text = second_tighten
            bill_text.save()

        qset = BillText.objects.filter(type='application/pdf', text=None)
        for b_text in tqdm(
                iterable=qset,
                total=qset.count()):
            try:
                ingest_bill_text(b_text)
                sleep(1)  # be polite to the remote server
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made the loop
                # uninterruptible. Still best-effort: skip the record
                # and move on.
                continue
{ "content_hash": "24ea16eb189088a84404e1187ea70000", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 84, "avg_line_length": 31.38235294117647, "alnum_prop": 0.5290534208059982, "repo_name": "access-missouri/am-django-project", "id": "d516c415fddf80a0dfc17ae3b9414d2a5ffe7820", "size": "2180", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "am/legislative/management/commands/scrape_pdf_bill_text.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "208381" }, { "name": "HTML", "bytes": "75543" }, { "name": "JavaScript", "bytes": "68836" }, { "name": "Makefile", "bytes": "803" }, { "name": "Python", "bytes": "241729" }, { "name": "Ruby", "bytes": "105" } ], "symlink_target": "" }
"""WSGI front-end for ZSI SOAP services (Python 2).

Maps HTTP requests onto @soapmethod-decorated callbacks through a
DataHandler/SOAPCallbackHandler chain.
"""
import os, sys, types, inspect
from StringIO import StringIO

# twisted & related imports
from zope.interface import classProvides, implements, Interface

# ZSI imports
from pyremotevbox.ZSI import _get_element_nsuri_name, EvaluateException, ParseException,\
    fault, ParsedSoap, SoapWriter

from pyremotevbox.ZSI.twisted.reverse import DataHandler, ReverseHandlerChain,\
    HandlerChainInterface

"""
EXAMPLES: See zsi/samples/WSGI
"""


def soapmethod(requesttypecode, responsetypecode, soapaction='', operation=None, **kw):
    """@soapmethod decorator function for soap methods.

    Tags the wrapped callback with the request/response typecodes and the
    SOAP action so SOAPCallbackHandler.processRequest can dispatch to it by
    the (namespace, name) of the request body root.
    """
    def _closure(func_cb):
        func_cb.root = (requesttypecode.nspname, requesttypecode.pname)
        func_cb.action = soapaction
        func_cb.requesttypecode = requesttypecode
        func_cb.responsetypecode = responsetypecode
        func_cb.soapmethod = True
        func_cb.operation = None
        return func_cb
    return _closure


class SOAPCallbackHandler:
    """ps --> pyobj, pyobj --> sw

    class variables:
        writerClass -- ElementProxy implementation to use for SoapWriter
            instances.
    """
    classProvides(HandlerChainInterface)
    writerClass = None

    @classmethod
    def processRequest(cls, ps, **kw):
        """invokes callback that should return a (request,response) tuple.
        representing the SOAP request and response respectively.
        ps -- ParsedSoap instance representing HTTP Body.
        request -- twisted.web.server.Request
        """
        resource = kw['resource']
        request = kw['request']

        # Find the @soapmethod callback whose declared root matches the
        # (namespace, name) of the parsed SOAP body root.
        root = _get_element_nsuri_name(ps.body_root)
        for key, method in inspect.getmembers(resource, inspect.ismethod):
            if (getattr(method, 'soapmethod', False) and method.root == root):
                break
        else:
            raise RuntimeError, 'Missing soap callback method for root "%s"' %root

        # The try/raise wrappers below preserve the traceback while marking
        # the three distinct failure points: parse, response construction,
        # and the callback itself.
        try:
            req = ps.Parse(method.requesttypecode)
        except Exception, ex:
            raise

        try:
            rsp = method.responsetypecode.pyclass()
        except Exception, ex:
            raise

        try:
            req, rsp = method(req, rsp)
        except Exception, ex:
            raise

        return rsp

    @classmethod
    def processResponse(cls, output, **kw):
        # Serialize the response pyobj into a SoapWriter for the transport.
        sw = SoapWriter(outputclass=cls.writerClass)
        sw.serialize(output)
        return sw


class SOAPHandlerChainFactory:
    # Produces a DataHandler -> SOAPCallbackHandler processing chain.
    protocol = ReverseHandlerChain

    @classmethod
    def newInstance(cls):
        return cls.protocol(DataHandler, SOAPCallbackHandler)


class WSGIApplication(dict):
    """Dict-based WSGI dispatcher: keys are path segments, values are
    child applications."""
    encoding = "UTF-8"

    def __call__(self, env, start_response):
        """do dispatching, else process
        """
        script = env['SCRIPT_NAME'] # consumed
        ipath = os.path.split(env['PATH_INFO'])[1:]
        # Try progressively longer prefixes of the remaining path against
        # the registered children; first match wins.
        for i in range(1, len(ipath)+1):
            path = os.path.join(*ipath[:i])
            print "PATH: ", path
            application = self.get(path)
            if application is not None:
                env['SCRIPT_NAME'] = script + path
                env['PATH_INFO'] = ''
                print "SCRIPT: ", env['SCRIPT_NAME']
                return application(env, start_response)

        return self._request_cb(env, start_response)

    def _request_cb(self, env, start_response):
        """callback method, override
        """
        start_response("404 ERROR", [('Content-Type','text/plain')])
        return ['Move along people, there is nothing to see to hear']

    def putChild(self, path, resource):
        """Register *resource* under *path*, creating intermediate
        WSGIApplication nodes for multi-segment paths.
        """
        path = path.split('/')
        lp = len(path)
        if lp == 0:
            raise RuntimeError, 'bad path "%s"' %path

        if lp == 1:
            self[path[0]] = resource
        # NOTE(review): no early return after the single-segment case —
        # execution falls through to the recursion below; confirm this is
        # intended for one-segment paths.

        # Skip leading empty segments produced by absolute paths ('/x').
        for i in range(len(path)):
            if not path[i]:
                continue
            break

        next = self.get(path[i], None)
        if next is None:
            next = self[path[i]] = WSGIApplication()

        next.putChild('/'.join(path[-1:]), resource)


class SOAPApplication(WSGIApplication):
    """WSGI application that answers SOAP POSTs (and ?wsdl GETs)."""
    factory = SOAPHandlerChainFactory

    def __init__(self, **kw):
        dict.__init__(self, **kw)
        # Optional object that provides the @soapmethod callbacks; when
        # None, this application itself is used.
        self.delegate = None

    def _request_cb(self, env, start_response):
        """process request,
        """
        if env['REQUEST_METHOD'] == 'GET':
            return self._handle_GET(env, start_response)

        if env['REQUEST_METHOD'] == 'POST':
            return self._handle_POST(env, start_response)

        # Any other method: dump the environ back for debugging.
        start_response("500 ERROR", [('Content-Type','text/plain')])
        s = StringIO()
        h = env.items(); h.sort()
        for k,v in h:
            print >>s, k,'=',`v`
        return [s.getvalue()]

    def _handle_GET(self, env, start_response):
        # Only ?wsdl is served; everything else is a 404.
        if env['QUERY_STRING'].lower() == 'wsdl':
            start_response("200 OK", [('Content-Type','text/plain')])
            r = self.delegate or self
            return _resourceToWSDL(r)

        start_response("404 ERROR", [('Content-Type','text/plain')])
        return ['NO RESOURCE FOR GET']

    def _handle_POST(self, env, start_response):
        """Dispatch Method called by twisted render, creates a
        request/response handler chain.
        request -- twisted.web.server.Request
        """
        input = env['wsgi.input']
        data = input.read( int(env['CONTENT_LENGTH']) )
        mimeType = "text/xml"
        if self.encoding is not None:
            mimeType = 'text/xml; charset="%s"' % self.encoding

        request = None
        resource = self.delegate or self
        chain = self.factory.newInstance()
        # Any failure in either phase is returned as a SOAP Fault body.
        try:
            pyobj = chain.processRequest(data, request=request, resource=resource)
        except Exception, ex:
            start_response("500 ERROR", [('Content-Type',mimeType)])
            return [fault.FaultFromException(ex, False, sys.exc_info()[2]).AsSOAP()]

        try:
            soap = chain.processResponse(pyobj, request=request, resource=resource)
        except Exception, ex:
            start_response("500 ERROR", [('Content-Type',mimeType)])
            return [fault.FaultFromException(ex, False, sys.exc_info()[2]).AsSOAP()]

        start_response("200 OK", [('Content-Type',mimeType)])
        return [soap]


def test(app, port=8080, host="localhost"):
    """Serve *app* with twisted.web2 for ad-hoc manual testing (blocks)."""
    from twisted.internet import reactor
    from twisted.python import log
    from twisted.web2.channel import HTTPFactory
    from twisted.web2.server import Site
    from twisted.web2.wsgi import WSGIResource

    log.startLogging(sys.stdout)
    reactor.listenTCP(port,
        HTTPFactory( Site(WSGIResource(app)) ),
        interface=host,
    )
    reactor.run()


def _issoapmethod(f):
    # True for bound methods tagged by the @soapmethod decorator.
    return type(f) is types.MethodType and getattr(f, 'soapmethod', False)


def _resourceToWSDL(resource):
    """Build a minimal (placeholder) WSDL document for *resource*.

    NOTE(review): clearly incomplete — the per-method loop does nothing
    with `m.action`, and names/locations are hard-coded samples.
    """
    from xml.etree import ElementTree
    from xml.etree.ElementTree import Element, QName
    from pyremotevbox.ZSI.wstools.Namespaces import WSDL

    r = resource
    methods = filter(_issoapmethod, map(lambda i: getattr(r, i), dir(r)))
    tns = ''
    #tree = ElementTree()
    defs = Element("{%s}definitions" %WSDL.BASE)
    defs.attrib['name'] = 'SampleDefs'
    defs.attrib['targetNamespace'] = tns
    #tree.append(defs)

    porttype = Element("{%s}portType" %WSDL)
    porttype.attrib['name'] = QName("{%s}SamplePortType" %tns)

    binding = Element("{%s}binding" %WSDL)
    defs.append(binding)
    binding.attrib['name'] = QName("{%s}SampleBinding" %tns)
    binding.attrib['type'] = porttype.get('name')

    for m in methods:
        m.action

    service = Element("{%s}service" %WSDL.BASE)
    defs.append(service)
    service.attrib['name'] = 'SampleService'

    port = Element("{%s}port" %WSDL.BASE)
    service.append(port)
    port.attrib['name'] = "SamplePort"
    port.attrib['binding'] = binding.get('name')

    soapaddress = Element("{%s}address" %WSDL.BIND_SOAP)
    soapaddress.attrib['location'] = 'http://localhost/bla'
    port.append(soapaddress)

    return [ElementTree.tostring(defs)]


"""
<?xml version="1.0" encoding="UTF-8"?>
<wsdl:definitions name="Counter"
 targetNamespace="http://counter.com/bindings"
 xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
 xmlns:porttype="http://counter.com"
 xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/">
 <wsdl:import namespace="http://counter.com" location="counter_flattened.wsdl"/>
 <wsdl:binding name="CounterPortTypeSOAPBinding" type="porttype:CounterPortType">
  <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/>
  <wsdl:operation name="createCounter">
   <soap:operation soapAction="http://counter.com/CounterPortType/createCounterRequest"/>
   <wsdl:input>
    <soap:body use="literal"/>
   </wsdl:input>
   <wsdl:output>
    <soap:body use="literal"/>
   </wsdl:output>
  </wsdl:operation>

<wsdl:definitions name="Counter"
 targetNamespace="http://counter.com/service"
 xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
 xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
 xmlns:binding="http://counter.com/bindings">
 <wsdl:import namespace="http://counter.com/bindings" location="counter_bindings.wsdl"/>
 <wsdl:service name="CounterService">
  <wsdl:port name="CounterPortTypePort" binding="binding:CounterPortTypeSOAPBinding">
   <soap:address location="http://localhost:8080/wsrf/services/"/>
  </wsdl:port>
 </wsdl:service>
</wsdl:definitions>
"""
{ "content_hash": "f7fc2018edee7d35a182088e71aaacae", "timestamp": "", "source": "github", "line_count": 298, "max_line_length": 212, "avg_line_length": 32.399328859060404, "alnum_prop": 0.6073537027446919, "repo_name": "rameshg87/pyremotevbox", "id": "bf5d047bec41242cfbd3cafd50e6cd1050d83bb3", "size": "9882", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyremotevbox/ZSI/twisted/wsgi.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "1519" }, { "name": "Python", "bytes": "4275056" } ], "symlink_target": "" }
try: # pragma: no cover from django_hosts.defaults import patterns, host from django_hosts.reverse import reverse_host, reverse_full except ImportError: # pragma: no cover pass # following PEP 386 __version__ = "0.6"
{ "content_hash": "3acf94a06c8bb36fc031d5794f87915c", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 63, "avg_line_length": 29, "alnum_prop": 0.7112068965517241, "repo_name": "carlitux/django-hosts", "id": "0f5786430d93f90e4a5854d5224eee55bc886490", "size": "247", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "django_hosts/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
"""SockJS message endpoint for hendrix (Python 2).

Exposes a MessageHandlerProtocol behind /messages/main that routes JSON
messages through the shared hxdispatcher.
"""
import json
import uuid

from twisted.internet import threads
from twisted.internet.protocol import Factory, Protocol
from txsockjs.factory import SockJSResource

from hendrix.resources import NamedResource

from .messaging import hxdispatcher
from .signals import message_signal


def send_signal(transport, data):
    # Broadcast the raw payload to Django-signal listeners; invoked via
    # threads.deferToThread from dataReceived so listeners run off the
    # reactor thread.
    message_signal.send(None, dispatcher=transport, data=data)


class MessageHandlerProtocol(Protocol):
    """
        A basic protocol for socket messaging
        using a hendrix messaging dispatcher to handle
        addressing messages to active sockets from
        different contexts
    """
    dispatcher = hxdispatcher
    # guid: address assigned to this connection by the dispatcher in
    # connectionMade; used as the default reply-to address.
    guid = None

    def dataReceived(self, data):
        """
            Takes "data" which we assume is json encoded
            If data has a subject_id attribute, we pass that to the dispatcher
            as the subject_id so it will get carried through into any
            return communications and be identifiable to the client

            falls back to just passing the message along...
        """
        try:
            address = self.guid
            data = json.loads(data)
            threads.deferToThread(send_signal, self.dispatcher, data)
            if 'hx_subscribe' in data:
                return self.dispatcher.subscribe(self.transport, data)
            if 'address' in data:
                address = data['address']
            else:
                address = self.guid
            self.dispatcher.send(address, data)
        except Exception, exc:
            raise
            # NOTE(review): unreachable — the bare `raise` above re-raises
            # before this error reply can be sent; confirm which behavior
            # is intended.
            self.dispatcher.send(
                self.guid,
                {'message': data, 'error': str(exc)}
            )

    def connectionMade(self):
        """
            establish the address of this new connection and add it to the
            list of sockets managed by the dispatcher

            reply to the transport with a "setup_connection" notice
            containing the recipient's address for use by the client as a
            return address for future communications
        """
        self.transport.uid = str(uuid.uuid1())

        self.guid = self.dispatcher.add(self.transport)
        self.dispatcher.send(self.guid, {'setup_connection': self.guid})

    def connectionLost(self, something):
        "clean up the no longer useful socket in the dispatcher"
        self.dispatcher.remove(self.transport)


# Module-level resource tree: the protocol is served at messages/main.
MessageResource = NamedResource('messages')
MessageResource.putChild(
    'main',
    SockJSResource(Factory.forProtocol(MessageHandlerProtocol))
)
{ "content_hash": "e071ff8b450d46db3a47b3c1c8056e06", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 78, "avg_line_length": 29.988095238095237, "alnum_prop": 0.6447002778880508, "repo_name": "anthonyalmarza/hendrix", "id": "a432fdcf6d8bd9aae81feac843169c91abe54385", "size": "2519", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hendrix/contrib/async/resources.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "71045" }, { "name": "Shell", "bytes": "528" } ], "symlink_target": "" }
""" Splitting dataset

"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create


class TestSplitDataset(object):
    # Gherkin-style scenario executed against the live BigML API through
    # the shared step helpers; `self` carries state between steps.

    def test_scenario1(self):
        """
            Scenario: Successfully creating a split dataset:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a dataset extracting a <rate> sample
                And I wait until the dataset is ready less than <time_3> secs
                When I compare the datasets' instances
                Then the proportion of instances between datasets is <rate>

                Examples:
                | data                | time_1  | time_2 | time_3 | rate |
                | ../data/iris.csv | 10      | 10     | 10     | 0.8 |
        """
        print self.test_scenario1.__doc__
        # Each example row: [data file, source wait, dataset wait,
        #                    split wait, sample rate].
        examples = [
            ['data/iris.csv', '10', '10', '10', '0.8']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            dataset_create.i_create_a_split_dataset(self, example[4])
            dataset_create.the_dataset_is_finished_in_less_than(self, example[3])
            dataset_create.i_compare_datasets_instances(self)
            dataset_create.proportion_datasets_instances(self, example[4])
{ "content_hash": "ade204f971a30be3f0e0982b7111e96f", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 77, "avg_line_length": 47.975, "alnum_prop": 0.5476810838978635, "repo_name": "ShaguptaS/python", "id": "cfeab07fffb070ed4ffd52f3d3b2173b28aa58f1", "size": "2538", "binary": false, "copies": "1", "ref": "refs/heads/next", "path": "bigml/tests/test_17_split_dataset.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "600936" } ], "symlink_target": "" }
import os
import sys
sys.path.append(os.environ['ceroot'])  # make the CE `kpi` module importable
from kpi import CostKpi, DurationKpi, AccKpi

# NOTE kpi.py should shared in models in some way!!!!

# KPI trackers with their allowed relative-change thresholds.
train_cost_kpi = CostKpi('train_cost', 0.02, actived=True)
test_acc_kpi = AccKpi('test_acc', 0.005, actived=True)
train_duration_kpi = DurationKpi('train_duration', 0.06, actived=True)
train_acc_kpi = AccKpi('train_acc', 0.005, actived=True)

tracking_kpis = [
    train_acc_kpi,
    train_cost_kpi,
    test_acc_kpi,
    train_duration_kpi,
]


def parse_log(log):
    '''
    Yield (kpi_name, kpi_value) pairs found in a training log.

    A KPI line is tab-separated with exactly three fields and a "kpis"
    marker, for example:

        kpis\ttrain_cost\t1.0
        kpis\ttrain_acc\t1.2

    Any other line is ignored.
    '''
    for line in log.split('\n'):
        fs = line.strip().split('\t')
        print(fs)
        if len(fs) == 3 and fs[0] == 'kpis':
            kpi_name = fs[1]
            kpi_value = float(fs[2])
            yield kpi_name, kpi_value


def log_to_ce(log):
    # Route every parsed KPI value to its tracker and persist each record.
    kpi_tracker = {}
    for kpi in tracking_kpis:
        kpi_tracker[kpi.name] = kpi

    for (kpi_name, kpi_value) in parse_log(log):
        print(kpi_name, kpi_value)
        kpi_tracker[kpi_name].add_record(kpi_value)
        kpi_tracker[kpi_name].persist()


if __name__ == '__main__':
    # The training log is piped in on stdin.
    log = sys.stdin.read()
    log_to_ce(log)
{ "content_hash": "5040983a75a355989d2cb194a41fcd0d", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 70, "avg_line_length": 23.610169491525422, "alnum_prop": 0.6037329504666188, "repo_name": "lcy-seso/models", "id": "9c2dba53526d2e976252fce05c7ff7f0f44b39b2", "size": "1451", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "fluid/mnist/_ce.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "15149" }, { "name": "Python", "bytes": "1438932" }, { "name": "Shell", "bytes": "31168" } ], "symlink_target": "" }
from diary import DiaryDB, Event
import unittest
import sqlite3
import os.path


class TestDiaryDB(unittest.TestCase):
    # Exercises DiaryDB against a throw-away SQLite file and against the
    # default-path database.

    TEMP_DB_PATH = os.path.join(os.path.dirname(__file__),
                                'testing_dir',
                                'temp.db')
    SIMPLE_EVENT = Event("INFO", "LEVEL")

    def setUp(self):
        # Fresh handles for every test: one explicit path, one default.
        self.logdb = DiaryDB(self.TEMP_DB_PATH)
        self.logdb_default = DiaryDB()

    @classmethod
    def tearDownClass(cls):
        # Remove the temp database once the whole class has run.
        import os
        os.remove(cls.TEMP_DB_PATH)

    def constructs_correctly(self):
        # NOTE(review): name lacks the `test_` prefix, so unittest never
        # discovers or runs this check — confirm whether that is intended.
        self.assertIsInstance(self.logdb.conn, sqlite3.Connection)
        self.assertIsInstance(self.logdb.cursor, sqlite3.Cursor)

    def test_creates_table(self):
        # DiaryDB should create the `logs` table on construction.
        table = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
                                          WHERE type="table" AND name="logs"
                                          ''').fetchone()[0]
        self.assertEquals(table, 'logs')

    def test_creates_table_already_exists(self):
        # Calling create_tables again must be idempotent (no duplicate table).
        self.logdb.create_tables()
        tables = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
                                          WHERE type="table" AND name="logs"
                                          ''').fetchall()
        self.assertEquals(len(tables), 1)

    def test_log(self):
        # A logged event round-trips into the first (oldest) row.
        self.logdb.log(self.SIMPLE_EVENT)
        entry = self.logdb.cursor.execute('''SELECT * FROM logs ORDER BY
                                          inputDT ASC LIMIT 1''').fetchone()
        self.assertEquals(entry[0], self.SIMPLE_EVENT.dt)
        self.assertEquals(entry[1], self.SIMPLE_EVENT.level)
        self.assertEquals(entry[2], self.SIMPLE_EVENT.info)

    def test_close(self):
        # After close(), any statement on the connection must fail.
        self.logdb.close()
        with self.assertRaises(sqlite3.ProgrammingError,
                               msg="Cannot operate on a closed database."):
            self.logdb.conn.execute("SELECT 1 FROM logs LIMIT 1")

    def test_default_path(self):
        # The default-path database also accepts and returns events.
        self.logdb_default.log(self.SIMPLE_EVENT)
        entry = self.logdb_default.cursor.execute('''SELECT * FROM logs
                                                  ORDER BY inputDT DESC
                                                  LIMIT 1''').fetchone()
        self.assertEquals(entry[0], self.SIMPLE_EVENT.dt)
        self.assertEquals(entry[1], self.SIMPLE_EVENT.level)
        self.assertEquals(entry[2], self.SIMPLE_EVENT.info)
        self.logdb_default.close()


if __name__ == '__main__':
    unittest.main()
{ "content_hash": "86f934c4881aea450a9254b9ca1f44e2", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 80, "avg_line_length": 38.96825396825397, "alnum_prop": 0.5751527494908351, "repo_name": "GreenVars/diary", "id": "880d7920bcaf4ed01b24c42032057c0bbe7d5849", "size": "2455", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/logdb_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "60676" } ], "symlink_target": "" }
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import copy
from tensorflow.contrib import learn
import _pickle as pickle


def read_from_csv():
    """Load the Friends utterances and pair each one with its successor.

    Returns (X, Y) where Y[i] is the utterance that follows X[i].
    """
    csv_fname = "/Users/shubhi/Public/CMPS296/friends.csv"  # replace with local file loc
    df = pd.DataFrame.from_csv(csv_fname)
    df = df.dropna(subset=['utterance'])
    print(len(df))
    X = list(df['utterance'])
    Y = copy.deepcopy(X)
    # Shift by one so each utterance maps to the reply that follows it.
    X = X[:-1]
    Y = Y[1:]
    return (X, Y)


def loadGloVe(filename):
    """Read a GloVe text file; return (vocab list, list of embedding rows)."""
    vocab = []
    embd = []
    file = open(filename, 'r')
    for line in file.readlines():
        # Format: word followed by its space-separated vector components.
        row = line.strip().split(' ')
        vocab.append(row[0])
        embd.append(row[1:])
    print('Loaded GloVe!')
    file.close()
    return vocab, embd


def getEmbedding(X, Y):
    """Build the GloVe embedding matrix and fit a vocab processor.

    Returns (vocab_size, embedding_dim, embedding). Y is accepted for
    symmetry but unused.
    """
    max_sentence_length = 20
    glove_filename = '/Users/shubhi/Public/CMPS296/glove.6B/glove.6B.50d.txt'  # replace with local file loc
    vocab, embd = loadGloVe(glove_filename)
    vocab_size = len(vocab)
    embedding_dim = len(embd[0])
    embedding = np.asarray(embd)
    # init vocab processor
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_sentence_length)
    # fit the vocab from glove
    pretrain = vocab_processor.fit(vocab)
    # transform inputs
    # NOTE(review): this transformed X is local only — the caller never
    # receives the id sequences; confirm whether it should be returned.
    X = np.array(list(vocab_processor.transform(X)))
    return (vocab_size, embedding_dim, embedding)


def tf_session(vocab_size, embedding_dim, embedding, X):
    """Look up embeddings for X and pickle the resulting vectors."""
    with tf.Session() as sess:
        # TF variable (placeholder)
        W = tf.Variable(tf.constant(0.0, shape=[vocab_size, embedding_dim]),
                        trainable=False, name="W")
        embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
        embedding_init = W.assign(embedding_placeholder)
        # NOTE(review): embedding_lookup expects integer ids; the X passed
        # from main() is still raw text — verify the intended input.
        req_embedded = tf.nn.embedding_lookup(W, X)

        # Call the session
        sess.run(embedding_init, feed_dict={embedding_placeholder: embedding})
        vectors = req_embedded.eval()
        print (vectors[0])
        pickle.dump(vectors, open("vectorized_input", "wb"))


def shrink_vocab(X):
    """Return the 10,000 most common whitespace-separated tokens in X."""
    from collections import Counter
    word_list = (" ".join(X)).split(" ")
    counter = Counter(word_list)
    #print (counter.count("the"))
    return counter.most_common(10000)


def remove_hyphen(X):
    """Replace hyphens with spaces and truncate each sentence to 20 tokens."""
    X = [" ".join(sentence.split("-")) for sentence in X]
    X = [" ".join(sentence.split(" ")[:20]) for sentence in X]
    return X


def main():
    (X, Y) = read_from_csv()
    X = remove_hyphen(X)
    # NOTE(review): both shrink_vocab results below are discarded.
    shrink_vocab(X)
    vocab_size, embedding_dim, embedding = getEmbedding(X, Y)
    small_vocab = shrink_vocab(X)
    tf_session(vocab_size, embedding_dim, embedding, X)

# Runs immediately on import (no __main__ guard).
main()
{ "content_hash": "18058ad075d554c79edb3737243c12d1", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 106, "avg_line_length": 27.309278350515463, "alnum_prop": 0.6402416006040015, "repo_name": "ProjectsUCSC/CMPS296", "id": "bfb7e0d0d4fd72451c7eaa50ea7790ebcab32399", "size": "2649", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/deprecated/seq2seq.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "68979" } ], "symlink_target": "" }
SKIP = "SKIP" FAIL = "FAIL" PASS = "PASS" OKAY = "OKAY" TIMEOUT = "TIMEOUT" CRASH = "CRASH" SLOW = "SLOW" FLAKY = "FLAKY" NO_VARIANTS = "NO_VARIANTS" # These are just for the status files and are mapped below in DEFS: FAIL_OK = "FAIL_OK" PASS_OR_FAIL = "PASS_OR_FAIL" ALWAYS = "ALWAYS" KEYWORDS = {} for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK, NO_VARIANTS, PASS_OR_FAIL, ALWAYS]: KEYWORDS[key] = key DEFS = {FAIL_OK: [FAIL, OKAY], PASS_OR_FAIL: [PASS, FAIL]} # Support arches, modes to be written as keywords instead of strings. VARIABLES = {ALWAYS: True} for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "arm", "arm64", "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows", "linux"]: VARIABLES[var] = var def DoSkip(outcomes): return SKIP in outcomes def IsSlow(outcomes): return SLOW in outcomes def OnlyStandardVariant(outcomes): return NO_VARIANTS in outcomes def IsFlaky(outcomes): return FLAKY in outcomes def IsPassOrFail(outcomes): return ((PASS in outcomes) and (FAIL in outcomes) and (not CRASH in outcomes) and (not OKAY in outcomes)) def IsFailOk(outcomes): return (FAIL in outcomes) and (OKAY in outcomes) def _AddOutcome(result, new): global DEFS if new in DEFS: mapped = DEFS[new] if type(mapped) == list: for m in mapped: _AddOutcome(result, m) elif type(mapped) == str: _AddOutcome(result, mapped) else: result.add(new) def _ParseOutcomeList(rule, outcomes, target_dict, variables): result = set([]) if type(outcomes) == str: outcomes = [outcomes] for item in outcomes: if type(item) == str: _AddOutcome(result, item) elif type(item) == list: if not eval(item[0], variables): continue for outcome in item[1:]: assert type(outcome) == str _AddOutcome(result, outcome) else: assert False if len(result) == 0: return if rule in target_dict: target_dict[rule] |= result else: target_dict[rule] = result def ReadStatusFile(path, variables): with open(path) as f: global KEYWORDS contents = eval(f.read(), 
KEYWORDS) rules = {} wildcards = {} variables.update(VARIABLES) for section in contents: assert type(section) == list assert len(section) == 2 if not eval(section[0], variables): continue section = section[1] assert type(section) == dict for rule in section: assert type(rule) == str if rule[-1] == '*': _ParseOutcomeList(rule, section[rule], wildcards, variables) else: _ParseOutcomeList(rule, section[rule], rules, variables) return rules, wildcards
{ "content_hash": "9837f47484d3f40542f0588f050ed46f", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 79, "avg_line_length": 24.64864864864865, "alnum_prop": 0.6410818713450293, "repo_name": "imzcy/JavaScriptExecutable", "id": "826b576f2308c6f4f66350867019ae6175115bd5", "size": "4367", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "thirdparty/v8/src/tools/testrunner/local/statusfile.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "86953" }, { "name": "JavaScript", "bytes": "4260" } ], "symlink_target": "" }
import sys
import socket

import tensorflow.contrib.keras as keras
import pandas as pd
import numpy as np

import vixstructure.models as models


def get_model(hidden_layers, step_size, dropout=None):
    """Build and compile a spread-price model (SGD optimizer, MSE loss)."""
    model = models.spread_price_prediction(hidden_layers, 12, dropout)
    sgd = keras.optimizers.SGD(step_size)
    model.compile(sgd, keras.losses.mean_squared_error, metrics=['accuracy'])
    return model


def normalize(data: pd.DataFrame):
    """Mean-center *data* and scale each column by its range (max - min)."""
    mean = data.mean()
    ptp = data.max() - data.min()
    return (data - mean) / ptp


def train(hidden_layers, dropout=None, epochs=50, verbose=1, validation_split=0.8, sgd_step_size=0.01):
    """Train one model configuration and persist its CSV log and weights.

    Note: here *validation_split* is the fraction of rows used for
    TRAINING (the tail is validation) — the opposite of the usual Keras
    meaning of the name.
    """
    # The leading rows of both CSVs carry no useful data and are skipped.
    lines_without_useful_data = 650
    term_structure = pd.read_csv("data/annual_structure.csv", header=0, index_col=0, dtype=np.float32,
                                 parse_dates=[0], skiprows=lines_without_useful_data)
    spread_prices = pd.read_csv("data/long_prices.csv", header=0, index_col=0, dtype=np.float32,
                                parse_dates=[0], skiprows=lines_without_useful_data)
    assert len(term_structure) == len(spread_prices)
    term_structure = normalize(term_structure)
    spread_prices = normalize(spread_prices)
    # NOTE(review): missing values are filled with 1.0 AFTER normalization —
    # confirm this sentinel is intended rather than 0.0 (the post-normalize
    # mean).
    term_structure = term_structure.fillna(1.)
    spread_prices = spread_prices.fillna(1.)

    # Chronological split: head for training, tail for validation.
    splitsize = int(len(term_structure) * validation_split)
    x_train = term_structure.values[:splitsize]
    y_train = spread_prices.values[:splitsize]
    x_val = term_structure.values[splitsize:]
    y_val = spread_prices.values[splitsize:]
    model = get_model(hidden_layers, sgd_step_size, dropout)

    # Encodes the hyper-parameters and host name into log/model file names.
    repr_string = "training_{}_{}_{}_{}".format(hidden_layers, "nodropout" if not dropout else dropout,
                                                sgd_step_size, socket.gethostname())
    history = model.fit(x_train, y_train, 1, epochs=epochs, verbose=verbose,
                        validation_data=(x_val, y_val), shuffle=True,
                        callbacks=[keras.callbacks.CSVLogger("./logs/predict_spread_prices/{}.csv".format(repr_string))])
    model.save("./models/predict_spread_prices/{}.hdf5".format(repr_string))


if __name__ == "__main__":
    # if len(sys.argv) != 3:
    #     print("Usage: train.py x y (where range(x,y) is the number of hidden layers)")
    #     print("Defaulting to 5 networks in range(1,6).")
    #     x = 1
    #     y = 6
    # else:
    #     x = int(sys.argv[1])
    #     y = int(sys.argv[2])
    # print(f"Training {y-x} networks in range({x},{y}).")
    # for i in range(x, y):
    #     print(f"Training with {i} hidden layer.")
    #     train(i)
    # Grid search over depth, dropout and SGD step size.
    for layer in range(8, 11):
        for dropout in (None, 0.5):
            for stepsize in (0.01, 0.003, 0.001, 0.0003, 0.0001):
                print("Train model with {} hidden layers, dropout is {}, stepsize {}.".format(layer, dropout, stepsize))
                train(layer, dropout=dropout, epochs=100, verbose=0, sgd_step_size=stepsize)
{ "content_hash": "e9f98ad75c5ebdc41297ae93b25dd7a6", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 140, "avg_line_length": 44.859375, "alnum_prop": 0.6405433646812957, "repo_name": "leyhline/vix-term-structure", "id": "2dcc39ed2c1ed9f3f560dc5db49d2c83141f6eb4", "size": "2895", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "old_train_spread_price_predictions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "11890737" }, { "name": "Python", "bytes": "86539" }, { "name": "Shell", "bytes": "348" }, { "name": "TeX", "bytes": "88413" } ], "symlink_target": "" }
import unittest
from collections import deque

import numpy as np
import pandas as pd
import pandas.util.testing as tm

from zipline.utils.data import RollingPanel


class TestRollingPanel(unittest.TestCase):
    """Unit test for zipline's RollingPanel rolling-window container."""

    def test_basics(self):
        # Feed 30 daily frames into a window-10 RollingPanel and check that
        # its current view equals a pandas Panel built from the last 10 frames.
        items = ['foo', 'bar', 'baz']
        minor = ['A', 'B', 'C', 'D']
        window = 10

        rp = RollingPanel(window, items, minor, cap_multiple=2)

        dates = pd.date_range('2000-01-01', periods=30, tz='utc')

        # Mirror of the panel's expected contents: deque tracks the dates
        # currently inside the window, frames maps date -> DataFrame.
        major_deque = deque()
        frames = {}

        for i in range(30):
            frame = pd.DataFrame(np.random.randn(3, 4), index=items,
                                 columns=minor)
            date = dates[i]

            rp.add_frame(date, frame)

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                # Keep the mirror the same length as the rolling window.
                major_deque.popleft()

        result = rp.get_current()
        expected = pd.Panel(frames, items=list(major_deque),
                            major_axis=items, minor_axis=minor)

        # RollingPanel stores axes transposed relative to the reference Panel.
        tm.assert_panel_equal(result, expected.swapaxes(0, 1))


def f(option='clever', n=500, copy=False):
    """Benchmark helper comparing RollingPanel ('clever') against naively
    rebuilding a pd.Panel on every iteration (any other option value).

    NOTE(review): reconstructed from a whitespace-collapsed source; the
    per-iteration placement of the `result = ...` statements inside the
    loops matches the benchmarking intent but should be confirmed against
    the original file.
    """
    items = range(5)
    minor = range(20)
    window = 100
    periods = n

    dates = pd.date_range('2000-01-01', periods=periods, tz='utc')

    frames = {}

    if option == 'clever':
        rp = RollingPanel(window, items, minor, cap_multiple=2)

        major_deque = deque()

        dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
                             index=items, columns=minor)

        for i in range(periods):
            # Scale the same base frame so each iteration's data is distinct
            # but cheap to produce.
            frame = dummy * (1 + 0.001 * i)
            date = dates[i]

            rp.add_frame(date, frame)

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                del frames[major_deque.popleft()]

            result = rp.get_current()

            if copy:
                result = result.copy()
    else:
        major_deque = deque()

        dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
                             index=items, columns=minor)

        for i in range(periods):
            frame = dummy * (1 + 0.001 * i)
            date = dates[i]

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                del frames[major_deque.popleft()]

            # Naive baseline: rebuild the whole Panel from scratch each step.
            result = pd.Panel(frames, items=list(major_deque),
                              major_axis=items, minor_axis=minor)
{ "content_hash": "b35eb91e37f7fe4ed6fd644de5d28df9", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 69, "avg_line_length": 26.978494623655912, "alnum_prop": 0.5185332801913113, "repo_name": "lsbardel/zipline", "id": "ded2c8ca69fb529b58ea93057dd278aa4a036658", "size": "3092", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/test_data_util.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
import calendar
import json
import random
import traceback
import datetime
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from fake_useragent import UserAgent
from datetime import date
import time
from dateutil.relativedelta import relativedelta

if __name__ == '__main__':
    sys.path.append('../../..')
    sys.path.append('../../../..')
    sys.path.append('../../../../..')
    from worker.crawler.base_crawler import BaseCrawler
else:
    from worker.crawler.base_crawler import BaseCrawler


class Crawler(BaseCrawler):
    """Crawler for the China Telecom (Jiangsu) WAP self-service portal.

    kwargs contain:
        'tel': str, 'pin_pwd': str, 'id_card': str,
        'full_name': unicode, 'sms_code': str, 'captcha_code': str

    Status codes returned by the methods below:
        0: success
        1: wrong account/password
        2: wrong verification code
        9: any other error

    NOTE(review): several methods parse server JSON with
    ``eval(resp.text.replace('null', ''))``.  ``eval`` on untrusted HTTP
    responses is dangerous and should be replaced with ``json.loads``;
    left unchanged here.
    """

    def __init__(self, **kwargs):
        """Initialize session state; pubToken/areaCode are filled by login()."""
        super(Crawler, self).__init__(**kwargs)
        self.session.headers.update({'User-Agent': UserAgent().safari})
        self.pubToken = ''
        self.areaCode = ''

    def need_parameters(self, **kwargs):
        # Parameters the caller must supply before crawling can start.
        return ['pin_pwd', 'sms_verify', 'full_name', 'id_card']

    def get_verify_type(self, **kwargs):
        # This carrier verifies users via SMS codes.
        return 'SMS'

    def login(self, **kwargs):
        """Log in and replay the portal's post-login AJAX sequence.

        Returns (code, key); also populates self.pubToken, self.areaCode
        and self.user_info as a side effect.
        """
        url = 'http://wapjs.189.cn/tysh/pages/main/home.html'
        code, key, resp = self.get(url)
        if code != 0:
            return code, key

        # # Log in
        url = 'http://wapjs.189.cn/tysh/interface/doAjax.do'
        headers = {
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Referer': 'http://wapjs.189.cn/tysh/pages/main/home.html',
            'Origin': 'http://wapjs.189.cn',
        }
        self.session.headers.update(headers)
        data = {
            'para': 'accNbr={};password={};accNbrType=2000004;PWDType=-1;areaCode=;actionCode=login037;channelCode_common=100003;pubAreaCode=025;pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=;userLogAccNbr=;userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken=undefined;'.format(kwargs['tel'], kwargs['pin_pwd']),
            'url': 'http://61.160.137.141/jszt/rest/login2User',
        }
        code, key, resp = self.post(url, data=data)
        if code != 0:
            return code, key
        # Classify the login failure by matching known Chinese error strings
        # in the response body (runtime strings: must not be translated).
        if u'服务密码错误' in resp.text or u'您已连续输错2次!' in resp.text or '密码被限制使用' in resp.text or u'已输错三次验证码' in resp.text:
            self.log('user', u'服务密码错误', resp)
            return 1, 'pin_pwd_error'
        if u'您的账号已被锁定' in resp.text:
            self.log('user', '账号已被锁定', resp)
            return 9, 'account_locked'
        try:
            # NOTE(review): eval on a server response -- see class docstring.
            resp_json = eval(resp.text.replace('null', ''))
            areaCode = resp_json['areaCode']
            token = resp_json['token']
            self.pubToken = token
            self.areaCode = areaCode
            para_str = """accNbr={};accNbrType=2000004;PWDType=-1;areaCode={};actionCode=yw013;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={}""".format(kwargs['tel'], areaCode, areaCode, kwargs['tel'], token)
        except:
            if u'用户的产品状态不允许登录' in resp.text:
                self.log('user', u'用户的产品状态不允许登录', resp)
                return 9, 'websiter_prohibited_error'
            msg = traceback.format_exc()
            if u'密码未激活' in resp.text:
                self.log('user', u'密码未激活', resp)
                return 1, 'pin_pwd_error'
            if u'帐号不存在' in resp.text:
                self.log('user', u'帐号不存在', resp)
                return 9, 'invalid_tel'
            self.log('crawler', msg, resp)
            return 9, 'json_error'

        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/rest/login2UserInfo',
        }
        code, key, resp1 = self.post(url, data=data)
        if code != 0:
            return code, key

        # Personal information (best effort; failures are logged, not fatal)
        self.user_info = {
            'full_name': '',
            'id_card': '',
            'is_realname_register': '',
            'open_date': '',
            'address': ''
        }
        try:
            # NOTE(review): parses `resp` (the login response), not `resp1`
            # (the login2UserInfo response) -- looks suspicious; confirm
            # against a live response before changing.
            user_msg = eval(resp.text.replace('null', ''))
            self.user_info['full_name'] = user_msg['customerName'].decode('utf-8')
            self.user_info['id_card'] = user_msg['indentNbr']
            self.user_info['open_date'] = user_msg['broadbandInfo'][0]['startTime']
            self.user_info['address'] = user_msg['broadbandInfo'][0]['address'].decode('utf-8')
            if self.user_info['id_card']:
                self.user_info['is_realname_register'] = True
        except:
            msg = traceback.format_exc()
            self.log('crawler', msg, resp)

        # Post-login follow-up request 2 (balance query)
        para_str = 'accNbr={};dccDestinationAttr=2;areaCode={};actionCode=jsztActionCode_uniformitysearchCallBalanceReqWithCache;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={}'.format(
            kwargs['tel'], areaCode, areaCode, kwargs['tel'], token)
        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/uniformity/searchCallBalanceReqWithCache',
        }
        code, key, resp2 = self.post(url, data=data)
        if code != 0:
            return code, key

        # Post-login follow-up request 3 (account score)
        para_str = 'accNbr={};areaCode={};family=2;actionCode=jsztActionCode_restaccnbrscore;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(kwargs['tel'], areaCode, areaCode, kwargs['tel'], token)
        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/rest/accnbrscore',
        }
        code, key, resp3 = self.post(url, data=data)
        if code != 0:
            return code, key

        # Post-login follow-up request 4 (current accumulation)
        para_str = 'accNbr={};areaCode={};family=2;actionCode=jsztActionCode_uniformitysearchCurrAcuReqWithCache;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(
            kwargs['tel'], areaCode, areaCode, kwargs['tel'], token)
        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/uniformity/searchCurrAcuReqWithCache',
        }
        code, key, resp4 = self.post(url, data=data)
        if code != 0:
            return code, key

        # Post-login follow-up request 5 (focus pictures); its TSR_MSG field
        # is used as the overall login success indicator.
        para_str = 'recommendAreaMark=100;limit=21;areaCode={};actionCode=jsztActionCode_flowflowrestgetFocusPic;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_804723162833;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(
            areaCode, areaCode, kwargs['tel'], token)
        data = {
            'para': para_str,
            'url': 'http://221.228.39.34/ZtFlowOrder/jszt/flow/flowrest/getFocusPic',
        }
        code, key, resp5 = self.post(url, data=data)
        if code != 0:
            return code, key
        try:
            resp_json = json.loads(resp5.text)
            if resp_json['TSR_MSG'] == '成功':
                return 0, 'success'
            if u'繁忙,请稍后再试' in resp5.text:
                # Dump all five follow-up responses to aid debugging.
                self.log('website', u'1:{}\n 2:{}\n 3:{}\n 4:{}\n 5:{}\n'.
                         format(resp.text, resp1.text, resp2.text, resp3.text, resp4.text), resp5)
                return 9, 'website_busy_error'
            # self.log('crawler', u'未知错误', resp5)
            self.log('crawler', u'未知错误1:{}\n 2:{}\n 3:{}\n 4:{}\n 5:{}\n'.
                     format(resp.text, resp1.text, resp2.text, resp3.text, resp4.text), resp5)
            return 9, 'crawl_error'
        except:
            msg = traceback.format_exc()
            self.log('crawler', msg, resp5)
            return 9, 'json_error'

    def send_verify_request(self, **kwargs):
        """Ask the portal to send an SMS auth code; returns (code, key, '')."""
        url = 'http://wapjs.189.cn/tysh/interface/doAjax.do'
        para_str = 'accNbr={};actionCode=auth_smscode_001;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_553072141649;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(
            kwargs['tel'], self.areaCode, kwargs['tel'], self.pubToken)
        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/ztauth/getAuthCode',
        }
        code, key, resp = self.post(url, data=data)
        if code != 0:
            return code, key, ''
        try:
            resp_json = eval(resp.text.replace('null', ''))
            if resp_json['TSR_MSG'] == '成功':
                return 0, 'success', ''
            if u'操作太频繁啦' in resp.text:
                self.log('user', 'send_sms_too_quick_error', resp)
                return 9, 'send_sms_too_quick_error', ''
            self.log('crawler', 'send_sms_error', resp)
            return 9, 'send_sms_error', ''
        except:
            msg = traceback.format_exc()
            self.log('crawler', msg, resp)
            return 9, 'json_error', ''

    def verify(self, **kwargs):
        """Submit SMS code + identity info; map the response to (code, key)."""
        # return 0, 'success'
        url = 'http://wapjs.189.cn/tysh/interface/doAjax.do'
        para_str = 'accNbr={};family=2;code={};idcard={};name={};actionCode=jsztActionCode_ztauthverifyIdentity;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_553072141649;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(
            kwargs['tel'], kwargs['sms_code'], kwargs['id_card'], kwargs['full_name'], self.areaCode, kwargs['tel'], self.pubToken)
        data = {
            'para': para_str,
            'url': 'http://61.160.137.141/jszt/ztauth/verifyIdentity',
        }
        code, key, resp = self.post(url, data=data)
        if code != 0:
            return code, key
        if u'"TSR_MSG":"成功"' in resp.text or u'"TSR_MSG":"成功!"' in resp.text or 'accNbr' in resp.text:
            return 0, 'success'
        if u'验证码错误或者失效' in resp.text:
            self.log('user', 'verify_error', resp)
            return 2, 'verify_error'
        if u'姓名验证失败' in resp.text:
            self.log('user', 'user_name_error', resp)
            return 2, 'user_name_error'
        if u'身份证验证失败' in resp.text:
            self.log('user', 'user_id_error', resp)
            return 2, 'user_id_error'
        if u'系统繁忙' in resp.text:
            self.log('website', 'website_busy_error', resp)
            return 9, 'website_busy_error'
        self.log('crawler', u'未知错误', resp)
        return 9, 'crawl_error'

    def crawl_info(self, **kwargs):
        # user_info was collected during login().
        return 0, "success", self.user_info

    def crawl_call_log(self, **kwargs):
        """Fetch call records for the last 6 calendar months with retries.

        Returns (code, key, call_log, missing_list, possibly_missing_list).
        """
        missing_list = []
        possibly_missing_list = []
        call_log = []
        crawl_num = 0
        today = date.today()
        page_and_retry = []
        url = 'http://wapjs.189.cn/tysh/interface/doAjax.do'
        # pageSize 3000
        # Build one request per month: current month and the 5 before it.
        search_month = [x for x in range(0, -6, -1)]
        for each_month in search_month:
            query_date = today + relativedelta(months=each_month)
            begDate = "%d%02d01" % (query_date.year, query_date.month)
            # Last day of the queried month.
            endDay = calendar.monthrange(query_date.year, query_date.month)[1]
            endDate = "%d%02d%s" % (query_date.year, query_date.month, endDay)
            para = 'accNbr={};acctName={};begDate={};endDate={};areaCode={};productId=2;family=2;auth=auth;curPage=1;pageSize=3000;getFlag=1;actionCode=jsztActionCode_restvoiceticket;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_553072141649;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'
            para = para.format(kwargs['tel'], kwargs['tel'], begDate, endDate, self.areaCode, self.areaCode, kwargs['tel'], self.pubToken)
            data = {
                'url': 'http://61.160.137.141/jszt/rest/voiceticket',
                'para': para
            }
            page_and_retry.append((data, begDate[:6], self.max_retry))
        st_time = time.time()
        # Extra-retry grace window: keep re-queueing failed months for 20s.
        et_time = st_time + 20
        log_for_retry_request = []
        while page_and_retry:
            call_log_data, m_query_month, m_retry_times = page_and_retry.pop(0)
            log_for_retry_request.append((m_query_month, m_retry_times))
            m_retry_times -= 1
            result = []
            msg = ''
            code, key, resp = self.post(url, data=call_log_data)
            if code == 0:
                try:
                    result = self.call_log_get(resp.text)
                    if result:
                        call_log.extend(result)
                        continue
                except:
                    msg = traceback.format_exc()
                    crawl_num += 1
            now_time = time.time()
            if m_retry_times > 0:
                # Normal retry budget not exhausted: push back onto the queue.
                page_and_retry.append((call_log_data, m_query_month, m_retry_times))
                continue
            elif now_time < et_time:
                # Budget exhausted but still inside the grace window:
                # allow up to 10 further attempts with a short sleep.
                rand_sleep = random.randint(2, 4)
                if m_retry_times > -10:
                    page_and_retry.append((call_log_data, m_query_month, m_retry_times))
                    time.sleep(rand_sleep)
                    continue
            if code == 0 and not result:
                # Request succeeded but the month simply had no records.
                possibly_missing_list.append(m_query_month)
            else:
                missing_list.append(m_query_month)
                self.log('website', u'未找到指定数据:{}'.format(msg), resp)
        self.log("crawler", "重试记录: {}".format(log_for_retry_request), "")
        # print('missing_list:{}'.format(missing_list))
        # print('possibly_missing_list:{}'.format(possibly_missing_list))
        if len(missing_list) + len(possibly_missing_list) == 6:
            # Every month failed or was empty: treat as a global failure.
            if crawl_num > 0:
                return 9, 'crawl_error', call_log, missing_list, possibly_missing_list
            return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list
        return 0, "success", call_log, missing_list, possibly_missing_list

    def call_log_get(self, response):
        """Parse one month's voiceticket response into record dicts.

        Emitted fields:
        | `update_time`   | string | crawl timestamp                          |
        | `call_cost`     | string | charge for the call                      |
        | `call_method`   | string | call direction (outgoing/incoming)       |
        | `call_type`     | string | call category (local/long-distance)      |
        | `call_from`     | string | this phone's location                    |
        | `call_to`       | string | other party's region (always empty here) |
        | `call_duration` | string | duration in seconds                      |
        `call_time` is a unix-epoch string built from the record's local
        start date/time.
        """
        records = []
        resp_json = eval(response.replace('null', ''))
        if not resp_json.has_key('items'):
            return []
        items = resp_json['items']
        # A single record comes back as a dict, not a one-element list.
        if not isinstance(items, list):
            items1 = []
            items1.append(items)
            items = items1
        for item in items:
            data = {}
            data['month'] = item['startDateNew'].replace('-', '')[:6]
            data['update_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            data['call_cost'] = item['ticketChargeCh']
            timeArray = time.strptime('{} {}'.format(item['startDateNew'], item['startTimeNew']), "%Y-%m-%d %H:%M:%S")
            call_time_timeStamp = str(int(time.mktime(timeArray)))
            data['call_time'] = call_time_timeStamp
            data['call_method'] = item['ticketTypeNew'].decode('utf-8')
            data['call_type'] = item['durationType'].decode('utf-8')
            raw_call_from = item['areaCode'].strip()
            # formatarea() normalizes the area code; fall back to the raw one.
            call_from, error = self.formatarea(raw_call_from)
            if call_from:
                data['call_from'] = call_from
            else:
                data['call_from'] = raw_call_from
            data['call_to'] = ''
            if 'nbr' in item:
                data['call_tel'] = item['nbr']
            else:
                data['call_tel'] = item['accNbr']
            # The API misspells the duration key on some records.
            if 'duartionCh' in item:
                duration = item['duartionCh']
            else:
                duration = item['durationCh']
            # "HH:MM:SS" -> total seconds.
            call_durations = duration.split(':')
            call_duration = int(call_durations[0]) * 3600 + int(call_durations[1]) * 60 + int(call_durations[2])
            data['call_duration'] = str(call_duration)
            records.append(data)
        return records

    def __monthly_period(self, length=6, strf='%Y%m'):
        # Yield the current month and the (length-1) preceding ones,
        # formatted with strf (newest first).
        current_time = datetime.datetime.now()
        for month_offset in range(0, length):
            yield (current_time - relativedelta(months=month_offset)).strftime(strf)

    def crawl_phone_bill(self, **kwargs):
        """Fetch monthly bill summaries for the last 6 months.

        Returns (code, key, phone_bill, missing_list).
        """
        phone_bill = list()
        missing_list = []
        # NOTE(review): crawl_num is never incremented in this method, so the
        # 'crawl_error' branch below is currently unreachable.
        crawl_num = 0
        data = {
            'para': '',
            'url': 'http://61.160.137.141/jszt/rest/bill',
        }
        url = 'http://wapjs.189.cn/tysh/interface/doAjax.do'
        for searchMonth in self.__monthly_period(6, '%Y%m'):
            para = 'accNbr={};areaCode={};productId=2;billingCycleId={};auth=auth;queryFlag=0;actionCode=fy002;channelCode_common=100003;pubAreaCode={};pushUserId=jszt_307261426672;coachUser=;userLogAccNbrType=2;userLogAccNbr={};userTokenAccNbrType=2;ztVersion=3.1.0;ztInterSource=100003;pubToken={};'.format(kwargs['tel'], self.areaCode, searchMonth, self.areaCode, kwargs['tel'], self.pubToken)
            data['para'] = para
            code, key, resp = self.post(url, data=data)
            if code != 0:
                # return code, key
                missing_list.append(searchMonth)
                continue
            try:
                result = self.get_phone_bill(resp, searchMonth)
                phone_bill.append(result)
            except:
                if u'鉴权不通过,请重新登录' in resp.text:
                    self.log('website', u'鉴权不通过,请重新登录', resp)
                    return 9, 'website_busy_error', phone_bill, missing_list
                msg = traceback.format_exc()
                self.log('crawler', msg, resp)
                missing_list.append(searchMonth)
                continue
        if len(missing_list) == 6:
            if crawl_num > 0:
                return 9, 'crawl_error', phone_bill, missing_list
            return 9, 'website_busy_error', phone_bill, missing_list
        today = date.today()
        today_month = "%d%02d" % (today.year, today.month)
        # The current (unfinished) month is allowed to be missing.
        if today_month in missing_list:
            missing_list.remove(today_month)
        return 0, 'success', phone_bill, missing_list

    def get_phone_bill(self, resp, month):
        """Extract one month's bill totals from a bill response.

        Only the line items matched below are filled; the remaining keys
        stay as empty strings.
        """
        phone_bill = eval(resp.text.replace('null', ''))
        bill_list = phone_bill['CustBillQuery'][1]['ProductBillTree'][0]
        bill_data = {
            'bill_month': month,
            'bill_amount': '',
            'bill_package': '',
            'bill_ext_calls': '',
            'bill_ext_data': '',
            'bill_ext_sms': '',
            'bill_zengzhifei': '',
            'bill_daishoufei': '',
            'bill_qita': '',
        }
        for bill in bill_list:
            # Match bill line items by their Chinese display name
            # (mobile total / package discount / domestic SMS / voice).
            if bill['BillItemName'] == u'手机':
                bill_data['bill_amount'] = bill['BillItemAmount']
            if bill['BillItemName'] == u'套餐费优惠':
                bill_data['bill_package'] = bill['BillItemAmount']
            if bill['BillItemName'] == u'国内短信费':
                bill_data['bill_ext_sms'] = bill['BillItemAmount']
            if bill['BillItemName'] == u'语音通信费':
                bill_data['bill_ext_calls'] = bill['BillItemAmount']
        return bill_data


if __name__ == '__main__':
    # Ad-hoc manual smoke test with hard-coded credentials.
    c = Crawler()
    USER_ID = "17368357716"
    USER_PASSWORD = "488496"
    full_name = '应朝晖'
    id_card = '33010219680525123X'
    c.self_test(tel=USER_ID, pin_pwd=USER_PASSWORD, full_name=full_name, id_card=id_card)
{ "content_hash": "6b02a43a140a55e24a716c91f9802761", "timestamp": "", "source": "github", "line_count": 463, "max_line_length": 394, "avg_line_length": 44.183585313174945, "alnum_prop": 0.5419660751820893, "repo_name": "Svolcano/python_exercise", "id": "401cf43f43b75c1e2788aae3d2ce391b38a963eb", "size": "21139", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dianhua/worker/crawler/china_telecom/wap/jiangsu/main.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "732" }, { "name": "JavaScript", "bytes": "157614" }, { "name": "Python", "bytes": "3292561" }, { "name": "Shell", "bytes": "1417" } ], "symlink_target": "" }
import flask
from sqlalchemy import exc as sa_exc
from sqlalchemy import sql
from sqlalchemy import orm

from dci.api.v1 import base, sso
from dci import auth
from dci import dci_config
from dci.common import exceptions as dci_exc
from dciauth.request import AuthRequest
from dciauth.signature import Signature
from dciauth.v2.headers import parse_headers
from dciauth.v2.signature import is_valid
from dci.db import models2
from dci.identity import Identity
from jwt import exceptions as jwt_exc


class BaseMechanism(object):
    """Common plumbing shared by all DCI authentication mechanisms."""

    def __init__(self, request):
        self.request = request
        # Set by authenticate() on success.
        self.identity = None

    def authenticate(self):
        """Authenticate the user, if the user fail to authenticate then the
        method must raise an exception with proper error message."""
        pass

    def identity_from_db(self, model_constraint):
        """Load the single active user matching model_constraint.

        Returns an Identity (with team/role flags filled in) or None when
        no active user matches.
        """
        try:
            user = (
                flask.g.session.query(models2.User)
                .filter(models2.User.state == "active")
                .options(orm.selectinload("team"))
                .filter(model_constraint)
                .one()
            )
        except orm.exc.NoResultFound:
            return None
        user_info = {
            # UUID to str
            "id": str(user.id),
            "password": user.password,
            "name": user.name,
            "fullname": user.fullname,
            "timezone": user.timezone,
            "email": user.email,
            "sso_username": user.sso_username,
            "etag": user.etag,
            "is_user": True,
        }
        # Derive role flags from membership in the well-known teams whose
        # ids live on flask.g.
        is_super_admin = False
        is_read_only_user = False
        is_epm_user = False
        user_teams = {}
        for user_team in user.team:
            if user_team.id == flask.g.team_admin_id:
                is_super_admin = True
            if user_team.id == flask.g.team_redhat_id:
                is_read_only_user = True
            if user_team.id == flask.g.team_epm_id:
                is_epm_user = True
            # TODO (gvincent): use user_team.serialize()
            user_teams[user_team.id] = {
                "id": user_team.id,
                "name": user_team.name,
            }
        user_info["teams"] = user_teams
        user_info["is_super_admin"] = is_super_admin
        user_info["is_read_only_user"] = is_read_only_user
        user_info["is_epm_user"] = is_epm_user
        return Identity(user_info)

    def check_team_is_active(self, team_id):
        # 412 Precondition Failed when the identity's team is not active.
        if self.identity.teams[team_id]["state"] != "active":
            name = self.identity.teams[team_id]["team_name"]
            raise dci_exc.DCIException("team %s not active" % name, status_code=412)


class BasicAuthMechanism(BaseMechanism):
    """HTTP Basic authentication against users stored in the database."""

    def authenticate(self):
        # Local `auth` here is the parsed Authorization header, shadowing
        # the module-level `dci.auth` import within this method.
        auth = self.request.authorization
        if not auth:
            raise dci_exc.DCIException("Authorization header missing", status_code=401)
        user, is_authenticated = self.get_user_and_check_auth(
            auth.username, auth.password
        )
        if not is_authenticated:
            raise dci_exc.DCIException("Invalid user credentials", status_code=401)
        self.identity = user
        return True

    def get_user_and_check_auth(self, username, password):
        """Check the combination username/password that is valid on the
        database.
        """
        # The username may be either the login name or the email address.
        user = self.identity_from_db(models2.User.name == username)
        if user is None:
            user = self.identity_from_db(models2.User.email == username)
        if user is None:
            raise dci_exc.DCIException(
                "User %s does not exists." % username, status_code=401
            )
        # `auth` here is the dci.auth module (constant-time password check).
        return user, auth.check_passwords_equal(password, user.password)


class HmacMechanism(BaseMechanism):
    """HMAC signature authentication (v1) for remotecis and feeders."""

    def authenticate(self):
        headers = self.request.headers
        auth_request = AuthRequest(
            method=self.request.method,
            endpoint=self.request.path,
            payload=self.request.get_json(silent=True),
            headers=headers,
            params=self.request.args.to_dict(flat=True),
        )
        hmac_signature = Signature(request=auth_request)
        self.identity = self.build_identity(auth_request.get_client_info())
        if self.identity is None:
            raise dci_exc.DCIException("identity does not exists.", status_code=401)
        secret = self.identity.api_secret
        if not hmac_signature.is_valid(secret):
            raise dci_exc.DCIException(
                "HmacMechanism failed: signature invalid",
                status_code=401,
            )
        if hmac_signature.is_expired():
            raise dci_exc.DCIException(
                "HmacMechanism failed: signature expired",
                status_code=401,
            )
        if len(self.identity.teams_ids) > 0:
            self.check_team_is_active(self.identity.teams_ids[0])
        return True

    def build_identity(self, client_info):
        """Build an Identity for a remoteci/feeder client; None if the
        client type is not one of the allowed ones."""
        allowed_types_model = {
            "remoteci": models2.Remoteci,
            "feeder": models2.Feeder,
        }
        client_type = client_info["client_type"]
        identity_model = allowed_types_model.get(client_type)
        if identity_model is None:
            return None
        identity = base.get_resource_orm(
            identity_model, client_info["client_id"], options=[orm.selectinload("team")]
        )
        return Identity(
            {
                "id": str(identity.id),
                "teams": {
                    identity.team.id: {
                        "team_name": identity.team.name,
                        "state": identity.team.state,
                    }
                },
                "api_secret": str(identity.api_secret),
                "is_remoteci": client_type == "remoteci",
                "is_feeder": client_type == "feeder",
            }
        )


class Hmac2Mechanism(HmacMechanism):
    """HMAC signature authentication (v2 header format); reuses the v1
    build_identity()."""

    def authenticate(self):
        headers = parse_headers(self.request.headers)
        if not headers:
            raise dci_exc.DCIException(
                "Hmac2Mechanism failed: bad or incomplete headers.", status_code=400
            )
        self.identity = self.build_identity(headers)
        if self.identity is None:
            raise dci_exc.DCIException("identity does not exists.", status_code=401)
        valid, error_message = is_valid(
            {
                "method": self.request.method,
                "endpoint": self.request.path,
                "data": self.request.data,
                "params": self.request.args.to_dict(flat=True),
            },
            {"secret_key": self.identity.api_secret},
            headers,
        )
        if not valid:
            # NOTE(review): no status_code here, unlike the other failures --
            # presumably falls back to the exception's default; confirm.
            raise dci_exc.DCIException("Hmac2Mechanism failed: %s" % error_message)
        if len(self.identity.teams_ids) > 0:
            self.check_team_is_active(self.identity.teams_ids[0])
        return True


class OpenIDCAuth(BaseMechanism):
    """Bearer-token (OpenID Connect / SSO) authentication; creates the
    user on first login."""

    def authenticate(self):
        auth_header = self.request.headers.get("Authorization").split(" ")
        if len(auth_header) != 2:
            return False
        bearer, token = auth_header
        conf = dci_config.CONFIG
        try:
            decoded_token = auth.decode_jwt(
                token, conf["SSO_PUBLIC_KEY"], conf["SSO_CLIENT_ID"]
            )
        except (jwt_exc.DecodeError, ValueError):
            # The configured key may be stale; retry with the latest one.
            decoded_token = sso.decode_token_with_latest_public_key(token)
        except jwt_exc.ExpiredSignatureError:
            raise dci_exc.DCIException(
                "JWT token expired, please refresh.", status_code=401
            )
        # Members of the read-only SSO group are attached to the Red Hat team.
        team_id = None
        ro_group = conf["SSO_READ_ONLY_GROUP"]
        realm_access = decoded_token["realm_access"]
        if "roles" in realm_access and ro_group in realm_access["roles"]:
            team_id = flask.g.team_redhat_id
        user_info = self._get_user_info(decoded_token)
        try:
            self.identity = self._get_or_create_user(user_info, team_id)
        except sa_exc.IntegrityError:
            raise dci_exc.DCICreationConflict("users", "username")
        return True

    @staticmethod
    def _get_user_info(token):
        # Map JWT claims to the User model's columns.
        return {
            "name": token.get("username"),
            "fullname": token.get("username"),
            "sso_username": token.get("username"),
            "email": token.get("email"),
            "timezone": "UTC",
        }

    def _get_or_create_user(self, user_info, team_id=None):
        """Return the Identity for user_info, creating the user row (and
        optional team membership) on first login."""
        # Match on sso_username or either email variant.
        constraint = sql.or_(
            models2.User.sso_username == user_info["sso_username"],
            models2.User.email == user_info["sso_username"],
            models2.User.email == user_info["email"],
        )
        identity = self.identity_from_db(constraint)
        if identity is None:
            try:
                user = models2.User(**user_info)
                flask.g.session.add(user)
                flask.g.session.commit()
                if team_id is not None:
                    team = base.get_resource_orm(models2.Team, team_id)
                    team.users.append(user)
                    flask.g.session.add(team)
                    flask.g.session.commit()
            except Exception:
                flask.g.session.rollback()
                raise dci_exc.DCIException(
                    message="Cannot create user in Open ID Connect auth mechanism"
                )
            # Re-query so role/team flags are computed the normal way.
            identity = self.identity_from_db(constraint)
            return identity
        return identity
{ "content_hash": "9f24f0df33174338247a1ca770d5a591", "timestamp": "", "source": "github", "line_count": 263, "max_line_length": 88, "avg_line_length": 36.26235741444867, "alnum_prop": 0.5620216000838838, "repo_name": "redhat-cip/dci-control-server", "id": "4777813836961f6ec8f8b05d750d85fd84d9c23a", "size": "10140", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dci/auth_mechanism.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1472" }, { "name": "Mako", "bytes": "1112" }, { "name": "Python", "bytes": "688221" }, { "name": "Shell", "bytes": "2062" } ], "symlink_target": "" }
from indico.core.db import db
from indico.modules.events.cloning import EventCloner
from indico.modules.vc import VCRoomEventAssociation
from indico.modules.vc.models.vc_rooms import VCRoomLinkType
from indico.modules.vc.util import get_vc_plugins
from indico.util.i18n import _


class VCCloner(EventCloner):
    """Event cloner that copies videoconference room associations to the
    new event, delegating the actual room cloning to each VC plugin."""

    name = 'vc'
    friendly_name = _('Videoconference')
    # Uses the session-block and contribution maps produced by these cloners.
    uses = {'sessions', 'contributions'}
    is_default = True

    @property
    def is_visible(self):
        # Only offer this cloner when at least one VC plugin is installed.
        return bool(get_vc_plugins())

    @property
    def is_available(self):
        if self.n_occurrence > 1:
            # if we're not on the first occurrence, we shouldn't do this check,
            # since there's the possibility all rooms are gone in the meantime
            return True
        return self._has_content(self.old_event)

    def get_conflicts(self, target_event):
        if self._has_content(target_event):
            return [_('The target event already has a videoconference')]

    def _has_content(self, event):
        # True if the event has any VC room association, hidden ones included.
        return VCRoomEventAssociation.find_for_event(event, include_hidden=True).has_rows()

    def run(self, new_event, cloners, shared_data, event_exists=False):
        """Clone the old event's VC room associations into new_event."""
        self._clone_nested_vc_rooms = False
        self._session_block_map = self._contrib_map = None
        # Rooms linked to contributions/session blocks can only be cloned
        # when both of those cloners ran and shared their object maps.
        if cloners >= {'sessions', 'contributions'}:
            self._clone_nested_vc_rooms = True
            self._session_block_map = shared_data['sessions']['session_block_map']
            self._contrib_map = shared_data['contributions']['contrib_map']
        with db.session.no_autoflush:
            self._clone_vc_rooms(new_event)
        db.session.flush()

    def _clone_vc_rooms(self, new_event):
        # Re-link each association to the corresponding object in the clone.
        for old_event_vc_room in self.old_event.all_vc_room_associations:
            link_object = None
            if old_event_vc_room.link_type == VCRoomLinkType.event:
                link_object = new_event
            elif old_event_vc_room.link_type == VCRoomLinkType.contribution and self._contrib_map is not None:
                link_object = self._contrib_map[old_event_vc_room.link_object]
            elif old_event_vc_room.link_type == VCRoomLinkType.block and self._session_block_map is not None:
                link_object = self._session_block_map[old_event_vc_room.link_object]
            if link_object is None:
                # Linked object was not cloned -> skip this association.
                continue
            plugin = old_event_vc_room.vc_room.plugin
            if not plugin:
                # The plugin providing this room is no longer installed.
                continue
            clone = plugin.clone_room(old_event_vc_room, link_object)
            if clone:
                # the plugin may decide to not clone the room
                old_event_vc_room.vc_room.events.append(clone)
{ "content_hash": "96f9c5a5b2add2a93b12def1e1fa3e70", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 110, "avg_line_length": 43.16129032258065, "alnum_prop": 0.6352765321375187, "repo_name": "indico/indico", "id": "1f919f0a01b6dc5841c5df16a649eda410cd0b06", "size": "2890", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "indico/modules/vc/clone.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "33289" }, { "name": "HTML", "bytes": "1420471" }, { "name": "JavaScript", "bytes": "2362355" }, { "name": "Mako", "bytes": "1527" }, { "name": "Python", "bytes": "5550085" }, { "name": "SCSS", "bytes": "486043" }, { "name": "Shell", "bytes": "3877" }, { "name": "TeX", "bytes": "23435" }, { "name": "XSLT", "bytes": "1504" } ], "symlink_target": "" }
import json
from time import time

from webob import Request


class ForensicMiddleware(object):
    """WSGI middleware that logs every request as JSON, one request per line.

    NOTE: Needs to set some reasonable restrictions on request content
    length in order to be usable in production. Request bodies which are
    too large should be logged to a file, or truncated somehow.
    """

    def __init__(self, app, path):
        self.app = app
        # Opened once in append mode and kept open for the middleware's
        # lifetime; every request appends one line.
        self.f = open(path, 'a')

    def __call__(self, environ, start_response):
        request = Request(environ)
        response = request.get_response(self.app)

        record = {
            'method': request.method,
            'body': request.body.decode('utf-8'),
            'url': request.url,
            'time': time(),
            'headers': list(request.headers.items()),
        }
        serialized = json.dumps(record)
        # json.dumps never emits raw newlines, so each record stays on one line.
        assert '\n' not in serialized

        self.f.write(serialized)
        self.f.write('\n')
        self.f.flush()

        return response(environ, start_response)
{ "content_hash": "b294ebd95f97f6af679b8ca791a165c5", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 77, "avg_line_length": 29.529411764705884, "alnum_prop": 0.5846613545816733, "repo_name": "storborg/maitai", "id": "0bbd04aef1f7a9487e8e9e225c97dbd93c5981cb", "size": "1004", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "maitai/forensic.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24752" } ], "symlink_target": "" }
from openflow.optin_manager.sfa.rspecs.elements.element import Element


class Property(Element):
    """RSpec link <property> element.

    Only declares the attribute names the Element base class should
    recognise; values are attached by the RSpec parsing/generation code
    elsewhere.
    """
    fields = [
        'source_id',
        'dest_id',
        'capacity',
        'latency',
        'packet_loss',
    ]
{ "content_hash": "cfd935400d562c6ab882a96c828ac371", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 70, "avg_line_length": 19.416666666666668, "alnum_prop": 0.5407725321888412, "repo_name": "dana-i2cat/felix", "id": "d4096d659c017bd3533f285ffa95b35cbaba3a64", "size": "233", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "optin_manager/src/python/openflow/optin_manager/sfa/rspecs/elements/property.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "337811" }, { "name": "DTrace", "bytes": "370" }, { "name": "Elixir", "bytes": "17243" }, { "name": "Emacs Lisp", "bytes": "1098" }, { "name": "Groff", "bytes": "1735" }, { "name": "HTML", "bytes": "660363" }, { "name": "Java", "bytes": "18362" }, { "name": "JavaScript", "bytes": "838960" }, { "name": "Makefile", "bytes": "11211" }, { "name": "Perl", "bytes": "5416" }, { "name": "Python", "bytes": "7875883" }, { "name": "Shell", "bytes": "258079" } ], "symlink_target": "" }
from jsonrpc import ServiceProxy import sys import string # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:12700") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:12700") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a Ion address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd == 
"gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a Ion address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations (optional): ") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? 
(true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") 
comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? (true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = raw_input("Enter old wallet passphrase: ") pwd2 = raw_input("Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
{ "content_hash": "d66c16afd1146bb93cf40dfc2b84826e", "timestamp": "", "source": "github", "line_count": 324, "max_line_length": 79, "avg_line_length": 24.166666666666668, "alnum_prop": 0.6614303959131546, "repo_name": "aspaas/ion", "id": "48f7cbb3d36f9b098f69fec415a4a6925039f304", "size": "7830", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "contrib/bitrpc/bitrpc.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "616463" }, { "name": "C++", "bytes": "4560754" }, { "name": "CSS", "bytes": "1127" }, { "name": "HTML", "bytes": "50621" }, { "name": "Java", "bytes": "2100" }, { "name": "M4", "bytes": "18274" }, { "name": "Makefile", "bytes": "16792" }, { "name": "NSIS", "bytes": "5917" }, { "name": "Objective-C++", "bytes": "6205" }, { "name": "Python", "bytes": "96149" }, { "name": "QMake", "bytes": "20721" }, { "name": "Shell", "bytes": "391146" } ], "symlink_target": "" }
import os
import sys

try:
    # Prefer setuptools; fall back to distutils if it is unavailable.
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: `python setup.py publish` builds and uploads a source dist.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

required = ['requests']

# FIX: read the README through a context manager so the file handle is
# closed deterministically (the previous open(...).read() leaked it).
with open('README.md') as readme_file:
    long_description = readme_file.read()

setup(
    name='chain-python',
    version='0.0.3',
    description="The Unofficial Python SDK for Chain's Bitcoin API",
    long_description=long_description,
    author='Mahdi Yusuf',
    author_email='yusuf.mahdi@gmail.com',
    url='https://github.com/myusuf3/chain-python',
    install_requires=required,
    license='MIT',
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
    ),
)
{ "content_hash": "fcc102d161fc5e5430d1551039d33ec4", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 68, "avg_line_length": 28.125, "alnum_prop": 0.6186666666666667, "repo_name": "myusuf3/chain-python", "id": "b2b1535a3c426701564fefe6bc8fb480b5c85b6c", "size": "1172", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "4802" } ], "symlink_target": "" }
"""count_word_api URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import include, url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('count_word_api.core.urls', namespace='core')), ]
{ "content_hash": "76a1bfeafac0f1c78778e58b745a792e", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 79, "avg_line_length": 38.63636363636363, "alnum_prop": 0.6952941176470588, "repo_name": "rafaelhenrique/count_word_api", "id": "1fdac152c39e6284159d4180edccd906629ebf77", "size": "850", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "count_word_api/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2060" }, { "name": "Python", "bytes": "12204" } ], "symlink_target": "" }
""" jQuery templates use constructs like: {{if condition}} print something{{/if}} This, of course, completely screws up Django templates, because Django thinks {{ and }} mean something. Wrap {% verbatim %} and {% endverbatim %} around those blocks of jQuery templates and this will try its best to output the contents with no changes. From: https://gist.github.com/629508 """ from django import template register = template.Library() class VerbatimNode(template.Node): def __init__(self, text): self.text = text def render(self, context): return self.text @register.tag def verbatim(parser, token): text = [] while 1: token = parser.tokens.pop(0) if token.contents == 'endverbatim': break if token.token_type == template.TOKEN_VAR: text.append('{{') elif token.token_type == template.TOKEN_BLOCK: text.append('{%') text.append(token.contents) if token.token_type == template.TOKEN_VAR: text.append('}}') elif token.token_type == template.TOKEN_BLOCK: text.append('%}') return VerbatimNode(''.join(text))
{ "content_hash": "dac43da7d29d070a30dea90bca4f7d80", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 55, "avg_line_length": 25.456521739130434, "alnum_prop": 0.6293766011955594, "repo_name": "boosh/pwlocker", "id": "b24438dd2b3e8e99b4fec384ce2816b3f9205966", "size": "1171", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pwlocker/apps/passwords/templatetags/verbatim.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "82490" }, { "name": "Python", "bytes": "68818" } ], "symlink_target": "" }
""" Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its historic focus is autocompletion, but does static analysis for now as well. Jedi is fast and is very well tested. It understands Python on a deeper level than all other static analysis frameworks for Python. Jedi has support for two different goto functions. It's possible to search for related names and to list all names in a Python file and infer them. Jedi understands docstrings and you can use Jedi autocompletion in your REPL as well. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_, which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. It's really easy. To give you a simple example how you can use the Jedi library, here is an example for the autocompletion feature: >>> import jedi >>> source = ''' ... import datetime ... datetime.da''' >>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') >>> script <Script: 'example.py'> >>> completions = script.completions() >>> completions #doctest: +ELLIPSIS [<Completion: date>, <Completion: datetime>, ...] >>> print(completions[0].complete) te >>> print(completions[0].name) date As you see Jedi is pretty simple and allows you to concentrate on writing a good text editor, while still having very good IDE features for Python. """ __version__ = '0.10.2' from jedi.api import Script, Interpreter, NotFoundError, set_debug_function from jedi.api import preload_module, defined_names, names from jedi import settings
{ "content_hash": "920af817163eb7edd84f348158175086", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 79, "avg_line_length": 38.16279069767442, "alnum_prop": 0.7343083485679464, "repo_name": "nitin-cherian/LifeLongLearning", "id": "5fe878d310aab0670a85375ee83a15654fc68e79", "size": "1641", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jedi/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "32365" }, { "name": "CSS", "bytes": "10259" }, { "name": "HTML", "bytes": "55977" }, { "name": "JavaScript", "bytes": "7368910" }, { "name": "Jupyter Notebook", "bytes": "768879" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "17502534" }, { "name": "Shell", "bytes": "7751" }, { "name": "Smarty", "bytes": "30663" } ], "symlink_target": "" }
""" Unit tests for EC2 error responses. """ from lxml import etree from nova.api import ec2 from nova import context from nova import test from nova import wsgi class TestClientExceptionEC2(Exception): ec2_code = 'ClientException.Test' message = "Test Client Exception." code = 400 class TestServerExceptionEC2(Exception): ec2_code = 'ServerException.Test' message = "Test Server Exception." code = 500 class Ec2ErrorResponseTestCase(test.NoDBTestCase): """ Test EC2 error responses. This deals mostly with api/ec2/__init__.py code, especially the ec2_error_ex helper. """ def setUp(self): super(Ec2ErrorResponseTestCase, self).setUp() self.context = context.RequestContext('test_user_id', 'test_project_id') self.req = wsgi.Request.blank('/test') self.req.environ['nova.context'] = self.context def _validate_ec2_error(self, response, http_status, ec2_code, msg=None, unknown_msg=False): self.assertEqual(response.status_code, http_status, 'Expected HTTP status %s' % http_status) root_e = etree.XML(response.body) self.assertEqual(root_e.tag, 'Response', "Top element must be Response.") errors_e = root_e.find('Errors') self.assertEqual(len(errors_e), 1, "Expected exactly one Error element in Errors.") error_e = errors_e[0] self.assertEqual(error_e.tag, 'Error', "Expected Error element.") # Code code_e = error_e.find('Code') self.assertIsNotNone(code_e, "Code element must be present.") self.assertEqual(code_e.text, ec2_code) # Message if msg or unknown_msg: message_e = error_e.find('Message') self.assertIsNotNone(code_e, "Message element must be present.") if msg: self.assertEqual(message_e.text, msg) elif unknown_msg: self.assertEqual(message_e.text, "Unknown error occured.", "Error message should be anonymous.") # RequestID requestid_e = root_e.find('RequestID') self.assertIsNotNone(requestid_e, 'RequestID element should be present.') self.assertEqual(requestid_e.text, self.context.request_id) def test_exception_ec2_4xx(self): """ Test response to EC2 exception with code = 400. 
""" msg = "Test client failure." err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req) self._validate_ec2_error(err, TestClientExceptionEC2.code, TestClientExceptionEC2.ec2_code, msg) def test_exception_ec2_5xx(self): """ Test response to EC2 exception with code = 500. Expected errors are treated as client ones even with 5xx code. """ msg = "Test client failure with 5xx error code." err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req) self._validate_ec2_error(err, 400, TestServerExceptionEC2.ec2_code, msg) def test_unexpected_exception_ec2_4xx(self): """ Test response to unexpected EC2 exception with code = 400. """ msg = "Test unexpected client failure." err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req, unexpected=True) self._validate_ec2_error(err, TestClientExceptionEC2.code, TestClientExceptionEC2.ec2_code, msg) def test_unexpected_exception_ec2_5xx(self): """ Test response to unexpected EC2 exception with code = 500. Server exception messages (with code >= 500 or without code) should be filtered as they might contain sensitive information. """ msg = "Test server failure." err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req, unexpected=True) self._validate_ec2_error(err, TestServerExceptionEC2.code, TestServerExceptionEC2.ec2_code, unknown_msg=True) def test_unexpected_exception_builtin(self): """ Test response to builtin unexpected exception. Server exception messages (with code >= 500 or without code) should be filtered as they might contain sensitive information. """ msg = "Test server failure." err = ec2.ec2_error_ex(RuntimeError(msg), self.req, unexpected=True) self._validate_ec2_error(err, 500, 'RuntimeError', unknown_msg=True)
{ "content_hash": "7914d80b3b72aae7c55f6c9ae82b4382", "timestamp": "", "source": "github", "line_count": 124, "max_line_length": 76, "avg_line_length": 38.28225806451613, "alnum_prop": 0.5982725932167685, "repo_name": "bclau/nova", "id": "ce2661d5648d8d0935cb820edd071dabe60ed1b2", "size": "5359", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "nova/tests/api/ec2/test_error_response.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "13139382" }, { "name": "Shell", "bytes": "17194" } ], "symlink_target": "" }
from __future__ import unicode_literals

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial migration: creates the Alarm model."""

    # First migration in this app's history.
    initial = True

    dependencies = [
        # The Download FK below requires the downloader app's initial
        # migration to exist first.
        ('downloader', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Alarm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # When the alarm fires.
                ('datetime', models.DateTimeField()),
                # Optional link to a Download; CASCADE removes the alarm
                # when the referenced download is deleted.
                ('download', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='downloader.Download')),
            ],
        ),
    ]
{ "content_hash": "5d8e020166e44bb356388dd9a2dafb76", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 118, "avg_line_length": 28.791666666666668, "alnum_prop": 0.5861070911722142, "repo_name": "pietermarsman/wakeup", "id": "645cbddc5cd02f7e596b18a4dab0849a4ea33efd", "size": "764", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/clock/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1909" }, { "name": "Java", "bytes": "6814" }, { "name": "Python", "bytes": "18337" }, { "name": "Shell", "bytes": "2830" } ], "symlink_target": "" }
"""ACME Identifier Validation Challenges.""" import abc import codecs import functools import hashlib import logging import socket from typing import Type from cryptography.hazmat.primitives import hashes import josepy as jose from OpenSSL import crypto from OpenSSL import SSL import requests from acme import crypto_util from acme import errors from acme import fields from acme.mixins import ResourceMixin from acme.mixins import TypeMixin logger = logging.getLogger(__name__) class Challenge(jose.TypedJSONObjectWithFields): # _fields_to_partial_json """ACME challenge.""" TYPES: dict = {} @classmethod def from_json(cls, jobj): try: return super().from_json(jobj) except jose.UnrecognizedTypeError as error: logger.debug(error) return UnrecognizedChallenge.from_json(jobj) class ChallengeResponse(ResourceMixin, TypeMixin, jose.TypedJSONObjectWithFields): # _fields_to_partial_json """ACME challenge response.""" TYPES: dict = {} resource_type = 'challenge' resource = fields.Resource(resource_type) class UnrecognizedChallenge(Challenge): """Unrecognized challenge. ACME specification defines a generic framework for challenges and defines some standard challenges that are implemented in this module. However, other implementations (including peers) might define additional challenge types, which should be ignored if unrecognized. :ivar jobj: Original JSON decoded object. """ def __init__(self, jobj): super().__init__() object.__setattr__(self, "jobj", jobj) def to_partial_json(self): return self.jobj # pylint: disable=no-member @classmethod def from_json(cls, jobj): return cls(jobj) class _TokenChallenge(Challenge): """Challenge with token. 
:ivar bytes token: """ TOKEN_SIZE = 128 / 8 # Based on the entropy value from the spec """Minimum size of the :attr:`token` in bytes.""" # TODO: acme-spec doesn't specify token as base64-encoded value token = jose.Field( "token", encoder=jose.encode_b64jose, decoder=functools.partial( jose.decode_b64jose, size=TOKEN_SIZE, minimum=True)) # XXX: rename to ~token_good_for_url @property def good_token(self): # XXX: @token.decoder """Is `token` good? .. todo:: acme-spec wants "It MUST NOT contain any non-ASCII characters", but it should also warrant that it doesn't contain ".." or "/"... """ # TODO: check that path combined with uri does not go above # URI_ROOT_PATH! # pylint: disable=unsupported-membership-test return b'..' not in self.token and b'/' not in self.token class KeyAuthorizationChallengeResponse(ChallengeResponse): """Response to Challenges based on Key Authorization. :param unicode key_authorization: """ key_authorization = jose.Field("keyAuthorization") thumbprint_hash_function = hashes.SHA256 def verify(self, chall, account_public_key): """Verify the key authorization. :param KeyAuthorization chall: Challenge that corresponds to this response. :param JWK account_public_key: :return: ``True`` iff verification of the key authorization was successful. 
:rtype: bool """ parts = self.key_authorization.split('.') if len(parts) != 2: logger.debug("Key authorization (%r) is not well formed", self.key_authorization) return False if parts[0] != chall.encode("token"): logger.debug("Mismatching token in key authorization: " "%r instead of %r", parts[0], chall.encode("token")) return False thumbprint = jose.b64encode(account_public_key.thumbprint( hash_function=self.thumbprint_hash_function)).decode() if parts[1] != thumbprint: logger.debug("Mismatching thumbprint in key authorization: " "%r instead of %r", parts[0], thumbprint) return False return True def to_partial_json(self): jobj = super().to_partial_json() jobj.pop('keyAuthorization', None) return jobj class KeyAuthorizationChallenge(_TokenChallenge, metaclass=abc.ABCMeta): """Challenge based on Key Authorization. :param response_cls: Subclass of `KeyAuthorizationChallengeResponse` that will be used to generate ``response``. :param str typ: type of the challenge """ typ: str = NotImplemented response_cls: Type[KeyAuthorizationChallengeResponse] = NotImplemented thumbprint_hash_function = ( KeyAuthorizationChallengeResponse.thumbprint_hash_function) def key_authorization(self, account_key): """Generate Key Authorization. :param JWK account_key: :rtype unicode: """ return self.encode("token") + "." + jose.b64encode( account_key.thumbprint( hash_function=self.thumbprint_hash_function)).decode() def response(self, account_key): """Generate response to the challenge. :param JWK account_key: :returns: Response (initialized `response_cls`) to the challenge. :rtype: KeyAuthorizationChallengeResponse """ return self.response_cls( # pylint: disable=not-callable key_authorization=self.key_authorization(account_key)) @abc.abstractmethod def validation(self, account_key, **kwargs): """Generate validation for the challenge. 
Subclasses must implement this method, but they are likely to return completely different data structures, depending on what's necessary to complete the challenge. Interpretation of that return value must be known to the caller. :param JWK account_key: :returns: Challenge-specific validation. """ raise NotImplementedError() # pragma: no cover def response_and_validation(self, account_key, *args, **kwargs): """Generate response and validation. Convenience function that return results of `response` and `validation`. :param JWK account_key: :rtype: tuple """ return (self.response(account_key), self.validation(account_key, *args, **kwargs)) @ChallengeResponse.register class DNS01Response(KeyAuthorizationChallengeResponse): """ACME dns-01 challenge response.""" typ = "dns-01" def simple_verify(self, chall, domain, account_public_key): # pylint: disable=unused-argument """Simple verify. This method no longer checks DNS records and is a simple wrapper around `KeyAuthorizationChallengeResponse.verify`. :param challenges.DNS01 chall: Corresponding challenge. :param unicode domain: Domain name being verified. :param JWK account_public_key: Public key for the key pair being authorized. :return: ``True`` iff verification of the key authorization was successful. :rtype: bool """ verified = self.verify(chall, account_public_key) if not verified: logger.debug("Verification of key authorization in response failed") return verified @Challenge.register class DNS01(KeyAuthorizationChallenge): """ACME dns-01 challenge.""" response_cls = DNS01Response typ = response_cls.typ LABEL = "_acme-challenge" """Label clients prepend to the domain name being validated.""" def validation(self, account_key, **unused_kwargs): """Generate validation. :param JWK account_key: :rtype: unicode """ return jose.b64encode(hashlib.sha256(self.key_authorization( account_key).encode("utf-8")).digest()).decode() def validation_domain_name(self, name): """Domain name for TXT validation record. 
:param unicode name: Domain name being validated. """ return "{0}.{1}".format(self.LABEL, name) @ChallengeResponse.register class HTTP01Response(KeyAuthorizationChallengeResponse): """ACME http-01 challenge response.""" typ = "http-01" PORT = 80 """Verification port as defined by the protocol. You can override it (e.g. for testing) by passing ``port`` to `simple_verify`. """ WHITESPACE_CUTSET = "\n\r\t " """Whitespace characters which should be ignored at the end of the body.""" def simple_verify(self, chall, domain, account_public_key, port=None): """Simple verify. :param challenges.SimpleHTTP chall: Corresponding challenge. :param unicode domain: Domain name being verified. :param JWK account_public_key: Public key for the key pair being authorized. :param int port: Port used in the validation. :returns: ``True`` iff validation with the files currently served by the HTTP server is successful. :rtype: bool """ if not self.verify(chall, account_public_key): logger.debug("Verification of key authorization in response failed") return False # TODO: ACME specification defines URI template that doesn't # allow to use a custom port... Make sure port is not in the # request URI, if it's standard. if port is not None and port != self.PORT: logger.warning( "Using non-standard port for http-01 verification: %s", port) domain += ":{0}".format(port) uri = chall.uri(domain) logger.debug("Verifying %s at %s...", chall.typ, uri) try: http_response = requests.get(uri, verify=False) except requests.exceptions.RequestException as error: logger.error("Unable to reach %s: %s", uri, error) return False # By default, http_response.text will try to guess the encoding to use # when decoding the response to Python unicode strings. This guesswork # is error prone. RFC 8555 specifies that HTTP-01 responses should be # key authorizations with possible trailing whitespace. 
Since key # authorizations must be composed entirely of the base64url alphabet # plus ".", we tell requests that the response should be ASCII. See # https://datatracker.ietf.org/doc/html/rfc8555#section-8.3 for more # info. http_response.encoding = "ascii" logger.debug("Received %s: %s. Headers: %s", http_response, http_response.text, http_response.headers) challenge_response = http_response.text.rstrip(self.WHITESPACE_CUTSET) if self.key_authorization != challenge_response: logger.debug("Key authorization from response (%r) doesn't match " "HTTP response (%r)", self.key_authorization, challenge_response) return False return True @Challenge.register class HTTP01(KeyAuthorizationChallenge): """ACME http-01 challenge.""" response_cls = HTTP01Response typ = response_cls.typ URI_ROOT_PATH = ".well-known/acme-challenge" """URI root path for the server provisioned resource.""" @property def path(self): """Path (starting with '/') for provisioned resource. :rtype: string """ return '/' + self.URI_ROOT_PATH + '/' + self.encode('token') def uri(self, domain): """Create an URI to the provisioned resource. Forms an URI to the HTTPS server provisioned resource (containing :attr:`~SimpleHTTP.token`). :param unicode domain: Domain name being verified. :rtype: string """ return "http://" + domain + self.path def validation(self, account_key, **unused_kwargs): """Generate validation. :param JWK account_key: :rtype: unicode """ return self.key_authorization(account_key) @ChallengeResponse.register class TLSALPN01Response(KeyAuthorizationChallengeResponse): """ACME tls-alpn-01 challenge response.""" typ = "tls-alpn-01" PORT = 443 """Verification port as defined by the protocol. You can override it (e.g. for testing) by passing ``port`` to `simple_verify`. 
""" ID_PE_ACME_IDENTIFIER_V1 = b"1.3.6.1.5.5.7.1.30.1" ACME_TLS_1_PROTOCOL = "acme-tls/1" @property def h(self): """Hash value stored in challenge certificate""" return hashlib.sha256(self.key_authorization.encode('utf-8')).digest() def gen_cert(self, domain, key=None, bits=2048): """Generate tls-alpn-01 certificate. :param unicode domain: Domain verified by the challenge. :param OpenSSL.crypto.PKey key: Optional private key used in certificate generation. If not provided (``None``), then fresh key will be generated. :param int bits: Number of bits for newly generated key. :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey` """ if key is None: key = crypto.PKey() key.generate_key(crypto.TYPE_RSA, bits) der_value = b"DER:" + codecs.encode(self.h, 'hex') acme_extension = crypto.X509Extension(self.ID_PE_ACME_IDENTIFIER_V1, critical=True, value=der_value) return crypto_util.gen_ss_cert(key, [domain], force_san=True, extensions=[acme_extension]), key def probe_cert(self, domain, host=None, port=None): """Probe tls-alpn-01 challenge certificate. :param unicode domain: domain being validated, required. :param string host: IP address used to probe the certificate. :param int port: Port used to probe the certificate. """ if host is None: host = socket.gethostbyname(domain) logger.debug('%s resolved to %s', domain, host) if port is None: port = self.PORT return crypto_util.probe_sni(host=host, port=port, name=domain, alpn_protocols=[self.ACME_TLS_1_PROTOCOL]) def verify_cert(self, domain, cert): """Verify tls-alpn-01 challenge certificate. :param unicode domain: Domain name being validated. :param OpensSSL.crypto.X509 cert: Challenge certificate. :returns: Whether the certificate was successfully verified. :rtype: bool """ # pylint: disable=protected-access names = crypto_util._pyopenssl_cert_or_req_all_names(cert) logger.debug('Certificate %s. 
SANs: %s', cert.digest('sha256'), names) if len(names) != 1 or names[0].lower() != domain.lower(): return False for i in range(cert.get_extension_count()): ext = cert.get_extension(i) # FIXME: assume this is the ACME extension. Currently there is no # way to get full OID of an unknown extension from pyopenssl. if ext.get_short_name() == b'UNDEF': data = ext.get_data() return data == self.h return False # pylint: disable=too-many-arguments def simple_verify(self, chall, domain, account_public_key, cert=None, host=None, port=None): """Simple verify. Verify ``validation`` using ``account_public_key``, optionally probe tls-alpn-01 certificate and check using `verify_cert`. :param .challenges.TLSALPN01 chall: Corresponding challenge. :param str domain: Domain name being validated. :param JWK account_public_key: :param OpenSSL.crypto.X509 cert: Optional certificate. If not provided (``None``) certificate will be retrieved using `probe_cert`. :param string host: IP address used to probe the certificate. :param int port: Port used to probe the certificate. :returns: ``True`` if and only if client's control of the domain has been verified. :rtype: bool """ if not self.verify(chall, account_public_key): logger.debug("Verification of key authorization in response failed") return False if cert is None: try: cert = self.probe_cert(domain=domain, host=host, port=port) except errors.Error as error: logger.debug(str(error), exc_info=True) return False return self.verify_cert(domain, cert) @Challenge.register # pylint: disable=too-many-ancestors class TLSALPN01(KeyAuthorizationChallenge): """ACME tls-alpn-01 challenge.""" response_cls = TLSALPN01Response typ = response_cls.typ def validation(self, account_key, **kwargs): """Generate validation. :param JWK account_key: :param unicode domain: Domain verified by the challenge. :param OpenSSL.crypto.PKey cert_key: Optional private key used in certificate generation. If not provided (``None``), then fresh key will be generated. 
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey` """ return self.response(account_key).gen_cert( key=kwargs.get('cert_key'), domain=kwargs.get('domain')) @staticmethod def is_supported(): """ Check if TLS-ALPN-01 challenge is supported on this machine. This implies that a recent version of OpenSSL is installed (>= 1.0.2), or a recent cryptography version shipped with the OpenSSL library is installed. :returns: ``True`` if TLS-ALPN-01 is supported on this machine, ``False`` otherwise. :rtype: bool """ return (hasattr(SSL.Connection, "set_alpn_protos") and hasattr(SSL.Context, "set_alpn_select_callback")) @Challenge.register class DNS(_TokenChallenge): """ACME "dns" challenge.""" typ = "dns" LABEL = "_acme-challenge" """Label clients prepend to the domain name being validated.""" def gen_validation(self, account_key, alg=jose.RS256, **kwargs): """Generate validation. :param .JWK account_key: Private account key. :param .JWA alg: :returns: This challenge wrapped in `.JWS` :rtype: .JWS """ return jose.JWS.sign( payload=self.json_dumps(sort_keys=True).encode('utf-8'), key=account_key, alg=alg, **kwargs) def check_validation(self, validation, account_public_key): """Check validation. :param JWS validation: :param JWK account_public_key: :rtype: bool """ if not validation.verify(key=account_public_key): return False try: return self == self.json_loads( validation.payload.decode('utf-8')) except jose.DeserializationError as error: logger.debug("Checking validation for DNS failed: %s", error) return False def gen_response(self, account_key, **kwargs): """Generate response. :param .JWK account_key: Private account key. :param .JWA alg: :rtype: DNSResponse """ return DNSResponse(validation=self.gen_validation( account_key, **kwargs)) def validation_domain_name(self, name): """Domain name for TXT validation record. :param unicode name: Domain name being validated. 
""" return "{0}.{1}".format(self.LABEL, name) @ChallengeResponse.register class DNSResponse(ChallengeResponse): """ACME "dns" challenge response. :param JWS validation: """ typ = "dns" validation = jose.Field("validation", decoder=jose.JWS.from_json) def check_validation(self, chall, account_public_key): """Check validation. :param challenges.DNS chall: :param JWK account_public_key: :rtype: bool """ return chall.check_validation(self.validation, account_public_key)
{ "content_hash": "f45a30e87f216377c6f96fa5f2440c9c", "timestamp": "", "source": "github", "line_count": 620, "max_line_length": 98, "avg_line_length": 32.475806451612904, "alnum_prop": 0.6307424882046189, "repo_name": "stweil/letsencrypt", "id": "2737f9f22fcac2feeebd7f1fa9138bee008bf0fa", "size": "20135", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "acme/acme/challenges.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "14147" }, { "name": "Augeas", "bytes": "4997" }, { "name": "Batchfile", "bytes": "35037" }, { "name": "DIGITAL Command Language", "bytes": "133" }, { "name": "Groff", "bytes": "222" }, { "name": "Makefile", "bytes": "37309" }, { "name": "Nginx", "bytes": "4270" }, { "name": "Python", "bytes": "1355274" }, { "name": "Shell", "bytes": "120566" }, { "name": "Standard ML", "bytes": "256" } ], "symlink_target": "" }
import unittest

from programy.config.client.webchat import WebChatConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.client.client import ClientConfiguration
from programy.config.client.webchat import WebChatClientConfiguration


class WebChatConfigurationTests(unittest.TestCase):
    """Checks that a bare ``webchat`` YAML section populates WebChatConfiguration."""

    def test_init(self):
        base_config = ClientConfiguration()
        yaml_file = YamlConfigurationFile(base_config)
        self.assertIsNotNone(yaml_file)

        yaml_file.load_from_text("""
webchat:
  host: 127.0.0.1
  port: 5000
  debug: false
""", ".")

        section = WebChatConfiguration()
        section.load_config_section(yaml_file, ".")

        # The three webchat settings must round-trip from the YAML text.
        self.assertEqual("127.0.0.1", section.host)
        self.assertEqual(5000, section.port)
        self.assertEqual(False, section.debug)


class WebChatClientConfigurationTests(unittest.TestCase):
    """Checks that a full client config (brain + bot + webchat) loads as a whole."""

    def test_init(self):
        config = WebChatClientConfiguration()
        yaml_file = YamlConfigurationFile(config)
        self.assertIsNotNone(yaml_file)

        yaml_file.load_from_text("""
brain:
  supress_warnings: true
  allow_system_aiml: true
  allow_learn_aiml: true
  allow_learnf_aiml: true
  files:
    aiml:
      files: /aiml
      extension: .aiml
      directories: true
    sets:
      files: /sets
      extension: .txt
      directories: false
    maps:
      files: /maps
      extension: .txt
      directories: true
    denormal: denormal.txt
    normal: normal.txt
    gender: gender.txt
    person: person.txt
    person2: person2.txt
    predicates: predicates.txt
    pronouns: pronouns.txt
    properties: properties.txt
    triples: triples.txt
    preprocessors: preprocessors.txt
    postprocessors: postprocessors.txt
  services:
    REST:
      path: programy.utils.services.webchat.GenericRESTService
    Pannous:
      path: programy.utils.services.pannous.PannousService
    Pandora:
      path: programy.utils.services.pandora.PandoraService
    Wikipedia:
      path: programy.utils.services.wikipedia.WikipediaService
bot:
  prompt: ">>>"
  default_response: Sorry, I don't have an answer for that!
  exit_response: So long, and thanks for the fish!
  initial_question: Hi, how can I help you?
webchat:
  host: 127.0.0.1
  port: 5000
  debug: false
""", ".")

        # Loading must have produced all three sub-configuration objects.
        self.assertIsNotNone(config.bot_configuration)
        self.assertIsNotNone(config.brain_configuration)
        self.assertIsNotNone(config.webchat_configuration)
{ "content_hash": "d43eabb40413e5dce5c092bfcf5852e3", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 74, "avg_line_length": 32.59139784946237, "alnum_prop": 0.5836357637743319, "repo_name": "dkamotsky/program-y", "id": "d0db380437f116fb45a4d59aa99e513b69dddfe1", "size": "3031", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/test/config/client/test_webchat.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "937" }, { "name": "HTML", "bytes": "1583" }, { "name": "Python", "bytes": "1131157" }, { "name": "Shell", "bytes": "3481" } ], "symlink_target": "" }
"""ansible module for ec2 ami copy to all regions"""
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4

DOCUMENTATION = '''
---
module: aos_ami_copy_to_regions
short_description: this module copies an ami out to all regions
description:
  - this module accepts an ami id and copies it to all regions
options:
  ami_name:
    description:
    - name of the ami
    required: false
    default: none
    aliases: []
  ami_id:
    description:
    - id of the ami
    required: false
    default: none
    aliases: []
  region:
    description:
    - the region where the ami exists
    required: false
    default: us-east-1
    aliases: []
'''

EXAMPLES = '''
# perform a list on the enabled repos
- aos_ami_copy_to_regions:
    ami_id: ami-xxxxxx
    region: us-east-1
  register: repos
'''

import boto.ec2


class AMICopy(object):
    """Resolve an AMI by id or name and copy it to a fixed list of regions."""

    # Destination regions for the copy operation.
    regions_to_copy = ['ap-southeast-1',
                       'ap-southeast-2',
                       'ca-central-1',
                       'eu-central-1',
                       'eu-west-1',
                       'eu-west-2',
                       'sa-east-1',
                       'us-east-1',
                       'us-east-2',
                       'us-west-1',
                       'us-west-2',
                      ]

    def __init__(self, aid=None, name=None, region='us-east-1'):
        '''constructor for amicopy class

        :param aid: AMI id to look up (takes precedence over name)
        :param name: AMI name to look up
        :param region: source region the AMI lives in
        '''
        self._ami = None
        self.ami_id = aid
        self.ami_name = name
        self.region = region
        self.conn = boto.ec2.connect_to_region(region)

    @property
    def ami(self):
        '''Lazily-resolved boto Image for ami_id/ami_name.

        Resolution happens on first access; raises IndexError if the
        lookup returns no images (callers guard with get_images() first).
        '''
        if self._ami is None:  # 'is None', not '== None'
            images = self.get_images()
            self._ami = images[0]
        return self._ami

    @ami.setter
    def ami(self, inc):
        '''setter for ami'''
        self._ami = inc

    def get_images(self, filters=None):
        '''Return images matching a filter.

        :param filters: optional dict of EC2 DescribeImages filters; when
            omitted, filters on ami_id (preferred) or ami_name.
        :returns: list of boto Image objects (possibly empty)
        '''
        filt = {}
        if filters:
            filt = filters
        elif self.ami_id:
            # EC2 DescribeImages filter names are hyphenated: 'image-id'.
            # The previous key 'image_id' is not a valid filter name.
            filt['image-id'] = self.ami_id
        else:
            filt['name'] = self.ami_name

        return self.conn.get_all_images(filters=filt)

    def copy_to_region(self):
        """Copy the resolved AMI to every region in regions_to_copy.

        Regions that already contain an image with the same name are left
        untouched; their existing image id is reported instead.

        :returns: dict mapping region name -> image id in that region
        """
        ami_dict = {}
        for region in AMICopy.regions_to_copy:
            conn = boto.ec2.connect_to_region(region)
            ami = conn.get_all_images(filters={'name': self.ami.name})
            if not ami:
                # copy_image returns a CopyImage result carrying image_id
                ami = conn.copy_image(self.region, self.ami.id)
                ami_dict[region] = ami.image_id
            else:
                ami_dict[region] = ami[0].id

        return ami_dict

    @staticmethod
    def run_ansible(module):
        """Run the module logic and exit through the AnsibleModule object."""
        amicopy = AMICopy(module.params.get('ami_id', None),
                          module.params.get('ami_name', None),
                          module.params['region'],
                         )

        # Step 1: Get the current ami name
        images = amicopy.get_images()
        if len(images) == 0:
            # Must exit through the module so ansible receives a JSON
            # result; the old 'return {...}' was discarded by main() and
            # the module produced no output at all.
            module.exit_json(changed=False,
                             msg='Unable to find ami with id or name.',
                             rc=0)

        amicopy.ami = images[0]

        # Step 2: if we are state=list, return the ami
        if module.params['state'] == 'list':
            module.exit_json(changed=False, ami=amicopy.ami, rc=0)

        # Step 2: if we are state=present, copy out the ami.
        # Since ami doesn't have a sha or identifier other than name, we
        # check name in each destination region.
        elif module.params['state'] == 'present':

            # Step 3: copy the image out to the destination regions
            results = amicopy.copy_to_region()

            # Everything went ok, no changes were made
            if not results:
                module.exit_json(changed=False, results=results, rc=0)

            module.exit_json(changed=True, results=results, rc=0)

        module.fail_json(msg="unsupported state.", rc=1)


def main():
    """Create the ansible module and run the ansible code"""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['list', 'present'],
                       type='str'),
            ami_id=dict(default=None, type='str'),
            ami_name=dict(default=None, type='str'),
            region=dict(default='us-east-1', choices=AMICopy.regions_to_copy,
                        type='str'),
            query=dict(default='all', choices=['all', 'enabled', 'disabled']),
        ),
        supports_check_mode=False,
    )

    # call the ansible function
    AMICopy.run_ansible(module)


if __name__ == '__main__':
    # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
    # import module snippets
    from ansible.module_utils.basic import *
    main()
{ "content_hash": "6f7ca0d1e46735a8ebe6a67addd2e85e", "timestamp": "", "source": "github", "line_count": 159, "max_line_length": 90, "avg_line_length": 29.962264150943398, "alnum_prop": 0.5447103274559194, "repo_name": "appuio/ansible-role-openshift-zabbix-monitoring", "id": "86f8754821b3b2435d946350ebd13b0f20f0e309", "size": "4782", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "vendor/openshift-tools/ansible/roles/lib_utils/library/oo_ami_copy_to_regions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3095" } ], "symlink_target": "" }
# coding=utf-8
__source__ = 'https://leetcode.com/problems/delete-node-in-a-bst/#/description'
# Time:  O()
# Space: O()
#
# Description: 450. Delete Node in a BST
#
# Given a root node reference of a BST and a key,
# delete the node with the given key in the BST.
# Return the root node reference (possibly updated) of the BST.
#
# Basically, the deletion can be divided into two stages:
#
# Search for a node to remove.
# If the node is found, delete the node.
# Note: Time complexity should be O(height of tree).
#
# Example:
#
# root = [5,3,6,2,4,null,7]
# key = 3
#
#     5
#    / \
#   3   6
#  / \   \
# 2   4   7
#
# Given key to delete is 3. So we find the node with value 3 and delete it.
#
# One valid answer is [5,4,6,2,null,null,7], shown in the following BST.
#
#     5
#    / \
#   4   6
#  /     \
# 2       7
#
# Another valid answer is [5,2,6,null,4,null,7].
#
#   5
#  / \
# 2   6
#  \   \
#   4   7
#
# Hide Company Tags Uber
# Hide Tags Tree

import unittest

# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution(object):
    # NOTE(review): the Python solution is intentionally unimplemented;
    # the reference implementations for this problem live in the module-level
    # ``Java`` string below.
    def deleteNode(self, root, key):
        """
        :type root: TreeNode
        :type key: int
        :rtype: TreeNode
        """
        pass


class TestMethods(unittest.TestCase):
    # Placeholder test only — there is no Python implementation to exercise.
    def test_Local(self):
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest.main()

# Reference Java implementations kept as a plain string (never executed).
Java = '''
# Thought:

Steps:
Recursively find the node that has the same value as the key, while setting the left/right nodes equal to the returned subtree
Once the node is found, have to handle the below 4 cases
node doesn't have left or right - return null
node only has left subtree- return the left subtree
node only has right subtree- return the right subtree
node has both left and right - find the minimum value in the right subtree, set that value to the currently found node, then recursively delete the minimum value in the right subtree

/**
 * Definition for a binary tree node.
 * public class TreeNode {
 *     int val;
 *     TreeNode left;
 *     TreeNode right;
 *     TreeNode(int x) { val = x; }
 * }
 */
# 3ms 100%
class Solution {
    public TreeNode deleteNode(TreeNode root, int key) {
        TreeNode target = root, parent = null;
        while (target != null && target.val != key) {
            parent = target;
            if (key > target.val) target = target.right;
            else target = target.left;
        }
        if (target == null) return root; // not found
        if (target.right == null) { // no right subtree
            if (parent == null) return target.left;
            if (target == parent.left) parent.left = target.left;
            else parent.right = target.left;
            return root;
        }
        // with right subtree
        TreeNode prev = target, p = target.right;
        while (p.left != null) {
            prev = p;
            p = p.left;
        }
        target.val = p.val;
        if (p == prev.left) prev.left = p.right;
        else prev.right = p.right;
        return root;
    }
}

# 4ms 66.98%
class Solution {
    public TreeNode deleteNode(TreeNode root, int key) {
        if (root == null) return null;
        if (key < root.val) {
            root.left = deleteNode(root.left, key);
        }else if ( key > root.val) {
            root.right = deleteNode(root.right, key);
        } else {
            if (root.left == null) {
                return root.right;
            } else if (root.right == null) {
                return root.left;
            }
            TreeNode minNode = findMin(root.right);
            root.val = minNode.val;
            root.right = deleteNode(root.right, root.val);
        }
        return root;
    }

    private TreeNode findMin(TreeNode node) {
        while( node.left != null) {
            node = node.left;
        }
        return node;
    }
}

# 5ms 26.58%
class Solution {
    public TreeNode deleteNode(TreeNode root, int key) {
        if(root==null) return null;
        if(root.val > key){
            root.left = deleteNode(root.left, key);
        }else if(root.val < key){
            root.right = deleteNode(root.right, key);
        }else{
            if(root.left==null) return root.right;
            if(root.right==null) return root.left;
            //如果左右都不是null,要从右边的subtree里面找最小的
            //要不就从左边的subtree里面找最大的
            /*
            int min = findMin(root.right);
            root.val = min;
            root.right = deleteNode(root.right, min);*/
            int max = findMax(root.left);
            root.val = max;
            root.left = deleteNode(root.left, max);
        }
        return root;
    }

    public int findMax(TreeNode root){
        TreeNode temp = root;
        while(temp.right!=null){
            temp = temp.right;
        }
        return temp.val;
    }

    public int findMin(TreeNode root){
        TreeNode temp = root;
        while(temp.left!=null){
            temp = temp.left;
        }
        return temp.val;
    }
}
'''
{ "content_hash": "e648ca922b1881862137eef3a5c4b07a", "timestamp": "", "source": "github", "line_count": 202, "max_line_length": 79, "avg_line_length": 25.257425742574256, "alnum_prop": 0.5582124656997256, "repo_name": "JulyKikuAkita/PythonPrac", "id": "abced0cf8ca4979ca172e121a27820ea62c02c93", "size": "5164", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cs15211/DeleteNodeInABST.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "DIGITAL Command Language", "bytes": "191608" }, { "name": "HTML", "bytes": "647778" }, { "name": "Python", "bytes": "5429558" } ], "symlink_target": "" }
from builtins import str
import json
import logging

import pytz
from dateutil.parser import parse
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import reverse, reverse_lazy
from django.db.utils import IntegrityError
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from rest_framework import mixins, status
from rest_framework.filters import (
    DjangoFilterBackend, SearchFilter, OrderingFilter)
from rest_framework.generics import GenericAPIView
from rest_framework.parsers import JSONParser, FormParser, MultiPartParser
from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
from rest_framework.response import Response

from realtime.app_settings import SLUG_ASH_LANDING_PAGE, \
    LANDING_PAGE_SYSTEM_CATEGORY
from realtime.forms.ash import AshUploadForm
from realtime.models.ash import Ash, AshReport
from realtime.models.coreflatpage import CoreFlatPage
from realtime.models.volcano import Volcano
from realtime.serializers.ash_serializer import (
    AshSerializer,
    AshReportSerializer,
    AshGeoJsonSerializer)

__author__ = 'lucernae'
__project_name__ = 'inasafe-django'
__filename__ = 'ash'
__date__ = '7/15/16'
__copyright__ = 'lana.pcfre@gmail.com'

LOGGER = logging.getLogger(__name__)


def index(request):
    """Render the ash landing page with its editable flat-page content."""
    # NOTE(review): the POST branch is a no-op — presumably a placeholder;
    # confirm whether POST handling was ever intended here.
    if request.method == 'POST':
        pass

    # Pick the flat page matching the ash landing slug, category and the
    # request's language (first() returns None if none exists).
    landing_page = CoreFlatPage.objects.filter(
        slug_id=SLUG_ASH_LANDING_PAGE,
        system_category=LANDING_PAGE_SYSTEM_CATEGORY,
        language=request.LANGUAGE_CODE).first()

    context = RequestContext(request)
    return render_to_response(
        'realtime/ash/index.html',
        {
            'landing_page': landing_page,
            'LANDING_PAGE_SLUG_ID': SLUG_ASH_LANDING_PAGE,
            'LANDING_PAGE_SYSTEM_CATEGORY': LANDING_PAGE_SYSTEM_CATEGORY
        },
        context_instance=context)


@permission_required(
    perm=['realtime.add_ash'],
    login_url=reverse_lazy('realtime_admin:login'))
def upload_form(request):
    """Upload ash event.

    GET renders an empty upload form plus the volcano list (as JSON for the
    client-side picker); POST validates and saves the Ash instance after
    localizing its event time to the browser-provided timezone.
    """
    context = RequestContext(request)
    if request.method == 'POST':
        form = AshUploadForm(request.POST, request.FILES)
        if form.is_valid():
            instance = form.instance
            # convert timezone from browser and add it to time information
            tz_name = instance.event_time_zone_string
            try:
                tz = pytz.timezone(tz_name)
            except BaseException:
                # Fall back to UTC when the browser sent an unknown tz name.
                tz = pytz.utc
            instance.event_time = tz.localize(instance.event_time.replace(
                tzinfo=None))
            form.save()
            # Redirect to the document list after POST
            return HttpResponseRedirect(reverse('realtime:ash_index'))
    else:
        form = AshUploadForm()  # A empty, unbound form

    volcano_list = []
    for volcano in Volcano.objects.all():
        v = {
            'id': volcano.id,
            'name': str(volcano),
            'timezone': volcano.timezone
        }
        volcano_list.append(v)

    volcano_list_string = json.dumps(volcano_list)

    # Render the form
    return render_to_response(
        'realtime/ash/upload_modal.html',
        {
            'form': form,
            'volcano_list': volcano_list_string
        },
        context_instance=context)


class AshList(mixins.ListModelMixin, mixins.CreateModelMixin,
              GenericAPIView):
    """Views for Ash models: list (GET) and create (POST)."""

    queryset = Ash.objects.all()
    serializer_class = AshSerializer
    parser_classes = [JSONParser, FormParser]
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    # filter_fields = ('volcano_name', 'region', 'subregion', 'morphology')
    search_fields = ('volcano__volcano_name', 'region', 'subregion',
                     'morphology')
    ordering = ('volcano__volcano_name', )
    permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )

    def get(self, request, *args, **kwargs):
        """Paginated list of Ash events."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create a new Ash event."""
        retval = self.create(request, *args, **kwargs)
        return retval


class AshDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
                mixins.DestroyModelMixin, GenericAPIView):
    """Detail view for a single Ash event, addressed by volcano name and
    event time (both parsed from the URL)."""

    queryset = Ash.objects.all()
    serializer_class = AshSerializer
    lookup_field = 'id'
    parser_classes = [JSONParser, FormParser, MultiPartParser]
    permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )

    def get(self, request, volcano_name=None, event_time=None):
        """Retrieve one Ash event; redirect to the list when unaddressed."""
        try:
            if volcano_name and event_time:
                instance = Ash.objects.get(
                    volcano__volcano_name__iexact=volcano_name,
                    event_time=parse(event_time))
                serializer = self.get_serializer(instance)
                return Response(serializer.data)
            else:
                return redirect(
                    'realtime:ash_list')
        # NOTE(review): the query above raises Ash.DoesNotExist, not
        # AshReport.DoesNotExist — a missing Ash would propagate as a 500
        # instead of this 404. Verify the intended exception type.
        except AshReport.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except MultipleObjectsReturned as e:
            # this should not happen.
            # But in case it is happening, returned the last object, but still
            # log the error to sentry
            # NOTE(review): this fallback queries AshReport but serializes it
            # with the Ash serializer — confirm this path is correct.
            LOGGER.warning(e.message)
            instance = AshReport.objects.filter(
                ash__volcano__volcano_name__iexact=volcano_name,
                ash__event_time=parse(event_time)).last()
            serializer = self.get_serializer(instance)
            return Response(serializer.data)

    def put(self, request, volcano_name=None, event_time=None,
            *args, **kwargs):
        """Partially update an Ash event; replaces uploaded files in place."""
        try:
            if volcano_name and event_time:
                event_time = parse(event_time)
                instance = Ash.objects.get(
                    volcano__volcano_name__iexact=volcano_name,
                    event_time=event_time)
                # The mixin looks the object up by the lookup_field ('id'),
                # so inject the resolved id into self.kwargs.
                self.kwargs.update(id=instance.id)
                # Delete old files before the update writes the new uploads.
                if 'hazard_file' in request.FILES and instance.hazard_file:
                    instance.hazard_file.delete()
                if 'impact_files' in request.FILES and instance.impact_files:
                    instance.impact_files.delete()
                retval = self.update(request, partial=True, *args, **kwargs)
                return retval
            else:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # Broad catch: any failure (including a missing Ash) is logged
            # and reported as a 500.
            LOGGER.warning(e)
            return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def delete(self, request, *args, **kwargs):
        """Delete the addressed Ash event."""
        return self.destroy(request, *args, **kwargs)


class AshReportList(mixins.ListModelMixin, mixins.RetrieveModelMixin,
                    mixins.CreateModelMixin, GenericAPIView):
    """List/create view for AshReport objects, optionally scoped to one
    volcano and event time via the URL."""

    queryset = AshReport.objects.all()
    serializer_class = AshReportSerializer
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_fields = ('ash__id', 'language')
    search_fields = ('ash__id', 'language')
    ordering_fields = ('ash__id', 'language')
    ordering = ('ash__id',)
    permission_classes = (DjangoModelPermissionsOrAnonReadOnly,)

    def get(self, request, volcano_name=None, event_time=None,
            *args, **kwargs):
        """List reports — all of them, or only those of a given ash event."""
        try:
            if volcano_name:
                ash = Ash.objects.filter(
                    volcano__volcano_name__iexact=volcano_name)
                if event_time:
                    ash = ash.filter(
                        event_time=parse(event_time))
                # Flatten the reports of every matching ash event.
                instances = []
                for a in ash:
                    for r in a.reports.all():
                        instances.append(r)
                page = self.paginate_queryset(instances)
                if page is not None:
                    serializer = self.get_serializer(page, many=True)
                    return self.get_paginated_response(serializer.data)
                serializer = self.get_serializer(instances, many=True)
                return Response(serializer.data)
            else:
                return self.list(request, *args, **kwargs)
        except (AshReport.DoesNotExist, Ash.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)

    def post(self, request, volcano_name=None, event_time=None,
             language=None):
        """Create a report for an ash event; 400 when one already exists
        for the same (ash, language) pair."""
        data = request.data
        try:
            # URL kwargs win over body fields when both are present.
            volcano_name = volcano_name or data.get('volcano_name')
            event_time = event_time or data.get('event_time')
            language = language or data.get('language')
            ash = Ash.objects.get(
                volcano__volcano_name__iexact=volcano_name,
                event_time=parse(event_time))
            data['ash'] = ash.id
            report = ash.reports.filter(language=language)
        except Ash.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

        if report:
            # cannot post report if it is already exists
            serializer = AshReportSerializer(report[0])
            return Response(
                serializer.data, status=status.HTTP_400_BAD_REQUEST)

        serializer = AshReportSerializer(data=data, partial=True)
        if serializer.is_valid():
            try:
                serializer.save()
                return Response(
                    serializer.data, status=status.HTTP_201_CREATED)
            except (ValidationError, IntegrityError) as e:
                # This happens when simultaneuously two conn trying to save
                # the same unique_together fields (earthquake, language)
                # Should warn this to sentry
                LOGGER.warning(e.message)
                return Response(
                    serializer.errors,
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class AshReportDetail(mixins.ListModelMixin, mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin, mixins.DestroyModelMixin,
                      GenericAPIView):
    """Detail view for a single AshReport, addressed by volcano name,
    event time and report language."""

    queryset = AshReport.objects.all()
    serializer_class = AshReportSerializer
    parser_classes = (JSONParser, FormParser, MultiPartParser)
    permission_classes = (DjangoModelPermissionsOrAnonReadOnly,)

    def get(self, request, volcano_name=None, event_time=None,
            language=None, *args, **kwargs):
        """Retrieve one report; redirect to the report list when no
        language is given."""
        try:
            if language:
                instance = AshReport.objects.get(
                    ash__volcano__volcano_name__iexact=volcano_name,
                    ash__event_time=parse(event_time),
                    language=language)
                serializer = self.get_serializer(instance)
                return Response(serializer.data)
            else:
                return redirect(
                    'realtime:ash_report_list',
                    volcano_name=volcano_name,
                    event_time=event_time)
        except AshReport.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except MultipleObjectsReturned as e:
            # this should not happen.
            # But in case it is happening, returned the last object, but still
            # log the error to sentry
            LOGGER.warning(e.message)
            instance = AshReport.objects.filter(
                ash__volcano__volcano_name__iexact=volcano_name,
                ash__event_time=parse(event_time),
                language=language).last()
            serializer = self.get_serializer(instance)
            return Response(serializer.data)

    def put(self, request, volcano_name=None, event_time=None,
            language=None):
        """Replace a report's data (and its map file) in place."""
        data = request.data
        try:
            if volcano_name and event_time and language:
                report = AshReport.objects.get(
                    ash__volcano__volcano_name__iexact=volcano_name,
                    ash__event_time=parse(event_time),
                    language=language)
            else:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        except AshReport.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

        # delete previous files
        report.report_map.delete()
        serializer = AshReportSerializer(report, data=data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, volcano_name, event_time, language):
        """Delete the addressed report."""
        try:
            report = AshReport.objects.get(
                ash__volcano__volcano_name__iexact=volcano_name,
                ash__event_time=parse(event_time),
                language=language)
        except AshReport.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        report.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)


def ash_report_map(request, volcano_name, event_time, language='en'):
    """View to serve pdf report.

    Streams the stored report map inline as application/pdf; raises
    Http404 when no report matches.
    """
    try:
        instance = AshReport.objects.get(
            ash__volcano__volcano_name__iexact=volcano_name,
            ash__event_time=parse(event_time),
            language=language)
        response = HttpResponse(
            instance.report_map.read(),
            content_type='application/pdf')
        response['Content-Disposition'] = 'inline; filename="{0}";'.format(
            instance.report_map_filename)
        return response
    except AshReport.DoesNotExist:
        raise Http404()


class AshFeatureList(AshList):
    """Same listing as AshList but serialized as GeoJSON, unpaginated."""

    serializer_class = AshGeoJsonSerializer
    pagination_class = None
{ "content_hash": "26fadedcfe10b0c49625560cbc0e2e43", "timestamp": "", "source": "github", "line_count": 362, "max_line_length": 79, "avg_line_length": 38.50828729281768, "alnum_prop": 0.6128407460545193, "repo_name": "AIFDR/inasafe-django", "id": "f6b55f18ba59353c245d816733c401adb2678e53", "size": "13955", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "django_project/realtime/views/ash.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "196369" }, { "name": "HTML", "bytes": "93481" }, { "name": "JavaScript", "bytes": "346781" }, { "name": "Makefile", "bytes": "9201" }, { "name": "Python", "bytes": "285851" }, { "name": "Shell", "bytes": "2169" } ], "symlink_target": "" }
from django.db import models
from django_autoslug.fields import AutoSlugField


class Page(models.Model):
    """Minimal hierarchical page model exercising AutoSlugField.

    The slug is populated from ``title``; ``recursive='parent'`` names the
    foreign key the field uses to build slugs hierarchically.
    """
    title = models.CharField(max_length=255)
    # Slug derived from the title; recursion follows the 'parent' FK below.
    slug = AutoSlugField(populate_from=('title',), recursive='parent')
    # Self-referential FK; optional, so root pages have parent=None.
    parent = models.ForeignKey('Page', blank=True, null=True)

    def __unicode__(self):
        # Python 2-style string representation (this codebase targets
        # legacy Django; no __str__ defined).
        return self.title
{ "content_hash": "efed6ce6e4410a39621b464b9b16a422", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 70, "avg_line_length": 33.7, "alnum_prop": 0.712166172106825, "repo_name": "aljosa/django-autoslug-field", "id": "b19dc00c865c2d3d0b187b70097fa99e05158f1c", "size": "337", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "testautoslug/testapp/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "12179" } ], "symlink_target": "" }
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase

'''
AcceptBlockTest -- test processing of unrequested blocks.

Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).

Setup: two nodes, node0 and node1, not connected to each other.  Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.  We have one NodeConn connection to each, test_node and white_node
respectively.

The test:
1. Generate one block on each node, to leave IBD.

2. Mine a new block on each tip, and deliver to each node from node's peer.
   The tip should advance.

3. Mine a block that forks the previous block, and deliver to each node from
   corresponding peer.
   Node0 should not process this block (just accept the header), because it is
   unrequested and doesn't have more work than the tip.
   Node1 should process because this is coming from a whitelisted peer.

4. Send another block that builds on the forking block.
   Node0 should process this block but be stuck on the shorter chain, because
   it's missing an intermediate block.
   Node1 should reorg to this longer chain.

4b.Send 288 more blocks on the longer chain.
   Node0 should process all but the last block (too far ahead in height).
   Send all headers to Node1, and then send the last block in that chain.
   Node1 should accept the block because it's coming from a whitelisted peer.

5. Send a duplicate of the block in #3 to Node0.
   Node0 should not process the block because it is unrequested, and stay on
   the shorter chain.

6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that Node0 has the missing height 2 block and send a
   getdata.

7. Send Node0 the missing block again.
   Node0 should process and the tip should advance.
'''

# TestNode: bare-bones "peer".  Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.create_callback_map()
        self.connection = None
        # Nonce for the next ping; bumped after each sync_with_ping round trip.
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        self.connection = conn

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node after delivery of a block.
    # Returns True once a pong with the matching nonce arrives, False on
    # timeout (in seconds).
    def sync_with_ping(self, timeout=30):
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong

class AcceptBlockTest(BitcoinTestFramework):
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("polcoind", "polcoind"),
                          help="polcoind binary to test")

    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-debug", "-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        # NOTE: the "L" suffix relies on Python 2 long-literal parsing with
        # base 0 -- this converts the hex block hash to an integer.
        tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = time.time() + 1
        for i in xrange(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in xrange(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in xrange(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print "Unrequested more-work block accepted from non-whitelisted peer"
        except:
            raise AssertionError("Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []   # node0's blocks
        for j in xrange(2):
            for i in xrange(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except:
                if x == all_blocks[287]:
                    print "Unrequested block too far-ahead not processed"
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop()   # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print "Unrequested block far ahead of tip accepted from whitelisted peer"
        except:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [ c.disconnect_node() for c in connections ]

if __name__ == '__main__':
    AcceptBlockTest().main()
{ "content_hash": "5b242eac4498136eba986d514a00af23", "timestamp": "", "source": "github", "line_count": 285, "max_line_length": 107, "avg_line_length": 42.78947368421053, "alnum_prop": 0.6326363263632636, "repo_name": "wargo32/Polcoin", "id": "e715195f754d7358cba0b31247f4484bf0895790", "size": "12364", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "qa/rpc-tests/p2p-acceptblock.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "456682" }, { "name": "C++", "bytes": "3876763" }, { "name": "CSS", "bytes": "1127" }, { "name": "Groff", "bytes": "18445" }, { "name": "HTML", "bytes": "50621" }, { "name": "Java", "bytes": "2100" }, { "name": "Makefile", "bytes": "63563" }, { "name": "Objective-C", "bytes": "2022" }, { "name": "Objective-C++", "bytes": "7240" }, { "name": "Protocol Buffer", "bytes": "2308" }, { "name": "Python", "bytes": "491253" }, { "name": "QMake", "bytes": "2020" }, { "name": "Shell", "bytes": "30599" } ], "symlink_target": "" }
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema


# sqlalchemy-migrate convention: `pre_meta` describes the schema as it exists
# BEFORE this migration, `post_meta` the schema AFTER it.
pre_meta = MetaData()
post_meta = MetaData()

# Snapshot of the `tasks` table before the migration; it is registered on
# pre_meta so the `priority` column object can be located and dropped below.
tasks = Table('tasks', pre_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('priority', INTEGER, nullable=False),
    Column('user_id', INTEGER),
    Column('description', VARCHAR(length=140)),
    Column('Cr', FLOAT),
    Column('Cu', FLOAT),
    Column('Ec', FLOAT),
    Column('Ef', FLOAT),
    Column('Fr', FLOAT),
    Column('Fu', FLOAT),
    Column('L', FLOAT),
    Column('Lav', FLOAT),
    Column('Mass', FLOAT),
    Column('U', FLOAT),
    Column('Uav', FLOAT),
    Column('LFI', FLOAT),
    Column('MCI', FLOAT),
    Column('VirginFeed', FLOAT),
    Column('Wc', FLOAT),
    Column('Wf', FLOAT),
    Column('Wtot', FLOAT),
    Column('X', FLOAT),
    Column('industry', VARCHAR(length=140)),
    Column('product', VARCHAR(length=140)),
)


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # This migration removes the NOT NULL `priority` column from `tasks`.
    pre_meta.tables['tasks'].columns['priority'].drop()


def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Re-create the `priority` column dropped by upgrade(). The original
    # values are not restored, and re-adding a NOT NULL column may fail on a
    # populated table -- NOTE(review): verify on non-empty data.
    pre_meta.tables['tasks'].columns['priority'].create()
{ "content_hash": "16ee6c4da22c8f3461654e62c39b7701", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 68, "avg_line_length": 28.958333333333332, "alnum_prop": 0.6438848920863309, "repo_name": "jasonmaier/CircularEconomyBlog", "id": "b9d5db7a27e1040ac9436dd76e6cdeef09439a79", "size": "1390", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "db_repository/versions/023_migration.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "19905" }, { "name": "Python", "bytes": "90704" } ], "symlink_target": "" }
import os
import sys
import unittest

from check_arg import valid_and_doc, default_doc_maker


class TestCheck_Arg(unittest.TestCase):
    """Exercise `valid_and_doc`: a decorated function must both run its
    validators and have them appended to its docstring."""

    def setUp(self):
        # Build two validator factories and decorate a dummy function with
        # them. NOTE: no docstrings are added to the validators themselves,
        # since `valid_and_doc` generates documentation from them and
        # test_doc_output asserts the exact resulting __doc__ string.

        # Factory: validator requiring each of `needed_keys` in the kwargs.
        def must_have_key(*needed_keys):
            def must_have_key(*a, **kw):
                if not kw:
                    raise Exception("No keyword are provided")
                for key in needed_keys:
                    # idiom fix: `key not in kw` instead of `not key in kw`
                    if key not in kw:
                        raise Exception("%s is mandatory in keywords arguments" % key)
            return must_have_key

        # Factory: validator requiring at least `how_many` positional args.
        def at_least_n_positional(how_many):
            def at_least_n_positional(*a, **kw):
                if len(a) < how_many:
                    raise Exception("too few arguments")
            return at_least_n_positional

        at_least = valid_and_doc(at_least_n_positional)
        must_have = valid_and_doc(must_have_key)

        @at_least(2)
        @must_have("name")
        def does_nothing(a=1, *b, **kw):
            """useless fonction"""
            return True

        self.test_me = does_nothing

    def test_doc_output(self):
        # The decorators append each validator's name and arguments to the
        # wrapped function's original docstring.
        doc = self.test_me.__doc__
        self.assertEqual(doc,
            "useless fonction\n**must_have_key** :name\n\n\n**at_least_n_positional** :2\n\n")

    def test_valid(self):
        # "nime" is a deliberate typo: the mandatory "name" keyword is absent,
        # so the must_have_key validator must fire.
        with self.assertRaises(Exception) as context:
            self.test_me(1, 2, nime="")
        self.assertEqual(str(context.exception), "name is mandatory in keywords arguments")

    def test_valid2(self):
        # Only one positional argument: the at_least(2) validator must fire.
        with self.assertRaises(Exception) as context:
            self.test_me(1,)
        self.assertEqual(str(context.exception), "too few arguments")


if __name__ == '__main__':
    unittest.main(verbosity=7)
{ "content_hash": "0736d427eb69aa72819382e8189cb1a2", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 112, "avg_line_length": 33.96, "alnum_prop": 0.5653710247349824, "repo_name": "jul/check_arg", "id": "fb86c7f5c1d0e0faca6c986da46a8001ebfd0cbd", "size": "1719", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "check_arg/test_valid.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "4128" } ], "symlink_target": "" }
import renderdoc as rd
import rdtest
import struct


class D3D12_AMD_Shader_Extensions(rdtest.TestCase):
    demos_test_name = 'D3D12_AMD_Shader_Extensions'

    def check_capture(self):
        """Check AMD shader-extension behaviour for both the SM5.1 (DXBC) and
        SM6.0 (DXIL) passes: barycentric pixel shader output, 64-bit atomic
        max results, compute reflection, disassembly and shader debugging."""
        for pass_type in ["SM51", "SM60"]:
            action = self.find_action(pass_type + " Draw")

            if action is not None:
                self.controller.SetFrameEvent(action.next.eventId, False)

                pipe = self.controller.GetPipelineState()

                tex = pipe.GetOutputTargets()[0].resourceId
                vp = pipe.GetViewport(0)

                # Should have barycentrics showing the closest vertex for each pixel in the triangle
                # Without relying on barycentric order, ensure that the three pixels are red, green, and blue
                pixels = []

                x = int(vp.x + vp.width * 0.5)
                y = int(vp.y + vp.height * 0.5)

                picked: rd.PixelValue = self.controller.PickPixel(tex, x + 0, y + 0, rd.Subresource(),
                                                                  rd.CompType.UNorm)
                pixels.append(picked.floatValue[0:4])
                picked: rd.PixelValue = self.controller.PickPixel(tex, x - 20, y + 20, rd.Subresource(),
                                                                  rd.CompType.UNorm)
                pixels.append(picked.floatValue[0:4])
                picked: rd.PixelValue = self.controller.PickPixel(tex, x + 20, y + 20, rd.Subresource(),
                                                                  rd.CompType.UNorm)
                pixels.append(picked.floatValue[0:4])

                # BUG FIX: the original tested for red three times, so green and
                # blue were never actually verified. Check each colour once.
                if ((1.0, 0.0, 0.0, 1.0) not in pixels) or ((0.0, 1.0, 0.0, 1.0) not in pixels) or (
                        (0.0, 0.0, 1.0, 1.0) not in pixels):
                    raise rdtest.TestFailureException(
                        "Expected red, green and blue in picked pixels. Got {}".format(pixels))

                rdtest.log.success("Picked barycentric values are as expected")

                action = self.find_action(pass_type + " Dispatch")
                self.controller.SetFrameEvent(action.next.eventId, False)

                # find the cpuMax and gpuMax actions
                cpuMax = self.find_action(pass_type + " cpuMax")
                gpuMax = self.find_action(pass_type + " gpuMax")

                # The values should be identical; they are embedded in the
                # action names as "...: <value>".
                cpuMax = int(cpuMax.customName.split(': ')[1])
                gpuMax = int(gpuMax.customName.split(': ')[1])

                if cpuMax != gpuMax or cpuMax == 0:
                    raise rdtest.TestFailureException(
                        "captured cpuMax and gpuMax are not equal and positive: {} vs {}".format(cpuMax, gpuMax))

                rdtest.log.success("recorded cpuMax and gpuMax are as expected")

                # Replay must reproduce the same 64-bit max in the output UAV.
                outBuf = self.get_resource_by_name("outBuf")
                data = self.controller.GetBufferData(outBuf.resourceId, 0, 8)
                replayedGpuMax = struct.unpack("Q", data)[0]

                if replayedGpuMax != gpuMax:
                    raise rdtest.TestFailureException(
                        "captured gpuMax and replayed gpuMax are not equal: {} vs {}".format(gpuMax, replayedGpuMax))

                rdtest.log.success("replayed gpuMax is as expected")

            # We should get everything except maybe DXIL
            elif pass_type != "SM60":
                raise rdtest.TestFailureException("Didn't find test action for {}".format(pass_type))

            # We always check the CS pipe to ensure the reflection is OK
            cs_pipe = self.get_resource_by_name("cspipe" + pass_type)

            if cs_pipe is None:
                # everything but DXIL we must get, DXIL we may not be able to compile
                if pass_type != "SM60":
                    raise rdtest.TestFailureException("Didn't find compute pipeline for {}".format(pass_type))
                continue

            # Locate the compute shader among the pipeline's related resources.
            pipe = cs_pipe.resourceId
            cs = rd.ResourceId()
            for d in cs_pipe.derivedResources + cs_pipe.parentResources:
                res = self.get_resource(d)
                if res.type == rd.ResourceType.Shader:
                    cs = res.resourceId
                    break

            refl: rd.ShaderReflection = self.controller.GetShader(
                pipe, cs, rd.ShaderEntryPoint("main", rd.ShaderStage.Compute))

            self.check(len(refl.readWriteResources) == 2)
            self.check([rw.name for rw in refl.readWriteResources] == ["inUAV", "outUAV"])

            # Don't test disassembly or debugging with DXIL, we don't do any of that
            if pass_type == "SM60":
                continue

            disasm = self.controller.DisassembleShader(pipe, refl, "")

            if "amd_u64_atomic" not in disasm:
                raise rdtest.TestFailureException(
                    "Didn't find expected AMD opcode in disassembly: {}".format(disasm))

            rdtest.log.success("compute shader disassembly is as expected")

            if refl.debugInfo.debuggable:
                self.controller.SetFrameEvent(self.find_action("Dispatch").eventId, False)

                # Debug thread (0,0,0) of group (0,0,0).
                trace: rd.ShaderDebugTrace = self.controller.DebugThread((0, 0, 0), (0, 0, 0))

                if trace.debugger is None:
                    self.controller.FreeTrace(trace)
                    raise rdtest.TestFailureException("Couldn't debug compute shader")

                cycles, variables = self.process_trace(trace)

                if cycles < 3:
                    raise rdtest.TestFailureException("Compute shader has too few cycles {}".format(cycles))
            else:
                raise rdtest.TestFailureException(
                    "Compute shader is listed as non-debuggable: {}".format(refl.debugInfo.debugStatus))

            rdtest.log.success("compute shader debugged successfully")
{ "content_hash": "19c17c7139f9cb50b5187773b1afff80", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 125, "avg_line_length": 45.031496062992126, "alnum_prop": 0.5707291484525266, "repo_name": "moradin/renderdoc", "id": "3816842dd9d321323ae3d21e1bf325b2a2ebd207", "size": "5719", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "util/test/tests/D3D12/D3D12_AMD_Shader_Extensions.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "2544577" }, { "name": "C#", "bytes": "2112824" }, { "name": "C++", "bytes": "10579561" }, { "name": "CMake", "bytes": "110710" }, { "name": "GLSL", "bytes": "38561" }, { "name": "HLSL", "bytes": "46226" }, { "name": "Java", "bytes": "526" }, { "name": "Objective-C", "bytes": "57753" }, { "name": "Perl", "bytes": "7374" }, { "name": "QMake", "bytes": "4548" }, { "name": "Shell", "bytes": "5486" } ], "symlink_target": "" }
import sqlalchemy
from typing import List

from application.database import global_db
from core.job.helpers import expand_nodelist


class Job(global_db.Model):
    """One scheduler job record, unique per (job_id, task_id) pair."""
    __tablename__ = 'job'
    __table_args__ = (
        global_db.Index('job_index', "job_id", "task_id")
        , global_db.UniqueConstraint('job_id', 'task_id', name='unique_jobs')
        , global_db.CheckConstraint("t_submit > 0", name="good_submit")
        , global_db.CheckConstraint("t_start >= t_submit", name="good_start")
        , global_db.CheckConstraint("t_end >= t_start", name="good_end")
        # jobs running longer than 14 days are rejected as bogus
        , global_db.CheckConstraint("t_end - t_start < 60*60*24*14", name="good_length"))

    id = global_db.Column("id", global_db.Integer, primary_key=True)  # inner entry id
    job_id = global_db.Column(global_db.Integer, index=True)  # scheduler id
    task_id = global_db.Column(global_db.Integer, default=0)  # scheduler sub-id for stupid job with same id

    partition = global_db.Column(global_db.String(64))
    account = global_db.Column(global_db.String(64))

    # t_submit/t_start/t_end are integer timestamps -- presumably Unix epoch
    # seconds, per the CheckConstraints above; TODO confirm against importer.
    t_submit = global_db.Column(global_db.Integer)
    t_start = global_db.Column(global_db.Integer)
    t_end = global_db.Column(global_db.Integer)
    timelimit = global_db.Column(global_db.Integer)

    num_nodes = global_db.Column(global_db.Integer)
    num_cores = global_db.Column(global_db.Integer)

    state = global_db.Column(global_db.String(16))
    priority = global_db.Column(global_db.BigInteger)

    command = global_db.Column(global_db.Text())
    workdir = global_db.Column(global_db.Text())
    nodelist = global_db.Column(global_db.Text())  # compressed scheduler nodelist string

    @property
    def expanded_nodelist(self) -> List[str]:
        """Return the nodelist expanded into individual node names."""
        return expand_nodelist(self.nodelist)

    def __init__(self, job_id: int, task_id: int, partition: str, account: str
            , t_submit: int, t_start: int, t_end: int, timelimit: int
            , num_nodes: int, num_cores: int, state: str, priority: int
            , command: str, workdir: str, nodelist: str):
        self.job_id = job_id
        self.task_id = task_id

        self.partition = partition
        self.account = account

        self.t_submit = t_submit
        self.t_start = t_start
        self.t_end = t_end
        self.timelimit = timelimit

        self.num_nodes = num_nodes
        self.num_cores = num_cores

        self.state = state
        self.priority = priority

        self.command = command
        self.workdir = workdir
        self.nodelist = nodelist

    def __repr__(self) -> str:
        return "<job_id {}>".format(self.job_id)

    def to_dict(self) -> dict:
        """Serialize all persisted fields (plus the inner id) to a dict."""
        return {
            "id": self.id
            , "job_id": self.job_id
            , "task_id": self.task_id
            , "partition": self.partition
            , "account": self.account
            , "t_submit": self.t_submit
            , "t_start": self.t_start
            , "t_end": self.t_end
            , "timelimit": self.timelimit
            , "num_nodes": self.num_nodes
            , "num_cores": self.num_cores
            , "state": self.state
            , "priority": self.priority
            , "command": self.command
            , "workdir": self.workdir
            , "nodelist": self.nodelist}

    @staticmethod
    def get_by_id(job_id: int, task_id: int):
        """Fetch the unique job for (job_id, task_id).

        Raises LookupError (chained from NoResultFound) when absent.
        """
        try:
            return Job.query.filter(Job.job_id == job_id).filter(Job.task_id == task_id).one()
        except sqlalchemy.orm.exc.NoResultFound as e:
            raise LookupError("job not found") from e
{ "content_hash": "9f5f1e9f4b0e74f46c7eaac844aab5fa", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 105, "avg_line_length": 30.77, "alnum_prop": 0.6798830029249269, "repo_name": "srcc-msu/job_statistics", "id": "f7b6c4bee7fed5123d2d012db189304c963376d7", "size": "3077", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/job/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "19266" }, { "name": "HTML", "bytes": "62218" }, { "name": "JavaScript", "bytes": "97906" }, { "name": "Makefile", "bytes": "342" }, { "name": "Python", "bytes": "102112" } ], "symlink_target": "" }
import hashlib
import locale
import os
from codecs import open
from tempfile import mkdtemp
from shutil import rmtree
import unittest
import subprocess

from pelican import Pelican
from pelican.settings import read_settings
from pelican.tests.support import mute, skipIfNoExecutable, module_exists

CUR_DIR = os.path.dirname(__file__)
THEME_DIR = os.path.join(CUR_DIR, 'test_data')
# Reference minified stylesheet; webassets names the generated file with the
# first 8 hex chars of its md5, so precompute that hash for assertions.
CSS_REF = open(os.path.join(THEME_DIR, 'static', 'css', 'style.min.css')).read()
CSS_HASH = hashlib.md5(CSS_REF.encode()).hexdigest()[0:8]


@unittest.skipUnless(module_exists('webassets'), "webassets isn't installed")
@skipIfNoExecutable(['sass', '-v'])
@skipIfNoExecutable(['cssmin', '--version'])
class TestWebAssets(unittest.TestCase):
    """Base class for testing webassets."""

    def setUp(self, override=None):
        # Run a full pelican build into a temp dir with the assets plugin
        # enabled; subclasses tweak settings via `override`.
        import assets
        self.temp_path = mkdtemp(prefix='pelicantests.')
        settings = {
            'ASSET_CONFIG': [('sass_bin', 'scss')],
            'PATH': os.path.join(os.path.dirname(CUR_DIR), 'test_data', 'content'),
            'OUTPUT_PATH': self.temp_path,
            'PLUGINS': [assets],
            'THEME': THEME_DIR,
            'LOCALE': locale.normalize('en_US'),
            'CACHE_CONTENT': False
        }
        if override:
            settings.update(override)

        self.settings = read_settings(override=settings)
        pelican = Pelican(settings=self.settings)
        mute(True)(pelican.run)()

    def tearDown(self):
        rmtree(self.temp_path)

    def check_link_tag(self, css_file, html_file):
        """Check the presence of `css_file` in `html_file`."""

        link_tag = ('<link rel="stylesheet" href="{css_file}">'
                    .format(css_file=css_file))
        html = open(html_file).read()
        self.assertRegexpMatches(html, link_tag)


class TestWebAssetsRelativeURLS(TestWebAssets):
    """Test pelican with relative urls."""

    def setUp(self):
        TestWebAssets.setUp(self, override={'RELATIVE_URLS': True})

    def test_jinja2_ext(self):
        # Test that the Jinja2 extension was correctly added.

        from webassets.ext.jinja2 import AssetsExtension
        self.assertIn(AssetsExtension,
                      self.settings['JINJA_ENVIRONMENT']['extensions'])

    def test_compilation(self):
        # Compare the compiled css with the reference.

        gen_file = os.path.join(self.temp_path, 'theme', 'gen',
                                'style.{0}.min.css'.format(CSS_HASH))
        self.assertTrue(os.path.isfile(gen_file))

        css_new = open(gen_file).read()
        self.assertEqual(css_new, CSS_REF)

    def test_template(self):
        # Look in the output files for the link tag.

        css_file = './theme/gen/style.{0}.min.css'.format(CSS_HASH)
        html_files = ['index.html', 'archives.html',
                      'this-is-a-super-article.html']
        for f in html_files:
            self.check_link_tag(css_file, os.path.join(self.temp_path, f))

        # Pages one directory deeper need the relative path adjusted.
        self.check_link_tag(
            '../theme/gen/style.{0}.min.css'.format(CSS_HASH),
            os.path.join(self.temp_path, 'category/yeah.html'))


class TestWebAssetsAbsoluteURLS(TestWebAssets):
    """Test pelican with absolute urls."""

    def setUp(self):
        TestWebAssets.setUp(self, override={'RELATIVE_URLS': False,
                                            'SITEURL': 'http://localhost'})

    def test_absolute_url(self):
        # Look in the output files for the link tag with absolute url.

        css_file = ('http://localhost/theme/gen/style.{0}.min.css'
                    .format(CSS_HASH))
        html_files = ['index.html', 'archives.html',
                      'this-is-a-super-article.html']
        for f in html_files:
            self.check_link_tag(css_file, os.path.join(self.temp_path, f))
{ "content_hash": "f936df0cb9d6a12f08e998a7bb9d83bd", "timestamp": "", "source": "github", "line_count": 110, "max_line_length": 88, "avg_line_length": 34.77272727272727, "alnum_prop": 0.6101960784313726, "repo_name": "kura/kura.io", "id": "0aaebf07984cb3a32385352966d3fcfd4a0025bb", "size": "3892", "binary": false, "copies": "5", "ref": "refs/heads/main", "path": "plugins/assets/test_assets.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "346" }, { "name": "CSS", "bytes": "48614" }, { "name": "HTML", "bytes": "81185" }, { "name": "JavaScript", "bytes": "22549" }, { "name": "Makefile", "bytes": "2193" }, { "name": "Perl 6", "bytes": "1075" }, { "name": "Python", "bytes": "65766" }, { "name": "Shell", "bytes": "7834" } ], "symlink_target": "" }
import os
from pwd import getpwnam
from grp import getgrnam

from MirrorPlugin import MirrorPlugin


class ChangeOwnerException(Exception):
    """Raised when the ChangeOwner plugin cannot be initialised."""
    pass


class ChangeOwner(MirrorPlugin):
    """Mirror plugin that recursively chowns the mirror destination to a
    user/group configured in the [ChangeOwner] config section."""

    def __init__(self, *args, **kwargs):
        try:
            m = kwargs['mirror']
            config = kwargs['config']
            super(ChangeOwner, self).__init__(m)
            self.destination = self.mirror.destination

            # Resolve user/group names to numeric ids once at init time so a
            # misconfigured name fails fast rather than during the mirror run.
            username = config.get(ChangeOwner.__name__, 'user')
            self.uid = getpwnam(username).pw_uid

            groupname = config.get(ChangeOwner.__name__, 'group')
            self.gid = getgrnam(groupname).gr_gid
        except Exception as e:
            # Wrap any failure (missing kwargs/options, unknown user/group)
            # in the plugin-specific exception type.
            raise ChangeOwnerException("Unable to init ChangeOwner plugin: " + str(e))

    def __before__(self, buff):
        # Nothing to do before the mirror runs.
        pass

    def __after__(self, buff):
        """Recursively change ownership of everything under the destination."""
        for root, dirs, files in os.walk(self.destination):
            os.chown(root, self.uid, self.gid)
            # Single merged loop replaces the original's two identical loops
            # over dirs and files; same entries, same chown calls.
            for entry in dirs + files:
                os.chown(os.path.join(root, entry), self.uid, self.gid)
{ "content_hash": "2c3c717e251aaa6e7e97a92dc9f76bc4", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 86, "avg_line_length": 30.26829268292683, "alnum_prop": 0.5616438356164384, "repo_name": "vramiro/easy-wget-mirror", "id": "7df166da93df043eca6b47a499cc03e8bfe9ba7b", "size": "1241", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plugins/ChangeOwner.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13823" } ], "symlink_target": "" }
# Tests for sentry's JavaScript source/sourcemap processing helpers.
from __future__ import absolute_import

import pytest
import responses

from requests.exceptions import RequestException

from sentry.lang.javascript.processor import (
    BadSource, discover_sourcemap, fetch_sourcemap, fetch_url,
    generate_module, trim_line, UrlResult
)
from sentry.lang.javascript.sourcemaps import SourceMap, SourceMapIndex
from sentry.testutils import TestCase

# Inline "data:" URI sourcemap.  The base64 payload decodes to a v3
# sourcemap whose single source is /test.js (see FetchBase64SourcemapTest
# below for the expected decoded contents).
base64_sourcemap = 'data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiZ2VuZXJhdGVkLmpzIiwic291cmNlcyI6WyIvdGVzdC5qcyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiO0FBQUEiLCJzb3VyY2VzQ29udGVudCI6WyJjb25zb2xlLmxvZyhcImhlbGxvLCBXb3JsZCFcIikiXX0='


class FetchUrlTest(TestCase):
    # fetch_url() should hit the network once and serve a repeated fetch
    # of the same URL from cache (responses.calls stays at 1).
    @responses.activate
    def test_simple(self):
        responses.add(responses.GET, 'http://example.com', body='foo bar',
                      content_type='application/json')

        result = fetch_url('http://example.com')

        assert len(responses.calls) == 1
        assert result.url == 'http://example.com'
        assert result.body == 'foo bar'
        assert result.headers == {'content-type': 'application/json'}

        # ensure we use the cached result
        result2 = fetch_url('http://example.com')

        assert len(responses.calls) == 1
        assert result == result2

    # When the project has a security token configured, fetch_url() must
    # send it as the X-Sentry-Token request header.
    @responses.activate
    def test_with_token(self):
        responses.add(responses.GET, 'http://example.com', body='foo bar',
                      content_type='application/json')

        self.project.update_option('sentry:token', 'foobar')
        self.project.update_option('sentry:origins', ['*'])

        result = fetch_url('http://example.com', project=self.project)

        assert len(responses.calls) == 1
        assert responses.calls[0].request.headers['X-Sentry-Token'] == 'foobar'
        assert result.url == 'http://example.com'
        assert result.body == 'foo bar'
        assert result.headers == {'content-type': 'application/json'}

    # A connection error raises BadSource, and the failure is cached
    # domain-wide: a second fetch to the same host (different path)
    # short-circuits without another network call.
    @responses.activate
    def test_connection_failure(self):
        responses.add(responses.GET, 'http://example.com', body=RequestException())

        with pytest.raises(BadSource):
            result = fetch_url('http://example.com')

        assert len(responses.calls) == 1

        # ensure we use the cached domain-wide failure for the second call
        with pytest.raises(BadSource):
            result = fetch_url('http://example.com/foo/bar')

        assert len(responses.calls) == 1


class DiscoverSourcemapTest(TestCase):
    # discover_sourcemap(result)
    # Covers: no sourcemap, the x-sourcemap / sourcemap response headers,
    # and //@ / //# sourceMappingURL comments at the start or end of the body.
    def test_simple(self):
        result = UrlResult('http://example.com', {}, '')
        assert discover_sourcemap(result) is None

        result = UrlResult('http://example.com', {
            'x-sourcemap': 'http://example.com/source.map.js'
        }, '')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'

        result = UrlResult('http://example.com', {
            'sourcemap': 'http://example.com/source.map.js'
        }, '')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'

        result = UrlResult('http://example.com', {}, '//@ sourceMappingURL=http://example.com/source.map.js\nconsole.log(true)')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'

        result = UrlResult('http://example.com', {}, '//# sourceMappingURL=http://example.com/source.map.js\nconsole.log(true)')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'

        result = UrlResult('http://example.com', {}, 'console.log(true)\n//@ sourceMappingURL=http://example.com/source.map.js')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'

        result = UrlResult('http://example.com', {}, 'console.log(true)\n//# sourceMappingURL=http://example.com/source.map.js')
        assert discover_sourcemap(result) == 'http://example.com/source.map.js'


class GenerateModuleTest(TestCase):
    # generate_module() derives a module name from a script URL by
    # dropping the scheme/host, version-like and hash-like leading path
    # segments, common vendor directories, and the .js extension.
    def test_simple(self):
        assert generate_module(None) == '<unknown module>'
        assert generate_module('http://example.com/foo.js') == 'foo'
        assert generate_module('http://example.com/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/js/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/javascript/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/1.0/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/v1/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/v1.0.0/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/_baz/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/1/2/3/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/abcdef0/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/92cd589eca8235e7b373bf5ae94ebf898e3b949c/foo/bar.js') == 'foo/bar'
        assert generate_module('http://example.com/7d6d00eae0ceccdc7ee689659585d95f/foo/bar.js') == 'foo/bar'
        assert generate_module('/foo/bar.js') == 'foo/bar'
        assert generate_module('../../foo/bar.js') == 'foo/bar'
        assert generate_module('/foo/bar-7d6d00eae0ceccdc7ee689659585d95f.js') == 'foo/bar'
        assert generate_module('/bower_components/foo/bar.js') == 'foo/bar'
        assert generate_module('/node_modules/foo/bar.js') == 'foo/bar'


class FetchBase64SourcemapTest(TestCase):
    # fetch_sourcemap() on a base64 data: URI decodes and parses the map
    # inline -- no HTTP fetch is involved.
    def test_simple(self):
        index = fetch_sourcemap(base64_sourcemap)
        states = [SourceMap(1, 0, '/test.js', 0, 0, None)]
        sources = set(['/test.js'])
        keys = [(1, 0)]
        content = {'/test.js': ['console.log("hello, World!")']}

        assert index == SourceMapIndex(states, keys, sources, content)


class TrimLineTest(TestCase):
    # A line well over the display limit, used to exercise trimming from
    # the front, the back, and both ends depending on the column of interest.
    long_line = 'The public is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it lives with. The new becomes threatening, the old reassuring.'

    def test_simple(self):
        # Short lines pass through untouched; long lines get a {snip}
        # marker on whichever side(s) were cut, keeping the region around
        # `column` visible.
        assert trim_line('foo') == 'foo'
        assert trim_line(self.long_line) == 'The public is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it li {snip}'
        assert trim_line(self.long_line, column=10) == 'The public is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it li {snip}'
        assert trim_line(self.long_line, column=66) == '{snip} blic is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it lives wi {snip}'
        assert trim_line(self.long_line, column=190) == '{snip} gn. It is, in effect, conditioned to prefer bad design, because that is what it lives with. The new becomes threatening, the old reassuring.'
        assert trim_line(self.long_line, column=9999) == '{snip} gn. It is, in effect, conditioned to prefer bad design, because that is what it lives with. The new becomes threatening, the old reassuring.'
{ "content_hash": "d57cbb7aaba2ce5e668ca3f68ead41ba", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 238, "avg_line_length": 49.87412587412587, "alnum_prop": 0.6661525518788559, "repo_name": "jokey2k/sentry", "id": "2e5482fb5d4fd1ba430849180717a1b1891fda7b", "size": "7157", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/sentry/lang/javascript/test_processor.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "580459" }, { "name": "Gettext Catalog", "bytes": "2933595" }, { "name": "HTML", "bytes": "292821" }, { "name": "JavaScript", "bytes": "608760" }, { "name": "Makefile", "bytes": "2710" }, { "name": "Python", "bytes": "5105385" } ], "symlink_target": "" }
"""Package metadata for the google-cloud-iot client library."""

import io
import os

import setuptools


# Package metadata.
name = 'google-cloud-iot'
description = 'Cloud IoT API API client library'
version = '0.1.0'
# Release status controls the Trove classifier below ("3 - Alpha").
release_status = 'Development Status :: 3 - Alpha'
dependencies = [
    'google-api-core[grpc] >= 1.4.1, < 2.0.0dev',
    'grpc-google-iam-v1 >= 0.11.4, < 0.12dev',
    'enum34; python_version < "3.4"',
]

# Setup boilerplate below this line.

package_root = os.path.abspath(os.path.dirname(__file__))

# The long description is pulled from the package README.
readme_filename = os.path.join(package_root, 'README.rst')
with io.open(readme_filename, encoding='utf-8') as readme_file:
    readme = readme_file.read()

# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
    package for package in setuptools.find_packages()
    if package.startswith('google')
]

# Determine which namespaces are needed.
namespaces = ['google']
if 'google.cloud' in packages:
    namespaces.append('google.cloud')

setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author='Google LLC',
    # Fixed typo: the contact domain was 'oogle.com'.
    author_email='googleapis-packages@google.com',
    license='Apache 2.0',
    url='https://github.com/GoogleCloudPlatform/google-cloud-python',
    classifiers=[
        release_status,
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
        'Topic :: Internet',
    ],
    platforms='Posix; MacOS X; Windows',
    packages=packages,
    namespace_packages=namespaces,
    install_requires=dependencies,
    include_package_data=True,
    zip_safe=False,
)
{ "content_hash": "70ac2cbca3182f78373a62ff3ded1318", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 69, "avg_line_length": 29.75, "alnum_prop": 0.6481792717086835, "repo_name": "jonparrott/gcloud-python", "id": "e856f5542affd288c9f2e25c34f848b93a1f77a0", "size": "2361", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "iot/setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Protocol Buffer", "bytes": "62009" }, { "name": "Python", "bytes": "3459300" }, { "name": "Shell", "bytes": "7548" } ], "symlink_target": "" }
import click

from .comm import pdserver_request


@click.group()
@click.pass_context
def routers(ctx):
    """
    (deprecated) Access router information on the controller.

    These commands are deprecated. Please use the equivalent commands
    under `pdtools cloud --help`.
    """
    # Stash the routers collection endpoint for the subcommands below.
    ctx.obj['routers_url'] = ctx.obj['pdserver_url'] + "/api/routers"


@routers.command()
@click.pass_context
def list(ctx):
    """
    List routers.
    """
    # Fetch the collection and print one line per router.
    response = pdserver_request('GET', ctx.obj['routers_url'])
    for entry in response.json():
        print("{} {} {}".format(entry['_id'], entry['name'], entry['online']))


@routers.command()
@click.pass_context
@click.argument('token')
def claim(ctx, token):
    """
    Claim an existing router.
    """
    response = pdserver_request('POST', ctx.obj['routers_url'] + '/claim',
                                json={'claim_token': token})
    # Guard clause: bail out with a hint if the server rejected the token.
    if not response.ok:
        print("There was an error claiming the router.")
        print("Please check that your claim token is correct.")
        return
    print("Claimed router: {}".format(response.json()['name']))


@routers.command()
@click.pass_context
@click.argument('name')
@click.option('--orphaned/--not-orphaned', default=False)
@click.option('--claim', default=None)
def create(ctx, name, orphaned, claim):
    """
    Create a new router.
    """
    payload = {
        'name': name,
        'orphaned': orphaned
    }
    # The claim token is optional; only send it when given.
    if claim is not None:
        payload['claim_token'] = claim

    response = pdserver_request('POST', ctx.obj['routers_url'], json=payload)
    if not response.ok:
        return

    router = response.json()
    for label, field in (("Name", 'name'), ("ID", '_id'),
                         ("Password", 'password'), ("Claim", 'claim_token')):
        print("{}: {}".format(label, router[field]))
    print("")
    print("You may use the following command to provision your router:")
    print("pdtools device <address> provision {_id} {password}".format(**router))


@routers.command()
@click.pass_context
@click.argument('router_id')
def delete(ctx, router_id):
    """
    Delete a router.
    """
    pdserver_request('DELETE', ctx.obj['routers_url'] + "/" + router_id)
{ "content_hash": "8572232b5bd214e53936d379c7da8a5d", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 85, "avg_line_length": 25.61111111111111, "alnum_prop": 0.6047722342733188, "repo_name": "ParadropLabs/Paradrop", "id": "1d05dd245ad187b6af590e8184d651ddd7efe879", "size": "2305", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/pdtools/pdtools/routers.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "148071" }, { "name": "Dockerfile", "bytes": "10449" }, { "name": "HTML", "bytes": "554" }, { "name": "Makefile", "bytes": "1665" }, { "name": "Python", "bytes": "1049444" }, { "name": "Shell", "bytes": "9897" } ], "symlink_target": "" }
# Python 2 test script: submits a small batch of Geant4 simulation jobs
# to the PanDA server (release 15.3.1, csc_atlasG4_trf.py).
import sys
import time
import random
import commands

import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec

# Positional args: <site> <cloud> [<prodDBlock> <inputFile>]
site = sys.argv[1]
cloud = sys.argv[2]

# Defaults used when only site and cloud are given.
prodDBlock = 'mc09_10TeV.105807.JF35_pythia_jet_filter.evgen.EVNT.e469_tid095268'
inputFile = 'EVNT.095268._000110.pool.root.1'
if len(sys.argv) == 5:
    site = sys.argv[1]
    cloud = sys.argv[2]
    prodDBlock = sys.argv[3]
    inputFile = sys.argv[4]

# Unique destination dataset name for this run.
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')

# Map of input LFN -> GUID (GUID unknown here, hence None).
files = {
    inputFile: None,
    }

jobList = []

index = 0
# Build one simulation job per input file.
for lfn in files.keys():
    index += 1
    job = JobSpec()
    # NOTE(review): time.time() is a float; presumably an int was intended
    # for jobDefinitionID -- confirm against JobSpec expectations.
    job.jobDefinitionID = (time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), index)
    job.AtlasRelease = 'Atlas-15.3.1'
    job.homepackage = 'AtlasProduction/15.3.1.5'
    job.transformation = 'csc_atlasG4_trf.py'
    job.destinationDBlock = datasetName
    job.computingSite = site
    job.prodDBlock = prodDBlock
    job.prodSourceLabel = 'test'
    job.processingType = 'test'
    job.currentPriority = 10000
    job.cloud = cloud
    job.cmtConfig = 'i686-slc4-gcc34-opt'

    # Primary input event file.
    fileI = FileSpec()
    fileI.dataset = job.prodDBlock
    fileI.prodDBlock = job.prodDBlock
    fileI.lfn = lfn
    fileI.type = 'input'
    job.addFile(fileI)

    # Conditions database release, shipped as a second input file.
    fileD = FileSpec()
    fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v070302'
    fileD.prodDBlock = fileD.dataset
    fileD.lfn = 'DBRelease-7.3.2.tar.gz'
    fileD.type = 'input'
    job.addFile(fileD)

    # Simulation output (HITS file).
    fileOA = FileSpec()
    fileOA.lfn = "%s.HITS.pool.root" % job.jobName
    fileOA.destinationDBlock = job.destinationDBlock
    fileOA.destinationSE = job.destinationSE
    fileOA.dataset = job.destinationDBlock
    fileOA.destinationDBlockToken = 'ATLASDATADISK'
    fileOA.type = 'output'
    job.addFile(fileOA)

    # Job log tarball.
    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.destinationDBlockToken = 'ATLASDATADISK'
    fileOL.type = 'log'
    job.addFile(fileOL)

    # Transformation arguments: input LFN, output LFN, fixed simulation
    # parameters, and the DBRelease tarball LFN.
    job.jobParameters = "%s %s 5 1850 8738 ATLAS-GEO-08-00-01 QGSP_BERT VertexPos.py %s OFLCOND-SIM-01-00-00 False s595" % \
        (fileI.lfn, fileOA.lfn, fileD.lfn)

    jobList.append(job)

# Submit all jobs and print the returned status plus one PandaID per job.
s, o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
    print "PandaID=%s" % x[0]
{ "content_hash": "9d8d16db6b40108f52180706673d7602", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 122, "avg_line_length": 29.772727272727273, "alnum_prop": 0.6435114503816793, "repo_name": "fbarreir/panda-server", "id": "19b8d4e4b4b2d4f0428f57c744ba5b15cb2016df", "size": "2620", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pandaserver/test/testG4sim15.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PLSQL", "bytes": "18429" }, { "name": "Python", "bytes": "2406184" }, { "name": "Shell", "bytes": "15098" } ], "symlink_target": "" }
# Parsers for multinode (cluster) metadata: RHEV, Docker, and OSP
# deployments, each with a coordinator-level class and a per-system
# child class.
from ..core import dr
from .. import metadata


class MultinodeMetaclass(type):
    """Metaclass that registers every concrete coordinator class.

    Each subclass of MultinodeMetadata whose name is not the base class
    itself and does not end in "Child" gets appended to the shared
    ``products`` list at class-creation time.
    """
    def __init__(cls, name, bases, dct):
        if name != 'MultinodeMetadata' and not name.endswith("Child"):
            cls.products.append(name)
        super(MultinodeMetaclass, cls).__init__(name, bases, dct)


class MultinodeMetadata(object):
    """Base for coordinator-level metadata of a multinode product.

    Skips (via dr.SkipComponent) unless the parsed data's "product"
    field matches the concrete class name.  Attributes listed in
    ``common_fields`` and the subclass's ``fields`` are copied from the
    data dict onto the instance.
    """
    # Python 2 metaclass declaration; registers subclasses (see above).
    __metaclass__ = MultinodeMetaclass
    # Names of all registered coordinator classes (filled by the metaclass).
    products = []
    # Fields present on every product's metadata.
    common_fields = ["display_name", "insights_version"]
    # Product-specific fields; overridden by subclasses.
    fields = []

    def __init__(self, data, path=None):
        # Only accept data for this exact product; otherwise let the
        # framework skip this component.
        if data['product'] != self.__class__.__name__:
            raise dr.SkipComponent()
        self.data = data
        for f in self.common_fields:
            setattr(self, f, data.get(f))
        for f in self.fields:
            setattr(self, f, data.get(f))

    @property
    def product(self):
        return self.data["product"]

    @property
    def children(self):
        # Lazily build and cache the system_id -> child mapping.
        if not hasattr(self, "_children"):
            self._children = self.populate_children()
        return self._children.values()

    def child(self, system_id):
        """Return the child for ``system_id``, or None if unknown."""
        if not hasattr(self, "_children"):
            self._children = self.populate_children()
        return self._children.get(system_id)

    def populate_children(self):
        """Build one child_class instance per entry in data["systems"]."""
        children = {}
        for child in self.data["systems"]:
            children[child["system_id"]] = self.child_class(child, parent=self)
        return children


@metadata(group=dr.GROUPS.cluster)
def rhev(data):
    # Component entry point for RHEV coordinator metadata.
    return RHEV(data)


@metadata(group=dr.GROUPS.cluster)
class RHEV(MultinodeMetadata):
    fields = [
        "storagedomains", "hosts", "api_version", "datacenters",
        "networks", "rhev_version", "coordinator_version",
        "diskprofiles", "vms", "clusters", "macpools"
    ]

    @property
    def child_class(self):
        return RHEVChild


@metadata(group=dr.GROUPS.cluster)
def docker(data):
    # Component entry point for Docker coordinator metadata.
    return Docker(data)


@metadata(group=dr.GROUPS.cluster)
class Docker(MultinodeMetadata):
    @property
    def child_class(self):
        return DockerChild


@metadata(group=dr.GROUPS.cluster)
def osp(data):
    # Component entry point for OSP (OpenStack Platform) coordinator metadata.
    return OSP(data)


@metadata(group=dr.GROUPS.cluster)
class OSP(MultinodeMetadata):
    fields = [
        "nova_client_api_version", "coordinator_version",
        "rhel_version", "rhosp_version", "overcloud_networks",
        "total_overcloud_networks", "stack_networks",
        "total_stack_networks"
    ]

    @property
    def child_class(self):
        return OSPChild


class MultinodeChild(MultinodeMetadata):
    """Base for per-system metadata within a multinode product.

    Accepts only data whose "product" matches the parent coordinator
    class name and which carries a "links" key; otherwise skips.
    """
    def __init__(self, data, parent=None):
        if data['product'] != self.parent_class.__name__ or "links" not in data:
            raise dr.SkipComponent()
        self.data = data
        self.parent = parent
        for f in self.fields:
            setattr(self, f, data.get(f))

    @property
    def role(self):
        # The child's role within the cluster, as reported in "type".
        return self.data["type"]


@metadata()
class OSPChild(MultinodeChild):
    fields = [
        "status", "ip"
    ]
    parent_class = OSP


@metadata()
class RHEVChild(MultinodeChild):
    parent_class = RHEV


@metadata()
class DockerChild(MultinodeChild):
    parent_class = Docker

    # For Docker children, "type" distinguishes image/host/container.
    @property
    def image(self):
        return self.data["type"] == "image"

    @property
    def host(self):
        return self.data["type"] == "host"

    @property
    def container(self):
        return self.data["type"] == "container"
{ "content_hash": "6edb0006af283a396203fbef6f6ce35e", "timestamp": "", "source": "github", "line_count": 141, "max_line_length": 80, "avg_line_length": 23.843971631205672, "alnum_prop": 0.6124330755502677, "repo_name": "wcmitchell/insights-core", "id": "5ded76c3799cd439e3082390459afff07fc13ff9", "size": "3362", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "insights/parsers/multinode.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Clojure", "bytes": "19339" }, { "name": "Jupyter Notebook", "bytes": "91793" }, { "name": "Python", "bytes": "3414025" }, { "name": "Shell", "bytes": "2274" } ], "symlink_target": "" }
import pytest

from vtdiscourse import vtdiscourse


def test_sumary():
    # Parsing the gitbook SUMMARY.md for the directors-election repo
    # should yield exactly four entries.
    parser = vtdiscourse.Parser(name='directors-election-gitbook',
                                githubfile='SUMMARY.md')
    assert len(parser.get_summary) == 4
{ "content_hash": "f20cfec19be813c1ecfb161bc0b74455", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 89, "avg_line_length": 23.3, "alnum_prop": 0.7339055793991416, "repo_name": "chairco/vtdiscourse", "id": "cc6dcec5ee82a72b79ca91526d163c2acbd2648c", "size": "257", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_vtdiscourse.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22602" } ], "symlink_target": "" }
from collections import namedtuple import numpy as np from .. import names, prims from ..ndtypes import (IntT, FloatT, TupleT, FnT, Type, BoolT, NoneT, Float32, Float64, Bool, ClosureT, ScalarT, PtrT, NoneType, ArrayT, SliceT, TypeValueT) from ..syntax import (Const, Var, PrimCall, Attribute, TupleProj, Tuple, ArrayView, Expr, Closure, TypedFn) # from ..syntax.helpers import get_types import type_mappings from base_compiler import BaseCompiler CompiledFlatFn = namedtuple("CompiledFlatFn", ("name", "sig", "src", "extra_objects", "extra_functions", "extra_function_signatures", "declarations")) # mapping from (field_types, struct_name, field_names) to type names _struct_type_names = {} # mapping from struct name to decl _struct_type_decls = {} class FnCompiler(BaseCompiler): def __init__(self, module_entry = False, struct_type_cache = None, **kwargs): BaseCompiler.__init__(self, **kwargs) self.declarations = [] # depends on these .o files self.extra_objects = set([]) # to avoid adding the same function's source twice # we use its signature as a key self.extra_functions = {} self.extra_function_signatures = [] # are we actually compiling the entry-point into a Python module? 
# if so, expect some of the methods like visit_Return to be overloaded # to return PyObjects self.module_entry = module_entry def add_decl(self, decl): if decl not in self.declarations: self.declarations.append(decl) def ptr_struct_type(self, elt_t): # need both an actual data pointer # and an optional PyObject base field_types = ["%s*" % self.to_ctype(elt_t), "PyObject*"] return self.struct_type_from_fields(field_types, struct_name = "%s_ptr_type" % elt_t, field_names = ["raw_ptr", "base"]) def array_struct_type(self, elt_t, rank): ptr_field_t = self.ptr_struct_type(elt_t) field_types = [ptr_field_t, "npy_intp", "npy_intp", "int64_t", "int64_t"] field_names = ["data", "shape", "strides", "offset", "size"] field_repeats = {} field_repeats["shape"] = rank field_repeats["strides"] = rank return self.struct_type_from_fields(field_types, "array_type", field_names, field_repeats) def slice_struct_type(self, start_t = "int64_t", stop_t = "int64_t", step_t = "int64_t"): field_types = [start_t, stop_t, step_t] field_names = ["start", "stop", "step"] return self.struct_type_from_fields(field_types, "slice_type", field_names) def struct_type_from_fields(self, field_types, struct_name = "tuple_type", field_names = None, field_repeats = {}): if any(not isinstance(t, str) for t in field_types): field_types = tuple(self.to_ctype(t) if isinstance(t, Type) else t for t in field_types) else: field_types = tuple(field_types) if field_names is None: field_names = tuple("elt%d" % i for i in xrange(len(field_types))) else: assert len(field_names) == len(field_types), \ "Mismatching number of types %d and field names %d" % (len(field_types), len(field_names)) field_names = tuple(field_names) repeat_set = frozenset(sorted(field_repeats.items())) key = field_types, struct_name, field_names, repeat_set if key in _struct_type_names: typename = _struct_type_names[key] decl = _struct_type_decls[typename] if decl not in self.declarations: self.declarations.append(decl) return typename typename 
= names.fresh(struct_name).replace(".", "") field_decls = [] for t, field_name in zip(field_types, field_names): if field_name in field_repeats: field_decl = " %s %s[%d];" % (t, field_name, field_repeats[field_name]) else: field_decl = " %s %s;" % (t, field_name) field_decls.append(field_decl) decl = "typedef struct %s {\n%s\n} %s;" % (typename, "\n".join(field_decls), typename) _struct_type_names[key] = typename _struct_type_decls[typename] = decl self.add_decl(decl) return typename def to_ctype(self, parakeet_type): if isinstance(parakeet_type, (NoneT, ScalarT)): return type_mappings.to_ctype(parakeet_type) elif isinstance(parakeet_type, TupleT): return self.struct_type_from_fields(parakeet_type.elt_types) elif isinstance(parakeet_type, PtrT): return self.ptr_struct_type(parakeet_type.elt_type) elif isinstance(parakeet_type, ArrayT): elt_t = parakeet_type.elt_type rank = parakeet_type.rank return self.array_struct_type(elt_t, rank) elif isinstance(parakeet_type, SliceT): return self.slice_struct_type() elif isinstance(parakeet_type, ClosureT): return self.struct_type_from_fields(parakeet_type.arg_types) elif isinstance(parakeet_type, TypeValueT): return "int" else: assert False, "Don't know how to make C type for %s" % parakeet_type def to_ctypes(self, ts): return tuple(self.to_ctype(t) for t in ts) def visit_Slice(self, expr): typename = self.to_ctype(expr.type) start = self.visit_expr(expr.start) stop = self.visit_expr(expr.stop) step = self.visit_expr(expr.step) return self.fresh_var(typename, "slice", "{%s, %s, %s}" % (start,stop,step)) def visit_Alloc(self, expr): elt_t = expr.elt_type nelts = self.fresh_var("npy_intp", "nelts", self.visit_expr(expr.count)) bytes_per_elt = elt_t.nbytes nbytes = self.mul(nelts, bytes_per_elt)#"%s * %d" % (nelts, bytes_per_elt) raw_ptr = "(%s) malloc(%s)" % (type_mappings.to_ctype(expr.type), nbytes) struct_type = self.to_ctype(expr.type) return self.fresh_var(struct_type, "new_ptr", "{%s, NULL}" % raw_ptr) def 
visit_Const(self, expr): t = expr.type c = t.__class__ if c == BoolT: return "1" if expr.value else "0" elif c == NoneT: return "0" assert isinstance(t, ScalarT), "Don't know how to translate Const %s : %s" % (expr,t) v = expr.value if np.isinf(v): return "INFINITY" elif np.isnan(v): return "NAN" return "%s" % expr.value def visit_Var(self, expr): return self.name(expr.name) def visit_Cast(self, expr): x = self.visit_expr(expr.value) ct = self.to_ctype(expr.type) if isinstance(expr, (Const, Var)): return "(%s) %s" % (ct, x) else: return "((%s) (%s))" % (ct, x) def visit_PrimCall(self, expr): t = expr.type args = self.visit_expr_list(expr.args) # parenthesize any compound expressions for i, arg_expr in enumerate(expr.args): if not isinstance(arg_expr, (Var, Const)): args[i] = "(" + args[i] + ")" p = expr.prim if p == prims.add: #return "%s + %s" % (args[0], args[1]) return self.add(args[0], args[1]) if p == prims.subtract: #return "%s - %s" % (args[0], args[1]) return self.sub(args[0],args[1]) elif p == prims.multiply: return self.mul(args[0], args[1]) # return "%s * %s" % (args[0], args[1]) elif p == prims.divide: return self.div(args[0], args[1]) # return "%s / %s" % (args[0], args[1]) elif p == prims.negative: if t == Bool: return "1 - %s" % args[0] else: return "-%s" % args[0] elif p == prims.abs: x = args[0] return " %s >= 0 ? 
%s : -%s" % (x,x,x) elif p == prims.bitwise_and: return "%s & %s" % (args[0], args[1]) elif p == prims.bitwise_or: return "%s | %s" % (args[0], args[1]) elif p == prims.bitwise_not: return "~%s" % args[0] elif p == prims.logical_and: return self.and_(args[0], args[1]) elif p == prims.logical_or: return self.or_(args[0], args[1]) elif p == prims.logical_not: return self.not_(args[0]) elif p == prims.equal: return self.eq(args[0], args[1], t) elif p == prims.not_equal: return self.neq(args[0], args[1], t) elif p == prims.greater: return self.gt(args[0], args[1], t) elif p == prims.greater_equal: return self.gte(args[0], args[1], t) elif p == prims.less: return self.lt(args[0], args[1], t) elif p == prims.less_equal: return self.lte(args[0], args[1], t) elif p == prims.remainder: x,y = args if t == Float32: return "fmod(%s, %s)" % (x,y) elif t == Float64: return "fmod(%s, %s)" % (x,y) assert isinstance(t, (BoolT, IntT)), "Modulo not implemented for %s" % t rem = self.fresh_var(t, "rem", "%s %% %s" % (x,y)) y_is_negative = self.fresh_var(t, "y_is_negative", "%s < 0" % y) rem_is_negative = self.fresh_var(t, "rem_is_negative", "%s < 0" % rem) y_nonzero = self.fresh_var(t, "y_nonzero", "%s != 0" % y) rem_nonzero = self.fresh_var(t, "rem_nonzero", "%s != 0" % rem) neither_zero = self.fresh_var(t, "neither_zero", "%s && %s" % (y_nonzero, rem_nonzero)) diff_signs = self.fresh_var(t, "diff_signs", "%s ^ %s" % (y_is_negative, rem_is_negative)) should_flip = self.fresh_var(t, "should_flip", "%s && %s" % (neither_zero, diff_signs)) flipped_rem = self.fresh_var(t, "flipped_rem", "%s + %s" % (y, rem)) return "%s ? %s : %s" % (should_flip, flipped_rem, rem) elif p == prims.fmod: if t == Float32: return "fmodf(%s, %s)" % (args[0], args[1]) elif t == Float64: return "fmod(%s, %s)" % (args[0], args[1]) return "%s %% %s" % (args[0], args[1]) elif p == prims.maximum: x,y = args return "(%s > %s) ? %s : %s" % (x,y,x,y) elif p == prims.minimum: x,y = args return "(%s < %s) ? 
%s : %s" % (x,y,x,y) elif p == prims.power: if t == Float32: return "powf(%s, %s)" % (args[0], args[1]) else: return "pow(%s, %s)" % (args[0], args[1]) elif isinstance(t, FloatT): # many float prims implemented using the same name in math.h name = p.name if name.startswith("arc"): # arccos -> acos name = "a" + name[3:] if t == Float32: name = name + "f" if len(args) == 1: return "%s(%s)" % (name, args[0]) else: assert len(args) == 2, "Unexpected prim %s with %d args (%s)" % (p, len(args), args) return "%s(%s, %s)" % (name, args[0], args[1]) else: assert False, "Prim not yet implemented: %s" % p def visit_Index(self, expr): arr = self.visit_expr(expr.value) if isinstance(expr.index.type, ScalarT): index_exprs = [expr.index] else: assert isinstance(expr.index.type, TupleT), \ "Unexpected index %s : %s" % (expr.index, expr.index.type) if isinstance(expr.index, Tuple): index_exprs = expr.index.elts else: index_exprs = [TupleProj(expr.index, i, type = t) for i, t in enumerate(expr.index.type.elt_types) ] assert all(isinstance(idx_expr.type, ScalarT) for idx_expr in index_exprs), \ "Expected all indices to be scalars but got %s" % (index_exprs,) indices = [self.visit_expr(idx_expr) for idx_expr in index_exprs] if isinstance(expr.value.type, PtrT): assert len (indices) == 1, \ "Can't index into pointer using %d indices (%s)" % (len(indices), index_exprs) raw_ptr = "%s.raw_ptr" % arr offset = indices[0] else: assert isinstance(expr.value.type, ArrayT) offset = self.fresh_var("int64_t", "offset", "%s.offset" % arr) for i, idx in enumerate(indices): stride = "%s.strides[%d]" % (arr, i) self.append("%s += %s * %s;" % (offset, idx, stride)) raw_ptr = "%s.data.raw_ptr" % arr return "%s[%s]" % (raw_ptr, offset) def visit_Call(self, expr): fn_name = self.get_fn_name(expr.fn) closure_args = self.get_closure_args(expr.fn) args = self.visit_expr_list(expr.args) return "%s(%s)" % (fn_name, ", ".join(tuple(closure_args) + tuple(args))) def visit_Select(self, expr): cond = 
self.visit_expr(expr.cond) true = self.visit_expr(expr.true_value) false = self.visit_expr(expr.false_value) return "%s ? %s : %s" % (cond, true, false) def is_pure(self, expr): return expr.__class__ in (Var, Const, PrimCall, Attribute, TupleProj, Tuple, ArrayView) def visit_Assign(self, stmt): rhs = self.visit_expr(stmt.rhs) if stmt.lhs.__class__ is Var: lhs = self.visit_expr(stmt.lhs) return "%s %s = %s;" % (self.to_ctype(stmt.lhs.type), lhs, rhs) elif stmt.lhs.__class__ is Tuple: struct_value = self.fresh_var(self.to_ctype(stmt.lhs.type), "lhs_tuple") self.assign(struct_value, rhs) for i, lhs_var in enumerate(stmt.lhs.elts): assert isinstance(lhs_var, Var), "Expected LHS variable, got %s" % lhs_var c_name = self.visit_expr(lhs_var) self.append("%s %s = %s.elt%d;" % (self.to_ctype(lhs_var.type), c_name, struct_value, i )) return "" else: lhs = self.visit_expr(stmt.lhs) return "%s = %s;" % (lhs, rhs) def declare(self, parakeet_name, parakeet_type, init_value = None): c_name = self.name(parakeet_name) t = self.to_ctype(parakeet_type) if init_value is None: self.append("%s %s;" % (t, c_name)) else: self.append("%s %s = %s;" % (t, c_name, init_value)) def declare_merge_vars(self, merge): """ Declare but don't initialize """ for (name, (left, _)) in merge.iteritems(): self.declare(name, left.type) def visit_merge_left(self, merge, fresh_vars = True): if len(merge) == 0: return "" self.push() self.comment("Merge Phi Nodes (left side) " + str(merge)) for (name, (left, _)) in merge.iteritems(): c_left = self.visit_expr(left) if fresh_vars: self.declare(name, left.type, c_left) else: c_name = self.name(name) self.append("%s = %s;" % (c_name, c_left)) return self.pop() def visit_merge_right(self, merge): if len(merge) == 0: return "" self.push() self.comment("Merge Phi Nodes (right side) " + str(merge)) for (name, (_, right)) in merge.iteritems(): c_right = self.visit_expr(right) self.append("%s = %s;" % (self.name(name), c_right)) return self.pop() def 
visit_NumCores(self, expr): # by default we're running sequentially return "1" def visit_Comment(self, stmt): return "// " + stmt.text def visit_PrintString(self, stmt): self.printf(stmt.text) return "// done with printf" def visit_SourceExpr(self, expr): return expr.text def visit_SourceStmt(self, stmt): return stmt.text def visit_If(self, stmt): self.declare_merge_vars(stmt.merge) cond = self.visit_expr(stmt.cond) true = self.visit_block(stmt.true) + self.visit_merge_left(stmt.merge, fresh_vars = False) false = self.visit_block(stmt.false) + self.visit_merge_right(stmt.merge) return self.indent("if(%s) {\n%s\n} else {\n%s\n}" % (cond, self.indent(true), self.indent(false))) def visit_While(self, stmt): decls = self.visit_merge_left(stmt.merge, fresh_vars = True) cond = self.visit_expr(stmt.cond) body = self.visit_block(stmt.body) + self.visit_merge_right(stmt.merge) return decls + "while (%s) {%s}" % (cond, body) def visit_ExprStmt(self, stmt): return self.visit_expr(stmt.value) + ";" def visit_ForLoop(self, stmt): s = self.visit_merge_left(stmt.merge, fresh_vars = True) start = self.visit_expr(stmt.start) stop = self.visit_expr(stmt.stop) step = self.visit_expr(stmt.step) var = self.visit_expr(stmt.var) t = self.to_ctype(stmt.var.type) body = self.visit_block(stmt.body) body += self.visit_merge_right(stmt.merge) body = self.indent("\n" + body) s += "\n %(t)s %(var)s;" up_loop = \ "\nfor (%(var)s = %(start)s; %(var)s < %(stop)s; %(var)s += %(step)s) {%(body)s}" down_loop = \ "\nfor (%(var)s = %(start)s; %(var)s > %(stop)s; %(var)s += %(step)s) {%(body)s}" if stmt.step.__class__ is Const: if stmt.step.value >= 0: s += up_loop else: s += down_loop else: s += "if(%(step)s >= 0) {\n" s += up_loop s += "\n} else {\n" s += down_loop s += "\n}" return s % locals() def visit_Return(self, stmt): assert not self.return_by_ref, "Returning multiple values by ref not yet implemented: %s" % stmt if self.return_void: return "return;" elif isinstance(stmt.value, Tuple): # if not 
returning multiple values by reference, then make a struct for them struct_type = self.to_ctype(stmt.value.type) result_elts = ", ".join(self.visit_expr(elt) for elt in stmt.value.elts) result_value = "{" + result_elts + "}" result = self.fresh_var(struct_type, "result", result_value) return "return %s;" % result else: v = self.visit_expr(stmt.value) return "return %s;" % v def visit_block(self, stmts, push = True): if push: self.push() for stmt in stmts: s = self.visit_stmt(stmt) self.append(s) self.append("\n") return self.indent("\n" + self.pop()) # TODO: set inline=True # currently causes shared lib loading errors since symbol for # inlined function gets stripped out of the .so def get_fn_name(self, expr, compiler_kwargs = {}, attributes = [], inline = True): if expr.__class__ is TypedFn: fn = expr elif expr.__class__ is Closure: fn = expr.fn else: assert isinstance(expr.type, (FnT, ClosureT)), \ "Expected function or closure, got %s : %s" % (expr, expr.type) fn = expr.type.fn compiler = self.__class__(module_entry = False, **compiler_kwargs) compiled = compiler.compile_flat_source(fn, attributes = attributes, inline = inline) if compiled.sig not in self.extra_function_signatures: # add any declarations it depends on for decl in compiled.declarations: self.add_decl(decl) #add any external objects it wants to be linked against self.extra_objects.update(compiled.extra_objects) # first add the new function's dependencies for extra_sig in compiled.extra_function_signatures: if extra_sig not in self.extra_function_signatures: self.extra_function_signatures.append(extra_sig) self.extra_functions[extra_sig] = compiled.extra_functions[extra_sig] # now add the function itself self.extra_function_signatures.append(compiled.sig) self.extra_functions[compiled.sig] = compiled.src for link_flag in compiler.extra_link_flags: if link_flag not in self.extra_link_flags: self.extra_link_flags.append(link_flag) for compile_flag in compiler.extra_compile_flags: if compile_flag not 
in self.extra_compile_flags: self.extra_compile_flags.append(compile_flag) return compiled.name def get_closure_args(self, fn): if isinstance(fn.type, FnT): return [] else: assert isinstance(fn, Closure), "Expected closure, got %s : %s" % (fn, fn.type) return self.visit_expr_list(fn.args) def build_loops(self, loop_vars, bounds, body): if len(loop_vars) == 0: return body var = loop_vars[0] bound = bounds[0] nested = self.build_loops(loop_vars[1:], bounds[1:], body) return """ for (%s = 0; %s < %s; ++%s) { %s }""" % (var, var, bound, var, nested ) def visit_TypedFn(self, expr): return self.get_fn_name(expr) def visit_UntypedFn(self, expr): return "{}" def return_types(self, fn): if isinstance(fn.return_type, TupleT): return fn.return_type.elt_types elif isinstance(fn.return_type, NoneT): return [] else: # assert isinstance(fn.return_type, (PtrT, ScalarT)), "Unexpected return type %s" % fn.return_type return [fn.return_type] def visit_flat_fn(self, fn, return_by_ref = False, attributes = None, inline = True): if attributes is None: attributes = [] c_fn_name = names.refresh(fn.name).replace(".", "_") arg_types = [self.to_ctype(t) for t in fn.input_types] arg_names = [self.name(old_arg) for old_arg in fn.arg_names] return_types = self.return_types(fn) n_return = len(return_types) if n_return == 1: return_type = self.to_ctype(return_types[0]) self.return_void = (return_type == NoneType) self.return_by_ref = False elif n_return == 0: return_type = "void" self.return_void = True self.return_by_ref = False elif return_by_ref: return_type = "void" self.return_void = True self.return_by_ref = True self.return_var_types = [self.to_ctype(t) for t in return_types] self.return_var_names = [self.fresh_name("return_value%d" % i) for i in xrange(n_return)] arg_types = arg_types + ["%s*" % t for t in self.return_var_types] arg_names = arg_names + self.return_var_names else: return_type = self.struct_type_from_fields(return_types) self.return_void = False self.return_by_ref = False 
args_str = ", ".join("%s %s" % (t, name) for (t,name) in zip(arg_types,arg_names)) body_str = self.visit_block(fn.body) if inline: attributes = attributes + ["static inline"] attr_str = " ".join(attributes) sig = "%s %s(%s)" % (return_type, c_fn_name, args_str) src = "%s %s {\n\n%s}" % (attr_str, sig, body_str) return c_fn_name, sig, src @property def cache_key(self): """ If we ever need to differentiate compiled function by *how* they were compiled, we can use this cache key to track the class of the compiler or other relevant meta-data """ return self.__class__ _flat_compile_cache = {} def compile_flat_source(self, parakeet_fn, attributes = [], inline = True): # make sure compiled source uses consistent names for tuple and array types, # which both need declarations for their C struct representations struct_types = set(t for t in parakeet_fn.type_env.itervalues() if isinstance(t, (ArrayT, TupleT))) # include your own class in the cache key so that we get distinct code # for derived compilers like OpenMP and CUDA key = parakeet_fn.cache_key, frozenset(struct_types), self.cache_key, tuple(attributes) if key in self._flat_compile_cache: return self._flat_compile_cache[key] name, sig, src = self.visit_flat_fn(parakeet_fn, attributes = attributes, inline = inline) result = CompiledFlatFn( name = name, sig = sig, src = src, extra_objects = self.extra_objects, extra_functions = self.extra_functions, extra_function_signatures = self.extra_function_signatures, declarations = self.declarations) self._flat_compile_cache[key] = result return result
{ "content_hash": "49dc6d78aa252898077d42508bfb0b61", "timestamp": "", "source": "github", "line_count": 665, "max_line_length": 104, "avg_line_length": 35.51278195488722, "alnum_prop": 0.584815379403794, "repo_name": "pombredanne/parakeet", "id": "3d5005e3565f763a79633f7370623b7172f95b01", "size": "23616", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "parakeet/c_backend/fn_compiler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "1008397" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import tempfile from abc import abstractmethod from contextlib import contextmanager from six import binary_type, string_types from twitter.common.collections import maybe_list from pants.backend.jvm.subsystems.jar_tool import JarTool from pants.backend.jvm.targets.java_agent import JavaAgent from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules, JvmBinary, Skip from pants.backend.jvm.tasks.nailgun_task import NailgunTask from pants.base.exceptions import TaskError from pants.binaries.binary_util import safe_args from pants.java.jar.manifest import Manifest from pants.util.contextutil import temporary_dir from pants.util.meta import AbstractClass class Jar(object): """Encapsulates operations to build up or update a jar file. Upon construction the jar is conceptually opened for writes. The write methods are called to add to the jar's contents and then changes are finalized with a call to close. If close is not called the staged changes will be lost. """ class Error(Exception): """Indicates an error creating or updating a jar on disk.""" class Entry(AbstractClass): """An entry to be written to a jar.""" def __init__(self, dest): self._dest = dest @property def dest(self): """The destination path of the entry in the jar.""" return self._dest @abstractmethod def materialize(self, scratch_dir): """Materialize this entry's source data into a filesystem path. :param string scratch_dir: A temporary directory that may be used to do any work required to materialize the entry as a source file. The caller is responsible for cleaning up `scratch_dir` after the jar is closed. :returns: The path to the source data. 
""" class FileSystemEntry(Entry): """An entry backed by an existing file on disk.""" def __init__(self, src, dest=None): super(Jar.FileSystemEntry, self).__init__(dest) self._src = src def materialize(self, _): return self._src class MemoryEntry(Entry): """An entry backed by an in-memory sequence of bytes.""" def __init__(self, dest, contents): super(Jar.MemoryEntry, self).__init__(dest) self._contents = contents def materialize(self, scratch_dir): fd, path = tempfile.mkstemp(dir=scratch_dir) try: os.write(fd, self._contents) finally: os.close(fd) return path def __init__(self): self._entries = [] self._jars = [] self._manifest_entry = None self._main = None self._classpath = None def main(self, main): """Specifies a Main-Class entry for this jar's manifest. :param string main: a fully qualified class name """ if not main or not isinstance(main, string_types): raise ValueError('The main entry must be a non-empty string') self._main = main def classpath(self, classpath): """Specifies a Class-Path entry for this jar's manifest. :param list classpath: a list of paths """ self._classpath = maybe_list(classpath) def write(self, src, dest=None): """Schedules a write of the file at ``src`` to the ``dest`` path in this jar. If the ``src`` is a file, then ``dest`` must be specified. If the ``src`` is a directory then by default all descendant files will be added to the jar as entries carrying their relative path. If ``dest`` is specified it will be prefixed to each descendant's relative path to form its jar entry path. 
:param string src: the path to the pre-existing source file or directory :param string dest: the path the source file or directory should have in this jar """ if not src or not isinstance(src, string_types): raise ValueError('The src path must be a non-empty string, got {} of type {}.'.format( src, type(src))) if dest and not isinstance(dest, string_types): raise ValueError('The dest entry path must be a non-empty string, got {} of type {}.'.format( dest, type(dest))) if not os.path.isdir(src) and not dest: raise self.Error('Source file {} must have a jar destination specified'.format(src)) self._add_entry(self.FileSystemEntry(src, dest)) def writestr(self, path, contents): """Schedules a write of the file ``contents`` to the given ``path`` in this jar. :param string path: the path to write the contents to in this jar :param string contents: the raw byte contents of the file to write to ``path`` """ if not path or not isinstance(path, string_types): raise ValueError('The path must be a non-empty string') if contents is None or not isinstance(contents, binary_type): raise ValueError('The contents must be a sequence of bytes') self._add_entry(self.MemoryEntry(path, contents)) def _add_entry(self, entry): if Manifest.PATH == entry.dest: self._manifest_entry = entry else: self._entries.append(entry) def writejar(self, jar): """Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest. :param string jar: the path to the pre-existing jar to graft into this jar """ if not jar or not isinstance(jar, string_types): raise ValueError('The jar path must be a non-empty string') self._jars.append(jar) @contextmanager def _render_jar_tool_args(self, options): """Format the arguments to jar-tool. 
:param Options options: """ args = [] with temporary_dir() as manifest_stage_dir: classpath = self._classpath or [] def as_cli_entry(entry): src = entry.materialize(manifest_stage_dir) return '{}={}'.format(src, entry.dest) if entry.dest else src files = map(as_cli_entry, self._entries) if self._entries else [] jars = self._jars or [] with safe_args(classpath, options, delimiter=',') as classpath_args: with safe_args(files, options, delimiter=',') as files_args: with safe_args(jars, options, delimiter=',') as jars_args: # If you specify --manifest to jar-tool you cannot specify --main. if self._manifest_entry: manifest_file = self._manifest_entry.materialize(manifest_stage_dir) else: manifest_file = None if self._main and manifest_file: main_arg = None with open(manifest_file, 'a') as f: f.write("Main-Class: {}\n".format(self._main)) else: main_arg = self._main if main_arg: args.append('-main={}'.format(self._main)) if classpath_args: args.append('-classpath={}'.format(','.join(classpath_args))) if manifest_file: args.append('-manifest={}'.format(manifest_file)) if files_args: args.append('-files={}'.format(','.join(files_args))) if jars_args: args.append('-jars={}'.format(','.join(jars_args))) yield args class JarTask(NailgunTask): """A baseclass for tasks that need to create or update jars. All subclasses will share the same underlying nailgunned jar tool and thus benefit from fast invocations. 
""" @classmethod def global_subsystems(cls): return super(JarTask, cls).global_subsystems() + (JarTool, ) @staticmethod def _flag(bool_value): return 'true' if bool_value else 'false' _DUPLICATE_ACTION_TO_NAME = { Duplicate.SKIP: 'SKIP', Duplicate.REPLACE: 'REPLACE', Duplicate.CONCAT: 'CONCAT', Duplicate.FAIL: 'THROW', } @classmethod def _action_name(cls, action): name = cls._DUPLICATE_ACTION_TO_NAME.get(action) if name is None: raise ValueError('Unrecognized duplicate action: {}'.format(action)) return name def __init__(self, *args, **kwargs): super(JarTask, self).__init__(*args, **kwargs) self.set_distribution(jdk=True) # TODO(John Sirois): Consider poking a hole for custom jar-tool jvm args - namely for Xmx # control. @contextmanager def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None): """Yields a Jar that will be written when the context exits. :param string path: the path to the jar file :param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default; ie: update the pre-existing jar at ``path`` :param bool compressed: entries added to the jar should be compressed; ``True`` by default :param jar_rules: an optional set of rules for handling jar exclusions and duplicates """ jar = Jar() try: yield jar except jar.Error as e: raise TaskError('Failed to write to jar at {}: {}'.format(path, e)) with jar._render_jar_tool_args(self.get_options()) as args: if args: # Don't build an empty jar args.append('-update={}'.format(self._flag(not overwrite))) args.append('-compress={}'.format(self._flag(compressed))) jar_rules = jar_rules or JarRules.default() args.append('-default_action={}'.format(self._action_name(jar_rules.default_dup_action))) skip_patterns = [] duplicate_actions = [] for rule in jar_rules.rules: if isinstance(rule, Skip): skip_patterns.append(rule.apply_pattern) elif isinstance(rule, Duplicate): duplicate_actions.append('{}={}'.format( rule.apply_pattern.pattern, self._action_name(rule.action))) else: raise 
ValueError('Unrecognized rule: {}'.format(rule)) if skip_patterns: args.append('-skip={}'.format(','.join(p.pattern for p in skip_patterns))) if duplicate_actions: args.append('-policies={}'.format(','.join(duplicate_actions))) args.append(path) if JarTool.global_instance().run(context=self.context, runjava=self.runjava, args=args): raise TaskError('jar-tool failed') class JarBuilder(AbstractClass): """A utility to aid in adding the classes and resources associated with targets to a jar.""" @staticmethod def _add_agent_manifest(agent, manifest): # TODO(John Sirois): refactor an agent model to support 'Boot-Class-Path' properly. manifest.addentry(Manifest.MANIFEST_VERSION, '1.0') if agent.premain: manifest.addentry('Premain-Class', agent.premain) if agent.agent_class: manifest.addentry('Agent-Class', agent.agent_class) if agent.can_redefine: manifest.addentry('Can-Redefine-Classes', 'true') if agent.can_retransform: manifest.addentry('Can-Retransform-Classes', 'true') if agent.can_set_native_method_prefix: manifest.addentry('Can-Set-Native-Method-Prefix', 'true') @staticmethod def _add_manifest_entries(jvm_binary_target, manifest): """Add additional fields to MANIFEST.MF as declared in the ManifestEntries structure. :param JvmBinary jvm_binary_target: :param Manifest manifest: """ for header, value in jvm_binary_target.manifest_entries.entries.iteritems(): manifest.addentry(header, value) @staticmethod def prepare(round_manager): """Prepares the products needed to use `create_jar_builder`. This method should be called during task preparation to ensure the classes and resources needed for jarring targets are mapped by upstream tasks that generate these. Later, in execute context, the `create_jar_builder` method can be called to get back a prepared ``JarTask.JarBuilder`` ready for use. 
""" round_manager.require_data('resources_by_target') round_manager.require_data('classes_by_target') def __init__(self, context, jar): self._context = context self._jar = jar self._manifest = Manifest() def add_target(self, target, recursive=False): """Adds the classes and resources for a target to an open jar. :param target: The target to add generated classes and resources for. :param bool recursive: `True` to add classes and resources for the target's transitive internal dependency closure. :returns: The list of targets that actually contributed classes or resources or both to the jar. """ classes_by_target = self._context.products.get_data('classes_by_target') resources_by_target = self._context.products.get_data('resources_by_target') targets_added = [] def add_to_jar(tgt): target_classes = classes_by_target.get(tgt) target_resources = [] # TODO(pl): https://github.com/pantsbuild/pants/issues/206 resource_products_on_target = resources_by_target.get(tgt) if resource_products_on_target: target_resources.append(resource_products_on_target) if tgt.has_resources: target_resources.extend(resources_by_target.get(r) for r in tgt.resources) if target_classes or target_resources: targets_added.append(tgt) def add_products(target_products): if target_products: for root, products in target_products.rel_paths(): for prod in products: self._jar.write(os.path.join(root, prod), prod) add_products(target_classes) for resources_target in target_resources: add_products(resources_target) if isinstance(tgt, JavaAgent): self._add_agent_manifest(tgt, self._manifest) if isinstance(target, JvmBinary): self._add_manifest_entries(target, self._manifest) if recursive: target.walk(add_to_jar) else: add_to_jar(target) return targets_added def commit_manifest(self, jar): """Updates the manifest in the jar being written to. Typically done right before closing the .jar. This gives a chance for all targets to bundle in their contributions to the manifest. 
""" if not self._manifest.is_empty(): jar.writestr(Manifest.PATH, self._manifest.contents()) @contextmanager def create_jar_builder(self, jar): """Creates a ``JarTask.JarBuilder`` ready for use. This method should be called during in `execute` context and only after ensuring `JarTask.JarBuilder.prepare` has already been called in `prepare` context. :param jar: An opened ``pants.backend.jvm.tasks.jar_task.Jar`. """ builder = self.JarBuilder(self.context, jar) yield builder builder.commit_manifest(jar)
{ "content_hash": "d9179e36840d756e87b6cd1dc20b7e17", "timestamp": "", "source": "github", "line_count": 410, "max_line_length": 100, "avg_line_length": 35.83658536585366, "alnum_prop": 0.6560266793711291, "repo_name": "scode/pants", "id": "f1b3222a05eb8752da4e0a3a6e3920a06656aa42", "size": "14840", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/python/pants/backend/jvm/tasks/jar_task.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "767" }, { "name": "CSS", "bytes": "11139" }, { "name": "GAP", "bytes": "2459" }, { "name": "Go", "bytes": "1437" }, { "name": "HTML", "bytes": "69479" }, { "name": "Java", "bytes": "302900" }, { "name": "JavaScript", "bytes": "10157" }, { "name": "Protocol Buffer", "bytes": "3783" }, { "name": "Python", "bytes": "3788845" }, { "name": "Scala", "bytes": "76623" }, { "name": "Shell", "bytes": "49094" }, { "name": "Thrift", "bytes": "2583" } ], "symlink_target": "" }
from __future__ import absolute_import import mock import six import time from exam import fixture, patcher from sentry.quotas.redis import ( is_rate_limited, BasicRedisQuota, RedisQuota, ) from sentry.testutils import TestCase from sentry.utils.redis import clusters from six.moves import xrange def test_is_rate_limited_script(): now = int(time.time()) cluster = clusters.get('default') client = cluster.get_local_client(six.next(iter(cluster.hosts))) # The item should not be rate limited by either key. assert list(map(bool, is_rate_limited( client, ('foo', 'r:foo', 'bar', 'r:bar'), (1, now + 60, 2, now + 120))) ) == [False, False] # The item should be rate limited by the first key (1). assert list(map(bool, is_rate_limited( client, ('foo', 'r:foo', 'bar', 'r:bar'), (1, now + 60, 2, now + 120))) ) == [True, False] # The item should still be rate limited by the first key (1), but *not* # rate limited by the second key (2) even though this is the third time # we've checked the quotas. This ensures items that are rejected by a lower # quota don't affect unrelated items that share a parent quota. 
assert list(map(bool, is_rate_limited( client, ('foo', 'r:foo', 'bar', 'r:bar'), (1, now + 60, 2, now + 120))) ) == [True, False] assert client.get('foo') == '1' assert 59 <= client.ttl('foo') <= 60 assert client.get('bar') == '1' assert 119 <= client.ttl('bar') <= 120 # make sure "refund/negative" keys haven't been incremented assert client.get('r:foo') is None assert client.get('r:bar') is None # Test that refunded quotas work client.set('apple', 5) # increment is_rate_limited( client, ('orange', 'baz'), (1, now + 60) ) # test that it's rate limited without refund assert list(map(bool, is_rate_limited( client, ('orange', 'baz'), (1, now + 60) ))) == [True, ] # test that refund key is used assert list(map(bool, is_rate_limited( client, ('orange', 'apple'), (1, now + 60) ))) == [False, ] class RedisQuotaTest(TestCase): quota = fixture(RedisQuota) @patcher.object(RedisQuota, 'get_project_quota') def get_project_quota(self): inst = mock.MagicMock() inst.return_value = (0, 60) return inst @patcher.object(RedisQuota, 'get_organization_quota') def get_organization_quota(self): inst = mock.MagicMock() inst.return_value = (0, 60) return inst def test_uses_defined_quotas(self): self.get_project_quota.return_value = (200, 60) self.get_organization_quota.return_value = (300, 60) quotas = self.quota.get_quotas(self.project) assert quotas[0].key == 'p:{}'.format(self.project.id) assert quotas[0].limit == 200 assert quotas[0].window == 60 assert quotas[1].key == 'o:{}'.format(self.project.organization.id) assert quotas[1].limit == 300 assert quotas[1].window == 60 @mock.patch('sentry.quotas.redis.is_rate_limited') @mock.patch.object(RedisQuota, 'get_quotas', return_value=[]) def test_bails_immediately_without_any_quota(self, get_quotas, is_rate_limited): result = self.quota.is_rate_limited(self.project) assert not is_rate_limited.called assert not result.is_limited @mock.patch('sentry.quotas.redis.is_rate_limited', return_value=(False, False)) def 
test_is_not_limited_without_rejections(self, is_rate_limited): self.get_organization_quota.return_value = (100, 60) self.get_project_quota.return_value = (200, 60) assert not self.quota.is_rate_limited(self.project).is_limited @mock.patch('sentry.quotas.redis.is_rate_limited', return_value=(True, False)) def test_is_limited_on_rejections(self, is_rate_limited): self.get_organization_quota.return_value = (100, 60) self.get_project_quota.return_value = (200, 60) assert self.quota.is_rate_limited(self.project).is_limited @mock.patch.object(RedisQuota, 'get_quotas') @mock.patch('sentry.quotas.redis.is_rate_limited', return_value=(True, False)) def test_not_limited_without_enforce(self, mock_is_rate_limited, mock_get_quotas): mock_get_quotas.return_value = ( BasicRedisQuota( key='p:1', limit=1, window=1, reason_code='project_quota', enforce=False, ), BasicRedisQuota( key='p:2', limit=1, window=1, reason_code='project_quota', enforce=True, ), ) assert not self.quota.is_rate_limited(self.project).is_limited @mock.patch.object(RedisQuota, 'get_quotas') @mock.patch('sentry.quotas.redis.is_rate_limited', return_value=(True, True)) def test_limited_without_enforce(self, mock_is_rate_limited, mock_get_quotas): mock_get_quotas.return_value = ( BasicRedisQuota( key='p:1', limit=1, window=1, reason_code='project_quota', enforce=False, ), BasicRedisQuota( key='p:2', limit=1, window=1, reason_code='project_quota', enforce=True, ), ) assert self.quota.is_rate_limited(self.project).is_limited def test_get_usage(self): timestamp = time.time() self.get_project_quota.return_value = (200, 60) self.get_organization_quota.return_value = (300, 60) n = 10 for _ in xrange(n): self.quota.is_rate_limited(self.project, timestamp=timestamp) quotas = self.quota.get_quotas(self.project) assert self.quota.get_usage( self.project.organization_id, quotas + [ BasicRedisQuota( key='unlimited', limit=0, window=60, reason_code='unlimited', ), BasicRedisQuota( key='dummy', limit=10, window=60, 
reason_code='dummy', ), ], timestamp=timestamp, ) == [n for _ in quotas] + [None, 0] @mock.patch.object(RedisQuota, 'get_quotas') def test_refund(self, mock_get_quotas): timestamp = time.time() mock_get_quotas.return_value = ( BasicRedisQuota( key='p:1', limit=1, window=1, reason_code='project_quota', enforce=False, ), BasicRedisQuota( key='p:2', limit=1, window=1, reason_code='project_quota', enforce=True, ), ) self.quota.refund(self.project, timestamp=timestamp) client = self.quota.cluster.get_local_client_for_key( six.text_type(self.project.organization.pk) ) keys = client.keys('r:quota:p:?:*') assert len(keys) == 2 for key in keys: assert client.get(key) == '1' def test_get_usage_uses_refund(self): timestamp = time.time() self.get_project_quota.return_value = (200, 60) self.get_organization_quota.return_value = (300, 60) n = 10 for _ in xrange(n): self.quota.is_rate_limited(self.project, timestamp=timestamp) self.quota.refund(self.project, timestamp=timestamp) quotas = self.quota.get_quotas(self.project) assert self.quota.get_usage( self.project.organization_id, quotas + [ BasicRedisQuota( key='unlimited', limit=0, window=60, reason_code='unlimited', ), BasicRedisQuota( key='dummy', limit=10, window=60, reason_code='dummy', ), ], timestamp=timestamp, # the - 1 is because we refunded once ) == [n - 1 for _ in quotas] + [None, 0]
{ "content_hash": "cfcd178e3108ce44c2c5f340e3534dda", "timestamp": "", "source": "github", "line_count": 251, "max_line_length": 87, "avg_line_length": 33.64143426294821, "alnum_prop": 0.550213169114164, "repo_name": "looker/sentry", "id": "7beafece996560bf0614e94dbcad53e7497eaad6", "size": "8469", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/sentry/quotas/redis/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "289931" }, { "name": "HTML", "bytes": "241322" }, { "name": "JavaScript", "bytes": "3112298" }, { "name": "Lua", "bytes": "65795" }, { "name": "Makefile", "bytes": "7048" }, { "name": "Python", "bytes": "36341504" }, { "name": "Ruby", "bytes": "204" }, { "name": "Shell", "bytes": "5701" } ], "symlink_target": "" }
import string import sys HEADER = """\ // Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file automatically generated by testing/generate_gmock_mutant.py. // DO NOT EDIT. #ifndef TESTING_GMOCK_MUTANT_H_ #define TESTING_GMOCK_MUTANT_H_ // The intention of this file is to make possible using GMock actions in // all of its syntactic beauty. Classes and helper functions can be used as // more generic variants of Task and Callback classes (see base/task.h) // Mutant supports both pre-bound arguments (like Task) and call-time // arguments (like Callback) - hence the name. :-) // // DispatchToMethod/Function supports two sets of arguments: pre-bound (P) and // call-time (C). The arguments as well as the return type are templatized. // DispatchToMethod/Function will also try to call the selected method or // function even if provided pre-bound arguments does not match exactly with // the function signature hence the X1, X2 ... XN parameters in CreateFunctor. // DispatchToMethod will try to invoke method that may not belong to the // object's class itself but to the object's class base class. // // Additionally you can bind the object at calltime by binding a pointer to // pointer to the object at creation time - before including this file you // have to #define GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING. // // TODO(stoyan): It's yet not clear to me should we use T& and T&* instead // of T* and T** when we invoke CreateFunctor to match the EXPECT_CALL style. 
// // // Sample usage with gMock: // // struct Mock : public ObjectDelegate { // MOCK_METHOD2(string, OnRequest(int n, const string& request)); // MOCK_METHOD1(void, OnQuit(int exit_code)); // MOCK_METHOD2(void, LogMessage(int level, const string& message)); // // string HandleFlowers(const string& reply, int n, const string& request) { // string result = SStringPrintf("In request of %d %s ", n, request); // for (int i = 0; i < n; ++i) result.append(reply) // return result; // } // // void DoLogMessage(int level, const string& message) { // } // // void QuitMessageLoop(int seconds) { // base::MessageLoop* loop = base::MessageLoop::current(); // loop->PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(), // 1000 * seconds); // } // }; // // Mock mock; // // Will invoke mock.HandleFlowers("orchids", n, request) // // "orchids" is a pre-bound argument, and <n> and <request> are call-time // // arguments - they are not known until the OnRequest mock is invoked. // EXPECT_CALL(mock, OnRequest(Ge(5), StartsWith("flower")) // .Times(1) // .WillOnce(Invoke(CreateFunctor(&mock, &Mock::HandleFlowers, // string("orchids")))); // // // // No pre-bound arguments, two call-time arguments passed // // directly to DoLogMessage // EXPECT_CALL(mock, OnLogMessage(_, _)) // .Times(AnyNumber()) // .WillAlways(Invoke(CreateFunctor, &mock, &Mock::DoLogMessage)); // // // // In this case we have a single pre-bound argument - 3. We ignore // // all of the arguments of OnQuit. // EXCEPT_CALL(mock, OnQuit(_)) // .Times(1) // .WillOnce(InvokeWithoutArgs(CreateFunctor( // &mock, &Mock::QuitMessageLoop, 3))); // // MessageLoop loop; // loop.Run(); // // // // Here is another example of how we can set an action that invokes // // method of an object that is not yet created. 
// struct Mock : public ObjectDelegate { // MOCK_METHOD1(void, DemiurgeCreated(Demiurge*)); // MOCK_METHOD2(void, OnRequest(int count, const string&)); // // void StoreDemiurge(Demiurge* w) { // demiurge_ = w; // } // // Demiurge* demiurge; // } // // EXPECT_CALL(mock, DemiurgeCreated(_)).Times(1) // .WillOnce(Invoke(CreateFunctor(&mock, &Mock::StoreDemiurge))); // // EXPECT_CALL(mock, OnRequest(_, StrEq("Moby Dick"))) // .Times(AnyNumber()) // .WillAlways(WithArgs<0>(Invoke( // CreateFunctor(&mock->demiurge_, &Demiurge::DecreaseMonsters)))); // #include "base/memory/linked_ptr.h" #include "base/tuple.h" namespace testing {""" MUTANT = """\ // Interface that is exposed to the consumer, that does the actual calling // of the method. template <typename R, typename Params> class MutantRunner { public: virtual R RunWithParams(const Params& params) = 0; virtual ~MutantRunner() {} }; // Mutant holds pre-bound arguments (like Task). Like Callback // allows call-time arguments. You bind a pointer to the object // at creation time. 
template <typename R, typename T, typename Method, typename PreBound, typename Params> class Mutant : public MutantRunner<R, Params> { public: Mutant(T* obj, Method method, const PreBound& pb) : obj_(obj), method_(method), pb_(pb) { } // MutantRunner implementation virtual R RunWithParams(const Params& params) { return DispatchToMethod<R>(this->obj_, this->method_, pb_, params); } T* obj_; Method method_; PreBound pb_; }; template <typename R, typename Function, typename PreBound, typename Params> class MutantFunction : public MutantRunner<R, Params> { public: MutantFunction(Function function, const PreBound& pb) : function_(function), pb_(pb) { } // MutantRunner implementation virtual R RunWithParams(const Params& params) { return DispatchToFunction<R>(function_, pb_, params); } Function function_; PreBound pb_; }; #ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING // MutantLateBind is like Mutant, but you bind a pointer to a pointer // to the object. This way you can create actions for an object // that is not yet created (has only storage for a pointer to it). template <typename R, typename T, typename Method, typename PreBound, typename Params> class MutantLateObjectBind : public MutantRunner<R, Params> { public: MutantLateObjectBind(T** obj, Method method, const PreBound& pb) : obj_(obj), method_(method), pb_(pb) { } // MutantRunner implementation. virtual R RunWithParams(const Params& params) { EXPECT_THAT(*this->obj_, testing::NotNull()); if (NULL == *this->obj_) return R(); return DispatchToMethod<R>( *this->obj_, this->method_, pb_, params); } T** obj_; Method method_; PreBound pb_; }; #endif // Simple MutantRunner<> wrapper acting as a functor. 
// Redirects operator() to MutantRunner<Params>::Run() template <typename R, typename Params> struct MutantFunctor { explicit MutantFunctor(MutantRunner<R, Params>* cb) : impl_(cb) { } ~MutantFunctor() { } inline R operator()() { return impl_->RunWithParams(base::Tuple<>()); } template <typename Arg1> inline R operator()(const Arg1& a) { return impl_->RunWithParams(Params(a)); } template <typename Arg1, typename Arg2> inline R operator()(const Arg1& a, const Arg2& b) { return impl_->RunWithParams(Params(a, b)); } template <typename Arg1, typename Arg2, typename Arg3> inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c) { return impl_->RunWithParams(Params(a, b, c)); } template <typename Arg1, typename Arg2, typename Arg3, typename Arg4> inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c, const Arg4& d) { return impl_->RunWithParams(Params(a, b, c, d)); } private: // We need copy constructor since MutantFunctor is copied few times // inside GMock machinery, hence no DISALLOW_EVIL_CONTRUCTORS MutantFunctor(); linked_ptr<MutantRunner<R, Params> > impl_; }; """ FOOTER = """\ } // namespace testing #endif // TESTING_GMOCK_MUTANT_H_""" # Templates for DispatchToMethod/DispatchToFunction functions. # template_params - typename P1, typename P2.. typename C1.. # prebound - Tuple<P1, .. PN> # calltime - Tuple<C1, .. CN> # args - p.a, p.b.., c.a, c.b.. DISPATCH_TO_METHOD_TEMPLATE = """\ template <typename R, typename T, typename Method, %(template_params)s> inline R DispatchToMethod(T* obj, Method method, const %(prebound)s& p, const %(calltime)s& c) { return (obj->*method)(%(args)s); } """ DISPATCH_TO_FUNCTION_TEMPLATE = """\ template <typename R, typename Function, %(template_params)s> inline R DispatchToFunction(Function function, const %(prebound)s& p, const %(calltime)s& c) { return (*function)(%(args)s); } """ # Templates for CreateFunctor functions. # template_params - typename P1, typename P2.. typename C1.. typename X1.. 
# prebound - Tuple<P1, .. PN>
# calltime - Tuple<A1, .. AN>
# params - X1,.. , A1, ..
# args - const P1& p1 ..
# call_args - p1, p2, p3..
CREATE_METHOD_FUNCTOR_TEMPLATE = """\
template <typename R, typename T, typename U, %(template_params)s>
inline MutantFunctor<R, %(calltime)s>
CreateFunctor(T* obj, R (U::*method)(%(params)s), %(args)s) {
  MutantRunner<R, %(calltime)s>* t =
      new Mutant<R, T, R (U::*)(%(params)s),
                 %(prebound)s, %(calltime)s>
          (obj, method, base::MakeTuple(%(call_args)s));
  return MutantFunctor<R, %(calltime)s>(t);
}
"""

CREATE_FUNCTION_FUNCTOR_TEMPLATE = """\
template <typename R, %(template_params)s>
inline MutantFunctor<R, %(calltime)s>
CreateFunctor(R (*function)(%(params)s), %(args)s) {
  MutantRunner<R, %(calltime)s>* t =
      new MutantFunction<R, R (*)(%(params)s),
                         %(prebound)s, %(calltime)s>
          (function, base::MakeTuple(%(call_args)s));
  return MutantFunctor<R, %(calltime)s>(t);
}
"""


def SplitLine(line, width):
  """Splits a single line at comma, at most |width| characters long.

  Returns a (head, tail) pair; |tail| is None when no split was needed
  (the line fits, or no comma was found inside the first |width| chars).
  """
  if len(line) <= width:
    return (line, None)

  n = 1 + line[:width].rfind(",")
  if n == 0:
    # If comma cannot be found give up and return the entire line.
    # (rfind returned -1, so n == 0.)
    return (line, None)

  # Assume there is a space after the comma
  assert line[n] == " "
  return (line[:n], line[n + 1:])


def Wrap(s, width, subsequent_offset):
  """Wraps a single line |s| at commas so every line is at most
  |width| characters long.

  Continuation lines are indented by |subsequent_offset| spaces.
  """
  w = []
  spaces = " " * subsequent_offset
  while s:
    (f, s) = SplitLine(s, width)
    w.append(f)
    if s:
      s = spaces + s
  return "\n".join(w)


def Clean(s):
  """Cleans artifacts from generated C++ code.

  Our simple string formatting/concatenation may introduce extra commas,
  e.g. "Tuple<P1, >" when an argument list expands to nothing.
  """
  s = s.replace(", >", ">")
  s = s.replace(", )", ")")
  return s


def ExpandPattern(pattern, it):
  """Return list of expanded pattern strings.

  Each string is created by replacing all '%' in |pattern| with element
  of |it|.
  """
  return [pattern.replace("%", x) for x in it]


def Gen(pattern, n, start):
  """Expands pattern replacing '%' with sequential integers starting with
  start.

  Expanded patterns will be joined with comma separator.
  Gen("X%", 3, 1) will return "X1, X2, X3".
  """
  # string.hexdigits supplies the single-character digits '0'-'9','a'-'f';
  # enough for the 0..6 arity range generated by this script.
  # NOTE(review): relies on `import string` from the top of the file
  # (not visible in this chunk) — confirm.
  it = string.hexdigits[start:n + start]
  return ", ".join(ExpandPattern(pattern, it))


def Merge(a):
  # Join the non-empty pieces with ", ", dropping empty expansions.
  return ", ".join(filter(len, a))


def GenTuple(pattern, n):
  return Clean("base::Tuple<%s>" % (Gen(pattern, n, 1)))


def FixCode(s):
  """Re-wraps generated C++ so every line fits in 80 columns."""
  lines = Clean(s).splitlines()
  # Wrap sometimes very long 1st line to be inside the "template <"
  lines[0] = Wrap(lines[0], 80, 10)

  # Wrap all subsequent lines to 6 spaces arbitrarily. This is a 2-space line
  # indent, plus a 4 space continuation indent.
  for line in xrange(1, len(lines)):
    lines[line] = Wrap(lines[line], 80, 6)
  return "\n".join(lines)


def GenerateDispatch(prebound, calltime):
  """Prints DispatchToMethod/DispatchToFunction overloads for the given
  number of prebound and call-time arguments.
  """
  print "\n// %d - %d" % (prebound, calltime)
  args = {
      "template_params": Merge([Gen("typename P%", prebound, 1),
                                Gen("typename C%", calltime, 1)]),
      "prebound": GenTuple("P%", prebound),
      "calltime": GenTuple("C%", calltime),
      "args": Merge([Gen("base::get<%>(p)", prebound, 0),
                     Gen("base::get<%>(c)", calltime, 0)]),
  }

  print FixCode(DISPATCH_TO_METHOD_TEMPLATE % args)
  print FixCode(DISPATCH_TO_FUNCTION_TEMPLATE % args)


def GenerateCreateFunctor(prebound, calltime):
  """Prints the CreateFunctor overload family (method, free function,
  late-bound, and Windows __stdcall variants) for one arity combination.
  """
  print "// %d - %d" % (prebound, calltime)
  args = {
      "calltime": GenTuple("A%", calltime),
      "prebound": GenTuple("P%", prebound),
      "params": Merge([Gen("X%", prebound, 1),
                       Gen("A%", calltime, 1)]),
      "args": Gen("const P%& p%", prebound, 1),
      "call_args": Gen("p%", prebound, 1),
      "template_params": Merge([Gen("typename P%", prebound, 1),
                                Gen("typename A%", calltime, 1),
                                Gen("typename X%", prebound, 1)])
  }

  mutant = FixCode(CREATE_METHOD_FUNCTOR_TEMPLATE % args)
  print mutant

  # Slightly different version for free function call.
  print "\n", FixCode(CREATE_FUNCTION_FUNCTOR_TEMPLATE % args)

  # Functor with pointer to a pointer of the object.
  # Derived textually from |mutant| by rewriting the object parameter and
  # the runner class name, so it stays in sync with the method variant.
  print "\n#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING"
  mutant2 = mutant.replace("CreateFunctor(T* obj,", "CreateFunctor(T** obj,")
  mutant2 = mutant2.replace("new Mutant", "new MutantLateObjectBind")
  mutant2 = mutant2.replace(" " * 17 + "Tuple", " " * 31 + "Tuple")
  print mutant2
  print "#endif  // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING\n"

  # OS_WIN specific. Same functors but with stdcall calling conventions.
  # These are not for WIN64 (x86_64) because there is only one calling
  # convention in WIN64.
  # Functor for method with __stdcall calling conventions.
  print "#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64)"
  stdcall_method = CREATE_METHOD_FUNCTOR_TEMPLATE
  stdcall_method = stdcall_method.replace("U::", "__stdcall U::")
  stdcall_method = FixCode(stdcall_method % args)
  print stdcall_method
  # Functor for free function with __stdcall calling conventions.
  stdcall_function = CREATE_FUNCTION_FUNCTOR_TEMPLATE
  stdcall_function = stdcall_function.replace("R (*", "R (__stdcall *")
  print "\n", FixCode(stdcall_function % args)

  print "#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING"
  stdcall2 = stdcall_method
  stdcall2 = stdcall2.replace("CreateFunctor(T* obj,",
                              "CreateFunctor(T** obj,")
  stdcall2 = stdcall2.replace("new Mutant", "new MutantLateObjectBind")
  stdcall2 = stdcall2.replace(" " * 17 + "Tuple", " " * 31 + "Tuple")
  print stdcall2
  print "#endif  // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING"
  print "#endif  // defined (OS_WIN) && !defined (ARCH_CPU_X86_64)\n"


def main():
  # Emits the whole generated header on stdout: header, the
  # Dispatch* overloads for every 0..6 x 0..6 arity pair, the fixed
  # Mutant machinery, the CreateFunctor overloads, and the footer.
  print HEADER
  for prebound in xrange(0, 6 + 1):
    for args in xrange(0, 6 + 1):
      GenerateDispatch(prebound, args)
  print MUTANT
  for prebound in xrange(0, 6 + 1):
    for args in xrange(0, 6 + 1):
      GenerateCreateFunctor(prebound, args)
  print FOOTER
  return 0


if __name__ == "__main__":
  # NOTE(review): relies on `import sys` from the top of the file
  # (not visible in this chunk) — confirm.
  sys.exit(main())
{ "content_hash": "7397fa7af3c3b1a1144dcec39099f6d0", "timestamp": "", "source": "github", "line_count": 445, "max_line_length": 80, "avg_line_length": 33.05842696629213, "alnum_prop": 0.6557677928081028, "repo_name": "afandria/mojo", "id": "9c5678c2f296df5beada9c5f2ecefee48635fc91", "size": "14900", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "testing/generate_gmock_mutant.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "2031492" }, { "name": "C++", "bytes": "24525248" }, { "name": "Dart", "bytes": "354436" }, { "name": "Go", "bytes": "183671" }, { "name": "Groff", "bytes": "2494" }, { "name": "Java", "bytes": "1244656" }, { "name": "JavaScript", "bytes": "208100" }, { "name": "Makefile", "bytes": "402" }, { "name": "Objective-C", "bytes": "82678" }, { "name": "Objective-C++", "bytes": "389484" }, { "name": "Protocol Buffer", "bytes": "1048" }, { "name": "Python", "bytes": "3524970" }, { "name": "Shell", "bytes": "148167" }, { "name": "nesC", "bytes": "18347" } ], "symlink_target": "" }
"""Unit tests for compute API.""" import contextlib import copy import datetime import iso8601 import mock from mox3 import mox from oslo.utils import timeutils from nova.compute import api as compute_api from nova.compute import arch from nova.compute import cells_api as compute_cells_api from nova.compute import delete_types from nova.compute import flavors from nova.compute import instance_actions from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_mode from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as obj_base from nova.objects import quotas as quotas_obj from nova.openstack.common import uuidutils from nova import quota from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_migration from nova.tests.unit.objects import test_service from nova.volume import cinder FAKE_IMAGE_REF = 'fake-image-ref' NODENAME = 'fakenode1' SHELVED_IMAGE = 'fake-shelved-image' SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound' SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized' SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception' class _ComputeAPIUnitTestMixIn(object): def setUp(self): super(_ComputeAPIUnitTestMixIn, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def _get_vm_states(self, exclude_states=None): vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) if not 
exclude_states: exclude_states = set() return vm_state - exclude_states def _create_flavor(self, **updates): flavor = {'id': 1, 'flavorid': 1, 'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1, 'vcpu_weight': None, 'root_gb': 1, 'ephemeral_gb': 0, 'rxtx_factor': 1, 'swap': 0, 'deleted': 0, 'disabled': False, 'is_public': True, 'deleted_at': None, 'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329), 'updated_at': None, } if updates: flavor.update(updates) return objects.Flavor._from_db_object(self.context, objects.Flavor(), flavor) def _create_instance_obj(self, params=None, flavor=None): """Create a test instance.""" if not params: params = {} if flavor is None: flavor = self._create_flavor() def make_fake_sys_meta(): sys_meta = params.pop("system_metadata", {}) for key in flavors.system_metadata_flavor_props: sys_meta['instance_type_%s' % key] = flavor[key] return sys_meta now = timeutils.utcnow() instance = objects.Instance() instance.metadata = {} instance.metadata.update(params.pop('metadata', {})) instance.system_metadata = make_fake_sys_meta() instance.system_metadata.update(params.pop('system_metadata', {})) instance._context = self.context instance.id = 1 instance.uuid = uuidutils.generate_uuid() instance.cell_name = 'api!child' instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.image_ref = FAKE_IMAGE_REF instance.reservation_id = 'r-fakeres' instance.user_id = self.user_id instance.project_id = self.project_id instance.host = 'fake_host' instance.node = NODENAME instance.instance_type_id = flavor.id instance.ami_launch_index = 0 instance.memory_mb = 0 instance.vcpus = 0 instance.root_gb = 0 instance.ephemeral_gb = 0 instance.architecture = arch.X86_64 instance.os_type = 'Linux' instance.locked = False instance.created_at = now instance.updated_at = now instance.launched_at = now instance.disable_terminate = False instance.info_cache = objects.InstanceInfoCache() if params: instance.update(params) 
instance.obj_reset_changes() return instance def test_create_quota_exceeded_messages(self): image_href = "image_href" image_id = 0 instance_type = self._create_flavor() self.mox.StubOutWithMock(self.compute_api, "_get_image") self.mox.StubOutWithMock(quota.QUOTAS, "limit_check") self.mox.StubOutWithMock(quota.QUOTAS, "reserve") quotas = {'instances': 1, 'cores': 1, 'ram': 1} usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in ['instances', 'cores', 'ram']) quota_exception = exception.OverQuota(quotas=quotas, usages=usages, overs=['instances']) for _unused in range(2): self.compute_api._get_image(self.context, image_href).AndReturn( (image_id, {})) quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()) quota.QUOTAS.reserve(self.context, instances=40, cores=mox.IsA(int), expire=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg(), ram=mox.IsA(int)).AndRaise(quota_exception) self.mox.ReplayAll() for min_count, message in [(20, '20-40'), (40, '40')]: try: self.compute_api.create(self.context, instance_type, "image_href", min_count=min_count, max_count=40) except exception.TooManyInstances as e: self.assertEqual(message, e.kwargs['req']) else: self.fail("Exception not raised") def test_specified_port_and_multiple_instances_neutronv2(self): # Tests that if port is specified there is only one instance booting # (i.e max_count == 1) as we can't share the same port across multiple # instances. 
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        address = '10.0.0.1'
        min_count = 1
        max_count = 2
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(address=address,
                                            port_id=port)])
        self.assertRaises(exception.MultiplePortsNotApplicable,
                          self.compute_api.create, self.context,
                          'fake_flavor', 'image_id', min_count=min_count,
                          max_count=max_count,
                          requested_networks=requested_networks)

    def _test_specified_ip_and_multiple_instances_helper(self,
                                                         requested_networks):
        # Tests that if ip is specified there is only one instance booting
        # (i.e max_count == 1)
        min_count = 1
        max_count = 2
        self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
                          self.compute_api.create, self.context, "fake_flavor",
                          'image_id', min_count=min_count,
                          max_count=max_count,
                          requested_networks=requested_networks)

    def test_specified_ip_and_multiple_instances(self):
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        address = '10.0.0.1'
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=network,
                                            address=address)])
        self._test_specified_ip_and_multiple_instances_helper(
            requested_networks)

    def test_specified_ip_and_multiple_instances_neutronv2(self):
        # Same check as above but with the neutron v2 network API selected.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        address = '10.0.0.1'
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=network,
                                            address=address)])
        self._test_specified_ip_and_multiple_instances_helper(
            requested_networks)

    def test_suspend(self):
        # Ensure instance can be suspended.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)

        # Record phase: expect a save, an action-start record, and the
        # RPC cast (to the cells rpcapi when running as the API cell).
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'suspend_instance')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                instance, instance_actions.SUSPEND)
        rpcapi.suspend_instance(self.context, instance)

        self.mox.ReplayAll()

        self.compute_api.suspend(self.context, instance)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertEqual(task_states.SUSPENDING,
                         instance.task_state)

    def _test_suspend_fails(self, vm_state):
        # Suspending from any state other than ACTIVE must be rejected.
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertIsNone(instance.task_state)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.suspend,
                          self.context, instance)

    def test_suspend_fails_invalid_states(self):
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
        for state in invalid_vm_states:
            self._test_suspend_fails(state)

    def test_resume(self):
        # Ensure instance can be resumed (if suspended).
        instance = self._create_instance_obj(
            params=dict(vm_state=vm_states.SUSPENDED))
        self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
        self.assertIsNone(instance.task_state)

        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'resume_instance')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                instance, instance_actions.RESUME)
        rpcapi.resume_instance(self.context, instance)

        self.mox.ReplayAll()

        self.compute_api.resume(self.context, instance)
        self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
        self.assertEqual(task_states.RESUMING,
                         instance.task_state)

    def test_start(self):
        params = dict(vm_state=vm_states.STOPPED)
        instance = self._create_instance_obj(params=params)

        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                '_record_action_start')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                instance, instance_actions.START)

        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'start_instance')
        rpcapi.start_instance(self.context, instance)

        self.mox.ReplayAll()

        self.compute_api.start(self.context, instance)
        self.assertEqual(task_states.POWERING_ON,
                         instance.task_state)

    def test_start_invalid_state(self):
        # Starting an ACTIVE instance is an invalid state transition.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.start,
                          self.context, instance)

    def test_start_no_host(self):
        # An instance without a host assignment cannot be started yet.
        params = dict(vm_state=vm_states.STOPPED, host='')
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceNotReady,
                          self.compute_api.start,
                          self.context, instance)

    def _test_stop(self, vm_state,
                   force=False, clean_shutdown=True):
        # Common driver for the stop/force_stop tests below.
        # Make sure 'progress' gets reset
        params = dict(task_state=None, progress=99, vm_state=vm_state)
        instance = self._create_instance_obj(params=params)

        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                '_record_action_start')

        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                instance, instance_actions.STOP)

        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'stop_instance')
        rpcapi.stop_instance(self.context, instance, do_cast=True,
                             clean_shutdown=clean_shutdown)

        self.mox.ReplayAll()

        if force:
            self.compute_api.force_stop(self.context, instance,
                                        clean_shutdown=clean_shutdown)
        else:
            self.compute_api.stop(self.context, instance,
                                  clean_shutdown=clean_shutdown)
        self.assertEqual(task_states.POWERING_OFF,
                         instance.task_state)
        self.assertEqual(0, instance.progress)

    def test_stop(self):
        self._test_stop(vm_states.ACTIVE)

    def test_stop_stopped_instance_with_bypass(self):
        # force_stop bypasses the state check, so STOPPED is accepted.
        self._test_stop(vm_states.STOPPED, force=True)

    def test_stop_forced_shutdown(self):
        self._test_stop(vm_states.ACTIVE, force=True)

    def test_stop_without_clean_shutdown(self):
        self._test_stop(vm_states.ACTIVE,
                        clean_shutdown=False)

    def test_stop_forced_without_clean_shutdown(self):
        self._test_stop(vm_states.ACTIVE, force=True,
                        clean_shutdown=False)

    def _test_stop_invalid_state(self, vm_state):
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.stop,
                          self.context, instance)

    def test_stop_fails_invalid_states(self):
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
                                                     vm_states.ERROR]))
        for state in invalid_vm_states:
            self._test_stop_invalid_state(state)

    def test_stop_a_stopped_inst(self):
        params = {'vm_state': vm_states.STOPPED}
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.stop,
                          self.context, instance)

    def test_stop_no_host(self):
        params = {'host': ''}
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceNotReady,
                          self.compute_api.stop,
                          self.context, instance)

    def _test_shelve(self, vm_state=vm_states.ACTIVE,
                     boot_from_volume=False, clean_shutdown=True):
        # Common driver for the shelve tests: volume-backed instances go
        # straight to shelve_offload_instance (no snapshot needed), while
        # image-backed ones get a snapshot and go to shelve_instance.
        params = dict(task_state=None, vm_state=vm_state,
                      display_name='fake-name')
        instance = self._create_instance_obj(params=params)
        with contextlib.nested(
            mock.patch.object(self.compute_api, 'is_volume_backed_instance',
                              return_value=boot_from_volume),
            mock.patch.object(self.compute_api, '_create_image',
                              return_value=dict(id='fake-image-id')),
            mock.patch.object(instance, 'save'),
            mock.patch.object(self.compute_api, '_record_action_start'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'shelve_instance'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'shelve_offload_instance')
        ) as (
            volume_backed_inst, create_image, instance_save,
            record_action_start, rpcapi_shelve_instance,
            rpcapi_shelve_offload_instance
        ):
            self.compute_api.shelve(self.context, instance,
                                    clean_shutdown=clean_shutdown)
            # assert field values set on the instance object
            self.assertEqual(task_states.SHELVING, instance.task_state)
            # assert our mock calls
            volume_backed_inst.assert_called_once_with(
                self.context, instance)
            instance_save.assert_called_once_with(expected_task_state=[None])
            record_action_start.assert_called_once_with(
                self.context, instance, instance_actions.SHELVE)
            if boot_from_volume:
                rpcapi_shelve_offload_instance.assert_called_once_with(
                    self.context, instance=instance,
                    clean_shutdown=clean_shutdown)
            else:
                rpcapi_shelve_instance.assert_called_once_with(
                    self.context, instance=instance, image_id='fake-image-id',
                    clean_shutdown=clean_shutdown)

    def test_shelve(self):
        self._test_shelve()

    def test_shelve_stopped(self):
        self._test_shelve(vm_state=vm_states.STOPPED)

    def test_shelve_paused(self):
        self._test_shelve(vm_state=vm_states.PAUSED)

    def test_shelve_suspended(self):
        self._test_shelve(vm_state=vm_states.SUSPENDED)

    def test_shelve_boot_from_volume(self):
        self._test_shelve(boot_from_volume=True)

    def test_shelve_forced_shutdown(self):
        self._test_shelve(clean_shutdown=False)

    def test_shelve_boot_from_volume_forced_shutdown(self):
        self._test_shelve(boot_from_volume=True,
                          clean_shutdown=False)

    def _test_shelve_invalid_state(self, vm_state):
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.shelve,
                          self.context, instance)

    def test_shelve_fails_invalid_states(self):
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
                                                     vm_states.STOPPED,
                                                     vm_states.PAUSED,
                                                     vm_states.SUSPENDED]))
        for state in invalid_vm_states:
            self._test_shelve_invalid_state(state)

    def _test_shelve_offload(self, clean_shutdown=True):
        params = dict(task_state=None, vm_state=vm_states.SHELVED)
        instance = self._create_instance_obj(params=params)
        with contextlib.nested(
            mock.patch.object(instance, 'save'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'shelve_offload_instance')
        ) as (
            instance_save, rpcapi_shelve_offload_instance
        ):
            self.compute_api.shelve_offload(self.context, instance,
                                            clean_shutdown=clean_shutdown)
            # assert field values set on the instance object
            self.assertEqual(task_states.SHELVING_OFFLOADING,
                             instance.task_state)
            instance_save.assert_called_once_with(expected_task_state=[None])
            rpcapi_shelve_offload_instance.assert_called_once_with(
                self.context, instance=instance,
                clean_shutdown=clean_shutdown)

    def test_shelve_offload(self):
        self._test_shelve_offload()

    def test_shelve_offload_forced_shutdown(self):
        self._test_shelve_offload(clean_shutdown=False)

    def _test_shelve_offload_invalid_state(self, vm_state):
        # Only SHELVED instances may be offloaded.
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.shelve_offload,
                          self.context, instance)

    def test_shelve_offload_fails_invalid_states(self):
        invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED]))
        for state in invalid_vm_states:
            self._test_shelve_offload_invalid_state(state)

    def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
        # Ensure an instance can be rebooted with the given reboot_type
        # ('SOFT' or 'HARD') from the given vm/task state.
        inst = self._create_instance_obj()
        inst.vm_state = vm_state
        inst.task_state = task_state

        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(inst, 'save')
        # A HARD reboot is additionally allowed to pre-empt these
        # in-flight task states.
        expected_task_state = [None]
        if reboot_type == 'HARD':
            expected_task_state.extend([task_states.REBOOTING,
                                        task_states.REBOOT_PENDING,
                                        task_states.REBOOT_STARTED,
                                        task_states.REBOOTING_HARD,
                                        task_states.RESUMING,
                                        task_states.UNPAUSING,
                                        task_states.SUSPENDING])
        inst.save(expected_task_state=expected_task_state)
        self.compute_api._record_action_start(self.context, inst,
                                              instance_actions.REBOOT)

        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
        rpcapi.reboot_instance(self.context, instance=inst,
                               block_device_info=None,
                               reboot_type=reboot_type)
        self.mox.ReplayAll()

        self.compute_api.reboot(self.context, inst, reboot_type)

    def _test_reboot_type_fails(self, reboot_type, **updates):
        # A reboot from the given instance state must be rejected.
        inst = self._create_instance_obj()
        inst.update(updates)

        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.reboot,
                          self.context, inst, reboot_type)

    def test_reboot_hard_active(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD')

    def test_reboot_hard_error(self):
        self._test_reboot_type(vm_states.ERROR, 'HARD')

    def test_reboot_hard_rebooting(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.REBOOTING)

    def test_reboot_hard_reboot_started(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.REBOOT_STARTED)

    def test_reboot_hard_reboot_pending(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.REBOOT_PENDING)

    def test_reboot_hard_rescued(self):
        self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)

    def test_reboot_hard_resuming(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.RESUMING)

    def test_reboot_hard_pausing(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.PAUSING)

    def test_reboot_hard_unpausing(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.UNPAUSING)

    def test_reboot_hard_suspending(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.SUSPENDING)

    def test_reboot_hard_error_not_launched(self):
        self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
                                     launched_at=None)

    def test_reboot_soft(self):
        self._test_reboot_type(vm_states.ACTIVE, 'SOFT')

    def test_reboot_soft_error(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)

    def test_reboot_soft_paused(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)

    def test_reboot_soft_stopped(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)

    def test_reboot_soft_suspended(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)

    def test_reboot_soft_rebooting(self):
        self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)

    def test_reboot_soft_rebooting_hard(self):
        self._test_reboot_type_fails('SOFT',
                                     task_state=task_states.REBOOTING_HARD)

    def test_reboot_soft_reboot_started(self):
        self._test_reboot_type_fails('SOFT',
                                     task_state=task_states.REBOOT_STARTED)

    def test_reboot_soft_reboot_pending(self):
        self._test_reboot_type_fails('SOFT',
                                     task_state=task_states.REBOOT_PENDING)

    def test_reboot_soft_rescued(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)

    def test_reboot_soft_error_not_launched(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
                                     launched_at=None)

    def test_reboot_soft_resuming(self):
        self._test_reboot_type_fails('SOFT', task_state=task_states.RESUMING)

    def test_reboot_soft_pausing(self):
        self._test_reboot_type_fails('SOFT', task_state=task_states.PAUSING)

    def test_reboot_soft_unpausing(self):
        self._test_reboot_type_fails('SOFT', task_state=task_states.UNPAUSING)

    def test_reboot_soft_suspending(self):
        self._test_reboot_type_fails('SOFT',
                                     task_state=task_states.SUSPENDING)

    def _test_delete_resizing_part(self, inst, deltas):
        # Record the expectations for deleting an instance that is mid-resize
        # (task_state RESIZE_FINISH): quota deltas are computed from the old
        # flavor looked up via the 'post-migrating' migration record.
        fake_db_migration = test_migration.fake_db_migration()
        migration = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            fake_db_migration)
        inst.instance_type_id = migration.new_instance_type_id
        old_flavor = self._create_flavor(vcpus=1, memory_mb=512)
        deltas['cores'] = -old_flavor.vcpus
        deltas['ram'] = -old_flavor.memory_mb

        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(flavors, 'get_flavor')

        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, inst.uuid, 'post-migrating').AndReturn(migration)
        flavors.get_flavor(migration.old_instance_type_id).AndReturn(
            old_flavor)

    def _test_delete_resized_part(self, inst):
        # Record the expectations for deleting a RESIZED instance: the
        # pending resize is confirmed (with quota downsizing) before delete.
        migration = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())

        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')

        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, inst.uuid, 'finished').AndReturn(migration)
        self.compute_api._downsize_quota_delta(self.context, inst
                                               ).AndReturn('deltas')
        fake_quotas = objects.Quotas.from_reservations(self.context,
                                                       ['rsvs'])
        self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
                                              ).AndReturn(fake_quotas)
        self.compute_api._record_action_start(
            self.context, inst, instance_actions.CONFIRM_RESIZE)
        # (continuation of _test_delete_resizing_part, def outside this view)
        # Expect the pending resize to be confirmed on the *source* compute
        # with cast=False before the delete proceeds.
        self.compute_api.compute_rpcapi.confirm_resize(
            self.context, inst, migration, migration['source_compute'],
            fake_quotas.reservations, cast=False)

    def _test_delete_shelved_part(self, inst):
        """Record the expected shelved-snapshot image deletion.

        The sentinel stored in system_metadata['shelved_image_id'] selects
        which outcome the stubbed image_api.delete should produce: success,
        ImageNotFound, ImageNotAuthorized, or an unexpected exception.
        """
        image_api = self.compute_api.image_api
        self.mox.StubOutWithMock(image_api, 'delete')
        snapshot_id = inst.system_metadata.get('shelved_image_id')
        if snapshot_id == SHELVED_IMAGE:
            image_api.delete(self.context, snapshot_id).AndReturn(True)
        elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
            image_api.delete(self.context, snapshot_id).AndRaise(
                exception.ImageNotFound(image_id=snapshot_id))
        elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
            image_api.delete(self.context, snapshot_id).AndRaise(
                exception.ImageNotAuthorized(image_id=snapshot_id))
        elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
            image_api.delete(self.context, snapshot_id).AndRaise(
                test.TestingException("Unexpected error"))

    def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
        """Record the local-delete expectations used when the host is down.

        Covers info-cache removal, start/end notifications, network
        deallocation, the instance save, and the direct DB destroy.
        `updates` is mutated with the fields the test later asserts on.
        """
        inst.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context, inst,
            '%s.start' % delete_type)
        self.context.elevated().AndReturn(self.context)
        self.compute_api.network_api.deallocate_for_instance(
            self.context, inst)
        # Soft delete leaves the instance SOFT_DELETED; a hard delete
        # transitions it to DELETED.
        state = (delete_types.SOFT_DELETE in delete_type and
                 vm_states.SOFT_DELETED or vm_states.DELETED)
        updates.update({'vm_state': state,
                        'task_state': None,
                        'terminated_at': delete_time})
        inst.save()
        updates.update({'deleted_at': delete_time, 'deleted': True})
        fake_inst = fake_instance.fake_db_instance(**updates)
        db.instance_destroy(self.context, inst.uuid,
                            constraint=None).AndReturn(fake_inst)
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context, inst,
            '%s.end' % delete_type, system_metadata=inst.system_metadata)

    def _test_delete(self, delete_type, **attrs):
        """Drive compute_api.<delete_type>() and verify the expected flow.

        `attrs` overrides instance fields (vm_state, host, task_state,
        system_metadata, ...) to steer the scenario; the expected RPC
        casts, quota reservations/commits and instance-field updates are
        recorded with mox and checked after the call.
        """
        reservations = ['fake-resv']
        inst = self._create_instance_obj()
        inst.update(attrs)
        inst._context = self.context
        deltas = {'instances': -1,
                  'cores': -inst.vcpus,
                  'ram': -inst.memory_mb}
        delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                        tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(delete_time)
        task_state = (delete_type == delete_types.SOFT_DELETE and
                      task_states.SOFT_DELETING or task_states.DELETING)
        updates = {'progress': 0, 'task_state': task_state}
        if delete_type == delete_types.SOFT_DELETE:
            updates['deleted_at'] = delete_time
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
        self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                 'service_is_up')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(inst.info_cache, 'delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
        if (inst.vm_state in
            (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
            self._test_delete_shelved_part(inst)
        if self.cell_type == 'api':
            # In the API cell the cast goes through the cells rpcapi.
            rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, inst.uuid).AndReturn([])
        inst.save()
        if inst.task_state == task_states.RESIZE_FINISH:
            self._test_delete_resizing_part(inst, deltas)
        quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
                             user_id=inst.user_id,
                             expire=mox.IgnoreArg(),
                             **deltas).AndReturn(reservations)
        # NOTE(comstud): This is getting messy.  But what we are wanting
        # to test is:
        # If cells is enabled and we're the API cell:
        #   * Cast to cells_rpcapi.<method> with reservations=None
        #   * Commit reservations
        # Otherwise:
        #   * Check for downed host
        #   * If downed host:
        #     * Clean up instance, destroying it, sending notifications.
        #       (Tested in _test_downed_host_part())
        #     * Commit reservations
        #   * If not downed host:
        #     * Record the action start.
        #     * Cast to compute_rpcapi.<method> with the reservations
        cast = True
        commit_quotas = True
        soft_delete = False
        if self.cell_type != 'api':
            if inst.vm_state == vm_states.RESIZED:
                self._test_delete_resized_part(inst)
            if inst.vm_state == vm_states.SOFT_DELETED:
                soft_delete = True
            if inst.vm_state != vm_states.SHELVED_OFFLOADED:
                # Offloaded instances have no host to look up.
                self.context.elevated().AndReturn(self.context)
                db.service_get_by_compute_host(
                    self.context, inst.host).AndReturn(
                        test_service.fake_service)
                self.compute_api.servicegroup_api.service_is_up(
                    mox.IsA(objects.Service)).AndReturn(
                        inst.host != 'down-host')
            if (inst.host == 'down-host' or
                    inst.vm_state == vm_states.SHELVED_OFFLOADED):
                self._test_downed_host_part(inst, updates, delete_time,
                                            delete_type)
                cast = False
            else:
                # Happens on the manager side
                commit_quotas = False
        if cast:
            if self.cell_type != 'api':
                self.compute_api._record_action_start(self.context, inst,
                                                      instance_actions.DELETE)
            if commit_quotas or soft_delete:
                cast_reservations = None
            else:
                cast_reservations = reservations
            if delete_type == delete_types.SOFT_DELETE:
                rpcapi.soft_delete_instance(self.context, inst,
                                            reservations=cast_reservations)
            elif delete_type in [delete_types.DELETE,
                                 delete_types.FORCE_DELETE]:
                rpcapi.terminate_instance(self.context, inst, [],
                                          reservations=cast_reservations)
        if commit_quotas:
            # Local delete or when we're testing API cell.
            quota.QUOTAS.commit(self.context, reservations,
                                project_id=inst.project_id,
                                user_id=inst.user_id)
        self.mox.ReplayAll()
        # delete_type doubles as the API method name (delete/soft_delete/
        # force_delete), so dispatch by getattr.
        getattr(self.compute_api, delete_type)(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)
        # Unstub so the helper can be invoked repeatedly in one test.
        self.mox.UnsetStubs()

    def test_delete(self):
        self._test_delete(delete_types.DELETE)

    def test_delete_if_not_launched(self):
        self._test_delete(delete_types.DELETE, launched_at=None)

    def test_delete_in_resizing(self):
        self._test_delete(delete_types.DELETE,
                          task_state=task_states.RESIZE_FINISH)

    def test_delete_in_resized(self):
        self._test_delete(delete_types.DELETE, vm_state=vm_states.RESIZED)

    def test_delete_shelved(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        self._test_delete(delete_types.DELETE,
                          vm_state=vm_states.SHELVED,
                          system_metadata=fake_sys_meta)

    def test_delete_shelved_offloaded(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        self._test_delete(delete_types.DELETE,
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)

    def test_delete_shelved_image_not_found(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
        self._test_delete(delete_types.DELETE,
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)

    def test_delete_shelved_image_not_authorized(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
        self._test_delete(delete_types.DELETE,
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)

    def test_delete_shelved_exception(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
        self._test_delete(delete_types.DELETE,
                          vm_state=vm_states.SHELVED,
                          system_metadata=fake_sys_meta)

    def test_delete_with_down_host(self):
        self._test_delete(delete_types.DELETE, host='down-host')

    def test_delete_soft_with_down_host(self):
        self._test_delete(delete_types.SOFT_DELETE, host='down-host')

    def test_delete_soft(self):
        self._test_delete(delete_types.SOFT_DELETE)

    def test_delete_forced(self):
        """Force-delete across every vm_state; shelved states also get a
        run with a shelved snapshot image in system_metadata."""
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        for vm_state in self._get_vm_states():
            if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
                self._test_delete(delete_types.FORCE_DELETE,
                                  vm_state=vm_state,
                                  system_metadata=fake_sys_meta)
            self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state)

    def test_delete_forced_when_task_state_deleting(self):
        for vm_state in self._get_vm_states():
            self._test_delete(delete_types.FORCE_DELETE,
                              vm_state=vm_state,
                              task_state=task_states.DELETING)

    def test_no_delete_when_task_state_deleting(self):
        """A delete request on an instance already being deleted must roll
        back its quota reservation and not cast terminate_instance."""
        if self.cell_type == 'api':
            # In 'api' cell, the callback terminate_instance will
            # get called, and quota will be committed before returning.
            # It doesn't check for below condition, hence skipping the test.
            """
            if original_task_state in (task_states.DELETING,
                                       task_states.SOFT_DELETING):
                LOG.info(_('Instance is already in deleting state, '
                           'ignoring this request'), instance=instance)
                quotas.rollback()
                return
            """
            self.skipTest("API cell doesn't delete instance directly.")
        attrs = {}
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        for vm_state in self._get_vm_states():
            if vm_state == vm_states.SHELVED:
                attrs.update({'system_metadata': fake_sys_meta})
            if vm_state == vm_states.SHELVED_OFFLOADED:
                # when instance in SHELVED_OFFLOADED state, we assume the
                # instance cannot be in deleting task state, this is same to
                # the case that instance.host is down, deleting locally.
                continue
            attrs.update({'vm_state': vm_state, 'task_state': 'deleting'})
            reservations = ['fake-resv']
            inst = self._create_instance_obj()
            inst.update(attrs)
            inst._context = self.context
            deltas = {'instances': -1,
                      'cores': -inst.vcpus,
                      'ram': -inst.memory_mb}
            delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                            tzinfo=iso8601.iso8601.Utc())
            timeutils.set_time_override(delete_time)
            bdms = []
            migration = objects.Migration._from_db_object(
                self.context, objects.Migration(),
                test_migration.fake_db_migration())
            fake_quotas = objects.Quotas.from_reservations(self.context,
                                                           ['rsvs'])
            image_api = self.compute_api.image_api
            rpcapi = self.compute_api.compute_rpcapi
            # Patch everything the delete path may touch; unlike the mox
            # helpers above, this uses mock so no call ordering is imposed.
            with contextlib.nested(
                mock.patch.object(image_api, 'delete'),
                mock.patch.object(inst, 'save'),
                mock.patch.object(objects.BlockDeviceMappingList,
                                  'get_by_instance_uuid',
                                  return_value=bdms),
                mock.patch.object(objects.Migration,
                                  'get_by_instance_and_status'),
                mock.patch.object(quota.QUOTAS, 'reserve',
                                  return_value=reservations),
                mock.patch.object(self.context, 'elevated',
                                  return_value=self.context),
                mock.patch.object(db, 'service_get_by_compute_host',
                                  return_value=test_service.fake_service),
                mock.patch.object(self.compute_api.servicegroup_api,
                                  'service_is_up',
                                  return_value=inst.host != 'down-host'),
                mock.patch.object(self.compute_api, '_downsize_quota_delta',
                                  return_value=fake_quotas),
                mock.patch.object(self.compute_api, '_reserve_quota_delta'),
                mock.patch.object(self.compute_api, '_record_action_start'),
                mock.patch.object(db, 'instance_update_and_get_original'),
                mock.patch.object(inst.info_cache, 'delete'),
                mock.patch.object(self.compute_api.network_api,
                                  'deallocate_for_instance'),
                mock.patch.object(db, 'instance_system_metadata_get'),
                mock.patch.object(db, 'instance_destroy'),
                mock.patch.object(compute_utils,
                                  'notify_about_instance_usage'),
                mock.patch.object(quota.QUOTAS, 'commit'),
                mock.patch.object(quota.QUOTAS, 'rollback'),
                mock.patch.object(rpcapi, 'confirm_resize'),
                mock.patch.object(rpcapi, 'terminate_instance')
            ) as (
                image_delete,
                save,
                get_by_instance_uuid,
                get_by_instance_and_status,
                reserve,
                elevated,
                service_get_by_compute_host,
                service_is_up,
                _downsize_quota_delta,
                _reserve_quota_delta,
                _record_action_start,
                instance_update_and_get_original,
                delete,
                deallocate_for_instance,
                instance_system_metadata_get,
                instance_destroy,
                notify_about_instance_usage,
                commit,
                rollback,
                confirm_resize,
                terminate_instance
            ):
                if (inst.vm_state in (vm_states.SHELVED,
                                      vm_states.SHELVED_OFFLOADED)):
                    image_delete.return_value = True
                if inst.vm_state == vm_states.RESIZED:
                    get_by_instance_and_status.return_value = migration
                    _downsize_quota_delta.return_value = deltas
                self.compute_api.delete(self.context, inst)
                # Already deleting: quota must be rolled back, no RPC cast.
                self.assertEqual(1, rollback.call_count)
                self.assertEqual(0, terminate_instance.call_count)

    def test_delete_fast_if_host_not_set(self):
        """An instance with no host set is destroyed locally (fast path)
        instead of casting terminate_instance to a compute host."""
        inst = self._create_instance_obj()
        inst.host = ''
        quotas = quotas_obj.Quotas(self.context)
        updates = {'progress': 0, 'task_state': task_states.DELETING}
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(db, 'constraint')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid, use_slave=False).AndReturn([])
        inst.save()
        self.compute_api._create_reservations(self.context,
                                              inst,
                                              inst.task_state,
                                              inst.project_id,
                                              inst.user_id
                                              ).AndReturn(quotas)
        if self.cell_type == 'api':
            # API cell still casts down to the child cell.
            rpcapi.terminate_instance(
                self.context, inst,
                mox.IsA(objects.BlockDeviceMappingList),
                reservations=None)
        else:
            compute_utils.notify_about_instance_usage(
                self.compute_api.notifier, self.context,
                inst, 'delete.start')
            db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
            delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                            tzinfo=iso8601.iso8601.Utc())
            updates['deleted_at'] = delete_time
            updates['deleted'] = True
            fake_inst = fake_instance.fake_db_instance(**updates)
            # Local path destroys the row directly with a host constraint.
            db.instance_destroy(self.context, inst.uuid,
                                constraint='constraint').AndReturn(fake_inst)
            compute_utils.notify_about_instance_usage(
                self.compute_api.notifier, self.context,
                inst, 'delete.end',
                system_metadata=inst.system_metadata)
        self.mox.ReplayAll()
        self.compute_api.delete(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)

    def test_local_delete_with_deleted_volume(self):
        """Local delete must tolerate a volume that is already gone:
        VolumeNotFound from terminate_connection is swallowed and the
        BDM and instance are still destroyed."""
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 42, 'volume_id': 'volume_id',
                 'source_type': 'volume', 'destination_type': 'volume',
                 'delete_on_termination': False}))]

        # NOTE(review): 'rservations' looks like a typo of 'reservations';
        # harmless here as the callback is a stand-in — confirm call site.
        def _fake_do_delete(context, instance, bdms,
                            rservations=None, local=False):
            pass

        inst = self._create_instance_obj()
        inst._context = self.context
        self.mox.StubOutWithMock(inst, 'destroy')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(inst.info_cache, 'delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute_api.volume_api,
                                 'terminate_connection')
        self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
        inst.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context,
            inst, 'delete.start')
        self.context.elevated().MultipleTimes().AndReturn(self.context)
        if self.cell_type != 'api':
            self.compute_api.network_api.deallocate_for_instance(
                self.context, inst)
        self.compute_api.volume_api.terminate_connection(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
            AndRaise(exception.VolumeNotFound('volume_id'))
        bdms[0].destroy()
        inst.destroy()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context,
            inst, 'delete.end',
            system_metadata=inst.system_metadata)
        self.mox.ReplayAll()
        self.compute_api._local_delete(self.context, inst, bdms,
                                       delete_types.DELETE,
                                       _fake_do_delete)

    def test_delete_disabled(self):
        # disable_terminate makes delete a no-op: no DB update expected.
        inst = self._create_instance_obj()
        inst.disable_terminate = True
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.ReplayAll()
        self.compute_api.delete(self.context, inst)

    def test_delete_soft_rollback(self):
        # A failing save during soft delete must propagate the exception.
        inst = self._create_instance_obj()
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(inst, 'save')
        delete_time = datetime.datetime(1955, 11, 5)
        timeutils.set_time_override(delete_time)
        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid, use_slave=False).AndReturn([])
        inst.save().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute_api.soft_delete, self.context, inst)

    def _test_confirm_resize(self, mig_ref_passed=False):
        """Exercise confirm_resize with or without an explicit migration
        object; when not passed the API must look it up by status."""
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'confirm_resize')
        self.context.elevated().AndReturn(self.context)
        if not mig_ref_passed:
            objects.Migration.get_by_instance_and_status(
                self.context, fake_inst['uuid'], 'finished').AndReturn(
                    fake_mig)
        self.compute_api._downsize_quota_delta(self.context,
                                               fake_inst).AndReturn('deltas')
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                              fake_inst).AndReturn(fake_quotas)

        def _check_mig(expected_task_state=None):
            # Migration must be flipped to 'confirming' before the save.
            self.assertEqual('confirming', fake_mig.status)

        fake_mig.save().WithSideEffects(_check_mig)
        if self.cell_type:
            fake_quotas.commit(self.context)
        self.compute_api._record_action_start(self.context, fake_inst,
                                              'confirmResize')
        # Cells commit quotas up front, so the cast carries no reservations.
        self.compute_api.compute_rpcapi.confirm_resize(
            self.context, fake_inst, fake_mig, 'compute-source',
            [] if self.cell_type else fake_quotas.reservations)
        self.mox.ReplayAll()
        if mig_ref_passed:
            self.compute_api.confirm_resize(self.context, fake_inst,
                                            migration=fake_mig)
        else:
            self.compute_api.confirm_resize(self.context, fake_inst)

    def test_confirm_resize(self):
        self._test_confirm_resize()

    def test_confirm_resize_with_migration_ref(self):
        self._test_confirm_resize(mig_ref_passed=True)

    def _test_revert_resize(self):
        """Exercise revert_resize: task state, migration status, quota
        handling and the cast to the destination compute."""
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'revert_resize')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, fake_inst['uuid'], 'finished').AndReturn(
                fake_mig)
        self.compute_api._reverse_upsize_quota_delta(
            self.context, fake_mig).AndReturn('deltas')
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                              fake_inst).AndReturn(fake_quotas)

        def _check_state(expected_task_state=None):
            # Instance must be RESIZE_REVERTING before the save lands.
            self.assertEqual(task_states.RESIZE_REVERTING,
                             fake_inst.task_state)

        fake_inst.save(expected_task_state=[None]).WithSideEffects(
            _check_state)

        def _check_mig(expected_task_state=None):
            self.assertEqual('reverting', fake_mig.status)

        fake_mig.save().WithSideEffects(_check_mig)
        if self.cell_type:
            fake_quotas.commit(self.context)
        self.compute_api._record_action_start(self.context, fake_inst,
                                              'revertResize')
        self.compute_api.compute_rpcapi.revert_resize(
            self.context, fake_inst, fake_mig, 'compute-dest',
            [] if self.cell_type else fake_quotas.reservations)
        self.mox.ReplayAll()
        self.compute_api.revert_resize(self.context, fake_inst)

    def test_revert_resize(self):
        self._test_revert_resize()

    def test_revert_resize_concurent_fail(self):
        """A concurrent task-state change during revert must roll back the
        quota reservation and re-raise UnexpectedTaskStateError."""
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, fake_inst['uuid'],
            'finished').AndReturn(fake_mig)
        delta = ['delta']
        self.compute_api._reverse_upsize_quota_delta(
            self.context, fake_mig).AndReturn(delta)
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(
            self.context, delta, fake_inst).AndReturn(fake_quotas)
        exc = exception.UnexpectedTaskStateError(
            actual=task_states.RESIZE_REVERTING, expected=None)
        fake_inst.save(expected_task_state=[None]).AndRaise(exc)
        fake_quotas.rollback(self.context)
        self.mox.ReplayAll()
        self.assertRaises(exception.UnexpectedTaskStateError,
                          self.compute_api.revert_resize,
                          self.context, fake_inst)

    def _test_resize(self, flavor_id_passed=True,
                     same_host=False, allow_same_host=False,
                     allow_mig_same_host=False,
                     project_id=None,
                     extra_kwargs=None,
                     same_flavor=False):
        """Shared driver for resize and migrate tests.

        flavor_id_passed=False turns the scenario into a migrate (same
        flavor); the *same_host* flags steer the scheduler
        filter_properties assertions.
        """
        if extra_kwargs is None:
            extra_kwargs = {}
        self.flags(allow_resize_to_same_host=allow_same_host,
                   allow_migrate_to_same_host=allow_mig_same_host)
        params = {}
        if project_id is not None:
            # To test instance w/ different project id than context (admin)
            params['project_id'] = project_id
        fake_inst = self._create_instance_obj(params=params)
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        current_flavor = flavors.extract_flavor(fake_inst)
        if flavor_id_passed:
            new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id',
                                             name='new_flavor',
                                             disabled=False)
            if same_flavor:
                cur_flavor = flavors.extract_flavor(fake_inst)
                new_flavor.id = cur_flavor.id
            flavors.get_flavor_by_flavor_id(
                'new-flavor-id', read_deleted='no').AndReturn(new_flavor)
        else:
            new_flavor = current_flavor
        # Resizing to the same flavor is a no-op at the API cell, so the
        # quota/migration/cast expectations only apply otherwise.
        if (self.cell_type == 'compute' or
                not (flavor_id_passed and same_flavor)):
            resvs = ['resvs']
            project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                               fake_inst)
            fake_quotas = objects.Quotas.from_reservations(self.context,
                                                           resvs)
            self.compute_api._upsize_quota_delta(
                self.context, mox.IsA(objects.Flavor),
                mox.IsA(objects.Flavor)).AndReturn('deltas')
            self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                                  fake_inst).AndReturn(fake_quotas)

            def _check_state(expected_task_state=None):
                self.assertEqual(task_states.RESIZE_PREP,
                                 fake_inst.task_state)
                self.assertEqual(fake_inst.progress, 0)
                for key, value in extra_kwargs.items():
                    self.assertEqual(value, getattr(fake_inst, key))

            fake_inst.save(expected_task_state=[None]).WithSideEffects(
                _check_state)
            if allow_same_host:
                filter_properties = {'ignore_hosts': []}
            else:
                filter_properties = {'ignore_hosts': [fake_inst['host']]}
            if not flavor_id_passed and not allow_mig_same_host:
                filter_properties['ignore_hosts'].append(fake_inst['host'])
            expected_reservations = fake_quotas.reservations
            if self.cell_type == 'api':
                fake_quotas.commit(self.context)
                expected_reservations = []
            mig = objects.Migration()

            def _get_migration(context=None):
                return mig

            def _check_mig():
                self.assertEqual(fake_inst.uuid, mig.instance_uuid)
                self.assertEqual(current_flavor.id,
                                 mig.old_instance_type_id)
                self.assertEqual(new_flavor.id,
                                 mig.new_instance_type_id)
                self.assertEqual('finished', mig.status)

            self.stubs.Set(objects, 'Migration', _get_migration)
            self.mox.StubOutWithMock(self.context, 'elevated')
            self.mox.StubOutWithMock(mig, 'create')
            self.context.elevated().AndReturn(self.context)
            mig.create().WithSideEffects(_check_mig)
            if flavor_id_passed:
                self.compute_api._record_action_start(self.context,
                                                      fake_inst, 'resize')
            else:
                self.compute_api._record_action_start(self.context,
                                                      fake_inst, 'migrate')
            scheduler_hint = {'filter_properties': filter_properties}
            self.compute_api.compute_task_api.resize_instance(
                self.context, fake_inst, extra_kwargs,
                scheduler_hint=scheduler_hint,
                flavor=mox.IsA(objects.Flavor),
                reservations=expected_reservations)
        self.mox.ReplayAll()
        if flavor_id_passed:
            self.compute_api.resize(self.context, fake_inst,
                                    flavor_id='new-flavor-id',
                                    **extra_kwargs)
        else:
            self.compute_api.resize(self.context, fake_inst, **extra_kwargs)

    def _test_migrate(self, *args, **kwargs):
        # Migrate is resize without a target flavor.
        self._test_resize(*args, flavor_id_passed=False, **kwargs)

    def test_resize(self):
        self._test_resize()

    def test_resize_with_kwargs(self):
        self._test_resize(extra_kwargs=dict(cow='moo'))

    def test_resize_same_host_and_allowed(self):
        self._test_resize(same_host=True, allow_same_host=True)

    def test_resize_same_host_and_not_allowed(self):
        self._test_resize(same_host=True, allow_same_host=False)

    def test_resize_different_project_id(self):
        self._test_resize(project_id='different')

    def test_migrate(self):
        self._test_migrate()

    def test_migrate_with_kwargs(self):
        self._test_migrate(extra_kwargs=dict(cow='moo'))

    def test_migrate_same_host_and_allowed(self):
        self._test_migrate(same_host=True, allow_same_host=True)

    def test_migrate_same_host_and_not_allowed(self):
        self._test_migrate(same_host=True, allow_same_host=False)

    def test_migrate_different_project_id(self):
        self._test_migrate(project_id='different')

    def test_resize_invalid_flavor_fails(self):
        """Unknown flavor id aborts resize before any quota/cast work."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = self._create_instance_obj()
        exc = exception.FlavorNotFound(flavor_id='flavor-id')
        flavors.get_flavor_by_flavor_id('flavor-id',
                                        read_deleted='no').AndRaise(exc)
        self.mox.ReplayAll()
        self.assertRaises(exception.FlavorNotFound,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')

    def test_resize_disabled_flavor_fails(self):
        """A disabled flavor is rejected the same as an unknown one."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', disabled=True)
        flavors.get_flavor_by_flavor_id(
            'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        self.mox.ReplayAll()
        # Disabled flavors surface as FlavorNotFound to the caller.
        self.assertRaises(exception.FlavorNotFound,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')

    @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
    def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
        # root_gb=0 target flavor cannot hold the instance's disk.
        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', root_gb=0)
        get_flavor_by_flavor_id.return_value = fake_flavor
        self.assertRaises(exception.CannotResizeDisk,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')

    def test_resize_quota_exceeds_fails(self):
        """OverQuota from the reservation step must be translated into
        TooManyInstances and stop the resize before any cast."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = self._create_instance_obj()
        fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
                                          name='foo', disabled=False)
        flavors.get_flavor_by_flavor_id(
            'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        deltas = dict(resource=0)
        self.compute_api._upsize_quota_delta(
            self.context, mox.IsA(objects.Flavor),
            mox.IsA(objects.Flavor)).AndReturn(deltas)
        usage = dict(in_use=0, reserved=0)
        quotas = {'resource': 0}
        usages = {'resource': usage}
        overs = ['resource']
        over_quota_args = dict(quotas=quotas,
                               usages=usages,
                               overs=overs)
        self.compute_api._reserve_quota_delta(self.context, deltas,
                                              fake_inst).AndRaise(
            exception.OverQuota(**over_quota_args))
        self.mox.ReplayAll()
        self.assertRaises(exception.TooManyInstances,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')

    def test_pause(self):
        # Ensure instance can be paused.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'pause_instance')
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance,
                                              instance_actions.PAUSE)
        rpcapi.pause_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.pause(self.context, instance)
        # vm_state is unchanged until the manager acts; only task_state
        # moves to PAUSING at the API layer.
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertEqual(task_states.PAUSING, instance.task_state)

    def _test_pause_fails(self, vm_state):
        """Pause from the given (invalid) vm_state must be rejected."""
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertIsNone(instance.task_state)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.pause,
                          self.context, instance)

    def test_pause_fails_invalid_states(self):
        # Every state except ACTIVE is invalid for pause.
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
        for state in invalid_vm_states:
            self._test_pause_fails(state)

    def test_unpause(self):
        # Ensure instance can be unpaused.
        params = dict(vm_state=vm_states.PAUSED)
        instance = self._create_instance_obj(params=params)
        self.assertEqual(instance.vm_state, vm_states.PAUSED)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance,
                                              instance_actions.UNPAUSE)
        rpcapi.unpause_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.unpause(self.context, instance)
        self.assertEqual(vm_states.PAUSED, instance.vm_state)
        self.assertEqual(task_states.UNPAUSING, instance.task_state)

    def test_swap_volume_volume_api_usage(self):
        # This test ensures that volume_id arguments are passed to volume_api
        # and that volumes return to previous states in case of error.
        # Fake cinder volume-state transitions, tracked in the local
        # ``volumes`` dict so the assertions below can observe rollbacks.
        def fake_vol_api_begin_detaching(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            volumes[volume_id]['status'] = 'detaching'

        def fake_vol_api_roll_detaching(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'detaching':
                volumes[volume_id]['status'] = 'in-use'

        def fake_vol_api_reserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            self.assertEqual(volumes[volume_id]['status'], 'available')
            volumes[volume_id]['status'] = 'attaching'

        def fake_vol_api_unreserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'attaching':
                volumes[volume_id]['status'] = 'available'

        def fake_swap_volume_exc(context, instance, old_volume_id,
                                 new_volume_id):
            raise AttributeError  # Random exception

        # Should fail if VM state is not valid
        instance = {'vm_state': vm_states.BUILDING,
                    'launched_at': timeutils.utcnow(),
                    'locked': False,
                    'availability_zone': 'fake_az',
                    'uuid': 'fake'}
        volumes = {}
        old_volume_id = uuidutils.generate_uuid()
        volumes[old_volume_id] = {'id': old_volume_id,
                                  'display_name': 'old_volume',
                                  'attach_status': 'attached',
                                  'instance_uuid': 'fake',
                                  'size': 5,
                                  'status': 'in-use'}
        new_volume_id = uuidutils.generate_uuid()
        volumes[new_volume_id] = {'id': new_volume_id,
                                  'display_name': 'new_volume',
                                  'attach_status': 'detached',
                                  'instance_uuid': None,
                                  'size': 5,
                                  'status': 'available'}
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None

        # Should fail if old volume is not attached
        volumes[old_volume_id]['attach_status'] = 'detached'
        self.assertRaises(exception.VolumeUnattached,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['attach_status'] = 'attached'

        # Should fail if old volume's instance_uuid is not that of the
        # instance
        volumes[old_volume_id]['instance_uuid'] = 'fake2'
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['instance_uuid'] = 'fake'

        # Should fail if new volume is attached
        volumes[new_volume_id]['attach_status'] = 'attached'
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['attach_status'] = 'detached'

        # Should fail if new volume is smaller than the old volume
        volumes[new_volume_id]['size'] = 4
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['size'] = 5

        # Fail call to swap_volume
        self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
                       fake_vol_api_begin_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
                       fake_vol_api_roll_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
                       fake_vol_api_reserve)
        self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
                       fake_vol_api_unreserve)
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       fake_swap_volume_exc)
        self.assertRaises(AttributeError,
                          self.compute_api.swap_volume, self.context,
                          instance,
                          volumes[old_volume_id],
                          volumes[new_volume_id])
        # After the RPC failure above, both volumes must have been rolled
        # back to their original cinder states.
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')

        # Should succeed
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       lambda c, instance, old_volume_id, new_volume_id:
                           True)
        self.compute_api.swap_volume(self.context, instance,
                                     volumes[old_volume_id],
                                     volumes[new_volume_id])

    def _test_snapshot_and_backup(self, is_snapshot=True,
                                  with_base_ref=False, min_ram=None,
                                  min_disk=None,
                                  create_fails=False,
                                  instance_vm_state=vm_states.ACTIVE):
        # Shared driver for the snapshot/backup tests below: records the
        # expected image-create/save/RPC interactions with mox and then
        # calls compute_api.snapshot() or backup().
        # 'cache_in_nova' is for testing non-inheritable properties
        # 'user_id' should also not be carried from sys_meta into
        # image property...since it should be set explicitly by
        # _create_image() in compute api.
        fake_sys_meta = dict(image_foo='bar', blah='bug?',
                             image_cache_in_nova='dropped',
                             cache_in_nova='dropped',
                             user_id='meow')
        if with_base_ref:
            fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
        params = dict(system_metadata=fake_sys_meta, locked=True)
        instance = self._create_instance_obj(params=params)
        instance.vm_state = instance_vm_state
        fake_sys_meta.update(instance.system_metadata)
        extra_props = dict(cow='moo', cat='meow')

        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(self.compute_api.image_api, 'create')
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'snapshot_instance')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'backup_instance')

        image_type = is_snapshot and 'snapshot' or 'backup'

        # Build the system metadata expected to be carried into the image:
        # 'image_'-prefixed keys keep the prefix stripped; non-inheritable
        # and user_id keys are dropped.
        expected_sys_meta = dict(fake_sys_meta)
        expected_sys_meta.pop('cache_in_nova')
        expected_sys_meta.pop('image_cache_in_nova')
        expected_sys_meta.pop('user_id')
        expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
        if with_base_ref:
            expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
                'image_base_image_ref')

        expected_props = {'instance_uuid': instance.uuid,
                          'user_id': self.context.user_id,
                          'image_type': image_type}
        expected_props.update(extra_props)
        expected_props.update(expected_sys_meta)
        expected_meta = {'name': 'fake-name',
                         'is_public': False,
                         'properties': expected_props}
        if is_snapshot:
            if min_ram is not None:
                expected_meta['min_ram'] = min_ram
            if min_disk is not None:
                expected_meta['min_disk'] = min_disk
        else:
            expected_props['backup_type'] = 'fake-backup-type'

        compute_utils.get_image_metadata(
            self.context, self.compute_api.image_api,
            FAKE_IMAGE_REF, instance).AndReturn(expected_meta)

        fake_image = dict(id='fake-image-id')
        mock_method = self.compute_api.image_api.create(
            self.context, expected_meta)
        if create_fails:
            mock_method.AndRaise(test.TestingException())
        else:
            mock_method.AndReturn(fake_image)

        def check_state(expected_task_state=None):
            # Verify the task_state set by the API at the moment save()
            # is called.
            expected_state = (is_snapshot and
                              task_states.IMAGE_SNAPSHOT_PENDING or
                              task_states.IMAGE_BACKUP)
            self.assertEqual(expected_state, instance.task_state)

        # save() and the compute RPC cast only happen when image creation
        # succeeds.
        if not create_fails:
            instance.save(expected_task_state=[None]).WithSideEffects(
                check_state)
            if is_snapshot:
                self.compute_api.compute_rpcapi.snapshot_instance(
                    self.context, instance, fake_image['id'])
            else:
                self.compute_api.compute_rpcapi.backup_instance(
                    self.context, instance, fake_image['id'],
                    'fake-backup-type', 'fake-rotation')

        self.mox.ReplayAll()

        got_exc = False
        try:
            if is_snapshot:
                res = self.compute_api.snapshot(self.context, instance,
                                                'fake-name',
                                                extra_properties=extra_props)
            else:
                res = self.compute_api.backup(self.context, instance,
                                              'fake-name',
                                              'fake-backup-type',
                                              'fake-rotation',
                                              extra_properties=extra_props)
            self.assertEqual(fake_image, res)
        except test.TestingException:
            got_exc = True
        self.assertEqual(create_fails, got_exc)
        self.mox.UnsetStubs()

    def test_snapshot(self):
        self._test_snapshot_and_backup()

    def test_snapshot_fails(self):
        self._test_snapshot_and_backup(create_fails=True)

    def test_snapshot_invalid_state(self):
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        # A snapshot/backup already in progress, or a BUILDING instance,
        # must make snapshot() raise InstanceInvalidState.
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_BACKUP
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        instance.vm_state = vm_states.BUILDING
        instance.task_state = None
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')

    def test_snapshot_with_base_image_ref(self):
        self._test_snapshot_and_backup(with_base_ref=True)

    def test_snapshot_min_ram(self):
        self._test_snapshot_and_backup(min_ram=42)

    def test_snapshot_min_disk(self):
        self._test_snapshot_and_backup(min_disk=42)

    def test_backup(self):
        # Backup is allowed from all of these vm_states.
        for state in [vm_states.ACTIVE, vm_states.STOPPED,
                      vm_states.PAUSED, vm_states.SUSPENDED]:
            self._test_snapshot_and_backup(is_snapshot=False,
                                           instance_vm_state=state)

    def test_backup_fails(self):
        self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)

    def test_backup_invalid_state(self):
        # Same invalid-state matrix as test_snapshot_invalid_state, but for
        # backup().
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_BACKUP
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')
        instance.vm_state = vm_states.BUILDING
        instance.task_state = None
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
                          self.context, instance, 'fake-name',
                          'fake', 'fake')

    def test_backup_with_base_image_ref(self):
        self._test_snapshot_and_backup(is_snapshot=False,
                                       with_base_ref=True)

    def test_snapshot_volume_backed(self):
        params = dict(locked=True)
        instance = self._create_instance_obj(params=params)
        instance['root_device_name'] = 'vda'
        instance_bdms = []

        image_meta = {
            'id': 'fake-image-id',
            'properties': {'mappings': []},
            'status': 'fake-status',
            'location': 'far-away',
            'owner': 'fake-tenant',
        }

        expect_meta = {
            'name': 'test-snapshot',
            'properties': {'root_device_name': 'vda',
                           'mappings': 'DONTCARE'},
            'size': 0,
            'is_public': False
        }

        def fake_get_all_by_instance(context, instance, use_slave=False):
            return copy.deepcopy(instance_bdms)

        def fake_image_create(context, image_meta, data=None):
            # The image registered for the snapshot must match expect_meta.
            self.assertThat(image_meta, matchers.DictMatches(expect_meta))

        def fake_volume_get(context, volume_id):
            return {'id': volume_id, 'display_description': ''}

        def fake_volume_create_snapshot(context, volume_id, name,
                                        description):
            return {'id': '%s-snapshot' % volume_id}

        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_get_all_by_instance)
        self.stubs.Set(self.compute_api.image_api, 'create',
                       fake_image_create)
        self.stubs.Set(self.compute_api.volume_api, 'get', fake_volume_get)
        self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
                       fake_volume_create_snapshot)

        # No block devices defined
        self.compute_api.snapshot_volume_backed(
            self.context, instance, copy.deepcopy(image_meta),
            'test-snapshot')

        bdm = fake_block_device.FakeDbBlockDeviceDict(
            {'no_device': False, 'volume_id': '1', 'boot_index': 0,
             'connection_info': 'inf', 'device_name': '/dev/vda',
             'source_type': 'volume', 'destination_type': 'volume'})
        instance_bdms.append(bdm)

        expect_meta['properties']['bdm_v2'] = True
        expect_meta['properties']['block_device_mapping'] = []
        expect_meta['properties']['block_device_mapping'].append(
            {'guest_format': None, 'boot_index': 0, 'no_device': None,
             'image_id': None, 'volume_id': None, 'disk_bus': None,
             'volume_size': None, 'source_type': 'snapshot',
             'device_type': None, 'snapshot_id': '1-snapshot',
             'destination_type': 'volume',
             'delete_on_termination': None})

        # All the db_only fields and the volume ones are removed
        self.compute_api.snapshot_volume_backed(
            self.context, instance,
            copy.deepcopy(image_meta), 'test-snapshot')

        image_mappings = [{'virtual': 'ami', 'device': 'vda'},
                          {'device': 'vda', 'virtual': 'ephemeral0'},
                          {'device': 'vdb', 'virtual': 'swap'},
                          {'device': 'vdc', 'virtual': 'ephemeral1'}]
        image_meta['properties']['mappings'] = image_mappings

        expect_meta['properties']['mappings'] = [
            {'virtual': 'ami', 'device': 'vda'}]

        # Check that the mappings from the image properties are included
        self.compute_api.snapshot_volume_backed(
            self.context, instance, copy.deepcopy(image_meta),
            'test-snapshot')

    def test_volume_snapshot_create(self):
        volume_id = '1'
        create_info = {'id': 'eyedee'}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
                'id': 123,
                'device_name': '/dev/sda2',
                'source_type': 'volume',
                'destination_type': 'volume',
                'connection_info': "{'fake': 'connection_info'}",
                'volume_id': 1,
                'boot_index': -1})
        fake_bdm['instance'] = fake_instance.fake_db_instance()
        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
        fake_bdm = objects.BlockDeviceMapping._from_db_object(
                self.context, objects.BlockDeviceMapping(),
                fake_bdm, expected_attrs=['instance'])

        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'volume_snapshot_create')

        # mox record phase.
        objects.BlockDeviceMapping.get_by_volume_id(
                self.context, volume_id,
                expected_attrs=['instance']).AndReturn(fake_bdm)
        self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
                fake_bdm['instance'], volume_id, create_info)

        self.mox.ReplayAll()

        snapshot = self.compute_api.volume_snapshot_create(self.context,
                volume_id, create_info)

        expected_snapshot = {
            'snapshot': {
                'id': create_info['id'],
                'volumeId': volume_id,
            },
        }
        self.assertEqual(snapshot, expected_snapshot)

    def test_volume_snapshot_delete(self):
        volume_id = '1'
        snapshot_id = '2'
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
                'id': 123,
                'device_name': '/dev/sda2',
                'source_type': 'volume',
                'destination_type': 'volume',
                'connection_info': "{'fake': 'connection_info'}",
                'volume_id': 1,
                'boot_index': -1})
        fake_bdm['instance'] = fake_instance.fake_db_instance()
        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
        fake_bdm = objects.BlockDeviceMapping._from_db_object(
                self.context, objects.BlockDeviceMapping(),
                fake_bdm, expected_attrs=['instance'])

        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'volume_snapshot_delete')

        # mox record phase.
        objects.BlockDeviceMapping.get_by_volume_id(
                self.context, volume_id,
                expected_attrs=['instance']).AndReturn(fake_bdm)
        self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
                fake_bdm['instance'], volume_id, snapshot_id, {})

        self.mox.ReplayAll()

        self.compute_api.volume_snapshot_delete(self.context, volume_id,
                snapshot_id, {})

    def _test_boot_volume_bootable(self, is_bootable=False):
        # _get_bdm_image_metadata must reject a non-bootable boot volume
        # and return the synthesized metadata for a bootable one.
        def get_vol_data(*args, **kwargs):
            return {'bootable': is_bootable}
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': '1',
            'delete_on_termination': False,
        }]

        expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
                         'size': 0, 'status': 'active'}

        with mock.patch.object(self.compute_api.volume_api, 'get',
                               side_effect=get_vol_data):
            if not is_bootable:
                self.assertRaises(exception.InvalidBDMVolumeNotBootable,
                                  self.compute_api._get_bdm_image_metadata,
                                  self.context, block_device_mapping)
            else:
                meta = self.compute_api._get_bdm_image_metadata(self.context,
                    block_device_mapping)
                self.assertEqual(expected_meta, meta)

    def test_boot_volume_non_bootable(self):
        self._test_boot_volume_bootable(False)

    def test_boot_volume_bootable(self):
        self._test_boot_volume_bootable(True)

    def test_boot_volume_basic_property(self):
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': '1',
            'delete_on_termination': False,
        }]
        fake_volume = {"volume_image_metadata": {"min_ram": 256,
                                                 "min_disk": 128,
                                                 "foo": "bar"}}
        with mock.patch.object(self.compute_api.volume_api, 'get',
                               return_value=fake_volume):
            meta = self.compute_api._get_bdm_image_metadata(
                self.context, block_device_mapping)
            # volume_image_metadata values are surfaced in the bdm image
            # metadata.
            self.assertEqual(256, meta['min_ram'])
            self.assertEqual(128, meta['min_disk'])
            self.assertEqual('active', meta['status'])
            self.assertEqual('bar', meta['properties']['foo'])

    def test_boot_volume_snapshot_basic_property(self):
        # When booting from a snapshot, the metadata comes from the
        # snapshot's source volume (looked up via get_snapshot then get).
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': '2',
            'volume_id': None,
            'delete_on_termination': False,
        }]
        fake_volume = {"volume_image_metadata": {"min_ram": 256,
                                                 "min_disk": 128,
                                                 "foo": "bar"}}
        fake_snapshot = {"volume_id": "1"}
        with contextlib.nested(
            mock.patch.object(self.compute_api.volume_api, 'get',
                              return_value=fake_volume),
            mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
                              return_value=fake_snapshot)) as (
                volume_get, volume_get_snapshot):
            meta = self.compute_api._get_bdm_image_metadata(
                self.context, block_device_mapping)

            self.assertEqual(256, meta['min_ram'])
            self.assertEqual(128, meta['min_disk'])
            self.assertEqual('active', meta['status'])
            self.assertEqual('bar', meta['properties']['foo'])
            volume_get_snapshot.assert_called_once_with(self.context,
                block_device_mapping[0]['snapshot_id'])
            volume_get.assert_called_once_with(self.context,
                fake_snapshot['volume_id'])

    def _create_instance_with_disabled_disk_config(self, object=False):
        # Returns an instance whose image disabled auto_disk_config; as a
        # primitive dict by default, or the object itself when object=True.
        sys_meta = {"image_auto_disk_config": "Disabled"}
        params = {"system_metadata": sys_meta}
        instance = self._create_instance_obj(params=params)
        if object:
            return instance
        return obj_base.obj_to_primitive(instance)

    def _setup_fake_image_with_disabled_disk_config(self):
        # Registers a fake image service image with auto_disk_config
        # disabled and returns its id.
        self.fake_image = {
            'id': 1,
            'name': 'fake_name',
            'status': 'active',
            'properties': {"auto_disk_config": "Disabled"},
        }

        def fake_show(obj, context, image_id, **kwargs):
            return self.fake_image
        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
        return self.fake_image['id']

    def test_resize_with_disabled_auto_disk_config_fails(self):
        # Requesting auto_disk_config when the image disables it must fail.
        fake_inst = self._create_instance_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.resize,
                          self.context, fake_inst,
                          auto_disk_config=True)

    def test_create_with_disabled_auto_disk_config_fails(self):
        image_id = self._setup_fake_image_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.create, self.context,
                          "fake_flavor", image_id,
                          auto_disk_config=True)

    def test_rebuild_with_disabled_auto_disk_config_fails(self):
        fake_inst = self._create_instance_with_disabled_disk_config(
            object=True)
        image_id = self._setup_fake_image_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.rebuild,
                          self.context,
                          fake_inst,
                          image_id,
                          "new password",
                          auto_disk_config=True)

    # NOTE: patch decorators apply bottom-up, so the first mock argument
    # corresponds to the last decorator (_record_action_start).
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild(self, _record_action_start,
            _checks_for_create_and_rebuild, _check_auto_disk_config,
            _get_image, bdm_get_by_instance_uuid, get_flavor,
            instance_save):
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        get_flavor.return_value = test_flavor.fake_flavor
        flavor = instance.get_flavor()
        image_href = ''
        image = {"min_ram": 10, "min_disk": 1,
                 "properties": {'architecture': arch.X86_64}}
        admin_pass = ''
        files_to_inject = []
        bdms = []

        _get_image.return_value = (None, image)
        bdm_get_by_instance_uuid.return_value = bdms

        with mock.patch.object(self.compute_api.compute_task_api,
                'rebuild_instance') as rebuild_instance:
            self.compute_api.rebuild(self.context, instance, image_href,
                    admin_pass, files_to_inject)

            # Rebuilding with the same image: orig_image_ref == image_ref.
            rebuild_instance.assert_called_once_with(self.context,
                    instance=instance, new_pass=admin_pass,
                    injected_files=files_to_inject, image_ref=image_href,
                    orig_image_ref=image_href,
                    orig_sys_metadata=orig_system_metadata, bdms=bdms,
                    preserve_ephemeral=False, host=instance.host,
                    kwargs={})

        _check_auto_disk_config.assert_called_once_with(image=image)
        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
                None, image, flavor, {}, [])
        self.assertNotEqual(orig_system_metadata, instance.system_metadata)

    # NOTE: patch decorators apply bottom-up; mock arguments are in reverse
    # decorator order.
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild_change_image(self, _record_action_start,
            _checks_for_create_and_rebuild, _check_auto_disk_config,
            _get_image, bdm_get_by_instance_uuid, get_flavor,
            instance_save):
        orig_system_metadata = {}
        get_flavor.return_value = test_flavor.fake_flavor
        orig_image_href = 'orig_image'
        orig_image = {"min_ram": 10, "min_disk": 1,
                      "properties": {'architecture': arch.X86_64,
                                     'vm_mode': 'hvm'}}
        new_image_href = 'new_image'
        new_image = {"min_ram": 10, "min_disk": 1,
                     "properties": {'architecture': arch.X86_64,
                                    'vm_mode': 'xen'}}
        admin_pass = ''
        files_to_inject = []
        bdms = []

        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'],
                image_ref=orig_image_href,
                vm_mode=vm_mode.HVM)
        flavor = instance.get_flavor()

        def get_image(context, image_href):
            # Dispatch on the requested href so the rebuild sees both the
            # original and the new image metadata.
            if image_href == new_image_href:
                return (None, new_image)
            if image_href == orig_image_href:
                return (None, orig_image)
        _get_image.side_effect = get_image
        bdm_get_by_instance_uuid.return_value = bdms

        with mock.patch.object(self.compute_api.compute_task_api,
                'rebuild_instance') as rebuild_instance:
            self.compute_api.rebuild(self.context, instance, new_image_href,
                    admin_pass, files_to_inject)

            rebuild_instance.assert_called_once_with(self.context,
                    instance=instance, new_pass=admin_pass,
                    injected_files=files_to_inject,
                    image_ref=new_image_href,
                    orig_image_ref=orig_image_href,
                    orig_sys_metadata=orig_system_metadata, bdms=bdms,
                    preserve_ephemeral=False, host=instance.host,
                    kwargs={})

        _check_auto_disk_config.assert_called_once_with(image=new_image)
        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
                None, new_image, flavor, {}, [])
        # vm_mode must be updated from the new image's properties.
        self.assertEqual(vm_mode.XEN, instance.vm_mode)

    def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
            side_effect):
        # Drives _check_injected_file_quota with a patched limit_check that
        # raises the supplied side_effect.
        injected_files = [
            {
                "path": "/etc/banner.txt",
                "contents": "foo"
            }
        ]
        with mock.patch.object(quota.QUOTAS, 'limit_check',
                               side_effect=side_effect):
            self.compute_api._check_injected_file_quota(
                self.context, injected_files)

    def test_check_injected_file_quota_onset_file_limit_exceeded(self):
        # This is the first call to limit_check.
        side_effect = exception.OverQuota(overs='injected_files')
        self.assertRaises(exception.OnsetFileLimitExceeded,
            self._test_check_injected_file_quota_onset_file_limit_exceeded,
            side_effect)

    def test_check_injected_file_quota_onset_file_path_limit(self):
        # This is the second call to limit_check.
        # mock.DEFAULT lets the first limit_check call pass; the second
        # raises OverQuota.
        side_effect = (mock.DEFAULT,
                       exception.OverQuota(
                           overs='injected_file_path_bytes'))
        self.assertRaises(exception.OnsetFilePathLimitExceeded,
            self._test_check_injected_file_quota_onset_file_limit_exceeded,
            side_effect)

    def test_check_injected_file_quota_onset_file_content_limit(self):
        # This is the second call to limit_check but with different overs.
        side_effect = (mock.DEFAULT,
            exception.OverQuota(overs='injected_file_content_bytes'))
        self.assertRaises(exception.OnsetFileContentLimitExceeded,
            self._test_check_injected_file_quota_onset_file_limit_exceeded,
            side_effect)

    @mock.patch('nova.objects.Quotas.commit')
    @mock.patch('nova.objects.Quotas.reserve')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceAction.action_start')
    def test_restore(self, action_start, instance_save,
                     quota_reserve, quota_commit):
        # Restoring a soft-deleted instance casts to the compute RPC API,
        # sets task_state to RESTORING, and commits the quota reservation.
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.SOFT_DELETED
        instance.task_state = None
        instance.save()
        with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
            self.compute_api.restore(self.context, instance)
            rpc.restore_instance.assert_called_once_with(self.context,
                                                         instance)
        self.assertEqual(instance.task_state, task_states.RESTORING)
        self.assertEqual(1, quota_commit.call_count)

    def test_external_instance_event(self):
        # Events must be fanned out per host: one RPC call for host1's two
        # instances, one for host2's.
        instances = [
            objects.Instance(uuid='uuid1', host='host1'),
            objects.Instance(uuid='uuid2', host='host1'),
            objects.Instance(uuid='uuid3', host='host2'),
            ]
        events = [
            objects.InstanceExternalEvent(instance_uuid='uuid1'),
            objects.InstanceExternalEvent(instance_uuid='uuid2'),
            objects.InstanceExternalEvent(instance_uuid='uuid3'),
            ]
        self.compute_api.compute_rpcapi = mock.MagicMock()
        self.compute_api.external_instance_event(self.context,
                                                 instances, events)
        method = self.compute_api.compute_rpcapi.external_instance_event
        method.assert_any_call(self.context, instances[0:2], events[0:2])
        method.assert_any_call(self.context, instances[2:], events[2:])
        self.assertEqual(2, method.call_count)

    def test_volume_ops_invalid_task_state(self):
        # Any non-None task_state blocks attach/detach/swap volume ops.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        instance.task_state = 'Any'
        volume_id = uuidutils.generate_uuid()
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.attach_volume,
                          self.context, instance, volume_id)

        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.detach_volume,
                          self.context, instance, volume_id)

        new_volume_id = uuidutils.generate_uuid()
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.swap_volume,
                          self.context, instance,
                          volume_id, new_volume_id)

    @mock.patch.object(cinder.API, 'get',
             side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
        # A cinder outage must surface as CinderConnectionFailed, not be
        # swallowed.
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'id': 1,
                     'volume_id': 1,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     }))]
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._get_bdm_image_metadata,
                          self.context,
                          bdms, legacy_bdm=True)

    @mock.patch.object(cinder.API, 'get')
    @mock.patch.object(cinder.API, 'check_attach',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_validate_bdm_with_error_volume(self, mock_check_attach,
                                            mock_get):
        # Tests that an InvalidVolume exception raised from
        # volume_api.check_attach due to the volume status not being
        # 'available' results in _validate_bdm re-raising InvalidVolume.
        instance = self._create_instance_obj()
        instance_type = self._create_flavor()
        volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
        volume_info = {'status': 'error',
                       'attach_status': 'detached',
                       'id': volume_id}
        mock_get.return_value = volume_info
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'boot_index': 0,
                     'volume_id': volume_id,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                    }))]

        self.assertRaises(exception.InvalidVolume,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdms)

        mock_get.assert_called_once_with(self.context, volume_id)
        mock_check_attach.assert_called_once_with(
            self.context, volume_info, instance=instance)

    @mock.patch.object(cinder.API, 'get_snapshot',
             side_effect=exception.CinderConnectionFailed(reason='error'))
    @mock.patch.object(cinder.API, 'get',
             side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_validate_bdm_with_cinder_down(self, mock_get,
                                           mock_get_snapshot):
        # _validate_bdm must propagate CinderConnectionFailed both for the
        # volume lookup path and the snapshot lookup path.
        instance = self._create_instance_obj()
        instance_type = self._create_flavor()
        bdm = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'id': 1,
                     'volume_id': 1,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     'boot_index': 0,
                     }))]
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'id': 1,
                     'snapshot_id': 1,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     'boot_index': 0,
                     }))]
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdm)
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdms)

    def _test_create_db_entry_for_new_instance_with_cinder_error(self,
            expected_exception):
        @mock.patch.object(objects.Instance, 'create')
        @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
        @mock.patch.object(compute_api.API,
                           '_populate_instance_names')
        @mock.patch.object(compute_api.API, '_populate_instance_for_create')
        # NOTE(review): patch decorators apply bottom-up, so these parameter
        # names do not line up with the decorators they receive (e.g.
        # mock_create is actually the _populate_instance_for_create mock).
        # The mocks are never referenced here, so behavior is unaffected —
        # but the names are misleading; confirm before relying on them.
        def do_test(self, mock_create, mock_names, mock_ensure,
                    mock_inst_create):
            instance = self._create_instance_obj()
            instance['display_name'] = 'FAKE_DISPLAY_NAME'
            instance['shutdown_terminate'] = False
            instance_type = self._create_flavor()
            fake_image = {
                'id': 'fake-image-id',
                'properties': {'mappings': []},
                'status': 'fake-status',
                'location': 'far-away'}
            fake_security_group = None
            fake_num_instances = 1
            fake_index = 1
            bdm = [objects.BlockDeviceMapping(
                    **fake_block_device.FakeDbBlockDeviceDict(
                        {
                         'id': 1,
                         'volume_id': 1,
                         'source_type': 'volume',
                         'destination_type': 'volume',
                         'device_name': 'vda',
                         'boot_index': 0,
                         }))]
            # On the expected failure the half-created instance must be
            # destroyed again.
            with mock.patch.object(instance, "destroy") as destroy:
                self.assertRaises(expected_exception,
                                  self.compute_api.
                                      create_db_entry_for_new_instance,
                                  self.context,
                                  instance_type,
                                  fake_image,
                                  instance,
                                  fake_security_group,
                                  bdm,
                                  fake_num_instances,
                                  fake_index)
                destroy.assert_called_once_with()

        # We use a nested method so we can decorate with the mocks.
        do_test(self)

    # NOTE(review): method name is missing an underscore
    # ("...instancewith_cinder_down"); left as-is to avoid changing the
    # test id.
    @mock.patch.object(cinder.API, 'get',
             side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_create_db_entry_for_new_instancewith_cinder_down(self,
                                                              mock_get):
        self._test_create_db_entry_for_new_instance_with_cinder_error(
            expected_exception=exception.CinderConnectionFailed)

    @mock.patch.object(cinder.API, 'get',
                       return_value={'id': 1, 'status': 'error',
                                     'attach_status': 'detached'})
    def test_create_db_entry_for_new_instancewith_error_volume(self,
                                                               mock_get):
        self._test_create_db_entry_for_new_instance_with_cinder_error(
            expected_exception=exception.InvalidVolume)

    def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
                     rescue_image=None, clean_shutdown=True):
        # Shared driver for the rescue tests: patches out BDM lookup,
        # volume-backed check, save, action record and the RPC cast, then
        # verifies rescue() wired them all together.
        instance = self._create_instance_obj(params={'vm_state': vm_state})
        bdms = []
        with contextlib.nested(
            mock.patch.object(objects.BlockDeviceMappingList,
                              'get_by_instance_uuid', return_value=bdms),
            mock.patch.object(self.compute_api,
                              'is_volume_backed_instance',
                              return_value=False),
            mock.patch.object(instance, 'save'),
            mock.patch.object(self.compute_api, '_record_action_start'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'rescue_instance')
        ) as (
            bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
            record_action_start, rpcapi_rescue_instance
        ):
            self.compute_api.rescue(self.context, instance,
                                    rescue_password=rescue_password,
                                    rescue_image_ref=rescue_image,
                                    clean_shutdown=clean_shutdown)
            # assert field values set on the instance object
            self.assertEqual(task_states.RESCUING, instance.task_state)
            # assert our mock calls
            bdm_get_by_instance_uuid.assert_called_once_with(
                self.context, instance.uuid)
            volume_backed_inst.assert_called_once_with(
                self.context, instance, bdms)
            instance_save.assert_called_once_with(
                expected_task_state=[None])
            record_action_start.assert_called_once_with(
                self.context, instance, instance_actions.RESCUE)
            rpcapi_rescue_instance.assert_called_once_with(
                self.context, instance=instance,
                rescue_password=rescue_password,
rescue_image_ref=rescue_image, clean_shutdown=clean_shutdown) def test_rescue_active(self): self._test_rescue() def test_rescue_stopped(self): self._test_rescue(vm_state=vm_states.STOPPED) def test_rescue_error(self): self._test_rescue(vm_state=vm_states.ERROR) def test_rescue_with_password(self): self._test_rescue(rescue_password='fake-password') def test_rescue_with_image(self): self._test_rescue(rescue_image='fake-image') def test_rescue_forced_shutdown(self): self._test_rescue(clean_shutdown=False) def test_unrescue(self): instance = self._create_instance_obj( params={'vm_state': vm_states.RESCUED}) with contextlib.nested( mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'unrescue_instance') ) as ( instance_save, record_action_start, rpcapi_unrescue_instance ): self.compute_api.unrescue(self.context, instance) # assert field values set on the instance object self.assertEqual(task_states.UNRESCUING, instance.task_state) # assert our mock calls instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.UNRESCUE) rpcapi_unrescue_instance.assert_called_once_with( self.context, instance=instance) def test_set_admin_password_invalid_state(self): # Tests that InstanceInvalidState is raised when not ACTIVE. instance = self._create_instance_obj({'vm_state': vm_states.STOPPED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.set_admin_password, self.context, instance) def test_set_admin_password(self): # Ensure instance can have its admin password set. 
instance = self._create_instance_obj() @mock.patch.object(objects.Instance, 'save') @mock.patch.object(self.compute_api, '_record_action_start') @mock.patch.object(self.compute_api.compute_rpcapi, 'set_admin_password') def do_test(compute_rpcapi_mock, record_mock, instance_save_mock): # call the API self.compute_api.set_admin_password(self.context, instance) # make our assertions instance_save_mock.assert_called_once_with( expected_task_state=[None]) record_mock.assert_called_once_with( self.context, instance, instance_actions.CHANGE_PASSWORD) compute_rpcapi_mock.assert_called_once_with( self.context, instance=instance, new_pass=None) do_test() def _test_attach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_interface, self.context, instance, '', '', '', []) def test_attach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_attach_interface_invalid_state(state) def _test_detach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_interface, self.context, instance, '', '', '', []) def test_detach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_detach_interface_invalid_state(state) def test_check_and_transform_bdm(self): instance_type = self._create_flavor() base_options = {'uuid': 'fake_uuid', 'image_ref': 'fake_image_ref', 'metadata': {}} image_meta = {'status': 'active', 'name': 'image_name', 'deleted': False, 'container_format': 
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
    """Run the shared compute-API test mix-in against the plain (non-cells) API."""

    def setUp(self):
        super(ComputeAPIUnitTestCase, self).setUp()
        self.compute_api = compute_api.API()
        # No cells deployment in the base case.
        self.cell_type = None

    def test_resize_same_flavor_fails(self):
        # Outside of a compute cell, resizing to the current flavor is an error.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self._test_resize, same_flavor=True)
class SecurityGroupAPITest(test.NoDBTestCase):
    """Unit tests for compute_api.SecurityGroupAPI."""

    def setUp(self):
        super(SecurityGroupAPITest, self).setUp()
        self.secgroup_api = compute_api.SecurityGroupAPI()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

    @mock.patch('nova.objects.security_group.SecurityGroupList.'
                'get_by_instance')
    def test_get_instance_security_groups(self, mock_get):
        """The API returns one {'name': ...} dict per group on the instance."""
        groups = objects.SecurityGroupList()
        groups.objects = [objects.SecurityGroup(name='foo'),
                          objects.SecurityGroup(name='bar')]
        mock_get.return_value = groups
        names = self.secgroup_api.get_instance_security_groups(self.context,
                                                               'fake-uuid')
        # Order is not guaranteed, so compare sorted.
        self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
        self.assertEqual(1, mock_get.call_count)
        # The uuid is passed wrapped in an instance-like object.
        self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
{ "content_hash": "427a8eae10cdb39782f9e394c678cfb8", "timestamp": "", "source": "github", "line_count": 2840, "max_line_length": 79, "avg_line_length": 44.25774647887324, "alnum_prop": 0.5654218247780288, "repo_name": "sajeeshcs/nested_quota_latest", "id": "c9f90289d9096bcdd61d7766ab4aee25a3cffd59", "size": "126267", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nova/tests/unit/compute/test_compute_api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "15283878" }, { "name": "Shell", "bytes": "18626" } ], "symlink_target": "" }
# numpy is optional at import time; methods below fail if it is missing.
try:
    import numpy
except ImportError:
    pass

from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendrogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors]
        )
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        """Agglomeratively merge clusters until ``self._num_clusters`` remain."""
        # variables describing the initial situation
        N = len(vectors)
        cluster_len = [1] * N      # number of points per (live) cluster
        cluster_count = N
        index_map = numpy.arange(N)  # maps matrix row -> dendrogram item index

        # construct the similarity matrix; only the upper triangle (i < j)
        # holds real distances, everything else stays +inf
        dims = (N, N)
        dist = numpy.ones(dims, dtype=float) * numpy.inf
        for i in range(N):
            for j in range(i + 1, N):
                dist[i, j] = cosine_distance(vectors[i], vectors[j])

        while cluster_count > max(self._num_clusters, 1):
            # closest pair = smallest entry in the distance matrix
            i, j = numpy.unravel_index(dist.argmin(), dims)
            if trace:
                print("merging %d and %d" % (i, j))

            # update similarities for merging i and j
            self._merge_similarities(dist, cluster_len, i, j)

            # remove j: +inf ensures row/column j is never selected again
            dist[:, j] = numpy.inf
            dist[j, :] = numpy.inf

            # merge the clusters
            cluster_len[i] = cluster_len[i] + cluster_len[j]
            self._dendrogram.merge(index_map[i], index_map[j])
            cluster_count -= 1

            # update the index map to reflect the indexes if we
            # had removed j
            index_map[j + 1 :] -= 1
            index_map[j] = N  # sentinel: row j no longer maps to a live item

        self.update_clusters(self._num_clusters)

    def _merge_similarities(self, dist, cluster_len, i, j):
        # the new cluster i merged from i and j adopts the average of
        # i and j's similarity to each other cluster, weighted by the
        # number of points in the clusters i and j
        i_weight = cluster_len[i]
        j_weight = cluster_len[j]
        weight_sum = i_weight + j_weight

        # update for x<i  (entries dist[x, i], paired with dist[x, j])
        dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
        dist[:i, i] /= weight_sum
        # update for i<x<j  (entries dist[i, x], paired with dist[x, j])
        dist[i, i + 1 : j] = (
            dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
        )
        # update for i<j<x  (entries dist[i, x], paired with dist[j, x])
        dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
        # single division covers both slices updated above (the untouched
        # dist[i, j] entry becomes inf/weight_sum = inf and is cleared anyway)
        dist[i, i + 1 :] /= weight_sum

    def update_clusters(self, num_clusters):
        """Cut the dendrogram at ``num_clusters`` and recompute centroids."""
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= len(cluster)
            self._centroids.append(centroid)
        # groups() may return fewer clusters than requested
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        # nearest centroid by cosine distance
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            dist = cosine_distance(vector, centroid)
            if not best or dist < best[0]:
                best = (dist, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def __repr__(self):
        return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters


def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """

    from nltk.cluster import GAAClusterer

    # use a set of tokens with 2D indices
    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test the GAAC clusterer with 4 clusters
    clusterer = GAAClusterer(4)
    clusters = clusterer.cluster(vectors, True)

    print("Clusterer:", clusterer)
    print("Clustered:", vectors)
    print("As:", clusters)
    print()

    # show the dendrogram
    clusterer.dendrogram().show()

    # classify a new vector
    vector = numpy.array([3, 3])
    print("classify(%s):" % vector, end=" ")
    print(clusterer.classify(vector))
    print()


if __name__ == "__main__":
    demo()
{ "content_hash": "98ef8d950e4ea80946fa508652f005c6", "timestamp": "", "source": "github", "line_count": 163, "max_line_length": 88, "avg_line_length": 33.90184049079755, "alnum_prop": 0.5808903365906624, "repo_name": "nltk/nltk", "id": "53d680c00517bfc08bc167d4e300420e0f16cd7f", "size": "5751", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "nltk/cluster/gaac.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "705" }, { "name": "HTML", "bytes": "24786" }, { "name": "Jupyter Notebook", "bytes": "55608" }, { "name": "Makefile", "bytes": "7983" }, { "name": "Python", "bytes": "4831858" }, { "name": "Shell", "bytes": "10877" } ], "symlink_target": "" }
"""Packaging script for the *dynaform* app (dynamic Django form builder)."""
from setuptools import setup, find_packages
import sys, os

from dynaform import version

# All package metadata collected in one mapping for easy inspection.
_setup_kwargs = dict(
    name='dynaform',
    version=version,
    description="App para crear formularios dinámicos",
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    include_package_data=True,
    install_requires=[
        'celery',
    ],
    dependency_links=[
        'git+https://github.com/ninjaotoko/djblog.git',
    ],
    zip_safe=False,
    author='Xavier Lesa',
    author_email='xavierlesa@gmail.com',
    url='http://github.com/ninjaotoko/dynaform',
)

setup(**_setup_kwargs)
{ "content_hash": "9e669201ad470d08640677372017c1aa", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 74, "avg_line_length": 31.4, "alnum_prop": 0.6035031847133758, "repo_name": "ninjaotoko/dynaform", "id": "d7788f7c2b5c92e133833b870a84dc0d7e4f3bb0", "size": "783", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "11852" }, { "name": "JavaScript", "bytes": "9012" }, { "name": "Python", "bytes": "110446" } ], "symlink_target": "" }
import pygame
import threading
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')


class MagicCard:
    """A spell card: applies blood/servant effects when played and renders
    a short (< 1 second) effect animation afterwards."""

    def __init__(self,dict):
        # Effect magnitudes read from the card definition dict.
        self.self_servant_effect = dict['self_servant']    # heal own servants
        self.self_blood_effect = dict['self_blood']        # heal own hero
        self.enermy_servant_effect = dict['enermy_servant']  # damage enemy servants
        self.enermy_blood_effect = dict['enermy_blood']    # damage enemy hero
        self.show_img = pygame.image.load(dict['show_img'])
        self.create_time = dict['time']
        # NOTE(review): use_time starts as '' — calling load_card() before
        # use_card() would raise a TypeError on the subtraction below. Confirm
        # callers always play the card first.
        self.use_time = ''
        self.screen = ''
        self.type = ''

    def load_card(self,current_time,screen):
        """Blit the effect sprite for one second after the card was played.

        ``type`` selects which side's row the effect is drawn on
        (0/1 swap the y-offsets 145 and 300 — presumably player vs opponent;
        verify against the caller).
        """
        if current_time-self.use_time < 1:
            if self.type == 0:
                if self.enermy_servant_effect>0:
                    m7effect_img = pygame.image.load('m7effect.png')
                    screen.blit(m7effect_img, (0, 145))
                elif self.self_servant_effect>0:
                    m2effect_img = pygame.image.load('m2effect.png')
                    screen.blit(m2effect_img, (0, 300))
            elif self.type==1:
                if self.enermy_servant_effect>0:
                    m7effect_img = pygame.image.load('m7effect.png')
                    screen.blit(m7effect_img, (0, 300))
                elif self.self_servant_effect>0:
                    m2effect_img = pygame.image.load('m2effect.png')
                    screen.blit(m2effect_img, (0, 145))

    def use_card(self,self_blood,enermy_blood,self_servant_list,enermy_servant_list,type):
        """Apply the card's effects and return the updated game state.

        Returns ``(self_blood, enermy_blood, self_servants, enermy_servants)``.
        Note: the servant lists are mutated in place (dead enemy servants are
        removed); the "new_" names alias the arguments, they are not copies.
        """
        self.use_time = time.time()
        new_self_blood = self_blood + self.self_blood_effect
        new_enermy_blood = enermy_blood - self.enermy_blood_effect
        new_self_servant_list = self_servant_list
        new_enermy_servant_list = enermy_servant_list
        for i in new_self_servant_list:
            i.blood_increase(self.self_servant_effect)
        # collect indexes of servants killed by the damage ...
        dead_list = []
        for i in new_enermy_servant_list:
            if i.blood_decrease(self.enermy_servant_effect,self.use_time):
                dead_list.append(new_enermy_servant_list.index(i))
        # ... and delete from highest index first so positions stay valid
        dead_list.sort(reverse=True)
        for i in dead_list:
            del new_enermy_servant_list[i]
        self.type = type
        return new_self_blood,new_enermy_blood,new_self_servant_list,new_enermy_servant_list
{ "content_hash": "60624a81c9b1b941ceac109faf356234", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 92, "avg_line_length": 38.49180327868852, "alnum_prop": 0.5728279386712095, "repo_name": "diyalujiaf/yewan-s-", "id": "49c1903403e55dcff1c758b93561f2e5f65966c2", "size": "2494", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cardgame/magic_card.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "40155" } ], "symlink_target": "" }
""" Load Homity Config, Start Services """ import logging from Hub import app from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError from couchdb import Server from v1.Common.helpers import bool_or_string ''' Parse Config ''' hub_config = {} hub_config_parser = SafeConfigParser() hub_config_parser.read('/etc/homity/homityhub.conf') try: hub_config['logger_path'] = hub_config_parser.get('logger', 'path') except (NoOptionError, NoSectionError): hub_config['logger_path'] = False try: hub_config['couch_url'] = hub_config_parser.get('couchdb', 'server') except (NoOptionError, NoSectionError): hub_config['couch_url'] = False try: hub_config['couch_username'] = hub_config_parser.get('couchdb', 'username') hub_config['couch_password'] = hub_config_parser.get('couchdb', 'password') except (NoOptionError, NoSectionError): hub_config['couch_username'] = False hub_config['couch_password'] = False try: hub_config['ssl_enable'] = bool_or_string(hub_config_parser.get('ssl', 'enabled')) except (NoOptionError, NoSectionError): hub_config['ssl_enable'] = False if hub_config['ssl_enable']: try: hub_config['ssl_private_key'] = hub_config_parser.get( 'ssl', 'private_key_path') hub_config['ssl_cert'] = hub_config_parser.get('ssl', 'cert_path') except (NoOptionError, NoSectionError): hub_config['ssl_enable'] = False ''' Set up Logger ''' if hub_config['logger_path']: FILE_HANDLER = logging.FileHandler(hub_config.get('logger_path')) FILE_HANDLER.setLevel(logging.INFO) FILE_HANDLER.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s ' )) app.logger.setLevel(logging.INFO) app.logger.addHandler(FILE_HANDLER) LOG = logging.getLogger('werkzeug') LOG = logging.getLogger('wsgi') LOG.setLevel(logging.INFO) LOG.addHandler(FILE_HANDLER) ''' Set up CouchDB ''' if hub_config['couch_url']: couch = Server(url=hub_config.get('couch_url')) if hub_config.get('couch_username'): couch.resource.credentials = (hub_config.get('couch_username'), 
hub_config.get('couch_password')) else: couch = Server(url="http://localhost:5984") @app.route('/', methods=['GET']) def get_all_status(): """Placeholder for root get.""" return ""
{ "content_hash": "331d4173a090567ae106ed737296f3a5", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 79, "avg_line_length": 31.24137931034483, "alnum_prop": 0.5809418690213393, "repo_name": "openhomity/homity-hub", "id": "95e76c98ca3df98f62185469859cfcf8f90366ee", "size": "2718", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Hub/api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "80545" } ], "symlink_target": "" }
from ansible.module_utils.api import basic_auth_argument_spec ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: netapp_e_volume version_added: "2.2" short_description: Manage storage volumes (standard and thin) description: - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays. options: api_username: required: true description: - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. api_password: required: true description: - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. api_url: required: true description: - The url to the SANtricity WebServices Proxy or embedded REST API. example: - https://prod-1.wahoo.acme.com/devmgr/v2 validate_certs: required: false default: true description: - Should https certificates be validated? ssid: required: true description: - The ID of the array to manage (as configured on the web services proxy). state: required: true description: - Whether the specified volume should exist or not. choices: ['present', 'absent'] name: required: true description: - The name of the volume to manage storage_pool_name: required: true description: - "Required only when requested state is 'present'. The name of the storage pool the volume should exist on." size_unit: description: - The unit used to interpret the size parameter choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] default: 'gb' size: required: true description: - "Required only when state = 'present'. The size of the volume in (size_unit)." segment_size_kb: description: - The segment size of the new volume default: 512 thin_provision: description: - Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool). 
default: False choices: ['yes','no','true','false'] thin_volume_repo_size: description: - Initial size of the thin volume repository volume (in size_unit) required: True thin_volume_max_repo_size: description: - Maximum size that the thin volume repository volume will automatically expand to default: same as size (in size_unit) ssd_cache_enabled: description: - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined) default: None (ignores existing SSD cache setting) choices: ['yes','no','true','false'] data_assurance_enabled: description: - If data assurance should be enabled for the volume default: false # TODO: doc thin volume parameters author: Kevin Hulquest (@hulquest) ''' EXAMPLES = ''' - name: No thin volume netapp_e_volume: ssid: "{{ ssid }}" name: NewThinVolumeByAnsible state: absent log_path: /tmp/volume.log api_url: "{{ netapp_api_url }}" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" validate_certs: "{{ netapp_api_validate_certs }}" when: check_volume - name: No fat volume netapp_e_volume: ssid: "{{ ssid }}" name: NewVolumeByAnsible state: absent log_path: /tmp/volume.log api_url: "{{ netapp_api_url }}" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" validate_certs: "{{ netapp_api_validate_certs }}" when: check_volume ''' RETURN = ''' --- msg: "Standard volume [workload_vol_1] has been created." msg: "Thin volume [workload_thin_vol] has been created." msg: "Volume [workload_vol_1] has been expanded." msg: "Volume [workload_vol_1] has been deleted." msg: "Volume [workload_vol_1] did not exist." msg: "Volume [workload_vol_1] already exists." 
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None,
            force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_body)``.

    The response body is parsed as JSON; an empty body yields ``None``.  On
    an HTTP error status the error document is still read and parsed.

    :param ignore_errors: when True, parse failures and >=400 statuses are
        tolerated instead of raising.
    :raises Exception: ``(status, data)`` for >=400 responses, or the raw
        body when it cannot be parsed (unless *ignore_errors* is set).
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method,
                     use_proxy=use_proxy, force=force,
                     last_mod_time=last_mod_time, timeout=timeout,
                     validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password,
                     http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        # the HTTP error document usually carries a useful JSON payload
        r = err.fp

    # BUGFIX: the original wrote the no-op expression ``raw_data is None`` in
    # the empty-body branch, leaving ``data`` (and, on a read failure,
    # ``raw_data``) unbound and raising NameError further down.  Initialize
    # both so every path returns or raises cleanly.
    raw_data = None
    data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
if predicate is None: predicate = bool for x in iterable: if predicate(x): yield x class NetAppESeriesVolume(object): def __init__(self): self._size_unit_map = dict( bytes=1, b=1, kb=1024, mb=1024 ** 2, gb=1024 ** 3, tb=1024 ** 4, pb=1024 ** 5, eb=1024 ** 6, zb=1024 ** 7, yb=1024 ** 8 ) self._post_headers = dict(Accept="application/json") self._post_headers['Content-Type'] = 'application/json' argument_spec = basic_auth_argument_spec() argument_spec.update(dict( state=dict(required=True, choices=['present', 'absent']), ssid=dict(required=True, type='str'), name=dict(required=True, type='str'), storage_pool_name=dict(type='str'), size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'), size=dict(type='int'), segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'), ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone data_assurance_enabled=dict(default=False, type='bool'), thin_provision=dict(default=False, type='bool'), thin_volume_repo_size=dict(type='int'), thin_volume_max_repo_size=dict(type='int'), # TODO: add cache, owning controller support, thin expansion policy, etc log_path=dict(type='str'), api_url=dict(type='str'), api_username=dict(type='str'), api_password=dict(type='str'), validate_certs=dict(type='bool'), )) self.module = AnsibleModule(argument_spec=argument_spec, required_if=[ ('state', 'present', ['storage_pool_name', 'size']), ('thin_provision', 'true', ['thin_volume_repo_size']) ], supports_check_mode=True) p = self.module.params log_path = p['log_path'] # logging setup self._logger = logging.getLogger(self.__class__.__name__) self.debug = self._logger.debug if log_path: logging.basicConfig(level=logging.DEBUG, filename=log_path) self.state = p['state'] self.ssid = p['ssid'] self.name = p['name'] self.storage_pool_name = p['storage_pool_name'] self.size_unit = p['size_unit'] self.size = p['size'] self.segment_size_kb = 
    def get_volume(self, volume_name):
        """Return the detail dict for ``volume_name`` or None if absent.

        Both thick and thin volumes are searched, since the two kinds live
        behind different REST endpoints.
        """
        self.debug('fetching volumes')
        # fetch the list of volume objects and look for one with a matching
        # name (we'll need to merge volumes and thin-volumes)
        try:
            (rc, volumes) = request(
                self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                headers=dict(Accept="application/json"),
                url_username=self.api_usr, url_password=self.api_pwd,
                validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of standard/thick volumes.  Array Id [%s]. Error[%s]." % (self.ssid,
                                                                                                     str(err)))

        try:
            self.debug('fetching thin-volumes')
            (rc, thinvols) = request(
                self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                headers=dict(Accept="application/json"),
                url_username=self.api_usr, url_password=self.api_pwd,
                validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of thin volumes.  Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

        volumes.extend(thinvols)

        self.debug("searching for volume '%s'" % volume_name)
        # first name match wins; None when no volume has that name
        volume_detail = next(ifilter(lambda a: a['name'] == volume_name,
                                     volumes), None)

        if volume_detail:
            self.debug('found')
        else:
            self.debug('not found')

        return volume_detail
% (self.name, self.ssid, str(err))) def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size, thin_volume_max_repo_size, data_assurance_enabled): thin_volume_add_req = dict( name=name, poolId=pool_id, sizeUnit=size_unit, virtualSize=size, repositorySize=thin_volume_repo_size, maximumRepositorySize=thin_volume_max_repo_size, dataAssuranceEnabled=data_assurance_enabled, ) self.debug("creating thin-volume '%s'" % name) try: (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid), data=json.dumps(thin_volume_add_req), headers=self._post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) except Exception: err = get_exception() self.module.fail_json( msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid, str(err))) def delete_volume(self): # delete the volume self.debug("deleting volume '%s'" % self.volume_detail['name']) try: (rc, resp) = request( self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name, self.volume_detail['id']), method='DELETE', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) except Exception: err = get_exception() self.module.fail_json( msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." 
    def update_volume_properties(self):
        """POST the changed mutable properties to the existing volume.

        Only properties detected as changed are included, so anything the
        user left unspecified keeps its current value on the array.
        """
        update_volume_req = dict()

        # conditionally add values so we ignore unspecified props
        if self.volume_ssdcache_setting_changed:
            update_volume_req['flashCache'] = self.ssd_cache_enabled

        self.debug("updating volume properties...")
        try:
            # volume_resource_name picks /volumes vs /thin-volumes
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid,
                                                               self.volume_resource_name,
                                                               self.volume_detail['id']),
                data=json.dumps(update_volume_req),
                headers=self._post_headers,
                method='POST',
                url_username=self.api_usr, url_password=self.api_pwd,
                validate_certs=self.validate_certs,
                timeout=120)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to update volume properties.  Volume [%s].  Array Id [%s]. Error[%s]." % (self.name,
                                                                                                      self.ssid,
                                                                                                      str(err)))
    def expand_volume(self):
        """Grow the volume to the requested size.

        Thin volumes take a new virtual size and return immediately; thick
        volumes take an expansion delta and are polled until the array
        reports the expansion finished.
        """
        is_thin = self.volume_detail['thinProvisioned']
        if is_thin:
            # TODO: support manual repo expansion as well
            self.debug('expanding thin volume')
            thin_volume_expand_req = dict(
                newVirtualSize=self.size,
                sizeUnit=self.size_unit
            )
            try:
                (rc, resp) = request(
                    self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
                                                                                   self.volume_detail['id']),
                    data=json.dumps(thin_volume_expand_req),
                    headers=self._post_headers,
                    method='POST',
                    url_username=self.api_usr, url_password=self.api_pwd,
                    validate_certs=self.validate_certs,
                    timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to expand thin volume.  Volume [%s].  Array Id [%s]. Error[%s]." % (self.name,
                                                                                                    self.ssid,
                                                                                                    str(err)))

            # TODO: check return code
        else:
            self.debug('expanding volume')
            # thick expansion takes the *additional* size, not the new total
            volume_expand_req = dict(
                expansionSize=self.size,
                sizeUnit=self.size_unit
            )
            try:
                (rc, resp) = request(
                    self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
                                                                              self.volume_detail['id']),
                    data=json.dumps(volume_expand_req),
                    headers=self._post_headers,
                    method='POST',
                    url_username=self.api_usr, url_password=self.api_pwd,
                    validate_certs=self.validate_certs,
                    timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to expand volume.  Volume [%s].  Array Id [%s]. Error[%s]." % (self.name,
                                                                                               self.ssid,
                                                                                               str(err)))

            self.debug('polling for completion...')

            # poll the expand endpoint until the array reports no action in
            # progress; no overall timeout here, relies on the array finishing
            while True:
                try:
                    (rc, resp) = request(
                        self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
                                                                                  self.volume_detail['id']),
                        method='GET',
                        url_username=self.api_usr, url_password=self.api_pwd,
                        validate_certs=self.validate_certs)
                except Exception:
                    err = get_exception()
                    self.module.fail_json(
                        msg="Failed to get volume expansion progress.  Volume [%s].  Array Id [%s]. Error[%s]." % (
                            self.name, self.ssid, str(err)))

                action = resp['action']
                percent_complete = resp['percentComplete']

                self.debug('expand action %s, %s complete...' % (action, percent_complete))

                if action == 'none':
                    self.debug('expand complete')
                    break
                else:
                    time.sleep(5)
% (self.name) # this stuff always needs to run on present (since props can't be set on creation) if self.volume_properties_changed: self.update_volume_properties() msg = "Properties of volume [%s] has been updated." % (self.name) elif self.state == 'absent': self.delete_volume() msg = "Volume [%s] has been deleted." % (self.name) else: self.debug("exiting with no changes") if self.state == 'absent': msg = "Volume [%s] did not exist." % (self.name) else: msg = "Volume [%s] already exists." % (self.name) self.module.exit_json(msg=msg, changed=changed) def main(): v = NetAppESeriesVolume() try: v.apply() except Exception: e = get_exception() v.debug("Exception in apply(): \n%s" % format_exc(e)) v.module.fail_json(msg="Module failed. Error [%s]." % (str(e))) if __name__ == '__main__': main()
{ "content_hash": "547f9a16ba6db49a527f8486e868c380", "timestamp": "", "source": "github", "line_count": 602, "max_line_length": 127, "avg_line_length": 42.63455149501661, "alnum_prop": 0.5130133250214292, "repo_name": "nwiizo/workspace_2017", "id": "2610796585594a825d5fcc7ae3cac3c00bb7f3e5", "size": "26366", "binary": false, "copies": "26", "ref": "refs/heads/master", "path": "ansible-modules-extras/storage/netapp/netapp_e_volume.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "173" }, { "name": "C++", "bytes": "7105" }, { "name": "CSS", "bytes": "50021" }, { "name": "Go", "bytes": "112005" }, { "name": "HTML", "bytes": "66435" }, { "name": "JavaScript", "bytes": "73266" }, { "name": "Makefile", "bytes": "1227" }, { "name": "PHP", "bytes": "3916" }, { "name": "PowerShell", "bytes": "277598" }, { "name": "Python", "bytes": "11925958" }, { "name": "Ruby", "bytes": "3779" }, { "name": "Rust", "bytes": "1484076" }, { "name": "Shell", "bytes": "86558" } ], "symlink_target": "" }
""" Read some json files and print all keys """ import os import sys import argparse from roblib import bcolors import json __author__ = 'Rob Edwards' if __name__ == "__main__": parser = argparse.ArgumentParser(description=' ') parser.add_argument('-f', help='json file', required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() akeys = set() t = json.load(open(args.f, 'r')) for k in t: akeys.update(t[k].keys()) print("{}".format("\n".join(akeys)))
{ "content_hash": "d04a9c711728e972323d8f38bc2d974f", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 73, "avg_line_length": 21.26923076923077, "alnum_prop": 0.6220614828209765, "repo_name": "linsalrob/EdwardsLab", "id": "7bb356336db45546b57acc3c344840a74eb17ce4", "size": "553", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ModelSEED/json_keys_keys.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "227276" }, { "name": "C++", "bytes": "21508" }, { "name": "Jupyter Notebook", "bytes": "490830" }, { "name": "Makefile", "bytes": "936" }, { "name": "Perl", "bytes": "280086" }, { "name": "Python", "bytes": "1102051" }, { "name": "Shell", "bytes": "13759" } ], "symlink_target": "" }
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.context_processors import csrf
from django.template import RequestContext

from r_pass.models import Service, AccessToken, Host, Group
from r_pass.forms import ServiceForm
from r_pass.authz import AuthZ

import logging
import markdown2


@login_required
def home(request):
    """List every service the logged-in user is allowed to access.

    Renders 'services.html' with title, view url and markdown-rendered
    description for each accessible Service.
    """
    context = RequestContext(request, {})
    data = {}
    data["services"] = []
    authz = AuthZ()
    services = Service.objects.all()
    # safe_mode="escape" HTML-escapes raw markup embedded in the
    # user-supplied markdown before rendering it.
    md = markdown2.Markdown(safe_mode="escape")
    for service in services:
        if authz.has_access_to_service(request.user, service):
            data["services"].append({
                "title": service.title,
                "url": service.view_url(),
                "description": md.convert(service.description),
            })
    return render_to_response('services.html', data, context)


@login_required
def _create_or_edit(request, service):
    """Shared form handler behind the create and edit views.

    On POST: validates the ServiceForm, verifies the submitting user is a
    member of at least one of the requested groups (so they cannot lock
    themselves out), then saves the Service with its hosts, groups and
    single AccessToken, and redirects to the service's view url.
    On GET (or invalid POST): renders 'create_edit.html', pre-filled from
    *service* when it already exists.
    """
    if request.method == 'POST':
        form = ServiceForm(request.POST)
        if form.is_valid():
            # hosts/groups arrive as whitespace-separated lists in the form
            hosts = form.cleaned_data["hosts"].split()
            groups = form.cleaned_data["groups"].split()
            can_view_new_service = False
            authz = AuthZ()
            # Check to make sure the user will have access to the
            # credential they're creating
            for group in groups:
                if authz.is_member_of_group(request.user, group):
                    can_view_new_service = True
                    break
            if can_view_new_service:
                service.title = form.cleaned_data["title"]
                service.description = form.cleaned_data["description"]
                # remember whether this is a create or an edit for the audit log
                service_is_new = True
                if service.pk:
                    service_is_new = False
                service.save()

                # re-sync the many-to-many relations from scratch on every save
                service.hosts.clear()
                for host in hosts:
                    model, create = Host.objects.get_or_create(cname=host)
                    service.hosts.add(model)

                service.groups.clear()
                for group in groups:
                    model, create = Group.objects.get_or_create(source_id=group)
                    service.groups.add(model)

                # only one AccessToken per service: drop and recreate it
                AccessToken.objects.filter(service=service).delete()
                access_token = AccessToken()
                access_token.name = form.cleaned_data["access_name"]
                access_token.description = form.cleaned_data["access_description"]
                access_token.user = form.cleaned_data["access_user"]
                access_token.access_token= form.cleaned_data["access_token"]
                access_token.service = service
                access_token.save()

                if service_is_new:
                    _log(request, "Created service id: %s, name: %s" % (service.pk, service.title))
                else:
                    _log(request, "Edited service id: %s, name: %s" % (service.pk, service.title))
                return HttpResponseRedirect(service.view_url())
            else:
                # valid form, but saving would lock the user out of the
                # credential; surface that as a field error and re-render
                form._errors["groups"] = form.error_class(["You don't have access to this service with the groups given"])
    else:
        data = None
        if service and service.pk:
            # editing: pre-populate the form from the existing service
            data = {}
            data["title"] = service.title
            data["description"] = service.description

            groups = service.groups.all()
            data["groups"] = "\n".join(map(lambda x: x.source_id, groups))

            hosts = service.hosts.all()
            data["hosts"] = "\n".join(map(lambda x: x.cname, hosts))

            access_tokens = AccessToken.objects.filter(service=service)
            if len(access_tokens):
                token = access_tokens[0]
                data["access_name"] = token.name
                data["access_description"] = token.description
                data["access_user"] = token.user
                data["access_token"] = token.access_token

        form = ServiceForm(data)

    if service and service.pk:
        submit_url = service.edit_url()
    else:
        submit_url = "/r-pass/create"

    context = RequestContext(request, {})
    context.update(csrf(request))
    context["form"] = form
    context["submit_url"] = submit_url
    return render_to_response('create_edit.html', context)


@login_required
def create(request):
    """Create a brand-new service via the shared form handler."""
    return _create_or_edit(request, Service())


@login_required
def edit(request, service_id):
    """Edit an existing service; 404 when unknown, 403 without access."""
    service = None
    try:
        service = Service.objects.get(pk=service_id)
    except ObjectDoesNotExist:
        return HttpResponse("Not Found", status=404,)

    authz = AuthZ()
    if not authz.has_access_to_service(request.user, service):
        _log(request, "Tried to view service - no access. id: %s, name: %s" % (service.pk, service.title))
        return HttpResponse(status=403)

    return _create_or_edit(request, service)


@login_required
def service(request, service_id):
    """Show the detail page for one service; 404 when unknown, 403 without access."""
    context = RequestContext(request, {})
    service = None
    try:
        service = Service.objects.get(pk=service_id)
    except ObjectDoesNotExist:
        return HttpResponse("Not Found", status=404,)

    authz = AuthZ()
    if not authz.has_access_to_service(request.user, service):
        _log(request, "Tried to view service - no access. id: %s, name: %s" % (service.pk, service.title))
        return HttpResponse(status=403)

    # successful views are audited too
    _log(request, "Viewed service id: %s, name: %s" % (service.pk, service.title))
    data = {}
    data["service"] = service
    md = markdown2.Markdown(safe_mode="escape")
    data["service_description"] = md.convert(service.description)
    data["hosts"] = service.hosts.all()
    data["tokens"] = AccessToken.objects.filter(service=service)
    for token in data["tokens"]:
        token.markdown_description = md.convert(token.description)
    data["groups"] = []
    for group in service.groups.all():
        data["groups"].append({
            "display": authz.group_display_name(group.source_id),
            "id": group.source_id,
            "membership_url": authz.group_membership_url(group.source_id),
        })
    data["edit_url"] = service.edit_url()
    return render_to_response("service_details.html", data, context)


def _log(request, message):
    """Write an audit line, tagged with the acting user, to the data log."""
    logger = logging.getLogger('r_pass.data_log')
    logger.info("%s: %s", request.user, message)
{ "content_hash": "d785a55e2eaf08ac8f17a70659f2c233", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 122, "avg_line_length": 37.21142857142857, "alnum_prop": 0.5999692874692875, "repo_name": "abztrakt/r-pass", "id": "f6a98df90a194d947ff54f0f3ce619d1d0dffefe", "size": "6512", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "r_pass/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "2081" }, { "name": "HTML", "bytes": "5602" }, { "name": "JavaScript", "bytes": "1227" }, { "name": "Python", "bytes": "9750" } ], "symlink_target": "" }
"""Small admin command line tool for KerbalStuff.

Usage:
    python admin.py delete_user <username>
"""
import sys

from KerbalStuff.config import _cfg, _cfgi
from KerbalStuff.database import db, init_db
from KerbalStuff.objects import User
from KerbalStuff.email import send_confirmation

init_db()

# Guard against missing arguments instead of crashing with an IndexError.
if len(sys.argv) < 2:
    sys.exit("Usage: admin.py delete_user <username>")

if sys.argv[1] == 'delete_user':
    if len(sys.argv) < 3:
        sys.exit("Usage: admin.py delete_user <username>")
    user = User.query.filter(User.username == sys.argv[2]).first()
    if not user:
        sys.exit("User not found.")
    else:
        db.delete(user)
        db.commit()
        print("Success.")
        sys.exit()
{ "content_hash": "2c7f91489f832d0fc2a9caa44ffd69b9", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 66, "avg_line_length": 24.944444444444443, "alnum_prop": 0.6570155902004454, "repo_name": "ModulousSmash/Modulous", "id": "73288fc351b3ebb86b865bf580c85de691e25600", "size": "449", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "8077" }, { "name": "CSS", "bytes": "208709" }, { "name": "CoffeeScript", "bytes": "34512" }, { "name": "HTML", "bytes": "1531955" }, { "name": "JavaScript", "bytes": "365418" }, { "name": "Mako", "bytes": "412" }, { "name": "PHP", "bytes": "5957" }, { "name": "Python", "bytes": "187439" } ], "symlink_target": "" }
def extractRexjadeWordpressCom(item):
    """
    Parser for 'rexjade.wordpress.com'
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)

    # Previews and items with neither a volume nor a chapter are not releases.
    if "preview" in title.lower() or not (chp or vol):
        return None

    # (tag to look for, release series name, translation type)
    tag_to_release = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )

    tags = item['tags']
    for tag, series_name, tl_type in tag_to_release:
        if tag in tags:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    return False
{ "content_hash": "44fa68fd63c1d02cdcd4707194d3be98", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 104, "avg_line_length": 26.333333333333332, "alnum_prop": 0.6311030741410488, "repo_name": "fake-name/ReadableWebProxy", "id": "fd7b4f94d5d5723c157b1c5128961cb5cefa9768", "size": "554", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "WebMirror/management/rss_parser_funcs/feed_parse_extractRexjadeWordpressCom.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "105811" }, { "name": "Dockerfile", "bytes": "1178" }, { "name": "HTML", "bytes": "119737" }, { "name": "JavaScript", "bytes": "3006524" }, { "name": "Jupyter Notebook", "bytes": "148075" }, { "name": "Mako", "bytes": "1454" }, { "name": "Python", "bytes": "5264346" }, { "name": "Shell", "bytes": "1059" } ], "symlink_target": "" }
from devilry.devilry_message.utils.subject_generator import SubjectTextGenerator


class SubjectTextTestGenerator(SubjectTextGenerator):
    """
    Minimal ``SubjectTextGenerator`` stub used by the test-suite: it always
    produces the same fixed subject line.
    """
    def get_subject_text(self):
        """
        Return the constant subject line used in tests.
        """
        return 'Test subject'
{ "content_hash": "cac23b4d81a83ec0d79ce09986aa134e", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 80, "avg_line_length": 33.166666666666664, "alnum_prop": 0.7989949748743719, "repo_name": "devilry/devilry-django", "id": "0b45a28169501545765263c9f1a028a095cec797", "size": "199", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "devilry/devilry_message/tests/test_utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "513510" }, { "name": "Dockerfile", "bytes": "211" }, { "name": "HTML", "bytes": "421969" }, { "name": "JavaScript", "bytes": "756713" }, { "name": "Less", "bytes": "166670" }, { "name": "PLpgSQL", "bytes": "397986" }, { "name": "Python", "bytes": "6507968" }, { "name": "Shell", "bytes": "10328" } ], "symlink_target": "" }
from ipyparallel import Client
from EXOSIMS.Prototypes.SurveyEnsemble import SurveyEnsemble
import time
from IPython.core.display import clear_output
import sys
import os
import numpy as np
import os.path
import subprocess


class IPClusterEnsemble(SurveyEnsemble):
    """Parallelized suvey ensemble based on IPython parallel (ipcluster)"""

    def __init__(self, **specs):
        """Connect to a running ipcluster and build a SurveySimulation per engine.

        Args:
            **specs:
                Forwarded to SurveyEnsemble and pushed to every engine so that
                each worker can construct its own SurveySimulation object.
        """
        SurveyEnsemble.__init__(self, **specs)

        self.verb = specs.get("verbose", True)

        # access the cluster
        self.rc = Client()
        self.dview = self.rc[:]
        self.dview.block = True
        # make the modules the workers need available on every engine
        with self.dview.sync_imports():
            import EXOSIMS, EXOSIMS.util.get_module, os, os.path, time, random, pickle, traceback, numpy  # noqa: E401, F401, E501
        # logger/seed are dropped before pushing the spec dict to the workers
        if "logger" in specs:
            specs.pop("logger")
        if "seed" in specs:
            specs.pop("seed")
        self.dview.push(dict(specs=specs))
        self.vprint("Building SurveySimulation object on all workers.")
        _ = self.dview.execute(
            "SS = EXOSIMS.util.get_module.get_module(specs['modules'] \
            ['SurveySimulation'], 'SurveySimulation')(**specs)"
        )

        _ = self.dview.execute("SS.reset_sim()")

        self.vprint(
            "Created SurveySimulation objects on %d engines." % len(self.rc.ids)
        )
        # for row in res.stdout:
        #     self.vprint(row)

        # load-balanced view used to distribute the simulation tasks
        self.lview = self.rc.load_balanced_view()

        self.maxNumEngines = len(self.rc.ids)

    def run_ensemble(
        self,
        sim,
        nb_run_sim,
        run_one=None,
        genNewPlanets=True,
        rewindPlanets=True,
        kwargs={},
    ):
        """Submit nb_run_sim tasks to the cluster, watching for hung jobs.

        Args:
            sim:
                Not referenced in this implementation; kept for interface
                compatibility with other SurveyEnsemble classes -- TODO confirm.
            nb_run_sim (int):
                Number of simulations to submit.
            run_one (callable):
                Function executed once per simulation on the cluster.
            genNewPlanets (bool):
                Passed through to run_one.
            rewindPlanets (bool):
                Passed through to run_one.
            kwargs (dict):
                Extra keyword arguments forwarded to run_one.
                NOTE(review): mutable default argument, shared between calls.

        Returns:
            list:
                One result per submitted task, or [1] when hanging runs forced
                an abort/shutdown of the cluster.
        """
        hangingRunsOccured = False  # keeps track of whether hanging runs have occured
        t1 = time.time()
        async_res = []
        for j in range(nb_run_sim):
            ar = self.lview.apply_async(
                run_one,
                genNewPlanets=genNewPlanets,
                rewindPlanets=rewindPlanets,
                **kwargs
            )
            async_res.append(ar)

        print("Submitted %d tasks." % len(async_res))

        # map engine index -> OS pid so hung engines can be signalled later
        engine_pids = self.rc[:].apply(os.getpid).get_dict()
        # ar2 = self.lview.apply_async(os.getpid)
        # pids = ar2.get_dict()
        print("engine_pids")
        print(engine_pids)

        runStartTime = time.time()  # create job starting time
        avg_time_per_run = 0.0
        tmplenoutstandingset = nb_run_sim
        tLastRunFinished = time.time()
        ar = self.rc._asyncresult_from_jobs(async_res)
        # poll every 10 s until all tasks are done (or we abort on a hang)
        while not ar.ready():
            ar.wait(10.0)
            clear_output(wait=True)
            if ar.progress > 0:
                # extrapolate remaining wall time from completed fraction
                timeleft = ar.elapsed / ar.progress * (nb_run_sim - ar.progress)
                if timeleft > 3600.0:
                    timeleftstr = "%2.2f hours" % (timeleft / 3600.0)
                elif timeleft > 60.0:
                    timeleftstr = "%2.2f minutes" % (timeleft / 60.0)
                else:
                    timeleftstr = "%2.2f seconds" % timeleft
            else:
                timeleftstr = "who knows"

            # Terminate hanging runs
            # a set of msg_ids that have been submitted but resunts have not
            # been received
            outstandingset = self.rc.outstanding
            # there is at least 1 run still going and we have not just started
            if len(outstandingset) > 0 and len(outstandingset) < nb_run_sim:
                # compute average amount of time per run
                avg_time_per_run = (time.time() - runStartTime) / float(
                    nb_run_sim - len(outstandingset)
                )
                # The scheduler has finished a run
                if len(outstandingset) < tmplenoutstandingset:
                    # update this. should decrease by ~1 or number of cores...
                    tmplenoutstandingset = len(outstandingset)
                    # update tLastRunFinished to the last time a simulation
                    # finished (right now)
                    tLastRunFinished = time.time()

                # hang heuristic: no task finished for far longer than the
                # average per-run time scaled by the engine count
                if (
                    time.time() - tLastRunFinished
                    > avg_time_per_run * (1.0 + self.maxNumEngines * 2.0) * 4.0
                ):
                    # nb_run_sim = len(self.rc.outstanding)
                    # restartRuns = True
                    self.vprint(
                        "Aborting "
                        + str(len(self.rc.outstanding))
                        + "qty outstandingset jobs"
                    )
                    # runningPIDS = os.listdir('/proc')  # get all running pids
                    self.vprint("queue_status")
                    self.vprint(str(self.rc.queue_status()))
                    self.rc.abort()
                    ar.wait(20)
                    # runningPIDS = [
                    #     int(tpid) for tpid in os.listdir("/proc") if tpid.isdigit()
                    # ]
                    # [self.rc.queue_status()[eind] for
                    #  eind in np.arange(self.maxNumEngines) if
                    #  self.rc.queue_status()[eind]['tasks']>0]
                    # send SIGTERM (15) to every engine that still has tasks
                    for engineInd in [
                        eind
                        for eind in np.arange(self.maxNumEngines)
                        if self.rc.queue_status()[eind]["tasks"] > 0
                    ]:
                        os.kill(engine_pids[engineInd], 15)
                        time.sleep(20)
                    # for pid in [engine_pids[eind] for eind in
                    #             np.arange(len(engine_pids))]:
                    #     if pid in runningPIDS:
                    #         os.kill(pid,9)  # send kill command to stop this worker
                    stopIPClusterCommand = subprocess.Popen(["ipcluster", "stop"])
                    stopIPClusterCommand.wait()
                    time.sleep(
                        60
                    )  # doing this instead of waiting for ipcluster to terminate
                    stopIPClusterCommand = subprocess.Popen(["ipcluster", "stop"])
                    stopIPClusterCommand.wait()
                    time.sleep(
                        60
                    )  # doing this instead of waiting for ipcluster to terminate
                    hangingRunsOccured = (
                        True  # keeps track of whether hanging runs have occured
                    )
                    break
                    # stopIPClusterCommand.wait()  # waits for process to terminate
                    # call(["ipcluster","stop"])  # send command to stop ipcluster
                    # self.rc.abort(jobs=self.rc.outstanding.copy().pop())
                    # self.rc.abort()
                    # by default should abort all outstanding jobs...
                    # it is possible that this will not stop the jobs running
                    # ar.wait(100)
                    # self.rc.purge_everything()
                    # purge all results if outstanding *because rc.abort()
                    # didn't seem to do the job right
                    # update tLastRunFinished to the last time a simulation was
                    # restarted (right now)
                    # NOTE(review): unreachable after the ``break`` above -- confirm intent
                    tLastRunFinished = time.time()

            print(
                "%4i/%i tasks finished after %4i s. About %s to go."
                % (ar.progress, nb_run_sim, ar.elapsed, timeleftstr),
                end="",
            )
            sys.stdout.flush()
            # numRunStarts += 1  # increment number of run restarts

        t2 = time.time()
        print("\nCompleted in %d sec" % (t2 - t1))

        if hangingRunsOccured:  # hanging runs have occured
            res = [1]
        else:
            res = [ar.get() for ar in async_res]

        return res
{ "content_hash": "4c431d13d841e8344a35a1e1ce261090", "timestamp": "", "source": "github", "line_count": 200, "max_line_length": 130, "avg_line_length": 39.425, "alnum_prop": 0.4986683576410907, "repo_name": "dsavransky/EXOSIMS", "id": "e7d9f9319bdb6d80ceb4b80a3148748d15cc02b7", "size": "7885", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "EXOSIMS/SurveyEnsemble/IPClusterEnsemble.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "8087" }, { "name": "Cython", "bytes": "2459" }, { "name": "Python", "bytes": "2936469" } ], "symlink_target": "" }
"""igcollect - MySQL Status Copyright (c) 2016 InnoGames GmbH """ try: from mysql.connector import connect except ImportError: from MySQLdb import connect from argparse import ArgumentParser from time import time def parse_args(): parser = ArgumentParser() parser.add_argument('--prefix', default='mysql') parser.add_argument('--host', default='localhost') parser.add_argument('--user') parser.add_argument('--password') parser.add_argument( '--unix-socket', default='/var/run/mysqld/mysqld.sock', ) return parser.parse_args() def main(): args = parse_args() template = args.prefix + '.{}.{} {} ' + str(int(time())) db = connect( user=args.user, passwd=args.password, host=args.host, unix_socket=args.unix_socket, ) cur = db.cursor() # Check for global status cur.execute('SHOW GLOBAL STATUS') for row in cur.fetchall(): if row[1].isdigit(): print(template.format('status', row[0], row[1])) cur.execute('SHOW VARIABLES') for row in cur.fetchall(): if row[1].isdigit(): print(template.format('variables', row[0], row[1])) # Find out how much space we can recover by Optimize sysdbs = { 'information_schema', 'mysql', 'performance_schema', 'sys', 'test', } free = 0 cur.execute('SHOW DATABASES') for row in cur.fetchall(): if row[0] in sysdbs: continue cur.execute( 'SELECT table_name, ' 'ROUND(data_free / 1024 / 1024), ' 'ROUND((data_length + index_length), 2) ' 'FROM information_schema.tables ' 'WHERE table_type = "BASE TABLE" ' 'AND table_schema = %s', [row[0]] ) for value in cur.fetchall(): print(template.format('table_size', '{}.{}'.format(row[0], value[0]), value[2])) free += value[1] print(template.format('status', 'optimize_freeable', free)) if __name__ == '__main__': main()
{ "content_hash": "3932143fc563c84cd7faf6fccc232f93", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 92, "avg_line_length": 26.175, "alnum_prop": 0.5630372492836676, "repo_name": "innogames/igcollect", "id": "967f78b0bca71d1b6ae82253a0f5933c818ce148", "size": "2116", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "igcollect/mysql.py", "mode": "33261", "license": "mit", "language": [ { "name": "Makefile", "bytes": "447" }, { "name": "Python", "bytes": "192599" } ], "symlink_target": "" }
"""Tests for linear.py in the exp_framework module.""" from absl.testing import absltest from sparse_data.exp_framework import linear from sparse_data.exp_framework.dnn_test import TestModel class TestLinear(TestModel): def setUp(self): super(TestLinear, self).setUp() self.submodule = linear if __name__ == '__main__': absltest.main()
{ "content_hash": "905793681f49b1ea0e1feeb58b6b73f8", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 56, "avg_line_length": 20.88235294117647, "alnum_prop": 0.7183098591549296, "repo_name": "google-research/google-research", "id": "4bdd2bdcd99eba1b8cdd475a4b2be164408206b3", "size": "963", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sparse_data/exp_framework/linear_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "9817" }, { "name": "C++", "bytes": "4166670" }, { "name": "CMake", "bytes": "6412" }, { "name": "CSS", "bytes": "27092" }, { "name": "Cuda", "bytes": "1431" }, { "name": "Dockerfile", "bytes": "7145" }, { "name": "Gnuplot", "bytes": "11125" }, { "name": "HTML", "bytes": "77599" }, { "name": "ImageJ Macro", "bytes": "50488" }, { "name": "Java", "bytes": "487585" }, { "name": "JavaScript", "bytes": "896512" }, { "name": "Julia", "bytes": "67986" }, { "name": "Jupyter Notebook", "bytes": "71290299" }, { "name": "Lua", "bytes": "29905" }, { "name": "MATLAB", "bytes": "103813" }, { "name": "Makefile", "bytes": "5636" }, { "name": "NASL", "bytes": "63883" }, { "name": "Perl", "bytes": "8590" }, { "name": "Python", "bytes": "53790200" }, { "name": "R", "bytes": "101058" }, { "name": "Roff", "bytes": "1208" }, { "name": "Rust", "bytes": "2389" }, { "name": "Shell", "bytes": "730444" }, { "name": "Smarty", "bytes": "5966" }, { "name": "Starlark", "bytes": "245038" } ], "symlink_target": "" }
from swgpy.object import *


def create(kernel):
	"""Build the Static template for the Naboo gungan statue (style s02)."""
	statue = Static()

	statue.template = "object/static/structure/naboo/shared_gungan_statue_s02.iff"
	statue.attribute_template_id = -1
	statue.stfName("obj_n","unknown_object")

	#### BEGIN MODIFICATIONS ####

	#### END MODIFICATIONS ####

	return statue
{ "content_hash": "947347e26445681fa5f1894d91062763", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 79, "avg_line_length": 23.692307692307693, "alnum_prop": 0.6948051948051948, "repo_name": "obi-two/Rebelion", "id": "0264081f8a507880d69b3380a8d734f1bffa3fc2", "size": "453", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/scripts/templates/object/static/structure/naboo/shared_gungan_statue_s02.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11818" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2293610" }, { "name": "CMake", "bytes": "39727" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7499185" }, { "name": "SQLPL", "bytes": "41864" } ], "symlink_target": "" }
from django.shortcuts import render, redirect
from django.views.generic import View, TemplateView
from django.http import HttpResponse
from django.core.mail import EmailMessage
from django.conf import settings

from .forms import NewEmail
from .models import Tarea


class homework(View):
    """Form view that emails a homework document to the chosen recipient."""

    template_name = 'homework.html'

    def get(self, request):
        """Render the empty homework submission form."""
        form = NewEmail()
        return render(request, self.template_name, locals())

    def post(self, request):
        """Validate the form, persist it, and email the attached document.

        Redirects to 'homework_success' on success, 'homework_error' when
        sending the mail fails, and re-renders the form when invalid.
        """
        form = NewEmail(request.POST, request.FILES)
        user = request.user
        email = user.email
        if not form.is_valid():
            return render(request, self.template_name, locals())

        form.save(user=user)
        subject = "Tarea de: " + request.POST['subject']
        name = request.POST['name']
        user_email = email
        to_email = request.POST['to_email']
        to_list = [to_email, user_email]
        # NOTE(review): 'homerwork_doc' looks misspelled but must match the
        # NewEmail form field name -- confirm before renaming.
        homework_doc = request.FILES['homerwork_doc']
        try:
            mail = EmailMessage(subject, name, email, to_list)  # settings.EMAIL_HOST_USER
            mail.attach(homework_doc.name, homework_doc.read(), homework_doc.content_type)
            mail.send()
            return redirect('homework_success')
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only real errors should show the error page.
            return redirect('homework_error')


class homework_success(TemplateView):
    """Static page shown after a successful submission."""
    template_name = 'homework_success.html'


class homework_error(TemplateView):
    """Static page shown when sending the email failed."""
    template_name = 'homework_error.html'


def get_homework(request):
    """List the homework entries belonging to the current user."""
    user = request.user
    homeworks = Tarea.objects.filter(user=user)
    return render(
        request,
        'homework1.html',
        {'homeworks': homeworks}
    )
{ "content_hash": "128e875e5064a8c6f829684ba3387c8f", "timestamp": "", "source": "github", "line_count": 52, "max_line_length": 82, "avg_line_length": 28.692307692307693, "alnum_prop": 0.7292225201072386, "repo_name": "diego-d5000/Estudiala", "id": "c5bc512a73169149c2f785d7448a332a360eee98", "size": "1492", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "estudiala/estudiala/tareas/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "490801" }, { "name": "Groff", "bytes": "2189" }, { "name": "HTML", "bytes": "2444503" }, { "name": "JavaScript", "bytes": "1941799" }, { "name": "Makefile", "bytes": "302" }, { "name": "Python", "bytes": "34083" }, { "name": "Shell", "bytes": "10111" } ], "symlink_target": "" }
""" Twitter Authentication """ from .oauthclient import client from django.conf.urls import url from django.urls import reverse from django.http import HttpResponseRedirect from helios_auth import utils import logging from django.conf import settings API_KEY = settings.TWITTER_API_KEY API_SECRET = settings.TWITTER_API_SECRET USER_TO_FOLLOW = settings.TWITTER_USER_TO_FOLLOW REASON_TO_FOLLOW = settings.TWITTER_REASON_TO_FOLLOW DM_TOKEN = settings.TWITTER_DM_TOKEN # some parameters to indicate that status updating is possible STATUS_UPDATES = True STATUS_UPDATE_WORDING_TEMPLATE = "Tweet %s" FOLLOW_VIEW_URL_NAME = "auth@twitter@follow" OAUTH_PARAMS = { 'root_url' : 'https://twitter.com', 'request_token_path' : '/oauth/request_token', 'authorize_path' : '/oauth/authorize', 'authenticate_path' : '/oauth/authenticate', 'access_token_path': '/oauth/access_token' } def _get_new_client(token=None, token_secret=None): if token: return client.LoginOAuthClient(API_KEY, API_SECRET, OAUTH_PARAMS, token, token_secret) else: return client.LoginOAuthClient(API_KEY, API_SECRET, OAUTH_PARAMS) def _get_client_by_token(token): return _get_new_client(token['oauth_token'], token['oauth_token_secret']) def get_auth_url(request, redirect_url): client = _get_new_client() try: tok = client.get_request_token() except: return None request.session['request_token'] = tok url = client.get_authenticate_url(tok['oauth_token']) return url def get_user_info_after_auth(request): tok = request.session['request_token'] twitter_client = _get_client_by_token(tok) access_token = twitter_client.get_access_token() request.session['access_token'] = access_token user_info = utils.from_json(twitter_client.oauth_request('http://api.twitter.com/1/account/verify_credentials.json', args={}, method='GET')) return {'type': 'twitter', 'user_id' : user_info['screen_name'], 'name': user_info['name'], 'info': user_info, 'token': access_token} def user_needs_intervention(user_id, user_info, token): """ check to see if user 
is following the users we need """ twitter_client = _get_client_by_token(token) friendship = utils.from_json(twitter_client.oauth_request('http://api.twitter.com/1/friendships/exists.json', args={'user_a': user_id, 'user_b': USER_TO_FOLLOW}, method='GET')) if friendship: return None return HttpResponseRedirect(reverse(FOLLOW_VIEW_URL_NAME)) def _get_client_by_request(request): access_token = request.session['access_token'] return _get_client_by_token(access_token) def update_status(user_id, user_info, token, message): """ post a message to the auth system's update stream, e.g. twitter stream """ twitter_client = _get_client_by_token(token) result = twitter_client.oauth_request('http://api.twitter.com/1/statuses/update.json', args={'status': message}, method='POST') def send_message(user_id, user_name, user_info, subject, body): pass def public_url(user_id): return "http://twitter.com/%s" % user_id def send_notification(user_id, user_info, message): twitter_client = _get_client_by_token(DM_TOKEN) result = twitter_client.oauth_request('http://api.twitter.com/1/direct_messages/new.json', args={'screen_name': user_id, 'text': message}, method='POST') ## ## views ## def follow_view(request): if request.method == "GET": from helios_auth.view_utils import render_template from helios_auth.views import after return render_template(request, 'twitter/follow', {'user_to_follow': USER_TO_FOLLOW, 'reason_to_follow' : REASON_TO_FOLLOW}) if request.method == "POST": follow_p = bool(request.POST.get('follow_p',False)) if follow_p: from helios_auth.security import get_user user = get_user(request) twitter_client = _get_client_by_token(user.token) result = twitter_client.oauth_request('http://api.twitter.com/1/friendships/create.json', args={'screen_name': USER_TO_FOLLOW}, method='POST') from helios_auth.url_names import AUTH_AFTER_INTERVENTION return HttpResponseRedirect(reverse(AUTH_AFTER_INTERVENTION)) # # Election Creation # def can_create_election(user_id, user_info): return 
True urlpatterns = [url(r'^twitter/follow', follow_view, name=FOLLOW_VIEW_URL_NAME)]
{ "content_hash": "281c568b4dc05229e7ca594fc7b6c73e", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 178, "avg_line_length": 32.54545454545455, "alnum_prop": 0.7129888268156425, "repo_name": "benadida/helios-server", "id": "541ac4df7cd874259a71be3566246fa3b2a48b57", "size": "4296", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "helios_auth/auth_systems/twitter.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13437" }, { "name": "HTML", "bytes": "162385" }, { "name": "Java", "bytes": "2271" }, { "name": "JavaScript", "bytes": "307826" }, { "name": "Procfile", "bytes": "119" }, { "name": "Python", "bytes": "483554" }, { "name": "Shell", "bytes": "402" } ], "symlink_target": "" }
"""Extract date & time from text having today, tomorrow, yesterday, etc.""" import re import datetime from chronos.extractor.base import Extractor class RelativeDayExtractor(Extractor): def __init__(self, ref=None): super(RelativeDayExtractor, self).__init__(ref) self.patterns = {re.compile(r"""\b(day before yesterday|day after tomorrow|yesterday|tomorrow|today)\b""", re.IGNORECASE): self.__extract_day, re.compile(r'\b(next|in)* ?(\d+|a) (year|month|week|day)s? ?(ago|back)*\b', re.IGNORECASE): self.__extract_relative_days, re.compile(r'\b(last|next) (year|month|week)\b', re.IGNORECASE): self.__extract_last_next, re.compile(r'\b(last|next) (monday|tuesday|wednesday|thursday|friday|saturday|sunday)\b', re.IGNORECASE): self.__extract_last_next_day, } if ref: self.today = ref.date() else: self.today = datetime.date.today() def extract(self, text): """Extract all types of relative patterns.""" result = [] for pattern, functor in self.patterns.items(): matches = pattern.findall(text) result.extend(functor(matches)) return result def __extract_day(self, matches): """Extract today, tomorrow, yesterday, etc.""" result = [] one_day = datetime.timedelta(days=1) yesterday = self.today - one_day tomorrow = self.today + one_day db_yesterday = yesterday - one_day da_tomorrow = tomorrow + one_day for match in matches: match_lower = match.lower() if match_lower == 'yesterday': result.append(yesterday) elif match_lower == 'tomorrow': result.append(tomorrow) elif match_lower == 'today': result.append(self.today) elif match_lower == 'day before yesterday': result.append(db_yesterday) elif match_lower == 'day after tomorrow': result.append(da_tomorrow) return result def __extract_relative_days(self, matches): """Extract phrases like "in N days", "N days ago", etc.""" result = [] for match in matches: if match[1] == 'a': # For "in a week", "a week back", "a week ago", etc. 
if match[0] == 'next': # "next a week" doesn't make sense continue else: num = 1 else: # in N days, N weeks back, N years ago, etc. num = int(match[1]) if match[2] == 'day': delta = datetime.timedelta(days=num) elif match[2] == 'week': delta = datetime.timedelta(weeks=num) elif match[2] == 'month': # assuming 30 days to a month delta = datetime.timedelta(days=num*30) elif match[2] == 'year': # assuming 365 days to a year delta = datetime.timedelta(days=num*365) if match[0] and match[3]: # both in and ago doesn't make sense continue if match[0]: # either of next or in result.append(self.today + delta) elif match[3]: # either ago or back result.append(self.today - delta) return result def __extract_last_next(self, matches): """Extract from matches like last/next week/month/year combinations.""" result = [] for match in matches: if match[1] == 'week': delta = datetime.timedelta(weeks=1) elif match[1] == 'month': delta = datetime.timedelta(days=30) elif match[1] == 'year': delta = datetime.timedelta(days=365) if match[0] == 'last': result.append(self.today - delta) elif match[0] == 'next': result.append(self.today + delta) return result def __extract_last_next_day(self, matches): """Extract matches with last wednesday, next friday, etc.""" result = [] today_num = self.today.weekday() days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] day_num = dict([(day, i) for i, day in enumerate(days)]) for match in matches: match_num = day_num[match[1].lower()] if match[0] == 'last': if today_num >= match_num: if today_num - match_num == 0: # matched day is also today delta = datetime.timedelta(days=7) else: delta = datetime.timedelta(days=today_num - match_num) else: delta = datetime.timedelta(days=7-today_num) result.append(self.today - delta) elif match[0] == 'next': if today_num >= match_num: delta = datetime.timedelta(days=7 + match_num - today_num) else: delta = datetime.timedelta(match_num - today_num) result.append(self.today + delta) 
return result
{ "content_hash": "ed7e145371c942d5c1894864d84c1ce7", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 114, "avg_line_length": 40.0735294117647, "alnum_prop": 0.5077064220183486, "repo_name": "cnu/chronos", "id": "19f51ad30db8ab33a22ca3553ee81ede0b93c0e4", "size": "5497", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chronos/extractor/relative_day_extractor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "35856" } ], "symlink_target": "" }
""" ===================== Workflow from scratch ===================== """ import nipype.interfaces.io as nio # Data i/o import nipype.interfaces.spm as spm # spm import nipype.pipeline.engine as pe # pypeline engine import nipype.algorithms.modelgen as model # model specification from nipype.interfaces.base import Bunch import os # system functions """In the following section, to showcase NiPyPe, we will describe how to create and extend a typical fMRI processing pipeline. We will begin with a basic processing layout and follow with extending it by adding/exchanging different components. Most fMRI pipeline can be divided into two sections - preprocessing and modelling. First one deals with cleaning data from confounds and noise and the second one fits a model based on the experimental design. Preprocessing stage in our first iteration of a pipeline will consist of only two steps: realignment and smoothing. In NiPyPe Every processing step consist of an Interface (which defines how to execute corresponding software) encapsulated in a Node (which defines for example a unique name). For realignment (motion correction achieved by coregistering all volumes to the mean) and smoothing (convolution with 3D Gaussian kernel) we will use SPM implementation. Definition of appropriate nodes can be found in Listing 1 (TODO). Inputs (such as register_to_mean from listing 1) of nodes are accessible through the inputs property. Upon setting any input its type is verified to avoid errors during the execution.""" realign = pe.Node(interface=spm.Realign(), name="realign") realign.inputs.register_to_mean = True smooth = pe.Node(interface=spm.Smooth(), name="smooth") smooth.inputs.fwhm = 4 """To connect two nodes a Workflow has to be created. connect() method of a Workflow allows to specify which outputs of which Nodes should be connected to which inputs of which Nodes (see Listing 2). 
By connecting realigned_files output of realign to in_files input of Smooth we have created a simple preprocessing workflow (see Figure TODO).""" preprocessing = pe.Workflow(name="preprocessing") preprocessing.connect(realign, "realigned_files", smooth, "in_files") """Creating a modelling workflow which will define the design, estimate model and contrasts follows the same suite. We will again use SPM implementations. NiPyPe, however, adds extra abstraction layer to model definition which allows using the same definition for many model estimation implemantations (for example one from FSL or nippy). Therefore we will need four nodes: SpecifyModel (NiPyPe specific abstraction layer), Level1Design (SPM design definition), ModelEstimate, and ContrastEstimate. The connected modelling Workflow can be seen on Figure TODO. Model specification supports block, event and sparse designs. Contrasts provided to ContrastEstimate are defined using the same names of regressors as defined in the SpecifyModel.""" specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model") specify_model.inputs.input_units = 'secs' specify_model.inputs.time_repetition = 3. 
specify_model.inputs.high_pass_filter_cutoff = 120 specify_model.inputs.subject_info = [Bunch(conditions=['Task-Odd','Task-Even'], onsets=[range(15,240,60), range(45,240,60)], durations=[[15], [15]])]*4 level1design = pe.Node(interface=spm.Level1Design(), name= "level1design") level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} level1design.inputs.timing_units = 'secs' level1design.inputs.interscan_interval = specify_model.inputs.time_repetition level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") level1estimate.inputs.estimation_method = {'Classical' : 1} contrastestimate = pe.Node(interface = spm.EstimateContrast(), name="contrastestimate") cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) contrastestimate.inputs.contrasts = [cont1, cont2] modelling = pe.Workflow(name="modelling") modelling.connect(specify_model, 'session_info', level1design, 'session_info') modelling.connect(level1design, 'spm_mat_file', level1estimate, 'spm_mat_file') modelling.connect(level1estimate,'spm_mat_file', contrastestimate,'spm_mat_file') modelling.connect(level1estimate,'beta_images', contrastestimate,'beta_images') modelling.connect(level1estimate,'residual_image', contrastestimate,'residual_image') """Having preprocessing and modelling workflows we need to connect them together, add data grabbing facility and save the results. For this we will create a master Workflow which will host preprocessing and model Workflows as well as DataGrabber and DataSink Nodes. NiPyPe allows connecting Nodes between Workflows. 
We will use this feature to connect realignment_parameters and smoothed_files to modelling workflow.""" main_workflow = pe.Workflow(name="main_workflow") main_workflow.base_dir = "workflow_from_scratch" main_workflow.connect(preprocessing, "realign.realignment_parameters", modelling, "specify_model.realignment_parameters") main_workflow.connect(preprocessing, "smooth.smoothed_files", modelling, "specify_model.functional_runs") """DataGrabber allows to define flexible search patterns which can be parameterized by user defined inputs (such as subject ID, session etc.). This allows to adapt to a wide range of file layouts. In our case we will parameterize it with subject ID. In this way we will be able to run it for different subjects. We can automate this by iterating over a list of subject Ids, by setting an iterables property on the subject_id input of DataGrabber. Its output will be connected to realignment node from preprocessing workflow.""" datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['func']), name = 'datasource') datasource.inputs.base_directory = os.path.abspath('data') datasource.inputs.template = '%s/%s.nii' datasource.inputs.template_args = dict(func=[['subject_id', ['f3','f5','f7','f10']]]) datasource.inputs.subject_id = 's1' main_workflow.connect(datasource, 'func', preprocessing, 'realign.in_files') """DataSink on the other side provides means to storing selected results to a specified location. It supports automatic creation of folder stricter and regular expression based substitutions. In this example we will store T maps.""" datasink = pe.Node(interface=nio.DataSink(), name="datasink") datasink.inputs.base_directory = os.path.abspath('workflow_from_scratch/output') main_workflow.connect(modelling, 'contrastestimate.spmT_images', datasink, 'contrasts.@T') main_workflow.run() main_workflow.write_graph()
{ "content_hash": "578cb838788b03c1018e168d0af9b8dd", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 80, "avg_line_length": 51.2536231884058, "alnum_prop": 0.7299589990103209, "repo_name": "christianbrodbeck/nipype", "id": "fc36a9ae65702231296bfc3710db5da6140930c2", "size": "7187", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/frontiers_paper/workflow_from_scratch.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Matlab", "bytes": "282" }, { "name": "Objective-C", "bytes": "4736" }, { "name": "Python", "bytes": "2537426" }, { "name": "Tcl", "bytes": "43408" } ], "symlink_target": "" }
import RPIO


def gpio_callback(gpio_id, val):
    # Invoked by RPIO's epoll loop when a registered GPIO pin changes value.
    print("gpio %s: %s" % (gpio_id, val))


def socket_callback(socket, val):
    # Invoked when a line of text arrives on the TCP callback socket;
    # echo the payload back to the sender.
    print("socket %s: '%s'" % (socket.fileno(), val))
    socket.send("echo: %s\n" % val)


# GPIO interrupt callbacks: pin 7 with default pull, pin 9 with the
# internal pull-up resistor enabled.
RPIO.add_interrupt_callback(7, gpio_callback)
RPIO.add_interrupt_callback(9, gpio_callback, pull_up_down=RPIO.PUD_UP)

# TCP socket callback server on port 8080
RPIO.add_tcp_callback(8080, socket_callback)

# Block the main thread in RPIO's epoll loop, dispatching callbacks.
RPIO.wait_for_interrupts()
{ "content_hash": "86886ba721086c62f949b8cda1031dfd", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 71, "avg_line_length": 25.5, "alnum_prop": 0.6862745098039216, "repo_name": "quchunguang/test", "id": "ae80f0a3f742e959fba14f2ba7aff8f5179bc1a6", "size": "491", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "testraspberrypi/interrupt.py", "mode": "33261", "license": "mit", "language": [ { "name": "ActionScript", "bytes": "1086" }, { "name": "Assembly", "bytes": "71339" }, { "name": "Awk", "bytes": "1033" }, { "name": "Batchfile", "bytes": "571" }, { "name": "C", "bytes": "1063602" }, { "name": "C++", "bytes": "309142" }, { "name": "CSS", "bytes": "22567" }, { "name": "CoffeeScript", "bytes": "5429" }, { "name": "Common Lisp", "bytes": "941" }, { "name": "Fortran", "bytes": "21095" }, { "name": "Gnuplot", "bytes": "11868" }, { "name": "Go", "bytes": "14507" }, { "name": "HCL", "bytes": "21381" }, { "name": "HTML", "bytes": "788820" }, { "name": "Java", "bytes": "947462" }, { "name": "JavaScript", "bytes": "11208" }, { "name": "Lex", "bytes": "8920" }, { "name": "M", "bytes": "14447" }, { "name": "M4", "bytes": "550" }, { "name": "Makefile", "bytes": "123588" }, { "name": "Mathematica", "bytes": "3808649" }, { "name": "Matlab", "bytes": "99775" }, { "name": "Objective-C", "bytes": "18954" }, { "name": "OpenEdge ABL", "bytes": "5002" }, { "name": "PHP", "bytes": "80666" }, { "name": "PLpgSQL", "bytes": "399" }, { "name": "Perl", "bytes": "350" }, { "name": "PostScript", "bytes": "9049" }, { "name": "Python", "bytes": "521668" }, { "name": "QMake", "bytes": "258" }, { "name": "R", "bytes": "67" }, { "name": "Roff", "bytes": "1331" }, { "name": "Scala", "bytes": "1467" }, { "name": "Scheme", "bytes": "68" }, { "name": "Shell", "bytes": "551111" }, { "name": "SuperCollider", "bytes": "26339" }, { "name": "TeX", "bytes": "6604" }, { "name": "Yacc", "bytes": "23335" } ], "symlink_target": "" }
from itertools import tee

from six import string_types

import jmespath
import json
import base64
import logging

from botocore.exceptions import PaginationError
from botocore.compat import zip
from botocore.utils import set_value_from_jmespath, merge_dicts


log = logging.getLogger(__name__)


class PaginatorModel(object):
    """Exposes per-operation pagination configuration loaded from the
    service's paginator JSON model (the ``pagination`` section)."""

    def __init__(self, paginator_config):
        self._paginator_config = paginator_config['pagination']

    def get_paginator(self, operation_name):
        # Look up the pagination config for a single operation; raise a
        # ValueError (not KeyError) for unknown operations.
        try:
            single_paginator_config = self._paginator_config[operation_name]
        except KeyError:
            raise ValueError("Paginator for operation does not exist: %s"
                             % operation_name)
        return single_paginator_config


class PageIterator(object):
    """Iterator over the pages of a paginated operation.

    Repeatedly invokes ``method`` (the operation callable), feeding each
    response's output token(s) back in as input token(s), and yields one
    full response per page.  Also tracks ``MaxItems`` truncation and
    encodes resume tokens as base64 JSON.
    """

    def __init__(self, method, input_token, output_token, more_results,
                 result_keys, non_aggregate_keys, limit_key, max_items,
                 starting_token, page_size, op_kwargs):
        self._method = method
        self._input_token = input_token
        self._output_token = output_token
        self._more_results = more_results
        self._result_keys = result_keys
        self._max_items = max_items
        self._limit_key = limit_key
        self._starting_token = starting_token
        self._page_size = page_size
        self._op_kwargs = op_kwargs
        self._resume_token = None
        self._non_aggregate_key_exprs = non_aggregate_keys
        self._non_aggregate_part = {}

    @property
    def result_keys(self):
        return self._result_keys

    @property
    def resume_token(self):
        """Token to specify to resume pagination."""
        return self._resume_token

    @resume_token.setter
    def resume_token(self, value):
        # The resume token must be a dict whose keys are exactly the input
        # tokens (plus optionally 'boto_truncate_amount'); it is stored as
        # a base64-encoded JSON string.
        if not isinstance(value, dict):
            raise ValueError("Bad starting token: %s" % value)

        if 'boto_truncate_amount' in value:
            token_keys = sorted(self._input_token + ['boto_truncate_amount'])
        else:
            token_keys = sorted(self._input_token)
        dict_keys = sorted(value.keys())

        if token_keys == dict_keys:
            self._resume_token = base64.b64encode(
                json.dumps(value).encode('utf-8')).decode('utf-8')
        else:
            raise ValueError("Bad starting token: %s" % value)

    @property
    def non_aggregate_part(self):
        # Values from the last page for keys that are not aggregated
        # across pages (merged into build_full_result()'s output).
        return self._non_aggregate_part

    def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
            self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                    - self._max_items
            if truncate_amount > 0:
                # This page overshoots MaxItems: trim it in place, yield
                # the trimmed page, and stop.
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    # Guard against a service returning the same token
                    # twice, which would loop forever.
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token

    def search(self, expression):
        """Applies a JMESPath expression to a paginator

        Each page of results is searched using the provided JMESPath
        expression. If the result is not a list, it is yielded
        directly. If the result is a list, each element in the result
        is yielded individually (essentially implementing a flatmap in
        which the JMESPath search is the mapping function).

        :type expression: str
        :param expression: JMESPath expression to apply to each page.

        :return: Returns an iterator that yields the individual
            elements of applying a JMESPath expression to each page of
            results.
        """
        compiled = jmespath.compile(expression)
        for page in self:
            results = compiled.search(page)
            if isinstance(results, list):
                for element in results:
                    yield element
            else:
                # Yield result directly if it is not a list.
                yield results

    def _make_request(self, current_kwargs):
        return self._method(**current_kwargs)

    def _extract_parsed_response(self, response):
        # Hook for subclasses whose responses wrap the parsed payload.
        return response

    def _record_non_aggregate_key_values(self, response):
        non_aggregate_keys = {}
        for expression in self._non_aggregate_key_exprs:
            result = expression.search(response)
            set_value_from_jmespath(non_aggregate_keys,
                                    expression.expression,
                                    result)
        self._non_aggregate_part = non_aggregate_keys

    def _inject_starting_params(self, op_kwargs):
        # If the user has specified a starting token we need to
        # inject that into the operation's kwargs.
        if self._starting_token is not None:
            # Don't need to do anything special if there is no starting
            # token specified.
            next_token = self._parse_starting_token()[0]
            self._inject_token_into_kwargs(op_kwargs, next_token)
        if self._page_size is not None:
            # Pass the page size as the parameter name for limiting
            # page size, also known as the limit_key.
            op_kwargs[self._limit_key] = self._page_size

    def _inject_token_into_kwargs(self, op_kwargs, next_token):
        # A token of None (or the legacy string 'None') means "absent":
        # remove the kwarg rather than send a null token.
        for name, token in next_token.items():
            if (token is not None) and (token != 'None'):
                op_kwargs[name] = token
            elif name in op_kwargs:
                del op_kwargs[name]

    def _handle_first_request(self, parsed, primary_result_key,
                              starting_truncation):
        # If the payload is an array or string, we need to slice into it
        # and only return the truncated amount.
        starting_truncation = self._parse_starting_token()[1]
        all_data = primary_result_key.search(parsed)
        if isinstance(all_data, (list, string_types)):
            data = all_data[starting_truncation:]
        else:
            data = None
        set_value_from_jmespath(
            parsed,
            primary_result_key.expression,
            data
        )
        # We also need to truncate any secondary result keys
        # because they were not truncated in the previous last
        # response.
        for token in self.result_keys:
            if token == primary_result_key:
                continue
            sample = token.search(parsed)
            # Replace the secondary value with an empty value of the
            # same type so aggregation stays type-consistent.
            if isinstance(sample, list):
                empty_value = []
            elif isinstance(sample, string_types):
                empty_value = ''
            elif isinstance(sample, (int, float)):
                empty_value = 0
            else:
                empty_value = None
            set_value_from_jmespath(parsed, token.expression, empty_value)
        return starting_truncation

    def _truncate_response(self, parsed, primary_result_key, truncate_amount,
                           starting_truncation, next_token):
        original = primary_result_key.search(parsed)
        if original is None:
            original = []
        amount_to_keep = len(original) - truncate_amount
        truncated = original[:amount_to_keep]
        set_value_from_jmespath(
            parsed,
            primary_result_key.expression,
            truncated
        )
        # The issue here is that even though we know how much we've truncated
        # we need to account for this globally including any starting
        # left truncation. For example:
        # Raw response: [0,1,2,3]
        # Starting index: 1
        # Max items: 1
        # Starting left truncation: [1, 2, 3]
        # End right truncation for max items: [1]
        # However, even though we only kept 1, this is post
        # left truncation so the next starting index should be 2, not 1
        # (left_truncation + amount_to_keep).
        next_token['boto_truncate_amount'] = \
            amount_to_keep + starting_truncation
        self.resume_token = next_token

    def _get_next_token(self, parsed):
        if self._more_results is not None:
            if not self._more_results.search(parsed):
                return {}
        next_tokens = {}
        for output_token, input_key in \
                zip(self._output_token, self._input_token):
            next_token = output_token.search(parsed)
            # We do not want to include any empty strings as actual tokens.
            # Treat them as None.
            if next_token:
                next_tokens[input_key] = next_token
            else:
                next_tokens[input_key] = None
        return next_tokens

    def result_key_iters(self):
        # One independent tee'd iterator per result key.
        teed_results = tee(self, len(self.result_keys))
        return [ResultKeyIterator(i, result_key)
                for i, result_key in zip(teed_results, self.result_keys)]

    def build_full_result(self):
        # Aggregate every page into a single response dict.
        complete_result = {}
        for response in self:
            page = response
            # We want to try to catch operation object pagination
            # and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_responsed).
            # We want the parsed_response as that is what the page iterator
            # uses. We can remove it though once operation objects are
            # removed.
            if isinstance(response, tuple) and len(response) == 2:
                page = response[1]
            # We're incrementally building the full response page
            # by page. For each page in the response we need to
            # inject the necessary components from the page
            # into the complete_result.
            for result_expression in self.result_keys:
                # In order to incrementally update a result key
                # we need to search the existing value from complete_result,
                # then we need to search the _current_ page for the
                # current result key value. Then we append the current
                # value onto the existing value, and re-set that value
                # as the new value.
                result_value = result_expression.search(page)
                if result_value is None:
                    continue
                existing_value = result_expression.search(complete_result)
                if existing_value is None:
                    # Set the initial result
                    set_value_from_jmespath(
                        complete_result, result_expression.expression,
                        result_value)
                    continue
                # Now both result_value and existing_value contain something
                if isinstance(result_value, list):
                    existing_value.extend(result_value)
                elif isinstance(result_value, (int, float, string_types)):
                    # Modify the existing result with the sum or concatenation
                    set_value_from_jmespath(
                        complete_result, result_expression.expression,
                        existing_value + result_value)
        merge_dicts(complete_result, self.non_aggregate_part)
        if self.resume_token is not None:
            complete_result['NextToken'] = self.resume_token
        return complete_result

    def _parse_starting_token(self):
        # Returns (token_dict, truncation_index) or None.
        if self._starting_token is None:
            return None

        # The starting token is a dict passed as a base64 encoded string.
        next_token = self._starting_token
        try:
            next_token = json.loads(
                base64.b64decode(next_token).decode('utf-8'))
            index = 0
            if 'boto_truncate_amount' in next_token:
                index = next_token.get('boto_truncate_amount')
                del next_token['boto_truncate_amount']
        except (ValueError, TypeError):
            # Not base64 JSON: fall back to the deprecated '___' format.
            next_token, index = self._parse_starting_token_deprecated()
        return next_token, index

    def _parse_starting_token_deprecated(self):
        """
        This handles parsing of old style starting tokens, and attempts
        to coerce them into the new style.
        """
        log.debug("Attempting to fall back to old starting token parser. For "
                  "token: %s" % self._starting_token)
        if self._starting_token is None:
            return None
        parts = self._starting_token.split('___')
        next_token = []
        index = 0
        if len(parts) == len(self._input_token) + 1:
            # A trailing integer part is the truncation index.
            try:
                index = int(parts.pop())
            except ValueError:
                raise ValueError("Bad starting token: %s"
                                 % self._starting_token)
        for part in parts:
            if part == 'None':
                next_token.append(None)
            else:
                next_token.append(part)
        return self._convert_deprecated_starting_token(next_token), index

    def _convert_deprecated_starting_token(self, deprecated_token):
        """
        This attempts to convert a deprecated starting token into the new
        style.
        """
        len_deprecated_token = len(deprecated_token)
        len_input_token = len(self._input_token)
        if len_deprecated_token > len_input_token:
            raise ValueError("Bad starting token: %s" % self._starting_token)
        elif len_deprecated_token < len_input_token:
            log.debug("Old format starting token does not contain all input "
                      "tokens. Setting the rest, in order, as None.")
            for i in range(len_input_token - len_deprecated_token):
                deprecated_token.append(None)
        return dict(zip(self._input_token, deprecated_token))


class Paginator(object):
    """Compiles an operation's pagination config (JMESPath expressions for
    input/output tokens, result keys, etc.) and creates PageIterators for
    it via :meth:`paginate`."""

    PAGE_ITERATOR_CLS = PageIterator

    def __init__(self, method, pagination_config):
        self._method = method
        self._pagination_cfg = pagination_config
        self._output_token = self._get_output_tokens(self._pagination_cfg)
        self._input_token = self._get_input_tokens(self._pagination_cfg)
        self._more_results = self._get_more_results_token(self._pagination_cfg)
        self._non_aggregate_keys = self._get_non_aggregate_keys(
            self._pagination_cfg)
        self._result_keys = self._get_result_keys(self._pagination_cfg)
        self._limit_key = self._get_limit_key(self._pagination_cfg)

    @property
    def result_keys(self):
        return self._result_keys

    def _get_non_aggregate_keys(self, config):
        keys = []
        for key in config.get('non_aggregate_keys', []):
            keys.append(jmespath.compile(key))
        return keys

    def _get_output_tokens(self, config):
        # 'output_token' may be a single expression or a list of them.
        output = []
        output_token = config['output_token']
        if not isinstance(output_token, list):
            output_token = [output_token]
        for config in output_token:
            output.append(jmespath.compile(config))
        return output

    def _get_input_tokens(self, config):
        input_token = self._pagination_cfg['input_token']
        if not isinstance(input_token, list):
            input_token = [input_token]
        return input_token

    def _get_more_results_token(self, config):
        # Returns a compiled expression, or None when the service signals
        # the end of pagination by omitting the output token instead.
        more_results = config.get('more_results')
        if more_results is not None:
            return jmespath.compile(more_results)

    def _get_result_keys(self, config):
        result_key = config.get('result_key')
        if result_key is not None:
            if not isinstance(result_key, list):
                result_key = [result_key]
            result_key = [jmespath.compile(rk) for rk in result_key]
            return result_key

    def _get_limit_key(self, config):
        return config.get('limit_key')

    def paginate(self, **kwargs):
        """Create paginator object for an operation.

        This returns an iterable object.  Iterating over
        this object will yield a single page of a response
        at a time.

        """
        page_params = self._extract_paging_params(kwargs)
        return self.PAGE_ITERATOR_CLS(
            self._method, self._input_token, self._output_token,
            self._more_results, self._result_keys,
            self._non_aggregate_keys, self._limit_key,
            page_params['MaxItems'],
            page_params['StartingToken'],
            page_params['PageSize'],
            kwargs)

    def _extract_paging_params(self, kwargs):
        # Pop the user-facing PaginationConfig dict out of the operation
        # kwargs and normalize its values.
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                # PageSize only makes sense when the operation accepts a
                # page-size parameter (limit_key).
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }


class ResultKeyIterator(object):
    """Iterates over the results of paginated responses.

    Each iterator is associated with a single result key.
    Iterating over this object will give you each element in
    the result key list.

    :param pages_iterator: An iterator that will give you
        pages of results (a ``PageIterator`` class).
    :param result_key: The JMESPath expression representing
        the result key.

    """

    def __init__(self, pages_iterator, result_key):
        self._pages_iterator = pages_iterator
        self.result_key = result_key

    def __iter__(self):
        for page in self._pages_iterator:
            results = self.result_key.search(page)
            if results is None:
                results = []
            for result in results:
                yield result
{ "content_hash": "b2e17e5be9d86a25da12488b3e987515", "timestamp": "", "source": "github", "line_count": 498, "max_line_length": 79, "avg_line_length": 40.592369477911646, "alnum_prop": 0.5787286668315608, "repo_name": "lordmuffin/aws-cfn-plex", "id": "e817dd4e1ba251e6cd60ebebe99d50f6bc59c819", "size": "20782", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "functions/credstash/botocore/paginate.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "33347" }, { "name": "CSS", "bytes": "67087" }, { "name": "JavaScript", "bytes": "19469" }, { "name": "Python", "bytes": "7866843" }, { "name": "Shell", "bytes": "1025" }, { "name": "TeX", "bytes": "1491" } ], "symlink_target": "" }
""" Type inference base on CPA. The algorithm guarantees monotonic growth of type-sets for each variable. Steps: 1. seed initial types 2. build constraints 3. propagate constraints 4. unify types Constraint propagation is precise and does not regret (no backtracing). Constraints push types forward following the dataflow. """ import logging import operator import contextlib import itertools from pprint import pprint from collections import OrderedDict, defaultdict from functools import reduce from numba.core import types, utils, typing, ir, config from numba.core.typing.templates import Signature from numba.core.errors import (TypingError, UntypedAttributeError, new_error_context, termcolor, UnsupportedError, ForceLiteralArg, CompilerError, NumbaValueError) from numba.core.funcdesc import qualifying_prefix from numba.core.typeconv import Conversion _logger = logging.getLogger(__name__) class NOTSET: pass # terminal color markup _termcolor = termcolor() class TypeVar(object): def __init__(self, context, var): self.context = context self.var = var self.type = None self.locked = False # Stores source location of first definition self.define_loc = None # Qualifiers self.literal_value = NOTSET def add_type(self, tp, loc): assert isinstance(tp, types.Type), type(tp) if self.locked: if tp != self.type: if self.context.can_convert(tp, self.type) is None: msg = ("No conversion from %s to %s for '%s', " "defined at %s") raise TypingError(msg % (tp, self.type, self.var, self.define_loc), loc=loc) else: if self.type is not None: unified = self.context.unify_pairs(self.type, tp) if unified is None: msg = "Cannot unify %s and %s for '%s', defined at %s" raise TypingError(msg % (self.type, tp, self.var, self.define_loc), loc=self.define_loc) else: # First time definition unified = tp self.define_loc = loc self.type = unified return self.type def lock(self, tp, loc, literal_value=NOTSET): assert isinstance(tp, types.Type), type(tp) if self.locked: msg = ("Invalid reassignment of a 
class ConstraintNetwork(object):
    """
    TODO: It is possible to optimize constraint propagation to consider only
          dirty type variables.
    """

    def __init__(self):
        # Constraints are executed in insertion order on every pass.
        self.constraints = []

    def append(self, constraint):
        self.constraints.append(constraint)

    def propagate(self, typeinfer):
        """
        Execute all constraints.  Errors are caught and returned as a list.
        This allows progressing even though some constraints may fail
        due to lack of information
        (e.g. imprecise types such as List(undefined)).
        """
        errors = []
        for constraint in self.constraints:
            loc = constraint.loc
            # Attribute any warning raised while typing to the constraint's
            # source location.
            with typeinfer.warnings.catch_warnings(filename=loc.filename,
                                                   lineno=loc.line):
                try:
                    constraint(typeinfer)
                # ForceLiteralArg is collected as-is so the caller can
                # combine the requested argument indices.
                except ForceLiteralArg as e:
                    errors.append(e)
                except TypingError as e:
                    _logger.debug("captured error", exc_info=e)
                    # Re-wrap so the error carries this constraint's location.
                    new_exc = TypingError(
                        str(e), loc=constraint.loc, highlighting=False,
                    )
                    errors.append(utils.chain_exception(new_exc, e))
                except Exception as e:
                    # Non-typing errors: behavior depends on the configured
                    # error capturing style.
                    if utils.use_old_style_errors():
                        _logger.debug("captured error", exc_info=e)
                        msg = ("Internal error at {con}.\n{err}\n"
                               "Enable logging at debug level for details.")
                        new_exc = TypingError(
                            msg.format(con=constraint, err=str(e)),
                            loc=constraint.loc, highlighting=False,
                        )
                        errors.append(utils.chain_exception(new_exc, e))
                    elif utils.use_new_style_errors():
                        raise e
                    else:
                        msg = ("Unknown CAPTURED_ERRORS style: "
                               f"'{config.CAPTURED_ERRORS}'.")
                        assert 0, msg
        return errors
""" errors = [] for constraint in self.constraints: loc = constraint.loc with typeinfer.warnings.catch_warnings(filename=loc.filename, lineno=loc.line): try: constraint(typeinfer) except ForceLiteralArg as e: errors.append(e) except TypingError as e: _logger.debug("captured error", exc_info=e) new_exc = TypingError( str(e), loc=constraint.loc, highlighting=False, ) errors.append(utils.chain_exception(new_exc, e)) except Exception as e: if utils.use_old_style_errors(): _logger.debug("captured error", exc_info=e) msg = ("Internal error at {con}.\n{err}\n" "Enable logging at debug level for details.") new_exc = TypingError( msg.format(con=constraint, err=str(e)), loc=constraint.loc, highlighting=False, ) errors.append(utils.chain_exception(new_exc, e)) elif utils.use_new_style_errors(): raise e else: msg = ("Unknown CAPTURED_ERRORS style: " f"'{config.CAPTURED_ERRORS}'.") assert 0, msg return errors class Propagate(object): """ A simple constraint for direct propagation of types for assignments. """ def __init__(self, dst, src, loc): self.dst = dst self.src = src self.loc = loc def __call__(self, typeinfer): with new_error_context("typing of assignment at {0}", self.loc, loc=self.loc): typeinfer.copy_type(self.src, self.dst, loc=self.loc) # If `dst` is refined, notify us typeinfer.refine_map[self.dst] = self def refine(self, typeinfer, target_type): # Do not back-propagate to locked variables (e.g. 
class ArgConstraint(object):
    """Constraint seeding a function argument's type into the destination
    type variable once the (mangled) source argument type is known.
    """

    def __init__(self, dst, src, loc):
        self.dst = dst
        self.src = src
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of argument at {0}", self.loc):
            typevars = typeinfer.typevars
            src = typevars[self.src]
            if not src.defined:
                return
            ty = src.getone()
            if isinstance(ty, types.Omitted):
                # Omitted argument: resolve the type from its default value.
                ty = typeinfer.context.resolve_value_type_prefer_literal(
                    ty.value,
                )
            if not ty.is_precise():
                raise TypingError('non-precise type {}'.format(ty))
            typeinfer.add_type(self.dst, ty, loc=self.loc)


class BuildTupleConstraint(object):
    """Constraint typing a ``build_tuple`` expression."""

    def __init__(self, target, items, loc):
        self.target = target
        self.items = items
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of tuple at {0}", self.loc):
            typevars = typeinfer.typevars
            tsets = [typevars[i.name].get() for i in self.items]
            for vals in itertools.product(*tsets):
                if vals and all(vals[0] == v for v in vals):
                    # All elements identical: homogeneous tuple.
                    tup = types.UniTuple(dtype=vals[0], count=len(vals))
                else:
                    # empty tuples fall here as well
                    tup = types.Tuple(vals)
                assert tup.is_precise()
                typeinfer.add_type(self.target, tup, loc=self.loc)


class _BuildContainerConstraint(object):
    """Base constraint for container literals.  Subclasses provide
    ``container_type`` (or override ``__call__``).
    """

    def __init__(self, target, items, loc):
        self.target = target
        self.items = items
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of {0} at {1}",
                               self.container_type, self.loc):
            typevars = typeinfer.typevars
            tsets = [typevars[i.name].get() for i in self.items]
            if not tsets:
                # Empty literal: container of undefined element type,
                # to be refined later.
                typeinfer.add_type(self.target,
                                   self.container_type(types.undefined),
                                   loc=self.loc)
            else:
                for typs in itertools.product(*tsets):
                    unified = typeinfer.context.unify_types(*typs)
                    if unified is not None:
                        typeinfer.add_type(self.target,
                                           self.container_type(unified),
                                           loc=self.loc)


class BuildListConstraint(_BuildContainerConstraint):
    """Constraint typing a ``build_list`` expression.

    Unlike the generic container constraint, a list of non-unifiable
    (heterogeneous) items becomes a ``types.LiteralList``.
    """
    # FIX: removed an ``__init__`` that duplicated the base class's
    # byte-for-byte; construction behavior is unchanged.

    def __call__(self, typeinfer):
        with new_error_context("typing of {0} at {1}",
                               types.List, self.loc):
            typevars = typeinfer.typevars
            tsets = [typevars[i.name].get() for i in self.items]
            if not tsets:
                typeinfer.add_type(self.target,
                                   types.List(types.undefined),
                                   loc=self.loc)
            else:
                for typs in itertools.product(*tsets):
                    unified = typeinfer.context.unify_types(*typs)
                    if unified is not None:
                        # pull out literals if available
                        islit = [isinstance(x, types.Literal) for x in typs]
                        iv = None
                        if all(islit):
                            iv = [x.literal_value for x in typs]
                        typeinfer.add_type(self.target,
                                           types.List(unified,
                                                      initial_value=iv),
                                           loc=self.loc)
                    else:
                        # Items don't unify: type as a literal list.
                        typeinfer.add_type(self.target,
                                           types.LiteralList(typs),
                                           loc=self.loc)
class BuildSetConstraint(_BuildContainerConstraint):
    # Sets use the generic container typing from the base class.
    container_type = types.Set


class BuildMapConstraint(object):
    """Constraint typing a ``build_map`` (dict display) expression.

    Chooses between a typed ``DictType`` and a ``LiteralStrKeyDict``
    depending on key/value homogeneity.
    """

    def __init__(self, target, items, special_value, value_indexes, loc):
        self.target = target
        # (key var, value var) pairs from the dict display.
        self.items = items
        # Initial-value payload; forwarded to DictType when all values are
        # literals (see `init_value` below).
        self.special_value = special_value
        # Mapping used to build a LiteralStrKeyDict.
        self.value_indexes = value_indexes
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of dict at {0}", self.loc):
            typevars = typeinfer.typevars

            # figure out what sort of dict is being dealt with
            tsets = [(typevars[k.name].getone(), typevars[v.name].getone())
                     for k, v in self.items]

            if not tsets:
                # Empty display: undefined key/value types, refined later.
                typeinfer.add_type(self.target,
                                   types.DictType(types.undefined,
                                                  types.undefined,
                                                  self.special_value),
                                   loc=self.loc)
            else:
                # all the info is known about the dict, if its
                # str keys -> random heterogeneous values treat as
                # literalstrkey
                ktys = [x[0] for x in tsets]
                vtys = [x[1] for x in tsets]
                strkey = all([isinstance(x, types.StringLiteral)
                              for x in ktys])
                literalvty = all([isinstance(x, types.Literal)
                                  for x in vtys])
                vt0 = types.unliteral(vtys[0])

                # homogeneous values comes in the form of being able to cast
                # all the other values in the ctor to the type of the first.
                # The order is important as `typed.Dict` takes it's type from
                # the first element.
                def check(other):
                    conv = typeinfer.context.can_convert(other, vt0)
                    return conv is not None and conv < Conversion.unsafe
                homogeneous = all([check(types.unliteral(x)) for x in vtys])

                # Special cases:
                # Single key:value in ctor, key is str, value is an otherwise
                # illegal container type, e.g. LiteralStrKeyDict or
                # List, there's no way to put this into a typed.Dict, so make
                # it a LiteralStrKeyDict, same goes for LiteralList.
                if len(vtys) == 1:
                    valty = vtys[0]
                    if isinstance(valty, (types.LiteralStrKeyDict,
                                          types.List,
                                          types.LiteralList)):
                        homogeneous = False

                if strkey and not homogeneous:
                    resolved_dict = {x: y for x, y in zip(ktys, vtys)}
                    ty = types.LiteralStrKeyDict(resolved_dict,
                                                 self.value_indexes)
                    typeinfer.add_type(self.target, ty, loc=self.loc)
                else:
                    init_value = self.special_value if literalvty else None
                    key_type, value_type = tsets[0]
                    typeinfer.add_type(self.target,
                                       types.DictType(key_type,
                                                      value_type,
                                                      init_value),
                                       loc=self.loc)
class ExhaustIterConstraint(object):
    """Constraint typing tuple-unpacking (``exhaust_iter``): the unpacked
    value must provide exactly ``count`` items.
    """

    def __init__(self, target, count, iterator, loc):
        self.target = target
        self.count = count
        self.iterator = iterator
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of exhaust iter at {0}", self.loc):
            typevars = typeinfer.typevars
            for tp in typevars[self.iterator.name].get():
                # unpack optional
                tp = tp.type if isinstance(tp, types.Optional) else tp
                if isinstance(tp, types.BaseTuple):
                    if len(tp) == self.count:
                        assert tp.is_precise()
                        typeinfer.add_type(self.target, tp, loc=self.loc)
                        break
                    else:
                        # FIX: the message was previously built as a 2-tuple
                        # of f-strings (a stray comma between them), so the
                        # raised error rendered a tuple repr instead of the
                        # intended message.  Now a single concatenated
                        # string.
                        msg = (f"wrong tuple length for "
                               f"{self.iterator.name}: "
                               f"expected {self.count}, got {len(tp)}")
                        raise NumbaValueError(msg)
                elif isinstance(tp, types.IterableType):
                    # Non-tuple iterable: a uniform tuple of the iterator's
                    # yield type.
                    tup = types.UniTuple(dtype=tp.iterator_type.yield_type,
                                         count=self.count)
                    assert tup.is_precise()
                    typeinfer.add_type(self.target, tup, loc=self.loc)
                    break
                else:
                    raise TypingError("failed to unpack {}".format(tp),
                                      loc=self.loc)


class PairFirstConstraint(object):
    """Constraint typing ``pair_first`` (first element of an internal
    Pair value).
    """

    def __init__(self, target, pair, loc):
        self.target = target
        self.pair = pair
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of pair-first at {0}", self.loc):
            typevars = typeinfer.typevars
            for tp in typevars[self.pair.name].get():
                if not isinstance(tp, types.Pair):
                    # XXX is this an error?
                    continue
                assert (isinstance(tp.first_type,
                                   types.UndefinedFunctionType)
                        or tp.first_type.is_precise())
                typeinfer.add_type(self.target, tp.first_type, loc=self.loc)
class PairSecondConstraint(object):
    """Constraint typing ``pair_second`` (second element of an internal
    Pair value).
    """

    def __init__(self, target, pair, loc):
        self.target = target
        self.pair = pair
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of pair-second at {0}", self.loc):
            tvs = typeinfer.typevars
            for ty in tvs[self.pair.name].get():
                if not isinstance(ty, types.Pair):
                    # XXX is this an error?
                    continue
                assert ty.second_type.is_precise()
                typeinfer.add_type(self.target, ty.second_type, loc=self.loc)


class StaticGetItemConstraint(object):
    """Constraint typing ``value[constant-index]``, with a fallback to a
    regular getitem call when an index variable is available.
    """

    def __init__(self, target, value, index, index_var, loc):
        self.target = target
        self.value = value
        self.index = index
        self.fallback = (
            None if index_var is None
            else IntrinsicCallConstraint(target, operator.getitem,
                                         (value, index_var), {}, None, loc)
        )
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of static-get-item at {0}", self.loc):
            tvs = typeinfer.typevars
            for ty in tvs[self.value.name].get():
                sig = typeinfer.context.resolve_static_getitem(
                    value=ty, index=self.index,
                )
                if sig is not None:
                    itemty = sig.return_type
                    # if the itemty is not precise, let it through,
                    # unification will catch it and produce a better error
                    # message
                    typeinfer.add_type(self.target, itemty, loc=self.loc)
                elif self.fallback is not None:
                    self.fallback(typeinfer)

    def get_call_signature(self):
        # The signature is only needed for the fallback case in lowering
        if self.fallback is None:
            return None
        return self.fallback.get_call_signature()


class TypedGetItemConstraint(object):
    """Constraint typing a getitem whose result dtype is already known."""

    def __init__(self, target, value, dtype, index, loc):
        self.target = target
        self.value = value
        self.dtype = dtype
        self.index = index
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of typed-get-item at {0}", self.loc):
            tvs = typeinfer.typevars
            idx_ty = tvs[self.index.name].get()
            val_ty = tvs[self.value.name].get()
            self.signature = Signature(self.dtype, val_ty + idx_ty, None)
            typeinfer.add_type(self.target, self.dtype, loc=self.loc)

    def get_call_signature(self):
        return self.signature
def fold_arg_vars(typevars, args, vararg, kws):
    """
    Fold and resolve the argument variables of a function call.

    Returns ``(pos_args, kw_args)`` as concrete types, expanding a tuple
    ``*args`` into positional arguments, or None while any argument type
    is still undefined.
    """
    # Fetch all argument types, bail if any is unknown
    n_pos_args = len(args)
    kwds = [kw for (kw, var) in kws]
    argtypes = [typevars[a.name] for a in args]
    argtypes += [typevars[var.name] for (kw, var) in kws]
    if vararg is not None:
        argtypes.append(typevars[vararg.name])
    if not all(a.defined for a in argtypes):
        return

    args = tuple(a.getone() for a in argtypes)

    pos_args = args[:n_pos_args]
    if vararg is not None:
        errmsg = "*args in function call should be a tuple, got %s"
        # NOTE(review): raises builtin TypeError rather than TypingError
        # below — confirm this is intended.
        # Handle constant literal used for `*args`
        if isinstance(args[-1], types.Literal):
            const_val = args[-1].literal_value
            # Is the constant value a tuple?
            if not isinstance(const_val, tuple):
                raise TypeError(errmsg % (args[-1],))
            # Append the elements in the const tuple to the positional args
            pos_args += const_val
        # Handle non-constant
        elif not isinstance(args[-1], types.BaseTuple):
            # Unsuitable for *args
            # (Python is more lenient and accepts all iterables)
            raise TypeError(errmsg % (args[-1],))
        else:
            # Append the elements in the tuple to the positional args
            pos_args += args[-1].types
        # Drop the last arg
        args = args[:-1]
    kw_args = dict(zip(kwds, args[n_pos_args:]))
    return pos_args, kw_args


def _is_array_not_precise(arrty):
    """Check type is array and it is not precise
    """
    return isinstance(arrty, types.Array) and not arrty.is_precise()


class CallConstraint(object):
    """Constraint for calling functions.
    Perform case analysis foreach combinations of argument types.
    """
    signature = None

    def __init__(self, target, func, args, kws, vararg, loc):
        self.target = target
        self.func = func
        self.args = args
        self.kws = kws or {}
        self.vararg = vararg
        self.loc = loc

    def __call__(self, typeinfer):
        msg = "typing of call at {0}\n".format(self.loc)
        with new_error_context(msg):
            typevars = typeinfer.typevars
            with new_error_context(
                    "resolving caller type: {}".format(self.func)):
                fnty = typevars[self.func].getone()
            with new_error_context("resolving callee type: {0}", fnty):
                self.resolve(typeinfer, typevars, fnty)

    def resolve(self, typeinfer, typevars, fnty):
        """Resolve the call's signature against *fnty* once all argument
        types are known, record the return type, and set up refinement
        hooks where the result is still imprecise.
        """
        assert fnty
        context = typeinfer.context

        r = fold_arg_vars(typevars, self.args, self.vararg, self.kws)
        if r is None:
            # Cannot resolve call type until all argument types are known
            return
        pos_args, kw_args = r

        # Check argument to be precise
        for a in itertools.chain(pos_args, kw_args.values()):
            # Forbids imprecise type except array of undefined dtype
            if not a.is_precise() and not isinstance(a, types.Array):
                return

        # Resolve call type
        if isinstance(fnty, types.TypeRef):
            # Unwrap TypeRef
            fnty = fnty.instance_type
        try:
            sig = typeinfer.resolve_call(fnty, pos_args, kw_args)
        except ForceLiteralArg as e:
            # Adjust for bound methods
            folding_args = ((fnty.this,) + tuple(self.args)
                            if isinstance(fnty, types.BoundFunction)
                            else self.args)
            folded = e.fold_arguments(folding_args, self.kws)
            # Map the requested positions back onto real function
            # arguments; positions that are not plain arguments cannot be
            # literalized.
            requested = set()
            unsatisfied = set()
            for idx in e.requested_args:
                maybe_arg = typeinfer.func_ir.get_definition(folded[idx])
                if isinstance(maybe_arg, ir.Arg):
                    requested.add(maybe_arg.index)
                else:
                    unsatisfied.add(idx)
            if unsatisfied:
                raise TypingError("Cannot request literal type.",
                                  loc=self.loc)
            elif requested:
                raise ForceLiteralArg(requested, loc=self.loc)
            # NOTE(review): if neither set is populated, control falls
            # through with `sig` unbound — presumably e.requested_args is
            # never empty; confirm.
        if sig is None:
            # Note: duplicated error checking.
            #       See types.BaseFunction.get_call_type
            # Arguments are invalid => explain why
            headtemp = "Invalid use of {0} with parameters ({1})"
            args = [str(a) for a in pos_args]
            args += ["%s=%s" % (k, v) for k, v in sorted(kw_args.items())]
            head = headtemp.format(fnty, ', '.join(map(str, args)))
            desc = context.explain_function_type(fnty)
            msg = '\n'.join([head, desc])
            raise TypingError(msg)

        typeinfer.add_type(self.target, sig.return_type, loc=self.loc)

        # If the function is a bound function and its receiver type
        # was refined, propagate it.
        if (isinstance(fnty, types.BoundFunction)
                and sig.recvr is not None
                and sig.recvr != fnty.this):
            refined_this = context.unify_pairs(sig.recvr, fnty.this)
            if (refined_this is None and fnty.this.is_precise()
                    and sig.recvr.is_precise()):
                msg = "Cannot refine type {} to {}".format(
                    sig.recvr, fnty.this,
                )
                raise TypingError(msg, loc=self.loc)
            if refined_this is not None and refined_this.is_precise():
                refined_fnty = fnty.copy(this=refined_this)
                typeinfer.propagate_refined_type(self.func, refined_fnty)

        # If the return type is imprecise but can be unified with the
        # target variable's inferred type, use the latter.
        # Useful for code such as::
        #    s = set()
        #    s.add(1)
        # (the set() call must be typed as int64(), not undefined())
        if not sig.return_type.is_precise():
            target = typevars[self.target]
            if target.defined:
                targetty = target.getone()
                if context.unify_pairs(targetty, sig.return_type) == targetty:
                    sig = sig.replace(return_type=targetty)

        self.signature = sig
        self._add_refine_map(typeinfer, typevars, sig)

    def _add_refine_map(self, typeinfer, typevars, sig):
        """Add this expression to the refine_map base on the type of
        target_type
        """
        target_type = typevars[self.target].getone()
        # Array
        if (isinstance(target_type, types.Array)
                and isinstance(sig.return_type.dtype, types.Undefined)):
            typeinfer.refine_map[self.target] = self
        # DictType
        if (isinstance(target_type, types.DictType) and
                not target_type.is_precise()):
            typeinfer.refine_map[self.target] = self

    def refine(self, typeinfer, updated_type):
        """Back-propagate a refined result type into the call's first
        argument (only implemented for ``operator.getitem`` on imprecise
        arrays).
        """
        # Is getitem?
        if self.func == operator.getitem:
            aryty = typeinfer.typevars[self.args[0].name].getone()
            # is array not precise?
            if _is_array_not_precise(aryty):
                # allow refinement of dtype
                assert updated_type.is_precise()
                newtype = aryty.copy(dtype=updated_type.dtype)
                typeinfer.add_type(self.args[0].name, newtype, loc=self.loc)
        else:
            m = 'no type refinement implemented for function {} updating to {}'
            raise TypingError(m.format(self.func, updated_type))

    def get_call_signature(self):
        return self.signature
class IntrinsicCallConstraint(CallConstraint):
    """Call constraint where the callee is a value (e.g. an operator
    function) rather than a type variable.
    """

    def __call__(self, typeinfer):
        with new_error_context("typing of intrinsic-call at {0}", self.loc):
            callee = self.func
            if callee in utils.OPERATORS_TO_BUILTINS:
                # Map the operator function onto its typed equivalent.
                callee = typeinfer.resolve_value_type(None, callee)
            self.resolve(typeinfer, typeinfer.typevars, fnty=callee)


class GetAttrConstraint(object):
    """Constraint typing an attribute access ``value.attr``."""

    def __init__(self, target, attr, value, loc, inst):
        self.target = target
        self.attr = attr
        self.value = value
        self.loc = loc
        self.inst = inst

    def __call__(self, typeinfer):
        with new_error_context("typing of get attribute at {0}", self.loc):
            tvs = typeinfer.typevars
            for ty in tvs[self.value.name].get():
                attrty = typeinfer.context.resolve_getattr(ty, self.attr)
                if attrty is None:
                    raise UntypedAttributeError(ty, self.attr,
                                                loc=self.inst.loc)
                assert attrty.is_precise()
                typeinfer.add_type(self.target, attrty, loc=self.loc)
            typeinfer.refine_map[self.target] = self

    def refine(self, typeinfer, target_type):
        # Only a bound function carries receiver information that can be
        # pushed back into the value's type.
        if not isinstance(target_type, types.BoundFunction):
            return
        recvr = target_type.this
        assert recvr.is_precise()
        typeinfer.add_type(self.value.name, recvr, loc=self.loc)
        source_constraint = typeinfer.refine_map.get(self.value.name)
        if source_constraint is not None:
            source_constraint.refine(typeinfer, recvr)

    def __repr__(self):
        return f'resolving type of attribute "{self.attr}" of "{self.value}"'


class SetItemRefinement(object):
    """A mixin class providing the refinement logic shared by the setitem
    and static-setitem constraints.
    """

    def _refine_target_type(self, typeinfer, targetty, idxty, valty, sig):
        """Refine the target-type given the known index type and value type.
        """
        # Imprecise array target: adopt the array type the signature
        # resolved for the first argument.
        if _is_array_not_precise(targetty):
            typeinfer.add_type(self.target.name, sig.args[0], loc=self.loc)
        # Dict targets.
        if isinstance(targetty, types.DictType):
            if not targetty.is_precise():
                refined = targetty.refine(idxty, valty)
                typeinfer.add_type(
                    self.target.name, refined, loc=self.loc,
                )
        elif isinstance(targetty, types.LiteralStrKeyDict):
            # Writing through a literal str-key dict: treat it as a typed
            # dict of the observed key/value types.
            typeinfer.add_type(
                self.target.name, types.DictType(idxty, valty),
                loc=self.loc,
            )
""" def _refine_target_type(self, typeinfer, targetty, idxty, valty, sig): """Refine the target-type given the known index type and value type. """ # For array setitem, refine imprecise array dtype if _is_array_not_precise(targetty): typeinfer.add_type(self.target.name, sig.args[0], loc=self.loc) # For Dict setitem if isinstance(targetty, types.DictType): if not targetty.is_precise(): refined = targetty.refine(idxty, valty) typeinfer.add_type( self.target.name, refined, loc=self.loc, ) elif isinstance(targetty, types.LiteralStrKeyDict): typeinfer.add_type( self.target.name, types.DictType(idxty, valty), loc=self.loc, ) class SetItemConstraint(SetItemRefinement): def __init__(self, target, index, value, loc): self.target = target self.index = index self.value = value self.loc = loc def __call__(self, typeinfer): with new_error_context("typing of setitem at {0}", self.loc): typevars = typeinfer.typevars if not all(typevars[var.name].defined for var in (self.target, self.index, self.value)): return targetty = typevars[self.target.name].getone() idxty = typevars[self.index.name].getone() valty = typevars[self.value.name].getone() sig = typeinfer.context.resolve_setitem(targetty, idxty, valty) if sig is None: raise TypingError("Cannot resolve setitem: %s[%s] = %s" % (targetty, idxty, valty), loc=self.loc) self.signature = sig self._refine_target_type(typeinfer, targetty, idxty, valty, sig) def get_call_signature(self): return self.signature class StaticSetItemConstraint(SetItemRefinement): def __init__(self, target, index, index_var, value, loc): self.target = target self.index = index self.index_var = index_var self.value = value self.loc = loc def __call__(self, typeinfer): with new_error_context("typing of staticsetitem at {0}", self.loc): typevars = typeinfer.typevars if not all(typevars[var.name].defined for var in (self.target, self.index_var, self.value)): return targetty = typevars[self.target.name].getone() idxty = typevars[self.index_var.name].getone() valty = 
class DelItemConstraint(object):
    """Constraint typing ``del target[index]``."""

    def __init__(self, target, index, loc):
        self.target = target
        self.index = index
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of delitem at {0}", self.loc):
            tvs = typeinfer.typevars
            operands = (self.target, self.index)
            if not all(tvs[v.name].defined for v in operands):
                # Wait until both operands have an inferred type.
                return
            obj_ty = tvs[self.target.name].getone()
            key_ty = tvs[self.index.name].getone()
            sig = typeinfer.context.resolve_delitem(obj_ty, key_ty)
            if sig is None:
                raise TypingError("Cannot resolve delitem: %s[%s]" %
                                  (obj_ty, key_ty), loc=self.loc)
            self.signature = sig

    def get_call_signature(self):
        return self.signature


class SetAttrConstraint(object):
    """Constraint typing ``target.attr = value``."""

    def __init__(self, target, attr, value, loc):
        self.target = target
        self.attr = attr
        self.value = value
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of set attribute {0!r} at {1}",
                               self.attr, self.loc):
            tvs = typeinfer.typevars
            operands = (self.target, self.value)
            if not all(tvs[v.name].defined for v in operands):
                return
            obj_ty = tvs[self.target.name].getone()
            val_ty = tvs[self.value.name].getone()
            sig = typeinfer.context.resolve_setattr(obj_ty, self.attr,
                                                    val_ty)
            if sig is None:
                raise TypingError("Cannot resolve setattr: (%s).%s = %s" %
                                  (obj_ty, self.attr, val_ty),
                                  loc=self.loc)
            self.signature = sig

    def get_call_signature(self):
        return self.signature


class PrintConstraint(object):
    """Constraint typing a ``print(...)`` call."""

    def __init__(self, args, vararg, loc):
        self.args = args
        self.vararg = vararg
        self.loc = loc

    def __call__(self, typeinfer):
        folded = fold_arg_vars(typeinfer.typevars, self.args,
                               self.vararg, {})
        if folded is None:
            # Cannot resolve call type until all argument types are known
            return
        pos_args, kw_args = folded
        fnty = typeinfer.context.resolve_value_type(print)
        assert fnty is not None
        self.signature = typeinfer.resolve_call(fnty, pos_args, kw_args)

    def get_call_signature(self):
        return self.signature
= typeinfer.typevars r = fold_arg_vars(typevars, self.args, self.vararg, {}) if r is None: # Cannot resolve call type until all argument types are known return pos_args, kw_args = r fnty = typeinfer.context.resolve_value_type(print) assert fnty is not None sig = typeinfer.resolve_call(fnty, pos_args, kw_args) self.signature = sig def get_call_signature(self): return self.signature class TypeVarMap(dict): def set_context(self, context): self.context = context def __getitem__(self, name): if name not in self: self[name] = TypeVar(self.context, name) return super(TypeVarMap, self).__getitem__(name) def __setitem__(self, name, value): assert isinstance(name, str) if name in self: raise KeyError("Cannot redefine typevar %s" % name) else: super(TypeVarMap, self).__setitem__(name, value) # A temporary mapping of {function name: dispatcher object} _temporary_dispatcher_map = {} # A temporary mapping of {function name: dispatcher object reference count} # Reference: https://github.com/numba/numba/issues/3658 _temporary_dispatcher_map_ref_count = defaultdict(int) @contextlib.contextmanager def register_dispatcher(disp): """ Register a Dispatcher for inference while it is not yet stored as global or closure variable (e.g. during execution of the @jit() call). This allows resolution of recursive calls with eager compilation. """ assert callable(disp) assert callable(disp.py_func) name = disp.py_func.__name__ _temporary_dispatcher_map[name] = disp _temporary_dispatcher_map_ref_count[name] += 1 try: yield finally: _temporary_dispatcher_map_ref_count[name] -= 1 if not _temporary_dispatcher_map_ref_count[name]: del _temporary_dispatcher_map[name] typeinfer_extensions = {} class TypeInferer(object): """ Operates on block that shares the same ir.Scope. """ def __init__(self, context, func_ir, warnings): self.context = context # sort based on label, ensure iteration order! 
self.blocks = OrderedDict() for k in sorted(func_ir.blocks.keys()): self.blocks[k] = func_ir.blocks[k] self.generator_info = func_ir.generator_info self.func_id = func_ir.func_id self.func_ir = func_ir self.typevars = TypeVarMap() self.typevars.set_context(context) self.constraints = ConstraintNetwork() self.warnings = warnings # { index: mangled name } self.arg_names = {} # self.return_type = None # Set of assumed immutable globals self.assumed_immutables = set() # Track all calls and associated constraints self.calls = [] # The inference result of the above calls self.calltypes = utils.UniqueDict() # Target var -> constraint with refine hook self.refine_map = {} if config.DEBUG or config.DEBUG_TYPEINFER: self.debug = TypeInferDebug(self) else: self.debug = NullDebug() self._skip_recursion = False def copy(self, skip_recursion=False): clone = TypeInferer(self.context, self.func_ir, self.warnings) clone.arg_names = self.arg_names.copy() clone._skip_recursion = skip_recursion for k, v in self.typevars.items(): if not v.locked and v.defined: clone.typevars[k].add_type(v.getone(), loc=v.define_loc) return clone def _mangle_arg_name(self, name): # Disambiguise argument name return "arg.%s" % (name,) def _get_return_vars(self): rets = [] for blk in self.blocks.values(): inst = blk.terminator if isinstance(inst, ir.Return): rets.append(inst.value) return rets def get_argument_types(self): return [self.typevars[k].getone() for k in self.arg_names.values()] def seed_argument(self, name, index, typ): name = self._mangle_arg_name(name) self.seed_type(name, typ) self.arg_names[index] = name def seed_type(self, name, typ): """All arguments should be seeded. """ self.lock_type(name, typ, loc=None) def seed_return(self, typ): """Seeding of return value is optional. 
""" for var in self._get_return_vars(): self.lock_type(var.name, typ, loc=None) def build_constraint(self): for blk in self.blocks.values(): for inst in blk.body: self.constrain_statement(inst) def return_types_from_partial(self): """ Resume type inference partially to deduce the return type. Note: No side-effect to `self`. Returns the inferred return type or None if it cannot deduce the return type. """ # Clone the typeinferer and disable typing recursive calls cloned = self.copy(skip_recursion=True) # rebuild constraint network cloned.build_constraint() # propagate without raising cloned.propagate(raise_errors=False) # get return types rettypes = set() for retvar in cloned._get_return_vars(): if retvar.name in cloned.typevars: typevar = cloned.typevars[retvar.name] if typevar and typevar.defined: rettypes.add(types.unliteral(typevar.getone())) if not rettypes: return # unify return types return cloned._unify_return_types(rettypes) def propagate(self, raise_errors=True): newtoken = self.get_state_token() oldtoken = None # Since the number of types are finite, the typesets will eventually # stop growing. while newtoken != oldtoken: self.debug.propagate_started() oldtoken = newtoken # Errors can appear when the type set is incomplete; only # raise them when there is no progress anymore. 
errors = self.constraints.propagate(self) newtoken = self.get_state_token() self.debug.propagate_finished() if errors: if raise_errors: force_lit_args = [e for e in errors if isinstance(e, ForceLiteralArg)] if not force_lit_args: raise errors[0] else: raise reduce(operator.or_, force_lit_args) else: return errors def add_type(self, var, tp, loc, unless_locked=False): assert isinstance(var, str), type(var) tv = self.typevars[var] if unless_locked and tv.locked: return oldty = tv.type unified = tv.add_type(tp, loc=loc) if unified != oldty: self.propagate_refined_type(var, unified) def add_calltype(self, inst, signature): assert signature is not None self.calltypes[inst] = signature def copy_type(self, src_var, dest_var, loc): self.typevars[dest_var].union(self.typevars[src_var], loc=loc) def lock_type(self, var, tp, loc, literal_value=NOTSET): tv = self.typevars[var] tv.lock(tp, loc=loc, literal_value=literal_value) def propagate_refined_type(self, updated_var, updated_type): source_constraint = self.refine_map.get(updated_var) if source_constraint is not None: source_constraint.refine(self, updated_type) def unify(self, raise_errors=True): """ Run the final unification pass over all inferred types, and catch imprecise types. """ typdict = utils.UniqueDict() def find_offender(name, exhaustive=False): # finds the offending variable definition by name # if exhaustive is set it will try and trace through temporary # variables to find a concrete offending definition. 
offender = None for block in self.func_ir.blocks.values(): offender = block.find_variable_assignment(name) if offender is not None: if not exhaustive: break try: # simple assignment hasattr(offender.value, 'name') offender_value = offender.value.name except (AttributeError, KeyError): break orig_offender = offender if offender_value.startswith('$'): offender = find_offender(offender_value, exhaustive=exhaustive) if offender is None: offender = orig_offender break return offender def diagnose_imprecision(offender): # helper for diagnosing imprecise types list_msg = """\n For Numba to be able to compile a list, the list must have a known and precise type that can be inferred from the other variables. Whilst sometimes the type of empty lists can be inferred, this is not always the case, see this documentation for help: https://numba.readthedocs.io/en/stable/user/troubleshoot.html#my-code-has-an-untyped-list-problem """ if offender is not None: # This block deals with imprecise lists if hasattr(offender, 'value'): if hasattr(offender.value, 'op'): # might be `foo = []` if offender.value.op == 'build_list': return list_msg # or might be `foo = list()` elif offender.value.op == 'call': try: # assignment involving a call call_name = offender.value.func.name # find the offender based on the call name offender = find_offender(call_name) if isinstance(offender.value, ir.Global): if offender.value.name == 'list': return list_msg except (AttributeError, KeyError): pass return "" # no help possible def check_var(name): tv = self.typevars[name] if not tv.defined: if raise_errors: offender = find_offender(name) val = getattr(offender, 'value', 'unknown operation') loc = getattr(offender, 'loc', ir.unknown_loc) msg = ("Type of variable '%s' cannot be determined, " "operation: %s, location: %s") raise TypingError(msg % (var, val, loc), loc) else: typdict[var] = types.unknown return tp = tv.getone() if isinstance(tp, types.UndefinedFunctionType): tp = tp.get_precise() if not 
tp.is_precise(): offender = find_offender(name, exhaustive=True) msg = ("Cannot infer the type of variable '%s'%s, " "have imprecise type: %s. %s") istmp = " (temporary variable)" if var.startswith('$') else "" loc = getattr(offender, 'loc', ir.unknown_loc) # is this an untyped list? try and provide help extra_msg = diagnose_imprecision(offender) if raise_errors: raise TypingError(msg % (var, istmp, tp, extra_msg), loc) else: typdict[var] = types.unknown return else: # type is precise, hold it typdict[var] = tp # For better error display, check first user-visible vars, then # temporaries temps = set(k for k in self.typevars if not k[0].isalpha()) others = set(self.typevars) - temps for var in sorted(others): check_var(var) for var in sorted(temps): check_var(var) try: retty = self.get_return_type(typdict) except Exception as e: # partial type inference may raise e.g. attribute error if a # constraint has no computable signature, ignore this as needed if raise_errors: raise e else: retty = None else: typdict = utils.UniqueDict( typdict, **{v.name: retty for v in self._get_return_vars()}) try: fntys = self.get_function_types(typdict) except Exception as e: # partial type inference may raise e.g. 
attribute error if a # constraint has no computable signature, ignore this as needed if raise_errors: raise e else: fntys = None if self.generator_info: retty = self.get_generator_type(typdict, retty, raise_errors=raise_errors) self.debug.unify_finished(typdict, retty, fntys) return typdict, retty, fntys def get_generator_type(self, typdict, retty, raise_errors=True): gi = self.generator_info arg_types = [None] * len(self.arg_names) for index, name in self.arg_names.items(): arg_types[index] = typdict[name] state_types = None try: state_types = [typdict[var_name] for var_name in gi.state_vars] except KeyError: msg = "Cannot type generator: state variable types cannot be found" if raise_errors: raise TypingError(msg) state_types = [types.unknown for _ in gi.state_vars] yield_types = None try: yield_types = [typdict[y.inst.value.name] for y in gi.get_yield_points()] except KeyError: msg = "Cannot type generator: yield type cannot be found" if raise_errors: raise TypingError(msg) if not yield_types: msg = "Cannot type generator: it does not yield any value" if raise_errors: raise TypingError(msg) yield_types = [types.unknown for _ in gi.get_yield_points()] if not yield_types or all(yield_types) == types.unknown: # unknown yield, probably partial type inference, escape return types.Generator(self.func_id.func, types.unknown, arg_types, state_types, has_finalizer=True) yield_type = self.context.unify_types(*yield_types) if yield_type is None or isinstance(yield_type, types.Optional): msg = "Cannot type generator: cannot unify yielded types %s" yp_highlights = [] for y in gi.get_yield_points(): msg = (_termcolor.errmsg("Yield of: IR '%s', type '%s', " "location: %s")) yp_highlights.append(msg % (str(y.inst), typdict[y.inst.value.name], y.inst.loc.strformat())) explain_ty = set() for ty in yield_types: if isinstance(ty, types.Optional): explain_ty.add(ty.type) explain_ty.add(types.NoneType('none')) else: explain_ty.add(ty) if raise_errors: raise TypingError("Can't unify 
yield type from the " "following types: %s" % ", ".join(sorted(map(str, explain_ty))) + "\n\n" + "\n".join(yp_highlights)) return types.Generator(self.func_id.func, yield_type, arg_types, state_types, has_finalizer=True) def get_function_types(self, typemap): """ Fill and return the calltypes map. """ # XXX why can't this be done on the fly? calltypes = self.calltypes for call, constraint in self.calls: calltypes[call] = constraint.get_call_signature() return calltypes def _unify_return_types(self, rettypes): if rettypes: unified = self.context.unify_types(*rettypes) if isinstance(unified, types.FunctionType): # unified is allowed to be UndefinedFunctionType # instance (that is imprecise). return unified if unified is None or not unified.is_precise(): def check_type(atype): lst = [] for k, v in self.typevars.items(): if atype == v.type: lst.append(k) returns = {} for x in reversed(lst): for block in self.func_ir.blocks.values(): for instr in block.find_insts(ir.Return): value = instr.value if isinstance(value, ir.Var): name = value.name else: pass if x == name: returns[x] = instr break interped = "" for name, offender in returns.items(): loc = getattr(offender, 'loc', ir.unknown_loc) msg = ("Return of: IR name '%s', type '%s', " "location: %s") interped = msg % (name, atype, loc.strformat()) return interped problem_str = [] for xtype in rettypes: problem_str.append(_termcolor.errmsg(check_type(xtype))) raise TypingError("Can't unify return type from the " "following types: %s" % ", ".join(sorted(map(str, rettypes))) + "\n" + "\n".join(problem_str)) return unified else: # Function without a successful return path return types.none def get_return_type(self, typemap): rettypes = set() for var in self._get_return_vars(): rettypes.add(typemap[var.name]) return self._unify_return_types(rettypes) def get_state_token(self): """The algorithm is monotonic. It can only grow or "refine" the typevar map. 
""" return [tv.type for name, tv in sorted(self.typevars.items())] def constrain_statement(self, inst): if isinstance(inst, ir.Assign): self.typeof_assign(inst) elif isinstance(inst, ir.SetItem): self.typeof_setitem(inst) elif isinstance(inst, ir.StaticSetItem): self.typeof_static_setitem(inst) elif isinstance(inst, ir.DelItem): self.typeof_delitem(inst) elif isinstance(inst, ir.SetAttr): self.typeof_setattr(inst) elif isinstance(inst, ir.Print): self.typeof_print(inst) elif isinstance(inst, ir.StoreMap): self.typeof_storemap(inst) elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)): pass elif isinstance(inst, (ir.StaticRaise, ir.StaticTryRaise)): pass elif isinstance(inst, ir.PopBlock): pass # It's a marker statement elif type(inst) in typeinfer_extensions: # let external calls handle stmt if type matches f = typeinfer_extensions[type(inst)] f(inst, self) else: msg = "Unsupported constraint encountered: %s" % inst raise UnsupportedError(msg, loc=inst.loc) def typeof_setitem(self, inst): constraint = SetItemConstraint(target=inst.target, index=inst.index, value=inst.value, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst, constraint)) def typeof_storemap(self, inst): constraint = SetItemConstraint(target=inst.dct, index=inst.key, value=inst.value, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst, constraint)) def typeof_static_setitem(self, inst): constraint = StaticSetItemConstraint(target=inst.target, index=inst.index, index_var=inst.index_var, value=inst.value, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst, constraint)) def typeof_delitem(self, inst): constraint = DelItemConstraint(target=inst.target, index=inst.index, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst, constraint)) def typeof_setattr(self, inst): constraint = SetAttrConstraint(target=inst.target, attr=inst.attr, value=inst.value, loc=inst.loc) self.constraints.append(constraint) 
self.calls.append((inst, constraint)) def typeof_print(self, inst): constraint = PrintConstraint(args=inst.args, vararg=inst.vararg, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst, constraint)) def typeof_assign(self, inst): value = inst.value if isinstance(value, ir.Const): self.typeof_const(inst, inst.target, value.value) elif isinstance(value, ir.Var): self.constraints.append(Propagate(dst=inst.target.name, src=value.name, loc=inst.loc)) elif isinstance(value, (ir.Global, ir.FreeVar)): self.typeof_global(inst, inst.target, value) elif isinstance(value, ir.Arg): self.typeof_arg(inst, inst.target, value) elif isinstance(value, ir.Expr): self.typeof_expr(inst, inst.target, value) elif isinstance(value, ir.Yield): self.typeof_yield(inst, inst.target, value) else: msg = ("Unsupported assignment encountered: %s %s" % (type(value), str(value))) raise UnsupportedError(msg, loc=inst.loc) def resolve_value_type(self, inst, val): """ Resolve the type of a simple Python value, such as can be represented by literals. """ try: return self.context.resolve_value_type(val) except ValueError as e: msg = str(e) raise TypingError(msg, loc=inst.loc) def typeof_arg(self, inst, target, arg): src_name = self._mangle_arg_name(arg.name) self.constraints.append(ArgConstraint(dst=target.name, src=src_name, loc=inst.loc)) def typeof_const(self, inst, target, const): ty = self.resolve_value_type(inst, const) if inst.value.use_literal_type: lit = types.maybe_literal(value=const) else: lit = None self.add_type(target.name, lit or ty, loc=inst.loc) def typeof_yield(self, inst, target, yield_): # Sending values into generators isn't supported. self.add_type(target.name, types.none, loc=inst.loc) def sentry_modified_builtin(self, inst, gvar): """ Ensure that builtins are not modified. 
""" if gvar.name == 'range' and gvar.value is not range: bad = True elif gvar.name == 'slice' and gvar.value is not slice: bad = True elif gvar.name == 'len' and gvar.value is not len: bad = True else: bad = False if bad: raise TypingError("Modified builtin '%s'" % gvar.name, loc=inst.loc) def resolve_call(self, fnty, pos_args, kw_args): """ Resolve a call to a given function type. A signature is returned. """ if isinstance(fnty, types.FunctionType): return fnty.get_call_type(self, pos_args, kw_args) if isinstance(fnty, types.RecursiveCall) and not self._skip_recursion: # Recursive call disp = fnty.dispatcher_type.dispatcher pysig, args = disp.fold_argument_types(pos_args, kw_args) frame = self.context.callstack.match(disp.py_func, args) # If the signature is not being compiled if frame is None: sig = self.context.resolve_function_type(fnty.dispatcher_type, pos_args, kw_args) fndesc = disp.overloads[args].fndesc qual = qualifying_prefix(fndesc.modname, fndesc.qualname) fnty.add_overloads(args, qual, fndesc.uid) return sig fnid = frame.func_id qual = qualifying_prefix(fnid.modname, fnid.func_qualname) fnty.add_overloads(args, qual, fnid.unique_id) # Resume propagation in parent frame return_type = frame.typeinfer.return_types_from_partial() # No known return type if return_type is None: raise TypingError("cannot type infer runaway recursion") sig = typing.signature(return_type, *args) sig = sig.replace(pysig=pysig) # Keep track of unique return_type frame.add_return_type(return_type) return sig else: # Normal non-recursive call return self.context.resolve_function_type(fnty, pos_args, kw_args) def typeof_global(self, inst, target, gvar): try: typ = self.resolve_value_type(inst, gvar.value) except TypingError as e: if (gvar.name == self.func_id.func_name and gvar.name in _temporary_dispatcher_map): # Self-recursion case where the dispatcher is not (yet?) 
known # as a global variable typ = types.Dispatcher(_temporary_dispatcher_map[gvar.name]) else: from numba.misc import special nm = gvar.name # check if the problem is actually a name error func_glbls = self.func_id.func.__globals__ if (nm not in func_glbls.keys() and nm not in special.__all__ and nm not in __builtins__.keys() and nm not in self.func_id.code.co_freevars): errstr = "NameError: name '%s' is not defined" msg = _termcolor.errmsg(errstr % nm) e.patch_message(msg) raise else: msg = _termcolor.errmsg("Untyped global name '%s':" % nm) msg += " %s" # interps the actual error # if the untyped global is a numba internal function then add # to the error message asking if it's been imported. if nm in special.__all__: tmp = ("\n'%s' looks like a Numba internal function, has " "it been imported (i.e. 'from numba import %s')?\n" % (nm, nm)) msg += _termcolor.errmsg(tmp) e.patch_message(msg % e) raise if isinstance(typ, types.Dispatcher) and typ.dispatcher.is_compiling: # Recursive call callstack = self.context.callstack callframe = callstack.findfirst(typ.dispatcher.py_func) if callframe is not None: typ = types.RecursiveCall(typ) else: raise NotImplementedError( "call to %s: unsupported recursion" % typ.dispatcher) if isinstance(typ, types.Array): # Global array in nopython mode is constant typ = typ.copy(readonly=True) if isinstance(typ, types.BaseAnonymousTuple): # if it's a tuple of literal types, swap the type for the more # specific literal version literaled = [types.maybe_literal(x) for x in gvar.value] if all(literaled): typ = types.Tuple(literaled) # if any of the items in the tuple are arrays, they need to be # typed as readonly, mutating an array in a global container # is not supported (should be compile time constant etc). 
def mark_array_ro(tup): newtup = [] for item in tup.types: if isinstance(item, types.Array): item = item.copy(readonly=True) elif isinstance(item, types.BaseAnonymousTuple): item = mark_array_ro(item) newtup.append(item) return types.BaseTuple.from_types(newtup) typ = mark_array_ro(typ) self.sentry_modified_builtin(inst, gvar) # Setting literal_value for globals because they are handled # like const value in numba lit = types.maybe_literal(gvar.value) # The user may have provided the type for this variable already. # In this case, call add_type() to make sure the value type is # consistent. See numba.tests.test_array_reductions # TestArrayReductions.test_array_cumsum for examples. # Variable type locked by using the locals dict. tv = self.typevars[target.name] if tv.locked: tv.add_type(lit or typ, loc=inst.loc) else: self.lock_type(target.name, lit or typ, loc=inst.loc) self.assumed_immutables.add(inst) def typeof_expr(self, inst, target, expr): if expr.op == 'call': self.typeof_call(inst, target, expr) elif expr.op in ('getiter', 'iternext'): self.typeof_intrinsic_call(inst, target, expr.op, expr.value) elif expr.op == 'exhaust_iter': constraint = ExhaustIterConstraint(target.name, count=expr.count, iterator=expr.value, loc=expr.loc) self.constraints.append(constraint) elif expr.op == 'pair_first': constraint = PairFirstConstraint(target.name, pair=expr.value, loc=expr.loc) self.constraints.append(constraint) elif expr.op == 'pair_second': constraint = PairSecondConstraint(target.name, pair=expr.value, loc=expr.loc) self.constraints.append(constraint) elif expr.op == 'binop': self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs) elif expr.op == 'inplace_binop': self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs) elif expr.op == 'unary': self.typeof_intrinsic_call(inst, target, expr.fn, expr.value) elif expr.op == 'static_getitem': constraint = StaticGetItemConstraint(target.name, value=expr.value, index=expr.index, 
index_var=expr.index_var, loc=expr.loc) self.constraints.append(constraint) self.calls.append((inst.value, constraint)) elif expr.op == 'getitem': self.typeof_intrinsic_call(inst, target, operator.getitem, expr.value, expr.index,) elif expr.op == 'typed_getitem': constraint = TypedGetItemConstraint(target.name, value=expr.value, dtype=expr.dtype, index=expr.index, loc=expr.loc) self.constraints.append(constraint) self.calls.append((inst.value, constraint)) elif expr.op == 'getattr': constraint = GetAttrConstraint(target.name, attr=expr.attr, value=expr.value, loc=inst.loc, inst=inst) self.constraints.append(constraint) elif expr.op == 'build_tuple': constraint = BuildTupleConstraint(target.name, items=expr.items, loc=inst.loc) self.constraints.append(constraint) elif expr.op == 'build_list': constraint = BuildListConstraint(target.name, items=expr.items, loc=inst.loc) self.constraints.append(constraint) elif expr.op == 'build_set': constraint = BuildSetConstraint(target.name, items=expr.items, loc=inst.loc) self.constraints.append(constraint) elif expr.op == 'build_map': constraint = BuildMapConstraint( target.name, items=expr.items, special_value=expr.literal_value, value_indexes=expr.value_indexes, loc=inst.loc) self.constraints.append(constraint) elif expr.op == 'cast': self.constraints.append(Propagate(dst=target.name, src=expr.value.name, loc=inst.loc)) elif expr.op == 'phi': for iv in expr.incoming_values: if iv is not ir.UNDEFINED: self.constraints.append(Propagate(dst=target.name, src=iv.name, loc=inst.loc)) elif expr.op == 'make_function': self.lock_type(target.name, types.MakeFunctionLiteral(expr), loc=inst.loc, literal_value=expr) else: msg = "Unsupported op-code encountered: %s" % expr raise UnsupportedError(msg, loc=inst.loc) def typeof_call(self, inst, target, call): constraint = CallConstraint(target.name, call.func.name, call.args, call.kws, call.vararg, loc=inst.loc) self.constraints.append(constraint) self.calls.append((inst.value, constraint)) 
    def typeof_intrinsic_call(self, inst, target, func, *args):
        # Register a typing constraint for an intrinsic operation (getitem,
        # binop, unary, iteration helpers, ...) so that propagation can
        # resolve the operation's signature later.
        constraint = IntrinsicCallConstraint(target.name, func, args, kws=(),
                                             vararg=None, loc=inst.loc)
        self.constraints.append(constraint)
        # Track the (expr, constraint) pair so the calltypes map can be
        # filled from the resolved signature.
        self.calls.append((inst.value, constraint))


class NullDebug(object):
    """No-op debug listener used when type-inference debugging is disabled."""

    def propagate_started(self):
        pass

    def propagate_finished(self):
        pass

    def unify_finished(self, typdict, retty, fntys):
        pass


class TypeInferDebug(object):
    """Debug listener that dumps type-inference state to stdout."""

    def __init__(self, typeinfer):
        # The TypeInferer instance being observed.
        self.typeinfer = typeinfer

    def _dump_state(self):
        # Print every type variable currently known to the inferencer.
        print('---- type variables ----')
        pprint([v for k, v in sorted(self.typeinfer.typevars.items())])

    def propagate_started(self):
        print("propagate".center(80, '-'))

    def propagate_finished(self):
        self._dump_state()

    def unify_finished(self, typdict, retty, fntys):
        # Summarise the final unification result: per-variable types, the
        # unified return type, and the resolved call signatures.
        print("Variable types".center(80, "-"))
        pprint(typdict)
        print("Return type".center(80, "-"))
        pprint(retty)
        print("Call types".center(80, "-"))
        pprint(fntys)
{ "content_hash": "52ddda9487aac6bd226a6d799a5c9476", "timestamp": "", "source": "github", "line_count": 1780, "max_line_length": 97, "avg_line_length": 39.938202247191015, "alnum_prop": 0.5341960894640596, "repo_name": "numba/numba", "id": "98295d899b34719a09fa4992b091c389892b8df8", "size": "71090", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "numba/core/typeinfer.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "3522" }, { "name": "C", "bytes": "574888" }, { "name": "C++", "bytes": "166526" }, { "name": "Cuda", "bytes": "2063" }, { "name": "GDB", "bytes": "101" }, { "name": "HTML", "bytes": "3464" }, { "name": "Python", "bytes": "9400448" }, { "name": "Shell", "bytes": "13621" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.core.management.base import NoArgsCommand, CommandError
from django.test.client import RequestFactory

from allaccess.models import Provider


class Command(NoArgsCommand):
    """Convert existing providers from django-social-auth to django-all-access."""

    def handle_noargs(self, **options):
        # Default to Django's standard verbosity of 1 when the option is
        # absent: int(options.get('verbosity')) would raise TypeError on None.
        verbosity = int(options.get('verbosity', 1))
        try:
            from social_auth import version
            from social_auth.backends import get_backends, BaseOAuth
        except ImportError:  # pragma: no cover
            raise CommandError("django-social-auth is not installed.")
        # Some backends resolve their settings relative to a request instance.
        request = RequestFactory().get('/')
        for name, backend in get_backends().items():
            # Only enabled OAuth-style backends carry key/secret pairs worth
            # migrating.
            if issubclass(backend, BaseOAuth) and backend.enabled():
                if version < (0, 7):
                    # Prior to 0.7 get_key_and_secret was an instance method
                    backend = backend(request, '/')
                # Create providers if they don't already exist
                key, secret = backend.get_key_and_secret()
                defaults = {
                    'request_token_url': getattr(backend, 'REQUEST_TOKEN_URL', '') or '',
                    'authorization_url': getattr(backend, 'AUTHORIZATION_URL', '') or '',
                    'access_token_url': getattr(backend, 'ACCESS_TOKEN_URL', '') or '',
                    'profile_url': '',
                    # Empty strings are stored as NULL so the model treats the
                    # provider as unconfigured rather than misconfigured.
                    'consumer_key': key or None,
                    'consumer_secret': secret or None,
                }
                provider, created = Provider.objects.get_or_create(name=name, defaults=defaults)
                if created and verbosity > 0:
                    self.stdout.write('New provider created from "%s" backend.\n' % name)
{ "content_hash": "5f97269904c95045f6cbefd30a5793cb", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 96, "avg_line_length": 48.24324324324324, "alnum_prop": 0.584873949579832, "repo_name": "hydroshare/hydroshare-demo-auth", "id": "b579644888d281271d5d16cafd64ba54750fc6bd", "size": "1785", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "allaccess/management/commands/migrate_social_providers.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "3345" }, { "name": "Python", "bytes": "122570" } ], "symlink_target": "" }
from django.conf.urls import url, include
from django.views.generic import ListView, DetailView

from . import views

# URL routes for the location app: a listing at the root, a second listing
# at /foo/ (presumably location-filtered, given the view name — confirm
# against views.fetch_places_loc), and create/detail/edit/delete endpoints
# keyed by the numeric ``id`` captured from the path.
urlpatterns = [
    url(r'^$', views.fetch_places, name= 'list'),
    url(r'^foo/', views.fetch_places_loc, name='foo'),
    url(r'^create/$', views.post_create),
    url(r'^(?P<id>\d+)/$', views.post_detail, name='detail'),
    url(r'^(?P<id>\d+)/edit/$', views.post_update, name='update'),
    url(r'^(?P<id>\d+)/delete/$', views.post_delete, name= 'delete'),
]
{ "content_hash": "54e211f17ea69ef444686d0ac97d71ad", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 69, "avg_line_length": 40.166666666666664, "alnum_prop": 0.6244813278008299, "repo_name": "radamizell/WallApp", "id": "22fcb14c04cc74c1cf15646d73ff8cfa79ef030e", "size": "482", "binary": false, "copies": "1", "ref": "refs/heads/DO-database-version", "path": "location/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6045" }, { "name": "HTML", "bytes": "43999" }, { "name": "JavaScript", "bytes": "1996" }, { "name": "Python", "bytes": "12870" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


# Replaces Ring.poster with a nullable Ring.user foreign key that points at
# the project's swappable AUTH_USER_MODEL (cascade-deleted with the user).
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('ringapp', '0040_auto_20170906_2208'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ring',
            name='poster',
        ),
        migrations.AddField(
            model_name='ring',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
{ "content_hash": "e78b734eb924fef5d15ad011b136daaf", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 133, "avg_line_length": 27.56, "alnum_prop": 0.6298984034833092, "repo_name": "rschwiebert/RingApp", "id": "df15f54ee743de60200c1052e0a3f521756bccb6", "size": "761", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ringapp/migrations/0041_auto_20171021_2116.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "645" }, { "name": "HTML", "bytes": "112733" }, { "name": "JavaScript", "bytes": "1823" }, { "name": "Procfile", "bytes": "40" }, { "name": "Python", "bytes": "275458" } ], "symlink_target": "" }
if __name__ == "__main__":
    import openpnm as op
    import porespy as ps
    import numpy as np
    import os
    from pathlib import Path

    # %% Read image from file in fixtures
    path = Path(os.path.realpath(__file__),
                '../../../tests/fixtures/berea_100_to_300.npz')
    data = np.load(path.resolve())
    im = data['im']

    # %% Note meta data for this image
    # Reference values (porosity, permeability in mD, formation factor) come
    # from direct numerical simulation (DNS) and are asserted against below.
    data = {
        'shape': {
            'x': im.shape[0],
            'y': im.shape[1],
            'z': im.shape[2],
        },
        'resolution': 5.345e-6,
        'porosity': 19.6,
        'permeability': {
            'Kx': 1360,
            'Ky': 1304,
            'Kz': 1193,
            'Kave': 1286,
        },
        'formation factor': {
            'Fx': 23.12,
            'Fy': 23.99,
            'Fz': 25.22,
            'Fave': 24.08,
        },
    }

    # %% Perform extraction
    # boundary_width pads 3 voxels on the x faces only, matching the +6 added
    # to the domain length L further down.
    snow = ps.networks.snow2(im, voxel_size=data['resolution'],
                             boundary_width=[3, 0, 0],
                             accuracy='standard')
    # ps.imshow(snow.regions/snow.phase)

    # %% Open network in OpenPNM
    pn = op.io.network_from_porespy(snow.network)
    pn['pore.diameter'] = pn['pore.equivalent_diameter']
    pn['throat.diameter'] = pn['throat.inscribed_diameter']
    pn['throat.spacing'] = pn['throat.total_length']
    # pn.add_model(propname='throat.conduit_lengths',
    #              model=op.models.geometry.conduit_lengths.pyramids_and_cuboids)
    pn.add_model(propname='throat.hydraulic_size_factors',
                 model=op.models.geometry.hydraulic_size_factors.pyramids_and_cuboids)
    pn.add_model(propname='throat.diffusive_size_factors',
                 model=op.models.geometry.diffusive_size_factors.pyramids_and_cuboids)
    pn.regenerate_models()
    # Drop pores disconnected from the percolating cluster so the transport
    # solves are well-posed.
    h = op.utils.check_network_health(pn)
    op.topotools.trim(network=pn, pores=h['disconnected_pores'])

    # Unit-property phase: with D = mu = 1 the computed rates reduce directly
    # to the geometric transport coefficients.
    gas = op.phase.Phase(network=pn)
    gas['pore.diffusivity'] = 1.0
    gas['pore.viscosity'] = 1.0
    gas.add_model_collection(op.models.collections.physics.basic)
    gas.regenerate_models()

    # %% Perform Fickian Diffusion to find formation factor
    fd = op.algorithms.FickianDiffusion(network=pn, phase=gas)
    fd.set_value_BC(pores=pn.pores('xmin'), values=1.0)
    fd.set_value_BC(pores=pn.pores('xmax'), values=0.0)
    fd.run()
    dC = 1.0
    # L includes the 6 boundary voxels added by boundary_width=[3, 0, 0].
    L = (data['shape']['x'] + 6)*data['resolution']
    A = data['shape']['y']*data['shape']['z']*data['resolution']**2
    Deff = fd.rate(pores=pn.pores('xmin'))*(L/A)/dC
    # Formation factor is the reciprocal of the effective diffusivity here
    # because the bulk diffusivity was set to 1.
    F = 1/Deff
    print(f"The Formation factor of the extracted network is {F}")
    print(f"The compares to a value of {data['formation factor']['Fx']} from DNS")
    np.testing.assert_allclose(F, data['formation factor']['Fx'], rtol=0.09)

    # %% Perform Stokes flow to find Permeability coefficient
    sf = op.algorithms.StokesFlow(network=pn, phase=gas)
    sf.set_value_BC(pores=pn.pores('xmin'), values=1.0)
    sf.set_value_BC(pores=pn.pores('xmax'), values=0.0)
    sf.run()
    dP = 1.0
    L = (data['shape']['x'] + 6)*data['resolution']
    A = data['shape']['y']*data['shape']['z']*data['resolution']**2
    # 1e12 converts m^2 to Darcy (approximately); DNS value is in mD, hence
    # the /1000 in the comparison below.
    K = sf.rate(pores=pn.pores('xmin'))*(L/A)/dP*1e12
    print(f'Permeability coefficient is {K} Darcy')
    print(f"The compares to a value of {data['permeability']['Kx']/1000} from DNS")
    np.testing.assert_allclose(K, data['permeability']['Kx']/1000, rtol=0.05)
{ "content_hash": "ff5c95ac843d67a71684ec5fade44943", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 86, "avg_line_length": 37.108695652173914, "alnum_prop": 0.5884592852958407, "repo_name": "PMEAL/OpenPNM", "id": "82e4ba93c289f271a039c8423e806747c8ee83ec", "size": "3414", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/integration/PoreSpyIO_on_Berea.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "375" }, { "name": "Python", "bytes": "1437146" } ], "symlink_target": "" }
"""RemoveKeyFromList
Removes a Key from a JSON-backed List
"""

import demistomock as demisto
from CommonServerPython import *  # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import *  # noqa

from typing import Dict, Any
import traceback

''' STANDALONE FUNCTION '''


def remove_key_from_list(list_name: str, key_name: str) -> str:
    """Remove ``key_name`` from the JSON object stored in the XSOAR list
    ``list_name`` and persist the result with ``setList``.

    Returns a human-readable status message.  Raises ValueError when the
    list cannot be retrieved or does not contain valid JSON.
    """
    res = demisto.executeCommand('getList', {'listName': list_name})
    if (
        not isinstance(res, list)
        or 'Contents' not in res[0]
        or not isinstance(res[0]['Contents'], str)
        or res[0]['Contents'] == 'Item not found (8)'
    ):
        raise ValueError(f'Cannot retrieve list {list_name}')

    list_data: Dict = {}
    data: str = res[0]['Contents']
    if data and len(data) > 0:
        try:
            list_data = json.loads(data)
        except json.decoder.JSONDecodeError as e:
            raise ValueError(f'List does not contain valid JSON data: {e}')

    # Membership test instead of truthiness of the popped value: a key whose
    # stored value is falsy (None, '', 0, {}, []) must still be removed and
    # saved, which the previous ``if not elem`` check silently skipped.
    if key_name not in list_data:
        return f'Key {key_name} not found in list {list_name}, cannot remove.'
    list_data.pop(key_name)

    demisto.executeCommand('setList', {'listName': list_name, 'listData': json.dumps(list_data)})

    return f'Successfully removed key {key_name} from list {list_name}.'


''' COMMAND FUNCTION '''


def remove_key_from_list_command(args: Dict[str, Any]) -> CommandResults:
    """Validate the command arguments and run the removal."""
    list_name = args.get('listName', None)
    if not list_name:
        raise ValueError('listName must be specified')

    key_name = args.get('keyName', None)
    if not key_name:
        raise ValueError('keyName must be specified')

    # Call the standalone function and get the raw response
    result = remove_key_from_list(list_name, key_name)

    return CommandResults(
        readable_output=result
    )


''' MAIN FUNCTION '''


def main():
    try:
        return_results(remove_key_from_list_command(demisto.args()))
    except Exception as ex:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute RemoveKeyFromList. Error: {str(ex)}')


''' ENTRY POINT '''


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
{ "content_hash": "9be2c7a53ce4dd48eb9c2bd1033a515e", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 97, "avg_line_length": 27.858974358974358, "alnum_prop": 0.6438104003681546, "repo_name": "VirusTotal/content", "id": "d27544af10e70475b1a9bea3e3566e13e67575d5", "size": "2173", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Packs/CommonScripts/Scripts/RemoveKeyFromList/RemoveKeyFromList.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "2146" }, { "name": "HTML", "bytes": "205901" }, { "name": "JavaScript", "bytes": "1584075" }, { "name": "PowerShell", "bytes": "442288" }, { "name": "Python", "bytes": "47594464" }, { "name": "Rich Text Format", "bytes": "480911" }, { "name": "Shell", "bytes": "108066" }, { "name": "YARA", "bytes": "1185" } ], "symlink_target": "" }
from setuptools import setup, find_packages

# Packaging metadata for zeit.care: helper scripts for managing DAV content,
# exposed as console_scripts entry points.
setup(
    name='zeit.care',
    version='0.3.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    description="Helper scripts for managing DAV content",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit'],
    install_requires=[
        'zeit.connector',
        'setuptools',
        'pytz',
    ],
    entry_points="""
    [console_scripts]
    isofication=zeit.care.worker:isofy_main
    xslt=zeit.care.worker:xslt_main
    divisor=zeit.care.divisor:main
    boxinjector=zeit.care.boxinjector:main
    ressortindexwriter=zeit.care.ressortindex:main
    commentthreadworker=zeit.care.commentthread:main
    propertyworker=zeit.care.worker:property_main
    xmlworker=zeit.care.xmlworker:main
    """
)
{ "content_hash": "5a73d0a1a0defd1abf0c4eb58c84a79f", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 58, "avg_line_length": 29.333333333333332, "alnum_prop": 0.6415289256198347, "repo_name": "ZeitOnline/zeit.care", "id": "ae7100aa84f7913c14b14fb0cc058ee9610ce264", "size": "968", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "109209" }, { "name": "XSLT", "bytes": "1849" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case: 32-point daily series with a Lag1Trend trend,
# 7-period cycle, Quantization transform, zero noise (sigma=0.0), 20
# exogenous variables and no autoregressive terms (ar_order=0).
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);
{ "content_hash": "33fdf9a02a73ae82adbbe31fe0c0d928", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 166, "avg_line_length": 38, "alnum_prop": 0.706766917293233, "repo_name": "antoinecarme/pyaf", "id": "b0029ab23484ee3988c3e11377762fa4d9b36283", "size": "266", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_7/ar_/test_artificial_32_Quantization_Lag1Trend_7__20.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
from django.db import migrations, models

import saleor.core.utils.json_serializer


# Adds ``metadata`` and ``private_metadata`` JSON fields to the Page model,
# both defaulting to an empty dict and serialized with the project's custom
# JSON encoder.
class Migration(migrations.Migration):

    dependencies = [
        ("page", "0013_update_publication_date"),
    ]

    operations = [
        migrations.AddField(
            model_name="page",
            name="metadata",
            field=models.JSONField(
                blank=True,
                default=dict,
                encoder=saleor.core.utils.json_serializer.CustomJsonEncoder,
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="page",
            name="private_metadata",
            field=models.JSONField(
                blank=True,
                default=dict,
                encoder=saleor.core.utils.json_serializer.CustomJsonEncoder,
                null=True,
            ),
        ),
    ]
{ "content_hash": "73c0a320839680d5a33e35b8fecba68e", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 76, "avg_line_length": 26.12121212121212, "alnum_prop": 0.5197215777262181, "repo_name": "mociepka/saleor", "id": "c01f53dfdcf2e2031e27c6b42578328e814898af", "size": "911", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "saleor/page/migrations/0014_add_metadata.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "2228" }, { "name": "HTML", "bytes": "249248" }, { "name": "Procfile", "bytes": "290" }, { "name": "Python", "bytes": "12686831" }, { "name": "Shell", "bytes": "439" } ], "symlink_target": "" }
from __future__ import unicode_literals

import swapper

from accelerator_abstract.models.base_startup_program_interest import (
    BaseStartupProgramInterest
)


# Concrete, swappable implementation of BaseStartupProgramInterest: the
# swappable Meta setting lets downstream projects substitute their own model
# via django-swapper.
class StartupProgramInterest(BaseStartupProgramInterest):

    class Meta(BaseStartupProgramInterest.Meta):
        swappable = swapper.swappable_setting(
            BaseStartupProgramInterest.Meta.app_label,
            "StartupProgramInterest")
{ "content_hash": "dccd625777ee191a564a5e42ef773713", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 71, "avg_line_length": 29.357142857142858, "alnum_prop": 0.7664233576642335, "repo_name": "masschallenge/django-accelerator", "id": "0e426651dcbd5c3b7514ce288caa460ce41fed3d", "size": "411", "binary": false, "copies": "1", "ref": "refs/heads/development", "path": "accelerator/models/startup_program_interest.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "1848" }, { "name": "Makefile", "bytes": "6817" }, { "name": "Python", "bytes": "996767" }, { "name": "Shell", "bytes": "2453" } ], "symlink_target": "" }
# Bitstamp exchange gateway: connects to Bitstamp's Pusher websocket feed,
# parses order-book snapshots and trades, and forwards them to the
# configured database clients via the ExchangeGateway base class.
from befh.ws_api_socket import WebSocketApiClient
from befh.market_data import L2Depth, Trade
from befh.exchanges.gateway import ExchangeGateway
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from befh.util import Logger
# NOTE(review): 'time' and 'threading' appear unused in this module.
import time
import threading
import json
from functools import partial
from datetime import datetime


class ExchGwApiBitstamp(WebSocketApiClient):
    """
    Exchange socket

    Knows the Bitstamp/Pusher wire format: channel subscription strings and
    the JSON field names used in order-book and trade messages.
    """
    def __init__(self):
        """
        Constructor
        """
        WebSocketApiClient.__init__(self, 'Bitstamp')

    # --- Field-name accessors: names of keys in Bitstamp's JSON payloads ---

    @classmethod
    def get_trades_timestamp_field_name(cls):
        return 'timestamp'

    @classmethod
    def get_bids_field_name(cls):
        return 'bids'

    @classmethod
    def get_asks_field_name(cls):
        return 'asks'

    @classmethod
    def get_trade_side_field_name(cls):
        return 'type'

    @classmethod
    def get_trade_id_field_name(cls):
        return 'id'

    @classmethod
    def get_trade_price_field_name(cls):
        return 'price'

    @classmethod
    def get_trade_volume_field_name(cls):
        return 'amount'

    @classmethod
    def get_link(cls):
        # Bitstamp streams via Pusher; this is the Pusher app endpoint.
        return 'ws://ws.pusherapp.com/app/de504dc5763aeef9ff52?protocol=7'

    @classmethod
    def get_order_book_subscription_string(cls, instmt):
        # Default instrument (empty code) uses the bare "order_book" channel;
        # other instruments use a per-instrument suffixed channel.
        if cls.is_default_instmt(instmt):
            return json.dumps({"event":"pusher:subscribe","data":{"channel":"order_book"}})
        else:
            return json.dumps({"event":"pusher:subscribe","data":{"channel":"order_book_%s" % instmt.get_instmt_code()}})

    @classmethod
    def get_trades_subscription_string(cls, instmt):
        # Same default-vs-suffixed channel scheme as the order book.
        if cls.is_default_instmt(instmt):
            return json.dumps({"event":"pusher:subscribe","data":{"channel":"live_trades"}})
        else:
            return json.dumps({"event":"pusher:subscribe","data":{"channel":"live_trades_%s" % instmt.get_instmt_code()}})

    @classmethod
    def is_default_instmt(cls, instmt):
        # Treat an empty code — or a code that is literally a pair of quote
        # characters — as the exchange's default instrument.
        return instmt.get_instmt_code() == "\"\"" or instmt.get_instmt_code() == "" or instmt.get_instmt_code() == "''"

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Parse raw data to L2 depth
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: The instrument's L2Depth object, updated in place.
        :raises Exception: if the payload lacks the bids/asks keys.
        """
        l2_depth = instmt.get_l2_depth()
        keys = list(raw.keys())
        if cls.get_bids_field_name() in keys and \
           cls.get_asks_field_name() in keys:
            # Date time (UTC, local arrival time — the payload timestamp is
            # not used here)
            l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")

            # Bids: copy up to l2_depth.depth levels; each level is a
            # [price, volume] pair, coerced to float if needed.
            bids = raw[cls.get_bids_field_name()]
            bids_len = min(l2_depth.depth, len(bids))
            for i in range(0, bids_len):
                l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
                l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]

            # Asks: same handling as bids.
            asks = raw[cls.get_asks_field_name()]
            asks_len = min(l2_depth.depth, len(asks))
            for i in range(0, asks_len):
                l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
                l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
        else:
            raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))

        return l2_depth

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Parse a raw trade message into a Trade object.

        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: Populated Trade object.
        :raises Exception: if any expected trade key is missing.
        """
        trade = Trade()
        keys = list(raw.keys())
        if cls.get_trades_timestamp_field_name() in keys and \
           cls.get_trade_id_field_name() in keys and \
           cls.get_trade_side_field_name() in keys and \
           cls.get_trade_price_field_name() in keys and \
           cls.get_trade_volume_field_name() in keys:

            # Date time: exchange-supplied epoch seconds, rendered in UTC.
            date_time = float(raw[cls.get_trades_timestamp_field_name()])
            trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")

            # Trade side
            # Buy = 0
            # Side = 1
            # NOTE(review): the +1 shifts Bitstamp's 0/1 side code into the
            # range Trade.parse_side expects — confirm against
            # befh.market_data.Trade.parse_side.
            trade.trade_side = Trade.parse_side(raw[cls.get_trade_side_field_name()] + 1)

            # Trade id (stringified; compared against the last-seen id by the
            # gateway to suppress duplicates)
            trade.trade_id = str(raw[cls.get_trade_id_field_name()])

            # Trade price
            # NOTE(review): unlike parse_l2_depth, price/volume are stored
            # without float coercion — verify downstream consumers accept the
            # raw JSON type.
            trade.trade_price = raw[cls.get_trade_price_field_name()]

            # Trade volume
            trade.trade_volume = raw[cls.get_trade_volume_field_name()]
        else:
            raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))

        return trade


class ExchGwBitstamp(ExchangeGateway):
    """
    Exchange gateway

    Wires the Bitstamp socket client into the generic gateway framework:
    subscribes channels on open, parses incoming messages, and inserts
    order-book/trade updates into the database clients.
    """
    def __init__(self, db_clients):
        """
        Constructor
        :param db_client: Database client
        """
        ExchangeGateway.__init__(self, ExchGwApiBitstamp(), db_clients)

    @classmethod
    def get_exchange_name(cls):
        """
        Get exchange name
        :return: Exchange name string
        """
        return 'Bitstamp'

    def on_open_handler(self, instmt, ws):
        """
        Socket on open handler

        Sends both subscription messages once per instrument; the subscribed
        flag prevents re-subscribing on reconnect callbacks.
        :param instmt: Instrument
        :param ws: Web socket
        """
        Logger.info(self.__class__.__name__, "Instrument %s is subscribed in channel %s" % \
                  (instmt.get_instmt_name(), instmt.get_exchange_name()))
        if not instmt.get_subscribed():
            ws.send(self.api_socket.get_order_book_subscription_string(instmt))
            ws.send(self.api_socket.get_trades_subscription_string(instmt))
            instmt.set_subscribed(True)

    def on_close_handler(self, instmt, ws):
        """
        Socket on close handler
        :param instmt: Instrument
        :param ws: Web socket
        """
        Logger.info(self.__class__.__name__, "Instrument %s is unsubscribed in channel %s" % \
                  (instmt.get_instmt_name(), instmt.get_exchange_name()))
        instmt.set_subscribed(False)

    def on_message_handler(self, instmt, message):
        """
        Incoming message handler

        Routes Pusher events by channel name: order-book updates are parsed
        and inserted only when the book actually changed; trades are inserted
        only when the exchange trade id is new.
        :param instmt: Instrument
        :param message: Message
        """
        keys = message.keys()
        if 'event' in keys and message['event'] in ['data', 'trade'] and 'channel' in keys and 'data' in keys:
            channel_name = message['channel']
            if (self.api_socket.is_default_instmt(instmt) and channel_name == "order_book") or \
               (not self.api_socket.is_default_instmt(instmt) and channel_name == "order_book_%s" % instmt.get_instmt_code()):
                # Keep the previous book so is_diff() can suppress no-op
                # snapshots.
                instmt.set_prev_l2_depth(instmt.get_l2_depth().copy())
                self.api_socket.parse_l2_depth(instmt, json.loads(message['data']))
                if instmt.get_l2_depth().is_diff(instmt.get_prev_l2_depth()):
                    instmt.incr_order_book_id()
                    self.insert_order_book(instmt)
            elif (self.api_socket.is_default_instmt(instmt) and channel_name == "live_trades") or \
                 (not self.api_socket.is_default_instmt(instmt) and channel_name == "live_trades_%s" % instmt.get_instmt_code()):
                trade = self.api_socket.parse_trade(instmt, json.loads(message['data']))
                # De-duplicate by exchange trade id before inserting.
                if trade.trade_id != instmt.get_exch_trade_id():
                    instmt.incr_trade_id()
                    instmt.set_exch_trade_id(trade.trade_id)
                    self.insert_trade(instmt, trade)

    def start(self, instmt):
        """
        Start the exchange gateway

        Initializes the instrument's depth buffers and snapshot table, then
        opens the websocket connection with the handlers bound to this
        instrument.
        :param instmt: Instrument
        :return List of threads
        """
        instmt.set_l2_depth(L2Depth(20))
        instmt.set_prev_l2_depth(L2Depth(20))
        instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
                                                                                  instmt.get_instmt_name()))
        self.init_instmt_snapshot_table(instmt)
        return [self.api_socket.connect(self.api_socket.get_link(),
                                        on_message_handler=partial(self.on_message_handler, instmt),
                                        on_open_handler=partial(self.on_open_handler, instmt),
                                        on_close_handler=partial(self.on_close_handler, instmt))]


if __name__ == '__main__':
    # Manual smoke test: stream BTCUSD (the default instrument, empty code)
    # into the template SQL client.
    exchange_name = 'Bitstamp'
    instmt_name = 'BTCUSD'
    instmt_code = ''
    instmt = Instrument(exchange_name, instmt_name, instmt_code)
    db_client = SqlClientTemplate()
    Logger.init_log()
    exch = ExchGwBitstamp([db_client])
    td = exch.start(instmt)
{ "content_hash": "b00e6c2302d0b0eca4ad1cedfa072bd0", "timestamp": "", "source": "github", "line_count": 242, "max_line_length": 129, "avg_line_length": 37.22314049586777, "alnum_prop": 0.5721580817051509, "repo_name": "Aurora-Team/BitcoinExchangeFH", "id": "b5cb1e3d25414106e3dcd42f66fd03c8644eb755", "size": "9008", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "befh/exchanges/bitstamp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "325307" } ], "symlink_target": "" }
"""byteplay - convert Python 2 code objects to an editable list-of-opcodes
representation and back.

This half of the module defines the opcode tables: name/number mappings,
category sets (hasarg, hasjump, ...), static stack effects (_se / getse),
and the code-object flag constants.  Supports CPython 2.4 - 2.7 only.
"""

__version__ = '0.2'

__all__ = ['opmap', 'opname', 'opcodes',
           'cmp_op', 'hasarg', 'hasname', 'hasjrel', 'hasjabs',
           'hasjump', 'haslocal', 'hascompare', 'hasfree', 'hascode',
           'hasflow', 'getse',
           'Opcode', 'SetLineno', 'Label', 'isopcode',
           'Code', 'CodeList', 'printcodelist']

import opcode
from dis import findlabels
import types
from array import array
import operator
import itertools
import sys
import warnings
from cStringIO import StringIO

######################################################################
# Define opcodes and information about them

python_version = '.'.join(str(x) for x in sys.version_info[:2])
if python_version not in ('2.4', '2.5', '2.6', '2.7'):
    warnings.warn("byteplay doesn't support Python version "+python_version)

class Opcode(int):
    """An int which represents an opcode - has a nicer repr."""
    def __repr__(self):
        return opname[self]
    __str__ = __repr__

class CodeList(list):
    """A list for storing opcode tuples - has a nicer __str__."""
    def __str__(self):
        f = StringIO()
        printcodelist(self, f)
        return f.getvalue()

# Name -> Opcode mapping; '+' in stdlib opcode names (e.g. SLICE+1) becomes
# '_' so every opcode name is a valid Python identifier.  EXTENDED_ARG is
# excluded: it is handled transparently during (dis)assembly.
opmap = dict((name.replace('+', '_'), Opcode(code))
             for name, code in opcode.opmap.iteritems()
             if name != 'EXTENDED_ARG')
opname = dict((code, name) for name, code in opmap.iteritems())
opcodes = set(opname)

def globalize_opcodes():
    # Inject every opcode name (LOAD_FAST, ...) into the module namespace
    # and into __all__, so users can refer to opcodes directly.
    for name, code in opmap.iteritems():
        globals()[name] = code
        __all__.append(name)
globalize_opcodes()

cmp_op = opcode.cmp_op

# Category sets, mirroring the stdlib `opcode` module but holding Opcode
# instances for nicer reprs.
hasarg = set(x for x in opcodes if x >= opcode.HAVE_ARGUMENT)
hasconst = set(Opcode(x) for x in opcode.hasconst)
hasname = set(Opcode(x) for x in opcode.hasname)
hasjrel = set(Opcode(x) for x in opcode.hasjrel)
hasjabs = set(Opcode(x) for x in opcode.hasjabs)
hasjump = hasjrel.union(hasjabs)
haslocal = set(Opcode(x) for x in opcode.haslocal)
hascompare = set(Opcode(x) for x in opcode.hascompare)
hasfree = set(Opcode(x) for x in opcode.hasfree)
# Opcodes whose argument is a nested code object.
hascode = set([MAKE_FUNCTION, MAKE_CLOSURE])

class _se:
    """Quick way of defining static stack effects of opcodes"""
    # Taken from assembler.py by Phillip J. Eby
    # Each value is a (pop, push) pair.
    NOP = 0,0

    POP_TOP = 1,0
    ROT_TWO = 2,2
    ROT_THREE = 3,3
    ROT_FOUR = 4,4
    DUP_TOP = 1,2

    UNARY_POSITIVE = UNARY_NEGATIVE = UNARY_NOT = UNARY_CONVERT = \
    UNARY_INVERT = GET_ITER = LOAD_ATTR = 1,1

    IMPORT_FROM = 1,2

    BINARY_POWER = BINARY_MULTIPLY = BINARY_DIVIDE = BINARY_FLOOR_DIVIDE = \
    BINARY_TRUE_DIVIDE = BINARY_MODULO = BINARY_ADD = BINARY_SUBTRACT = \
    BINARY_SUBSCR = BINARY_LSHIFT = BINARY_RSHIFT = BINARY_AND = \
    BINARY_XOR = BINARY_OR = COMPARE_OP = 2,1

    INPLACE_POWER = INPLACE_MULTIPLY = INPLACE_DIVIDE = \
    INPLACE_FLOOR_DIVIDE = INPLACE_TRUE_DIVIDE = INPLACE_MODULO = \
    INPLACE_ADD = INPLACE_SUBTRACT = INPLACE_LSHIFT = INPLACE_RSHIFT = \
    INPLACE_AND = INPLACE_XOR = INPLACE_OR = 2,1

    SLICE_0, SLICE_1, SLICE_2, SLICE_3 = \
        (1,1),(2,1),(2,1),(3,1)
    STORE_SLICE_0, STORE_SLICE_1, STORE_SLICE_2, STORE_SLICE_3 = \
        (2,0),(3,0),(3,0),(4,0)
    DELETE_SLICE_0, DELETE_SLICE_1, DELETE_SLICE_2, DELETE_SLICE_3 = \
        (1,0),(2,0),(2,0),(3,0)

    STORE_SUBSCR = 3,0
    DELETE_SUBSCR = STORE_ATTR = 2,0
    DELETE_ATTR = STORE_DEREF = 1,0
    PRINT_NEWLINE = 0,0
    PRINT_EXPR = PRINT_ITEM = PRINT_NEWLINE_TO = IMPORT_STAR = 1,0
    STORE_NAME = STORE_GLOBAL = STORE_FAST = 1,0
    PRINT_ITEM_TO = 2,0

    LOAD_LOCALS = LOAD_CONST = LOAD_NAME = LOAD_GLOBAL = LOAD_FAST = \
    LOAD_CLOSURE = LOAD_DEREF = BUILD_MAP = 0,1

    DELETE_FAST = DELETE_GLOBAL = DELETE_NAME = 0,0

    EXEC_STMT = 3,0
    BUILD_CLASS = 3,1

    STORE_MAP = MAP_ADD = 2,0
    SET_ADD = 1,0

    # Effects that changed between interpreter versions.
    if python_version == '2.4':
        YIELD_VALUE = 1,0
        IMPORT_NAME = 1,1
        LIST_APPEND = 2,0
    elif python_version == '2.5':
        YIELD_VALUE = 1,1
        IMPORT_NAME = 2,1
        LIST_APPEND = 2,0
    elif python_version == '2.6':
        YIELD_VALUE = 1,1
        IMPORT_NAME = 2,1
        LIST_APPEND = 2,0
    elif python_version == '2.7':
        YIELD_VALUE = 1,1
        IMPORT_NAME = 2,1
        LIST_APPEND = 1,0

# Rebind _se from the helper class to a plain {opcode: (pop, push)} dict,
# keeping only opcodes that exist in this interpreter.
_se = dict((op, getattr(_se, opname[op]))
           for op in opcodes
           if hasattr(_se, opname[op]))

# Opcodes with special flow control: everything without a static stack
# effect and without an arg-dependent effect handled in getse() below.
hasflow = opcodes - set(_se) - \
          set([CALL_FUNCTION, CALL_FUNCTION_VAR, CALL_FUNCTION_KW,
               CALL_FUNCTION_VAR_KW, BUILD_TUPLE, BUILD_LIST,
               UNPACK_SEQUENCE, BUILD_SLICE, DUP_TOPX, RAISE_VARARGS,
               MAKE_FUNCTION, MAKE_CLOSURE])
if python_version == '2.7':
    hasflow = hasflow - set([BUILD_SET])

def getse(op, arg=None):
    """Get the stack effect of an opcode, as a (pop, push) tuple.

    If an arg is needed and is not given, a ValueError is raised.
    If op isn't a simple opcode, that is, the flow doesn't always continue to
    the next opcode, a ValueError is raised.
    """
    try:
        return _se[op]
    except KeyError:
        # Continue to opcodes with an effect that depends on arg
        pass

    if arg is None:
        raise ValueError("Opcode stack behaviour depends on arg")

    def get_func_tup(arg, nextra):
        # CALL_FUNCTION-family arg encodes positional count in the low byte
        # and keyword-pair count in the high byte.
        if arg > 0xFFFF:
            raise ValueError("Can only split a two-byte argument")
        return (nextra + 1 + (arg & 0xFF) + 2*((arg >> 8) & 0xFF),
                1)

    if op == CALL_FUNCTION:
        return get_func_tup(arg, 0)
    elif op == CALL_FUNCTION_VAR:
        return get_func_tup(arg, 1)
    elif op == CALL_FUNCTION_KW:
        return get_func_tup(arg, 1)
    elif op == CALL_FUNCTION_VAR_KW:
        return get_func_tup(arg, 2)

    elif op == BUILD_TUPLE:
        return arg, 1
    elif op == BUILD_LIST:
        return arg, 1
    elif python_version == '2.7' and op == BUILD_SET:
        return arg, 1
    elif op == UNPACK_SEQUENCE:
        return 1, arg
    elif op == BUILD_SLICE:
        return arg, 1
    elif op == DUP_TOPX:
        return arg, arg*2
    elif op == RAISE_VARARGS:
        return 1+arg, 1
    elif op == MAKE_FUNCTION:
        return 1+arg, 1
    elif op == MAKE_CLOSURE:
        if python_version == '2.4':
            # In 2.4 the effect depends on the free vars of TOS, not on arg.
            raise ValueError("The stack effect of MAKE_CLOSURE depends on TOS")
        else:
            return 2+arg, 1
    else:
        raise ValueError("The opcode %r isn't recognized or has a special "
                         "flow control" % op)

class SetLinenoType(object):
    # Singleton marker used in code lists to record "source line changes
    # here"; see the SetLineno instance below.
    def __repr__(self):
        return 'SetLineno'
SetLineno = SetLinenoType()

class Label(object):
    # Jump target marker; compared by identity.
    pass

def isopcode(obj):
    """Return whether obj is an opcode - not SetLineno or Label"""
    return obj is not SetLineno and not isinstance(obj, Label)

# Flags from code.h
CO_OPTIMIZED = 0x0001               # use LOAD/STORE_FAST instead of _NAME
CO_NEWLOCALS = 0x0002               # only cleared for module/exec code
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010
# ???  (original annotation for CO_NESTED above; meaning not documented here)
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040                  # set if no free or cell vars
CO_GENERATOR_ALLOWED = 0x1000       # unused
# The future flags are only used on code generation, so we can ignore them.
# (It does cause some warnings, though.)
CO_FUTURE_DIVISION = 0x2000
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000
CO_FUTURE_WITH_STATEMENT = 0x8000

######################################################################
# Define the Code class

class Code(object):
    """An object which holds all the information which a Python code object
    holds, but in an easy-to-play-with representation.

    The attributes are:

    Affecting action
    ----------------
    code - list of 2-tuples: the code
    freevars - list of strings: the free vars of the code (those are names
               of variables created in outer functions and used in the
               function)
    args - list of strings: the arguments of the code
    varargs - boolean: Does args end with a '*args' argument
    varkwargs - boolean: Does args end with a '**kwargs' argument
    newlocals - boolean: Should a new local namespace be created.
                (True in functions, False for module and exec code)

    Not affecting action
    --------------------
    name - string: the name of the code (co_name)
    filename - string: the file name of the code (co_filename)
    firstlineno - int: the first line number (co_firstlineno)
    docstring - string or None: the docstring (the first item of co_consts,
                if it's str or unicode)

    code is a list of 2-tuples. The first item is an opcode, or SetLineno, or
    a Label instance. The second item is the argument, if applicable, or None.
    code can be a CodeList instance, which will produce nicer output when
    being printed.
    """
    def __init__(self, code, freevars, args, varargs, varkwargs, newlocals,
                 name, filename, firstlineno, docstring):
        self.code = code
        self.freevars = freevars
        self.args = args
        self.varargs = varargs
        self.varkwargs = varkwargs
        self.newlocals = newlocals
        self.name = name
        self.filename = filename
        self.firstlineno = firstlineno
        self.docstring = docstring

    @staticmethod
    def _findlinestarts(code):
        """Find the offsets in a byte code which are start of lines in the
        source.

        Generate pairs (offset, lineno) as described in Python/compile.c.
        This is a modified version of dis.findlinestarts, which allows
        multiple "line starts" with the same line number.
        """
        # co_lnotab is a byte string of (byte_increment, line_increment)
        # pairs; see CPython's Objects/lnotab_notes.txt.
        byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
        line_increments = [ord(c) for c in code.co_lnotab[1::2]]

        lineno = code.co_firstlineno
        addr = 0
        for byte_incr, line_incr in zip(byte_increments, line_increments):
            if byte_incr:
                yield (addr, lineno)
                addr += byte_incr
            lineno += line_incr
        yield (addr, lineno)

    @classmethod
    def from_code(cls, co):
        """Disassemble a Python code object into a Code object."""
        co_code = co.co_code
        labels = dict((addr, Label()) for addr in findlabels(co_code))
        linestarts = dict(cls._findlinestarts(co))
        cellfree = co.co_cellvars + co.co_freevars

        code = CodeList()
        n = len(co_code)
        i = 0
        extended_arg = 0
        while i < n:
            op = Opcode(ord(co_code[i]))
            if i in labels:
                code.append((labels[i], None))
            if i in linestarts:
                code.append((SetLineno, linestarts[i]))
            i += 1
            if op in hascode:
                # MAKE_FUNCTION/MAKE_CLOSURE: recursively disassemble the
                # code-object constant that must precede it.
                lastop, lastarg = code[-1]
                if lastop != LOAD_CONST:
                    raise ValueError(
                        "%s should be preceded by LOAD_CONST code" % op)
                code[-1] = (LOAD_CONST, Code.from_code(lastarg))
            if op not in hasarg:
                code.append((op, None))
            else:
                # Arguments are little-endian 16-bit; EXTENDED_ARG supplies
                # the high 16 bits of the following opcode's argument.
                arg = ord(co_code[i]) + ord(co_code[i+1])*256 + extended_arg
                extended_arg = 0
                i += 2
                if op == opcode.EXTENDED_ARG:
                    extended_arg = arg << 16
                elif op in hasconst:
                    code.append((op, co.co_consts[arg]))
                elif op in hasname:
                    code.append((op, co.co_names[arg]))
                elif op in hasjabs:
                    code.append((op, labels[arg]))
                elif op in hasjrel:
                    # Relative jumps are measured from the next instruction.
                    code.append((op, labels[i + arg]))
                elif op in haslocal:
                    code.append((op, co.co_varnames[arg]))
                elif op in hascompare:
                    code.append((op, cmp_op[arg]))
                elif op in hasfree:
                    code.append((op, cellfree[arg]))
                else:
                    code.append((op, arg))

        varargs = bool(co.co_flags & CO_VARARGS)
        varkwargs = bool(co.co_flags & CO_VARKEYWORDS)
        newlocals = bool(co.co_flags & CO_NEWLOCALS)
        args = co.co_varnames[:co.co_argcount + varargs + varkwargs]
        if co.co_consts and isinstance(co.co_consts[0], basestring):
            docstring = co.co_consts[0]
        else:
            docstring = None
        return cls(code = code,
                   freevars = co.co_freevars,
                   args = args,
                   varargs = varargs,
                   varkwargs = varkwargs,
                   newlocals = newlocals,
                   name = co.co_name,
                   filename = co.co_filename,
                   firstlineno = co.co_firstlineno,
                   docstring = docstring,
                   )

    def __eq__(self, other):
        if (self.freevars != other.freevars or
            self.args != other.args or
            self.varargs != other.varargs or
            self.varkwargs != other.varkwargs or
            self.newlocals != other.newlocals or
            self.name != other.name or
            self.filename != other.filename or
            self.firstlineno != other.firstlineno or
            self.docstring != other.docstring or
            len(self.code) != len(other.code)
            ):
            return False

        # Compare code. This isn't trivial because labels should be matching,
        # not equal.
        labelmapping = {}
        for (op1, arg1), (op2, arg2) in itertools.izip(self.code, other.code):
            if isinstance(op1, Label):
                if labelmapping.setdefault(op1, op2) is not op2:
                    return False
            else:
                if op1 != op2:
                    return False
                if op1 in hasjump:
                    if labelmapping.setdefault(arg1, arg2) is not arg2:
                        return False
                elif op1 in hasarg:
                    if arg1 != arg2:
                        return False
        return True

    def _compute_flags(self):
        # Derive the co_flags bits that can be recomputed from the code list.
        opcodes = set(op for op, arg in self.code if isopcode(op))

        optimized = (STORE_NAME not in opcodes and
                     LOAD_NAME not in opcodes and
                     DELETE_NAME not in opcodes)
        generator = (YIELD_VALUE in opcodes)
        nofree = not (opcodes.intersection(hasfree))

        flags = 0
        if optimized:
            flags |= CO_OPTIMIZED
        if self.newlocals:
            flags |= CO_NEWLOCALS
        if self.varargs:
            flags |= CO_VARARGS
        if self.varkwargs:
            flags |= CO_VARKEYWORDS
        if generator:
            flags |= CO_GENERATOR
        if nofree:
            flags |= CO_NOFREE
        return flags

    def _compute_stacksize(self):
        """Get a code list, compute its maximal stack usage."""
        # This is done by scanning the code, and computing for each opcode
        # the stack state at the opcode.
        code = self.code

        # A mapping from labels to their positions in the code list
        label_pos = dict((op, pos)
                         for pos, (op, arg) in enumerate(code)
                         if isinstance(op, Label))

        # sf_targets are the targets of SETUP_FINALLY opcodes. They are
        # recorded because they have special stack behaviour. If an exception
        # was raised in the block pushed by a SETUP_FINALLY opcode, the block
        # is popped and 3 objects are pushed. On return or continue, the
        # block is popped and 2 objects are pushed. If nothing happened, the
        # block is popped by a POP_BLOCK opcode and 1 object is pushed by a
        # (LOAD_CONST, None) operation.
        #
        # Our solution is to record the stack state of SETUP_FINALLY targets
        # as having 3 objects pushed, which is the maximum. However, to make
        # stack recording consistent, the get_next_stacks function will
        # always yield the stack state of the target as if 1 object was
        # pushed, but this will be corrected in the actual stack recording.
        sf_targets = set(label_pos[arg]
                         for op, arg in code
                         if op == SETUP_FINALLY)

        # What we compute - for each opcode, its stack state, as an n-tuple.
        # n is the number of blocks pushed. For each block, we record the
        # number of objects pushed.
        stacks = [None] * len(code)

        def get_next_stacks(pos, curstack):
            """Get a code position and the stack state before the operation
            was done, and yield pairs (pos, curstack) for the next positions
            to be explored - those are the positions to which you can get
            from the given (pos, curstack).

            If the given position was already explored, nothing will be
            yielded.
            """
            op, arg = code[pos]

            if isinstance(op, Label):
                # We should check if we already reached a node only if it is
                # a label.
                if pos in sf_targets:
                    # Correction for SETUP_FINALLY targets: see comment above.
                    curstack = curstack[:-1] + (curstack[-1] + 2,)
                if stacks[pos] is None:
                    stacks[pos] = curstack
                else:
                    if stacks[pos] != curstack:
                        raise ValueError("Inconsistent code")
                    return

            def newstack(n):
                # Return a new stack, modified by adding n elements to the
                # last block
                if curstack[-1] + n < 0:
                    raise ValueError("Popped a non-existing element")
                return curstack[:-1] + (curstack[-1]+n,)

            if not isopcode(op):
                # label or SetLineno - just continue to next line
                yield pos+1, curstack

            elif op in (STOP_CODE, RETURN_VALUE, RAISE_VARARGS):
                # No place in particular to continue to
                pass

            elif op == MAKE_CLOSURE and python_version == '2.4':
                # This is only relevant in Python 2.4 - in Python 2.5 the
                # stack effect of MAKE_CLOSURE can be calculated from the
                # arg. In Python 2.4, it depends on the number of freevars of
                # TOS, which should be a code object.
                if pos == 0:
                    raise ValueError("MAKE_CLOSURE can't be the first opcode")
                lastop, lastarg = code[pos-1]
                if lastop != LOAD_CONST:
                    raise ValueError(
                        "MAKE_CLOSURE should come after a LOAD_CONST op")
                try:
                    nextrapops = len(lastarg.freevars)
                except AttributeError:
                    try:
                        nextrapops = len(lastarg.co_freevars)
                    except AttributeError:
                        raise ValueError(
                            "MAKE_CLOSURE preceding const should "
                            "be a code or a Code object")

                yield pos+1, newstack(-arg-nextrapops)

            elif op not in hasflow:
                # Simple change of stack
                pop, push = getse(op, arg)
                yield pos+1, newstack(push - pop)

            elif op in (JUMP_FORWARD, JUMP_ABSOLUTE):
                # One possibility for a jump
                yield label_pos[arg], curstack

            elif python_version < '2.7' and op in (JUMP_IF_FALSE,
                                                   JUMP_IF_TRUE):
                # Two possibilities for a jump
                yield label_pos[arg], curstack
                yield pos+1, curstack

            elif python_version >= '2.7' and op in (POP_JUMP_IF_FALSE,
                                                    POP_JUMP_IF_TRUE):
                # Two possibilities for a jump
                yield label_pos[arg], newstack(-1)
                yield pos+1, newstack(-1)

            elif python_version >= '2.7' and op in (JUMP_IF_TRUE_OR_POP,
                                                    JUMP_IF_FALSE_OR_POP):
                # Two possibilities for a jump
                yield label_pos[arg], curstack
                yield pos+1, newstack(-1)

            elif op == FOR_ITER:
                # FOR_ITER pushes next(TOS) on success, and pops TOS and
                # jumps on failure
                yield label_pos[arg], newstack(-1)
                yield pos+1, newstack(1)

            elif op == BREAK_LOOP:
                # BREAK_LOOP jumps to a place specified on block creation, so
                # it is ignored here
                pass

            elif op == CONTINUE_LOOP:
                # CONTINUE_LOOP jumps to the beginning of a loop which should
                # already ave been discovered, but we verify anyway.
                # It pops a block.
                if python_version == '2.6':
                    pos, stack = label_pos[arg], curstack[:-1]
                    if stacks[pos] != stack: #this could be a loop with a 'with' inside
                        yield pos, stack[:-1] + (stack[-1]-1,)
                    else:
                        yield pos, stack
                else:
                    yield label_pos[arg], curstack[:-1]

            elif op == SETUP_LOOP:
                # We continue with a new block.
                # On break, we jump to the label and return to current stack
                # state.
                yield label_pos[arg], curstack
                yield pos+1, curstack + (0,)

            elif op == SETUP_EXCEPT:
                # We continue with a new block.
                # On exception, we jump to the label with 3 extra objects on
                # stack
                yield label_pos[arg], newstack(3)
                yield pos+1, curstack + (0,)

            elif op == SETUP_FINALLY:
                # We continue with a new block.
                # On exception, we jump to the label with 3 extra objects on
                # stack, but to keep stack recording consistent, we behave as
                # if we add only 1 object. Extra 2 will be added to the
                # actual recording.
                yield label_pos[arg], newstack(1)
                yield pos+1, curstack + (0,)

            elif python_version == '2.7' and op == SETUP_WITH:
                yield label_pos[arg], curstack
                yield pos+1, newstack(-1) + (1,)

            elif op == POP_BLOCK:
                # Just pop the block
                yield pos+1, curstack[:-1]

            elif op == END_FINALLY:
                # Since stack recording of SETUP_FINALLY targets is of 3
                # pushed objects (as when an exception is raised), we pop 3
                # objects.
                yield pos+1, newstack(-3)

            elif op == WITH_CLEANUP:
                # Since WITH_CLEANUP is always found after SETUP_FINALLY
                # targets, and the stack recording is that of a raised
                # exception, we can simply pop 1 object and let END_FINALLY
                # pop the remaining 3.
                if python_version == '2.7':
                    yield pos+1, newstack(2)
                else:
                    yield pos+1, newstack(-1)

            else:
                assert False, "Unhandled opcode: %r" % op

        # Now comes the calculation: open_positions holds positions which
        # are yet to be explored. In each step we take one open position, and
        # explore it by adding the positions to which you can get from it,
        # to open_positions. On the way, we update maxsize.
        # open_positions is a list of tuples: (pos, stack state)
        maxsize = 0
        open_positions = [(0, (0,))]
        while open_positions:
            pos, curstack = open_positions.pop()
            maxsize = max(maxsize, sum(curstack))
            open_positions.extend(get_next_stacks(pos, curstack))

        return maxsize

    def to_code(self):
        """Assemble a Python code object from a Code object."""
        co_argcount = len(self.args) - self.varargs - self.varkwargs
        co_stacksize = self._compute_stacksize()
        co_flags = self._compute_flags()

        co_consts = [self.docstring]
        co_names = []
        co_varnames = list(self.args)

        co_freevars = tuple(self.freevars)

        # We find all cellvars beforehand, for two reasons:
        # 1. We need the number of them to construct the numeric argument
        #    for ops in "hasfree".
        # 2. We need to put arguments which are cell vars in the beginning
        #    of co_cellvars
        cellvars = set(arg for op, arg in self.code
                       if isopcode(op) and op in hasfree
                       and arg not in co_freevars)
        co_cellvars = [x for x in self.args if x in cellvars]

        def index(seq, item, eq=operator.eq, can_append=True):
            """Find the index of item in a sequence and return it.
            If it is not found in the sequence, and can_append is True,
            it is appended to the sequence.

            eq is the equality operator to use.
            """
            for i, x in enumerate(seq):
                if eq(x, item):
                    return i
            else:
                if can_append:
                    seq.append(item)
                    return len(seq) - 1
                else:
                    raise IndexError("Item not found")

        # List of tuples (pos, label) to be filled later
        jumps = []
        # A mapping from a label to its position
        label_pos = {}
        # Last SetLineno
        lastlineno = self.firstlineno
        lastlinepos = 0

        co_code = array('B')
        co_lnotab = array('B')
        for i, (op, arg) in enumerate(self.code):
            if isinstance(op, Label):
                label_pos[op] = len(co_code)

            elif op is SetLineno:
                # Emit (byte_increment, line_increment) pairs into co_lnotab,
                # splitting increments larger than 255 as compile.c does.
                incr_lineno = arg - lastlineno
                incr_pos = len(co_code) - lastlinepos
                lastlineno = arg
                lastlinepos = len(co_code)

                if incr_lineno == 0 and incr_pos == 0:
                    co_lnotab.append(0)
                    co_lnotab.append(0)
                else:
                    while incr_pos > 255:
                        co_lnotab.append(255)
                        co_lnotab.append(0)
                        incr_pos -= 255
                    while incr_lineno > 255:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(255)
                        incr_pos = 0
                        incr_lineno -= 255
                    if incr_pos or incr_lineno:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(incr_lineno)

            elif op == opcode.EXTENDED_ARG:
                raise ValueError("EXTENDED_ARG not supported in Code objects")

            elif not op in hasarg:
                co_code.append(op)

            else:
                if op in hasconst:
                    if isinstance(arg, Code) and i < len(self.code)-1 and \
                       self.code[i+1][0] in hascode:
                        # Recursively assemble nested Code objects consumed
                        # by MAKE_FUNCTION/MAKE_CLOSURE.
                        arg = arg.to_code()
                    arg = index(co_consts, arg, operator.is_)
                elif op in hasname:
                    arg = index(co_names, arg)
                elif op in hasjump:
                    # arg will be filled later
                    jumps.append((len(co_code), arg))
                    arg = 0
                elif op in haslocal:
                    arg = index(co_varnames, arg)
                elif op in hascompare:
                    arg = index(cmp_op, arg, can_append=False)
                elif op in hasfree:
                    try:
                        # Free variables are numbered after all cell vars.
                        arg = index(co_freevars, arg, can_append=False) \
                              + len(cellvars)
                    except IndexError:
                        arg = index(co_cellvars, arg)
                else:
                    # arg is ok
                    pass

                if arg > 0xFFFF:
                    co_code.append(opcode.EXTENDED_ARG)
                    co_code.append((arg >> 16) & 0xFF)
                    co_code.append((arg >> 24) & 0xFF)
                co_code.append(op)
                co_code.append(arg & 0xFF)
                co_code.append((arg >> 8) & 0xFF)

        for pos, label in jumps:
            jump = label_pos[label]
            if co_code[pos] in hasjrel:
                # Relative jumps count from the end of the 3-byte jump op.
                jump -= pos+3
            if jump > 0xFFFF:
                raise NotImplementedError("Extended jumps not implemented")
            co_code[pos+1] = jump & 0xFF
            co_code[pos+2] = (jump >> 8) & 0xFF

        co_code = co_code.tostring()
        co_lnotab = co_lnotab.tostring()

        co_consts = tuple(co_consts)
        co_names = tuple(co_names)
        co_varnames = tuple(co_varnames)
        co_nlocals = len(co_varnames)
        co_cellvars = tuple(co_cellvars)

        return types.CodeType(co_argcount, co_nlocals, co_stacksize, co_flags,
                              co_code, co_consts, co_names, co_varnames,
                              self.filename, self.name, self.firstlineno,
                              co_lnotab, co_freevars, co_cellvars)

def printcodelist(codelist, to=sys.stdout):
    """Get a code list. Print it nicely."""
    # First pass: map each Label to the index of the instruction it precedes,
    # so jump arguments can be printed as "to N".
    labeldict = {}
    pendinglabels = []
    for i, (op, arg) in enumerate(codelist):
        if isinstance(op, Label):
            pendinglabels.append(op)
        elif op is SetLineno:
            pass
        else:
            while pendinglabels:
                labeldict[pendinglabels.pop()] = i

    lineno = None
    islabel = False
    for i, (op, arg) in enumerate(codelist):
        if op is SetLineno:
            lineno = arg
            print >> to
            continue

        if isinstance(op, Label):
            islabel = True
            continue

        if lineno is None:
            linenostr = ''
        else:
            linenostr = str(lineno)
            lineno = None

        if islabel:
            islabelstr = '>>'
            islabel = False
        else:
            islabelstr = ''

        if op in hasconst:
            argstr = repr(arg)
        elif op in hasjump:
            try:
                argstr = 'to ' + str(labeldict[arg])
            except KeyError:
                argstr = repr(arg)
        elif op in hasarg:
            argstr = str(arg)
        else:
            argstr = ''

        print >> to, '%3s %2s %4d %-20s %s' % (
            linenostr,
            islabelstr,
            i,
            op,
            argstr)

def recompile(filename):
    """Create a .pyc by disassembling the file and assembling it again,
    printing a message that the reassembled file was loaded."""
    # Most of the code here based on the compile.py module.
    import os
    import imp
    import marshal
    import struct

    f = open(filename, 'U')
    try:
        timestamp = long(os.fstat(f.fileno()).st_mtime)
    except AttributeError:
        timestamp = long(os.stat(filename).st_mtime)
    codestring = f.read()
    f.close()
    if codestring and codestring[-1] != '\n':
        codestring = codestring + '\n'
    try:
        codeobject = compile(codestring, filename, 'exec')
    except SyntaxError:
        print >> sys.stderr, "Skipping %s - syntax error." % filename
        return
    cod = Code.from_code(codeobject)
    message = "reassembled %r imported.\n" % filename
    # Prepend bytecode equivalent to:
    # __import__('sys').stderr.write(message)
    cod.code[:0] = [ # __import__('sys').stderr.write(message)
        (LOAD_GLOBAL, '__import__'),
        (LOAD_CONST, 'sys'),
        (CALL_FUNCTION, 1),
        (LOAD_ATTR, 'stderr'),
        (LOAD_ATTR, 'write'),
        (LOAD_CONST, message),
        (CALL_FUNCTION, 1),
        (POP_TOP, None),
        ]
    codeobject2 = cod.to_code()
    fc = open(filename+'c', 'wb')
    # Write a zero magic first; the real magic is written only after the
    # marshal dump succeeds, so a partial file is never importable.
    fc.write('\0\0\0\0')
    fc.write(struct.pack('<l', timestamp))
    marshal.dump(codeobject2, fc)
    fc.flush()
    fc.seek(0, 0)
    fc.write(imp.get_magic())
    fc.close()

def recompile_all(path):
    """recursively recompile all .py files in the directory"""
    import os
    if os.path.isdir(path):
        for root, dirs, files in os.walk(path):
            for name in files:
                if name.endswith('.py'):
                    filename = os.path.abspath(os.path.join(root, name))
                    print >> sys.stderr, filename
                    recompile(filename)
    else:
        filename = os.path.abspath(path)
        recompile(filename)

def main():
    import os
    if len(sys.argv) != 2 or not os.path.exists(sys.argv[1]):
        print("""\
Usage: %s dir

Search recursively for *.py in the given directory, disassemble and assemble
them, adding a note when each file is imported.

Use it to test byteplay like this:
> byteplay.py Lib
> make test

Some FutureWarnings may be raised, but that's expected.

Tip: before doing this, check to see which tests fail even without
reassembling them...
""" % sys.argv[0])
        sys.exit(1)
    recompile_all(sys.argv[1])

if __name__ == '__main__':
    main()
{ "content_hash": "c66197fdf8d647f0b1b00e0acb141229", "timestamp": "", "source": "github", "line_count": 903, "max_line_length": 95, "avg_line_length": 36.46843853820598, "alnum_prop": 0.5346026540341927, "repo_name": "rootpy/rootpy", "id": "c73d5a979884c89e99533e4d3e8a08db08218a83", "size": "33874", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "rootpy/extern/byteplay2/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "109" }, { "name": "Makefile", "bytes": "2778" }, { "name": "Python", "bytes": "861605" }, { "name": "Shell", "bytes": "3089" } ], "symlink_target": "" }
from oslo_config import cfg from oslo_utils import timeutils import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder import exception from cinder.i18n import _ from cinder.openstack.common import log as logging from cinder import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'services') class ServicesIndexTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('services') elem = xmlutil.SubTemplateElement(root, 'service', selector='services') elem.set('binary') elem.set('host') elem.set('zone') elem.set('status') elem.set('state') elem.set('update_at') elem.set('disabled_reason') return xmlutil.MasterTemplate(root, 1) class ServicesUpdateTemplate(xmlutil.TemplateBuilder): def construct(self): # TODO(uni): template elements of 'host', 'service' and 'disabled' # should be deprecated to make ServicesUpdateTemplate consistent # with ServicesIndexTemplate. Still keeping it here for API # compatibility sake. root = xmlutil.TemplateElement('host') root.set('host') root.set('service') root.set('disabled') root.set('binary') root.set('status') root.set('disabled_reason') return xmlutil.MasterTemplate(root, 1) class ServiceController(wsgi.Controller): def __init__(self, ext_mgr=None): self.ext_mgr = ext_mgr super(ServiceController, self).__init__() @wsgi.serializers(xml=ServicesIndexTemplate) def index(self, req): """Return a list of all running services. Filter by host & service name. """ context = req.environ['cinder.context'] authorize(context) detailed = self.ext_mgr.is_loaded('os-extended-services') now = timeutils.utcnow() services = db.service_get_all(context) host = '' if 'host' in req.GET: host = req.GET['host'] service = '' if 'service' in req.GET: service = req.GET['service'] LOG.deprecated(_("Query by service parameter is deprecated. 
" "Please use binary parameter instead.")) binary = '' if 'binary' in req.GET: binary = req.GET['binary'] if host: services = [s for s in services if s['host'] == host] # NOTE(uni): deprecating service request key, binary takes precedence binary_key = binary or service if binary_key: services = [s for s in services if s['binary'] == binary_key] svcs = [] for svc in services: delta = now - (svc['updated_at'] or svc['created_at']) alive = abs(delta.total_seconds()) <= CONF.service_down_time art = (alive and "up") or "down" active = 'enabled' if svc['disabled']: active = 'disabled' ret_fields = {'binary': svc['binary'], 'host': svc['host'], 'zone': svc['availability_zone'], 'status': active, 'state': art, 'updated_at': svc['updated_at']} if detailed: ret_fields['disabled_reason'] = svc['disabled_reason'] svcs.append(ret_fields) return {'services': svcs} def _is_valid_as_reason(self, reason): if not reason: return False try: utils.check_string_length(reason.strip(), 'Disabled reason', min_length=1, max_length=255) except exception.InvalidInput: return False return True @wsgi.serializers(xml=ServicesUpdateTemplate) def update(self, req, id, body): """Enable/Disable scheduling for a service.""" context = req.environ['cinder.context'] authorize(context) ext_loaded = self.ext_mgr.is_loaded('os-extended-services') ret_val = {} if id == "enable": disabled = False status = "enabled" if ext_loaded: ret_val['disabled_reason'] = None elif (id == "disable" or (id == "disable-log-reason" and ext_loaded)): disabled = True status = "disabled" else: raise webob.exc.HTTPNotFound(explanation=_("Unknown action")) try: host = body['host'] except (TypeError, KeyError): raise webob.exc.HTTPBadRequest() ret_val['disabled'] = disabled if id == "disable-log-reason" and ext_loaded: reason = body.get('disabled_reason') if not self._is_valid_as_reason(reason): msg = _('Disabled reason contains invalid characters ' 'or is too long') raise webob.exc.HTTPBadRequest(explanation=msg) 
ret_val['disabled_reason'] = reason # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. service = body.get('service', '') binary = body.get('binary', '') binary_key = binary or service if not binary_key: raise webob.exc.HTTPBadRequest() try: svc = db.service_get_by_args(context, host, binary_key) if not svc: raise webob.exc.HTTPNotFound(explanation=_('Unknown service')) db.service_update(context, svc['id'], ret_val) except exception.ServiceNotFound: raise webob.exc.HTTPNotFound(explanation=_("service not found")) ret_val.update({'host': host, 'service': service, 'binary': binary, 'status': status}) return ret_val class Services(extensions.ExtensionDescriptor): """Services support.""" name = "Services" alias = "os-services" namespace = "http://docs.openstack.org/volume/ext/services/api/v2" updated = "2012-10-28T00:00:00-00:00" def get_resources(self): resources = [] controller = ServiceController(self.ext_mgr) resource = extensions.ResourceExtension('os-services', controller) resources.append(resource) return resources
{ "content_hash": "834919f0f99d5e4142d75aad7b5e8f2d", "timestamp": "", "source": "github", "line_count": 186, "max_line_length": 79, "avg_line_length": 34.68279569892473, "alnum_prop": 0.5825453418074718, "repo_name": "blueboxgroup/cinder", "id": "c0971bec816d8ae184679a9c59c77284a313a279", "size": "7077", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "cinder/api/contrib/services.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "3322" }, { "name": "Python", "bytes": "10024269" }, { "name": "Shell", "bytes": "9905" } ], "symlink_target": "" }
""" Set of "markup" template filters for Django. These filters transform plain text markup syntaxes to HTML; currently there is support for: * Textile, which requires the PyTextile library available at http://loopcore.com/python-textile/ * Markdown, which requires the Python-markdown library from http://www.freewisdom.org/projects/python-markdown * reStructuredText, which requires docutils from http://docutils.sf.net/ """ from django import template from django.conf import settings from django.utils.encoding import force_bytes, force_text from django.utils.safestring import mark_safe register = template.Library() @register.filter(is_safe=True) def textile(value): try: import textile except ImportError: if settings.DEBUG: raise template.TemplateSyntaxError("Error in 'textile' filter: The Python textile library isn't installed.") return force_text(value) else: return mark_safe(force_text(textile.textile(force_bytes(value), encoding='utf-8', output='utf-8'))) @register.filter(is_safe=True) def markdown(value, arg=''): """ Runs Markdown over a given value, optionally using various extensions python-markdown supports. Syntax:: {{ value|markdown:"extension1_name,extension2_name..." }} To enable safe mode, which strips raw HTML and only returns HTML generated by actual Markdown syntax, pass "safe" as the first extension in the list. If the version of Markdown in use does not support extensions, they will be silently ignored. 
""" import warnings warnings.warn('The markdown filter has been deprecated', category=DeprecationWarning) try: import markdown except ImportError: if settings.DEBUG: raise template.TemplateSyntaxError("Error in 'markdown' filter: The Python markdown library isn't installed.") return force_text(value) else: markdown_vers = getattr(markdown, "version_info", 0) if markdown_vers < (2, 1): if settings.DEBUG: raise template.TemplateSyntaxError( "Error in 'markdown' filter: Django does not support versions of the Python markdown library < 2.1.") return force_text(value) else: extensions = [e for e in arg.split(",") if e] if extensions and extensions[0] == "safe": extensions = extensions[1:] return mark_safe(markdown.markdown( force_text(value), extensions=extensions, safe_mode=True, enable_attributes=False)) else: return mark_safe(markdown.markdown( force_text(value), extensions=extensions, safe_mode=False)) @register.filter(is_safe=True) def restructuredtext(value): import warnings warnings.warn('The restructuredtext filter has been deprecated', category=DeprecationWarning) try: from docutils.core import publish_parts except ImportError: if settings.DEBUG: raise template.TemplateSyntaxError("Error in 'restructuredtext' filter: The Python docutils library isn't installed.") return force_text(value) else: docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {}) parts = publish_parts(source=force_bytes(value), writer_name="html4css1", settings_overrides=docutils_settings) return mark_safe(force_text(parts["fragment"]))
{ "content_hash": "8fa8700a4e89940e5d74efabfec09f42", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 130, "avg_line_length": 39.12222222222222, "alnum_prop": 0.672536211303607, "repo_name": "fcurella/django-markup-deprecated", "id": "9fc5ed2f628fd43f1e9bff199c2cadb1da7c51ab", "size": "3521", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "markup_deprecated/templatetags/markup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "9561" } ], "symlink_target": "" }
""" Views for managing Nova instances. """ import logging from django import http from django import template from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core import validators from django.shortcuts import redirect, render_to_response from django_openstack import api from django_openstack import forms from novaclient import exceptions as novaclient_exceptions LOG = logging.getLogger('django_openstack.dash.views.keypairs') class DeleteKeypair(forms.SelfHandlingForm): keypair_id = forms.CharField(widget=forms.HiddenInput()) def handle(self, request, data): try: LOG.info('Deleting keypair "%s"' % data['keypair_id']) api.keypair_delete(request, data['keypair_id']) messages.info(request, 'Successfully deleted keypair: %s' \ % data['keypair_id']) except novaclient_exceptions.ClientException, e: LOG.exception("ClientException in DeleteKeypair") messages.error(request, 'Error deleting keypair: %s' % e.message) return redirect(request.build_absolute_uri()) class CreateKeypair(forms.SelfHandlingForm): name = forms.CharField(max_length="20", label="Keypair Name", validators=[validators.RegexValidator('\w+')]) def handle(self, request, data): try: LOG.info('Creating keypair "%s"' % data['name']) keypair = api.keypair_create(request, data['name']) response = http.HttpResponse(mimetype='application/binary') response['Content-Disposition'] = \ 'attachment; filename=%s.pem' % keypair.name response.write(keypair.private_key) return response except novaclient_exceptions.ClientException, e: LOG.exception("ClientException in CreateKeyPair") messages.error(request, 'Error Creating Keypair: %s' % e.message) return redirect(request.build_absolute_uri()) class ImportKeypair(forms.SelfHandlingForm): name = forms.CharField(max_length="20", label="Keypair Name", validators=[validators.RegexValidator('\w+')]) public_key = forms.CharField(label='Public Key', widget=forms.Textarea) def handle(self, request, data): try: 
LOG.info('Importing keypair "%s"' % data['name']) api.keypair_import(request, data['name'], data['public_key']) messages.success(request, 'Successfully imported public key: %s' % data['name']) return redirect('dash_keypairs', request.user.tenant_id) except novaclient_exceptions.ClientException, e: LOG.exception("ClientException in ImportKeypair") messages.error(request, 'Error Importing Keypair: %s' % e.message) return redirect(request.build_absolute_uri()) @login_required def index(request, tenant_id): delete_form, handled = DeleteKeypair.maybe_handle(request) if handled: return handled try: keypairs = api.keypair_list(request) except novaclient_exceptions.ClientException, e: keypairs = [] LOG.exception("ClientException in keypair index") messages.error(request, 'Error fetching keypairs: %s' % e.message) return render_to_response('django_openstack/dash/keypairs/index.html', { 'keypairs': keypairs, 'delete_form': delete_form, }, context_instance=template.RequestContext(request)) @login_required def create(request, tenant_id): form, handled = CreateKeypair.maybe_handle(request) if handled: return handled return render_to_response('django_openstack/dash/keypairs/create.html', { 'create_form': form, }, context_instance=template.RequestContext(request)) @login_required def import_keypair(request, tenant_id): form, handled = ImportKeypair.maybe_handle(request) if handled: return handled return render_to_response('django_openstack/dash/keypairs/import.html', { 'create_form': form, }, context_instance=template.RequestContext(request))
{ "content_hash": "ec9217b964b3d19be5bbfc8253a5e141", "timestamp": "", "source": "github", "line_count": 114, "max_line_length": 78, "avg_line_length": 36.62280701754386, "alnum_prop": 0.6634730538922156, "repo_name": "cloud-smokers/openstack-dashboard", "id": "5bdb7536f6ca3388825a5d77ba147c8e84519f62", "size": "4984", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "django-openstack/django_openstack/dash/views/keypairs.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "108904" }, { "name": "Python", "bytes": "438543" }, { "name": "Shell", "bytes": "4828" } ], "symlink_target": "" }
""" Read SAS7BDAT files Based on code written by Jared Hobbs: https://bitbucket.org/jaredhobbs/sas7bdat See also: https://github.com/BioStatMatt/sas7bdat Partial documentation of the file format: https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ import pandas as pd from pandas import compat from pandas.io.common import get_filepath_or_buffer, BaseIterator from pandas.errors import EmptyDataError import numpy as np import struct import pandas.io.sas.sas_constants as const from pandas.io.sas._sas import Parser class _subheader_pointer(object): pass class _column(object): pass # SAS7BDAT represents a SAS data file in SAS7BDAT format. class SAS7BDATReader(BaseIterator): """ Read SAS files in SAS7BDAT format. Parameters ---------- path_or_buf : path name or buffer Name of SAS file or file-like object pointing to SAS file contents. index : column identifier, defaults to None Column to use as index. convert_dates : boolean, defaults to True Attempt to convert dates to Pandas datetime values. Note that some rarely used SAS date formats may be unsupported. blank_missing : boolean, defaults to True Convert empty strings to missing values (SAS uses blanks to indicate missing character variables). chunksize : int, defaults to None Return SAS7BDATReader object for iterations, returns chunks with given number of lines. encoding : string, defaults to None String encoding. convert_text : bool, defaults to True If False, text variables are left as raw bytes. convert_header_text : bool, defaults to True If False, header text, including column names, are left as raw bytes. 
""" def __init__(self, path_or_buf, index=None, convert_dates=True, blank_missing=True, chunksize=None, encoding=None, convert_text=True, convert_header_text=True): self.index = index self.convert_dates = convert_dates self.blank_missing = blank_missing self.chunksize = chunksize self.encoding = encoding self.convert_text = convert_text self.convert_header_text = convert_header_text self.default_encoding = "latin-1" self.compression = "" self.column_names_strings = [] self.column_names = [] self.column_types = [] self.column_formats = [] self.columns = [] self._current_page_data_subheader_pointers = [] self._cached_page = None self._column_data_lengths = [] self._column_data_offsets = [] self._current_row_in_file_index = 0 self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) if isinstance(self._path_or_buf, compat.string_types): self._path_or_buf = open(self._path_or_buf, 'rb') self.handle = self._path_or_buf self._get_properties() self._parse_metadata() def close(self): try: self.handle.close() except AttributeError: pass def _get_properties(self): # Check magic number self._path_or_buf.seek(0) self._cached_page = self._path_or_buf.read(288) if self._cached_page[0:len(const.magic)] != const.magic: self.close() raise ValueError("magic number mismatch (not a SAS file?)") # Get alignment information align1, align2 = 0, 0 buf = self._read_bytes(const.align_1_offset, const.align_1_length) if buf == const.u64_byte_checker_value: align2 = const.align_2_value self.U64 = True self._int_length = 8 self._page_bit_offset = const.page_bit_offset_x64 self._subheader_pointer_length = const.subheader_pointer_length_x64 else: self.U64 = False self._page_bit_offset = const.page_bit_offset_x86 self._subheader_pointer_length = const.subheader_pointer_length_x86 self._int_length = 4 buf = self._read_bytes(const.align_2_offset, const.align_2_length) if buf == const.align_1_checker_value: align1 = 
const.align_2_value total_align = align1 + align2 # Get endianness information buf = self._read_bytes(const.endianness_offset, const.endianness_length) if buf == b'\x01': self.byte_order = "<" else: self.byte_order = ">" # Get encoding information buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] if buf in const.encoding_names: self.file_encoding = const.encoding_names[buf] else: self.file_encoding = "unknown (code=%s)" % str(buf) # Get platform information buf = self._read_bytes(const.platform_offset, const.platform_length) if buf == b'1': self.platform = "unix" elif buf == b'2': self.platform = "windows" else: self.platform = "unknown" buf = self._read_bytes(const.dataset_offset, const.dataset_length) self.name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.name = self.name.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.file_type_offset, const.file_type_length) self.file_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.file_type = self.file_type.decode( self.encoding or self.default_encoding) # Timestamp is epoch 01/01/1960 epoch = pd.datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) self.date_created = epoch + pd.to_timedelta(x, unit='s') x = self._read_float(const.date_modified_offset + align1, const.date_modified_length) self.date_modified = epoch + pd.to_timedelta(x, unit='s') self.header_length = self._read_int(const.header_size_offset + align1, const.header_size_length) # Read the rest of the header into cached_page. 
buf = self._path_or_buf.read(self.header_length - 288) self._cached_page += buf if len(self._cached_page) != self.header_length: self.close() raise ValueError("The SAS7BDAT file appears to be truncated.") self._page_length = self._read_int(const.page_size_offset + align1, const.page_size_length) self._page_count = self._read_int(const.page_count_offset + align1, const.page_count_length) buf = self._read_bytes(const.sas_release_offset + total_align, const.sas_release_length) self.sas_release = buf.rstrip(b'\x00 ') if self.convert_header_text: self.sas_release = self.sas_release.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.sas_server_type_offset + total_align, const.sas_server_type_length) self.server_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.server_type = self.server_type.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_version_number_offset + total_align, const.os_version_number_length) self.os_version = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_version = self.os_version.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_name_offset + total_align, const.os_name_length) buf = buf.rstrip(b'\x00 ') if len(buf) > 0: self.os_name = buf.decode(self.encoding or self.default_encoding) else: buf = self._read_bytes(const.os_maker_offset + total_align, const.os_maker_length) self.os_name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_name = self.os_name.decode( self.encoding or self.default_encoding) def __next__(self): da = self.read(nrows=self.chunksize or 1) if da is None: raise StopIteration return da # Read a single float of the given width (4 or 8). 
def _read_float(self, offset, width): if width not in (4, 8): self.close() raise ValueError("invalid float width") buf = self._read_bytes(offset, width) fd = "f" if width == 4 else "d" return struct.unpack(self.byte_order + fd, buf)[0] # Read a single signed integer of the given width (1, 2, 4 or 8). def _read_int(self, offset, width): if width not in (1, 2, 4, 8): self.close() raise ValueError("invalid int width") buf = self._read_bytes(offset, width) it = {1: "b", 2: "h", 4: "l", 8: "q"}[width] iv = struct.unpack(self.byte_order + it, buf)[0] return iv def _read_bytes(self, offset, length): if self._cached_page is None: self._path_or_buf.seek(offset) buf = self._path_or_buf.read(length) if len(buf) < length: self.close() msg = "Unable to read {:d} bytes from file position {:d}." raise ValueError(msg.format(length, offset)) return buf else: if offset + length > len(self._cached_page): self.close() raise ValueError("The cached page is too small.") return self._cached_page[offset:offset + length] def _parse_metadata(self): done = False while not done: self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: self.close() raise ValueError( "Failed to read a meta data page from the SAS file.") done = self._process_page_meta() def _process_page_meta(self): self._read_page_header() pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types if self._current_page_type in pt: self._process_page_metadata() return ((self._current_page_type in [256] + const.page_mix_types) or (self._current_page_data_subheader_pointers is not None)) def _read_page_header(self): bit_offset = self._page_bit_offset tx = const.page_type_offset + bit_offset self._current_page_type = self._read_int(tx, const.page_type_length) tx = const.block_count_offset + bit_offset self._current_page_block_count = self._read_int( tx, const.block_count_length) tx = const.subheader_count_offset + bit_offset 
self._current_page_subheaders_count = ( self._read_int(tx, const.subheader_count_length)) def _process_page_metadata(self): bit_offset = self._page_bit_offset for i in range(self._current_page_subheaders_count): pointer = self._process_subheader_pointers( const.subheader_pointers_offset + bit_offset, i) if pointer.length == 0: continue if pointer.compression == const.truncated_subheader_id: continue subheader_signature = self._read_subheader_signature( pointer.offset) subheader_index = ( self._get_subheader_index(subheader_signature, pointer.compression, pointer.ptype)) self._process_subheader(subheader_index, pointer) def _get_subheader_index(self, signature, compression, ptype): index = const.subheader_signature_to_index.get(signature) if index is None: f1 = ((compression == const.compressed_subheader_id) or (compression == 0)) f2 = (ptype == const.compressed_subheader_type) if (self.compression != "") and f1 and f2: index = const.SASIndex.data_subheader_index else: self.close() raise ValueError("Unknown subheader signature") return index def _process_subheader_pointers(self, offset, subheader_pointer_index): subheader_pointer_length = self._subheader_pointer_length total_offset = (offset + subheader_pointer_length * subheader_pointer_index) subheader_offset = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_length = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_compression = self._read_int(total_offset, 1) total_offset += 1 subheader_type = self._read_int(total_offset, 1) x = _subheader_pointer() x.offset = subheader_offset x.length = subheader_length x.compression = subheader_compression x.ptype = subheader_type return x def _read_subheader_signature(self, offset): subheader_signature = self._read_bytes(offset, self._int_length) return subheader_signature def _process_subheader(self, subheader_index, pointer): offset = pointer.offset length = pointer.length if subheader_index 
== const.SASIndex.row_size_index: processor = self._process_rowsize_subheader elif subheader_index == const.SASIndex.column_size_index: processor = self._process_columnsize_subheader elif subheader_index == const.SASIndex.column_text_index: processor = self._process_columntext_subheader elif subheader_index == const.SASIndex.column_name_index: processor = self._process_columnname_subheader elif subheader_index == const.SASIndex.column_attributes_index: processor = self._process_columnattributes_subheader elif subheader_index == const.SASIndex.format_and_label_index: processor = self._process_format_subheader elif subheader_index == const.SASIndex.column_list_index: processor = self._process_columnlist_subheader elif subheader_index == const.SASIndex.subheader_counts_index: processor = self._process_subheader_counts elif subheader_index == const.SASIndex.data_subheader_index: self._current_page_data_subheader_pointers.append(pointer) return else: raise ValueError("unknown subheader index") processor(offset, length) def _process_rowsize_subheader(self, offset, length): int_len = self._int_length lcs_offset = offset lcp_offset = offset if self.U64: lcs_offset += 682 lcp_offset += 706 else: lcs_offset += 354 lcp_offset += 378 self.row_length = self._read_int( offset + const.row_length_offset_multiplier * int_len, int_len) self.row_count = self._read_int( offset + const.row_count_offset_multiplier * int_len, int_len) self.col_count_p1 = self._read_int( offset + const.col_count_p1_multiplier * int_len, int_len) self.col_count_p2 = self._read_int( offset + const.col_count_p2_multiplier * int_len, int_len) mx = const.row_count_on_mix_page_offset_multiplier * int_len self._mix_page_row_count = self._read_int(offset + mx, int_len) self._lcs = self._read_int(lcs_offset, 2) self._lcp = self._read_int(lcp_offset, 2) def _process_columnsize_subheader(self, offset, length): int_len = self._int_length offset += int_len self.column_count = self._read_int(offset, int_len) if 
(self.col_count_p1 + self.col_count_p2 != self.column_count): print("Warning: column count mismatch (%d + %d != %d)\n", self.col_count_p1, self.col_count_p2, self.column_count) # Unknown purpose def _process_subheader_counts(self, offset, length): pass def _process_columntext_subheader(self, offset, length): offset += self._int_length text_block_size = self._read_int(offset, const.text_block_size_length) buf = self._read_bytes(offset, text_block_size) cname_raw = buf[0:text_block_size].rstrip(b"\x00 ") cname = cname_raw if self.convert_header_text: cname = cname.decode(self.encoding or self.default_encoding) self.column_names_strings.append(cname) if len(self.column_names_strings) == 1: compression_literal = "" for cl in const.compression_literals: if cl in cname_raw: compression_literal = cl self.compression = compression_literal offset -= self._int_length offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) compression_literal = buf.rstrip(b"\x00") if compression_literal == "": self._lcs = 0 offset1 = offset + 32 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif compression_literal == const.rle_compression: offset1 = offset + 40 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif self._lcs > 0: self._lcp = 0 offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcs) self.creator_proc = buf[0:self._lcp] if self.convert_header_text: if hasattr(self, "creator_proc"): self.creator_proc = self.creator_proc.decode( self.encoding or self.default_encoding) def _process_columnname_subheader(self, offset, length): int_len = self._int_length offset += int_len column_name_pointers_count = (length - 2 * int_len - 12) // 8 for i in range(column_name_pointers_count): text_subheader = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_text_subheader_offset 
col_name_offset = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_offset_offset col_name_length = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_length_offset idx = self._read_int( text_subheader, const.column_name_text_subheader_length) col_offset = self._read_int( col_name_offset, const.column_name_offset_length) col_len = self._read_int( col_name_length, const.column_name_length_length) name_str = self.column_names_strings[idx] self.column_names.append(name_str[col_offset:col_offset + col_len]) def _process_columnattributes_subheader(self, offset, length): int_len = self._int_length column_attributes_vectors_count = ( length - 2 * int_len - 12) // (int_len + 8) self.column_types = np.empty( column_attributes_vectors_count, dtype=np.dtype('S1')) self._column_data_lengths = np.empty( column_attributes_vectors_count, dtype=np.int64) self._column_data_offsets = np.empty( column_attributes_vectors_count, dtype=np.int64) for i in range(column_attributes_vectors_count): col_data_offset = (offset + int_len + const.column_data_offset_offset + i * (int_len + 8)) col_data_len = (offset + 2 * int_len + const.column_data_length_offset + i * (int_len + 8)) col_types = (offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)) x = self._read_int(col_data_offset, int_len) self._column_data_offsets[i] = x x = self._read_int(col_data_len, const.column_data_length_length) self._column_data_lengths[i] = x x = self._read_int(col_types, const.column_type_length) if x == 1: self.column_types[i] = b'd' else: self.column_types[i] = b's' def _process_columnlist_subheader(self, offset, length): # unknown purpose pass def _process_format_subheader(self, offset, length): int_len = self._int_length text_subheader_format = ( offset + const.column_format_text_subheader_index_offset + 3 * int_len) col_format_offset = (offset + const.column_format_offset_offset + 3 * int_len) col_format_len = (offset + 
const.column_format_length_offset + 3 * int_len) text_subheader_label = ( offset + const.column_label_text_subheader_index_offset + 3 * int_len) col_label_offset = (offset + const.column_label_offset_offset + 3 * int_len) col_label_len = offset + const.column_label_length_offset + 3 * int_len x = self._read_int(text_subheader_format, const.column_format_text_subheader_index_length) format_idx = min(x, len(self.column_names_strings) - 1) format_start = self._read_int( col_format_offset, const.column_format_offset_length) format_len = self._read_int( col_format_len, const.column_format_length_length) label_idx = self._read_int( text_subheader_label, const.column_label_text_subheader_index_length) label_idx = min(label_idx, len(self.column_names_strings) - 1) label_start = self._read_int( col_label_offset, const.column_label_offset_length) label_len = self._read_int(col_label_len, const.column_label_length_length) label_names = self.column_names_strings[label_idx] column_label = label_names[label_start: label_start + label_len] format_names = self.column_names_strings[format_idx] column_format = format_names[format_start: format_start + format_len] current_column_number = len(self.columns) col = _column() col.col_id = current_column_number col.name = self.column_names[current_column_number] col.label = column_label col.format = column_format col.ctype = self.column_types[current_column_number] col.length = self._column_data_lengths[current_column_number] self.column_formats.append(column_format) self.columns.append(col) def read(self, nrows=None): if (nrows is None) and (self.chunksize is not None): nrows = self.chunksize elif nrows is None: nrows = self.row_count if len(self.column_types) == 0: self.close() raise EmptyDataError("No columns to parse from file") if self._current_row_in_file_index >= self.row_count: return None m = self.row_count - self._current_row_in_file_index if nrows > m: nrows = m nd = (self.column_types == b'd').sum() ns = (self.column_types == 
b's').sum() self._string_chunk = np.empty((ns, nrows), dtype=np.object) self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8) self._current_row_in_chunk_index = 0 p = Parser(self) p.read(nrows) rslt = self._chunk_to_dataframe() if self.index is not None: rslt = rslt.set_index(self.index) return rslt def _read_next_page(self): self._current_page_data_subheader_pointers = [] self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: self.close() msg = ("failed to read complete page from file " "(read {:d} of {:d} bytes)") raise ValueError(msg.format(len(self._cached_page), self._page_length)) self._read_page_header() if self._current_page_type == const.page_meta_type: self._process_page_metadata() pt = [const.page_meta_type, const.page_data_type] pt += [const.page_mix_types] if self._current_page_type not in pt: return self._read_next_page() return False def _chunk_to_dataframe(self): n = self._current_row_in_chunk_index m = self._current_row_in_file_index ix = range(m - n, m) rslt = pd.DataFrame(index=ix) js, jb = 0, 0 for j in range(self.column_count): name = self.column_names[j] if self.column_types[j] == b'd': rslt[name] = self._byte_chunk[jb, :].view( dtype=self.byte_order + 'd') rslt[name] = np.asarray(rslt[name], dtype=np.float64) if self.convert_dates: unit = None if self.column_formats[j] in const.sas_date_formats: unit = 'd' elif self.column_formats[j] in const.sas_datetime_formats: unit = 's' if unit: rslt[name] = pd.to_datetime(rslt[name], unit=unit, origin="1960-01-01") jb += 1 elif self.column_types[j] == b's': rslt[name] = self._string_chunk[js, :] if self.convert_text and (self.encoding is not None): rslt[name] = rslt[name].str.decode( self.encoding or self.default_encoding) if self.blank_missing: ii = rslt[name].str.len() == 0 rslt.loc[ii, name] = np.nan js += 1 else: self.close() raise ValueError("unknown column type %s" % self.column_types[j]) 
return rslt
{ "content_hash": "6f83c415eca6744361b221705d9153db", "timestamp": "", "source": "github", "line_count": 687, "max_line_length": 103, "avg_line_length": 39.985443959243085, "alnum_prop": 0.5664361121223153, "repo_name": "louispotok/pandas", "id": "4d187a828285981ecd5d338cda903156c25d4c1f", "size": "27470", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "pandas/io/sas/sas7bdat.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "3847" }, { "name": "C", "bytes": "432930" }, { "name": "C++", "bytes": "17193" }, { "name": "HTML", "bytes": "551714" }, { "name": "Makefile", "bytes": "563" }, { "name": "PowerShell", "bytes": "2970" }, { "name": "Python", "bytes": "13452425" }, { "name": "Shell", "bytes": "25056" }, { "name": "Smarty", "bytes": "2045" } ], "symlink_target": "" }
# Template-tag tests for ``{% static %}``, ``{% get_static_prefix %}`` and
# ``{% get_media_prefix %}`` from ``django.templatetags.static``.
from urllib.parse import urljoin

from django.conf import settings
from django.template import TemplateSyntaxError
from django.templatetags.static import StaticNode
from django.test import SimpleTestCase, override_settings

from ..utils import setup


@override_settings(INSTALLED_APPS=[], MEDIA_URL='media/', STATIC_URL='static/')
class StaticTagTests(SimpleTestCase):
    """Render the static/media template tags and check their output."""

    # Make ``{% load static %}`` resolvable by the test engine without
    # relying on INSTALLED_APPS (cleared by the override above).
    libraries = {'static': 'django.templatetags.static'}

    @setup({'static-prefixtag01': '{% load static %}{% get_static_prefix %}'})
    def test_static_prefixtag01(self):
        # ``get_static_prefix`` outputs STATIC_URL directly.
        output = self.engine.render_to_string('static-prefixtag01')
        self.assertEqual(output, settings.STATIC_URL)

    @setup({'static-prefixtag02': '{% load static %}'
            '{% get_static_prefix as static_prefix %}{{ static_prefix }}'})
    def test_static_prefixtag02(self):
        # ``as var`` form stores the prefix in the context instead of
        # printing it immediately.
        output = self.engine.render_to_string('static-prefixtag02')
        self.assertEqual(output, settings.STATIC_URL)

    @setup({'static-prefixtag03': '{% load static %}{% get_media_prefix %}'})
    def test_static_prefixtag03(self):
        # ``get_media_prefix`` outputs MEDIA_URL directly.
        output = self.engine.render_to_string('static-prefixtag03')
        self.assertEqual(output, settings.MEDIA_URL)

    @setup({'static-prefixtag04': '{% load static %}'
            '{% get_media_prefix as media_prefix %}{{ media_prefix }}'})
    def test_static_prefixtag04(self):
        output = self.engine.render_to_string('static-prefixtag04')
        self.assertEqual(output, settings.MEDIA_URL)

    @setup({'t': '{% load static %}{% get_media_prefix ad media_prefix %}{{ media_prefix }}'})
    def test_static_prefixtag_without_as(self):
        # Deliberate typo ``ad`` (instead of ``as``) must raise a
        # TemplateSyntaxError with this exact message.
        msg = "First argument in 'get_media_prefix' must be 'as'"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('t')

    @setup({'static-statictag01': '{% load static %}{% static "admin/base.css" %}'})
    def test_static_statictag01(self):
        # A literal path argument is joined onto STATIC_URL.
        output = self.engine.render_to_string('static-statictag01')
        self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css'))

    @setup({'static-statictag02': '{% load static %}{% static base_css %}'})
    def test_static_statictag02(self):
        # The path may also come from a context variable.
        output = self.engine.render_to_string('static-statictag02', {'base_css': 'admin/base.css'})
        self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css'))

    @setup({'static-statictag03': '{% load static %}{% static "admin/base.css" as foo %}{{ foo }}'})
    def test_static_statictag03(self):
        # ``as var`` form stores the resolved URL in the context.
        output = self.engine.render_to_string('static-statictag03')
        self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css'))

    @setup({'static-statictag04': '{% load static %}{% static base_css as foo %}{{ foo }}'})
    def test_static_statictag04(self):
        output = self.engine.render_to_string('static-statictag04', {'base_css': 'admin/base.css'})
        self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css'))

    @setup({'static-statictag05': '{% load static %}{% static "special?chars&quoted.html" %}'})
    def test_static_quotes_urls(self):
        # Unsafe characters in the path must be percent-encoded in the output.
        output = self.engine.render_to_string('static-statictag05')
        self.assertEqual(output, urljoin(settings.STATIC_URL,
                                         '/static/special%3Fchars%26quoted.html'))

    @setup({'t': '{% load static %}{% static %}'})
    def test_static_statictag_without_path(self):
        # ``static`` without a path argument is a template syntax error.
        msg = "'static' takes at least one argument (path to file)"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('t')


class StaticNodeTests(SimpleTestCase):
    """Direct unit tests for the StaticNode helper class."""

    def test_repr(self):
        # repr() should show both varname and path ...
        static_node = StaticNode(varname='named-var', path='named-path')
        self.assertEqual(
            repr(static_node),
            "StaticNode(varname='named-var', path='named-path')",
        )
        # ... and varname=None when no ``as var`` form was used.
        static_node = StaticNode(path='named-path')
        self.assertEqual(
            repr(static_node),
            "StaticNode(varname=None, path='named-path')",
        )
{ "content_hash": "c0b6edf6d8c3d9d58ca96e7737e5c81d", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 103, "avg_line_length": 47.2906976744186, "alnum_prop": 0.6513400540939267, "repo_name": "atul-bhouraskar/django", "id": "6f27d555bf6532f6e053671d93a9a0063d4fd2a7", "size": "4067", "binary": false, "copies": "6", "ref": "refs/heads/ticket_23424", "path": "tests/template_tests/syntax_tests/test_static.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "52098" }, { "name": "HTML", "bytes": "174031" }, { "name": "JavaScript", "bytes": "249623" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "11309010" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
# Unit tests for the KeystoneDbTenants model (tenant CRUD against the
# in-memory Keystone database used by openstackinabox).
import ddt

from openstackinabox.tests.base import TestBase, DbFailure
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.db.tenants import KeystoneDbTenants


@ddt.ddt
class TestKeystoneDbTenants(TestBase):
    """Exercise add/get/update operations of the tenants model."""

    def setUp(self):
        super(TestKeystoneDbTenants, self).setUp()
        self.model = KeystoneDbTenants
        self.master = 'Mercury'
        self.db = self.get_testing_database()

    def tearDown(self):
        super(TestKeystoneDbTenants, self).tearDown()

    def test_initialization(self):
        # Before initialize() there is no admin tenant; afterwards the
        # model reports a non-None admin_tenant_id.
        instance = self.model(
            self.master,
            self.db
        )
        self.assertEqual(self.master, instance.master)
        self.assertEqual(self.db, instance.database)
        self.assertIsNone(instance.admin_tenant_id)

        instance.initialize()
        self.assertIsNotNone(instance.admin_tenant_id)

    @ddt.data(
        0,
        1
    )
    def test_add_failure(self, row_count):
        # DbFailure simulates a broken database; add() must surface the
        # failure as a KeystoneTenantError regardless of rowcount.
        instance = self.model(
            self.master,
            DbFailure(rowcount=row_count),
        )
        with self.assertRaises(exceptions.KeystoneTenantError):
            instance.add(
                tenant_name='Saturn',
                description='Planetary Bodice',
                enabled=True
            )

    def test_add_and_get(self):
        # Round-trip: add a tenant, then read it back by its returned id.
        instance = self.model(
            self.master,
            self.db
        )
        instance.initialize()

        tenant_info = {
            'name': 'Pluto',
            'description': 'False Planet',
            'enabled': True
        }

        tenant_id = instance.add(
            tenant_name=tenant_info['name'],
            description=tenant_info['description'],
            enabled=tenant_info['enabled']
        )

        tenant_data = instance.get_by_id(tenant_id)
        self.assertEqual(tenant_id, tenant_data['id'])
        self.assertEqual(tenant_info['name'], tenant_data['name'])
        self.assertEqual(
            tenant_info['description'],
            tenant_data['description']
        )
        self.assertEqual(tenant_info['enabled'], tenant_data['enabled'])

    def test_get_by_id_failure(self):
        # Looking up an id that was never added must raise.
        instance = self.model(
            self.master,
            self.db
        )
        instance.initialize()
        with self.assertRaises(exceptions.KeystoneTenantError):
            instance.get_by_id(192)

    @ddt.data(
        0,
        1,
        50
    )
    def test_get_by_id(self, tenant_count):
        # NOTE(review): despite the name, this exercises the bulk get()
        # listing (get_by_id is only covered indirectly above).
        instance = self.model(
            self.master,
            self.db
        )
        instance.initialize()

        tenants = [
            {
                'name': 'Plato{0}'.format(x),
                'description': 'tenant {0}'.format(x),
                'enabled': True
            }
            for x in range(tenant_count)
        ]

        tenant_ids = [
            instance.add(
                tenant_name=tenant['name'],
                description=tenant['description'],
                enabled=tenant['enabled']
            )
            for tenant in tenants
        ]

        stored_tenants = instance.get()

        # -1 since the 'admin' tenant is part of the base model
        self.assertEqual(len(stored_tenants) - 1, len(tenant_ids))

        for stored_tenant in stored_tenants:
            # id 1 is the built-in admin tenant created by initialize().
            if stored_tenant['id'] != 1:
                self.assertIn(stored_tenant['id'], tenant_ids)

                found = False
                for tenant in tenants:
                    if tenant['name'] == stored_tenant['name']:
                        self.assertEqual(
                            tenant['description'],
                            stored_tenant['description']
                        )
                        self.assertEqual(
                            tenant['enabled'],
                            stored_tenant['enabled']
                        )
                        found = True
                self.assertTrue(found)

    @ddt.data(
        True,
        False
    )
    def test_update_description(self, is_valid):
        # is_valid=True updates a real tenant; is_valid=False uses the
        # bogus id 0 and expects a KeystoneTenantError.
        instance = self.model(
            self.master,
            self.db
        )
        instance.initialize()

        tenant_info = {
            'name': 'Pluto',
            'description': 'False Planet',
            'enabled': True
        }

        tenant_id = instance.add(
            tenant_name=tenant_info['name'],
            description=tenant_info['description'],
            enabled=tenant_info['enabled']
        ) if is_valid else 0

        if is_valid:
            old_tenant_info = instance.get_by_id(
                tenant_id
            )
            self.assertEqual(
                old_tenant_info['description'],
                tenant_info['description']
            )

            new_description = "wisdom ignores fools"

            instance.update_description(
                tenant_id=tenant_id,
                description=new_description
            )

            updated_tenant_info = instance.get_by_id(
                tenant_id
            )
            self.assertEqual(
                updated_tenant_info['description'],
                new_description
            )
        else:
            with self.assertRaises(exceptions.KeystoneTenantError):
                instance.update_description(
                    tenant_id=tenant_id,
                    description="foolishness abides fools"
                )

    @ddt.data(
        True,
        False
    )
    def test_update_status(self, is_valid):
        # Same valid/invalid split as test_update_description, but for the
        # enabled flag.
        instance = self.model(
            self.master,
            self.db
        )
        instance.initialize()

        tenant_info = {
            'name': 'Pluto',
            'description': 'False Planet',
            'enabled': True
        }

        tenant_id = instance.add(
            tenant_name=tenant_info['name'],
            description=tenant_info['description'],
            enabled=tenant_info['enabled']
        ) if is_valid else 0

        if is_valid:
            old_tenant_info = instance.get_by_id(
                tenant_id
            )
            self.assertTrue(
                old_tenant_info['enabled']
            )

            instance.update_status(
                tenant_id=tenant_id,
                enabled=False
            )

            updated_tenant_info = instance.get_by_id(
                tenant_id
            )
            self.assertFalse(
                updated_tenant_info['enabled']
            )
        else:
            with self.assertRaises(exceptions.KeystoneTenantError):
                instance.update_status(
                    tenant_id=tenant_id,
                    enabled=False
                )
{ "content_hash": "cf385b86c0dc20b02535cb7033446b84", "timestamp": "", "source": "github", "line_count": 236, "max_line_length": 72, "avg_line_length": 28.309322033898304, "alnum_prop": 0.4964825624906451, "repo_name": "TestInABox/openstackinabox", "id": "93254bbb0379c69ffd8f1f8d8229e4c4eba42ef3", "size": "6681", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "openstackinabox/tests/models/keystone/test_tenants.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "427663" } ], "symlink_target": "" }
import socket import pickle import zmq import sys import argparse,imp parser = argparse.ArgumentParser() parser.add_argument("--config_path", help="config.py file path") args = parser.parse_args() config=imp.load_module("config",*imp.find_module('config',[args.config_path])) use_fdm=False if use_fdm: sys.path.append('..') from common import fdm import asyncio context = zmq.Context() position_struct={} fdm_socks=[] socket_pub = context.socket(zmq.PUB) socket_pub.bind("tcp://*:%d" % (config.zmq_pub_drone_fdm[1])) fd=open('pos.txt','w') def init(): drone_num=0 udp = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP if use_fdm: port=5503+drone_num*10 else: port=19988+drone_num*10 udp.bind(('127.0.0.1', port)) fdm_socks.append(udp) return fdm_socks def reader(sock,ind): global position_struct data=sock.recv(1024) if use_fdm: cfdm=fdm.fdm_from_buf(data) if not ind in position_struct: position_struct[ind]={} ps=position_struct[ind] ps['lon']=cfdm[0].longitude ps['lat']=cfdm[0].latitude ps['alt']=cfdm[0].altitude #above sea level ps['roll']=cfdm[0].phi ps['pitch']=cfdm[0].theta ps['yaw']=cfdm[0].psi ps['type']='fdm' if ind==0: print('%.10f,%.10f,%.7f'%(ps['lon'],ps['lat'], ps['alt']),file=fd) fd.flush() else: u=list(map(float,data.split())) position_struct={'posx':u[0],'posy':u[1],'posz':-u[2],'roll':u[3],'pitch':u[4],'yaw':u[5],'type':'udp_patch'} #print(data) @asyncio.coroutine def printer(): while 1: print('-->',position_struct) yield from asyncio.sleep(1) @asyncio.coroutine def pub_position_struct(): while 1: socket_pub.send_multipart([config.topic_sitl_position_report,pickle.dumps(position_struct,-1)]) yield from asyncio.sleep(1/30.0) #30Hz if __name__=="__main__": init() loop = asyncio.get_event_loop() for ind,sock in enumerate(fdm_socks): loop.add_reader(sock,reader,sock,ind) tasks=[printer(),pub_position_struct()] loop.run_until_complete(asyncio.wait(tasks)) loop.close()
{ "content_hash": "b2c546922ac2b732ec0fa478a24a4229", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 118, "avg_line_length": 25.977011494252874, "alnum_prop": 0.6, "repo_name": "orig74/DroneSimLab", "id": "6250712b2194cd1deccaa75ac18b0250c645189b", "size": "2314", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/fdm_pub_ardupilot.py", "mode": "33188", "license": "mit", "language": [ { "name": "CMake", "bytes": "6141" }, { "name": "Dockerfile", "bytes": "15473" }, { "name": "Python", "bytes": "15489" }, { "name": "Shell", "bytes": "11172" }, { "name": "Vim script", "bytes": "26" } ], "symlink_target": "" }
# Unit tests for helper functions in ``scrapy.utils.python``.
import functools
import operator
import unittest
from itertools import count

import six

from scrapy.utils.python import (
    memoizemethod_noargs, isbinarytext, equal_attributes,
    WeakKeyCache, stringify_dict, get_func_args, to_bytes, to_unicode,
    without_none_values)

# Also run the doctests embedded in scrapy.utils.python.
__doctests__ = ['scrapy.utils.python']


class ToUnicodeTest(unittest.TestCase):
    """to_unicode(): decode bytes to text, pass text through unchanged."""

    def test_converting_an_utf8_encoded_string_to_unicode(self):
        self.assertEqual(to_unicode(b'lel\xc3\xb1e'), u'lel\xf1e')

    def test_converting_a_latin_1_encoded_string_to_unicode(self):
        # An explicit encoding argument overrides the UTF-8 default.
        self.assertEqual(to_unicode(b'lel\xf1e', 'latin-1'), u'lel\xf1e')

    def test_converting_a_unicode_to_unicode_should_return_the_same_object(self):
        self.assertEqual(to_unicode(u'\xf1e\xf1e\xf1e'), u'\xf1e\xf1e\xf1e')

    def test_converting_a_strange_object_should_raise_TypeError(self):
        self.assertRaises(TypeError, to_unicode, 423)

    def test_errors_argument(self):
        # ``errors`` is forwarded to the decoder (here: replace bad bytes
        # with U+FFFD).
        self.assertEqual(
            to_unicode(b'a\xedb', 'utf-8', errors='replace'),
            u'a\ufffdb'
        )


class ToBytesTest(unittest.TestCase):
    """to_bytes(): encode text to bytes, pass bytes through unchanged."""

    def test_converting_a_unicode_object_to_an_utf_8_encoded_string(self):
        self.assertEqual(to_bytes(u'\xa3 49'), b'\xc2\xa3 49')

    def test_converting_a_unicode_object_to_a_latin_1_encoded_string(self):
        self.assertEqual(to_bytes(u'\xa3 49', 'latin-1'), b'\xa3 49')

    def test_converting_a_regular_bytes_to_bytes_should_return_the_same_object(self):
        self.assertEqual(to_bytes(b'lel\xf1e'), b'lel\xf1e')

    def test_converting_a_strange_object_should_raise_TypeError(self):
        self.assertRaises(TypeError, to_bytes, unittest)

    def test_errors_argument(self):
        self.assertEqual(
            to_bytes(u'a\ufffdb', 'latin-1', errors='replace'),
            b'a?b'
        )


class MemoizedMethodTest(unittest.TestCase):

    def test_memoizemethod_noargs(self):
        # The decorated method must return the same cached object on every
        # call, while an undecorated method keeps producing new objects.
        class A(object):

            @memoizemethod_noargs
            def cached(self):
                return object()

            def noncached(self):
                return object()

        a = A()
        one = a.cached()
        two = a.cached()
        three = a.noncached()
        assert one is two
        assert one is not three


class IsBinaryTextTest(unittest.TestCase):

    def test_isbinarytext(self):
        # Plain ASCII is not binary.
        assert not isbinarytext(b"hello")

    def test_utf_16_strings_contain_null_bytes(self):
        # NUL bytes introduced by UTF-16 encoding alone must not make the
        # payload count as binary.
        assert not isbinarytext(u"hello".encode('utf-16'))

    def test_one_with_encoding(self):
        # A single high byte inside mostly-text content is still text.
        assert not isbinarytext(b"<div>Price \xa3</div>")

    def test_real_binary_bytes(self):
        assert isbinarytext(b"\x02\xa3")


class UtilsPythonTestCase(unittest.TestCase):

    def test_equal_attributes(self):
        # equal_attributes() compares the listed attributes on two objects;
        # entries may be attribute names or callables applied to each object.
        class Obj:
            pass

        a = Obj()
        b = Obj()
        # no attributes given return False
        self.failIf(equal_attributes(a, b, []))
        # not existent attributes
        self.failIf(equal_attributes(a, b, ['x', 'y']))

        a.x = 1
        b.x = 1
        # equal attribute
        self.assertTrue(equal_attributes(a, b, ['x']))

        b.y = 2
        # obj1 has no attribute y
        self.failIf(equal_attributes(a, b, ['x', 'y']))

        a.y = 2
        # equal attributes
        self.assertTrue(equal_attributes(a, b, ['x', 'y']))

        a.y = 1
        # differente attributes
        self.failIf(equal_attributes(a, b, ['x', 'y']))

        # test callable
        a.meta = {}
        b.meta = {}
        self.assertTrue(equal_attributes(a, b, ['meta']))

        # compare ['meta']['a']
        a.meta['z'] = 1
        b.meta['z'] = 1

        get_z = operator.itemgetter('z')
        get_meta = operator.attrgetter('meta')
        compare_z = lambda obj: get_z(get_meta(obj))

        self.assertTrue(equal_attributes(a, b, [compare_z, 'x']))
        # fail z equality
        a.meta['z'] = 2
        self.failIf(equal_attributes(a, b, [compare_z, 'x']))

    def test_weakkeycache(self):
        # Values are computed once per live key; the cache holds only weak
        # references, so entries vanish when the key is garbage collected.
        class _Weakme(object):
            pass

        _values = count()
        wk = WeakKeyCache(lambda k: next(_values))
        k = _Weakme()
        v = wk[k]
        self.assertEqual(v, wk[k])
        self.assertNotEqual(v, wk[_Weakme()])
        self.assertEqual(v, wk[k])
        del k
        self.assertFalse(len(wk._weakdict))

    @unittest.skipUnless(six.PY2, "deprecated function")
    def test_stringify_dict(self):
        d = {'a': 123, u'b': b'c', u'd': u'e', object(): u'e'}
        d2 = stringify_dict(d, keys_only=False)
        self.assertEqual(d, d2)
        self.failIf(d is d2)  # shouldn't modify in place
        self.failIf(any(isinstance(x, six.text_type) for x in d2.keys()))
        self.failIf(any(isinstance(x, six.text_type) for x in d2.values()))

    @unittest.skipUnless(six.PY2, "deprecated function")
    def test_stringify_dict_tuples(self):
        # stringify_dict also accepts an iterable of (key, value) tuples.
        tuples = [('a', 123), (u'b', 'c'), (u'd', u'e'), (object(), u'e')]
        d = dict(tuples)
        d2 = stringify_dict(tuples, keys_only=False)
        self.assertEqual(d, d2)
        self.failIf(d is d2)  # shouldn't modify in place
        self.failIf(any(isinstance(x, six.text_type) for x in d2.keys()), d2.keys())
        self.failIf(any(isinstance(x, six.text_type) for x in d2.values()))

    @unittest.skipUnless(six.PY2, "deprecated function")
    def test_stringify_dict_keys_only(self):
        # Default keys_only=True leaves values untouched.
        d = {'a': 123, u'b': 'c', u'd': u'e', object(): u'e'}
        d2 = stringify_dict(d)
        self.assertEqual(d, d2)
        self.failIf(d is d2)  # shouldn't modify in place
        self.failIf(any(isinstance(x, six.text_type) for x in d2.keys()))

    def test_get_func_args(self):
        # get_func_args must see through plain functions, bound methods,
        # constructors, callable instances and functools.partial objects.
        def f1(a, b, c):
            pass

        def f2(a, b=None, c=None):
            pass

        class A(object):

            def __init__(self, a, b, c):
                pass

            def method(self, a, b, c):
                pass

        class Callable(object):

            def __call__(self, a, b, c):
                pass

        a = A(1, 2, 3)
        cal = Callable()
        partial_f1 = functools.partial(f1, None)
        partial_f2 = functools.partial(f1, b=None)
        partial_f3 = functools.partial(partial_f2, None)

        self.assertEqual(get_func_args(f1), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(f2), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(A), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(a.method), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(partial_f1), ['b', 'c'])
        self.assertEqual(get_func_args(partial_f2), ['a', 'c'])
        self.assertEqual(get_func_args(partial_f3), ['c'])
        self.assertEqual(get_func_args(cal), ['a', 'b', 'c'])
        self.assertEqual(get_func_args(object), [])

        # TODO: how do we fix this to return the actual argument names?
        self.assertEqual(get_func_args(six.text_type.split), [])
        self.assertEqual(get_func_args(" ".join), [])
        self.assertEqual(get_func_args(operator.itemgetter(2)), [])

    def test_without_none_values(self):
        # None entries are dropped; the container type (list/tuple/dict)
        # is preserved.
        self.assertEqual(without_none_values([1, None, 3, 4]), [1, 3, 4])
        self.assertEqual(without_none_values((1, None, 3, 4)), (1, 3, 4))
        self.assertEqual(
            without_none_values({'one': 1, 'none': None, 'three': 3, 'four': 4}),
            {'one': 1, 'three': 3, 'four': 4})


if __name__ == "__main__":
    unittest.main()
{ "content_hash": "a19cd06a3fbf479471a8f5d8f5c30b25", "timestamp": "", "source": "github", "line_count": 224, "max_line_length": 85, "avg_line_length": 33.15625, "alnum_prop": 0.5796418473138548, "repo_name": "jdemaeyer/scrapy", "id": "4f08349020f9661b2dbbf7ec910a8be675d29cfd", "size": "7427", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "tests/test_utils_python.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Groff", "bytes": "2008" }, { "name": "HTML", "bytes": "1809" }, { "name": "Python", "bytes": "1322042" }, { "name": "Shell", "bytes": "258" } ], "symlink_target": "" }
"""Delay space spectrum estimation and filtering.""" import typing import numpy as np from numpy.lib.recfunctions import structured_to_unstructured import scipy.linalg as la from caput import mpiarray, config from cora.util import units from ..core import containers, task, io from ..util import random class DelayFilter(task.SingleTask): """Remove delays less than a given threshold. This is performed by projecting the data onto the null space that is orthogonal to any mode at low delays. Note that for this task to work best the zero entries in the weights dataset should factorize in frequency-time for each baseline. A mostly optimal masking can be generated using the `draco.analysis.flagging.MaskFreq` task. Attributes ---------- delay_cut : float Delay value to filter at in seconds. za_cut : float Sine of the maximum zenith angle included in baseline-dependent delay filtering. Default is 1 which corresponds to the horizon (ie: filters out all zenith angles). Setting to zero turns off baseline dependent cut. extra_cut : float Increase the delay threshold beyond the baseline dependent term. weight_tol : float Maximum weight kept in the masked data, as a fraction of the largest weight in the original dataset. telescope_orientation : one of ('NS', 'EW', 'none') Determines if the baseline-dependent delay cut is based on the north-south component, the east-west component or the full baseline length. For cylindrical telescopes oriented in the NS direction (like CHIME) use 'NS'. The default is 'NS'. window : bool Apply the window function to the data when applying the filter. Notes ----- The delay cut applied is `max(za_cut * baseline / c + extra_cut, delay_cut)`. 
""" delay_cut = config.Property(proptype=float, default=0.1) za_cut = config.Property(proptype=float, default=1.0) extra_cut = config.Property(proptype=float, default=0.0) weight_tol = config.Property(proptype=float, default=1e-4) telescope_orientation = config.enum(["NS", "EW", "none"], default="NS") window = config.Property(proptype=bool, default=False) def setup(self, telescope): """Set the telescope needed to obtain baselines. Parameters ---------- telescope : TransitTelescope """ self.telescope = io.get_telescope(telescope) def process(self, ss): """Filter out delays from a SiderealStream or TimeStream. Parameters ---------- ss : containers.SiderealStream Data to filter. Returns ------- ss_filt : containers.SiderealStream Filtered dataset. """ tel = self.telescope ss.redistribute(["input", "prod", "stack"]) freq = ss.freq[:] bandwidth = np.ptp(freq) ssv = ss.vis[:].view(np.ndarray) ssw = ss.weight[:].view(np.ndarray) ia, ib = structured_to_unstructured(ss.prodstack, dtype=np.int16).T baselines = tel.feedpositions[ia] - tel.feedpositions[ib] for lbi, bi in ss.vis[:].enumerate(axis=1): # Select the baseline length to use baseline = baselines[bi] if self.telescope_orientation == "NS": baseline = abs(baseline[1]) # Y baseline elif self.telescope_orientation == "EW": baseline = abs(baseline[0]) # X baseline else: baseline = np.linalg.norm(baseline) # Norm # In micro seconds baseline_delay_cut = self.za_cut * baseline / units.c * 1e6 + self.extra_cut delay_cut = np.amax([baseline_delay_cut, self.delay_cut]) # Calculate the number of samples needed to construct the delay null space. # `4 * tau_max * bandwidth` is the amount recommended in the DAYENU paper # and seems to work well here number_cut = int(4.0 * bandwidth * delay_cut + 0.5) # Flag frequencies and times with zero weight. 
This works much better if the # incoming weight can be factorized f_samp = (ssw[:, lbi] > 0.0).sum(axis=1) f_mask = (f_samp == f_samp.max()).astype(np.float64) t_samp = (ssw[:, lbi] > 0.0).sum(axis=0) t_mask = (t_samp == t_samp.max()).astype(np.float64) try: NF = null_delay_filter( freq, delay_cut, f_mask, num_delay=number_cut, window=self.window, ) except la.LinAlgError as e: raise RuntimeError( f"Failed to converge while processing baseline {bi}" ) from e ssv[:, lbi] = np.dot(NF, ssv[:, lbi]) ssw[:, lbi] *= f_mask[:, np.newaxis] * t_mask[np.newaxis, :] return ss # A specific subclass of a FreqContainer FreqContainerType = typing.TypeVar("FreqContainerType", bound=containers.FreqContainer) class DelayFilterBase(task.SingleTask): """Remove delays less than a given threshold. This is performed by projecting the data onto the null space that is orthogonal to any mode at low delays. Note that for this task to work best the zero entries in the weights dataset should factorize in frequency-time for each baseline. A mostly optimal masking can be generated using the `draco.analysis.flagging.MaskFreq` task. Attributes ---------- delay_cut : float Delay value to filter at in seconds. window : bool Apply the window function to the data when applying the filter. axis : str The main axis to iterate over. The delay cut can be varied for each element of this axis. If not set, a suitable default is picked for the container type. dataset : str Apply the delay filter to this dataset. If not set, a suitable default is picked for the container type. Notes ----- The delay cut applied is `max(za_cut * baseline / c + extra_cut, delay_cut)`. """ delay_cut = config.Property(proptype=float, default=0.1) window = config.Property(proptype=bool, default=False) axis = config.Property(proptype=str, default=None) dataset = config.Property(proptype=str, default=None) def setup(self, telescope: io.TelescopeConvertible): """Set the telescope needed to obtain baselines. 
Parameters ---------- telescope """ self.telescope = io.get_telescope(telescope) def _delay_cut(self, ss: FreqContainerType, axis: str, ind: int) -> float: """Return the delay cut to use for this entry in microseconds. Parameters ---------- ss The container we are processing. axis The axis we are looping over. ind : int The (global) index along that axis. Returns ------- float The delay cut in microseconds. """ return self.delay_cut def process(self, ss: FreqContainerType) -> FreqContainerType: """Filter out delays from a SiderealStream or TimeStream. Parameters ---------- ss Data to filter. Returns ------- ss_filt Filtered dataset. """ if not isinstance(ss, containers.FreqContainer): raise TypeError( f"Can only process FreqContainer instances. Got {type(ss)}." ) _default_axis = { containers.SiderealStream: "stack", containers.HybridVisMModes: "m", containers.RingMap: "el", containers.GridBeam: "theta", } _default_dataset = { containers.SiderealStream: "vis", containers.HybridVisMModes: "vis", containers.RingMap: "map", containers.GridBeam: "beam", } axis = self.axis if self.axis is None: for cls, ax in _default_axis.items(): if isinstance(ss, cls): axis = ax break else: raise ValueError(f"No default axis know for {type(ss)} container.") dset = self.dataset if self.dataset is None: for cls, dataset in _default_dataset.items(): if isinstance(ss, cls): dset = dataset break else: raise ValueError(f"No default dataset know for {type(ss)} container.") ss.redistribute(axis) freq = ss.freq[:] bandwidth = np.ptp(freq) # Get views of the relevant datasets, but make sure that the weights have the # same number of axes as the visibilities (inserting length-1 axes as needed) ssv = ss.datasets[dset][:].view(np.ndarray) ssw = match_axes(ss.datasets[dset], ss.weight).view(np.ndarray) dist_axis_pos = list(ss.datasets[dset].attrs["axis"]).index(axis) freq_axis_pos = list(ss.datasets[dset].attrs["axis"]).index("freq") # Once we have selected elements of dist_axis the location of 
freq_axis_pos may # be one lower sel_freq_axis_pos = ( freq_axis_pos if freq_axis_pos < dist_axis_pos else freq_axis_pos - 1 ) for lbi, bi in ss.datasets[dset][:].enumerate(axis=dist_axis_pos): # Extract the part of the array that we are processing, and # transpose/reshape to make a 2D array with frequency as axis=0 vis_local = _take_view(ssv, lbi, dist_axis_pos) vis_2D = _move_front(vis_local, sel_freq_axis_pos, vis_local.shape) weight_local = _take_view(ssw, lbi, dist_axis_pos) weight_2D = _move_front(weight_local, sel_freq_axis_pos, weight_local.shape) # In micro seconds delay_cut = self._delay_cut(ss, axis, bi) # Calculate the number of samples needed to construct the delay null space. # `4 * tau_max * bandwidth` is the amount recommended in the DAYENU paper # and seems to work well here number_cut = int(4.0 * bandwidth * delay_cut + 0.5) # Flag frequencies and times (or all other axes) with zero weight. This # works much better if the incoming weight can be factorized f_samp = (weight_2D > 0.0).sum(axis=1) f_mask = (f_samp == f_samp.max()).astype(np.float64) t_samp = (weight_2D > 0.0).sum(axis=0) t_mask = (t_samp == t_samp.max()).astype(np.float64) # This has occasionally failed to converge, catch this and output enough # info to debug after the fact try: NF = null_delay_filter( freq, delay_cut, f_mask, num_delay=number_cut, window=self.window, ) except la.LinAlgError as e: raise RuntimeError( f"Failed to converge while processing baseline {bi}" ) from e vis_local[:] = _inv_move_front( np.dot(NF, vis_2D), sel_freq_axis_pos, vis_local.shape ) weight_local[:] *= _inv_move_front( f_mask[:, np.newaxis] * t_mask[np.newaxis, :], sel_freq_axis_pos, weight_local.shape, ) return ss class DelaySpectrumEstimator(task.SingleTask, random.RandomTask): """Calculate the delay spectrum of a Sidereal/TimeStream for instrumental Stokes I. The spectrum is calculated by Gibbs sampling. However, at the moment only the final sample is used to calculate the spectrum. 
Attributes ---------- nsamp : int, optional The number of Gibbs samples to draw. freq_zero : float, optional The physical frequency (in MHz) of the *zero* channel. That is the DC channel coming out of the F-engine. If not specified, use the first frequency channel of the stream. freq_spacing : float, optional The spacing between the underlying channels (in MHz). This is conjugate to the length of a frame of time samples that is transformed. If not set, then use the smallest gap found between channels in the dataset. nfreq : int, optional The number of frequency channels in the full set produced by the F-engine. If not set, assume the last included frequency is the last of the full set (or is the penultimate if `skip_nyquist` is set). skip_nyquist : bool, optional Whether the Nyquist frequency is included in the data. This is `True` by default to align with the output of CASPER PFBs. apply_window : bool, optional Whether to apply apodisation to frequency axis. Default: True. window : one of {'nuttall', 'blackman_nuttall', 'blackman_harris'}, optional Apodisation to perform on frequency axis. Default: 'nuttall'. complex_timedomain : bool, optional Whether to assume the original time samples that were channelized into a frequency spectrum were purely real (False) or complex (True). If True, `freq_zero`, `nfreq`, and `skip_nyquist` are ignored. Default: False. initial_amplitude : float, optional The Gibbs sampler will be initialized with a flat power spectrum with this amplitude. Default: 10. 
""" nsamp = config.Property(proptype=int, default=20) freq_zero = config.Property(proptype=float, default=None) freq_spacing = config.Property(proptype=float, default=None) nfreq = config.Property(proptype=int, default=None) skip_nyquist = config.Property(proptype=bool, default=True) apply_window = config.Property(proptype=bool, default=True) window = config.enum( ["nuttall", "blackman_nuttall", "blackman_harris"], default="nuttall" ) complex_timedomain = config.Property(proptype=bool, default=False) initial_amplitude = config.Property(proptype=float, default=10.0) def setup(self, telescope): """Set the telescope needed to generate Stokes I. Parameters ---------- telescope : TransitTelescope """ self.telescope = io.get_telescope(telescope) def process(self, ss): """Estimate the delay spectrum. Parameters ---------- ss : SiderealStream or TimeStream Returns ------- dspec : DelaySpectrum """ tel = self.telescope ss.redistribute("freq") # Construct the Stokes I vis vis_I, vis_weight, baselines = stokes_I(ss, tel) # ==== Figure out the frequency structure and delay values ==== if self.freq_zero is None: self.freq_zero = ss.freq[0] if self.freq_spacing is None: self.freq_spacing = np.abs(np.diff(ss.freq[:])).min() if self.complex_timedomain: self.nfreq = len(ss.freq) channel_ind = np.arange(self.nfreq) ndelay = self.nfreq else: channel_ind = ( np.abs(ss.freq[:] - self.freq_zero) / self.freq_spacing ).astype(np.int) if self.nfreq is None: self.nfreq = channel_ind[-1] + 1 if self.skip_nyquist: self.nfreq += 1 # Assume each transformed frame was an even number of samples long ndelay = 2 * (self.nfreq - 1) # Compute delays corresponding to output delay power spectrum delays = np.fft.fftshift(np.fft.fftfreq(ndelay, d=self.freq_spacing)) # in us # Initialise the spectrum container delay_spec = containers.DelaySpectrum( baseline=baselines, delay=delays, attrs_from=ss ) delay_spec.redistribute("baseline") delay_spec.spectrum[:] = 0.0 initial_S = np.ones_like(delays) * 
self.initial_amplitude # Initialize the random number generator we'll use rng = self.rng # Iterate over all baselines and use the Gibbs sampler to estimate the spectrum for lbi, bi in delay_spec.spectrum[:].enumerate(axis=0): self.log.debug("Delay transforming baseline %i/%i", bi, len(baselines)) # Get the local selections data = vis_I.local_array[lbi].T weight = vis_weight.local_array[lbi] # Mask out data with completely zero'd weights and generate time # averaged weights weight_cut = ( 1e-4 * weight.mean() ) # Use approx threshold to ignore small weights data = data * (weight.T > weight_cut) weight = np.mean(weight, axis=1) if (data == 0.0).all(): continue # If there are no non-zero weighted entries skip non_zero = weight > 0 if not non_zero.any(): continue # Remove any frequency channel which is entirely zero, this is just to # reduce the computational cost, it should make no difference to the result data = data[:, non_zero] weight = weight[non_zero] non_zero_channel = channel_ind[non_zero] spec = delay_spectrum_gibbs( data, ndelay, weight, initial_S, window=self.window if self.apply_window else None, fsel=non_zero_channel, niter=self.nsamp, rng=rng, complex_timedomain=self.complex_timedomain, ) # Take an average over the last half of the delay spectrum samples # (presuming that removes the burn-in) spec_av = np.median(spec[-(self.nsamp // 2) :], axis=0) delay_spec.spectrum[bi] = np.fft.fftshift(spec_av) return delay_spec class DelaySpectrumEstimatorBase(task.SingleTask, random.RandomTask): """Calculate the delay spectrum of any container with a frequency axis. The spectrum is calculated by Gibbs sampling. The spectrum returned is the median of the final half of the samples calculated. The delay spectrum output is indexed by a `baseline` axis. This axis is the composite axis of all the axes in the container except the frequency axis or the `average_axis`. 
These constituent axes are included in the index map, and their order is given by the `baseline_axes` attribute. Attributes ---------- nsamp : int, optional The number of Gibbs samples to draw. freq_zero : float, optional The physical frequency (in MHz) of the *zero* channel. That is the DC channel coming out of the F-engine. If not specified, use the first frequency channel of the stream. freq_spacing : float, optional The spacing between the underlying channels (in MHz). This is conjugate to the length of a frame of time samples that is transformed. If not set, then use the smallest gap found between channels in the dataset. nfreq : int, optional The number of frequency channels in the full set produced by the F-engine. If not set, assume the last included frequency is the last of the full set (or is the penultimate if `skip_nyquist` is set). skip_nyquist : bool, optional Whether the Nyquist frequency is included in the data. This is `True` by default to align with the output of CASPER PFBs. dataset : str Calculate the delay spectrum of this dataset (e.g., "vis", "map", "beam"). average_axis : str Name of the axis to take the average over. apply_window : bool, optional Whether to apply apodisation to frequency axis. Default: True. window : one of {'nuttall', 'blackman_nuttall', 'blackman_harris', optional Apodisation to perform on frequency axis. Default: 'nuttall'. complex_timedomain : bool, optional Whether to assume the original time samples that were channelized into a frequency spectrum were purely real (False) or complex (True). If True, `freq_zero`, `nfreq`, and `skip_nyquist` are ignored. Default: False. initial_amplitude : float, optional The Gibbs sampler will be initialized with a flat power spectrum with this amplitude. Default: 10. 
""" nsamp = config.Property(proptype=int, default=20) freq_zero = config.Property(proptype=float, default=None) freq_spacing = config.Property(proptype=float, default=None) nfreq = config.Property(proptype=int, default=None) skip_nyquist = config.Property(proptype=bool, default=True) apply_window = config.Property(proptype=bool, default=True) window = config.enum( ["nuttall", "blackman_nuttall", "blackman_harris"], default="nuttall" ) dataset = config.Property(proptype=str, default="vis") average_axis = config.Property(proptype=str) complex_timedomain = config.Property(proptype=bool, default=False) initial_amplitude = config.Property(proptype=float, default=10.0) def setup(self, telescope: io.TelescopeConvertible): """Set the telescope needed to generate Stokes I. Parameters ---------- telescope : TransitTelescope """ self.telescope = io.get_telescope(telescope) def process(self, ss: FreqContainerType) -> containers.DelaySpectrum: """Estimate the delay spectrum. Parameters ---------- ss Data to transform. Must have a frequency axis and one other axis to average over. Returns ------- dspec : DelaySpectrum """ ss.redistribute("freq") if self.dataset not in ss.datasets: raise ValueError( f"Specified dataset to delay transform ({self.dataset}) not in " f"container of type {type(ss)}." ) if ( self.average_axis not in ss.axes or self.average_axis not in ss.datasets[self.dataset].attrs["axis"] ): raise ValueError( f"Specified axis to average over ({self.average_axis}) not in " f"container of type {type(ss)}." 
) # ==== Figure out the frequency structure and delay values ==== if self.freq_zero is None: self.freq_zero = ss.freq[0] if self.freq_spacing is None: self.freq_spacing = np.abs(np.diff(ss.freq[:])).min() if self.complex_timedomain: self.nfreq = len(ss.freq) channel_ind = np.arange(self.nfreq) ndelay = self.nfreq else: channel_ind = ( np.abs(ss.freq[:] - self.freq_zero) / self.freq_spacing ).astype(np.int) if self.nfreq is None: self.nfreq = channel_ind[-1] + 1 if self.skip_nyquist: self.nfreq += 1 # Assume each transformed frame was an even number of samples long ndelay = 2 * (self.nfreq - 1) # Compute delays corresponding to output delay power spectrum delays = np.fft.fftshift(np.fft.fftfreq(ndelay, d=self.freq_spacing)) # in us # Find the relevant axis positions data_axes = ss.datasets[self.dataset].attrs["axis"] freq_axis_pos = list(data_axes).index("freq") average_axis_pos = list(data_axes).index(self.average_axis) # Create a view of the dataset with the relevant axes at the back, # and all other axes compressed data_view = np.moveaxis( ss.datasets[self.dataset][:].local_array, [average_axis_pos, freq_axis_pos], [-2, -1], ) data_view = data_view.reshape(-1, data_view.shape[-2], data_view.shape[-1]) data_view = mpiarray.MPIArray.wrap(data_view, axis=2, comm=ss.comm) nbase = int(np.prod(data_view.shape[:-2])) data_view = data_view.redistribute(axis=0) # ... 
do the same for the weights, but we also need to make the weights full # size weight_full = np.zeros( ss.datasets[self.dataset][:].shape, dtype=ss.weight.dtype ) weight_full[:] = match_axes(ss.datasets[self.dataset], ss.weight) weight_view = np.moveaxis( weight_full, [average_axis_pos, freq_axis_pos], [-2, -1] ) weight_view = weight_view.reshape( -1, weight_view.shape[-2], weight_view.shape[-1] ) weight_view = mpiarray.MPIArray.wrap(weight_view, axis=2, comm=ss.comm) weight_view = weight_view.redistribute(axis=0) # Use the "baselines" axis to generically represent all the other axes # Initialise the spectrum container delay_spec = containers.DelaySpectrum( baseline=nbase, delay=delays, attrs_from=ss ) delay_spec.redistribute("baseline") delay_spec.spectrum[:] = 0.0 bl_axes = [da for da in data_axes if da not in [self.average_axis, "freq"]] # Copy the index maps for all the flattened axes into the output container, and # write out their order into an attribute so we can reconstruct this easily # when loading in the spectrum for ax in bl_axes: delay_spec.create_index_map(ax, ss.index_map[ax]) delay_spec.attrs["baseline_axes"] = bl_axes initial_S = np.ones_like(delays) * self.initial_amplitude # Initialize the random number generator we'll use rng = self.rng # Iterate over all baselines and use the Gibbs sampler to estimate the spectrum for lbi, bi in delay_spec.spectrum[:].enumerate(axis=0): self.log.debug(f"Delay transforming baseline {bi}/{nbase}") # Get the local selections data = data_view.local_array[lbi] weight = weight_view.local_array[lbi] # Mask out data with completely zero'd weights and generate time # averaged weights weight_cut = ( 1e-4 * weight.mean() ) # Use approx threshold to ignore small weights data = data * (weight > weight_cut) weight = np.mean(weight, axis=0) if (data == 0.0).all(): continue # If there are no non-zero weighted entries skip non_zero = weight > 0 if not non_zero.any(): continue # Remove any frequency channel which is entirely 
zero, this is just to # reduce the computational cost, it should make no difference to the result data = data[:, non_zero] weight = weight[non_zero] non_zero_channel = channel_ind[non_zero] spec = delay_spectrum_gibbs( data, ndelay, weight, initial_S, window=self.window if self.apply_window else None, fsel=non_zero_channel, niter=self.nsamp, rng=rng, complex_timedomain=self.complex_timedomain, ) # Take an average over the last half of the delay spectrum samples # (presuming that removes the burn-in) spec_av = np.median(spec[-(self.nsamp // 2) :], axis=0) delay_spec.spectrum[bi] = np.fft.fftshift(spec_av) return delay_spec def stokes_I(sstream, tel): """Extract instrumental Stokes I from a time/sidereal stream. Parameters ---------- sstream : containers.SiderealStream, container.TimeStream Stream of correlation data. tel : TransitTelescope Instance describing the telescope. Returns ------- vis_I : mpiarray.MPIArray[nbase, nfreq, ntime] The instrumental Stokes I visibilities, distributed over baselines. vis_weight : mpiarray.MPIArray[nbase, nfreq, ntime] The weights for each visibility, distributed over baselines. ubase : np.ndarray[nbase, 2] Baseline vectors corresponding to output. """ # Construct a complex number representing each baseline (used for determining # unique baselines). # NOTE: due to floating point precision, some baselines don't get matched as having # the same lengths. 
To get around this, round all separations to 0.1 mm precision bl_round = np.around(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1], 4) # ==== Unpack into Stokes I ubase, uinv, ucount = np.unique(bl_round, return_inverse=True, return_counts=True) ubase = ubase.astype(np.complex128, copy=False).view(np.float64).reshape(-1, 2) nbase = ubase.shape[0] vis_shape = (nbase, sstream.vis.global_shape[0], sstream.vis.global_shape[2]) vis_I = mpiarray.zeros(vis_shape, dtype=sstream.vis.dtype, axis=1) vis_weight = mpiarray.zeros(vis_shape, dtype=sstream.weight.dtype, axis=1) # Iterate over products to construct the Stokes I vis # TODO: this should be updated when driftscan gains a concept of polarisation ssv = sstream.vis[:] ssw = sstream.weight[:] # Cache beamclass as it's regenerated every call beamclass = tel.beamclass[:] for ii, ui in enumerate(uinv): # Skip if not all polarisations were included if ucount[ui] < 4: continue fi, fj = tel.uniquepairs[ii] bi, bj = beamclass[fi], beamclass[fj] upi = tel.feedmap[fi, fj] if upi == -1: continue if bi == bj: vis_I[ui] += ssv[:, ii] vis_weight[ui] += ssw[:, ii] vis_I = vis_I.redistribute(axis=0) vis_weight = vis_weight.redistribute(axis=0) return vis_I, vis_weight, ubase def window_generalised(x, window="nuttall"): """A generalised high-order window at arbitrary locations. Parameters ---------- x : np.ndarray[n] Location to evaluate at. Must be in the range 0 to 1. window : one of {'nuttall', 'blackman_nuttall', 'blackman_harris'} Type of window function to return. Returns ------- w : np.ndarray[n] Window function. 
""" a_table = { "nuttall": np.array([0.355768, -0.487396, 0.144232, -0.012604]), "blackman_nuttall": np.array([0.3635819, -0.4891775, 0.1365995, -0.0106411]), "blackman_harris": np.array([0.35875, -0.48829, 0.14128, -0.01168]), } a = a_table[window] t = 2 * np.pi * np.arange(4)[:, np.newaxis] * x[np.newaxis, :] w = (a[:, np.newaxis] * np.cos(t)).sum(axis=0) return w def fourier_matrix_r2c(N, fsel=None): """Generate a Fourier matrix to represent a real to complex FFT. Parameters ---------- N : integer Length of timestream that we are transforming to. Must be even. fsel : array_like, optional Indexes of the frequency channels to include in the transformation matrix. By default, assume all channels. Returns ------- Fr : np.ndarray An array performing the Fourier transform from a real time series to frequencies packed as alternating real and imaginary elements, """ if fsel is None: fa = np.arange(N // 2 + 1) else: fa = np.array(fsel) fa = fa[:, np.newaxis] ta = np.arange(N)[np.newaxis, :] Fr = np.zeros((2 * fa.shape[0], N), dtype=np.float64) Fr[0::2] = np.cos(2 * np.pi * ta * fa / N) Fr[1::2] = -np.sin(2 * np.pi * ta * fa / N) return Fr def fourier_matrix_c2r(N, fsel=None): """Generate a Fourier matrix to represent a complex to real FFT. Parameters ---------- N : integer Length of timestream that we are transforming to. Must be even. fsel : array_like, optional Indexes of the frequency channels to include in the transformation matrix. By default, assume all channels. Returns ------- Fr : np.ndarray An array performing the Fourier transform from frequencies packed as alternating real and imaginary elements, to the real time series. 
""" if fsel is None: fa = np.arange(N // 2 + 1) else: fa = np.array(fsel) fa = fa[np.newaxis, :] mul = np.where((fa == 0) | (fa == N // 2), 1.0, 2.0) / N ta = np.arange(N)[:, np.newaxis] Fr = np.zeros((N, 2 * fa.shape[1]), dtype=np.float64) Fr[:, 0::2] = np.cos(2 * np.pi * ta * fa / N) * mul Fr[:, 1::2] = -np.sin(2 * np.pi * ta * fa / N) * mul return Fr def fourier_matrix_c2c(N, fsel=None): """Generate a Fourier matrix to represent a complex to complex FFT. These Fourier conventions match `numpy.fft.fft()`. Parameters ---------- N : integer Length of timestream that we are transforming to. fsel : array_like, optional Indices of the frequency channels to include in the transformation matrix. By default, assume all channels. Returns ------- F : np.ndarray An array performing the Fourier transform from a complex time series to frequencies, with both input and output packed as alternating real and imaginary elements. """ if fsel is None: fa = np.arange(N) else: fa = np.array(fsel) fa = fa[:, np.newaxis] ta = np.arange(N)[np.newaxis, :] F = np.zeros((2 * fa.shape[0], 2 * N), dtype=np.float64) arg = 2 * np.pi * ta * fa / N F[0::2, 0::2] = np.cos(arg) F[0::2, 1::2] = np.sin(arg) F[1::2, 0::2] = -np.sin(arg) F[1::2, 1::2] = np.cos(arg) return F def _complex_to_alternating_real(array): """View complex numbers as an array with alternating real and imaginary components. Parameters ---------- array : array_like Input array of complex numbers. Returns ------- out : array_like Output array of alternating real and imaginary components. These components are expanded along the last axis, such that if `array` has `N` complex elements in its last axis, `out` will have `2N` real elements. """ return array.astype(np.complex128, order="C").view(np.float64) def _alternating_real_to_complex(array): """View real numbers as complex, interpreted as alternating real and imag. components. Parameters ---------- array : array_like Input array of real numbers. 
Last axis must have even number of elements. Returns ------- out : array_like Output array of complex numbers, derived from compressing the last axis (if `array` has `N` real elements in the last axis, `out` will have `N/2` complex elements). """ return array.astype(np.float64, order="C").view(np.complex128) def delay_spectrum_gibbs( data, N, Ni, initial_S, window="nuttall", fsel=None, niter=20, rng=None, complex_timedomain=False, ): """Estimate the delay power spectrum by Gibbs sampling. This routine estimates the spectrum at the `N` delay samples conjugate to an input frequency spectrum with ``N/2 + 1`` channels (if the delay spectrum is assumed real) or `N` channels (if the delay spectrum is assumed complex). A subset of these channels can be specified using the `fsel` argument. Parameters ---------- data : np.ndarray[:, freq] Data to estimate the delay spectrum of. N : int The length of the output delay spectrum. There are assumed to be `N/2 + 1` total frequency channels if assuming a real delay spectrum, or `N` channels for a complex delay spectrum. Ni : np.ndarray[freq] Inverse noise variance. initial_S : np.ndarray[delay] The initial delay power spectrum guess. window : one of {'nuttall', 'blackman_nuttall', 'blackman_harris', None}, optional Apply an apodisation function. Default: 'nuttall'. fsel : np.ndarray[freq], optional Indices of channels that we have data at. By default assume all channels. niter : int, optional Number of Gibbs samples to generate. rng : np.random.Generator, optional A generator to use to produce the random samples. complex_timedomain : bool, optional If True, assume input data arose from a complex timestream. If False, assume input data arose from a real timestream, such that the first and last frequency channels have purely real values. Default: False. Returns ------- spec : list List of spectrum samples. 
""" # Get reference to RNG if rng is None: rng = random.default_rng() spec = [] total_freq = N if complex_timedomain else N // 2 + 1 if fsel is None: fsel = np.arange(total_freq) # Construct the Fourier matrix F = ( fourier_matrix_c2c(N, fsel) if complex_timedomain else fourier_matrix_r2c(N, fsel) ) # Construct a view of the data with alternating real and imaginary parts data = _complex_to_alternating_real(data).T.copy() # Window the frequency data if window is not None: # Construct the window function x = fsel * 1.0 / total_freq w = window_generalised(x, window=window) w = np.repeat(w, 2) # Apply to the projection matrix and the data F *= w[:, np.newaxis] data *= w[:, np.newaxis] if complex_timedomain: is_real_freq = np.zeros_like(fsel).astype(bool) else: is_real_freq = (fsel == 0) | (fsel == N // 2) # Construct the Noise inverse array for the real and imaginary parts of the # frequency spectrum (taking into account that the zero and Nyquist frequencies are # strictly real if the delay spectrum is assumed to be real) Ni_r = np.zeros(2 * Ni.shape[0]) Ni_r[0::2] = np.where(is_real_freq, Ni, Ni / 2**0.5) Ni_r[1::2] = np.where(is_real_freq, 0.0, Ni / 2**0.5) # Create the transpose of the Fourier matrix weighted by the noise # (this is used multiple times) FTNih = F.T * Ni_r[np.newaxis, :] ** 0.5 FTNiF = np.dot(FTNih, FTNih.T) # Pre-whiten the data to save doing it repeatedly data = data * Ni_r[:, np.newaxis] ** 0.5 # Set the initial guess for the delay power spectrum. S_samp = initial_S def _draw_signal_sample_f(S): # Draw a random sample of the signal (delay spectrum) assuming a Gaussian model # with a given delay power spectrum `S`. Do this using the perturbed Wiener # filter approach # This method is fastest if the number of frequencies is larger than the number # of delays we are solving for. 
Typically this isn't true, so we probably want # `_draw_signal_sample_t` # Construct the Wiener covariance if complex_timedomain: # If delay spectrum is complex, extend S to correspond to the individual # real and imaginary components of the delay spectrum, each of which have # power spectrum equal to 0.5 times the power spectrum of the complex # delay spectrum, if the statistics are circularly symmetric S = 0.5 * np.repeat(S, 2) Si = 1.0 / S Ci = np.diag(Si) + FTNiF # Draw random vectors that form the perturbations if complex_timedomain: # If delay spectrum is complex, draw for real and imaginary components # separately w1 = rng.standard_normal((2 * N, data.shape[1])) else: w1 = rng.standard_normal((N, data.shape[1])) w2 = rng.standard_normal(data.shape) # Construct the random signal sample by forming a perturbed vector and # then doing a matrix solve y = np.dot(FTNih, data + w2) + Si[:, np.newaxis] ** 0.5 * w1 return la.solve(Ci, y, sym_pos=True) def _draw_signal_sample_t(S): # This method is fastest if the number of delays is larger than the number of # frequencies. This is usually the regime we are in. 
# Construct various dependent matrices if complex_timedomain: # If delay spectrum is complex, extend S to correspond to the individual # real and imaginary components of the delay spectrum, each of which have # power spectrum equal to 0.5 times the power spectrum of the complex # delay spectrum, if the statistics are circularly symmetric S = 0.5 * np.repeat(S, 2) Sh = S**0.5 Rt = Sh[:, np.newaxis] * FTNih R = Rt.T.conj() # Draw random vectors that form the perturbations if complex_timedomain: # If delay spectrum is complex, draw for real and imaginary components # separately w1 = rng.standard_normal((2 * N, data.shape[1])) else: w1 = rng.standard_normal((N, data.shape[1])) w2 = rng.standard_normal(data.shape) # Perform the solve step (rather than explicitly using the inverse) y = data + w2 - np.dot(R, w1) Ci = np.identity(len(Ni_r)) + np.dot(R, Rt) x = la.solve(Ci, y, sym_pos=True) s = Sh[:, np.newaxis] * (np.dot(Rt, x) + w1) return s def _draw_ps_sample(d): # Draw a random delay power spectrum sample assuming the signal is Gaussian and # we have a flat prior on the power spectrum. # This means drawing from a inverse chi^2. if complex_timedomain: # If delay spectrum is complex, combine real and imaginary components # stored in d, such that variance below is variance of complex spectrum d = d[0::2] + 1.0j * d[1::2] S_hat = d.var(axis=1) df = d.shape[1] chi2 = rng.chisquare(df, size=d.shape[0]) S_samp = S_hat * df / chi2 return S_samp # Select the method to use for the signal sample based on how many frequencies # versus delays there are _draw_signal_sample = ( _draw_signal_sample_f if (len(fsel) > 0.25 * N) else _draw_signal_sample_t ) # Perform the Gibbs sampling iteration for a given number of loops and # return the power spectrum output of them. 
for ii in range(niter): d_samp = _draw_signal_sample(S_samp) S_samp = _draw_ps_sample(d_samp) spec.append(S_samp) return spec def null_delay_filter(freq, max_delay, mask, num_delay=200, tol=1e-8, window=True): """Take frequency data and null out any delays below some value. Parameters ---------- freq : np.ndarray[freq] Frequencies we have data at. max_delay : float Maximum delay to keep. mask : np.ndarray[freq] Frequencies to mask out. num_delay : int, optional Number of delay values to use. tol : float, optional Cut off value for singular values. window : bool, optional Apply a window function to the data while filtering. Returns ------- filter : np.ndarray[freq, freq] The filter as a 2D matrix. """ # Construct the window function x = (freq - freq.min()) / freq.ptp() w = window_generalised(x, window="nuttall") delay = np.linspace(-max_delay, max_delay, num_delay) # Construct the Fourier matrix F = mask[:, np.newaxis] * np.exp( 2.0j * np.pi * delay[np.newaxis, :] * freq[:, np.newaxis] ) if window: F *= w[:, np.newaxis] # Use an SVD to figure out the set of significant modes spanning the delays # we are wanting to get rid of. # NOTE: we've experienced some convergence failures in here which ultimately seem # to be the fault of MKL (see https://github.com/scipy/scipy/issues/10032 and links # therein). This seems to be limited to the `gesdd` LAPACK routine, so we can get # around it by switching to `gesvd`. u, sig, vh = la.svd(F, lapack_driver="gesvd") nmodes = np.sum(sig > tol * sig.max()) p = u[:, :nmodes] # Construct a projection matrix for the filter proj = np.identity(len(freq)) - np.dot(p, p.T.conj()) proj *= mask[np.newaxis, :] if window: proj *= w[np.newaxis, :] return proj def match_axes(dset1, dset2): """Make sure that dset2 has the same set of axes as dset1. Sometimes the weights are missing axes (usually where the entries would all be the same), we need to map these into one another and expand the weights to the same size as the visibilities. 
This assumes that the vis/weight axes are in the same order when present Parameters ---------- dset1 The dataset with more axes. dset2 The dataset with a subset of axes. For the moment these are assumed to be in the same order. Returns ------- dset2_view A view of dset2 with length-1 axes inserted to match the axes missing from dset1. """ axes1 = dset1.attrs["axis"] axes2 = dset2.attrs["axis"] bcast_slice = tuple(slice(None) if ax in axes2 else np.newaxis for ax in axes1) return dset2[:][bcast_slice] def _move_front(arr: np.ndarray, axis: int, shape: tuple) -> np.ndarray: # Move the specified axis to the front and flatten to give a 2D array new_arr = np.moveaxis(arr, axis, 0) return new_arr.reshape(shape[axis], -1) def _inv_move_front(arr: np.ndarray, axis: int, shape: tuple) -> np.ndarray: # Move the first axis back to it's original position and return the original shape, # i.e. reverse the above operation rshape = (shape[axis],) + shape[:axis] + shape[(axis + 1) :] new_arr = arr.reshape(rshape) new_arr = np.moveaxis(new_arr, 0, axis) return new_arr.reshape(shape) def _take_view(arr: np.ndarray, ind: int, axis: int) -> np.ndarray: # Like np.take but returns a view (instead of a copy), but only supports a scalar # index sl = (slice(None),) * axis return arr[sl + (ind,)]
{ "content_hash": "3b3bc9f62696e7acf80d5b522cfc95e0", "timestamp": "", "source": "github", "line_count": 1292, "max_line_length": 90, "avg_line_length": 35.725232198142415, "alnum_prop": 0.606321901336742, "repo_name": "radiocosmology/draco", "id": "0d3d9eb07f5e994f9161a4c02f40980f3869871c", "size": "46157", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "draco/analysis/delay.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "3685" }, { "name": "Cython", "bytes": "9321" }, { "name": "Python", "bytes": "751627" } ], "symlink_target": "" }
"""Batch iterators that split a dataset (in memory or on disk) into batches.

``BatchIterator`` defines the iteration protocol; ``DefaultBatchIterator``
strides over an in-memory sequence, while ``FileBatchIterator`` streams
consecutive line ranges from a text file.
"""

from abc import ABCMeta, abstractmethod

import numpy as np


class BatchIterator(object):
    """Abstract base yielding one batch per iteration step.

    Subclasses implement :meth:`next_batch`, returning the next batch or
    ``None`` once all ``batch`` batches have been produced.
    """

    # Python 2 metaclass declaration; ignored on Python 3 but kept for
    # backward compatibility with the original code base.
    __metaclass__ = ABCMeta

    def __init__(self, batch):
        # batch: total number of batches the data is split into
        self.batch = batch
        # index of the most recently produced batch (-1 = none yet)
        self.current_batch = -1

    @abstractmethod
    def next_batch(self):
        """Return the next batch, or ``None`` when exhausted."""
        pass

    def back2start(self):
        """Rewind so iteration restarts from the first batch."""
        self.current_batch = -1

    def next(self):
        next_batch = self.next_batch()
        # BUGFIX: must be an identity test. ``next_batch == None`` performs an
        # element-wise comparison when the batch is a numpy array (as returned
        # by FileBatchIterator), making the ``if`` raise ValueError.
        if next_batch is None:
            raise StopIteration
        return next_batch

    # Python 3 iterator protocol; ``next`` is kept for Python 2 callers.
    __next__ = next

    def __iter__(self):
        return self
#endclass BatchIterator


class DefaultBatchIterator(BatchIterator):
    """Split an in-memory sequence into ``batch`` strided slices."""

    def __init__(self, batch, data):
        BatchIterator.__init__(self, batch)
        self.data = data

    def next_batch(self):
        if self.current_batch >= self.batch - 1:
            return None
        self.current_batch += 1
        # Batch i holds elements i, i + batch, i + 2 * batch, ...
        return self.data[self.current_batch : : self.batch]
#endclass DefaultBatchIterator


class FileBatchIterator(BatchIterator):
    """Split the lines of a text file into ``batch`` consecutive chunks."""

    def _file_length(self, filename):
        # Count lines without loading the whole file into memory.
        count = 0
        with open(filename, 'r') as fp:
            for line in fp:
                count += 1
        return count

    def __init__(self, batch, filename):
        BatchIterator.__init__(self, batch)
        self.filename = filename
        self.file_length = self._file_length(filename)
        self._f = open(filename, 'r')
        # Floor division keeps this an int on Python 3 as well (the original
        # ``/`` relied on Python 2 integer division).
        self.one_batch_line = self.file_length // batch

    def next_batch(self):
        if self.current_batch >= self.batch - 1:
            return None
        self.current_batch += 1
        data = []
        # The first ``file_length % batch`` batches absorb one extra line so
        # the batch sizes sum exactly to the number of lines in the file.
        max_range = self.one_batch_line if self.current_batch >= self.file_length % self.batch else self.one_batch_line + 1
        # ``range`` instead of Python 2-only ``xrange``; works on both.
        for i in range(max_range):
            data.append(self._f.readline().strip())
        return np.array(data)

    def back2start(self):
        # Reopen the file so reads restart from the first line.
        BatchIterator.back2start(self)
        self._f.close()
        self._f = open(self.filename, 'r')
#endclass FileBatchIterator


if __name__ == '__main__':
    fbi = FileBatchIterator(5, 'backprop.py')
    # ``print(...)`` with a single argument is valid Python 2 and 3.
    print(len(fbi.next()))
    print(len(fbi.next()))
    print(len(fbi.next()))
    print(len(fbi.next()))
    print(len(fbi.next()))
{ "content_hash": "a71035e076554f94cdfb0e9f825cd5d9", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 117, "avg_line_length": 23.2875, "alnum_prop": 0.6859903381642513, "repo_name": "jasonwbw/EffictiveRBM", "id": "f06b682152aa2bf25bf93e4fc24e1bbe522b591b", "size": "1942", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rbm/autoencoder/batch_iterator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "52410" } ], "symlink_target": "" }
""" RemoveNamedArguments turns named arguments into regular ones. """ from pythran.analyses import Aliases from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError import ast class RemoveNamedArguments(Transformation): ''' Replace call with named arguments to regular calls >>> import ast >>> from pythran import passmanager, backend >>> code = 'def foo(x, y): return x + y\\ndef bar(z): return foo(y=z, x=0)' >>> node = ast.parse(code) >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveNamedArguments, node) >>> print pm.dump(backend.Python, node) def foo(x, y): return (x + y) def bar(z): return foo(0, z) ''' def __init__(self): super(RemoveNamedArguments, self).__init__(Aliases) def handle_keywords(self, func, node, offset=0): ''' Gather keywords to positional argument information Assumes the named parameter exist, raises a ValueError otherwise ''' func_argument_names = {} for i, arg in enumerate(func.args.args[offset:]): assert isinstance(arg, ast.Name) func_argument_names[arg.id] = i nargs = len(func.args.args) - offset defaults = func.args.defaults keywords = {func_argument_names[kw.arg]: kw.value for kw in node.keywords} node.args.extend([None] * (1 + max(keywords.keys()) - len(node.args))) replacements = {} for index, arg in enumerate(node.args): if arg is None: if index in keywords: replacements[index] = keywords[index] else: # must be a default value replacements[index] = defaults[index - nargs] return replacements def visit_Call(self, node): if node.keywords: aliases = self.aliases[node.func].aliases assert aliases, "at least one alias" # all aliases should have the same structural type... 
# call to self.handle_keywords raises an exception otherwise try: replacements = {} for func_alias in aliases: if func_alias is None: # aliasing computation failed pass elif type(func_alias) is ast.Call: # nested function # func_alias looks like functools.partial(foo, a) # so we reorder using alias for 'foo' offset = len(func_alias.args) - 1 call = func_alias.args[0] for func_alias in self.aliases[call].aliases: replacements = self.handle_keywords(func_alias, node, offset) else: replacements = self.handle_keywords(func_alias, node) # if we reach this point, we should have a replacement # candidate, or nothing structural typing issues would have # raised an exception in handle_keywords if replacements: for index, value in replacements.iteritems(): node.args[index] = value node.keywords = [] except: err = "function aliases to incompatible types" raise PythranSyntaxError(err, node) return self.generic_visit(node)
{ "content_hash": "12e8654e64ae3508b787d6b8acb0b5e4", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 79, "avg_line_length": 38.44565217391305, "alnum_prop": 0.5515973989256432, "repo_name": "artas360/pythran", "id": "198d64ee73afe4990b3266cdba8d1675a020be78", "size": "3537", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pythran/transformations/remove_named_arguments.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "1335689" }, { "name": "Makefile", "bytes": "1185" }, { "name": "Python", "bytes": "1162293" }, { "name": "Shell", "bytes": "250" } ], "symlink_target": "" }
from django import template

register = template.Library()


class VarNode(template.Node):
    """Template node that assigns a resolved expression to a context var."""

    def __init__(self, var_name, var_to_resolve):
        self.var_name = var_name
        self.var_to_resolve = var_to_resolve

    def get_context(self, top_context):
        # Pick the innermost context dict that already defines the variable
        # so re-assignment updates it in place; fall back to the top-level
        # context for a brand-new variable.
        for context in top_context.dicts:
            if self.var_name in context:
                return context
        return top_context

    def render(self, context):
        try:
            # NOTE(review): template.resolve_variable is deprecated in newer
            # Django (template.Variable(...).resolve(context)); kept as-is
            # for compatibility with the Django version this project targets.
            resolved_var = template.resolve_variable(
                self.var_to_resolve, context
            )
            self.get_context(context)[self.var_name] = resolved_var
        except template.VariableDoesNotExist:
            # a missing variable stores "" instead of failing the render
            self.get_context(context)[self.var_name] = ""
        # assignment tags produce no output themselves
        return ""


@register.tag
def var(parser, token):
    """
    {% var foo = expression %}
    {% var foo = Model.foo_set.count %}
    {% var foo = foo|restructuredtext %}

    {{ foo }}
    {{ foo|escape }}
    """
    args = token.split_contents()
    if len(args) != 4 or args[2] != "=":
        # Literal '%' must be doubled in a %-format string: the original
        # single '%}' made this line itself raise ValueError ("unsupported
        # format character") instead of the intended TemplateSyntaxError.
        raise template.TemplateSyntaxError(
            "'%s' statement requires the form {%% %s foo = bar %%}." % (
                args[0], args[0])
        )
    return VarNode(args[1], args[3])
{ "content_hash": "71a26d0a862ac7564f86070c7b17f79a", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 70, "avg_line_length": 27.6, "alnum_prop": 0.5523349436392915, "repo_name": "manufacturedba/pinax", "id": "e209fbf0610b30dcdb17ed3e47a5ff8a7044195c", "size": "1242", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "pinax/templatetags/templatetags/var_tag.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import re

try:
    from urllib import quote
except ImportError:
    from urllib.parse import quote

import nltk
from nltk.stem.wordnet import WordNetLemmatizer

# Raw strings keep the regex backslashes out of Python's string-escape
# processing (sequences like '\&' and '\[' are invalid string escapes and
# warn on Python >= 3.6); the compiled patterns are unchanged.
CLEAN_LINK = re.compile(r'(?<=^\/)\/+|\/+$')
CLEAN_WORD = re.compile(r'[\[\],().:;"\/\'?*%!*+=@$;#%{}`~\r\n\t]')
LONG_DASH = re.compile(r'(\&#8212;)')
MIN_TAG_LENGTH = 2
# the curly-quote alternatives mix real \xhh character escapes with regex
# escapes, so they are split across a plain and a raw literal
SMART_QUOTES_D = re.compile('(\xe2\x80\x9c)|(\xe2\x80\x9d)'
                            r'|(\&#8220;)|(\&#8221;)')
SMART_QUOTES_S = re.compile('(\xe2\x80\x98)|(\xe2\x80\x99)'
                            r'|(\&#8216;)|(\&#8217;)')
STOP_WORDS = ['DT', 'IN', 'TO', 'VBD', 'VBD', 'VBG', 'VBN', 'VBZ', 'MD',
              'RB', 'CC', 'WDT', 'PRO', 'PRP', 'PRP$']


class AutoTagify():
    """Turn the interesting words of a text into HTML tag links."""

    # WordNetLemmatizer keeps no per-call state, so one shared instance
    # serves every AutoTagify object
    lemma = WordNetLemmatizer()

    def __init__(self):
        self.css = ''    # CSS class applied to generated <a> tags
        self.link = ''   # base URL the tag links point at
        self.text = ''   # text to tagify; set by the caller

    def _tokenize(self):
        """Tag words from the string (part-of-speech tagging via nltk)."""
        return nltk.pos_tag(nltk.word_tokenize(self._clean_text()))

    def _cleaned(self, word, strict):
        # strict mode lemmatizes ("cats" -> "cat"); otherwise the cleaned
        # word is URL-quoted as-is
        lemmatized = self.lemma.lemmatize(self._clean_text(word))
        if strict:
            return lemmatized
        else:
            return quote(self._clean_text(word))

    def _clean_text(self, word=''):
        # with no (or a too-short) word argument, clean the whole text
        # buffer instead
        if len(word) > MIN_TAG_LENGTH:
            return CLEAN_WORD.sub('', self._replace_special_chars(word.lower()))
        else:
            return CLEAN_WORD.sub('', self._replace_special_chars(self.text))

    def _replace_special_chars(self, text):
        # normalize long dashes and curly quotes to their ASCII equivalents
        return SMART_QUOTES_S.sub('\'', SMART_QUOTES_D.sub('"', LONG_DASH.sub('-', text)))

    def generate(self, strict=True):
        """Return the HTML version of tags for the string.

        Qualifying words are wrapped in <a href="link/tag" class="css">
        anchors; stop words and short words pass through untouched.
        """
        tag_words = []
        for (word, word_type) in self._tokenize():
            # NOTE(review): with strict=False the word comes back from
            # _cleaned already quote()d and is quoted again below --
            # looks double-encoded; confirm intended before changing.
            tag_word = self._cleaned(word, strict)
            if len(tag_word) > MIN_TAG_LENGTH and word_type not in STOP_WORDS:
                tag_words.append('<a href="%s/%s" class="%s">%s</a> '
                                 % (CLEAN_LINK.sub('', self.link),
                                    quote(tag_word),
                                    CLEAN_WORD.sub('', self.css),
                                    self._replace_special_chars(word)))
            else:
                tag_words.append(word + ' ')
        return ''.join(tag_words)

    def tag_list(self, strict=True):
        """Return the tags from string as a list.

        If strict is set to True, then only return the stemmed version.
        Otherwise, return the full string - therefore, `cat` will be
        considered different from `cats`.
        """
        tag_words = []
        for (word, word_type) in self._tokenize():
            tag_word = self._cleaned(word, strict)
            if len(tag_word) > MIN_TAG_LENGTH and word_type not in STOP_WORDS:
                tag_words.append(tag_word)
        return tag_words
{ "content_hash": "acb5a5ed58aeb3b53bacf90cf2fc2e9a", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 108, "avg_line_length": 39.178082191780824, "alnum_prop": 0.5223776223776224, "repo_name": "ednapiranha/auto-tagify", "id": "344d1f148d4f85b90f72e84da696990b4f659808", "size": "2884", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "auto_tagify2/auto_tagify2.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "6997" } ], "symlink_target": "" }
from django.db import migrations


class Migration(migrations.Migration):
    """Remove the ``management_actions`` field from the ``Document`` model.

    Presumably superseded by the ``ManagementAction.document`` relation
    added in the 0006 dependency -- confirm against the model history.
    """

    dependencies = [
        ('conservation', '0006_managementaction_document'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='document',
            name='management_actions',
        ),
    ]
{ "content_hash": "f7d190e3fc38fb9be653202236a2b46a", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 59, "avg_line_length": 20.333333333333332, "alnum_prop": 0.5967213114754099, "repo_name": "parksandwildlife/wastd", "id": "910966f8810156e6f4e983828d5589135508e533", "size": "354", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "conservation/migrations/0007_remove_document_management_actions.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "9150" }, { "name": "HTML", "bytes": "60851" }, { "name": "JavaScript", "bytes": "18966" }, { "name": "Python", "bytes": "853568" }, { "name": "Shell", "bytes": "4200" }, { "name": "TeX", "bytes": "16951" } ], "symlink_target": "" }
"""Main entry point""" import sys if sys.argv[0].endswith("__main__.py"): sys.argv[0] = "nose2" __unittest = True if __name__ == '__main__': from nose2 import discover discover()
{ "content_hash": "d1f3d3257faac4aee7b4eba2471dd9d5", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 39, "avg_line_length": 16.25, "alnum_prop": 0.5794871794871795, "repo_name": "ezigman/nose2", "id": "b8bbf93f48811aae7e8fd1f303e5f83af91a16c0", "size": "195", "binary": false, "copies": "17", "ref": "refs/heads/master", "path": "nose2/__main__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "142" }, { "name": "Python", "bytes": "403392" }, { "name": "Shell", "bytes": "1125" } ], "symlink_target": "" }