Search is not available for this dataset
text
stringlengths
75
104k
def _glob_to_regexp(pat): """Compile a glob pattern into a regexp. We need to do this because fnmatch allows * to match /, which we don't want. E.g. an MANIFEST.in exclude of 'dirname/*css' should match 'dirname/foo.css' but not 'dirname/subdir/bar.css'. """ pat = fnmatch.translate(pat) # Note that distutils in Python 2.6 has a buggy glob_to_re in # distutils.filelist -- it converts '*.cfg' to '[^/]*cfg' instead # of '[^\\]*cfg' on Windows. sep = r'\\\\' if os.path.sep == '\\' else os.path.sep return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^%s]' % sep, pat)
def file_matches(filename, patterns):
    """Does this filename match any of the patterns?"""
    for pattern in patterns:
        if fnmatch.fnmatch(filename, pattern):
            return True
    return False
def get_versioned_files():
    """List all files versioned by git in the current directory."""
    # Git for Windows uses UTF-8 instead of the locale encoding.
    # Regular Git on sane POSIX systems uses the locale encoding.
    if sys.platform == 'win32':
        encoding = 'UTF-8'
    else:
        encoding = None
    output = run(['git', 'ls-files', '-z'], encoding=encoding)
    # 'git ls-files -z' NUL-terminates every entry, so splitting on
    # '\0' leaves one trailing empty string that we drop.
    return add_directories(output.split('\0')[:-1])
def start_kernel(self, **kwargs):
    """Start a new kernel.

    Generates a fresh uuid4 kernel id, builds a KernelManager for it via
    ``self.kernel_manager_factory``, starts the kernel process and its
    shell channel, and registers the manager in ``self._kernels``.

    Returns the new kernel id (a unicode string).
    """
    # NOTE: ``unicode`` is the Python 2 builtin; this module targets
    # Python 2.
    kernel_id = unicode(uuid.uuid4())
    # use base KernelManager for each Kernel
    km = self.kernel_manager_factory(connection_file=os.path.join(
        self.connection_dir, "kernel-%s.json" % kernel_id),
        config=self.config,
    )
    km.start_kernel(**kwargs)
    # start just the shell channel, needed for graceful restart
    km.start_channels(shell=True, sub=False, stdin=False, hb=False)
    self._kernels[kernel_id] = km
    return kernel_id
def shutdown_kernel(self, kernel_id):
    """Shutdown a kernel by its kernel uuid.

    Parameters
    ==========
    kernel_id : uuid
        The id of the kernel to shutdown.
    """
    # Look the manager up first (raises KeyError for unknown ids),
    # ask it to shut down, then forget it.
    km = self.get_kernel(kernel_id)
    km.shutdown_kernel()
    del self._kernels[kernel_id]
def kill_kernel(self, kernel_id):
    """Kill a kernel by its kernel uuid.

    Parameters
    ==========
    kernel_id : uuid
        The id of the kernel to kill.
    """
    # Forcibly terminate the kernel, then drop it from the registry.
    km = self.get_kernel(kernel_id)
    km.kill_kernel()
    del self._kernels[kernel_id]
def get_kernel(self, kernel_id):
    """Get the single KernelManager object for a kernel by its uuid.

    Parameters
    ==========
    kernel_id : uuid
        The id of the kernel.

    Raises KeyError when no kernel with that id is registered.
    """
    km = self._kernels.get(kernel_id)
    # Guard clause: an unknown (or None-valued) entry is an error.
    if km is None:
        raise KeyError("Kernel with id not found: %s" % kernel_id)
    return km
def get_kernel_ports(self, kernel_id):
    """Return a dictionary of ports for a kernel.

    Parameters
    ==========
    kernel_id : uuid
        The id of the kernel.

    Returns
    =======
    port_dict : dict
        A dict of key, value pairs where the keys are the names
        (stdin_port,iopub_port,shell_port) and the values are the
        integer port numbers for those channels.
    """
    # this will raise a KeyError if not found:
    km = self.get_kernel(kernel_id)
    return {
        'shell_port': km.shell_port,
        'iopub_port': km.iopub_port,
        'stdin_port': km.stdin_port,
        'hb_port': km.hb_port,
    }
def notebook_for_kernel(self, kernel_id):
    """Return the notebook_id for a kernel_id or None."""
    # Reverse lookup in the notebook -> kernel mapping (Python 2
    # ``iteritems``).  A kernel is expected to map to at most one
    # notebook; zero or multiple matches yield None.
    notebook_ids = [k for k, v in self._notebook_mapping.iteritems() if v == kernel_id]
    if len(notebook_ids) == 1:
        return notebook_ids[0]
    else:
        return None
def delete_mapping_for_kernel(self, kernel_id):
    """Remove the kernel/notebook mapping for kernel_id."""
    notebook_id = self.notebook_for_kernel(kernel_id)
    # Nothing to do for kernels with no notebook association.
    if notebook_id is None:
        return
    del self._notebook_mapping[notebook_id]
def start_kernel(self, notebook_id=None, **kwargs):
    """Start a kernel for a notebook and return its kernel_id.

    Parameters
    ----------
    notebook_id : uuid
        The uuid of the notebook to associate the new kernel with. If
        this is not None, this kernel will be persistent whenever the
        notebook requests a kernel.
    """
    kernel_id = self.kernel_for_notebook(notebook_id)
    if kernel_id is None:
        # No kernel for this notebook yet: start one via the base class
        # and remember the notebook <-> kernel association.
        kwargs['extra_arguments'] = self.kernel_argv
        kernel_id = super(MappingKernelManager, self).start_kernel(**kwargs)
        self.set_kernel_for_notebook(notebook_id, kernel_id)
        self.log.info("Kernel started: %s" % kernel_id)
        self.log.debug("Kernel args: %r" % kwargs)
    else:
        self.log.info("Using existing kernel: %s" % kernel_id)
    return kernel_id
def shutdown_kernel(self, kernel_id):
    """Shutdown a kernel and remove its notebook association."""
    # Validate the id, delegate the actual shutdown to the base class,
    # then drop the notebook mapping.
    self._check_kernel_id(kernel_id)
    super(MappingKernelManager, self).shutdown_kernel(kernel_id)
    self.delete_mapping_for_kernel(kernel_id)
    self.log.info("Kernel shutdown: %s" % kernel_id)
def interrupt_kernel(self, kernel_id):
    """Interrupt a kernel."""
    # Validate the id, then delegate to the base class.
    self._check_kernel_id(kernel_id)
    super(MappingKernelManager, self).interrupt_kernel(kernel_id)
    self.log.info("Kernel interrupted: %s" % kernel_id)
def restart_kernel(self, kernel_id):
    """Restart a kernel while keeping clients connected."""
    self._check_kernel_id(kernel_id)
    km = self.get_kernel(kernel_id)
    km.restart_kernel()
    self.log.info("Kernel restarted: %s" % kernel_id)
    return kernel_id

    # NOTE(review): everything below the return above is unreachable
    # dead code, deliberately retained per the original comment as a
    # fallback implementation.
    # the following remains, in case the KM restart machinery is
    # somehow unacceptable
    # Get the notebook_id to preserve the kernel/notebook association.
    notebook_id = self.notebook_for_kernel(kernel_id)
    # Create the new kernel first so we can move the clients over.
    new_kernel_id = self.start_kernel()
    # Now kill the old kernel.
    self.kill_kernel(kernel_id)
    # Now save the new kernel/notebook association. We have to save it
    # after the old kernel is killed as that will delete the mapping.
    self.set_kernel_for_notebook(notebook_id, new_kernel_id)
    self.log.info("Kernel restarted: %s" % new_kernel_id)
    return new_kernel_id
def create_iopub_stream(self, kernel_id):
    """Create a new iopub stream."""
    # Validate the id first; the base class builds the actual stream.
    self._check_kernel_id(kernel_id)
    return super(MappingKernelManager, self).create_iopub_stream(kernel_id)
def create_shell_stream(self, kernel_id):
    """Create a new shell stream."""
    # Validate the id first; the base class builds the actual stream.
    self._check_kernel_id(kernel_id)
    return super(MappingKernelManager, self).create_shell_stream(kernel_id)
def create_hb_stream(self, kernel_id):
    """Create a new hb stream."""
    # Validate the id first; the base class builds the actual stream.
    self._check_kernel_id(kernel_id)
    return super(MappingKernelManager, self).create_hb_stream(kernel_id)
def configure(self, options, conf):
    """Configure plugin."""
    # Bail out early if configuration is not permitted.
    if not self.can_configure:
        return
    self.conf = conf
    # The plugin disables itself when the no-deprecated option was set.
    if getattr(options, 'noDeprecated', False):
        self.enabled = False
def reset(self):
    """Reset all OneTimeProperty attributes that may have fired already."""
    instdict = self.__dict__
    classdict = self.__class__.__dict__
    # To reset them, we simply remove them from the instance dict. At that
    # point, it's as if they had never been computed. On the next access,
    # the accessor function from the parent class will be called, simply
    # because that's how the python descriptor protocol works.
    for mname, mval in classdict.items():
        if mname in instdict and isinstance(mval, OneTimeProperty):
            delattr(self, mname)
def iseq(start=0, stop=None, inc=1):
    """
    Generate integers from start to (and including!) stop,
    with increment of inc. Alternative to range/xrange.
    """
    if stop is None:
        # allow isequence(3) to be 0, 1, 2, 3
        # take 1st arg as stop, start as 0, and inc=1
        start, stop, inc = 0, start, 1
    # stop+inc makes the endpoint inclusive, unlike range/arange.
    return arange(start, stop + inc, inc)
def export_html(html, filename, image_tag = None, inline = True):
    """ Export the contents of the ConsoleWidget as HTML.

    Parameters:
    -----------
    html : str,
        A utf-8 encoded Python string containing the Qt HTML to export.

    filename : str
        The file to be saved.

    image_tag : callable, optional (default None)
        Used to convert images. See ``default_image_tag()`` for information.

    inline : bool, optional [default True]
        If True, include images as inline PNGs.  Otherwise, include them as
        links to external PNG files, mimicking web browsers' "Web Page,
        Complete" behavior.
    """
    if image_tag is None:
        image_tag = default_image_tag
    else:
        # Python 2: guarantee the callable returns utf-8 bytes.
        image_tag = ensure_utf8(image_tag)

    if inline:
        path = None
    else:
        # External images go into a "<name>_files" sibling directory.
        root,ext = os.path.splitext(filename)
        path = root + "_files"
        if os.path.isfile(path):
            raise OSError("%s exists, but is not a directory." % path)

    with open(filename, 'w') as f:
        html = fix_html(html)
        # Rewrite every <img> via image_tag before writing out.
        f.write(IMG_RE.sub(lambda x: image_tag(x, path = path, format = "png"),
                           html))
def export_xhtml(html, filename, image_tag=None):
    """ Export the contents of the ConsoleWidget as XHTML with inline SVGs.

    Parameters:
    -----------
    html : str,
        A utf-8 encoded Python string containing the Qt HTML to export.

    filename : str
        The file to be saved.

    image_tag : callable, optional (default None)
        Used to convert images. See ``default_image_tag()`` for information.
    """
    if image_tag is None:
        image_tag = default_image_tag
    else:
        # Python 2: guarantee the callable returns utf-8 bytes.
        image_tag = ensure_utf8(image_tag)

    with open(filename, 'w') as f:
        # Hack to make xhtml header -- note that we are not doing any check for
        # valid XML.
        offset = html.find("<html>")
        assert offset > -1, 'Invalid HTML string: no <html> tag.'
        html = ('<html xmlns="http://www.w3.org/1999/xhtml">\n'+
                html[offset+6:])
        html = fix_html(html)
        # Rewrite every <img> as an inline SVG via image_tag.
        f.write(IMG_RE.sub(lambda x: image_tag(x, path = None, format = "svg"),
                           html))
def ensure_utf8(image_tag):
    """wrapper for ensuring image_tag returns utf8-encoded str on Python 2"""
    if py3compat.PY3:
        # nothing to do on Python 3
        return image_tag

    def utf8_image_tag(*args, **kwargs):
        # Encode any unicode result to utf-8 bytes (Python 2 only).
        s = image_tag(*args, **kwargs)
        if isinstance(s, unicode):
            s = s.encode('utf8')
        return s
    return utf8_image_tag
def fix_html(html):
    """ Transforms a Qt-generated HTML string into a standards-compliant one.

    Parameters:
    -----------
    html : str,
        A utf-8 encoded Python string containing the Qt HTML.
    """
    # A UTF-8 declaration is needed for proper rendering of some characters
    # (e.g., indented commands) when viewing exported HTML on a local system
    # (i.e., without seeing an encoding declaration in an HTTP header).
    # C.f. http://www.w3.org/International/O-charset for details.
    head_pos = html.find('<head>')
    if head_pos > -1:
        insert_at = head_pos + 6
        meta = ('\n<meta http-equiv="Content-Type" ' +
                'content="text/html; charset=utf-8" />\n')
        html = html[:insert_at] + meta + html[insert_at:]

    # Replace empty paragraphs tags with line breaks.
    return re.sub(EMPTY_P_RE, '<br/>', html)
def export(self):
    """ Displays a dialog for exporting HTML generated by Qt's rich text
    system.

    Returns
    -------
    The name of the file that was saved, or None if no file was saved.
    """
    parent = self.control.window()
    dialog = QtGui.QFileDialog(parent, 'Save as...')
    dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
    filters = [
        'HTML with PNG figures (*.html *.htm)',
        'XHTML with inline SVG figures (*.xhtml *.xml)'
    ]
    dialog.setNameFilters(filters)
    if self.filename:
        dialog.selectFile(self.filename)
        # Pre-select the XHTML filter when the remembered name looks XML-ish.
        root,ext = os.path.splitext(self.filename)
        if ext.lower() in ('.xml', '.xhtml'):
            dialog.selectNameFilter(filters[-1])

    if dialog.exec_():
        self.filename = dialog.selectedFiles()[0]
        choice = dialog.selectedNameFilter()
        html = self.control.document().toHtml().encode('utf-8')

        # Configure the exporter.
        if choice.startswith('XHTML'):
            exporter = export_xhtml
        else:
            # If there are PNGs, decide how to export them.
            inline = self.inline_png
            if inline is None and IMG_RE.search(html):
                # Ask the user: inline PNGs or external image files?
                dialog = QtGui.QDialog(parent)
                dialog.setWindowTitle('Save as...')
                layout = QtGui.QVBoxLayout(dialog)
                msg = "Exporting HTML with PNGs"
                info = "Would you like inline PNGs (single large html " \
                    "file) or external image files?"
                checkbox = QtGui.QCheckBox("&Don't ask again")
                checkbox.setShortcut('D')
                ib = QtGui.QPushButton("&Inline")
                ib.setShortcut('I')
                eb = QtGui.QPushButton("&External")
                eb.setShortcut('E')
                box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
                                        dialog.windowTitle(), msg)
                box.setInformativeText(info)
                box.addButton(ib, QtGui.QMessageBox.NoRole)
                box.addButton(eb, QtGui.QMessageBox.YesRole)
                layout.setSpacing(0)
                layout.addWidget(box)
                layout.addWidget(checkbox)
                dialog.setLayout(layout)
                dialog.show()
                reply = box.exec_()
                dialog.hide()
                inline = (reply == 0)
                if checkbox.checkState():
                    # Don't ask anymore; always use this choice.
                    self.inline_png = inline
            exporter = lambda h, f, i: export_html(h, f, i, inline)

        # Perform the export!
        try:
            return exporter(html, self.filename, self.image_tag)
        except Exception, e:
            # Python 2 except syntax; show the failure in a message box.
            msg = "Error exporting HTML to %s\n" % self.filename + str(e)
            reply = QtGui.QMessageBox.warning(parent, 'Error', msg,
                QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)

    # Dialog cancelled or export failed: nothing was saved.
    return None
def get_unique_or_none(klass, *args, **kwargs):
    """
    Returns a unique instance of `klass` or None

    Any lookup that does not resolve to exactly one row -- either no
    match (``DoesNotExist``) or an ambiguous match
    (``MultipleObjectsReturned``) -- yields None.
    """
    # FIX: the original duplicated two identical except branches and
    # ended with an unreachable ``return None`` after the try/except.
    try:
        return klass.objects.get(*args, **kwargs)
    except (klass.DoesNotExist, klass.MultipleObjectsReturned):
        return None
def get_or_create_unique(klass, defaults, unique_fields):
    """
    Returns a tuple of (instance, created), where `instance` is the
    retrieved or created instance of `klass` and `created` is a boolean
    specifying whether a new object was created.

    The value for the unique fields must be present in the defaults
    dictionary.
    """
    if not unique_fields or not defaults:
        return (None, False)
    uniqueness_query = {k: v for k, v in defaults.items() if k in unique_fields}
    try:
        with transaction.atomic():
            instance, created = klass.objects.get_or_create(defaults=defaults,
                                                            **uniqueness_query)
    except IntegrityError:
        # get_or_create lost a race; fall back to a direct save.
        try:
            # BUG FIX: Django's Model.save() returns None, so the
            # original ``instance, created = klass(**defaults).save(),
            # True`` always left ``instance`` as None.  Keep a reference
            # to the model object and save it separately.
            instance = klass(**defaults)
            instance.save()
            created = True
        except Exception:
            return (None, False)
    except Exception:
        return (None, False)
    if instance and not created:
        # Refresh attributes that currently hold a truthy value with the
        # supplied defaults, then persist the update.
        for attr, value in defaults.items():
            if getattr(instance, attr):
                setattr(instance, attr, value)
        instance.save()
    return (instance, created)
def get_text_tokenizer(query_string):
    """
    Tokenize the input string and return two lists: the exclude list
    holds words that start with a dash (ex: -word) and the include list
    holds all other words.
    """
    # Split on double-quoted runs, single-quoted runs, or any run of
    # continuous non-whitespace characters.
    split_pattern = re.compile('("[^"]+"|\'[^\']+\'|\S+)')
    # Collapse runs of internal whitespace / leading runs of dashes.
    space_cleanup_pattern = re.compile('[\s]{2,}')
    dash_cleanup_pattern = re.compile('^[-]{2,}')

    keywords = []
    for token in split_pattern.findall(query_string):
        stripped = token.strip(' "\'')
        if not stripped:
            continue
        cleaned = space_cleanup_pattern.sub(' ', stripped)
        keywords.append(dash_cleanup_pattern.sub('-', cleaned))

    include = [word for word in keywords if not word.startswith('-')]
    exclude = [word.lstrip('-') for word in keywords if word.startswith('-')]
    return include, exclude
def get_query_includes(tokenized_terms, search_fields):
    """ Builds a query for included terms in a text search. """
    query = None
    for term in tokenized_terms:
        # OR together a case-insensitive match on every field for this
        # term...
        or_query = None
        for field_name in search_fields:
            q = Q(**{"%s__icontains" % field_name: term})
            or_query = q if or_query is None else (or_query | q)
        # ...then AND the per-term queries together.
        query = or_query if query is None else (query & or_query)
    return query
def get_text_query(query_string, search_fields):
    """ Builds a query for both included & excluded terms in a text search. """
    include_terms, exclude_terms = get_text_tokenizer(query_string)
    include_q = get_query_includes(include_terms, search_fields)
    exclude_q = get_query_excludes(exclude_terms, search_fields)
    # Combine via early returns: both present, include only, exclude only.
    if include_q and exclude_q:
        return include_q & ~exclude_q
    if not exclude_q:
        # Nothing to exclude: the include query alone (possibly None).
        return include_q
    return ~exclude_q
def get_date_greater_query(days, date_field):
    """ Query for if date_field is within number of "days" ago. """
    days = get_integer(days)
    # A missing/zero day count produces no constraint.
    if not days:
        return None
    past = get_days_ago(days)
    return Q(**{"%s__gte" % date_field: past.isoformat()})
def get_date_less_query(days, date_field):
    """ Query for if date_field is within number of "days" from now. """
    days = get_integer(days)
    # A missing/zero day count produces no constraint.
    if not days:
        return None
    future = get_days_from_now(days)
    return Q(**{"%s__lte" % date_field: future.isoformat()})
def get_null_or_blank_query(field=None):
    """ Query for null or blank field. """
    if not field:
        # Preserve the falsy argument itself (None or empty string).
        return field
    return get_null_query(field) | get_blank_query(field)
def case_insensitive(self, fields_dict):
    """ Converts queries to case insensitive for special fields. """
    model = self.model
    if not hasattr(model, 'CASE_INSENSITIVE_FIELDS'):
        return
    for field in model.CASE_INSENSITIVE_FIELDS:
        if field in fields_dict:
            # Swap the exact-match key for Django's __iexact lookup.
            fields_dict[field + '__iexact'] = fields_dict.pop(field)
def attr(*args, **kwargs):
    """Decorator that adds attributes to classes or functions
    for use with the Attribute (-a) plugin.
    """
    def wrap_ob(ob):
        # Positional names become flag attributes set to True; keyword
        # arguments keep their given values (Python 2 ``iteritems``).
        for name in args:
            setattr(ob, name, True)
        for name, value in kwargs.iteritems():
            setattr(ob, name, value)
        return ob
    return wrap_ob
def get_method_attr(method, cls, attr_name, default = False):
    """Look up an attribute on a method/ function.
    If the attribute isn't found there, looking it up in the
    method's class, if any.
    """
    # Sentinel distinguishing "attribute absent" from any real value
    # (including None/False).
    missing = object()
    value = getattr(method, attr_name, missing)
    if value is missing and cls is not None:
        value = getattr(cls, attr_name, missing)
    return default if value is missing else value
def options(self, parser, env):
    """Register command line options"""
    # -a/--attr: plain 'key=value' attribute filters; default comes from
    # the NOSE_ATTR environment variable.
    parser.add_option("-a", "--attr",
                      dest="attr", action="append",
                      default=env.get('NOSE_ATTR'),
                      metavar="ATTR",
                      help="Run only tests that have attributes "
                      "specified by ATTR [NOSE_ATTR]")
    # disable in < 2.4: eval can't take needed args
    if compat_24:
        parser.add_option("-A", "--eval-attr",
                          dest="eval_attr", metavar="EXPR",
                          action="append",
                          default=env.get('NOSE_EVAL_ATTR'),
                          help="Run only tests for whose attributes "
                          "the Python expression EXPR evaluates "
                          "to True [NOSE_EVAL_ATTR]")
def configure(self, options, config):
    """Configure the plugin and system, based on selected options.

    attr and eval_attr may each be lists.

    self.attribs will be a list of lists of tuples. In that list, each
    list is a group of attributes, all of which must match for the rule
    to match.
    """
    self.attribs = []

    # handle python eval-expression parameter
    if compat_24 and options.eval_attr:
        eval_attr = tolist(options.eval_attr)
        for attr in eval_attr:
            # "<python expression>"
            # -> eval(expr) in attribute context must be True
            def eval_in_context(expr, obj, cls):
                return eval(expr, None, ContextHelper(obj, cls))
            self.attribs.append([(attr, eval_in_context)])

    # attribute requirements are a comma separated list of
    # 'key=value' pairs
    if options.attr:
        std_attr = tolist(options.attr)
        for attr in std_attr:
            # all attributes within an attribute group must match
            attr_group = []
            for attrib in attr.strip().split(","):
                # don't die on trailing comma
                if not attrib:
                    continue
                items = attrib.split("=", 1)
                if len(items) > 1:
                    # "name=value"
                    # -> 'str(obj.name) == value' must be True
                    key, value = items
                else:
                    key = items[0]
                    if key[0] == "!":
                        # "!name"
                        # 'bool(obj.name)' must be False
                        key = key[1:]
                        value = False
                    else:
                        # "name"
                        # -> 'bool(obj.name)' must be True
                        value = True
                attr_group.append((key, value))
            self.attribs.append(attr_group)

    # The plugin only activates itself when at least one rule was given.
    if self.attribs:
        self.enabled = True
def validateAttrib(self, method, cls = None):
    """Verify whether a method has the required attributes

    The method is considered a match if it matches all attributes
    for any attribute group.

    Returns None (acceptable, but not forced) on a match, False on no
    match.
    """
    # TODO: is there a need for case-sensitive value comparison?
    # FIX: the accumulator was previously named ``any``, shadowing the
    # builtin of the same name; renamed to ``any_matched``.
    any_matched = False
    for group in self.attribs:
        match = True
        for key, value in group:
            attr = get_method_attr(method, cls, key)
            if callable(value):
                # eval-style rule: the callable decides.
                if not value(key, method, cls):
                    match = False
                    break
            elif value is True:
                # value must exist and be True
                if not bool(attr):
                    match = False
                    break
            elif value is False:
                # value must not exist or be False
                if bool(attr):
                    match = False
                    break
            elif type(attr) in (list, tuple):
                # value must be found in the list attribute
                if not str(value).lower() in [str(x).lower() for x in attr]:
                    match = False
                    break
            else:
                # value must match, convert to string and compare
                if (value != attr
                        and str(value).lower() != str(attr).lower()):
                    match = False
                    break
        any_matched = any_matched or match
    if any_matched:
        # not True because we don't want to FORCE the selection of the
        # item, only say that it is acceptable
        return None
    return False
def wantMethod(self, method):
    """Accept the method if its attributes match.
    """
    try:
        # Python 2 bound-method attribute holding the defining class.
        cls = method.im_class
    except AttributeError:
        # Not a bound method (plain function): reject.
        return False
    return self.validateAttrib(method, cls)
def rotate(self):
    """ Rotate the kill ring, then yank back the new top.
    """
    # Only meaningful immediately after a yank; otherwise do nothing.
    if self._prev_yank:
        text = self._ring.rotate()
        if text:
            self._skip_cursor = True
            cursor = self._text_edit.textCursor()
            # Select the previously yanked text so insertText replaces
            # it with the new ring top.
            cursor.movePosition(QtGui.QTextCursor.Left,
                                QtGui.QTextCursor.KeepAnchor,
                                n = len(self._prev_yank))
            cursor.insertText(text)
            self._prev_yank = text
def patch_pyzmq():
    """backport a few patches from newer pyzmq

    These can be removed as we bump our minimum pyzmq version
    """
    import zmq

    # ioloop.install, introduced in pyzmq 2.1.7
    from zmq.eventloop import ioloop

    def install():
        # Make tornado use pyzmq's IOLoop implementation.
        import tornado.ioloop
        tornado.ioloop.IOLoop = ioloop.IOLoop

    if not hasattr(ioloop, 'install'):
        ioloop.install = install

    # fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9
    if not hasattr(zmq, 'DEALER'):
        zmq.DEALER = zmq.XREQ
    if not hasattr(zmq, 'ROUTER'):
        zmq.ROUTER = zmq.XREP

    # fallback on stdlib json if jsonlib is selected, because jsonlib breaks things.
    # jsonlib support is removed from pyzmq >= 2.2.0
    from zmq.utils import jsonapi
    if jsonapi.jsonmod.__name__ == 'jsonlib':
        import json
        jsonapi.jsonmod = json
def version_from_schema(schema_el):
    """
    returns: API version number <str>
    raises: <VersionNotFound>

    NOTE: relies on presence of comment tags in the XSD, which are
    currently present for both ebaySvc.xsd (TradingAPI) and
    ShoppingService.xsd (ShoppingAPI)
    """
    # Walk backwards over the schema element's preceding siblings
    # looking for an XML comment whose text matches VERSION_COMMENT.
    vc_el = schema_el
    while True:
        vc_el = vc_el.getprevious()
        if vc_el is None:
            break
        if vc_el.tag is etree.Comment:
            match = VERSION_COMMENT.search(vc_el.text)
            if match:
                try:
                    return match.group(1)
                except IndexError:
                    # Comment matched but carried no capture group.
                    pass
    raise VersionNotFound('Version comment not found preceeding schema node')
def _default_ns_prefix(nsmap):
    """
    XML doc may have several prefix:namespace_url pairs, can also specify
    a namespace_url as default, tags in that namespace don't need a prefix

    NOTE: we rely on default namespace also present in prefixed form,
    I'm not sure if this is an XML certainty or a quirk of the eBay WSDLs

    in our case the WSDL contains:
        <wsdl:documentation>
            <Version>1.0.0</Version>
        </wsdl:documentation>
    ...but our query needs to give a prefix to the path of `Version`
    so we need to determine the default namespace of the doc, find the
    matching prefix and return it
    """
    if None in nsmap:
        default_url = nsmap[None]
        prefix = None
        # Python 2 ``iteritems``: find a non-None prefix mapped to the
        # same url as the default namespace.
        for key, val in nsmap.iteritems():
            if val == default_url and key is not None:
                prefix = key
                break
        else:
            # for/else: no prefixed alias of the default namespace found.
            raise ValueError(
                "Default namespace {url} not found as a prefix".format(
                    url=default_url
                )
            )
        return prefix
    raise ValueError("No default namespace found in map")
def version_from_wsdl(wsdl_tree):
    """
    returns: API version number <str>
    raises: <VersionNotFound>

    NOTE: relies on presence of documentation node in the WSDLs:
        <wsdl:documentation>
            <Version>1.0.0</Version>
        </wsdl:documentation>
    """
    prefix = _default_ns_prefix(wsdl_tree.nsmap)
    # XPath doesn't allow empty prefix:
    safe_map = wsdl_tree.nsmap.copy()
    try:
        del safe_map[None]
    except KeyError:
        pass
    try:
        # various eBay WSDLs are inconsistent - need case-insensitive matching
        version_el = wsdl_tree.xpath(
            'wsdl:service/wsdl:documentation/'
            '*[self::{p}:version or self::{p}:Version]'.format(p=prefix),
            namespaces=safe_map
        )[0]
    except IndexError:
        raise VersionNotFound(
            'Version not found in WSDL service documentation'
        )
    else:
        return version_el.text
def parser_from_schema(schema_url, require_version=True):
    """
    Returns an XSD-schema-enabled lxml parser from a WSDL or XSD

    `schema_url` can of course be local path via file:// url
    """
    schema_tree = etree.parse(schema_url)

    def get_version(element, getter):
        # Version extraction is best-effort unless require_version is set.
        try:
            return getter(element)
        except VersionNotFound:
            if require_version:
                raise
            else:
                return None

    root = schema_tree.getroot()
    if root.tag == '{%s}definitions' % namespaces.WSDL:
        # wsdl should contain an embedded schema
        schema_el = schema_tree.find('wsdl:types/xs:schema',
                                     namespaces=NS_MAP)
        version = get_version(root, version_from_wsdl)
    else:
        # Plain XSD document: the root element is the schema itself.
        schema_el = root
        version = get_version(schema_el, version_from_schema)
    schema = etree.XMLSchema(schema_el)
    return objectify.makeparser(schema=schema), version
def askQuestion(question, required = True, answers = dict(), tries = 3):
    """Ask a question to STDOUT and return answer from STDIN

    Args:
        question: A string question that will be printed to stdout

    Kwargs:
        required: True indicates the question must be answered
        answers: A sequence of dicts with key value pairs where the key
                 will be a user-selectable choice menu item with the
                 value as its related description.
        tries: Integer value of maximum amount of attempts users may
               exercise to select a valid answer

    Returns:
        A user specified string when required is False and answers was
        not provided.  A user selected key from answers when required is
        True and answers was provided.  None if required was false and
        user did not enter a answer.  None if required was True and user
        reached maximum limit of tries.
    """
    # NOTE: Python 2 print statements throughout; answers are obtained
    # via a named ISelectedChoice utility rather than raw_input() so
    # tests can supply their own implementation.
    print question
    if not answers:
        # Free-form answer mode (no choice menu).
        # we get the user's answer via a named utility because direct calls to
        # raw_input() are hard to test (this way, tests can provide their own
        # implementation of ISelectedChoice without calls to raw_input())
        answer = getUtility(ISelectedChoice, 'sparc.common.cli_selected_choice').selection()
        _attempts = 0
        while True:
            if tries and _attempts > tries:
                print (u"Too many attempts")
                return None
            if not required or answer:
                return answer if answer else None
            print (u"Invalid input, please try again.")
            answer = getUtility(ISelectedChoice, 'sparc.common.cli_selected_choice').selection()
            _attempts += 1
    # Choice-menu mode: print the first dict of choices, then loop
    # collecting input until a valid key (or give up after `tries`).
    for selection_pair in answers:
        for key, potential_answer in selection_pair.iteritems():
            print "(" + key + ") " + potential_answer
        break # only process the first dict entry for the sequence
    _attempts = 0
    while True:
        answer = getUtility(ISelectedChoice, 'sparc.common.cli_selected_choice').selection()
        if tries and _attempts > tries:
            print _(u"Too many attempts")
            return None
        if not answer and not required:
            return None
        for selection_pair in answers:
            if answer in selection_pair:
                return answer
        print (u"Invalid selection: {}, please try again.".format(answer))
        answer = getUtility(ISelectedChoice, 'sparc.common.cli_selected_choice').selection()
        _attempts += 1
def authenticate_unless_readonly(f, self, *args, **kwargs):
    """authenticate this page *unless* readonly view is active.

    In read-only mode, the notebook list and print view should
    be accessible without authentication.
    """
    # Wrap the handler with tornado's authentication decorator; only use
    # the wrapped version when the app is NOT in read-only mode.
    @web.authenticated
    def auth_f(self, *args, **kwargs):
        return f(self, *args, **kwargs)

    if self.application.read_only:
        # unauthenticated access allowed in read-only mode
        return f(self, *args, **kwargs)
    else:
        return auth_f(self, *args, **kwargs)
def ws_url(self):
    """websocket url matching the current request

    turns http[s]://host[:port] into ws[s]://host[:port]
    """
    # http -> ws, https -> wss.
    proto = self.request.protocol.replace('http', 'ws')
    configured = self.application.ipython_app.websocket_host
    # An empty configured host means "use the request's host".
    host = self.request.host if configured == '' else configured
    return "%s://%s" % (proto, host)
def _reserialize_reply(self, msg_list):
    """Reserialize a reply message using JSON.

    This takes the msg list from the ZMQ socket, unserializes it using
    self.session and then serializes the result using JSON. This method
    should be used by self._on_zmq_reply to build messages that can
    be sent back to the browser.
    """
    idents, msg_list = self.session.feed_identities(msg_list)
    msg = self.session.unserialize(msg_list)
    # Strip 'date' fields from both headers (presumably so they don't
    # trip up JSON serialization — date_default handles remaining dates;
    # TODO confirm against the session message format).
    try:
        msg['header'].pop('date')
    except KeyError:
        pass
    try:
        msg['parent_header'].pop('date')
    except KeyError:
        pass
    # Binary buffers are dropped before JSON encoding.
    msg.pop('buffers')
    return jsonapi.dumps(msg, default=date_default)
def _inject_cookie_message(self, msg):
    """Inject the first message, which is the document cookie,
    for authentication."""
    if isinstance(msg, unicode):
        # Cookie constructor doesn't accept unicode strings for some
        # reason (Python 2), so encode first.
        msg = msg.encode('utf8', 'replace')
    try:
        self.request._cookies = Cookie.SimpleCookie(msg)
    except:
        # Best-effort: a malformed cookie is logged, not fatal.
        logging.warn("couldn't parse cookie string: %s",msg, exc_info=True)
def start_hb(self, callback):
    """Start the heartbeating and call the callback if the kernel dies."""
    if not self._beating:
        self._kernel_alive = True

        def ping_or_dead():
            # Runs each period: if a beat arrived since the last tick,
            # clear the flag and send a fresh ping; otherwise the kernel
            # missed a whole period and is treated as dead.
            self.hb_stream.flush()
            if self._kernel_alive:
                self._kernel_alive = False
                self.hb_stream.send(b'ping')
                # flush stream to force immediate socket send
                self.hb_stream.flush()
            else:
                try:
                    callback()
                except:
                    # Best-effort: a failing death-callback must not
                    # prevent the heartbeat from being stopped.
                    pass
                finally:
                    self.stop_hb()

        def beat_received(msg):
            # Any reply on the hb stream means the kernel is alive.
            self._kernel_alive = True

        self.hb_stream.on_recv(beat_received)
        loop = ioloop.IOLoop.instance()
        self._hb_periodic_callback = ioloop.PeriodicCallback(
            ping_or_dead, self.time_to_dead*1000, loop)
        # Delay the actual start by first_beat seconds;
        # _really_start_hb checks we weren't closed in the meantime.
        loop.add_timeout(time.time()+self.first_beat, self._really_start_hb)
        self._beating = True
def _really_start_hb(self):
    """callback for delayed heartbeat start

    Only start the hb loop if we haven't been closed during the wait.
    """
    if self._beating and not self.hb_stream.closed():
        self._hb_periodic_callback.start()
def stop_hb(self):
    """Stop the heartbeating and cancel all related callbacks."""
    # Idempotent: a second call is a no-op.
    if not self._beating:
        return
    self._beating = False
    self._hb_periodic_callback.stop()
    if not self.hb_stream.closed():
        # Detach the recv handler from the still-open stream.
        self.hb_stream.on_recv(None)
def fload(self):
    """Load file object."""
    # read data and parse into blocks
    # Close any file object left over from a previous load.
    if getattr(self, 'fobj', None) is not None:
        self.fobj.close()
    if hasattr(self.src, "read"):
        # It seems to be a file or a file-like object
        self.fobj = self.src
    else:
        # Assume it's a string or something that can be converted to one
        self.fobj = open(self.fname)
def reload(self):
    """Reload source from disk and initialize state."""
    self.fload()
    self.src = self.fobj.read()
    # Split the source on the block-stop marker and record, per block,
    # whether it is marked silent and/or auto-executing.
    src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
    self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
    self._auto = [bool(self.re_auto.findall(b)) for b in src_b]

    # if auto_all is not given (def. None), we read it from the file
    if self.auto_all is None:
        self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
    else:
        self.auto_all = bool(self.auto_all)

    # Clean the sources from all markup so it doesn't get displayed when
    # running the demo
    src_blocks = []
    auto_strip = lambda s: self.re_auto.sub('',s)
    for i,b in enumerate(src_b):
        if self._auto[i]:
            src_blocks.append(auto_strip(b))
        else:
            src_blocks.append(b)

    # remove the auto_all marker
    src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])

    self.nblocks = len(src_blocks)
    self.src_blocks = src_blocks

    # also build syntax-highlighted source
    self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)

    # ensure clean namespace and seek offset
    self.reset()
def _get_index(self,index):
    """Get the current block index, validating and checking status.

    Returns None if the demo is finished"""
    if index is None:
        # No explicit index: use the current position, unless we're done.
        if self.finished:
            print >>io.stdout, 'Demo finished. Use <demo_name>.reset() if you want to rerun it.'
            return None
        index = self.block_index
    else:
        # Explicit index: validate range before using it.
        self._validate_index(index)
    return index
def seek(self,index):
    """Move the current seek pointer to the given block.

    You can use negative indices to seek from the end, with identical
    semantics to those of Python lists."""
    if index < 0:
        # Negative indices count backwards from the last block.
        index += self.nblocks
    self._validate_index(index)
    self.block_index = index
    self.finished = False
def edit(self,index=None):
    """Edit a block.

    If no number is given, use the last block executed.

    This edits the in-memory copy of the demo, it does NOT modify the
    original source file.  If you want to do that, simply open the file in
    an editor and use reload() when you make changes to the file.  This
    method is meant to let you change a block during a demonstration for
    explanatory purposes, without damaging your original script."""

    index = self._get_index(index)
    if index is None:
        return
    # decrease the index by one (unless we're at the very beginning), so
    # that the default demo.edit() call opens up the sblock we've last run
    if index>0:
        index -= 1

    # Write the block to a temp file, let the user's editor run on it,
    # then read the edited text back.
    filename = self.shell.mktempfile(self.src_blocks[index])
    self.shell.hooks.editor(filename,1)
    new_block = file_read(filename)
    # update the source and colored block
    self.src_blocks[index] = new_block
    self.src_blocks_colored[index] = self.ip_colorize(new_block)
    self.block_index = index
    # call to run with the newly edited index
    self()
def show(self,index=None):
    """Show a single block on screen"""
    index = self._get_index(index)
    if index is None:
        return
    # Python 2 print-to-file syntax; marquee renders the banner line.
    print >>io.stdout, self.marquee('<%s> block # %s (%s remaining)' %
                                    (self.title,index,self.nblocks-index-1))
    print >>io.stdout,(self.src_blocks_colored[index])
    sys.stdout.flush()
def show_all(self):
    """Show entire demo on screen, block by block"""
    # local aliases to avoid repeated attribute lookups in the loop
    fname = self.title
    title = self.title
    nblocks = self.nblocks
    silent = self._silent
    marquee = self.marquee
    for index, block in enumerate(self.src_blocks_colored):
        # label silent blocks distinctly so the viewer knows they run quietly
        if silent[index]:
            print >>io.stdout, marquee('<%s> SILENT block # %s (%s remaining)' %
                                       (title, index, nblocks - index - 1))
        else:
            print >>io.stdout, marquee('<%s> block # %s (%s remaining)' %
                                       (title, index, nblocks - index - 1))
        # trailing comma: suppress the extra newline after the block body
        print >>io.stdout, block,
    sys.stdout.flush()
def reload(self):
    """Reload source from disk and initialize state."""
    # re-read the file; each non-blank line becomes its own block
    self.fload()
    raw_lines = self.fobj.readlines()
    blocks = [line for line in raw_lines if line.strip()]
    self.src = ''.join(raw_lines)

    # in this demo flavor every block auto-runs and none are silent
    count = len(blocks)
    self._silent = [False] * count
    self._auto = [True] * count
    self.auto_all = True
    self.nblocks = count
    self.src_blocks = blocks

    # also build syntax-highlighted source
    self.src_blocks_colored = map(self.ip_colorize, self.src_blocks)
    # ensure clean namespace and seek offset
    self.reset()
def series(collection, method, prints=15, *args, **kwargs):
    '''
    Processes a collection in series.

    Parameters
    ----------
    collection : list
        list of Record objects
    method : method
        method to call on each Record
    prints : int
        number of timer prints to the screen

    Returns
    -------
    collection : list
        list of results of calling ``method`` on each element

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> method = lambda x: x + 2
    >>> collection = turntable.spin.series(collection, method)
    '''
    # NOTE: 'verbose' is intentionally left in kwargs -- it is forwarded to
    # `method` below, exactly as the original call convention did.
    verbose = kwargs.get('verbose', True)

    results = []
    timer = turntable.utils.Timer(nLoops=len(collection), numPrints=prints,
                                  verbose=verbose)
    for subject in collection:
        results.append(method(subject, *args, **kwargs))
        timer.loop()
    timer.fin()
    return results
def batch(collection, method, processes=None, batch_size=None, quiet=False,
          kwargs_to_dump=None, args=None, **kwargs):
    '''Processes a collection in parallel batches, each batch processes
    in series on a single process.
    Running batches in parallel can be more effficient that splitting a list
    across cores as in spin.parallel because of parallel processing has high
    IO requirements.

    Parameters
    ----------
    collection : list
        i.e. list of Record objects
    method : method
        to call on each Record
    processes : int
        number of processes to run on [defaults to number of cores on machine]
    batch_size : int
        lenght of each batch [defaults to number of elements / number of processes]

    Returns
    -------
    collection : list
        list of Record objects after going through method called

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> def jam(record):
    >>>     return record + 2
    >>> collection = turntable.spin.batch(collection, jam)

    Note
    ----
    lambda functions do not work in parallel
    '''
    if processes is None:
        # default to the number of processes, not exceeding 20 or the number of
        # subjects
        processes = min(mp.cpu_count(), 20, len(collection))
    if batch_size is None:
        # floor divide rounds down to nearest int
        batch_size = max(len(collection) // processes, 1)
    print 'size of each batch =', batch_size
    # elements that don't divide evenly into the batches
    mod = len(collection) % processes
    # batch_list is the collection broken into batch-size chunks
    batch_list = [collection[x:x + batch_size]
                  for x in xrange(0, len(collection) - mod, batch_size)]
    # remainder handling: fold leftover elements into the last batch
    if mod != 0:
        batch_list[len(batch_list) - 1] += collection[-mod:]
    print 'number of batches =', len(batch_list)
    # Build the argument tuple passed to the per-batch worker: `method`
    # always goes first, followed by any user-supplied args
    if args is None:
        args = method
    else:
        if isinstance(args, tuple) == False:
            args = (args,)
        args = (method,) + args
    # Applying the mp method w/ or w/o dumping using the custom operator
    # method
    if kwargs_to_dump is None:
        res = parallel(
            batch_list,
            new_function_batch,
            processes=processes,
            args=args,
            **kwargs)
    else:
        res = process_dump(
            batch_list,
            new_function_batch,
            kwargs_to_dump,
            processes=processes,
            args=args,
            **kwargs)
    # flatten the per-batch result lists back into a single list
    returnList = []
    for l in res:
        returnList += l
    # toc = time.time()
    # elapsed = toc-tic
    # if quiet is False:
    #     if processes is None:
    #         print "Total Elapsed time: %s :-)" % str(elapsed)
    #     else:
    #         print "Total Elapsed time: %s on %s processes :-)" %
    #         (str(elapsed),str(processes))
    return returnList
def thread(function, sequence, cores=None, runSeries=False, quiet=False): '''sets up the threadpool with map for parallel processing''' # Make the Pool of workes if cores is None: pool = ThreadPool() else: pool = ThreadPool(cores) # Operate on the list of subjects with the requested function # in the split threads tic = time.time() if runSeries is False: try: results = pool.map(function, sequence) # close the pool and wiat for teh work to finish pool.close() pool.join() except: print 'thread Failed... running in series :-(' results = series(sequence, function) else: results = series(sequence, function) toc = time.time() elapsed = toc - tic if quiet is False: if cores is None: print "Elapsed time: %s :-)\n" % str(elapsed) else: print "Elapsed time: %s on %s threads :-)\n" % (str(elapsed), str(cores)) # Noes: # import functools # abc = map(functools.partial(sb.dist, distName = 'weibull'), wbldfList) return results
def parallel(collection, method, processes=None, args=None, **kwargs):
    '''Processes a collection in parallel.

    Parameters
    ----------
    collection : list
        i.e. list of Record objects
    method : method
        to call on each Record
    processes : int
        number of processes to run on [defaults to number of cores on machine]

    Returns
    -------
    collection : list
        list of Record objects after going through method called

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> def jam(record):
    >>>     return record + 2
    >>> collection = turntable.spin.parallel(collection, jam)

    Note
    ----
    lambda functions do not work in parallel
    '''
    if processes is None:
        # default to the number of cores, not exceeding 20
        processes = min(mp.cpu_count(), 20)
    print "Running parallel process on " + str(processes) + " cores.  :-)"
    pool = mp.Pool(processes=processes)
    PROC = []
    tic = time.time()
    # submit one async task per element; extra args follow the element itself
    for main_arg in collection:
        if args is None:
            ARGS = (main_arg,)
        else:
            if isinstance(args, tuple) == False:
                args = (args,)
            ARGS = (main_arg,) + args
        PROC.append(pool.apply_async(method, args=ARGS, kwds=kwargs))
    #RES = [p.get() for p in PROC]
    # collect results; a failed task yields None rather than aborting the run
    RES = []
    for p in PROC:
        try:
            RES.append(p.get())
        except Exception as e:
            print "shit happens..."
            print e
            RES.append(None)
    pool.close()
    pool.join()
    toc = time.time()
    elapsed = toc - tic
    print "Elapsed time: %s on %s processes :-)\n" % (str(elapsed),
                                                      str(processes))
    return RES
def install_mathjax(tag='v1.1', replace=False):
    """Download and install MathJax for offline use.

    This will install mathjax to the 'static' dir in the IPython notebook
    package, so it will fail if the caller does not have write access
    to that location.

    MathJax is a ~15MB download, and ~150MB installed.

    Parameters
    ----------

    replace : bool [False]
        Whether to remove and replace an existing install.
    tag : str ['v1.1']
        Which tag to download. Default is 'v1.1', the current stable release,
        but alternatives include 'v1.1a' and 'master'.
    """
    mathjax_url = "https://github.com/mathjax/MathJax/tarball/%s" % tag

    # destination: <notebook package>/static/mathjax
    nbdir = os.path.dirname(os.path.abspath(nbmod.__file__))
    static = os.path.join(nbdir, 'static')
    dest = os.path.join(static, 'mathjax')

    # check for existence and permissions
    if not os.access(static, os.W_OK):
        raise IOError("Need have write access to %s" % static)
    if os.path.exists(dest):
        if replace:
            if not os.access(dest, os.W_OK):
                raise IOError("Need have write access to %s" % dest)
            print "removing previous MathJax install"
            shutil.rmtree(dest)
        else:
            print "offline MathJax apparently already installed"
            return

    # download mathjax
    print "Downloading mathjax source..."
    response = urllib2.urlopen(mathjax_url)
    print "done"
    # use 'r|gz' stream mode, because socket file-like objects can't seek:
    tar = tarfile.open(fileobj=response.fp, mode='r|gz')
    # the tarball's single top-level dir is named mathjax-MathJax-<sha>
    topdir = tar.firstmember.path
    print "Extracting to %s" % dest
    tar.extractall(static)
    # it will be mathjax-MathJax-<sha>, rename to just mathjax
    os.rename(os.path.join(static, topdir), dest)
def with_it(obj):
    '''
    wrap `with obj` out of func.

    example:

    ``` py
    @with_it(Lock())
    def func():
        pass
    ```
    '''
    def decorate(func):
        @functools.wraps(func)
        def guarded(*args, **kwargs):
            # enter/exit `obj` around every call of the wrapped function
            with obj:
                return func(*args, **kwargs)
        return guarded
    return decorate
def with_objattr(name):
    '''
    wrap `with getattr(self, name)` out of func.

    usage:

    ``` py
    class A:
        def __init__(self):
            self._lock = RLock()

        @with_objattr('_lock')  # so easy to make a sync instance method !
        def func():
            pass
    ```
    '''
    def decorate(func):
        @functools.wraps(func)
        def guarded(self, *args, **kwargs):
            # look the context manager up on the instance at call time
            with getattr(self, name):
                return func(self, *args, **kwargs)
        return guarded
    return decorate
def with_objattrs(*names):
    '''
    like `with_objattr` but enter context one by one.
    '''
    def decorate(func):
        @functools.wraps(func)
        def guarded(self, *args, **kwargs):
            # enter each named attribute in order; ExitStack unwinds them
            # in reverse on the way out
            with contextlib.ExitStack() as stack:
                for attr in names:
                    stack.enter_context(getattr(self, attr))
                return func(self, *args, **kwargs)
        return guarded
    return decorate
def inspect_traceback(tb):
    """Inspect a traceback and its frame, returning source for the expression
    where the exception was raised, with simple variable replacement performed
    and the line on which the exception was raised marked with '>>'
    """
    log.debug('inspect traceback %s', tb)

    # we only want the innermost frame, where the exception was raised
    while tb.tb_next:
        tb = tb.tb_next

    frame = tb.tb_frame
    lines, exc_line = tbsource(tb)

    # figure out the set of lines to grab.
    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
    exp = Expander(frame.f_locals, frame.f_globals)

    # Tokenize the candidate region; if it isn't tokenizable (e.g. it butts
    # up against a multi-line construct), shrink it from the top and retry.
    while inspect_lines:
        try:
            for tok in tokenize.generate_tokens(src.readline):
                exp(*tok)
        except tokenize.TokenError, e:
            # this can happen if our inspectable region happens to butt up
            # against the end of a construct like a docstring with the closing
            # """ on separate line
            log.debug("Tokenizer error: %s", e)
            inspect_lines.pop(0)
            mark_line -= 1
            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
            exp = Expander(frame.f_locals, frame.f_globals)
            continue
        break
    padded = []
    if exp.expanded_source:
        exp_lines = exp.expanded_source.split('\n')
        ep = 0
        # prefix the raising line with '>>', everything else with spaces
        for line in exp_lines:
            if ep == mark_line:
                padded.append('>>  ' + line)
            else:
                padded.append('    ' + line)
            ep += 1
    return '\n'.join(padded)
def tbsource(tb, context=6):
    """Get source from a traceback object.

    A tuple of two things is returned: a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of
    context to return, which are centered around the current line.

    .. Note ::
       This is adapted from inspect.py in the python 2.4 standard library,
       since a bug in the 2.3 version of inspect prevents it from correctly
       locating source lines in a traceback frame.
    """
    lineno = tb.tb_lineno
    frame = tb.tb_frame

    if context > 0:
        # center the window of `context` lines around the raising line
        start = lineno - 1 - context//2
        log.debug("lineno: %s start: %s", lineno, start)

        try:
            lines, dummy = inspect.findsource(frame)
        except IOError:
            # source unavailable (e.g. compiled string): degrade gracefully
            lines, index = [''], 0
        else:
            all_lines = lines
            # clamp the window to the file bounds
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start

            # python 2.5 compat: if previous line ends in a continuation,
            # decrement start by 1 to match 2.4 behavior
            if sys.version_info >= (2, 5) and index > 0:
                while lines[index-1].strip().endswith('\\'):
                    start -= 1
                    lines = all_lines[start:start+context]
    else:
        lines, index = [''], 0
    log.debug("tbsource lines '''%s''' around index %s", lines, index)
    return (lines, index)
def find_inspectable_lines(lines, pos):
    """Find lines in home that are inspectable.

    Walk back from the err line up to 3 lines, but don't walk back over
    changes in indent level.

    Walk forward up to 3 lines, counting backslash-continued lines as 1.
    Don't walk over changes in indent level (unless part of an extended line)
    """
    # cnt: line ends with a backslash continuation
    # df:  line opens a new suite (ends with ':')
    # ind: captures a line's leading whitespace
    cnt = re.compile(r'\\[\s\n]*$')
    df = re.compile(r':[\s\n]*$')
    ind = re.compile(r'^(\s*)')
    toinspect = []
    home = lines[pos]
    home_indent = ind.match(home).groups()[0]

    # walk backwards while the indent level matches the raising line
    before = lines[max(pos-3, 0):pos]
    before.reverse()
    after = lines[pos+1:min(pos+4, len(lines))]

    for line in before:
        if ind.match(line).groups()[0] == home_indent:
            toinspect.append(line)
        else:
            break
    toinspect.reverse()
    toinspect.append(home)
    home_pos = len(toinspect)-1

    # walk forwards: continuation lines are always included; stop at an
    # indent change or a line that opens a new block
    continued = cnt.search(home)
    for line in after:
        if ((continued or ind.match(line).groups()[0] == home_indent)
            and not df.search(line)):
            toinspect.append(line)
            continued = cnt.search(line)
        else:
            break
    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
    return toinspect, home_pos
def countdown(name, date, description='', id='', granularity='sec',
              start=None, progressbar=False, progressbar_inversed=False,
              showpct=False):
    '''
    Create a countdown.

    Renders a countdown box for `date` (HTML for a bootstrap front-end),
    optionally with a progress bar spanning `start`..`date`.
    '''
    end_date = dateparse.parse_datetime(date)
    # Unix timestamp of the end date, consumed by the front-end counter
    end = dateformat.format(end_date, 'U')
    content = '<div class="name">' + name + '</div>'
    content += '<div class="description">' + description + '</div>'
    if progressbar:
        # BUG FIX: the original tested `end` (always truthy -- it was just
        # computed above) while the message talks about the start date; the
        # progress bar genuinely needs `start` to compute the percentage.
        if not start:
            raise Exception('For progressbar, start date is required.')
        # accept either a full datetime or a bare date for `start`
        parsed_date = datetime.datetime.combine(
            dateparse.parse_date(start), datetime.time())
        start_date = dateparse.parse_datetime(start) or parsed_date
        now = datetime.datetime.now()
        # fraction of the start..end interval that has elapsed
        pct = (now - start_date).total_seconds() /\
            (end_date - start_date).total_seconds()
        pct = int(pct * 100)
        if progressbar_inversed:
            pct = 100 - pct
        # Note: the output is for bootstrap!
        bar = '<div class="progress progress-striped active">'
        bar += '<div class="progress-bar" role="progressbar" aria-valuenow="{pct}" aria-valuemin="0" aria-valuemax="100" style="width: {pct}%">'
        bar += '<span class="sr-only">{pct}% Complete</span>'
        bar += '</div>'
        bar += '</div>'
        if showpct:
            bar += '<div class="percentage">{pct}%</div>'
        bar = bar.format(pct=pct)
        content += bar
    content += '<div class="counter"></div>'
    attr = {
        'class': 'countdownbox',
        'data-datetime': end,
        'data-granularity': granularity
    }
    if id:
        attr['id'] = id
    return html.tag('div', content, attr)
def cleanup(controller, engines):
    """Cleanup routine to shut down all subprocesses we opened."""
    import signal
    import time

    print('Starting cleanup')
    # interrupt every engine first
    print('Stopping engines...')
    for engine in engines:
        engine.send_signal(signal.SIGINT)
    # interrupt the controller so it can shut down its queues
    print('Stopping controller...')
    controller.send_signal(signal.SIGINT)
    # brief grace period before the hard kill
    time.sleep(0.1)
    print('Killing controller...')
    controller.kill()
    print('Cleanup done')
def pre_call(self, ctxt, pre_mod, post_mod, action):
    """
    A modifier hook function.  Called in priority order before the step's
    ``Action`` runs; may alter the context or take over action invocation.

    :param ctxt: The context object.
    :param pre_mod: Modifiers preceding this one, in priority order.
    :param post_mod: Modifiers following this one, in priority order.
    :param action: The action that will be performed.

    :returns: ``None`` to take no action, or a ``StepResult`` to suspend
              further ``pre_call()`` processing and jump to
              ``post_call()``.  This implementation returns a
              ``StepResult`` with state ``SKIPPED`` when the condition
              does not evaluate to ``True``.
    """
    # condition holds: let the action proceed normally
    if self.condition(ctxt):
        return None
    # condition failed: short-circuit the step as skipped
    return steps.StepResult(state=steps.SKIPPED)
def post_call(self, ctxt, result, action, post_mod, pre_mod):
    """
    A modifier hook function.  Called in reverse-priority order after the
    step's ``Action`` runs; may inspect or alter the step's result.

    :param ctxt: The context object.
    :param result: The ``StepResult`` produced by the action.
    :param action: The action that was performed.
    :param post_mod: Modifiers following this one, in priority order.
    :param pre_mod: Modifiers preceding this one, in priority order.

    :returns: The (possibly modified) result.  This implementation sets
              the result's ``ignore`` flag to the configured value.
    """
    # propagate the configured ignore state onto the step result
    result.ignore = self.config
    return result
def save_ids(f, self, *args, **kwargs):
    """Keep our history and outstanding attributes up to date after a call.

    Wraps ``f(self, *args, **kwargs)``; any msg_ids appended to
    ``self.client.history`` during the call are recorded in
    ``self.history`` and ``self.outstanding`` even if ``f`` raises.
    """
    n_previous = len(self.client.history)
    try:
        ret = f(self, *args, **kwargs)
    finally:
        # BUG FIX: the original computed nmsgs and sliced history[-nmsgs:],
        # which for nmsgs == 0 is history[0:] -- i.e. the *entire* history
        # was re-recorded when no new messages were sent.  Slicing from
        # n_previous is correct for every count, including zero.
        msg_ids = self.client.history[n_previous:]
        self.history.extend(msg_ids)
        # set.update replaces map(self.outstanding.add, ...), which was a
        # silent no-op on Python 3 where map() is lazy
        self.outstanding.update(msg_ids)
    return ret
def sync_results(f, self, *args, **kwargs):
    """sync relevant results from self.client to our results attribute."""
    outcome = f(self, *args, **kwargs)
    # anything we consider outstanding that the client no longer does
    # has completed; drop it from our outstanding set
    no_longer_pending = self.outstanding.difference(self.client.outstanding)
    done = self.outstanding.intersection(no_longer_pending)
    self.outstanding = self.outstanding.difference(done)
    return outcome
def spin_after(f, self, *args, **kwargs):
    """call spin after the method."""
    # run the wrapped method first, then poke the client's event loop
    result = f(self, *args, **kwargs)
    self.spin()
    return result
def add_record(self, msg_id, rec): """Add a new Task Record, by msg_id.""" # print rec rec = self._binary_buffers(rec) self._records.insert(rec)
def get_record(self, msg_id):
    """Get a specific Task Record, by msg_id."""
    found = self._records.find_one({'msg_id': msg_id})
    if not found:
        # find_one yields a falsy value ('' / None) when nothing matches
        raise KeyError(msg_id)
    return found
def update_record(self, msg_id, rec):
    """Update the data in an existing record."""
    # wrap binary payloads, then $set only the supplied fields
    buffered = self._binary_buffers(rec)
    self._records.update({'msg_id': msg_id}, {'$set': buffered})
def find_records(self, check, keys=None):
    """Find records matching a query dict, optionally extracting a subset of keys.

    Returns list of matching records.

    Parameters
    ----------

    check: dict
        mongodb-style query argument
    keys: list of strs [optional]
        if specified, the subset of keys to extract.  msg_id will *always* be
        included.
    """
    if keys:
        # BUG FIX: work on a copy -- the original appended 'msg_id' to the
        # caller's list, mutating an input argument as a side effect
        keys = list(keys)
        if 'msg_id' not in keys:
            keys.append('msg_id')
    matches = list(self._records.find(check, keys))
    # strip mongo's internal _id field from the results
    for rec in matches:
        rec.pop('_id')
    return matches
def get_history(self):
    """get all msg_ids, ordered by time submitted."""
    # project only msg_id and let the backend sort by submission time
    submitted_order = self._records.find({}, {'msg_id': 1}).sort('submitted')
    return [doc['msg_id'] for doc in submitted_order]
def get_msgs(self):
    """Get all messages that are currently ready."""
    ready = []
    try:
        # drain the queue without blocking; Empty ends the drain
        while True:
            ready.append(self.get_msg(block=False))
    except Empty:
        pass
    return ready
def get_msg(self, block=True, timeout=None):
    """Gets a message if there is one that is ready."""
    # delegate straight to the underlying queue's get()
    return self._in_queue.get(block, timeout)
def prop(func=None, *, field=_UNSET, get: bool = True, set: bool = True,
         del_: bool = False, default=_UNSET, types: tuple = _UNSET):
    '''
    `prop` is a sugar for `property`.

    ``` py
    @prop
    def value(self):
        pass

    # equals:

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, val):
        self._value = val
    ```
    '''
    def wrap(func):
        if not callable(func):
            raise TypeError
        prop_name = func.__name__
        # backing attribute: explicit `field`, or '_' + property name
        key = field
        if key is _UNSET:
            key = '_' + prop_name
        fget, fset, fdel = None, None, None
        if get:
            def fget(self):
                # read straight from the instance dict, bypassing descriptors
                try:
                    return self.__dict__[key]
                except KeyError:
                    # fall back to `default` when one was provided
                    if default is not _UNSET:
                        return default
                    raise AttributeError(f"'{type(self).__name__}' object has no attribute '{key}'")
        if set:
            def fset(self, val):
                # optional runtime type check against `types`
                if types is not _UNSET and not isinstance(val, types):
                    if isinstance(types, tuple):
                        types_name = tuple(x.__name__ for x in types)
                    else:
                        types_name = types.__name__
                    raise TypeError(f'type of {type(self).__name__}.{prop_name} must be {types_name}; '
                                    f'got {type(val).__name__} instead')
                self.__dict__[key] = val
        if del_:
            def fdel(self):
                del self.__dict__[key]
        return property(fget, fset, fdel, func.__doc__)
    # support both bare `@prop` and parametrized `@prop(...)` usage
    return wrap(func) if func else wrap
def get_onlys(*fields):
    '''
    `get_onlys` is a sugar for multi-`property`.

    ``` py
    name, age = get_onlys('_name', '_age')

    # equals:

    @property
    def name(self):
        return getattr(self, '_name')

    @property
    def age(self):
        return getattr(self, '_age')
    ```
    '''
    def make_getter(attr):
        # bind `attr` per property via closure (avoids late-binding bugs)
        return property(lambda self: getattr(self, attr))
    return tuple(make_getter(attr) for attr in fields)
def parse(url):
    """Parses a database URL."""
    config = {}

    # non-string inputs (e.g. None) are treated as an empty URL
    if not isinstance(url, six.string_types):
        url = ''

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    # maxsplit=2 is harmless here; only the part before the first '?' is kept
    path = path.split('?', 2)[0]

    # Update with environment configuration.
    config.update({
        'NAME': path,
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port,
    })

    # map the URL scheme to a Django database engine, when known
    if url.scheme in SCHEMES:
        config['ENGINE'] = SCHEMES[url.scheme]

    return config
def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.
    """
    # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
    if path == '':
        path = '.'

    if os.path.isdir(path):
        folder_list = os.listdir(path)
    elif path.endswith('.egg'):
        try:
            folder_list = [f for f in zipimporter(path)._files]
        except Exception:
            # was a bare `except:`; unreadable/invalid eggs are treated as empty
            folder_list = []
    else:
        folder_list = []

    if not folder_list:
        return []

    # A few local constants to be used in loops below
    isfile = os.path.isfile
    pjoin = os.path.join
    basename = os.path.basename

    def is_importable_file(fname):
        """Returns True if the provided filename is a valid importable module"""
        # parameter renamed from `path`, which shadowed the enclosing argument
        name = os.path.splitext(fname)[0]
        return import_re.match(fname) and py3compat.isidentifier(name)

    # Now find actual path matches for packages or modules
    folder_list = [p for p in folder_list
                   if isfile(pjoin(path, p, '__init__.py'))
                   or is_importable_file(p)]

    return [basename(p).split('.')[0] for p in folder_list]
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    """
    ip = get_ipython()

    # cached result from a previous scan, if any
    if 'rootmodules' in ip.db:
        return ip.db['rootmodules']

    t = time()
    store = False
    modules = list(sys.builtin_module_names)
    for path in sys.path:
        modules += module_list(path)
        # after TIMEOUT_STORAGE seconds, decide to cache and tell the user
        if time() - t >= TIMEOUT_STORAGE and not store:
            store = True
            print("\nCaching the list of root modules, please wait!")
            print("(This will only be done once - type '%rehashx' to "
                  "reset cache!)\n")
            sys.stdout.flush()
        # after TIMEOUT_GIVEUP seconds, abort with an empty cached result
        if time() - t > TIMEOUT_GIVEUP:
            print("This is taking too long, we give up.\n")
            ip.db['rootmodules'] = []
            return []

    # de-duplicate and drop the spurious '__init__' entry
    modules = set(modules)
    if '__init__' in modules:
        modules.remove('__init__')
    modules = list(modules)
    if store:
        ip.db['rootmodules'] = modules
    return modules
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.

    Takes either a list of completions, or all completions in string
    (that will be split on whitespace).

    Example::

        [d:\ipython]|1> import ipy_completers
        [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
        [d:\ipython]|3> foo b<TAB>
        bar baz
        [d:\ipython]|3> foo ba
    """

    # accept a whitespace-separated string as shorthand for a list
    if isinstance(completions, basestring):
        completions = completions.split()

    # the hook ignores the event and always offers the fixed completions
    def do_complete(self, event):
        return completions

    get_ipython().set_hook('complete_command', do_complete, str_key=cmd)
def module_completion(line):
    """
    Returns a list containing the completion possibilities for an import line.

    The line looks like this :
    'import xml.d'
    'from xml.dom import'
    """

    words = line.split(' ')
    nwords = len(words)

    # from whatever <tab> -> 'import '
    if nwords == 3 and words[0] == 'from':
        return ['import ']

    # 'from xy<tab>' or 'import xy<tab>'
    if nwords < 3 and (words[0] in ['import', 'from']):
        if nwords == 1:
            # nothing typed yet: offer all top-level modules
            return get_root_modules()
        mod = words[1].split('.')
        if len(mod) < 2:
            return get_root_modules()
        # complete submodules of the already-typed dotted prefix
        completion_list = try_import('.'.join(mod[:-1]), True)
        return ['.'.join(mod[:-1] + [el]) for el in completion_list]

    # 'from xyz import abc<tab>'
    if nwords >= 3 and words[0] == 'from':
        mod = words[1]
        return try_import(mod)
    # NOTE(review): falls through to an implicit None for other shapes --
    # callers appear to tolerate this; confirm
def magic_run_completer(self, event):
    """Complete files that end in .py or .ipy for the %run command.
    """
    comps = arg_split(event.line, strict=False)
    # the fragment being completed is the last argument on the line
    relpath = (len(comps) > 1 and comps[-1] or '').strip("'\"")

    #print("\nev=", event)  # dbg
    #print("rp=", relpath)  # dbg
    #print('comps=', comps)  # dbg

    lglob = glob.glob
    isdir = os.path.isdir
    relpath, tilde_expand, tilde_val = expand_user(relpath)

    dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]

    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    # NOTE(review): on Python 3 filter() returns a lazy (always-truthy)
    # object, which would make this branch unconditional -- Python 2 code
    if filter(magic_run_re.match, comps):
        pys = [f.replace('\\','/') for f in lglob('*')]
    else:
        pys = [f.replace('\\','/')
               for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
               lglob(relpath + '*.pyw')]
    #print('run comp:', dirs+pys)  # dbg
    return [compress_user(p, tilde_expand, tilde_val) for p in dirs+pys]
def cd_completer(self, event):
    """Completer function for cd, which only returns directories."""
    ip = get_ipython()
    relpath = event.symbol

    #print(event)  # dbg
    # '-b <name>': complete bookmark names only
    if event.line.endswith('-b') or ' -b ' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', None)
        if bkms:
            return bkms.keys()
        else:
            return []

    if event.symbol == '-':
        # width needed to zero-pad the history indices uniformly
        width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
        # jump in directory history by number
        fmt = '-%0' + width_dh + 'd [%s]'
        ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_dh'])]
        if len(ents) > 1:
            return ents
        return []

    if event.symbol.startswith('--'):
        return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]

    # Expand ~ in path and normalize directory separators.
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    relpath = relpath.replace('\\', '/')

    found = []
    for d in [f.replace('\\', '/') + '/' for f in glob.glob(relpath + '*')
              if os.path.isdir(f)]:
        if ' ' in d:
            # we don't want to deal with any of that, complex code
            # for this is elsewhere
            raise TryNext

        found.append(d)

    if not found:
        if os.path.isdir(relpath):
            return [compress_user(relpath, tilde_expand, tilde_val)]

        # if no completions so far, try bookmarks
        # NOTE: iterkeys() is Python 2 only
        bks = self.db.get('bookmarks', {}).iterkeys()
        bkmatches = [s for s in bks if s.startswith(event.symbol)]
        if bkmatches:
            return bkmatches

        raise TryNext

    return [compress_user(p, tilde_expand, tilde_val) for p in found]
def interact(self):
    """This should call display(Javascript(jscode))."""
    # render this widget's JS payload and hand it to the IPython display
    # machinery along with any required JS libraries
    jscode = self.render()
    display(Javascript(data=jscode, lib=self.jslibs))
def _quoteattr(self, attr):
    """Escape an XML attribute. Value can be unicode."""
    attr = xml_safe(attr)
    # Python 2 only: encode unicode values to the report encoding first
    if isinstance(attr, unicode) and not UNICODE_STRINGS:
        attr = attr.encode(self.encoding)
    return saxutils.quoteattr(attr)
def configure(self, options, config):
    """Configures the xunit plugin."""
    Plugin.configure(self, options, config)
    self.config = config
    if not self.enabled:
        return
    # fresh counters and error list for this test run
    self.stats = {'errors': 0,
                  'failures': 0,
                  'passes': 0,
                  'skipped': 0}
    self.errorlist = []
    # open the XML report, replacing undecodable characters
    self.error_report_file = codecs.open(options.xunit_file, 'w',
                                         self.encoding, 'replace')
def report(self, stream):
    """Writes an Xunit-formatted XML file

    The file includes a report of test errors and failures.

    """
    self.stats['encoding'] = self.encoding
    # total = sum of all outcome counters
    self.stats['total'] = (self.stats['errors'] + self.stats['failures']
                           + self.stats['passes'] + self.stats['skipped'])
    # XML prolog + <testsuite> element with aggregate counts
    self.error_report_file.write(
        u'<?xml version="1.0" encoding="%(encoding)s"?>'
        u'<testsuite name="nosetests" tests="%(total)d" '
        u'errors="%(errors)d" failures="%(failures)d" '
        u'skip="%(skipped)d">' % self.stats)
    # append the per-test <testcase> fragments accumulated during the run
    self.error_report_file.write(u''.join([self._forceUnicode(e)
                                           for e in self.errorlist]))
    self.error_report_file.write(u'</testsuite>')
    self.error_report_file.close()
    # at higher verbosity, tell the user where the report went
    if self.config.verbosity > 1:
        stream.writeln("-" * 70)
        stream.writeln("XML: %s" % self.error_report_file.name)