def render(self, context=None):
    """Render this template by applying it to `context`.

    `context` is a dictionary of values to use in this rendering.

    """
    # Make the complete context we'll use.
    ctx = dict(self.context)
    if context:
        ctx.update(context)
    return self.render_function(ctx, self.do_dots)
def do_dots(self, value, *dots):
    """Evaluate dotted expressions at runtime."""
    for dot in dots:
        try:
            value = getattr(value, dot)
        except AttributeError:
            value = value[dot]
        if hasattr(value, '__call__'):
            value = value()
    return value
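# Hedged usage sketch for the two methods above, assuming they belong to a
# Templite-style template class (the name `Templite` is an assumption, not
# confirmed by this corpus).  `do_dots` lets `{{user.name}}` resolve first by
# attribute access, then by item access, so a plain dict works:
templite = Templite("Hello, {{user.name}}!")
templite.render({'user': {'name': 'Ada'}})  # -> "Hello, Ada!"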
def render_template(tpl, context):
    '''
    A shortcut function to render a partial template with context and return
    the output.
    '''
    templates = [tpl] if type(tpl) != list else tpl
    tpl_instance = None
    for tpl in templates:
        try:
            tpl_instance = template.loader.get_template(tpl)
            break
        except template.TemplateDoesNotExist:
            pass
    if not tpl_instance:
        raise Exception('Template does not exist: ' + templates[-1])
    return tpl_instance.render(template.Context(context))
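# Hedged usage sketch: `render_template` accepts a single template name or a
# fallback list; the first name Django's loader resolves wins, and only if
# none resolve is the exception raised.  The template names are illustrative.
html = render_template(
    ['partials/custom_card.html', 'partials/card.html'],
    {'title': 'Hello'},
)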
def format_display_data(obj, include=None, exclude=None):
    """Return a format data dict for an object.

    By default all format types will be computed.

    The following MIME types are currently implemented:

    * text/plain
    * text/html
    * text/latex
    * application/json
    * application/javascript
    * image/png
    * image/jpeg
    * image/svg+xml

    Parameters
    ----------
    obj : object
        The Python object whose format data will be computed.
    include : list or tuple, optional
        A list of format type strings (MIME types) to include in the format
        data dict. If this is set *only* the format types included in this
        list will be computed.
    exclude : list or tuple, optional
        A list of format type strings (MIME types) to exclude in the format
        data dict. If this is set all format types will be computed, except
        for those included in this argument.

    Returns
    -------
    format_dict : dict
        A dictionary of key/value pairs, one for each format that was
        generated for the object. The keys are the format types, which will
        usually be MIME type strings, and the values are JSON'able data
        structures containing the raw data for the representation in that
        format.
    """
    from IPython.core.interactiveshell import InteractiveShell
    # The original call discarded the computed dict; return it so the
    # function matches its docstring.
    return InteractiveShell.instance().display_formatter.format(
        obj,
        include,
        exclude
    )
def _formatters_default(self):
    """Activate the default formatters."""
    formatter_classes = [
        PlainTextFormatter,
        HTMLFormatter,
        SVGFormatter,
        PNGFormatter,
        JPEGFormatter,
        LatexFormatter,
        JSONFormatter,
        JavascriptFormatter
    ]
    d = {}
    for cls in formatter_classes:
        f = cls(config=self.config)
        d[f.format_type] = f
    return d
def format(self, obj, include=None, exclude=None):
    """Return a format data dict for an object.

    By default all format types will be computed.

    The following MIME types are currently implemented:

    * text/plain
    * text/html
    * text/latex
    * application/json
    * application/javascript
    * image/png
    * image/jpeg
    * image/svg+xml

    Parameters
    ----------
    obj : object
        The Python object whose format data will be computed.
    include : list or tuple, optional
        A list of format type strings (MIME types) to include in the format
        data dict. If this is set *only* the format types included in this
        list will be computed.
    exclude : list or tuple, optional
        A list of format type strings (MIME types) to exclude in the format
        data dict. If this is set all format types will be computed, except
        for those included in this argument.

    Returns
    -------
    format_dict : dict
        A dictionary of key/value pairs, one for each format that was
        generated for the object. The keys are the format types, which will
        usually be MIME type strings, and the values are JSON'able data
        structures containing the raw data for the representation in that
        format.
    """
    format_dict = {}

    # If plain text only is active
    if self.plain_text_only:
        formatter = self.formatters['text/plain']
        try:
            data = formatter(obj)
        except:
            # FIXME: log the exception
            raise
        if data is not None:
            format_dict['text/plain'] = data
        return format_dict

    for format_type, formatter in self.formatters.items():
        if include is not None:
            if format_type not in include:
                continue
        if exclude is not None:
            if format_type in exclude:
                continue
        try:
            data = formatter(obj)
        except:
            # FIXME: log the exception
            raise
        if data is not None:
            format_dict[format_type] = data
    return format_dict
def for_type(self, typ, func):
    """Add a format function for a given type.

    Parameters
    ----------
    typ : class
        The class of the object that will be formatted using `func`.
    func : callable
        The callable that will be called to compute the format data. The
        call signature of this function is simple, it must take the
        object to be formatted and return the raw data for the given
        format. Subclasses may use a different call signature for the
        `func` argument.
    """
    oldfunc = self.type_printers.get(typ, None)
    if func is not None:
        # To support easy restoration of old printers, we need to ignore
        # Nones.
        self.type_printers[typ] = func
    return oldfunc
def for_type_by_name(self, type_module, type_name, func):
    """Add a format function for a type specified by the full dotted
    module and name of the type, rather than the type of the object.

    Parameters
    ----------
    type_module : str
        The full dotted name of the module the type is defined in, like
        ``numpy``.
    type_name : str
        The name of the type (the class name), like ``dtype``.
    func : callable
        The callable that will be called to compute the format data. The
        call signature of this function is simple, it must take the
        object to be formatted and return the raw data for the given
        format. Subclasses may use a different call signature for the
        `func` argument.
    """
    key = (type_module, type_name)
    oldfunc = self.deferred_printers.get(key, None)
    if func is not None:
        # To support easy restoration of old printers, we need to ignore
        # Nones.
        self.deferred_printers[key] = func
    return oldfunc
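# Hedged usage sketch for the two registration methods above; `formatter` is
# a formatter instance, and `Money`/`money_repr` are hypothetical names.  The
# by-name form defers registration, so numpy need not be imported just to
# attach a printer to numpy.dtype; the returned old printer supports later
# restoration.
old_printer = formatter.for_type(Money, money_repr)
formatter.for_type_by_name('numpy', 'dtype', repr)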
def _in_deferred_types(self, cls):
    """
    Check if the given class is specified in the deferred type registry.

    Returns the printer from the registry if it exists, and None if the
    class is not in the registry. Successful matches will be moved to the
    regular type registry for future use.
    """
    mod = getattr(cls, '__module__', None)
    name = getattr(cls, '__name__', None)
    key = (mod, name)
    printer = None
    if key in self.deferred_printers:
        # Move the printer over to the regular registry.
        printer = self.deferred_printers.pop(key)
        self.type_printers[cls] = printer
    return printer
def _float_precision_changed(self, name, old, new):
    """float_precision changed, set float_format accordingly.

    float_precision can be set by int or str.
    This will set float_format, after interpreting input.
    If numpy has been imported, numpy print precision will also be set.

    integer `n` sets format to '%.nf', otherwise, format set directly.
    An empty string returns to defaults (repr for float, 8 for numpy).

    This parameter can be set via the '%precision' magic.
    """
    if '%' in new:
        # got explicit format string
        fmt = new
        try:
            fmt % 3.14159
        except Exception:
            raise ValueError("Precision must be int or format string, not %r" % new)
    elif new:
        # otherwise, should be an int
        try:
            i = int(new)
            assert i >= 0
        except ValueError:
            raise ValueError("Precision must be int or format string, not %r" % new)
        except AssertionError:
            raise ValueError("int precision must be non-negative, not %r" % i)

        fmt = '%%.%if' % i
        if 'numpy' in sys.modules:
            # set numpy precision if it has been imported
            import numpy
            numpy.set_printoptions(precision=i)
    else:
        # default back to repr
        fmt = '%r'
        if 'numpy' in sys.modules:
            import numpy
            # numpy default is 8
            numpy.set_printoptions(precision=8)
    self.float_format = fmt
def user_config_files():
    """Return paths to any existing user config files
    """
    return filter(os.path.exists,
                  map(os.path.expanduser, config_files))
def flag(val):
    """Does the value look like an on/off flag?"""
    if val == 1:
        return True
    elif val == 0:
        return False
    val = str(val)
    if len(val) > 5:
        return False
    return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
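# Hedged usage sketch: `flag` reports whether a string *looks like* an on/off
# flag, except that the numeric 1/0 fast path returns the truth value itself.
flag('on')      # True  -- recognized flag spelling
flag('TRUE')    # True
flag('maybe')   # False -- not a flag spelling
flag(0)         # False -- numeric fast path, not flag-likeness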
def configure(self, argv=None, doc=None):
    """Configure the nose running environment. Execute configure before
    collecting tests with nose.TestCollector to enable output capture and
    other features.
    """
    env = self.env
    if argv is None:
        argv = sys.argv

    cfg_files = getattr(self, 'files', [])
    options, args = self._parseArgs(argv, cfg_files)
    # If -c --config has been specified on command line,
    # load those config files and reparse
    if getattr(options, 'files', []):
        options, args = self._parseArgs(argv, options.files)

    self.options = options
    if args:
        self.testNames = args
    if options.testNames is not None:
        self.testNames.extend(tolist(options.testNames))

    if options.py3where is not None:
        if sys.version_info >= (3,):
            options.where = options.py3where

    # `where` is an append action, so it can't have a default value
    # in the parser, or that default will always be in the list
    if not options.where:
        options.where = env.get('NOSE_WHERE', None)

    # include and exclude also
    if not options.ignoreFiles:
        options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
    if not options.include:
        options.include = env.get('NOSE_INCLUDE', [])
    if not options.exclude:
        options.exclude = env.get('NOSE_EXCLUDE', [])

    self.addPaths = options.addPaths
    self.stopOnError = options.stopOnError
    self.verbosity = options.verbosity
    self.includeExe = options.includeExe
    self.traverseNamespace = options.traverseNamespace
    self.debug = options.debug
    self.debugLog = options.debugLog
    self.loggingConfig = options.loggingConfig
    self.firstPackageWins = options.firstPackageWins
    self.configureLogging()

    if options.where is not None:
        self.configureWhere(options.where)

    if options.testMatch:
        self.testMatch = re.compile(options.testMatch)

    if options.ignoreFiles:
        self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
        log.info("Ignoring files matching %s", options.ignoreFiles)
    else:
        log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)

    if options.include:
        self.include = map(re.compile, tolist(options.include))
        log.info("Including tests matching %s", options.include)

    if options.exclude:
        self.exclude = map(re.compile, tolist(options.exclude))
        log.info("Excluding tests matching %s", options.exclude)

    # When listing plugins we don't want to run them
    if not options.showPlugins:
        self.plugins.configure(options, self)
        self.plugins.begin()
def configureLogging(self):
    """Configure logging for nose, or optionally other packages.
    Any logger name may be set with the debug option, and that logger
    will be set to debug level and be assigned the same handler as the
    nose loggers, unless it already has a handler.
    """
    if self.loggingConfig:
        from logging.config import fileConfig
        fileConfig(self.loggingConfig)
        return

    format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
    if self.debugLog:
        handler = logging.FileHandler(self.debugLog)
    else:
        handler = logging.StreamHandler(self.logStream)
    handler.setFormatter(format)

    logger = logging.getLogger('nose')
    logger.propagate = 0

    # only add our default handler if there isn't already one there
    # this avoids annoying duplicate log messages.
    if handler not in logger.handlers:
        logger.addHandler(handler)

    # default level
    lvl = logging.WARNING
    if self.verbosity >= 5:
        lvl = 0
    elif self.verbosity >= 4:
        lvl = logging.DEBUG
    elif self.verbosity >= 3:
        lvl = logging.INFO
    logger.setLevel(lvl)

    # individual overrides
    if self.debug:
        # no blanks
        debug_loggers = [name for name in self.debug.split(',') if name]
        for logger_name in debug_loggers:
            l = logging.getLogger(logger_name)
            l.setLevel(logging.DEBUG)
            if not l.handlers and not logger_name.startswith('nose'):
                l.addHandler(handler)
def configureWhere(self, where):
    """Configure the working directory or directories for the test run.
    """
    from nose.importer import add_path
    self.workingDir = None
    where = tolist(where)
    warned = False
    for path in where:
        if not self.workingDir:
            abs_path = absdir(path)
            if abs_path is None:
                raise ValueError("Working directory %s not found, or "
                                 "not a directory" % path)
            log.info("Set working dir to %s", abs_path)
            self.workingDir = abs_path
            if self.addPaths and \
                    os.path.exists(os.path.join(abs_path, '__init__.py')):
                log.info("Working directory %s is a package; "
                         "adding to sys.path" % abs_path)
                add_path(abs_path)
            continue
        if not warned:
            warn("Use of multiple -w arguments is deprecated and "
                 "support may be removed in a future release. You can "
                 "get the same behavior by passing directories without "
                 "the -w argument on the command line, or by using the "
                 "--tests argument in a configuration file.",
                 DeprecationWarning)
            # mark the warning as issued so it fires only once
            warned = True
        self.testNames.append(path)
def getParser(self, doc=None):
    """Get the command line option parser.
    """
    if self.parser:
        return self.parser
    env = self.env
    parser = self.parserClass(doc)
    parser.add_option(
        "-V", "--version", action="store_true",
        dest="version", default=False,
        help="Output nose version and exit")
    parser.add_option(
        "-p", "--plugins", action="store_true",
        dest="showPlugins", default=False,
        help="Output list of available plugins and exit. Combine with "
             "higher verbosity for greater detail")
    parser.add_option(
        "-v", "--verbose",
        action="count", dest="verbosity",
        default=self.verbosity,
        help="Be more verbose. [NOSE_VERBOSE]")
    parser.add_option(
        "--verbosity", action="store", dest="verbosity",
        metavar='VERBOSITY', type="int",
        help="Set verbosity; --verbosity=2 is the same as -v")
    parser.add_option(
        "-q", "--quiet", action="store_const", const=0, dest="verbosity",
        help="Be less verbose")
    parser.add_option(
        "-c", "--config", action="append",
        dest="files", metavar="FILES",
        help="Load configuration from config file(s). May be specified "
             "multiple times; in that case, all config files will be "
             "loaded and combined")
    parser.add_option(
        "-w", "--where", action="append",
        dest="where", metavar="WHERE",
        help="Look for tests in this directory. "
             "May be specified multiple times. The first directory passed "
             "will be used as the working directory, in place of the current "
             "working directory, which is the default. Others will be added "
             "to the list of tests to execute. [NOSE_WHERE]")
    parser.add_option(
        "--py3where", action="append",
        dest="py3where", metavar="PY3WHERE",
        help="Look for tests in this directory under Python 3.x. "
             "Functions the same as 'where', but only applies if running "
             "under Python 3.x or above. Note that, if present under 3.x, "
             "this option completely replaces any directories specified "
             "with 'where', so the 'where' option becomes ineffective. "
             "[NOSE_PY3WHERE]")
    parser.add_option(
        "-m", "--match", "--testmatch", action="store",
        dest="testMatch", metavar="REGEX",
        help="Files, directories, function names, and class names "
             "that match this regular expression are considered tests. "
             "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
        default=self.testMatchPat)
    parser.add_option(
        "--tests", action="store", dest="testNames", default=None,
        metavar='NAMES',
        help="Run these tests (comma-separated list). This argument is "
             "useful mainly from configuration files; on the command line, "
             "just pass the tests to run as additional arguments with no "
             "switch.")
    parser.add_option(
        "-l", "--debug", action="store",
        dest="debug", default=self.debug,
        help="Activate debug logging for one or more systems. "
             "Available debug loggers: nose, nose.importer, "
             "nose.inspector, nose.plugins, nose.result and "
             "nose.selector. Separate multiple names with a comma.")
    parser.add_option(
        "--debug-log", dest="debugLog", action="store",
        default=self.debugLog, metavar="FILE",
        help="Log debug messages to this file "
             "(default: sys.stderr)")
    parser.add_option(
        "--logging-config", "--log-config",
        dest="loggingConfig", action="store",
        default=self.loggingConfig, metavar="FILE",
        help="Load logging config from this file -- bypasses all other "
             "logging config settings.")
    parser.add_option(
        "-I", "--ignore-files", action="append",
        dest="ignoreFiles", metavar="REGEX",
        help="Completely ignore any file that matches this regular "
             "expression. Takes precedence over any other settings or "
             "plugins. "
             "Specifying this option will replace the default setting. "
             "Specify this option multiple times "
             "to add more regular expressions [NOSE_IGNORE_FILES]")
    parser.add_option(
        "-e", "--exclude", action="append", dest="exclude",
        metavar="REGEX",
        help="Don't run tests that match regular "
             "expression [NOSE_EXCLUDE]")
    parser.add_option(
        "-i", "--include", action="append", dest="include",
        metavar="REGEX",
        help="This regular expression will be applied to files, "
             "directories, function names, and class names for a chance "
             "to include additional tests that do not match TESTMATCH. "
             "Specify this option multiple times "
             "to add more regular expressions [NOSE_INCLUDE]")
    parser.add_option(
        "-x", "--stop", action="store_true", dest="stopOnError",
        default=self.stopOnError,
        help="Stop running tests after the first error or failure")
    parser.add_option(
        "-P", "--no-path-adjustment", action="store_false",
        dest="addPaths", default=self.addPaths,
        help="Don't make any changes to sys.path when "
             "loading tests [NOSE_NOPATH]")
    parser.add_option(
        "--exe", action="store_true", dest="includeExe",
        default=self.includeExe,
        help="Look for tests in python modules that are "
             "executable. Normal behavior is to exclude executable "
             "modules, since they may not be import-safe "
             "[NOSE_INCLUDE_EXE]")
    parser.add_option(
        "--noexe", action="store_false", dest="includeExe",
        help="DO NOT look for tests in python modules that are "
             "executable. (The default on the windows platform is to "
             "do so.)")
    parser.add_option(
        "--traverse-namespace", action="store_true",
        default=self.traverseNamespace, dest="traverseNamespace",
        help="Traverse through all path entries of a namespace package")
    parser.add_option(
        "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
        action="store_true", default=False, dest="firstPackageWins",
        help="nose's importer will normally evict a package from sys."
             "modules if it sees a package with the same name in a different "
             "location. Set this option to disable that behavior.")

    self.plugins.loadPlugins()
    self.pluginOpts(parser)

    self.parser = parser
    return parser
def page_dumb(strng, start=0, screen_lines=25):
    """Very dumb 'pager' in Python, for when nothing else works.

    Only moves forward, same interface as page(), except for pager_cmd and
    mode."""
    out_ln = strng.splitlines()[start:]
    screens = chop(out_ln, screen_lines - 1)
    if len(screens) == 1:
        print >>io.stdout, os.linesep.join(screens[0])
    else:
        last_escape = ""
        for scr in screens[0:-1]:
            hunk = os.linesep.join(scr)
            print >>io.stdout, last_escape + hunk
            if not page_more():
                return
            esc_list = esc_re.findall(hunk)
            if len(esc_list) > 0:
                last_escape = esc_list[-1]
        print >>io.stdout, last_escape + os.linesep.join(screens[-1])
def _detect_screen_size(use_curses, screen_lines_def):
    """Attempt to work out the number of lines on the screen.

    This is called by page(). It can raise an error (e.g. when run in the
    test suite), so it's separated out so it can easily be called in a try
    block.
    """
    TERM = os.environ.get('TERM', None)
    if (TERM == 'xterm' or TERM == 'xterm-color') and sys.platform != 'sunos5':
        local_use_curses = use_curses
    else:
        # curses causes problems on many terminals other than xterm, and
        # some termios calls lock up on Sun OS5.
        local_use_curses = False
    if local_use_curses:
        import termios
        import curses
        # There is a bug in curses, where *sometimes* it fails to properly
        # initialize, and then after the endwin() call is made, the
        # terminal is left in an unusable state.  Rather than trying to
        # check everytime for this (by requesting and comparing termios
        # flags each time), we just save the initial terminal state and
        # unconditionally reset it every time.  It's cheaper than making
        # the checks.
        term_flags = termios.tcgetattr(sys.stdout)

        # Curses modifies the stdout buffer size by default, which messes
        # up Python's normal stdout buffering.  This would manifest itself
        # to IPython users as delayed printing on stdout after having used
        # the pager.
        #
        # We can prevent this by manually setting the NCURSES_NO_SETBUF
        # environment variable.  For more details, see:
        # http://bugs.python.org/issue10144
        NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
        os.environ['NCURSES_NO_SETBUF'] = ''

        # Proceed with curses initialization
        scr = curses.initscr()
        screen_lines_real, screen_cols = scr.getmaxyx()
        curses.endwin()

        # Restore environment
        if NCURSES_NO_SETBUF is None:
            del os.environ['NCURSES_NO_SETBUF']
        else:
            os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF

        # Restore terminal state in case endwin() didn't.
        termios.tcsetattr(sys.stdout, termios.TCSANOW, term_flags)
        # Now we have what we needed: the screen size in rows/columns
        return screen_lines_real
        #print '***Screen size:',screen_lines_real,'lines x',\
        #screen_cols,'columns.' # dbg
    else:
        return screen_lines_def
def page(strng, start=0, screen_lines=0, pager_cmd=None):
    """Print a string, piping through a pager after a certain length.

    The screen_lines parameter specifies the number of *usable* lines of your
    terminal screen (total lines minus lines you need to reserve to show other
    information).

    If you set screen_lines to a number <=0, page() will try to auto-determine
    your screen size and will only use up to (screen_size+screen_lines) for
    printing, paging after that. That is, if you want auto-detection but need
    to reserve the bottom 3 lines of the screen, use screen_lines = -3, and
    for auto-detection without any lines reserved simply use screen_lines = 0.

    If a string won't fit in the allowed lines, it is sent through the
    specified pager command. If none given, look for PAGER in the environment,
    and ultimately default to less.

    If no system pager works, the string is sent through a 'dumb pager'
    written in python, very simplistic.
    """

    # Some routines may auto-compute start offsets incorrectly and pass a
    # negative value.  Offset to 0 for robustness.
    start = max(0, start)

    # first, try the hook
    ip = ipapi.get()
    if ip:
        try:
            ip.hooks.show_in_pager(strng)
            return
        except TryNext:
            pass

    # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
    TERM = os.environ.get('TERM', 'dumb')
    if TERM in ['dumb', 'emacs'] and os.name != 'nt':
        print strng
        return
    # chop off the topmost part of the string we don't want to see
    str_lines = strng.splitlines()[start:]
    str_toprint = os.linesep.join(str_lines)
    num_newlines = len(str_lines)
    len_str = len(str_toprint)

    # Dumb heuristics to guesstimate number of on-screen lines the string
    # takes.  Very basic, but good enough for docstrings in reasonable
    # terminals. If someone later feels like refining it, it's not hard.
    numlines = max(num_newlines, int(len_str / 80) + 1)

    screen_lines_def = get_terminal_size()[1]

    # auto-determine screen size
    if screen_lines <= 0:
        try:
            screen_lines += _detect_screen_size(use_curses, screen_lines_def)
        except (TypeError, UnsupportedOperation):
            print >>io.stdout, str_toprint
            return

    #print 'numlines',numlines,'screenlines',screen_lines  # dbg
    if numlines <= screen_lines:
        #print '*** normal print'  # dbg
        print >>io.stdout, str_toprint
    else:
        # Try to open pager and default to internal one if that fails.
        # All failure modes are tagged as 'retval=1', to match the return
        # value of a failed system command.  If any intermediate attempt
        # sets retval to 1, at the end we resort to our own page_dumb() pager.
        pager_cmd = get_pager_cmd(pager_cmd)
        pager_cmd += ' ' + get_pager_start(pager_cmd, start)
        if os.name == 'nt':
            if pager_cmd.startswith('type'):
                # The default WinXP 'type' command is failing on complex
                # strings.
                retval = 1
            else:
                tmpname = tempfile.mktemp('.txt')
                tmpfile = open(tmpname, 'wt')
                tmpfile.write(strng)
                tmpfile.close()
                cmd = "%s < %s" % (pager_cmd, tmpname)
                if os.system(cmd):
                    retval = 1
                else:
                    retval = None
                os.remove(tmpname)
        else:
            try:
                retval = None
                # if I use popen4, things hang. No idea why.
                #pager,shell_out = os.popen4(pager_cmd)
                pager = os.popen(pager_cmd, 'w')
                pager.write(strng)
                pager.close()
                retval = pager.close()  # success returns None
            except IOError, msg:  # broken pipe when user quits
                if msg.args == (32, 'Broken pipe'):
                    retval = None
                else:
                    retval = 1
            except OSError:
                # Other strange problems, sometimes seen in Win2k/cygwin
                retval = 1
        if retval is not None:
            page_dumb(strng, screen_lines=screen_lines)
def page_file(fname, start=0, pager_cmd=None):
    """Page a file, using an optional pager command and starting line.
    """
    pager_cmd = get_pager_cmd(pager_cmd)
    pager_cmd += ' ' + get_pager_start(pager_cmd, start)

    try:
        if os.environ['TERM'] in ['emacs', 'dumb']:
            raise EnvironmentError
        system(pager_cmd + ' ' + fname)
    except:
        try:
            if start > 0:
                start -= 1
            page(open(fname).read(), start)
        except:
            print 'Unable to show file', `fname`
def get_pager_cmd(pager_cmd=None):
    """Return a pager command.

    Makes some attempts at finding an OS-correct one.
    """
    if os.name == 'posix':
        default_pager_cmd = 'less -r'  # -r for color control sequences
    elif os.name in ['nt', 'dos']:
        default_pager_cmd = 'type'

    if pager_cmd is None:
        try:
            pager_cmd = os.environ['PAGER']
        except:
            pager_cmd = default_pager_cmd
    return pager_cmd
def get_pager_start(pager, start):
    """Return the string for paging files with an offset.

    This is the '+N' argument which less and more (under Unix) accept.
    """
    if pager in ['less', 'more']:
        if start:
            start_string = '+' + str(start)
        else:
            start_string = ''
    else:
        start_string = ''
    return start_string
def snip_print(str, width=75, print_full=0, header=''):
    """Print a string snipping the midsection to fit in width.

    print_full: mode control:
      - 0: only snip long strings
      - 1: send to page() directly.
      - 2: snip long strings and ask for full length viewing with page()
    Return 1 if snipping was necessary, 0 otherwise."""

    if print_full == 1:
        page(header + str)
        return 0

    print header,
    if len(str) < width:
        print str
        snip = 0
    else:
        whalf = int((width - 5) / 2)
        print str[:whalf] + ' <...> ' + str[-whalf:]
        snip = 1
    if snip and print_full == 2:
        if raw_input(header + ' Snipped. View (y/n)? [N]').lower() == 'y':
            page(str)
    return snip
def timings_out(reps, func, *args, **kw):
    """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)

    Execute a function reps times, return a tuple with the elapsed total
    CPU time in seconds, the time per call and the function's output.

    Under Unix, the return value is the sum of user+system time consumed by
    the process, computed via the resource module.  This prevents problems
    related to the wraparound effect which the time.clock() function has.

    Under Windows the return value is in wall clock seconds. See the
    documentation for the time module for more details."""

    reps = int(reps)
    assert reps >= 1, 'reps must be >= 1'
    if reps == 1:
        start = clock()
        out = func(*args, **kw)
        tot_time = clock() - start
    else:
        rng = xrange(reps - 1)  # the last time is executed separately to store output
        start = clock()
        for dummy in rng:
            func(*args, **kw)
        out = func(*args, **kw)  # one last time
        tot_time = clock() - start
    av_time = tot_time / reps
    return tot_time, av_time, out
def timings(reps, func, *args, **kw):
    """timings(reps,func,*args,**kw) -> (t_total,t_per_call)

    Execute a function reps times, return a tuple with the elapsed total
    CPU time in seconds and the time per call. These are just the first two
    values in timings_out()."""

    return timings_out(reps, func, *args, **kw)[0:2]
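# Hedged usage sketch for the two timing helpers above (assumes `clock` is
# bound to an appropriate timer in this module, as the docstrings describe):
tot, per_call = timings(1000, sum, range(100))
tot, per_call, result = timings_out(1000, sum, range(100))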
def print_basic_unicode(o, p, cycle):
    """A function to pretty print sympy Basic objects."""
    if cycle:
        return p.text('Basic(...)')
    out = pretty(o, use_unicode=True)
    if '\n' in out:
        p.text(u'\n')
    p.text(out)
def print_png(o):
    """
    A function to display sympy expression using inline style LaTeX in PNG.
    """
    s = latex(o, mode='inline')
    # mathtext does not understand certain latex flags, so we try to replace
    # them with suitable subs.
    s = s.replace('\\operatorname', '')
    s = s.replace('\\overline', '\\bar')
    png = latex_to_png(s)
    return png
def print_display_png(o):
    """
    A function to display sympy expression using display style LaTeX in PNG.
    """
    s = latex(o, mode='plain')
    s = s.strip('$')
    # As matplotlib does not support display style, dvipng backend is
    # used here.
    png = latex_to_png('$$%s$$' % s, backend='dvipng')
    return png
def can_print_latex(o):
    """
    Return True if type o can be printed with LaTeX.

    If o is a container type, this is True if and only if every element of
    o can be printed with LaTeX.
    """
    import sympy
    if isinstance(o, (list, tuple, set, frozenset)):
        return all(can_print_latex(i) for i in o)
    elif isinstance(o, dict):
        return all((isinstance(i, basestring) or can_print_latex(i)) and
                   can_print_latex(o[i]) for i in o)
    elif isinstance(o, (sympy.Basic, sympy.matrices.Matrix, int, long, float)):
        return True
    return False
def print_latex(o):
    """A function to generate the latex representation of sympy
    expressions."""
    if can_print_latex(o):
        s = latex(o, mode='plain')
        s = s.replace('\\dag', '\\dagger')
        s = s.strip('$')
        return '$$%s$$' % s
    # Fallback to the string printer
    return None
def load_ipython_extension(ip):
    """Load the extension in IPython."""
    import sympy

    # sympyprinting extension has been moved to SymPy as of 0.7.2, if it
    # exists there, warn the user and import it
    try:
        import sympy.interactive.ipythonprinting
    except ImportError:
        pass
    else:
        warnings.warn("The sympyprinting extension in IPython is deprecated, "
                      "use sympy.interactive.ipythonprinting")
        ip.extension_manager.load_extension('sympy.interactive.ipythonprinting')
        return

    global _loaded
    if not _loaded:
        plaintext_formatter = ip.display_formatter.formatters['text/plain']

        for cls in (object, str):
            plaintext_formatter.for_type(cls, print_basic_unicode)

        printable_containers = [list, tuple]

        # set and frozen set were broken with SymPy's latex() function, but
        # were fixed in the 0.7.1-git development version. See
        # http://code.google.com/p/sympy/issues/detail?id=3062.
        if sympy.__version__ > '0.7.1':
            printable_containers += [set, frozenset]
        else:
            # fall back to the plain-text printer for the container types
            # that latex() cannot handle in older SymPy versions
            for cls in (set, frozenset):
                plaintext_formatter.for_type(cls, print_basic_unicode)

        plaintext_formatter.for_type_by_name(
            'sympy.core.basic', 'Basic', print_basic_unicode
        )
        plaintext_formatter.for_type_by_name(
            'sympy.matrices.matrices', 'Matrix', print_basic_unicode
        )

        png_formatter = ip.display_formatter.formatters['image/png']

        png_formatter.for_type_by_name(
            'sympy.core.basic', 'Basic', print_png
        )
        png_formatter.for_type_by_name(
            'sympy.matrices.matrices', 'Matrix', print_display_png
        )
        for cls in [dict, int, long, float] + printable_containers:
            png_formatter.for_type(cls, print_png)

        latex_formatter = ip.display_formatter.formatters['text/latex']
        latex_formatter.for_type_by_name(
            'sympy.core.basic', 'Basic', print_latex
        )
        latex_formatter.for_type_by_name(
            'sympy.matrices.matrices', 'Matrix', print_latex
        )
        for cls in printable_containers:
            # Use LaTeX only if every element is printable by latex
            latex_formatter.for_type(cls, print_latex)

        _loaded = True
def str2tokens(string, delimiter):
    """
    Usage:
    {% with 'this, is a, string'|str2tokens:',' as token_list %}do something{% endwith %}
    """
    token_list = [token.strip() for token in string.split(delimiter)]
    return token_list
def str2tokenstags(string, delimiter):
    """
    Usage:
    {% str2tokens 'a/b/c/d' '/' as token_list %}
    """
    token_list = [token.strip() for token in string.split(delimiter)]
    return token_list
def add_options(self, parser, env=None):
    """Non-camel-case version of func name for backwards
    compatibility.

    .. warning ::

       DEPRECATED: Do not use this method,
       use :meth:`options <nose.plugins.base.IPluginInterface.options>`
       instead.

    """
    # FIXME raise deprecation warning if wasn't called by wrapper
    if env is None:
        env = os.environ
    try:
        self.options(parser, env)
        self.can_configure = True
    except OptionConflictError, e:
        warn("Plugin %s has conflicting option string: %s and will "
             "be disabled" % (self, e), RuntimeWarning)
        self.enabled = False
        self.can_configure = False
def options(self, parser, env):
    """Register commandline options.

    Implement this method for normal options behavior with protection
    from OptionConflictErrors. If you override this method and want the
    default --with-$name option to be registered, be sure to call super().
    """
    env_opt = 'NOSE_WITH_%s' % self.name.upper()
    env_opt = env_opt.replace('-', '_')
    parser.add_option("--with-%s" % self.name,
                      action="store_true",
                      dest=self.enableOpt,
                      default=env.get(env_opt),
                      help="Enable plugin %s: %s [%s]" %
                      (self.__class__.__name__, self.help(), env_opt))
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable option
    for the plugin (self.enableOpt) is true.
    """
    if not self.can_configure:
        return
    self.conf = conf
    if hasattr(options, self.enableOpt):
        self.enabled = getattr(options, self.enableOpt)
def validate_string_list(lst):
    """Validate that the input is a list of strings.

    Raises ValueError if not."""
    if not isinstance(lst, list):
        raise ValueError('input %r must be a list' % lst)
    for x in lst:
        if not isinstance(x, basestring):
            raise ValueError('element %r in list must be a string' % x)
def validate_string_dict(dct):
    """Validate that the input is a dict with string keys and values.

    Raises ValueError if not."""
    for k, v in dct.iteritems():
        if not isinstance(k, basestring):
            raise ValueError('key %r in dict must be a string' % k)
        if not isinstance(v, basestring):
            raise ValueError('value %r in dict must be a string' % v)
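# Hedged usage sketch: both validators raise ValueError on the first
# offending element, so callers run them before putting anything on the
# wire (as `execute` below does).
validate_string_list(['a', 'b'])       # passes silently
validate_string_dict({'x': '1 + 1'})   # passes silently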
def _run_loop(self):
    """Run my loop, ignoring EINTR events in the poller"""
    while True:
        try:
            self.ioloop.start()
        except ZMQError as e:
            if e.errno == errno.EINTR:
                continue
            else:
                raise
        except Exception:
            if self._exiting:
                break
            else:
                raise
        else:
            break
def _queue_send(self, msg):
    """Queue a message to be sent from the IOLoop's thread.

    Parameters
    ----------
    msg : message to send

    This is threadsafe, as it uses IOLoop.add_callback to give the loop's
    thread control of the action.
    """
    def thread_send():
        self.session.send(self.stream, msg)
    self.ioloop.add_callback(thread_send)
def _handle_recv(self, msg):
    """callback for stream.on_recv

    unpacks message, and calls handlers with it.
    """
    ident, smsg = self.session.feed_identities(msg)
    self.call_handlers(self.session.unserialize(smsg))
def run(self):
    """The thread's main activity.  Call start() instead."""
    self.socket = self.context.socket(zmq.DEALER)
    self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
    self.socket.connect('tcp://%s:%i' % self.address)
    self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
    self.stream.on_recv(self._handle_recv)
    self._run_loop()
    try:
        self.socket.close()
    except:
        pass
def execute(self, code, silent=False, user_variables=None,
            user_expressions=None, allow_stdin=None):
    """Execute code in the kernel.

    Parameters
    ----------
    code : str
        A string of Python code.

    silent : bool, optional (default False)
        If set, the kernel will execute the code as quietly possible.

    user_variables : list, optional
        A list of variable names to pull from the user's namespace.  They
        will come back as a dict with these names as keys and their
        :func:`repr` as values.

    user_expressions : dict, optional
        A dict with string keys and expressions to evaluate in the user's
        namespace.  They will come back as a dict with these names as keys
        and the :func:`repr` of the evaluated expressions as values.

    allow_stdin : bool, optional
        Flag for whether the kernel may send stdin (raw_input) requests to
        the frontend.  Defaults to self.allow_stdin.

    Returns
    -------
    The msg_id of the message sent.
    """
    if user_variables is None:
        user_variables = []
    if user_expressions is None:
        user_expressions = {}
    if allow_stdin is None:
        allow_stdin = self.allow_stdin

    # Don't waste network traffic if inputs are invalid
    if not isinstance(code, basestring):
        raise ValueError('code %r must be a string' % code)
    validate_string_list(user_variables)
    validate_string_dict(user_expressions)

    # Create class for content/msg creation. Related to, but possibly
    # not in Session.
    content = dict(code=code, silent=silent,
                   user_variables=user_variables,
                   user_expressions=user_expressions,
                   allow_stdin=allow_stdin,
                   )
    msg = self.session.msg('execute_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
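# Hedged usage sketch: fire an execute_request and keep the msg_id so the
# eventual reply can be matched to it.  `shell_channel` stands in for an
# instance of the surrounding channel class.
msg_id = shell_channel.execute("a = 1 + 1", user_variables=['a'])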
def complete(self, text, line, cursor_pos, block=None):
    """Tab complete text in the kernel's namespace.

    Parameters
    ----------
    text : str
        The text to complete.
    line : str
        The full line of text that is the surrounding context for the
        text to complete.
    cursor_pos : int
        The position of the cursor in the line where the completion was
        requested.
    block : str, optional
        The full block of code in which the completion is being requested.

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(text=text, line=line, block=block,
                   cursor_pos=cursor_pos)
    msg = self.session.msg('complete_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
def object_info(self, oname, detail_level=0):
    """Get metadata information about an object.

    Parameters
    ----------
    oname : str
        A string specifying the object name.
    detail_level : int, optional
        The level of detail for the introspection (0-2)

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(oname=oname, detail_level=detail_level)
    msg = self.session.msg('object_info_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
    """Get entries from the history list.

    Parameters
    ----------
    raw : bool
        If True, return the raw input.
    output : bool
        If True, then return the output as well.
    hist_access_type : str
        'range' (fill in session, start and stop params), 'tail' (fill in
        n) or 'search' (fill in pattern param).

    session : int
        For a range request, the session from which to get lines. Session
        numbers are positive integers; negative ones count back from the
        current session.
    start : int
        The first line number of a history range.
    stop : int
        The final (excluded) line number of a history range.

    n : int
        The number of lines of history to get for a tail request.

    pattern : str
        The glob-syntax pattern for a search request.

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(raw=raw, output=output,
                   hist_access_type=hist_access_type,
                   **kwargs)
    msg = self.session.msg('history_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
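# Hedged usage sketch: each hist_access_type takes different keyword
# arguments, all forwarded through **kwargs into the request content.
shell_channel.history(hist_access_type='tail', n=10)
shell_channel.history(hist_access_type='range', session=-1, start=1, stop=5)
shell_channel.history(hist_access_type='search', pattern='import*')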
def shutdown(self, restart=False):
    """Request an immediate kernel shutdown.

    Upon receipt of the (empty) reply, client code can safely assume that
    the kernel has shut down and it's safe to forcefully terminate it if
    it's still alive.

    The kernel will send the reply via a function registered with Python's
    atexit module, ensuring it's truly done as the kernel is done with all
    normal operation.
    """
    # Send quit message to kernel. Once we implement kernel-side setattr,
    # this should probably be done that way, but for now this will do.
    msg = self.session.msg('shutdown_request', {'restart': restart})
    self._queue_send(msg)
    return msg['header']['msg_id']
def flush(self, timeout=1.0):
    """Immediately processes all pending messages on the SUB channel.

    Callers should use this method to ensure that :method:`call_handlers`
    has been called for all messages that have been received on the
    0MQ SUB socket of this channel.

    This method is thread safe.

    Parameters
    ----------
    timeout : float, optional
        The maximum amount of time to spend flushing, in seconds. The
        default is one second.
    """
    # We do the IOLoop callback process twice to ensure that the IOLoop
    # gets to perform at least one full poll.
    stop_time = time.time() + timeout
    for i in xrange(2):
        self._flushed = False
        self.ioloop.add_callback(self._flush)
        while not self._flushed and time.time() < stop_time:
            time.sleep(0.01)
def input(self, string):
    """Send a string of raw input to the kernel."""
    content = dict(value=string)
    msg = self.session.msg('input_reply', content)
    self._queue_send(msg)
def _poll(self, start_time):
    """poll for heartbeat replies until we reach self.time_to_dead.

    Ignores interrupts, and returns the result of poll(), which will be an
    empty list if no messages arrived before the timeout, or the event
    tuple if there is a message to receive.
    """
    until_dead = self.time_to_dead - (time.time() - start_time)
    # ensure poll at least once
    until_dead = max(until_dead, 1e-3)
    events = []
    while True:
        try:
            events = self.poller.poll(1000 * until_dead)
        except ZMQError as e:
            if e.errno == errno.EINTR:
                # ignore interrupts during heartbeat
                # this may never actually happen
                until_dead = self.time_to_dead - (time.time() - start_time)
                until_dead = max(until_dead, 1e-3)
                pass
            else:
                raise
        except Exception:
            if self._exiting:
                break
            else:
                raise
        else:
            break
    return events
def run(self):
    """The thread's main activity.  Call start() instead."""
    self._create_socket()
    self._running = True
    self._beating = True

    while self._running:
        if self._pause:
            # just sleep, and skip the rest of the loop
            time.sleep(self.time_to_dead)
            continue

        since_last_heartbeat = 0.0
        # io.rprint('Ping from HB channel') # dbg
        # no need to catch EFSM here, because the previous event was
        # either a recv or connect, which cannot be followed by EFSM
        self.socket.send(b'ping')
        request_time = time.time()
        ready = self._poll(request_time)
        if ready:
            self._beating = True
            # the poll above guarantees we have something to recv
            self.socket.recv()
            # sleep the remainder of the cycle
            remainder = self.time_to_dead - (time.time() - request_time)
            if remainder > 0:
                time.sleep(remainder)
            continue
        else:
            # nothing was received within the time limit, signal heart
            # failure
            self._beating = False
            since_last_heartbeat = time.time() - request_time
            self.call_handlers(since_last_heartbeat)
            # and close/reopen the socket, because the REQ/REP cycle has
            # been broken
            self._create_socket()
            continue
    try:
        self.socket.close()
    except:
        pass
def is_beating(self):
    """Is the heartbeat running and responsive (and not paused)."""
    if self.is_alive() and not self._pause and self._beating:
        return True
    else:
        return False
def start_channels(self, shell=True, sub=True, stdin=True, hb=True):
    """Starts the channels for this kernel.

    This will create the channels if they do not exist and then start
    them. If port numbers of 0 are being used (random ports) then you
    must first call :method:`start_kernel`. If the channels have been
    stopped and you call this, :class:`RuntimeError` will be raised.
    """
    if shell:
        self.shell_channel.start()
    if sub:
        self.sub_channel.start()
    if stdin:
        self.stdin_channel.start()
        self.shell_channel.allow_stdin = True
    else:
        self.shell_channel.allow_stdin = False
    if hb:
        self.hb_channel.start()
def stop_channels(self):
    """Stops all the running channels for this kernel.
    """
    if self.shell_channel.is_alive():
        self.shell_channel.stop()
    if self.sub_channel.is_alive():
        self.sub_channel.stop()
    if self.stdin_channel.is_alive():
        self.stdin_channel.stop()
    if self.hb_channel.is_alive():
        self.hb_channel.stop()
def channels_running(self):
    """Are any of the channels created and running?"""
    return (self.shell_channel.is_alive() or self.sub_channel.is_alive() or
            self.stdin_channel.is_alive() or self.hb_channel.is_alive())
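# Hedged lifecycle sketch for the channel methods above, on a
# KernelManager-style instance (`km` is an assumed name):
km.start_channels(shell=True, sub=True, stdin=True, hb=True)
assert km.channels_running()
km.stop_channels()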
def cleanup_connection_file(self):
    """cleanup connection file *if we wrote it*

    Will not raise if the connection file was already removed somehow.
    """
    if self._connection_file_written:
        # cleanup connection files on full shutdown of kernel we started
        self._connection_file_written = False
        try:
            os.remove(self.connection_file)
        except OSError:
            pass
def load_connection_file(self):
    """load connection info from JSON dict in self.connection_file"""
    with open(self.connection_file) as f:
        cfg = json.loads(f.read())

    self.ip = cfg['ip']
    self.shell_port = cfg['shell_port']
    self.stdin_port = cfg['stdin_port']
    self.iopub_port = cfg['iopub_port']
    self.hb_port = cfg['hb_port']
    self.session.key = str_to_bytes(cfg['key'])
def write_connection_file(self):
    """write connection info to JSON dict in self.connection_file"""
    if self._connection_file_written:
        return
    self.connection_file, cfg = write_connection_file(self.connection_file,
        ip=self.ip, key=self.session.key,
        stdin_port=self.stdin_port, iopub_port=self.iopub_port,
        shell_port=self.shell_port, hb_port=self.hb_port)
    # write_connection_file also sets default ports:
    self.shell_port = cfg['shell_port']
    self.stdin_port = cfg['stdin_port']
    self.iopub_port = cfg['iopub_port']
    self.hb_port = cfg['hb_port']

    self._connection_file_written = True
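# Hedged round-trip sketch: one manager (`km`, assumed) writes the
# connection file, letting write_connection_file fill in any 0 (random)
# ports, and a second manager pointed at the same file loads identical
# connection info.
km.write_connection_file()
km2.connection_file = km.connection_file
km2.load_connection_file()
assert km2.shell_port == km.shell_port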
def start_kernel(self, **kw):
    """Starts a kernel process and configures the manager to use it.

    If random ports (port=0) are being used, this method must be called
    before the channels are created.

    Parameters
    ----------
    launcher : callable, optional (default None)
        A custom function for launching the kernel process (generally a
        wrapper around ``entry_point.base_launch_kernel``). In most cases,
        it should not be necessary to use this parameter.

    **kw : optional
        See respective options for IPython and Python kernels.
    """
    if self.ip not in LOCAL_IPS:
        raise RuntimeError("Can only launch a kernel on a local interface. "
                           "Make sure that the '*_address' attributes are "
                           "configured properly. "
                           "Currently valid addresses are: %s" % LOCAL_IPS
                           )

    # write connection file / get default ports
    self.write_connection_file()

    self._launch_args = kw.copy()
    launch_kernel = kw.pop('launcher', None)
    if launch_kernel is None:
        from ipkernel import launch_kernel
    self.kernel = launch_kernel(fname=self.connection_file, **kw)
def shutdown_kernel(self, restart=False):
    """Attempts to stop the kernel process cleanly.

    If the kernel cannot be stopped, it is killed, if possible.
    """
    # FIXME: Shutdown does not work on Windows due to ZMQ errors!
    if sys.platform == 'win32':
        self.kill_kernel()
        return

    # Pause the heart beat channel if it exists.
    if self._hb_channel is not None:
        self._hb_channel.pause()

    # Don't send any additional kernel kill messages immediately, to give
    # the kernel a chance to properly execute shutdown actions. Wait for
    # at most 1s, checking every 0.1s.
    self.shell_channel.shutdown(restart=restart)
    for i in range(10):
        # is_alive is a method, not a property; the original checked the
        # bound method itself, which is always truthy.
        if self.is_alive():
            time.sleep(0.1)
        else:
            break
    else:
        # OK, we've waited long enough.
        if self.has_kernel:
            self.kill_kernel()

    if not restart and self._connection_file_written:
        # cleanup connection files on full shutdown of kernel we started
        self._connection_file_written = False
        try:
            os.remove(self.connection_file)
        except IOError:
            pass
def restart_kernel(self, now=False, **kw):
    """Restarts a kernel with the arguments that were used to launch it.

    If the old kernel was launched with random ports, the same ports will
    be used for the new kernel.

    Parameters
    ----------
    now : bool, optional
        If True, the kernel is forcefully restarted *immediately*, without
        having a chance to do any cleanup action.  Otherwise the kernel is
        given 1s to clean up before a forceful restart is issued.

        In all cases the kernel is restarted, the only difference is
        whether it is given a chance to perform a clean shutdown or not.

    **kw : optional
        Any options specified here will replace those used to launch the
        kernel.
    """
    if self._launch_args is None:
        raise RuntimeError("Cannot restart the kernel. "
                           "No previous call to 'start_kernel'.")
    else:
        # Stop currently running kernel.
        if self.has_kernel:
            if now:
                self.kill_kernel()
            else:
                self.shutdown_kernel(restart=True)

        # Start new kernel.
        self._launch_args.update(kw)
        self.start_kernel(**self._launch_args)

        # FIXME: Messages get dropped in Windows due to probable ZMQ bug
        # unless there is some delay here.
        if sys.platform == 'win32':
            time.sleep(0.2)
def kill_kernel(self):
    """Kill the running kernel."""
    if self.has_kernel:
        # Pause the heart beat channel if it exists.
        if self._hb_channel is not None:
            self._hb_channel.pause()

        # Attempt to kill the kernel.
        try:
            self.kernel.kill()
        except OSError, e:
            # In Windows, we will get an Access Denied error if the process
            # has already terminated. Ignore it.
            if sys.platform == 'win32':
                if e.winerror != 5:
                    raise
            # On Unix, we may get an ESRCH error if the process has already
            # terminated. Ignore it.
            else:
                from errno import ESRCH
                if e.errno != ESRCH:
                    raise
        self.kernel = None
    else:
        raise RuntimeError("Cannot kill kernel. No kernel is running!")
def interrupt_kernel(self):
    """Interrupts the kernel.

    Unlike ``signal_kernel``, this operation is well supported on all
    platforms.
    """
    if self.has_kernel:
        if sys.platform == 'win32':
            from parentpoller import ParentPollerWindows as Poller
            Poller.send_interrupt(self.kernel.win32_interrupt_event)
        else:
            self.kernel.send_signal(signal.SIGINT)
    else:
        raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
def signal_kernel(self, signum):
    """Sends a signal to the kernel.

    Note that since only SIGTERM is supported on Windows, this function
    is only useful on Unix systems.
    """
    if self.has_kernel:
        self.kernel.send_signal(signum)
    else:
        raise RuntimeError("Cannot signal kernel. No kernel is running!")
def is_alive(self):
    """Is the kernel process still running?"""
    if self.has_kernel:
        if self.kernel.poll() is None:
            return True
        else:
            return False
    elif self._hb_channel is not None:
        # We didn't start the kernel with this KernelManager so we
        # use the heartbeat.
        return self._hb_channel.is_beating()
    else:
        # no heartbeat and not local, we can't tell if it's running,
        # so naively return True
        return True
def shell_channel(self):
    """Get the REQ socket channel object to make requests of the kernel."""
    if self._shell_channel is None:
        self._shell_channel = self.shell_channel_class(
            self.context, self.session, (self.ip, self.shell_port))
    return self._shell_channel
def sub_channel(self):
    """Get the SUB socket channel object."""
    if self._sub_channel is None:
        self._sub_channel = self.sub_channel_class(
            self.context, self.session, (self.ip, self.iopub_port))
    return self._sub_channel
def stdin_channel(self):
    """Get the REP socket channel object to handle stdin (raw_input)."""
    if self._stdin_channel is None:
        self._stdin_channel = self.stdin_channel_class(
            self.context, self.session, (self.ip, self.stdin_port))
    return self._stdin_channel
def hb_channel(self):
    """Get the heartbeat socket channel object to check that the kernel
    is alive."""
    if self._hb_channel is None:
        self._hb_channel = self.hb_channel_class(
            self.context, self.session, (self.ip, self.hb_port))
    return self._hb_channel
def bind_kernel(**kwargs):
    """Bind an Engine's Kernel to be used as a full IPython kernel.

    This allows a running Engine to be used simultaneously as a full
    IPython kernel with the QtConsole or other frontends.

    This function returns immediately.
    """
    from IPython.zmq.ipkernel import IPKernelApp
    from IPython.parallel.apps.ipengineapp import IPEngineApp

    # first check for IPKernelApp, in which case this should be a no-op
    # because there is already a bound kernel
    if IPKernelApp.initialized() and isinstance(IPKernelApp._instance, IPKernelApp):
        return

    if IPEngineApp.initialized():
        try:
            app = IPEngineApp.instance()
        except MultipleInstanceError:
            pass
        else:
            return app.bind_kernel(**kwargs)

    raise RuntimeError("bind_kernel must be called from an IPEngineApp instance")
def debug(self, level, message):
    """
    Emit a debugging message depending on the debugging level.

    :param level: The debugging level.
    :param message: The message to emit.
    """
    if self._debug >= level:
        print(message, file=sys.stderr)
def _get_extension_classes(cls):
    """
    Retrieve the extension classes in priority order.

    :returns: A list of extension classes, in proper priority order.
    """
    if cls._extension_classes is None:
        exts = {}

        # Iterate over the entrypoints
        for ext in entry.points[NAMESPACE_EXTENSIONS]:
            exts.setdefault(ext.priority, [])
            exts[ext.priority].append(ext)

        # Save the list of extension classes
        cls._extension_classes = list(utils.iter_prio_dict(exts))

    return cls._extension_classes
def prepare(cls, parser):
    """
    Prepare all the extensions.  Extensions are prepared during argument
    parser preparation.  An extension implementing the ``prepare()``
    method is able to add command line arguments specific for that
    extension.

    :param parser: The argument parser, an instance of
                   ``argparse.ArgumentParser``.
    """
    debugger = ExtensionDebugger('prepare')
    for ext in cls._get_extension_classes():
        with debugger(ext):
            ext.prepare(parser)
def activate(cls, ctxt, args):
    """
    Initialize the extensions.  This loops over each extension invoking
    its ``activate()`` method; those extensions that return an object
    are considered "activated" and will be called at later phases of
    extension processing.

    :param ctxt: An instance of ``timid.context.Context``.
    :param args: An instance of ``argparse.Namespace`` containing the
                 result of processing command line arguments.

    :returns: An instance of ``ExtensionSet``.
    """
    debugger = ExtensionDebugger('activate')
    exts = []
    for ext in cls._get_extension_classes():
        # Not using debugger as a context manager here, because we
        # want to know about the exception even if we're ignoring
        # it...but we need to notify which extension is being
        # processed!
        debugger(ext)

        try:
            # Check if the extension is being activated
            obj = ext.activate(ctxt, args)
        except Exception:
            # Hmmm, failed to activate; handle the error
            exc_info = sys.exc_info()
            if not debugger.__exit__(*exc_info):
                six.reraise(*exc_info)
        else:
            # OK, if the extension is activated, use it
            if obj is not None:
                exts.append(obj)
                debugger.debug(2, 'Activating extension "%s.%s"' %
                               (ext.__module__, ext.__name__))

    # Initialize and return the ExtensionSet
    return cls(exts)
def read_steps(self, ctxt, steps):
    """
    Called after reading steps, prior to adding them to the list of test
    steps.  Extensions are able to alter the list (in place).

    :param ctxt: An instance of ``timid.context.Context``.
    :param steps: A list of ``timid.steps.Step`` instances.

    :returns: The ``steps`` parameter, for convenience.
    """
    debugger = ExtensionDebugger('read_steps')
    for ext in self.exts:
        with debugger(ext):
            ext.read_steps(ctxt, steps)

    # Convenience return
    return steps
def pre_step(self, ctxt, step, idx):
    """
    Called prior to executing a step.

    :param ctxt: An instance of ``timid.context.Context``.
    :param step: An instance of ``timid.steps.Step`` describing the step
                 to be executed.
    :param idx: The index of the step in the list of steps.

    :returns: A ``True`` value if the step is to be skipped, ``False``
              otherwise.
    """
    debugger = ExtensionDebugger('pre_step')
    for ext in self.exts:
        with debugger(ext):
            if ext.pre_step(ctxt, step, idx):
                # Step must be skipped
                debugger.debug(3, 'Skipping step %d' % idx)
                return True

    return False
def post_step(self, ctxt, step, idx, result):
    """
    Called after executing a step.

    :param ctxt: An instance of ``timid.context.Context``.
    :param step: An instance of ``timid.steps.Step`` describing the step
                 that was executed.
    :param idx: The index of the step in the list of steps.
    :param result: An instance of ``timid.steps.StepResult`` describing
                   the result of executing the step.  May be altered by
                   the extension, e.g., to set the ``ignore`` attribute.

    :returns: The ``result`` parameter, for convenience.
    """
    debugger = ExtensionDebugger('post_step')
    for ext in self.exts:
        with debugger(ext):
            ext.post_step(ctxt, step, idx, result)

    # Convenience return
    return result
def finalize(self, ctxt, result):
    """
    Called at the end of processing.  This call allows extensions to
    emit any additional data, such as timing information, prior to
    ``timid``'s exit.  Extensions may also alter the return value.

    :param ctxt: An instance of ``timid.context.Context``.
    :param result: The return value of the basic ``timid`` call, or an
                   ``Exception`` instance if an exception was raised.
                   Without the extension, this would be passed directly
                   to ``sys.exit()``.

    :returns: The final result.
    """
    debugger = ExtensionDebugger('finalize')
    for ext in self.exts:
        with debugger(ext):
            result = ext.finalize(ctxt, result)

    return result
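# Hedged lifecycle sketch for the ExtensionSet hooks above; `parser`,
# `ctxt`, `args`, and `steps` stand in for the real timid objects, and the
# step invocation is hypothetical.
ExtensionSet.prepare(parser)
exts = ExtensionSet.activate(ctxt, args)
steps = exts.read_steps(ctxt, steps)
for idx, step in enumerate(steps):
    if exts.pre_step(ctxt, step, idx):
        continue  # an extension asked to skip this step
    result = step(ctxt)
    exts.post_step(ctxt, step, idx, result)
final_result = exts.finalize(ctxt, 0)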
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = os.walk(egg_dir)
    base, dirs, files = walker.next()
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield base, dirs, files
    for bdf in walker:
        yield bdf
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff"""
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    if sys.version_info < (3, 3):
        skip = 8   # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    f = open(filename, 'rb')
    f.read(skip)
    code = marshal.load(f)
    f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        for bad in [
            # original list was missing the comma after 'getfile', which
            # silently concatenated two names into 'getfilegetsourcelines'
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4":  # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def make_init_files(self):
    """Create missing package __init__ files"""
    init_files = []
    for base, dirs, files in walk_egg(self.bdist_dir):
        if base == self.bdist_dir:
            # don't put an __init__ in the root
            continue
        for name in files:
            if name.endswith('.py'):
                if '__init__.py' not in files:
                    pkg = base[len(self.bdist_dir) + 1:].replace(os.sep, '.')
                    if self.distribution.has_contents_for(pkg):
                        log.warn("Creating missing __init__.py for %s", pkg)
                        filename = os.path.join(base, '__init__.py')
                        if not self.dry_run:
                            f = open(filename, 'w')
                            f.write(NS_PKG_STUB)
                            f.close()
                        init_files.append(filename)
                break
        else:
            # not a package, don't traverse to subdirectories
            dirs[:] = []
    return init_files
def launch_new_instance(): """Create and run the IPython controller""" if sys.platform == 'win32': # make sure we don't get called from a multiprocessing subprocess # this can result in infinite Controllers being started on Windows # which doesn't have a proper fork, so multiprocessing is wonky # this only comes up when IPython has been installed using vanilla # setuptools, and *not* distribute. import multiprocessing p = multiprocessing.current_process() # the main process has name 'MainProcess' # subprocesses will have names like 'Process-1' if p.name != 'MainProcess': # we are a subprocess, don't start another Controller! return app = IPControllerApp.instance() app.initialize() app.start()
def save_connection_dict(self, fname, cdict):
    """save a connection dict to json file."""
    url = cdict['url']
    location = cdict['location']
    if not location:
        try:
            proto, ip, port = split_url(url)
        except AssertionError:
            pass
        else:
            try:
                location = socket.gethostbyname_ex(socket.gethostname())[2][-1]
            except (socket.gaierror, IndexError):
                self.log.warn("Could not identify this machine's IP, assuming 127.0.0.1."
                " You may need to specify '--location=<external_ip_address>' to help"
                " IPython decide when to connect via loopback.")
                location = '127.0.0.1'
        cdict['location'] = location
    fname = os.path.join(self.profile_dir.security_dir, fname)
    self.log.info("writing connection info to %s", fname)
    with open(fname, 'w') as f:
        f.write(json.dumps(cdict, indent=2))
    os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR)
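# Illustrative connection dict for save_connection_dict above; the field
# names come from the loader below, the values are invented:
example_cdict = {
    'url': 'tcp://127.0.0.1:55055',    # hub registration URL
    'location': '',                    # filled with a best-guess IP if empty
    'exec_key': '<hex key>',           # shared Session key
    'ssh': '',                         # optional ssh server for tunnelling
}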
def load_config_from_json(self): """load config from existing json connector files.""" c = self.config self.log.debug("loading config from JSON") # load from engine config fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) key = cfg['exec_key'] # json gives unicode, Session.key wants bytes c.Session.key = key.encode('ascii') xport,addr = cfg['url'].split('://') c.HubFactory.engine_transport = xport ip,ports = addr.split(':') c.HubFactory.engine_ip = ip c.HubFactory.regport = int(ports) self.location = cfg['location'] if not self.engine_ssh_server: self.engine_ssh_server = cfg['ssh'] # load client config fname = os.path.join(self.profile_dir.security_dir, self.client_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys" xport,addr = cfg['url'].split('://') c.HubFactory.client_transport = xport ip,ports = addr.split(':') c.HubFactory.client_ip = ip if not self.ssh_server: self.ssh_server = cfg['ssh'] assert int(ports) == c.HubFactory.regport, "regport mismatch"
def load_secondary_config(self):
    """secondary config, loading from JSON and setting defaults"""
    if self.reuse_files:
        try:
            self.load_config_from_json()
        except (AssertionError, IOError) as e:
            self.log.error("Could not load config from JSON: %s" % e)
        else:
            # successfully loaded config from JSON, and reuse=True
            # no need to write back the same file
            self.write_connection_files = False
    # switch Session.key default to secure
    default_secure(self.config)
    self.log.debug("Config changed")
    self.log.debug(repr(self.config))
def script_args(f):
    """single decorator for adding script args"""
    args = [
        magic_arguments.argument(
            '--out', type=str,
            help="""The variable in which to store stdout from the script.
            If the script is backgrounded, this will be the stdout *pipe*,
            instead of the stdout text itself.
            """
        ),
        magic_arguments.argument(
            '--err', type=str,
            help="""The variable in which to store stderr from the script.
            If the script is backgrounded, this will be the stderr *pipe*,
            instead of the stderr text itself.
            """
        ),
        magic_arguments.argument(
            '--bg', action="store_true",
            help="""Whether to run the script in the background.
            If given, the only way to see the output of the command is
            with --out/err.
            """
        ),
        magic_arguments.argument(
            '--proc', type=str,
            help="""The variable in which to store the Popen instance.
            This is used only when the --bg option is given.
            """
        ),
    ]
    for arg in args:
        f = arg(f)
    return f
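# Hedged sketch of how a bundled-arguments decorator like script_args is
# applied: decorators apply bottom-up, so script_args attaches the argument
# specs before magic_arguments() builds the parser. The shebang function
# here is illustrative; magic_arguments is IPython's
# IPython.core.magic_arguments module.
from IPython.core import magic_arguments

@magic_arguments.magic_arguments()
@script_args
def shebang(self, line, cell):
    """Hypothetical magic body: parse the bundled options."""
    args = magic_arguments.parse_argstring(shebang, line)
    return args  # a real magic would launch the subprocess here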
def exec_args(f): """decorator for adding block/targets args for execution applied to %pxconfig and %%px """ args = [ magic_arguments.argument('-b', '--block', action="store_const", const=True, dest='block', help="use blocking (sync) execution", ), magic_arguments.argument('-a', '--noblock', action="store_const", const=False, dest='block', help="use non-blocking (async) execution", ), magic_arguments.argument('-t', '--targets', type=str, help="specify the targets on which to execute", ), magic_arguments.argument('--verbose', action="store_const", const=True, dest="set_verbose", help="print a message at each execution", ), magic_arguments.argument('--no-verbose', action="store_const", const=False, dest="set_verbose", help="don't print any messages", ), ] for a in args: f = a(f) return f
def output_args(f): """decorator for output-formatting args applied to %pxresult and %%px """ args = [ magic_arguments.argument('-r', action="store_const", dest='groupby', const='order', help="collate outputs in order (same as group-outputs=order)" ), magic_arguments.argument('-e', action="store_const", dest='groupby', const='engine', help="group outputs by engine (same as group-outputs=engine)" ), magic_arguments.argument('--group-outputs', dest='groupby', type=str, choices=['engine', 'order', 'type'], default='type', help="""Group the outputs in a particular way. Choices are: type: group outputs of all engines by type (stdout, stderr, displaypub, etc.). engine: display all output for each engine together. order: like type, but individual displaypub output from each engine is collated. For example, if multiple plots are generated by each engine, the first figure of each engine will be displayed, then the second of each, etc. """ ), magic_arguments.argument('-o', '--out', dest='save_name', type=str, help="""store the AsyncResult object for this computation in the global namespace under this name. """ ), ] for a in args: f = a(f) return f
def pxconfig(self, line): """configure default targets/blocking for %px magics""" args = magic_arguments.parse_argstring(self.pxconfig, line) if args.targets: self.view.targets = self._eval_target_str(args.targets) if args.block is not None: self.view.block = args.block if args.set_verbose is not None: self.verbose = args.set_verbose
def result(self, line=''): """Print the result of the last asynchronous %px command. This lets you recall the results of %px computations after asynchronous submission (block=False). Examples -------- :: In [23]: %px os.getpid() Async parallel execution on engine(s): all In [24]: %pxresult Out[8:10]: 60920 Out[9:10]: 60921 Out[10:10]: 60922 Out[11:10]: 60923 """ args = magic_arguments.parse_argstring(self.result, line) if self.last_result is None: raise UsageError(NO_LAST_RESULT) self.last_result.get() self.last_result.display_outputs(groupby=args.groupby)
def parallel_execute(self, cell, block=None, groupby='type', save_name=None):
    """implementation used by %px and %%parallel"""
    # defaults:
    block = self.view.block if block is None else block
    base = "Parallel" if block else "Async parallel"
    targets = self.view.targets
    if isinstance(targets, list) and len(targets) > 10:
        str_targets = str(targets[:4])[:-1] + ', ..., ' + str(targets[-4:])[1:]
    else:
        str_targets = str(targets)
    if self.verbose:
        # print() works on both Python 2 and 3 for a single argument
        print(base + " execution on engine(s): %s" % str_targets)
    result = self.view.execute(cell, silent=False, block=False)
    self.last_result = result
    if save_name:
        self.shell.user_ns[save_name] = result
    if block:
        result.get()
        result.display_outputs(groupby)
    else:
        # return AsyncResult only on non-blocking submission
        return result
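# Quick illustration of the elided-targets formatting used above when more
# than ten engines are targeted:
targets = list(range(16))
print(str(targets[:4])[:-1] + ', ..., ' + str(targets[-4:])[1:])
# -> [0, 1, 2, 3, ..., 12, 13, 14, 15]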
def cell_px(self, line='', cell=None): """Executes the cell in parallel. Examples -------- :: In [24]: %%px --noblock ....: a = os.getpid() Async parallel execution on engine(s): all In [25]: %%px ....: print a [stdout:0] 1234 [stdout:1] 1235 [stdout:2] 1236 [stdout:3] 1237 """ args = magic_arguments.parse_argstring(self.cell_px, line) if args.targets: save_targets = self.view.targets self.view.targets = self._eval_target_str(args.targets) try: return self.parallel_execute(cell, block=args.block, groupby=args.groupby, save_name=args.save_name, ) finally: if args.targets: self.view.targets = save_targets
def _enable_autopx(self):
    """Enable %autopx mode by saving the original run_cell and installing
    pxrun_cell.
    """
    # override run_cell
    self._original_run_cell = self.shell.run_cell
    self.shell.run_cell = self.pxrun_cell
    self._autopx = True
    print("%autopx enabled")
def _disable_autopx(self):
    """Disable %autopx by restoring the original InteractiveShell.run_cell.
    """
    if self._autopx:
        self.shell.run_cell = self._original_run_cell
        self._autopx = False
        print("%autopx disabled")
def pxrun_cell(self, raw_cell, store_history=False, silent=False): """drop-in replacement for InteractiveShell.run_cell. This executes code remotely, instead of in the local namespace. See InteractiveShell.run_cell for details. """ if (not raw_cell) or raw_cell.isspace(): return ipself = self.shell with ipself.builtin_trap: cell = ipself.prefilter_manager.prefilter_lines(raw_cell) # Store raw and processed history if store_history: ipself.history_manager.store_inputs(ipself.execution_count, cell, raw_cell) # ipself.logger.log(cell, raw_cell) cell_name = ipself.compile.cache(cell, ipself.execution_count) try: ast.parse(cell, filename=cell_name) except (OverflowError, SyntaxError, ValueError, TypeError, MemoryError): # Case 1 ipself.showsyntaxerror() ipself.execution_count += 1 return None except NameError: # ignore name errors, because we don't know the remote keys pass if store_history: # Write output to the database. Does nothing unless # history output logging is enabled. ipself.history_manager.store_output(ipself.execution_count) # Each cell is a *single* input, regardless of how many lines it has ipself.execution_count += 1 if re.search(r'get_ipython\(\)\.magic\(u?["\']%?autopx', cell): self._disable_autopx() return False else: try: result = self.view.execute(cell, silent=False, block=False) except: ipself.showtraceback() return True else: if self.view.block: try: result.get() except: self.shell.showtraceback() return True else: with ipself.builtin_trap: result.display_outputs() return False
def run_heartbeat(message):
    """Internal ``CLOCK_CHANNEL`` consumer to process task runs"""
    then = arrow.get(message['time'])
    now = arrow.get()
    # discard stale ticks; only a sufficiently fresh heartbeat runs tasks
    if (now - then) <= timezone.timedelta(seconds=(TICK_FREQ + 1)):
        Task.run_tasks()
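# Minimal sketch of the staleness test in run_heartbeat above, assuming an
# arrow-parsable timestamp in the message; TICK_FREQ is given an
# illustrative value here:
import datetime
import arrow

TICK_FREQ = 60  # assumed tick interval, in seconds

def is_stale(message):
    then = arrow.get(message['time'])
    return (arrow.get() - then) > datetime.timedelta(seconds=TICK_FREQ + 1)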
def run_task(message):
    """Internal ``RUN_TASK`` consumer to run the task's callable"""
    task = Task.objects.get(pk=message['id'])
    if task.allow_overlap:
        task.run(message)
    else:
        if not task.running:
            # the persisted ``running`` flag acts as a soft lock so that
            # non-overlapping tasks are not started twice concurrently
            task.running = True
            task.save()
            try:
                task.run(message)
            finally:
                task.running = False
                task.save()
def remove_task(message): """Internal ``KILL_TASK`` consumer to remove retired tasks""" task = Task.objects.get(pk=message['id']) task.delete()
def patch_protocol_for_agent(protocol): """ Patch the protocol's makeConnection and connectionLost methods to make the protocol and its transport behave more like what `Agent` expects. While `Agent` is the driving force behind this, other clients and servers will no doubt have similar requirements. """ old_makeConnection = protocol.makeConnection old_connectionLost = protocol.connectionLost def new_makeConnection(transport): patch_transport_fake_push_producer(transport) patch_transport_abortConnection(transport, protocol) return old_makeConnection(transport) def new_connectionLost(reason): # Replace ConnectionDone with ConnectionAborted if we aborted. if protocol._fake_connection_aborted and reason.check(ConnectionDone): reason = Failure(ConnectionAborted()) return old_connectionLost(reason) protocol.makeConnection = new_makeConnection protocol.connectionLost = new_connectionLost protocol._fake_connection_aborted = False
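# Hedged usage sketch: patch a bare Twisted protocol before wiring it to a
# transport, so abort/producer behaviour matches what Agent expects.
from twisted.internet.protocol import Protocol

proto = Protocol()
patch_protocol_for_agent(proto)
# proto.makeConnection(transport) will now also patch the transport, and an
# aborted connection reports ConnectionAborted instead of ConnectionDone.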
def patch_if_missing(obj, name, method): """ Patch a method onto an object if it isn't already there. """ setattr(obj, name, getattr(obj, name, method))
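# Hedged example for patch_if_missing: install a default no-op close()
# only when the object does not already provide one.
class Thing(object):
    pass

thing = Thing()
patch_if_missing(thing, 'close', lambda: None)
thing.close()   # the patched-in no-op; an existing close() would win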