def get_dict(self, timeout=-1):
    """Get the results as a dict, keyed by engine_id.

    timeout behavior is described in `get()`.
    """
    results = self.get(timeout)
    engine_ids = [ md['engine_id'] for md in self._metadata ]
    bycount = sorted(engine_ids, key=lambda k: engine_ids.count(k))
    maxcount = bycount.count(bycount[-1])
    if maxcount > 1:
        raise ValueError("Cannot build dict, %i jobs ran on engine #%i"%(
            maxcount, bycount[-1]))

    return dict(zip(engine_ids,results))
def abort(self):
    """abort my tasks."""
    assert not self.ready(), "Can't abort, I am already done!"
    return self._client.abort(self.msg_ids, targets=self._targets, block=True)
def timedelta(self, start, end, start_key=min, end_key=max):
    """compute the difference between two sets of timestamps

    The default behavior is to use the earliest of the first
    and the latest of the second list, but this can be changed
    by passing a different function as `start_key` or `end_key`.

    Parameters
    ----------
    start : one or more datetime objects (e.g. ar.submitted)
    end : one or more datetime objects (e.g. ar.received)
    start_key : callable
        Function to call on `start` to extract the relevant
        entry [default: min]
    end_key : callable
        Function to call on `end` to extract the relevant
        entry [default: max]

    Returns
    -------
    dt : float
        The time elapsed (in seconds) between the two selected timestamps.
    """
    if not isinstance(start, datetime):
        # handle single_result AsyncResults, where ar.stamp is single object,
        # not a list
        start = start_key(start)
    if not isinstance(end, datetime):
        # handle single_result AsyncResults, where ar.stamp is single object,
        # not a list
        end = end_key(end)
    return _total_seconds(end - start)
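A minimal usage sketch, assuming a connected IPython.parallel view named `view` (the view and its client are not shown in this section); `ar.submitted` and `ar.received` are the timestamp attributes the docstring itself names:

    # minimal sketch, assuming `view` is an IPython.parallel view
    ar = view.map_async(lambda x: x**2, range(32))
    ar.wait()
    # earliest submission -> latest receipt, in seconds
    total = ar.timedelta(ar.submitted, ar.received)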
def progress(self):
    """the number of tasks which have been completed at this point.

    Fractional progress would be given by 1.0 * ar.progress / len(ar)
    """
    self.wait(0)
    return len(self) - len(set(self.msg_ids).intersection(self._client.outstanding))
def elapsed(self):
    """elapsed time since initial submission"""
    if self.ready():
        return self.wall_time

    now = submitted = datetime.now()
    for msg_id in self.msg_ids:
        if msg_id in self._client.metadata:
            stamp = self._client.metadata[msg_id]['submitted']
            if stamp and stamp < submitted:
                submitted = stamp
    return _total_seconds(now-submitted)
def serial_time(self):
    """serial computation time of a parallel calculation

    Computed as the sum of (completed-started) of each task
    """
    t = 0
    for md in self._metadata:
        t += _total_seconds(md['completed'] - md['started'])
    return t
def wait_interactive(self, interval=1., timeout=None):
    """interactive wait, printing progress at regular intervals"""
    N = len(self)
    tic = time.time()
    while not self.ready() and (timeout is None or time.time() - tic <= timeout):
        self.wait(interval)
        clear_output()
        print("%4i/%i tasks finished after %4i s" % (self.progress, N, self.elapsed), end="")
        sys.stdout.flush()
    print()
    print("done")
def _republish_displaypub(self, content, eid):
    """republish individual displaypub content dicts"""
    try:
        ip = get_ipython()
    except NameError:
        # displaypub is meaningless outside IPython
        return
    md = content['metadata'] or {}
    md['engine'] = eid
    ip.display_pub.publish(content['source'], content['data'], md)
def _wait_for_outputs(self, timeout=-1):
    """wait for the 'status=idle' message that indicates we have all outputs
    """
    if not self._success:
        # don't wait on errors
        return
    tic = time.time()
    while not all(md['outputs_ready'] for md in self._metadata):
        time.sleep(0.01)
        self._client._flush_iopub(self._client._iopub_socket)
        if timeout >= 0 and time.time() > tic + timeout:
            break
def display_outputs(self, groupby="type"):
    """republish the outputs of the computation

    Parameters
    ----------
    groupby : str [default: type]
        if 'type':
            Group outputs by type (show all stdout, then all stderr, etc.):

            [stdout:1] foo
            [stdout:2] foo
            [stderr:1] bar
            [stderr:2] bar
        if 'engine':
            Display outputs for each engine before moving on to the next:

            [stdout:1] foo
            [stderr:1] bar
            [stdout:2] foo
            [stderr:2] bar
        if 'order':
            Like 'type', but further collate individual displaypub
            outputs.  This is meant for cases of each command producing
            several plots, and you would like to see all of the first
            plots together, then all of the second plots, and so on.
    """
    if self._single_result:
        self._display_single_result()
        return

    stdouts = self.stdout
    stderrs = self.stderr
    pyouts = self.pyout
    output_lists = self.outputs
    results = self.get()

    targets = self.engine_id

    if groupby == "engine":
        for eid,stdout,stderr,outputs,r,pyout in zip(
                targets, stdouts, stderrs, output_lists, results, pyouts
            ):
            self._display_stream(stdout, '[stdout:%i] ' % eid)
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)

            try:
                get_ipython()
            except NameError:
                # displaypub is meaningless outside IPython
                return

            if outputs or pyout is not None:
                _raw_text('[output:%i]' % eid)

            for output in outputs:
                self._republish_displaypub(output, eid)

            if pyout is not None:
                display(r)

    elif groupby in ('type', 'order'):
        # republish stdout:
        for eid,stdout in zip(targets, stdouts):
            self._display_stream(stdout, '[stdout:%i] ' % eid)

        # republish stderr:
        for eid,stderr in zip(targets, stderrs):
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)

        try:
            get_ipython()
        except NameError:
            # displaypub is meaningless outside IPython
            return

        if groupby == 'order':
            output_dict = dict((eid, outputs) for eid,outputs in zip(targets, output_lists))
            N = max(len(outputs) for outputs in output_lists)
            for i in range(N):
                for eid in targets:
                    outputs = output_dict[eid]
                    if len(outputs) >= N:
                        _raw_text('[output:%i]' % eid)
                        self._republish_displaypub(outputs[i], eid)
        else:
            # republish displaypub output
            for eid,outputs in zip(targets, output_lists):
                if outputs:
                    _raw_text('[output:%i]' % eid)
                for output in outputs:
                    self._republish_displaypub(output, eid)

        # finally, add pyout:
        for eid,r,pyout in zip(targets, results, pyouts):
            if pyout is not None:
                display(r)

    else:
        raise ValueError("groupby must be one of 'type', 'engine', 'order', not %r" % groupby)
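A short usage sketch for the display path above, assuming a DirectView `dview` over several engines (not defined in this section):

    # sketch: run code on all engines, then replay their stdout per engine
    ar = dview.execute("import os; print os.getpid()", block=False)
    ar.wait()
    ar.display_outputs(groupby="engine")   # [stdout:N] blocks, one engine at a time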
def _unordered_iter(self):
    """iterator for results *as they arrive*, on FCFS basis, ignoring submission order."""
    try:
        rlist = self.get(0)
    except error.TimeoutError:
        pending = set(self.msg_ids)
        while pending:
            try:
                self._client.wait(pending, 1e-3)
            except error.TimeoutError:
                # ignore timeout error, because that only means
                # *some* jobs are outstanding
                pass
            # update ready set with those no longer outstanding:
            ready = pending.difference(self._client.outstanding)
            # update pending to exclude those that are finished
            pending = pending.difference(ready)
            while ready:
                msg_id = ready.pop()
                ar = AsyncResult(self._client, msg_id, self._fname)
                rlist = ar.get()
                try:
                    for r in rlist:
                        yield r
                except TypeError:
                    # flattened, not a list
                    # this could get broken by flattened data that returns iterables
                    # but most calls to map do not expose the `flatten` argument
                    yield rlist
    else:
        # already done
        for r in rlist:
            yield r
def wait(self, timeout=-1):
    """wait for result to complete."""
    start = time.time()
    if self._ready:
        return
    local_ids = filter(lambda msg_id: msg_id in self._client.outstanding, self.msg_ids)
    local_ready = self._client.wait(local_ids, timeout)
    if local_ready:
        remote_ids = filter(lambda msg_id: msg_id not in self._client.results, self.msg_ids)
        if not remote_ids:
            self._ready = True
        else:
            rdict = self._client.result_status(remote_ids, status_only=False)
            pending = rdict['pending']
            while pending and (timeout < 0 or time.time() < start+timeout):
                rdict = self._client.result_status(remote_ids, status_only=False)
                pending = rdict['pending']
                if pending:
                    time.sleep(0.1)
            if not pending:
                self._ready = True
    if self._ready:
        try:
            results = map(self._client.results.get, self.msg_ids)
            self._result = results
            if self._single_result:
                r = results[0]
                if isinstance(r, Exception):
                    raise r
            else:
                results = error.collect_exceptions(results, self._fname)
                self._result = self._reconstruct_result(results)
        except Exception, e:
            self._exception = e
            self._success = False
        else:
            self._success = True
        finally:
            self._metadata = map(self._client.metadata.get, self.msg_ids)
def abs_file(filename):
    """Return the absolute normalized form of `filename`."""
    path = os.path.expandvars(os.path.expanduser(filename))
    path = os.path.abspath(os.path.realpath(path))
    path = actual_path(path)
    return path
def prep_patterns(patterns):
    """Prepare the file patterns for use in a `FnmatchMatcher`.

    If a pattern starts with a wildcard, it is used as a pattern
    as-is.  If it does not start with a wildcard, then it is made
    absolute with the current directory.

    If `patterns` is None, an empty list is returned.

    """
    prepped = []
    for p in patterns or []:
        if p.startswith("*") or p.startswith("?"):
            prepped.append(p)
        else:
            prepped.append(abs_file(p))
    return prepped
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    sep_match = re.search(r"[\\/]", s)
    if sep_match:
        the_sep = sep_match.group(0)
    else:
        the_sep = os.sep
    return the_sep
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but subdirectories are checked for a __init__.py to be sure we only
    find the importable files.

    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if i > 0 and '__init__.py' not in filenames:
            # If a directory doesn't have __init__.py, then it isn't
            # importable and neither are its files
            del dirnames[:]
            continue
        for filename in filenames:
            # We're only interested in files that look like reasonable Python
            # files: Must end with .py or .pyw, and must not have certain funny
            # characters that probably mean they are editor junk.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)
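As a quick illustration, the generator can be driven directly; `myproject` here is a hypothetical package directory:

    # sketch: collect importable sources under a hypothetical package dir
    py_files = list(find_python_files("myproject"))
    # subpackages lacking __init__.py (and everything below them) are skipped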
def relative_filename(self, filename):
    """Return the relative form of `filename`.

    The filename will be relative to the current directory when the
    `FileLocator` was constructed.

    """
    fnorm = os.path.normcase(filename)
    if fnorm.startswith(self.relative_dir):
        filename = filename[len(self.relative_dir):]
    return filename
def canonical_filename(self, filename):
    """Return a canonical filename for `filename`.

    An absolute path with no redundant components and normalized case.

    """
    if filename not in self.canonical_filename_cache:
        if not os.path.isabs(filename):
            for path in [os.curdir] + sys.path:
                if path is None:
                    continue
                f = os.path.join(path, filename)
                if os.path.exists(f):
                    filename = f
                    break
        cf = abs_file(filename)
        self.canonical_filename_cache[filename] = cf
    return self.canonical_filename_cache[filename]
def get_zip_data(self, filename):
    """Get data from `filename` if it is a zip file path.

    Returns the string data read from the zip file, or None if no zip file
    could be found or `filename` isn't in it.  The data returned will be
    an empty string if the file is empty.

    """
    import zipimport
    markers = ['.zip'+os.sep, '.egg'+os.sep]
    for marker in markers:
        if marker in filename:
            parts = filename.split(marker)
            try:
                zi = zipimport.zipimporter(parts[0]+marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = zi.get_data(parts[1])
            except IOError:
                continue
            return to_string(data)
    return None
def match(self, fpath):
    """Does `fpath` indicate a file in one of our trees?"""
    for d in self.dirs:
        if fpath.startswith(d):
            if fpath == d:
                # This is the same file!
                return True
            if fpath[len(d)] == os.sep:
                # This is a file in the directory
                return True
    return False
def match(self, fpath):
    """Does `fpath` match one of our filename patterns?"""
    for pat in self.pats:
        if fnmatch.fnmatch(fpath, pat):
            return True
    return False
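The two `match` methods above belong to different matcher classes (a tree matcher over `self.dirs`, an fnmatch matcher over `self.pats`). A hedged sketch of how the pattern side pairs with `prep_patterns`; the `FnmatchMatcher` constructor shown is an assumption, since the method only reveals that the class stores `self.pats`:

    # hypothetical wiring: prep_patterns feeds a matcher that keeps `pats`
    pats = prep_patterns(["*/tests/*", "scratch.py"])
    # "*/tests/*" is kept as-is (wildcard prefix); "scratch.py" is made absolute
    matcher = FnmatchMatcher(pats)          # assumed: __init__ stores pats
    matcher.match(abs_file("scratch.py"))   # -> True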
def add(self, pattern, result):
    """Add the `pattern`/`result` pair to the list of aliases.

    `pattern` is an `fnmatch`-style pattern.  `result` is a simple
    string.  When mapping paths, if a path starts with a match against
    `pattern`, then that match is replaced with `result`.  This models
    isomorphic source trees being rooted at different places on two
    different machines.

    `pattern` can't end with a wildcard component, since that would
    match an entire tree, and not just its root.

    """
    # The pattern can't end with a wildcard component.
    pattern = pattern.rstrip(r"\/")
    if pattern.endswith("*"):
        raise CoverageException("Pattern must not end with wildcards.")
    pattern_sep = sep(pattern)

    # The pattern is meant to match a filepath.  Let's make it absolute
    # unless it already is, or is meant to match any prefix.
    if not pattern.startswith('*') and not isabs_anywhere(pattern):
        pattern = abs_file(pattern)
    pattern += pattern_sep

    # Make a regex from the pattern.  fnmatch always adds a \Z or $ to
    # match the whole string, which we don't want.
    regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
    if regex_pat.endswith("$"):
        regex_pat = regex_pat[:-1]
    # We want */a/b.py to match on Windows too, so change slash to match
    # either separator.
    regex_pat = regex_pat.replace(r"\/", r"[\\/]")
    # We want case-insensitive matching, so add that flag.
    regex = re.compile(r"(?i)" + regex_pat)

    # Normalize the result: it must end with a path separator.
    result_sep = sep(result)
    result = result.rstrip(r"\/") + result_sep
    self.aliases.append((regex, result, pattern_sep, result_sep))
def map(self, path):
    """Map `path` through the aliases.

    `path` is checked against all of the patterns.  The first pattern to
    match is used to replace the root of the path with the result root.
    Only one pattern is ever used.  If no patterns match, `path` is
    returned unchanged.

    The separator style in the result is made to match that of the result
    in the alias.

    """
    for regex, result, pattern_sep, result_sep in self.aliases:
        m = regex.match(path)
        if m:
            new = path.replace(m.group(0), result)
            if pattern_sep != result_sep:
                new = new.replace(pattern_sep, result_sep)
            if self.locator:
                new = self.locator.canonical_filename(new)
            return new
    return path
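To make the alias mechanics concrete, a sketch (assuming `PathAliases` can be constructed without a locator):

    # sketch: remap a path recorded on a CI machine to its local checkout
    aliases = PathAliases()                 # assumed locator-less construction
    aliases.add(r"C:\ci\build\src", "/home/dev/src")
    aliases.map(r"C:\ci\build\src\pkg\mod.py")
    # -> '/home/dev/src/pkg/mod.py' (root swapped, separators normalized)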
def loop_qt4(kernel):
    """Start a kernel with PyQt4 event loop integration."""
    from IPython.external.qt_for_kernel import QtCore
    from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4

    kernel.app = get_app_qt4([" "])
    kernel.app.setQuitOnLastWindowClosed(False)
    kernel.timer = QtCore.QTimer()
    kernel.timer.timeout.connect(kernel.do_one_iteration)
    # Units for the timer are in milliseconds
    kernel.timer.start(1000*kernel._poll_interval)
    start_event_loop_qt4(kernel.app)
def loop_wx(kernel):
    """Start a kernel with wx event loop support."""
    import wx
    from IPython.lib.guisupport import start_event_loop_wx

    doi = kernel.do_one_iteration
    # Wx uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)

    # We have to put the wx.Timer in a wx.Frame for it to fire properly.
    # We make the Frame hidden when we create it in the main app below.
    class TimerFrame(wx.Frame):
        def __init__(self, func):
            wx.Frame.__init__(self, None, -1)
            self.timer = wx.Timer(self)
            # Units for the timer are in milliseconds
            self.timer.Start(poll_interval)
            self.Bind(wx.EVT_TIMER, self.on_timer)
            self.func = func

        def on_timer(self, event):
            self.func()

    # We need a custom wx.App to create our Frame subclass that has the
    # wx.Timer to drive the ZMQ event loop.
    class IPWxApp(wx.App):
        def OnInit(self):
            self.frame = TimerFrame(doi)
            self.frame.Show(False)
            return True

    # The redirect=False here makes sure that wx doesn't replace
    # sys.stdout/stderr with its own classes.
    kernel.app = IPWxApp(redirect=False)

    # The import of wx on Linux sets the handler for signal.SIGINT
    # to 0.  This is a bug in wx or gtk.  We fix by just setting it
    # back to the Python default.
    import signal
    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)

    start_event_loop_wx(kernel.app)
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""
    import Tkinter
    doi = kernel.do_one_iteration
    # Tk uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)

    # For Tkinter, we create a Tk object and call its withdraw method.
    class Timer(object):
        def __init__(self, func):
            self.app = Tkinter.Tk()
            self.app.withdraw()
            self.func = func

        def on_timer(self):
            self.func()
            self.app.after(poll_interval, self.on_timer)

        def start(self):
            self.on_timer()  # Call it once to get things going.
            self.app.mainloop()

    kernel.timer = Timer(doi)
    kernel.timer.start()
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
def loop_cocoa(kernel):
    """Start the kernel, coordinating with the Cocoa CFRunLoop event loop
    via the matplotlib MacOSX backend.
    """
    import matplotlib
    if matplotlib.__version__ < '1.1.0':
        kernel.log.warn(
            "MacOSX backend in matplotlib %s doesn't have a Timer, "
            "falling back on Tk for CFRunLoop integration. Note that "
            "even this won't work if Tk is linked against X11 instead of "
            "Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
            "you must use matplotlib >= 1.1.0, or a native libtk."
            % matplotlib.__version__
        )
        return loop_tk(kernel)

    from matplotlib.backends.backend_macosx import TimerMac, show

    # scale interval for sec->ms
    poll_interval = int(1000*kernel._poll_interval)

    real_excepthook = sys.excepthook
    def handle_int(etype, value, tb):
        """don't let KeyboardInterrupts look like crashes"""
        if etype is KeyboardInterrupt:
            io.raw_print("KeyboardInterrupt caught in CFRunLoop")
        else:
            real_excepthook(etype, value, tb)

    # add doi() as a Timer to the CFRunLoop
    def doi():
        # restore excepthook during IPython code
        sys.excepthook = real_excepthook
        kernel.do_one_iteration()
        # and back:
        sys.excepthook = handle_int

    t = TimerMac(poll_interval)
    t.add_callback(doi)
    t.start()

    # but still need a Poller for when there are no active windows,
    # during which time mainloop() returns immediately
    poller = zmq.Poller()
    if kernel.control_stream:
        poller.register(kernel.control_stream.socket, zmq.POLLIN)
    for stream in kernel.shell_streams:
        poller.register(stream.socket, zmq.POLLIN)

    while True:
        try:
            # double nested try/except, to properly catch KeyboardInterrupt
            # due to pyzmq Issue #130
            try:
                # don't let interrupts during mainloop invoke crash_handler:
                sys.excepthook = handle_int
                show.mainloop()
                sys.excepthook = real_excepthook
                # use poller if mainloop returned (no windows)
                # scale by extra factor of 10, since it's a real poll
                poller.poll(10*poll_interval)
                kernel.do_one_iteration()
            except:
                raise
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            io.raw_print("KeyboardInterrupt caught in kernel")
        finally:
            # ensure excepthook is restored
            sys.excepthook = real_excepthook
def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        raise ValueError("GUI %r not supported" % gui)
    if kernel is None:
        if Application.initialized():
            kernel = getattr(Application.instance(), 'kernel', None)
        if kernel is None:
            raise RuntimeError("You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    loop = loop_map[gui]
    if kernel.eventloop is not None and kernel.eventloop is not loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = loop
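`enable_gui` looks the loop function up in a `loop_map` registry that is not shown in this section; presumably it is a plain dict from GUI names to the `loop_*` functions above, roughly:

    # assumed shape of the loop_map registry consulted by enable_gui
    loop_map = {
        'qt4': loop_qt4,
        'wx': loop_wx,
        'tk': loop_tk,
        'gtk': loop_gtk,
        'osx': loop_cocoa,
    }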
def GOE(N):
    """Creates an NxN element of the Gaussian Orthogonal Ensemble"""
    m = ra.standard_normal((N,N))
    m += m.T
    return m/2
def center_eigenvalue_diff(mat):
    """Compute the eigvals of mat and then find the center eigval difference."""
    N = len(mat)
    evals = np.sort(la.eigvals(mat))
    diff = np.abs(evals[N/2] - evals[N/2-1])
    return diff
def ensemble_diffs(num, N):
    """Return num eigenvalue diffs for the NxN GOE ensemble."""
    diffs = np.empty(num)
    for i in xrange(num):
        mat = GOE(N)
        diffs[i] = center_eigenvalue_diff(mat)
    return diffs
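A small driver for the three functions above, assuming the module-level aliases the code implies (`ra` for `numpy.random`, `la` for `numpy.linalg`):

    import numpy as np
    import numpy.random as ra   # assumed alias, matching GOE()'s use of `ra`
    import numpy.linalg as la   # assumed alias, matching center_eigenvalue_diff()

    diffs = ensemble_diffs(100, 30)      # 100 samples of 30x30 GOE spacings
    normalized = diffs / diffs.mean()    # normalized central level spacings
    print normalized.mean(), normalized.std()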
def init(self, ctxt, step_addr):
    """
    Initialize the item.  This calls the class constructor with the
    appropriate arguments and returns the initialized object.

    :param ctxt: The context object.
    :param step_addr: The address of the step in the test
                      configuration.
    """

    return self.cls(ctxt, self.name, self.conf, step_addr)
def parse_file(cls, ctxt, fname, key=None, step_addr=None):
    """
    Parse a YAML file containing test steps.

    :param ctxt: The context object.
    :param fname: The name of the file to parse.
    :param key: An optional dictionary key.  If specified, the
                file must be a YAML dictionary, and the referenced
                value will be interpreted as a list of steps.  If
                not provided, the file must be a YAML list, which
                will be interpreted as the list of steps.
    :param step_addr: The address of the step in the test
                      configuration.  This may be used in the case
                      of includes, for instance.

    :returns: A list of ``Step`` objects.
    """

    # Load the YAML file
    try:
        with open(fname) as f:
            step_data = yaml.load(f)
    except Exception as exc:
        raise ConfigError(
            'Failed to read file "%s": %s' % (fname, exc),
            step_addr,
        )

    # Do we have a key?
    if key is not None:
        if (not isinstance(step_data, collections.Mapping) or
                key not in step_data):
            raise ConfigError(
                'Bad step configuration file "%s": expecting dictionary '
                'with key "%s"' % (fname, key),
                step_addr,
            )

        # Extract just the step data
        step_data = step_data[key]

    # Validate that it's a sequence
    if not isinstance(step_data, collections.Sequence):
        addr = ('%s[%s]' % (fname, key)) if key is not None else fname
        raise ConfigError(
            'Bad step configuration sequence at %s: expecting list, '
            'not "%s"' % (addr, step_data.__class__.__name__),
            step_addr,
        )

    # OK, assemble the step list and return it
    steps = []
    for idx, step_conf in enumerate(step_data):
        steps.extend(cls.parse_step(
            ctxt, StepAddress(fname, idx, key), step_conf))

    return steps
def parse_step(cls, ctxt, step_addr, step_conf):
    """
    Parse a step dictionary.

    :param ctxt: The context object.
    :param step_addr: The address of the step in the test
                      configuration.
    :param step_conf: The description of the step.  This may be a
                      scalar string or a dictionary.

    :returns: A list of steps.
    """

    # Make sure the step makes sense
    if isinstance(step_conf, six.string_types):
        # Convert string to a dict for uniformity of processing
        step_conf = {step_conf: None}
    elif not isinstance(step_conf, collections.Mapping):
        raise ConfigError(
            'Unable to parse step configuration: expecting string or '
            'dictionary, not "%s"' % step_conf.__class__.__name__,
            step_addr,
        )

    # Parse the configuration into the action and modifier classes
    # and the configuration to apply to each
    action_item = None
    mod_items = {}
    kwargs = {}  # extra args for Step.__init__()
    for key, key_conf in step_conf.items():
        # Handle special keys first
        if key in cls.schemas:
            # Validate the key
            utils.schema_validate(key_conf, cls.schemas[key],
                                  ConfigError, key, step_addr=step_addr)

            # Save the value
            kwargs[key] = key_conf

        # Is it an action?
        elif key in entry.points[NAMESPACE_ACTION]:
            if action_item is not None:
                raise ConfigError(
                    'Bad step configuration: action "%s" specified, '
                    'but action "%s" already processed' %
                    (key, action_item.name),
                    step_addr,
                )

            action_item = StepItem(
                entry.points[NAMESPACE_ACTION][key], key, key_conf)

        # OK, is it a modifier?
        elif key in entry.points[NAMESPACE_MODIFIER]:
            mod_class = entry.points[NAMESPACE_MODIFIER][key]

            # Store it in priority order
            mod_items.setdefault(mod_class.priority, [])
            mod_items[mod_class.priority].append(StepItem(
                mod_class, key, key_conf))

        # Couldn't resolve it
        else:
            raise ConfigError(
                'Bad step configuration: unable to resolve action '
                '"%s"' % key,
                step_addr,
            )

    # Make sure we have an action
    if action_item is None:
        raise ConfigError(
            'Bad step configuration: no action specified',
            step_addr,
        )

    # What is the action type?
    action_type = (Modifier.STEP if action_item.cls.step_action
                   else Modifier.NORMAL)

    # OK, build our modifiers list and preprocess the action
    # configuration
    modifiers = []
    for mod_item in utils.iter_prio_dict(mod_items):
        # Verify that the modifier is compatible with the
        # action
        if mod_item.cls.restriction & action_type == 0:
            raise ConfigError(
                'Bad step configuration: modifier "%s" is '
                'incompatible with the action "%s"' %
                (mod_item.name, action_item.name),
                step_addr,
            )

        # Initialize the modifier
        modifier = mod_item.init(ctxt, step_addr)

        # Add it to the list of modifiers
        modifiers.append(modifier)

        # Apply the modifier's configuration processing
        action_item.conf = modifier.action_conf(
            ctxt, action_item.cls, action_item.name, action_item.conf,
            step_addr)

    # Now we can initialize the action
    action = action_item.init(ctxt, step_addr)

    # Create the step
    step = cls(step_addr, action, modifiers, **kwargs)

    # If the final_action is a StepAction, invoke it now and
    # return the list of steps.  We do this after creating the
    # Step object so that we can take advantage of its handling of
    # modifiers.
    if action_item.cls.step_action:
        return step(ctxt)

    # Not a step action, return the step as a list of one element
    return [step]
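To ground the two parsers, here is a hypothetical steps file of the shape `parse_file` accepts; the action (`run_query`) and modifier (`retry`) names are invented for illustration:

    # hypothetical contents of a steps.yaml consumed by parse_file(ctxt, "steps.yaml")
    STEPS_YAML = """
    - init_database          # bare string: shorthand for {init_database: None}
    - run_query:             # exactly one action key per step...
        sql: "SELECT 1"
      retry:                 # ...plus any number of modifier keys
        max_attempts: 3
    """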
def init_crash_handler(self):
    """Create a crash handler, typically setting sys.excepthook to it."""
    self.crash_handler = self.crash_handler_class(self)
    sys.excepthook = self.excepthook
    def unset_crashhandler():
        sys.excepthook = sys.__excepthook__
    atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
    """this is sys.excepthook after init_crashhandler

    set self.verbose_crash=True to use our full crashhandler, instead of
    a regular traceback with a short message (crash_handler_lite)
    """
    if self.verbose_crash:
        return self.crash_handler(etype, evalue, tb)
    else:
        return crashhandler.crash_handler_lite(etype, evalue, tb)
def load_config_file(self, suppress_errors=True):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen.  For testing, the suppress_errors option is set
    to False, so errors will make tests fail.
    """
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    base_config = 'ipython_config.py'
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        Application.load_config_file(
            self,
            base_config,
            path=self.config_file_paths
        )
    except ConfigFileNotFound:
        # ignore errors loading parent
        self.log.debug("Config file %s not found", base_config)
        pass

    if self.config_file_name == base_config:
        # don't load secondary config
        return
    self.log.debug("Attempting to load config file: %s" % self.config_file_name)
    try:
        Application.load_config_file(
            self,
            self.config_file_name,
            path=self.config_file_paths
        )
    except ConfigFileNotFound:
        # Only warn if the default config file was NOT being used.
        if self.config_file_specified:
            msg = self.log.warn
        else:
            msg = self.log.debug
        msg("Config file not found, skipping: %s", self.config_file_name)
    except:
        # For testing purposes.
        if not suppress_errors:
            raise
        self.log.warn("Error loading config file: %s" %
                      self.config_file_name, exc_info=True)
def init_profile_dir(self):
    """initialize the profile dir"""
    try:
        # location explicitly specified:
        location = self.config.ProfileDir.location
    except AttributeError:
        # location not specified, find by profile name
        try:
            p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
        except ProfileDirError:
            # not found, maybe create it (always create default profile)
            if self.auto_create or self.profile == 'default':
                try:
                    p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
                except ProfileDirError:
                    self.log.fatal("Could not create profile: %r"%self.profile)
                    self.exit(1)
                else:
                    self.log.info("Created profile dir: %r"%p.location)
            else:
                self.log.fatal("Profile %r not found."%self.profile)
                self.exit(1)
        else:
            self.log.info("Using existing profile dir: %r"%p.location)
    else:
        # location is fully specified
        try:
            p = ProfileDir.find_profile_dir(location, self.config)
        except ProfileDirError:
            # not found, maybe create it
            if self.auto_create:
                try:
                    p = ProfileDir.create_profile_dir(location, self.config)
                except ProfileDirError:
                    self.log.fatal("Could not create profile directory: %r"%location)
                    self.exit(1)
                else:
                    self.log.info("Creating new profile dir: %r"%location)
            else:
                self.log.fatal("Profile directory %r not found."%location)
                self.exit(1)
        else:
            self.log.info("Using existing profile dir: %r"%location)

    self.profile_dir = p
    self.config_file_paths.append(p.location)
def init_config_files(self):
    """[optionally] copy default config files into profile dir."""
    # copy config files
    path = self.builtin_profile_dir
    if self.copy_config_files:
        src = self.profile

        cfg = self.config_file_name
        if path and os.path.exists(os.path.join(path, cfg)):
            self.log.warn("Staging %r from %s into %r [overwrite=%s]"%(
                    cfg, src, self.profile_dir.location, self.overwrite)
            )
            self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
        else:
            self.stage_default_config_file()
    else:
        # Still stage *bundled* config files, but not generated ones
        # This is necessary for `ipython profile=sympy` to load the profile
        # on the first go
        files = glob.glob(os.path.join(path, '*.py'))
        for fullpath in files:
            cfg = os.path.basename(fullpath)
            if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
                # file was copied
                self.log.warn("Staging bundled %s from %s into %r"%(
                        cfg, self.profile, self.profile_dir.location)
                )
def stage_default_config_file(self):
    """auto generate default config file, and stage it into the profile."""
    s = self.generate_config_file()
    fname = os.path.join(self.profile_dir.location, self.config_file_name)
    if self.overwrite or not os.path.exists(fname):
        self.log.warn("Generating default config file: %r"%(fname))
        with open(fname, 'w') as f:
            f.write(s)
def read(self):
    """Read coverage data from the coverage data file (if it exists)."""
    if self.use_file:
        self.lines, self.arcs = self._read_file(self.filename)
    else:
        self.lines, self.arcs = {}, {}
def write(self, suffix=None):
    """Write the collected coverage data to a file.

    `suffix` is a suffix to append to the base file name.  This can be
    used for multiple or parallel execution, so that many coverage data
    files can exist simultaneously.  A dot will be used to join the base
    name and the suffix.

    """
    if self.use_file:
        filename = self.filename
        if suffix:
            filename += "." + suffix
        self.write_file(filename)
def erase(self):
    """Erase the data, both in this object, and from its file storage."""
    if self.use_file:
        if self.filename:
            file_be_gone(self.filename)
    self.lines = {}
    self.arcs = {}
def line_data(self):
    """Return the map from filenames to lists of line numbers executed."""
    return dict(
        [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
        )
def arc_data(self):
    """Return the map from filenames to lists of line number pairs."""
    return dict(
        [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
        )
def write_file(self, filename):
    """Write the coverage data to `filename`."""

    # Create the file data.
    data = {}

    data['lines'] = self.line_data()
    arcs = self.arc_data()
    if arcs:
        data['arcs'] = arcs

    if self.collector:
        data['collector'] = self.collector

    if self.debug and self.debug.should('dataio'):
        self.debug.write("Writing data to %r" % (filename,))

    # Write the pickle to the file.
    fdata = open(filename, 'wb')
    try:
        pickle.dump(data, fdata, 2)
    finally:
        fdata.close()
def read_file(self, filename):
    """Read the coverage data from `filename`."""
    self.lines, self.arcs = self._read_file(filename)
def raw_data(self, filename):
    """Return the raw pickled data from `filename`."""
    if self.debug and self.debug.should('dataio'):
        self.debug.write("Reading data from %r" % (filename,))
    fdata = open(filename, 'rb')
    try:
        data = pickle.load(fdata)
    finally:
        fdata.close()
    return data
def _read_file(self, filename):
    """Return the stored coverage data from the given file.

    Returns two values, suitable for assigning to `self.lines` and
    `self.arcs`.

    """
    lines = {}
    arcs = {}
    try:
        data = self.raw_data(filename)
        if isinstance(data, dict):
            # Unpack the 'lines' item.
            lines = dict([
                (f, dict.fromkeys(linenos, None))
                    for f, linenos in iitems(data.get('lines', {}))
                ])
            # Unpack the 'arcs' item.
            arcs = dict([
                (f, dict.fromkeys(arcpairs, None))
                    for f, arcpairs in iitems(data.get('arcs', {}))
                ])
    except Exception:
        pass
    return lines, arcs
def combine_parallel_data(self, aliases=None):
    """Combine a number of data files together.

    Treat `self.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    """
    aliases = aliases or PathAliases()
    data_dir, local = os.path.split(self.filename)
    localdot = local + '.'
    for f in os.listdir(data_dir or '.'):
        if f.startswith(localdot):
            full_path = os.path.join(data_dir, f)
            new_lines, new_arcs = self._read_file(full_path)
            for filename, file_data in iitems(new_lines):
                filename = aliases.map(filename)
                self.lines.setdefault(filename, {}).update(file_data)
            for filename, file_data in iitems(new_arcs):
                filename = aliases.map(filename)
                self.arcs.setdefault(filename, {}).update(file_data)
            if f != local:
                os.remove(full_path)
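A sketch of the combine workflow these methods support, assuming they live on a CoverageData-style class that can be constructed bare:

    # sketch: fold .coverage.<suffix> files from parallel runs into one
    data = CoverageData()                      # assumed bare construction
    aliases = PathAliases()
    aliases.add("/ci/worker1/app", "/home/dev/app")
    data.read()                                # base file, if present
    data.combine_parallel_data(aliases=aliases)
    data.write()                               # persist merged lines/arcs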
def add_line_data(self, line_data):
    """Add executed line data.

    `line_data` is { filename: { lineno: None, ... }, ...}

    """
    for filename, linenos in iitems(line_data):
        self.lines.setdefault(filename, {}).update(linenos)
def add_arc_data(self, arc_data):
    """Add measured arc data.

    `arc_data` is { filename: { (l1,l2): None, ... }, ...}

    """
    for filename, arcs in iitems(arc_data):
        self.arcs.setdefault(filename, {}).update(arcs)
def add_to_hash(self, filename, hasher):
    """Contribute `filename`'s data to the Md5Hash `hasher`."""
    hasher.update(self.executed_lines(filename))
    hasher.update(self.executed_arcs(filename))
def summary(self, fullpath=False):
    """Return a dict summarizing the coverage data.

    Keys are based on the filenames, and values are the number of executed
    lines.  If `fullpath` is true, then the keys are the full pathnames of
    the files, otherwise they are the basenames of the files.

    """
    summ = {}
    if fullpath:
        filename_fn = lambda f: f
    else:
        filename_fn = os.path.basename
    for filename, lines in iitems(self.lines):
        summ[filename_fn(filename)] = len(lines)
    return summ
def get_pasted_lines(sentinel, l_input=py3compat.input):
    """ Yield pasted lines until the user enters the given sentinel value.
    """
    print "Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
        % sentinel
    while True:
        try:
            l = l_input(':')
            if l == sentinel:
                return
            else:
                yield l
        except EOFError:
            print '<EOF>'
            return
def mainloop(self, display_banner=None):
    """Start the mainloop.

    If an optional banner argument is given, it will override the
    internally created default banner.
    """

    with nested(self.builtin_trap, self.display_trap):

        while 1:
            try:
                self.interact(display_banner=display_banner)
                #self.interact_with_readline()
                # XXX for testing of a readline-decoupled repl loop, call
                # interact_with_readline above
                break
            except KeyboardInterrupt:
                # this should not be necessary, but KeyboardInterrupt
                # handling seems rather unpredictable...
                self.write("\nKeyboardInterrupt in interact()\n")
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
    """Store multiple lines as a single entry in history"""

    # do nothing without readline or disabled multiline
    if not self.has_readline or not self.multiline_history:
        return hlen_before_cell

    # windows rl has no remove_history_item
    if not hasattr(self.readline, "remove_history_item"):
        return hlen_before_cell

    # skip empty cells
    if not source_raw.rstrip():
        return hlen_before_cell

    # nothing changed do nothing, e.g. when rl removes consecutive dups
    hlen = self.readline.get_current_history_length()
    if hlen == hlen_before_cell:
        return hlen_before_cell

    for i in range(hlen - hlen_before_cell):
        self.readline.remove_history_item(hlen - i - 1)
    stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
    self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
                                                       stdin_encoding))
    return self.readline.get_current_history_length()
def interact(self, display_banner=None):
    """Closely emulate the interactive Python console."""

    # batch run -> do not interact
    if self.exit_now:
        return

    if display_banner is None:
        display_banner = self.display_banner

    if isinstance(display_banner, basestring):
        self.show_banner(display_banner)
    elif display_banner:
        self.show_banner()

    more = False

    if self.has_readline:
        self.readline_startup_hook(self.pre_readline)
        hlen_b4_cell = self.readline.get_current_history_length()
    else:
        hlen_b4_cell = 0
    # exit_now is set by a call to %Exit or %Quit, through the
    # ask_exit callback.

    while not self.exit_now:
        self.hooks.pre_prompt_hook()
        if more:
            try:
                prompt = self.prompt_manager.render('in2')
            except:
                self.showtraceback()
            if self.autoindent:
                self.rl_do_indent = True
        else:
            try:
                prompt = self.separate_in + self.prompt_manager.render('in')
            except:
                self.showtraceback()
        try:
            line = self.raw_input(prompt)
            if self.exit_now:
                # quick exit on sys.std[in|out] close
                break
            if self.autoindent:
                self.rl_do_indent = False
        except KeyboardInterrupt:
            #double-guard against keyboardinterrupts during kbdint handling
            try:
                self.write('\nKeyboardInterrupt\n')
                source_raw = self.input_splitter.source_raw_reset()[1]
                hlen_b4_cell = \
                    self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
                more = False
            except KeyboardInterrupt:
                pass
        except EOFError:
            if self.autoindent:
                self.rl_do_indent = False
                if self.has_readline:
                    self.readline_startup_hook(None)
            self.write('\n')
            self.exit()
        except bdb.BdbQuit:
            warn('The Python debugger has exited with a BdbQuit exception.\n'
                 'Because of how pdb handles the stack, it is impossible\n'
                 'for IPython to properly format this particular exception.\n'
                 'IPython will resume normal operation.')
        except:
            # exceptions here are VERY RARE, but they can be triggered
            # asynchronously by signal handlers, for example.
            self.showtraceback()
        else:
            self.input_splitter.push(line)
            more = self.input_splitter.push_accepts_more()
            if (self.SyntaxTB.last_syntax_error and
                    self.autoedit_syntax):
                self.edit_syntax_error()
            if not more:
                source_raw = self.input_splitter.source_raw_reset()[1]
                self.run_cell(source_raw, store_history=True)
                hlen_b4_cell = \
                    self._replace_rlhist_multiline(source_raw, hlen_b4_cell)

    # Turn off the exit flag, so the mainloop can be restarted if desired
    self.exit_now = False
def raw_input(self, prompt=''):
    """Write a prompt and read a line.

    The returned line does not include the trailing newline.
    When the user enters the EOF key sequence, EOFError is raised.

    Optional inputs:

      - prompt(''): a string to be printed to prompt the user.

      - continue_prompt(False): whether this line is the first one or a
        continuation in a sequence of inputs.
    """
    # Code run by the user may have modified the readline completer state.
    # We must ensure that our completer is back in place.

    if self.has_readline:
        self.set_readline_completer()

    # raw_input expects str, but we pass it unicode sometimes
    prompt = py3compat.cast_bytes_py2(prompt)

    try:
        line = py3compat.str_to_unicode(self.raw_input_original(prompt))
    except ValueError:
        warn("\n********\nYou or a %run:ed script called sys.stdin.close()"
             " or sys.stdout.close()!\nExiting IPython!\n")
        self.ask_exit()
        return ""

    # Try to be reasonably smart about not re-indenting pasted input more
    # than necessary.  We do this by trimming out the auto-indent initial
    # spaces, if the user's actual input started itself with whitespace.
    if self.autoindent:
        if num_ini_spaces(line) > self.indent_current_nsp:
            line = line[self.indent_current_nsp:]
            self.indent_current_nsp = 0

    return line
def edit_syntax_error(self):
    """The bottom half of the syntax error handler called in the main loop.

    Loop until syntax error is fixed or user cancels.
    """

    while self.SyntaxTB.last_syntax_error:
        # copy and clear last_syntax_error
        err = self.SyntaxTB.clear_err_state()
        if not self._should_recompile(err):
            return
        try:
            # may set last_syntax_error again if a SyntaxError is raised
            self.safe_execfile(err.filename, self.user_ns)
        except:
            self.showtraceback()
        else:
            try:
                f = open(err.filename)
                try:
                    # This should be inside a display_trap block and I
                    # think it is.
                    sys.displayhook(f.read())
                finally:
                    f.close()
            except:
                self.showtraceback()
def _should_recompile(self, e):
    """Utility routine for edit_syntax_error"""
    if e.filename in ('<ipython console>', '<input>', '<string>',
                      '<console>', '<BackgroundJob compilation>',
                      None):
        return False
    try:
        if (self.autoedit_syntax and
                not self.ask_yes_no('Return to editor to correct syntax error? '
                                    '[Y/n] ', 'y')):
            return False
    except EOFError:
        return False

    def int0(x):
        try:
            return int(x)
        except TypeError:
            return 0

    # always pass integer line and offset values to editor hook
    try:
        self.hooks.fix_error_editor(e.filename,
                                    int0(e.lineno), int0(e.offset), e.msg)
    except TryNext:
        warn('Could not open editor')
        return False
    return True
def exit(self):
    """Handle interactive exit.

    This method calls the ask_exit callback."""
    if self.confirm_exit:
        if self.ask_yes_no('Do you really want to exit ([y]/n)?', 'y'):
            self.ask_exit()
    else:
        self.ask_exit()
def get_url_rev(self):
    """
    Returns the correct repository URL and revision by parsing the given
    repository URL
    """
    error_message = (
        "Sorry, '%s' is a malformed VCS url. "
        "The format is <vcs>+<protocol>://<url>, "
        "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
    assert '+' in self.url, error_message % self.url
    url = self.url.split('+', 1)[1]
    scheme, netloc, path, query, frag = urlparse.urlsplit(url)
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
    return url, rev
def dispose_at_exit(exitable):
    '''
    register `exitable.__exit__()` into `atexit` module.

    return the `exitable` itself.
    '''
    @atexit.register
    def callback():
        exitable.__exit__(*sys.exc_info())
    return exitable
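A short usage sketch; any object with an `__exit__` method qualifies, and a file object is used here purely for illustration:

    # sketch: keep a log handle open for the process lifetime;
    # atexit calls log_file.__exit__(...) when the interpreter shuts down
    log_file = dispose_at_exit(open("app.log", "a"))
    log_file.write("started\n")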
def new_frontend_master(self):
    """ Create and return new frontend attached to new kernel, launched on localhost.
    """
    ip = self.ip if self.ip in LOCAL_IPS else LOCALHOST
    kernel_manager = self.kernel_manager_class(
                            ip=ip,
                            connection_file=self._new_connection_file(),
                            config=self.config,
    )
    # start the kernel
    kwargs = dict()
    kwargs['extra_arguments'] = self.kernel_argv
    kernel_manager.start_kernel(**kwargs)
    kernel_manager.start_channels()
    widget = self.widget_factory(config=self.config,
                                 local_kernel=True)
    self.init_colors(widget)
    widget.kernel_manager = kernel_manager
    widget._existing = False
    widget._may_close = True
    widget._confirm_exit = self.confirm_exit
    return widget
def new_frontend_slave(self, current_widget):
    """Create and return a new frontend attached to an existing kernel.

    Parameters
    ----------
    current_widget : IPythonWidget
        The IPythonWidget whose kernel this frontend is to share
    """
    kernel_manager = self.kernel_manager_class(
                            connection_file=current_widget.kernel_manager.connection_file,
                            config=self.config,
    )
    kernel_manager.load_connection_file()
    kernel_manager.start_channels()
    widget = self.widget_factory(config=self.config,
                                 local_kernel=False)
    self.init_colors(widget)
    widget._existing = True
    widget._may_close = False
    widget._confirm_exit = False
    widget.kernel_manager = kernel_manager
    return widget
def init_colors(self, widget):
    """Configure the coloring of the widget"""
    # Note: This will be dramatically simplified when colors
    # are removed from the backend.

    # parse the colors arg down to current known labels
    try:
        colors = self.config.ZMQInteractiveShell.colors
    except AttributeError:
        colors = None
    try:
        style = self.config.IPythonWidget.syntax_style
    except AttributeError:
        style = None
    try:
        sheet = self.config.IPythonWidget.style_sheet
    except AttributeError:
        sheet = None

    # find the value for colors:
    if colors:
        colors = colors.lower()
        if colors in ('lightbg', 'light'):
            colors = 'lightbg'
        elif colors in ('dark', 'linux'):
            colors = 'linux'
        else:
            colors = 'nocolor'
    elif style:
        if style == 'bw':
            colors = 'nocolor'
        elif styles.dark_style(style):
            colors = 'linux'
        else:
            colors = 'lightbg'
    else:
        colors = None

    # Configure the style
    if style:
        widget.style_sheet = styles.sheet_from_template(style, colors)
        widget.syntax_style = style
        widget._syntax_style_changed()
        widget._style_sheet_changed()
    elif colors:
        # use a default dark/light/bw style
        widget.set_default_style(colors=colors)

    if self.stylesheet:
        # we got an explicit stylesheet
        if os.path.isfile(self.stylesheet):
            with open(self.stylesheet) as f:
                sheet = f.read()
        else:
            raise IOError("Stylesheet %r not found." % self.stylesheet)
    if sheet:
        widget.style_sheet = sheet
        widget._style_sheet_changed()
def info(self):
    """return the connection info for this object's sockets."""
    return (self.identity, self.url, self.pub_url, self.location)
def connect(self, peers):
    """connect to peers.  `peers` will be a dict of 4-tuples, keyed by name.
    {peer : (ident, addr, pub_addr, location)}
    where peer is the name, ident is the XREP identity, addr and pub_addr
    are the XREP and PUB connection urls, and location is used to
    disambiguate them.
    """
    for peer, (ident, url, pub_url, location) in peers.items():
        self.peers[peer] = ident
        if ident != self.identity:
            self.sub.connect(disambiguate_url(pub_url, location))
        if ident > self.identity:
            # prevent duplicate xrep, by only connecting
            # engines to engines with higher IDENTITY
            # a doubly-connected pair will crash
            self.socket.connect(disambiguate_url(url, location))
def Rconverter(Robj, dataframe=False):
    """
    Convert an object in R's namespace to one suitable
    for ipython's namespace.

    For a data.frame, it tries to return a structured array.
    It first checks for colnames, then names.
    If all are NULL, it returns np.asarray(Robj), else
    it tries to construct a recarray

    Parameters
    ----------

    Robj: an R object returned from rpy2
    """
    is_data_frame = ro.r('is.data.frame')
    colnames = ro.r('colnames')
    rownames = ro.r('rownames')  # with pandas, these could be used for the index
    names = ro.r('names')

    if dataframe:
        as_data_frame = ro.r('as.data.frame')
        cols = colnames(Robj)
        _names = names(Robj)
        if cols != ri.NULL:
            Robj = as_data_frame(Robj)
            names = tuple(np.array(cols))
        elif _names != ri.NULL:
            names = tuple(np.array(_names))
        else:
            # failed to find names
            return np.asarray(Robj)
        Robj = np.rec.fromarrays(Robj, names = names)
    return np.asarray(Robj)
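A hedged sketch of converting an R data.frame, assuming rpy2-era semantics for the `ro`/`ri` aliases the function uses; whether the structured-array path succeeds depends on the rpy2 version:

    # sketch, assuming ro = rpy2.robjects and ri = rpy2.rinterface as above
    import rpy2.robjects as ro

    df = ro.r('data.frame(x=1:3, y=c(2.5, 3.5, 4.5))')
    arr = Rconverter(df, dataframe=True)   # structured array with fields x, y
    print arr['x'], arr['y']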
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved.

    FIXED version with which we monkeypatch the stdlib to work around a bug."""

    file = getsourcefile(object) or getfile(object)
    # If the object is a frame, then trying to get the globals dict from its
    # module won't work. Instead, the frame object itself has the globals
    # dictionary.
    globals_dict = None
    if inspect.isframe(object):
        # XXX: can this ever be false?
        globals_dict = object.f_globals
    else:
        module = getmodule(object, file)
        if module:
            globals_dict = module.__dict__
    lines = linecache.getlines(file, globals_dict)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        pmatch = pat.match
        # fperez - fix: sometimes, co_firstlineno can give a number larger than
        # the length of lines, which causes an error.  Safeguard against that.
        lnum = min(object.co_firstlineno, len(lines)) - 1
        while lnum > 0:
            if pmatch(lines[lnum]):
                break
            lnum -= 1

        return lines, lnum
    raise IOError('could not find code object')
def fix_frame_records_filenames(records):
    """Try to fix the filenames in each record from inspect.getinnerframes().

    Particularly, modules loaded from within zip files have useless filenames
    attached to their code object, and inspect.getinnerframes() just uses it.
    """
    fixed_records = []
    for frame, filename, line_no, func_name, lines, index in records:
        # Look inside the frame's globals dictionary for __file__, which should
        # be better.
        better_fn = frame.f_globals.get('__file__', None)
        if isinstance(better_fn, str):
            # Check the type just in case someone did something weird with
            # __file__. It might also be None if the error occurred during
            # import.
            filename = better_fn
        fixed_records.append((frame, filename, line_no, func_name, lines, index))
    return fixed_records
def set_colors(self, *args, **kw):
    """Shorthand access to the color table scheme selector method."""

    # Set own color table
    self.color_scheme_table.set_active_scheme(*args, **kw)
    # for convenience, set Colors to the active scheme
    self.Colors = self.color_scheme_table.active_colors
    # Also set colors of debugger
    if hasattr(self, 'pdb') and self.pdb is not None:
        self.pdb.set_colors(*args, **kw)
def color_toggle(self):
    """Toggle between the currently active color scheme and NoColor."""

    if self.color_scheme_table.active_scheme_name == 'NoColor':
        self.color_scheme_table.set_active_scheme(self.old_scheme)
        self.Colors = self.color_scheme_table.active_colors
    else:
        self.old_scheme = self.color_scheme_table.active_scheme_name
        self.color_scheme_table.set_active_scheme('NoColor')
        self.Colors = self.color_scheme_table.active_colors
def text(self, etype, value, tb, tb_offset=None, context=5):
    """Return formatted traceback.

    Subclasses may override this if they add extra arguments.
    """
    tb_list = self.structured_traceback(etype, value, tb,
                                        tb_offset, context)
    return self.stb2text(tb_list)
def structured_traceback(self, etype, value, elist, tb_offset=None,
                         context=5):
    """Return a color formatted string with the traceback info.

    Parameters
    ----------
    etype : exception type
        Type of the exception raised.

    value : object
        Data stored in the exception

    elist : list
        List of frames, see class docstring for details.

    tb_offset : int, optional
        Number of frames in the traceback to skip.  If not given, the
        instance value is used (set in constructor).

    context : int, optional
        Number of lines of context information to print.

    Returns
    -------
    String with formatted exception.
    """
    tb_offset = self.tb_offset if tb_offset is None else tb_offset
    Colors = self.Colors
    out_list = []
    if elist:

        if tb_offset and len(elist) > tb_offset:
            elist = elist[tb_offset:]

        out_list.append('Traceback %s(most recent call last)%s:' %
                        (Colors.normalEm, Colors.Normal) + '\n')
        out_list.extend(self._format_list(elist))
    # The exception info should be a single entry in the list.
    lines = ''.join(self._format_exception_only(etype, value))
    out_list.append(lines)

    # Note: this code originally read:

    ## for line in lines[:-1]:
    ##     out_list.append(" "+line)
    ## out_list.append(lines[-1])

    # This means it was indenting everything but the last line by a little
    # bit.  I've disabled this for now, but if we see ugliness somewhere we
    # can restore it.

    return out_list
def _format_list(self, extracted_list):
    """Format a list of traceback entry tuples for printing.

    Given a list of tuples as returned by extract_tb() or
    extract_stack(), return a list of strings ready for printing.
    Each string in the resulting list corresponds to the item with the
    same index in the argument list.  Each string ends in a newline;
    the strings may contain internal newlines as well, for those items
    whose source text line is not None.

    Lifted almost verbatim from traceback.py
    """

    Colors = self.Colors
    list = []
    for filename, lineno, name, line in extracted_list[:-1]:
        item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
                (Colors.filename, filename, Colors.Normal,
                 Colors.lineno, lineno, Colors.Normal,
                 Colors.name, name, Colors.Normal)
        if line:
            item += ' %s\n' % line.strip()
        list.append(item)
    # Emphasize the last entry
    filename, lineno, name, line = extracted_list[-1]
    item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \
            (Colors.normalEm,
             Colors.filenameEm, filename, Colors.normalEm,
             Colors.linenoEm, lineno, Colors.normalEm,
             Colors.nameEm, name, Colors.normalEm,
             Colors.Normal)
    if line:
        item += '%s %s%s\n' % (Colors.line, line.strip(),
                               Colors.Normal)
    list.append(item)
    #from pprint import pformat; print 'LISTTB', pformat(list) # dbg
    return list
def _format_exception_only(self, etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.exc_info()[:2].  The return value is a list of strings, each ending
    in a newline.  Normally, the list contains a single string; however,
    for SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax error
    occurred.  The message indicating which exception occurred is the
    always last string in the list.

    Also lifted nearly verbatim from traceback.py
    """
    have_filedata = False
    Colors = self.Colors
    list = []
    stype = Colors.excName + etype.__name__ + Colors.Normal
    if value is None:
        # Not sure if this can still happen in Python 2.6 and above
        list.append( str(stype) + '\n')
    else:
        if etype is SyntaxError:
            have_filedata = True
            #print 'filename is',filename # dbg
            if not value.filename: value.filename = "<string>"
            list.append('%s File %s"%s"%s, line %s%d%s\n' % \
                    (Colors.normalEm,
                     Colors.filenameEm, value.filename, Colors.normalEm,
                     Colors.linenoEm, value.lineno, Colors.Normal ))
            if value.text is not None:
                i = 0
                while i < len(value.text) and value.text[i].isspace():
                    i += 1
                list.append('%s %s%s\n' % (Colors.line,
                                           value.text.strip(),
                                           Colors.Normal))
                if value.offset is not None:
                    s = ' '
                    for c in value.text[i:value.offset-1]:
                        if c.isspace():
                            s += c
                        else:
                            s += ' '
                    list.append('%s%s^%s\n' % (Colors.caret, s,
                                               Colors.Normal) )

        try:
            s = value.msg
        except Exception:
            s = self._some_str(value)
        if s:
            list.append('%s%s:%s %s\n' % (str(stype), Colors.excName,
                                          Colors.Normal, s))
        else:
            list.append('%s\n' % str(stype))

    # sync with user hooks
    if have_filedata:
        ipinst = ipapi.get()
        if ipinst is not None:
            ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)

    return list
def show_exception_only(self, etype, evalue):
    """Only print the exception type and message, without a traceback.

    Parameters
    ----------
    etype : exception type
    value : exception value
    """
    # This method needs to use __call__ from *this* class, not the one from
    # a subclass whose signature or behavior may be different
    ostream = self.ostream
    ostream.flush()
    ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
    ostream.flush()
def structured_traceback(self, etype, evalue, etb, tb_offset=None,
                         context=5):
    """Return a nice text document describing the traceback."""

    tb_offset = self.tb_offset if tb_offset is None else tb_offset

    # some locals
    try:
        etype = etype.__name__
    except AttributeError:
        pass
    Colors        = self.Colors   # just a shorthand + quicker name lookup
    ColorsNormal  = Colors.Normal  # used a lot
    col_scheme    = self.color_scheme_table.active_scheme_name
    indent        = ' '*INDENT_SIZE
    em_normal     = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
    undefined     = '%sundefined%s' % (Colors.em, ColorsNormal)
    exc           = '%s%s%s' % (Colors.excName, etype, ColorsNormal)

    # some internal-use functions
    def text_repr(value):
        """Hopefully pretty robust repr equivalent."""
        # this is pretty horrible but should always return *something*
        try:
            return pydoc.text.repr(value)
        except KeyboardInterrupt:
            raise
        except:
            try:
                return repr(value)
            except KeyboardInterrupt:
                raise
            except:
                try:
                    # all still in an except block so we catch
                    # getattr raising
                    name = getattr(value, '__name__', None)
                    if name:
                        # ick, recursion
                        return text_repr(name)
                    klass = getattr(value, '__class__', None)
                    if klass:
                        return '%s instance' % text_repr(klass)
                except KeyboardInterrupt:
                    raise
                except:
                    return 'UNRECOVERABLE REPR FAILURE'

    def eqrepr(value, repr=text_repr):
        return '=%s' % repr(value)

    def nullrepr(value, repr=text_repr):
        return ''

    # meat of the code begins
    if self.long_header:
        # Header with the exception type, python version, and date
        pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
        date = time.ctime(time.time())

        head = '%s%s%s\n%s%s%s\n%s' % (Colors.topline, '-'*75, ColorsNormal,
                                       exc, ' '*(75-len(str(etype))-len(pyver)),
                                       pyver, date.rjust(75))
        head += "\nA problem occurred executing Python code.  Here is the sequence of function"\
                "\ncalls leading up to the error, with the most recent (innermost) call last."
    else:
        # Simplified header
        head = '%s%s%s\n%s%s' % (Colors.topline, '-'*75, ColorsNormal, exc,
                                 'Traceback (most recent call last)'.\
                                 rjust(75 - len(str(etype))))
    frames = []
    # Flush cache before calling inspect.  This helps alleviate some of the
    # problems with python 2.3's inspect.py.
    ##self.check_cache()
    # Drop topmost frames if requested
    try:
        # Try the default getinnerframes and Alex's: Alex's fixes some
        # problems, but it generates empty tracebacks for console errors
        # (5 blank lines) where none should be returned.
        #records = inspect.getinnerframes(etb, context)[tb_offset:]
        #print 'python records:', records # dbg
        records = _fixed_getinnerframes(etb, context, tb_offset)
        #print 'alex   records:', records # dbg
    except:
        # FIXME: I've been getting many crash reports from python 2.3
        # users, traceable to inspect.py.  If I can find a small test-case
        # to reproduce this, I should either write a better workaround or
        # file a bug report against inspect (if that's the real problem).
        # So far, I haven't been able to find an isolated example to
        # reproduce the problem.
        inspect_error()
        traceback.print_exc(file=self.ostream)
        info('\nUnfortunately, your original traceback can not be constructed.\n')
        return ''

    # build some color string templates outside these nested loops
    tpl_link       = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
    tpl_call       = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm,
                                          ColorsNormal)
    tpl_call_fail  = 'in %s%%s%s(***failed resolving arguments***)%s' % \
                     (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_local_var  = '%s%%s%s' % (Colors.vName, ColorsNormal)
    tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
                                             Colors.vName, ColorsNormal)
    tpl_name_val   = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
    tpl_line       = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
    tpl_line_em    = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line,
                                        ColorsNormal)

    # now, loop over all records printing context and info
    abspath = os.path.abspath
    for frame, file, lnum, func, lines, index in records:
        #print '*** record:',file,lnum,func,lines,index # dbg
        if not file:
            file = '?'
        elif not (file.startswith("<") and file.endswith(">")):
            # Guess that filenames like <string> aren't real filenames, so
            # don't call abspath on them.
            try:
                file = abspath(file)
            except OSError:
                # Not sure if this can still happen: abspath now works
                # with file names like <string>
                pass
        link = tpl_link % file
        args, varargs, varkw, locals = inspect.getargvalues(frame)

        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            var_repr = self.include_vars and eqrepr or nullrepr
            try:
                call = tpl_call % (func, inspect.formatargvalues(args,
                                            varargs, varkw, locals,
                                            formatvalue=var_repr))
            except KeyError:
                # This happens in situations like errors inside generator
                # expressions, where local variables are listed in the
                # line, but can't be extracted from the frame.  I'm not
                # 100% sure this isn't actually a bug in inspect itself,
                # but since there's no info for us to compute with, the
                # best we can do is report the failure and move on.  Here
                # we must *not* call any traceback construction again,
                # because that would mess up use of %debug later on.  So we
                # simply report the failure and move on.  The only
                # limitation will be that this frame won't have locals
                # listed in the call signature.  Quite subtle problem...
                # I can't think of a good way to validate this in a unit
                # test, but running a script consisting of:
                #  dict( (k,v.strip()) for (k,v) in range(10) )
                # will illustrate the error, if this exception catch is
                # disabled.
                call = tpl_call_fail % func

        # Don't attempt to tokenize binary files.
        if file.endswith(('.so', '.pyd', '.dll')):
            frames.append('%s %s\n' % (link, call))
            continue
        elif file.endswith(('.pyc', '.pyo')):
            # Look up the corresponding source file.
            file = pyfile.source_from_cache(file)

        def linereader(file=file, lnum=[lnum], getline=linecache.getline):
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line

        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            names = []
            name_cont = False

            for token_type, token, start, end, line in generate_tokens(linereader):
                # build composite names
                if token_type == tokenize.NAME and token not in keyword.kwlist:
                    if name_cont:
                        # Continuation of a dotted name
                        try:
                            names[-1].append(token)
                        except IndexError:
                            names.append([token])
                        name_cont = False
                    else:
                        # Regular new names.  We append everything, the caller
                        # will be responsible for pruning the list later.  It's
                        # very tricky to try to prune as we go, b/c composite
                        # names can fool us.  The pruning at the end is easy
                        # to do (or the caller can print a list with repeated
                        # names if so desired).
                        names.append([token])
                elif token == '.':
                    name_cont = True
                elif token_type == tokenize.NEWLINE:
                    break

        except (IndexError, UnicodeDecodeError):
            # signals exit of tokenizer
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % msg)
            error(_m)

        # Join composite names (e.g. "dict.fromkeys")
        names = ['.'.join(n) for n in names]
        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)

        # Start loop over vars
        lvals = []
        if self.include_vars:
            for name_full in unique_names:
                name_base = name_full.split('.', 1)[0]
                if name_base in frame.f_code.co_varnames:
                    if name_base in locals:
                        try:
                            value = repr(eval(name_full, locals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_local_var % name_full
                else:
                    if name_base in frame.f_globals:
                        try:
                            value = repr(eval(name_full, frame.f_globals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_global_var % name_full
                lvals.append(tpl_name_val % (name, value))
        if lvals:
            lvals = '%s%s' % (indent, em_normal.join(lvals))
        else:
            lvals = ''

        level = '%s %s\n' % (link, call)

        if index is None:
            frames.append(level)
        else:
            frames.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, Colors, lvals,
                                        col_scheme))))

    # Get (safely) a string form of the exception info
    try:
        etype_str, evalue_str = map(str, (etype, evalue))
    except:
        # User exception is improperly defined; fall back to the exception
        # raised while stringifying it.
        etype, evalue = sys.exc_info()[:2]
        etype_str, evalue_str = map(str, (etype, evalue))
    # ... and format it
    exception = ['%s%s%s: %s' % (Colors.excName, etype_str,
                                 ColorsNormal, evalue_str)]
    if (not py3compat.PY3) and type(evalue) is types.InstanceType:
        try:
            names = [w for w in dir(evalue) if isinstance(w, basestring)]
        except:
            # Every now and then, an object with funny internals blows up
            # when dir() is called on it.  We do the best we can to report
            # the problem and continue
            _m = '%sException reporting error (object with broken dir())%s:'
            exception.append(_m % (Colors.excName, ColorsNormal))
            etype_str, evalue_str = map(str, sys.exc_info()[:2])
            exception.append('%s%s%s: %s' % (Colors.excName, etype_str,
                                             ColorsNormal, evalue_str))
            names = []
        for name in names:
            value = text_repr(getattr(evalue, name))
            exception.append('\n%s%s = %s' % (indent, name, value))

    # vds: >>
    if records:
        filepath, lnum = records[-1][1:3]
        filepath = os.path.abspath(filepath)
        ipinst = ipapi.get()
        if ipinst is not None:
            ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
    # vds: <<

    # return all our info assembled as a single string
    # return '%s\n\n%s\n%s' % (head,'\n'.join(frames),''.join(exception[0]) )
    return [head] + frames + [''.join(exception[0])]
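To make the flow above concrete, here is a minimal usage sketch. It assumes IPython's VerboseTB class (where this method lives in IPython.core.ultratb) and its text() helper, which joins the list returned by structured_traceback into one string; the division by zero is only illustrative.

import sys
from IPython.core.ultratb import VerboseTB  # assumed location of this class

vtb = VerboseTB(color_scheme='NoColor')
try:
    1 / 0
except ZeroDivisionError:
    etype, evalue, etb = sys.exc_info()
    # text() wraps structured_traceback() and joins its parts
    print(vtb.text(etype, evalue, etb))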
def debugger(self,force=False): """Call up the pdb debugger if desired, always clean up the tb reference. Keywords: - force(False): by default, this routine checks the instance call_pdb flag and does not actually invoke the debugger if the flag is false. The 'force' option forces the debugger to activate even if the flag is false. If the call_pdb flag is set, the pdb interactive debugger is invoked. In all cases, the self.tb reference to the current traceback is deleted to prevent lingering references which hamper memory management. Note that each call to pdb() does an 'import readline', so if your app requires a special setup for the readline completers, you'll have to fix that by hand after invoking the exception handler.""" if force or self.call_pdb: if self.pdb is None: self.pdb = debugger.Pdb( self.color_scheme_table.active_scheme_name) # the system displayhook may have changed, restore the original # for pdb display_trap = DisplayTrap(hook=sys.__displayhook__) with display_trap: self.pdb.reset() # Find the right frame so we don't pop up inside ipython itself if hasattr(self,'tb') and self.tb is not None: etb = self.tb else: etb = self.tb = sys.last_traceback while self.tb is not None and self.tb.tb_next is not None: self.tb = self.tb.tb_next if etb and etb.tb_next: etb = etb.tb_next self.pdb.botframe = etb.tb_frame self.pdb.interaction(self.tb.tb_frame, self.tb) if hasattr(self,'tb'): del self.tb
def set_mode(self, mode=None):
    """Switch to the desired mode.

    If mode is not specified, cycles through the available modes."""

    if not mode:
        new_idx = (self.valid_modes.index(self.mode) + 1) % \
                  len(self.valid_modes)
        self.mode = self.valid_modes[new_idx]
    elif mode not in self.valid_modes:
        raise ValueError('Unrecognized mode in FormattedTB: <%s>\n'
                         'Valid modes: %s' % (mode, self.valid_modes))
    else:
        self.mode = mode
    # include variable details only in 'Verbose' mode
    self.include_vars = (self.mode == self.valid_modes[2])
    # Set the join character for generating text tracebacks
    self.tb_join_char = self._join_chars[self.mode]
def group_required(group, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME,
                   skip_superuser=True):
    """
    View decorator for requiring a user group.
    """
    def decorator(view_func):
        # requires: from functools import wraps
        @login_required(redirect_field_name=redirect_field_name, login_url=login_url)
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            # Superusers bypass the group check when skip_superuser is True.
            if not (request.user.is_superuser and skip_superuser):
                if not request.user.groups.filter(name=group).exists():
                    raise PermissionDenied
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
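A hypothetical Django view showing how the decorator would typically be applied; the 'editors' group name and the view itself are illustrative, not part of the original source.

from django.http import HttpResponse

@group_required('editors', login_url='/accounts/login/')
def edit_article(request, article_id):
    # Reached only by superusers (when skip_superuser is True) or by
    # members of the 'editors' group; everyone else gets PermissionDenied.
    return HttpResponse('editing article %s' % article_id)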
def get_parent(globals, level):
    """
    parent, name = get_parent(globals, level)

    Return the package that an import is being performed in.  If globals
    comes from the module foo.bar.bat (not itself a package), this returns
    the sys.modules entry for foo.bar.  If globals is from a package's
    __init__.py, the package's entry in sys.modules is returned.

    If globals doesn't come from a package or a module in a package, or a
    corresponding entry is not found in sys.modules, None is returned.
    """
    orig_level = level

    if not level or not isinstance(globals, dict):
        return None, ''

    pkgname = globals.get('__package__', None)

    if pkgname is not None:
        # __package__ is set, so use it
        if not hasattr(pkgname, 'rindex'):
            raise ValueError('__package__ set to non-string')
        if len(pkgname) == 0:
            if level > 0:
                raise ValueError('Attempted relative import in non-package')
            return None, ''
        name = pkgname
    else:
        # __package__ not set, so figure it out and set it
        if '__name__' not in globals:
            return None, ''
        modname = globals['__name__']

        if '__path__' in globals:
            # __path__ is set, so modname is already the package name
            globals['__package__'] = name = modname
        else:
            # Normal module, so work out the package name if any
            lastdot = modname.rfind('.')
            if lastdot < 0 and level > 0:
                raise ValueError("Attempted relative import in non-package")
            if lastdot < 0:
                globals['__package__'] = None
                return None, ''
            globals['__package__'] = name = modname[:lastdot]

    dot = len(name)
    for x in xrange(level, 1, -1):
        try:
            dot = name.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                             "package")
    name = name[:dot]

    try:
        parent = sys.modules[name]
    except KeyError:
        if orig_level < 1:
            warn("Parent module '%.200s' not found while handling absolute "
                 "import" % name)
            parent = None
        else:
            raise SystemError("Parent module '%.200s' not loaded, cannot "
                              "perform relative import" % name)

    # We expect, but can't guarantee, if parent != None, that:
    # - parent.__name__ == name
    # - parent.__dict__ is globals
    # If this is violated...  Who cares?
    return parent, name
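A small sketch of the contract, using the standard library so it runs as-is: os.path is a module inside the os package, so with level=1 its parent resolves to the os entry in sys.modules.

import os.path
import sys

parent, name = get_parent({'__name__': 'os.path'}, 1)
assert name == 'os'
assert parent is sys.modules['os']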
def load_next(mod, altmod, name, buf): """ mod, name, buf = load_next(mod, altmod, name, buf) altmod is either None or same as mod """ if len(name) == 0: # completely empty module name should only happen in # 'from . import' (or '__import__("")') return mod, None, buf dot = name.find('.') if dot == 0: raise ValueError('Empty module name') if dot < 0: subname = name next = None else: subname = name[:dot] next = name[dot+1:] if buf != '': buf += '.' buf += subname result = import_submodule(mod, subname, buf) if result is None and mod != altmod: result = import_submodule(altmod, subname, subname) if result is not None: buf = subname if result is None: raise ImportError("No module named %.200s" % name) return result, next, buf
def import_submodule(mod, subname, fullname):
    """m = import_submodule(mod, subname, fullname)"""
    # Require:
    # if mod == None: subname == fullname
    # else: mod.__name__ + "." + subname == fullname

    global found_now
    if fullname in found_now and fullname in sys.modules:
        m = sys.modules[fullname]
    else:
        print('Reloading %s' % fullname)
        found_now[fullname] = 1
        oldm = sys.modules.get(fullname, None)

        if mod is None:
            path = None
        elif hasattr(mod, '__path__'):
            path = mod.__path__
        else:
            return None

        try:
            # This appears to be necessary on Python 3, because imp.find_module()
            # tries to import standard libraries (like io) itself, and we don't
            # want them to be processed by our deep_import_hook.
            with replace_import_hook(original_import):
                fp, filename, stuff = imp.find_module(subname, path)
        except ImportError:
            return None

        try:
            m = imp.load_module(fullname, fp, filename, stuff)
        except:
            # load_module probably removed name from modules because of
            # the error.  Put back the original module object.
            if oldm:
                sys.modules[fullname] = oldm
            raise
        finally:
            if fp:
                fp.close()

        add_submodule(mod, m, fullname, subname)

    return m
def add_submodule(mod, submod, fullname, subname): """mod.{subname} = submod""" if mod is None: return #Nothing to do here. if submod is None: submod = sys.modules[fullname] setattr(mod, subname, submod) return
def ensure_fromlist(mod, fromlist, buf, recursive):
    """Handle 'from module import a, b, c' imports."""
    if not hasattr(mod, '__path__'):
        return
    for item in fromlist:
        if not hasattr(item, 'rindex'):
            raise TypeError("Item in ``from list'' not a string")
        if item == '*':
            if recursive:
                continue # avoid endless recursion
            try:
                all = mod.__all__
            except AttributeError:
                pass
            else:
                # Recurse into the names listed in __all__.  The recursive
                # call returns nothing useful, so don't bail out on its
                # result; keep processing the rest of the fromlist.
                ensure_fromlist(mod, all, buf, 1)
        elif not hasattr(mod, item):
            import_submodule(mod, item, buf + '.' + item)
def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1): """Replacement for __import__()""" parent, buf = get_parent(globals, level) head, name, buf = load_next(parent, None if level < 0 else parent, name, buf) tail = head while name: tail, name, buf = load_next(tail, tail, name, buf) # If tail is None, both get_parent and load_next found # an empty module name: someone called __import__("") or # doctored faulty bytecode if tail is None: raise ValueError('Empty module name') if not fromlist: return head ensure_fromlist(tail, fromlist, buf, 0) return tail
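A minimal sketch of installing the hook as the global import function (Python 2 names; on Python 3 the module is builtins). original_import is assumed to hold the saved built-in, as it does elsewhere in this module.

import __builtin__

__builtin__.__import__ = deep_import_hook
try:
    import xml.dom.minidom   # now resolved through deep_import_hook
finally:
    __builtin__.__import__ = original_import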
def deep_reload_hook(m):
    """Replacement for reload()."""
    if not isinstance(m, ModuleType):
        raise TypeError("reload() argument must be module")

    name = m.__name__

    if name not in sys.modules:
        raise ImportError("reload(): module %.200s not in sys.modules" % name)

    global modules_reloading
    try:
        return modules_reloading[name]
    except KeyError:
        modules_reloading[name] = m

    dot = name.rfind('.')
    if dot < 0:
        subname = name
        path = None
    else:
        try:
            parent = sys.modules[name[:dot]]
        except KeyError:
            modules_reloading.clear()
            raise ImportError("reload(): parent %.200s not in sys.modules" %
                              name[:dot])
        subname = name[dot+1:]
        path = getattr(parent, "__path__", None)

    try:
        # This appears to be necessary on Python 3, because imp.find_module()
        # tries to import standard libraries (like io) itself, and we don't
        # want them to be processed by our deep_import_hook.
        with replace_import_hook(original_import):
            fp, filename, stuff = imp.find_module(subname, path)
    finally:
        modules_reloading.clear()

    try:
        newm = imp.load_module(name, fp, filename, stuff)
    except:
        # load_module probably removed name from modules because of
        # the error.  Put back the original module object.
        sys.modules[name] = m
        raise
    finally:
        if fp:
            fp.close()

    modules_reloading.clear()
    return newm
def reload(module, exclude=['sys', 'os.path', '__builtin__', '__main__']):
    """Recursively reload all modules used in the given module.  Optionally
    takes a list of modules to exclude from reloading.  The default exclude
    list contains sys, os.path, __builtin__, and __main__, to prevent, e.g.,
    resetting display, exception, and io hooks.
    """
    global found_now
    for i in exclude:
        found_now[i] = 1
    try:
        with replace_import_hook(deep_import_hook):
            ret = deep_reload_hook(module)
    finally:
        found_now = {}
    return ret
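Typical use, with a hypothetical module name; extending the exclude list keeps modules with process-wide state from being re-imported.

import mypackage.utils   # hypothetical module

mypackage.utils = reload(mypackage.utils,
                         exclude=['sys', 'os.path', '__builtin__',
                                  '__main__', 'numpy'])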
def code_name(code, number=0): """ Compute a (probably) unique name for code for caching. This now expects code to be unicode. """ hash_digest = hashlib.md5(code.encode("utf-8")).hexdigest() # Include the number and 12 characters of the hash in the name. It's # pretty much impossible that in a single session we'll have collisions # even with truncated hashes, and the full one makes tracebacks too long return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
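For illustration, the hash digits below are made up, but the shape of the generated name is what the function guarantees:

name = code_name(u'print("hi")', number=3)
# name looks like '<ipython-input-3-0e52a2a5dbd8>'
assert name.startswith('<ipython-input-3-') and name.endswith('>')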
def ast_parse(self, source, filename='<unknown>', symbol='exec'): """Parse code to an AST with the current compiler flags active. Arguments are exactly the same as ast.parse (in the standard library), and are passed to the built-in compile function.""" return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
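A quick sketch, assuming compiler is an instance of the surrounding class (the name is hypothetical): the result is a normal ast.Module node, so the standard ast tooling applies.

import ast

tree = compiler.ast_parse('x = 1 + 2')
print(ast.dump(tree))   # Module(body=[Assign(...)])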
def cache(self, code, number=0): """Make a name for a block of code, and cache the code. Parameters ---------- code : str The Python source code to cache. number : int A number which forms part of the code's name. Used for the execution counter. Returns ------- The name of the cached code (as a string). Pass this as the filename argument to compilation, so that tracebacks are correctly hooked up. """ name = code_name(code, number) entry = (len(code), time.time(), [line+'\n' for line in code.splitlines()], name) linecache.cache[name] = entry linecache._ipython_cache[name] = entry return name
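A sketch of the round trip, again assuming a compiler instance: once cached, linecache can resolve the generated pseudo-filename, which is exactly what the traceback machinery needs.

import linecache

name = compiler.cache(u'a = 1\nb = a + 1', number=7)
print(linecache.getlines(name))   # ['a = 1\n', 'b = a + 1\n']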
def check_cache(self, *args):
    """Call linecache.checkcache() safely protecting our cached values.
    """
    # First call the original checkcache as intended
    linecache._checkcache_ori(*args)
    # Then, update back the cache with our data, so that tracebacks related
    # to our compiled codes can be produced.
    linecache.cache.update(linecache._ipython_cache)
def add_line(self, line): """Add a line of source to the code. Don't include indentations or newlines. """ self.code.append(" " * self.indent_amount) self.code.append(line) self.code.append("\n")
def add_section(self): """Add a section, a sub-CodeBuilder.""" sect = CodeBuilder(self.indent_amount) self.code.append(sect) return sect
def get_function(self, fn_name): """Compile the code, and return the function `fn_name`.""" assert self.indent_amount == 0 g = {} code_text = str(self) exec(code_text, g) return g[fn_name]
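An end-to-end sketch of the builder, assuming the constructor defaults indent_amount to 0, that indent_amount may be adjusted directly, and that __str__ joins the accumulated chunks (as in the templite implementation this appears to come from):

builder = CodeBuilder()
builder.add_line('def greet(name):')
builder.indent_amount += 4            # step into the function body
builder.add_line('return "Hello, " + name')
builder.indent_amount -= 4            # back to top level before compiling
greet = builder.get_function('greet')
print(greet('world'))                 # Hello, world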
def expr_code(self, expr): """Generate a Python expression for `expr`.""" if "|" in expr: pipes = expr.split("|") code = self.expr_code(pipes[0]) for func in pipes[1:]: self.all_vars.add(func) code = "c_%s(%s)" % (func, code) elif "." in expr: dots = expr.split(".") code = self.expr_code(dots[0]) args = [repr(d) for d in dots[1:]] code = "dot(%s, %s)" % (code, ", ".join(args)) else: self.all_vars.add(expr) code = "c_%s" % expr return code
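Illustrative translations, assuming t is the template-compiler instance that owns expr_code (the variable names are hypothetical): pipes become wrapped calls, and dots become dot() lookups.

print(t.expr_code('user_name'))        # c_user_name
print(t.expr_code('user.name'))        # dot(c_user, 'name')
print(t.expr_code('user.name|upper'))  # c_upper(dot(c_user, 'name'))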