def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0, ip=LOCALHOST, key=b''): """Generates a JSON config file, including the selection of random ports. Parameters ---------- fname : unicode The path to the file to write shell_port : int, optional The port to use for ROUTER channel. iopub_port : int, optional The port to use for the SUB channel. stdin_port : int, optional The port to use for the REQ (raw input) channel. hb_port : int, optional The port to use for the heartbeat REP channel. ip : str, optional The ip address the kernel will bind to. key : str, optional The Session key used for HMAC authentication. """ # default to temporary connection file if not fname: fname = tempfile.mktemp('.json') # Find open ports as necessary. ports = [] ports_needed = int(shell_port <= 0) + int(iopub_port <= 0) + \ int(stdin_port <= 0) + int(hb_port <= 0) for i in xrange(ports_needed): sock = socket.socket() sock.bind(('', 0)) ports.append(sock) for i, sock in enumerate(ports): port = sock.getsockname()[1] sock.close() ports[i] = port if shell_port <= 0: shell_port = ports.pop(0) if iopub_port <= 0: iopub_port = ports.pop(0) if stdin_port <= 0: stdin_port = ports.pop(0) if hb_port <= 0: hb_port = ports.pop(0) cfg = dict( shell_port=shell_port, iopub_port=iopub_port, stdin_port=stdin_port, hb_port=hb_port, ) cfg['ip'] = ip cfg['key'] = bytes_to_str(key) with open(fname, 'w') as f: f.write(json.dumps(cfg, indent=2)) return fname, cfg
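The port-selection idiom above (bind to port 0 so the OS assigns a free ephemeral port, read it back, then close the socket) can be exercised on its own; a minimal standalone sketch, not taken from the original module:

import socket

def pick_free_ports(n):
    """Ask the OS for n currently-free TCP ports, the same way write_connection_file does."""
    socks = [socket.socket() for _ in range(n)]
    for s in socks:
        s.bind(('', 0))  # port 0 means "any free port"
    ports = [s.getsockname()[1] for s in socks]
    for s in socks:
        s.close()  # a small reuse race remains after closing, which the original accepts too
    return ports

print(pick_free_ports(4))  # e.g. [53211, 53212, 53213, 53214]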
def base_launch_kernel(code, fname, stdin=None, stdout=None, stderr=None, executable=None, independent=False, extra_arguments=[], cwd=None): """ Launches a localhost kernel, binding to the specified ports. Parameters ---------- code : str, A string of Python code that imports and executes a kernel entry point. stdin, stdout, stderr : optional (default None) Standard streams, as defined in subprocess.Popen. fname : unicode, optional The JSON connector file, containing ip/port/hmac key information. key : str, optional The Session key used for HMAC authentication. executable : str, optional (default sys.executable) The Python executable to use for the kernel process. independent : bool, optional (default False) If set, the kernel process is guaranteed to survive if this process dies. If not set, an effort is made to ensure that the kernel is killed when this process dies. Note that in this case it is still good practice to kill kernels manually before exiting. extra_arguments : list, optional A list of extra arguments to pass when executing the launch code. cwd : path, optional The working dir of the kernel process (default: cwd of this process). Returns ------- A tuple of form: (kernel_process, shell_port, iopub_port, stdin_port, hb_port) where kernel_process is a Popen object and the ports are integers. """ # Build the kernel launch command. if executable is None: executable = sys.executable arguments = [ executable, '-c', code, '-f', fname ] arguments.extend(extra_arguments) # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr # are invalid. Unfortunately, there is in general no way to detect whether # they are valid. The following two blocks redirect them to (temporary) # pipes in certain important cases. # If this process has been backgrounded, our stdin is invalid. Since there # is no compelling reason for the kernel to inherit our stdin anyway, we'll # play it safe and always redirect. redirect_in = True _stdin = PIPE if stdin is None else stdin # If this process is running on pythonw, we know that stdin, stdout, and # stderr are all invalid. redirect_out = sys.executable.endswith('pythonw.exe') if redirect_out: _stdout = PIPE if stdout is None else stdout _stderr = PIPE if stderr is None else stderr else: _stdout, _stderr = stdout, stderr # Spawn a kernel. if sys.platform == 'win32': # Create a Win32 event for interrupting the kernel. interrupt_event = ParentPollerWindows.create_interrupt_event() arguments += [ '--interrupt=%i'%interrupt_event ] # If the kernel is running on pythonw and stdout/stderr have not been # re-directed, it will crash when more than 4KB of data is written to # stdout or stderr. This is a bug that has been with Python for a very # long time; see http://bugs.python.org/issue706263. # A cleaner solution to this problem would be to pass os.devnull to # Popen directly. Unfortunately, that does not work. if executable.endswith('pythonw.exe'): if stdout is None: arguments.append('--no-stdout') if stderr is None: arguments.append('--no-stderr') # Launch the kernel process. if independent: proc = Popen(arguments, creationflags=512, # CREATE_NEW_PROCESS_GROUP stdin=_stdin, stdout=_stdout, stderr=_stderr) else: try: from _winapi import DuplicateHandle, GetCurrentProcess, \ DUPLICATE_SAME_ACCESS except: from _subprocess import DuplicateHandle, GetCurrentProcess, \ DUPLICATE_SAME_ACCESS pid = GetCurrentProcess() handle = DuplicateHandle(pid, pid, pid, 0, True, # Inheritable by new processes. 
DUPLICATE_SAME_ACCESS) proc = Popen(arguments + ['--parent=%i'%int(handle)], stdin=_stdin, stdout=_stdout, stderr=_stderr) # Attach the interrupt event to the Popen object so it can be used later. proc.win32_interrupt_event = interrupt_event else: if independent: proc = Popen(arguments, preexec_fn=lambda: os.setsid(), stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd) else: proc = Popen(arguments + ['--parent=1'], stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd) # Clean up pipes created to work around Popen bug. if redirect_in: if stdin is None: proc.stdin.close() if redirect_out: if stdout is None: proc.stdout.close() if stderr is None: proc.stderr.close() return proc
def create_zipfile(context): """This is the actual zest.releaser entry point Relevant items in the context dict: name Name of the project being released tagdir Directory where the tag checkout is placed (*if* a tag checkout has been made) version Version we're releasing workingdir Original working directory """ if not prerequisites_ok(): return # Create a zipfile. subprocess.call(['make', 'zip']) for zipfile in glob.glob('*.zip'): first_part = zipfile.split('.')[0] new_name = "%s.%s.zip" % (first_part, context['version']) target = os.path.join(context['workingdir'], new_name) shutil.copy(zipfile, target) print("Copied %s to %s" % (zipfile, target))
def fix_version(context): """Fix the version in metadata.txt Relevant context dict item for both prerelease and postrelease: ``new_version``. """ if not prerequisites_ok(): return lines = codecs.open('metadata.txt', 'rU', 'utf-8').readlines() for index, line in enumerate(lines): if line.startswith('version'): new_line = 'version=%s\n' % context['new_version'] lines[index] = new_line time.sleep(1) codecs.open('metadata.txt', 'w', 'utf-8').writelines(lines)
def mappable(obj): """return whether an object is mappable or not.""" if isinstance(obj, (tuple,list)): return True for m in arrayModules: if isinstance(obj,m['type']): return True return False
def getPartition(self, seq, p, q): """Returns the pth partition of q partitions of seq.""" # Test for error conditions here if p<0 or p>=q: print "No partition exists." return remainder = len(seq)%q basesize = len(seq)//q hi = [] lo = [] for n in range(q): if n < remainder: lo.append(n * (basesize + 1)) hi.append(lo[-1] + basesize + 1) else: lo.append(n*basesize + remainder) hi.append(lo[-1] + basesize) try: result = seq[lo[p]:hi[p]] except TypeError: # some objects (iterators) can't be sliced, # use islice: result = list(islice(seq, lo[p], hi[p])) return result
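A standalone check of the partition arithmetic used above (a hypothetical free function mirroring getPartition, where the first len(seq) % q chunks are one element larger):

from itertools import islice

def partition(seq, p, q):
    # Chunk p of q nearly-equal chunks of seq.
    remainder = len(seq) % q
    basesize = len(seq) // q
    lo = sum(basesize + 1 if n < remainder else basesize for n in range(p))
    hi = lo + (basesize + 1 if p < remainder else basesize)
    try:
        return seq[lo:hi]
    except TypeError:  # not sliceable, fall back to islice as the original does
        return list(islice(seq, lo, hi))

print(partition(list(range(10)), 0, 3))  # [0, 1, 2, 3]
print(partition(list(range(10)), 2, 3))  # [7, 8, 9]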
def pexpect_monkeypatch(): """Patch pexpect to prevent unhandled exceptions at VM teardown. Calling this function will monkeypatch the pexpect.spawn class and modify its __del__ method to make it more robust in the face of failures that can occur if it is called when the Python VM is shutting down. Since Python may fire __del__ methods arbitrarily late, it's possible for them to execute during the teardown of the Python VM itself. At this point, various builtin modules have been reset to None. Thus, the call to self.close() will trigger an exception because it tries to call os.close(), and os is now None. """ if pexpect.__version__[:3] >= '2.2': # No need to patch, fix is already the upstream version. return def __del__(self): """This makes sure that no system resources are left open. Python only garbage collects Python objects. OS file descriptors are not Python objects, so they must be handled explicitly. If the child file descriptor was opened outside of this class (passed to the constructor) then this does not close it. """ if not self.closed: try: self.close() except AttributeError: pass pexpect.spawn.__del__ = __del__
def main(): """Run as a command-line script.""" parser = optparse.OptionParser(usage=MAIN_USAGE) newopt = parser.add_option newopt('--ipython',action='store_const',dest='mode',const='ipython', help='IPython interactive runner (default).') newopt('--python',action='store_const',dest='mode',const='python', help='Python interactive runner.') newopt('--sage',action='store_const',dest='mode',const='sage', help='SAGE interactive runner.') opts,args = parser.parse_args() runners = dict(ipython=IPythonRunner, python=PythonRunner, sage=SAGERunner) try: ext = os.path.splitext(args[0])[-1] except IndexError: ext = '' modes = {'.ipy':'ipython', '.py':'python', '.sage':'sage'} mode = modes.get(ext,"ipython") if opts.mode: mode = opts.mode runners[mode]().main(args)
def run_file(self,fname,interact=False,get_output=False): """Run the given file interactively. Inputs: -fname: name of the file to execute. See the run_source docstring for the meaning of the optional arguments.""" fobj = open(fname,'r') try: out = self.run_source(fobj,interact,get_output) finally: fobj.close() if get_output: return out
def run_source(self,source,interact=False,get_output=False): """Run the given source code interactively. Inputs: - source: a string of code to be executed, or an open file object we can iterate over. Optional inputs: - interact(False): if true, start to interact with the running program at the end of the script. Otherwise, just exit. - get_output(False): if true, capture the output of the child process (filtering the input commands out) and return it as a string. Returns: A string containing the process output, but only if requested. """ # if the source is a string, chop it up in lines so we can iterate # over it just as if it were an open file. if isinstance(source, basestring): source = source.splitlines(True) if self.echo: # normalize all strings we write to use the native OS line # separators. linesep = os.linesep stdwrite = self.out.write write = lambda s: stdwrite(s.replace('\r\n',linesep)) else: # Quiet mode, all writes are no-ops write = lambda s: None c = self.child prompts = c.compile_pattern_list(self.prompts) prompt_idx = c.expect_list(prompts) # Flag whether the script ends normally or not, to know whether we can # do anything further with the underlying process. end_normal = True # If the output was requested, store it in a list for return at the end if get_output: output = [] store_output = output.append for cmd in source: # skip blank lines for all matches to the 'main' prompt, while the # secondary prompts do not if prompt_idx==0 and \ (cmd.isspace() or cmd.lstrip().startswith('#')): write(cmd) continue # write('AFTER: '+c.after) # dbg write(c.after) c.send(cmd) try: prompt_idx = c.expect_list(prompts) except pexpect.EOF: # this will happen if the child dies unexpectedly write(c.before) end_normal = False break write(c.before) # With an echoing process, the output we get in c.before contains # the command sent, a newline, and then the actual process output if get_output: store_output(c.before[len(cmd+'\n'):]) #write('CMD: <<%s>>' % cmd) # dbg #write('OUTPUT: <<%s>>' % output[-1]) # dbg self.out.flush() if end_normal: if interact: c.send('\n') print '<< Starting interactive mode >>', try: c.interact() except OSError: # This is what fires when the child stops. Simply print a # newline so the system prompt is aligned. The extra # space is there to make sure it gets printed, otherwise # OS buffering sometimes just suppresses it. write(' \n') self.out.flush() else: if interact: e="Further interaction is not possible: child process is dead." print >> sys.stderr, e # Leave the child ready for more input later on, otherwise select just # hangs on the second invocation. if c.isalive(): c.send('\n') # Return any requested output if get_output: return ''.join(output)
def main(self,argv=None): """Run as a command-line script.""" parser = optparse.OptionParser(usage=USAGE % self.__class__.__name__) newopt = parser.add_option newopt('-i','--interact',action='store_true',default=False, help='Interact with the program after the script is run.') opts,args = parser.parse_args(argv) if len(args) != 1: print >> sys.stderr,"You must supply exactly one file to run." sys.exit(1) self.run_file(args[0],opts.interact)
def report(self, morfs, outfile=None): """Generate a Cobertura-compatible XML report for `morfs`. `morfs` is a list of modules or filenames. `outfile` is a file object to write the XML to. """ # Initial setup. outfile = outfile or sys.stdout # Create the DOM that will store the data. impl = xml.dom.minidom.getDOMImplementation() docType = impl.createDocumentType( "coverage", None, "http://cobertura.sourceforge.net/xml/coverage-03.dtd" ) self.xml_out = impl.createDocument(None, "coverage", docType) # Write header stuff. xcoverage = self.xml_out.documentElement xcoverage.setAttribute("version", __version__) xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) xcoverage.appendChild(self.xml_out.createComment( " Generated by coverage.py: %s " % __url__ )) xpackages = self.xml_out.createElement("packages") xcoverage.appendChild(xpackages) # Call xml_file for each file in the data. self.packages = {} self.report_files(self.xml_file, morfs) lnum_tot, lhits_tot = 0, 0 bnum_tot, bhits_tot = 0, 0 # Populate the XML DOM with the package info. for pkg_name in sorted(self.packages.keys()): pkg_data = self.packages[pkg_name] class_elts, lhits, lnum, bhits, bnum = pkg_data xpackage = self.xml_out.createElement("package") xpackages.appendChild(xpackage) xclasses = self.xml_out.createElement("classes") xpackage.appendChild(xclasses) for class_name in sorted(class_elts.keys()): xclasses.appendChild(class_elts[class_name]) xpackage.setAttribute("name", pkg_name.replace(os.sep, '.')) xpackage.setAttribute("line-rate", rate(lhits, lnum)) xpackage.setAttribute("branch-rate", rate(bhits, bnum)) xpackage.setAttribute("complexity", "0") lnum_tot += lnum lhits_tot += lhits bnum_tot += bnum bhits_tot += bhits xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) # Use the DOM to write the output file. outfile.write(self.xml_out.toprettyxml()) # Return the total percentage. denom = lnum_tot + bnum_tot if denom == 0: pct = 0.0 else: pct = 100.0 * (lhits_tot + bhits_tot) / denom return pct
def xml_file(self, cu, analysis): """Add to the XML report for a single file.""" # Create the 'lines' and 'package' XML elements, which # are populated later. Note that a package == a directory. package_name = rpartition(cu.name, ".")[0] className = cu.name package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0]) xclass = self.xml_out.createElement("class") xclass.appendChild(self.xml_out.createElement("methods")) xlines = self.xml_out.createElement("lines") xclass.appendChild(xlines) xclass.setAttribute("name", className) filename = cu.file_locator.relative_filename(cu.filename) xclass.setAttribute("filename", filename.replace("\\", "/")) xclass.setAttribute("complexity", "0") branch_stats = analysis.branch_stats() # For each statement, create an XML 'line' element. for line in sorted(analysis.statements): xline = self.xml_out.createElement("line") xline.setAttribute("number", str(line)) # Q: can we get info about the number of times a statement is # executed? If so, that should be recorded here. xline.setAttribute("hits", str(int(line not in analysis.missing))) if self.arcs: if line in branch_stats: total, taken = branch_stats[line] xline.setAttribute("branch", "true") xline.setAttribute("condition-coverage", "%d%% (%d/%d)" % (100*taken/total, taken, total) ) xlines.appendChild(xline) class_lines = len(analysis.statements) class_hits = class_lines - len(analysis.missing) if self.arcs: class_branches = sum([t for t,k in branch_stats.values()]) missing_branches = sum([t-k for t,k in branch_stats.values()]) class_br_hits = class_branches - missing_branches else: class_branches = 0.0 class_br_hits = 0.0 # Finalize the statistics that are collected in the XML DOM. xclass.setAttribute("line-rate", rate(class_hits, class_lines)) xclass.setAttribute("branch-rate", rate(class_br_hits, class_branches)) package[0][className] = xclass package[1] += class_hits package[2] += class_lines package[3] += class_br_hits package[4] += class_branches
def phistogram(view, a, bins=10, rng=None, normed=False): """Compute the histogram of a remote array a. Parameters ---------- view IPython DirectView instance a : str String name of the remote array bins : int Number of histogram bins rng : (float, float) Tuple of min, max of the range to histogram normed : boolean Should the histogram counts be normalized to 1 """ nengines = len(view.targets) # view.push(dict(bins=bins, rng=rng)) with view.sync_imports(): import numpy rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng) hists = [ r[0] for r in rets ] lower_edges = [ r[1] for r in rets ] # view.execute('hist, lower_edges = numpy.histogram(%s, bins, rng)' % a) lower_edges = view.pull('lower_edges', targets=0) hist_array = numpy.array(hists).reshape(nengines, -1) # hist_array.shape = (nengines,-1) total_hist = numpy.sum(hist_array, 0) if normed: total_hist = total_hist/numpy.sum(total_hist,dtype=float) return total_hist, lower_edges
def fetch_pi_file(filename): """This will download a segment of pi from super-computing.org if the file is not already present. """ import os, urllib ftpdir="ftp://pi.super-computing.org/.2/pi200m/" if os.path.exists(filename): # we already have it return else: # download it urllib.urlretrieve(ftpdir+filename,filename)
def reduce_freqs(freqlist): """ Add up a list of freq counts to get the total counts. """ allfreqs = np.zeros_like(freqlist[0]) for f in freqlist: allfreqs += f return allfreqs
def compute_n_digit_freqs(filename, n): """ Read digits of pi from a file and compute the n digit frequencies. """ d = txt_file_to_digits(filename) freqs = n_digit_freqs(d, n) return freqs
def txt_file_to_digits(filename, the_type=str): """ Yield the digits of pi read from a .txt file. """ with open(filename, 'r') as f: for line in f.readlines(): for c in line: if c != '\n' and c!= ' ': yield the_type(c)
def one_digit_freqs(digits, normalize=False): """ Consume digits of pi and compute 1 digit freq. counts. """ freqs = np.zeros(10, dtype='i4') for d in digits: freqs[int(d)] += 1 if normalize: freqs = freqs/freqs.sum() return freqs
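A small driver showing the digit-frequency counting in isolation (assumes numpy is available; digits come from an in-memory string instead of the .txt file the original reads):

import numpy as np

def digits_from_string(s):
    # Stand-in for txt_file_to_digits: yield digit characters only.
    for c in s:
        if c.isdigit():
            yield c

sample = "3 1415926535 8979323846"  # a few digits of pi with spaces, as in the .txt files
freqs = np.zeros(10, dtype='i4')
for d in digits_from_string(sample):
    freqs[int(d)] += 1           # same counting loop as one_digit_freqs
print(freqs)                     # counts for digits 0..9
print(freqs.sum())               # 21 digits consumed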
def two_digit_freqs(digits, normalize=False): """ Consume digits of pi and compute 2 digits freq. counts. """ freqs = np.zeros(100, dtype='i4') last = digits.next() this = digits.next() for d in digits: index = int(last + this) freqs[index] += 1 last = this this = d if normalize: freqs = freqs/freqs.sum() return freqs
def n_digit_freqs(digits, n, normalize=False): """ Consume digits of pi and compute n digits freq. counts. This should only be used for 1-6 digits. """ freqs = np.zeros(pow(10,n), dtype='i4') current = np.zeros(n, dtype=int) for i in range(n): current[i] = digits.next() for d in digits: index = int(''.join(map(str, current))) freqs[index] += 1 current[0:-1] = current[1:] current[-1] = d if normalize: freqs = freqs/freqs.sum() return freqs
def plot_two_digit_freqs(f2): """ Plot two digits frequency counts using matplotlib. """ f2_copy = f2.copy() f2_copy.shape = (10,10) ax = plt.matshow(f2_copy) plt.colorbar() for i in range(10): for j in range(10): plt.text(i-0.2, j+0.2, str(j)+str(i)) plt.ylabel('First digit') plt.xlabel('Second digit') return ax
def plot_one_digit_freqs(f1): """ Plot one digit frequency counts using matplotlib. """ ax = plt.plot(f1,'bo-') plt.title('Single digit counts in pi') plt.xlabel('Digit') plt.ylabel('Count') return ax
def __extend_uri(self, short): """ Extend a prefixed uri with the help of a specific dictionary of prefixes :param short: Prefixed uri to be extended :return: """ if short == 'a': return RDF.type for prefix in sorted(self.__prefixes, key=lambda x: len(x), reverse=True): if short.startswith(prefix): return URIRef(short.replace(prefix + ':', self.__prefixes[prefix])) return short
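Stripped of the rdflib types, __extend_uri is a longest-prefix lookup; a standalone sketch with a made-up prefix map (this version also requires the colon after the prefix, a slightly stricter check than the original):

prefixes = {'foaf': 'http://xmlns.com/foaf/0.1/',
            'foafx': 'http://example.org/foafx#'}

def extend_uri(short, prefixes):
    # Try longer prefixes first so 'foafx:thing' is not claimed by 'foaf'.
    for prefix in sorted(prefixes, key=len, reverse=True):
        if short.startswith(prefix + ':'):
            return short.replace(prefix + ':', prefixes[prefix], 1)
    return short  # not prefixed, return unchanged

print(extend_uri('foaf:name', prefixes))   # http://xmlns.com/foaf/0.1/name
print(extend_uri('foafx:name', prefixes))  # http://example.org/foafx#name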
def get_object_or_none(qs, *args, **kwargs): """ Try to retrieve a model, and return None if it is not found. Useful if you do not want to bother with the try/except block. """ try: return qs.get(*args, **kwargs) except models.ObjectDoesNotExist: return None
def extract_vars(*names,**kw): """Extract a set of variables by name from another frame. :Parameters: - `*names`: strings One or more variable names which will be extracted from the caller's frame. :Keywords: - `depth`: integer (0) How many frames in the stack to walk when looking for your variables. Examples: In [2]: def func(x): ...: y = 1 ...: print sorted(extract_vars('x','y').items()) ...: In [3]: func('hello') [('x', 'hello'), ('y', 1)] """ depth = kw.get('depth',0) callerNS = sys._getframe(depth+1).f_locals return dict((k,callerNS[k]) for k in names)
def extract_vars_above(*names): """Extract a set of variables by name from another frame. Similar to extract_vars(), but with a specified depth of 1, so that names are extracted exactly from above the caller. This is simply a convenience function so that the very common case (for us) of skipping exactly 1 frame doesn't have to construct a special dict for keyword passing.""" callerNS = sys._getframe(2).f_locals return dict((k,callerNS[k]) for k in names)
def debugx(expr,pre_msg=''): """Print the value of an expression from the caller's frame. Takes an expression, evaluates it in the caller's frame and prints both the given expression and the resulting value (as well as a debug mark indicating the name of the calling function. The input must be of a form suitable for eval(). An optional message can be passed, which will be prepended to the printed expr->value pair.""" cf = sys._getframe(1) print '[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr, eval(expr,cf.f_globals,cf.f_locals))
def extract_module_locals(depth=0): """Returns (module, locals) of the function `depth` frames away from the caller""" f = sys._getframe(depth + 1) global_ns = f.f_globals module = sys.modules[global_ns['__name__']] return (module, f.f_locals)
def reverse(view, *args, **kwargs): ''' User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse` as `args` and `kwargs` arguments, respectively. The special optional keyword argument `query` is a dictionary of query (or GET) parameters that can be appended to the `reverse`d URL. Example: reverse('products:category', categoryId = 5, query = {'page': 2}) is equivalent to django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2' ''' if 'query' in kwargs: query = kwargs.pop('query') else: query = None base = urlresolvers.reverse(view, args = args, kwargs = kwargs) if query: return '{}?{}'.format(base, django.utils.http.urlencode(query)) else: return base
def is_private(prefix, base): """prefix, base -> true iff name prefix + "." + base is "private". Prefix may be an empty string, and base does not contain a period. Prefix is ignored (although functions you write conforming to this protocol may make use of it). Return true iff base begins with an (at least one) underscore, but does not both begin and end with (at least) two underscores. """ warnings.warn("is_private is deprecated; it wasn't useful; " "examine DocTestFinder.find() lists instead", DeprecationWarning, stacklevel=2) return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs): """ Return the compiler-flags associated with the future features that have been imported into the given namespace (globs). """ flags = 0 for fname in __future__.all_feature_names: feature = globs.get(fname, None) if feature is getattr(__future__, fname): flags |= feature.compiler_flag return flags
def _normalize_module(module, depth=2): """ Return the module specified by `module`. In particular: - If `module` is a module, then return module. - If `module` is a string, then import and return the module with that name. - If `module` is None, then return the calling module. The calling module is assumed to be the module of the stack frame at the given depth in the call stack. """ if inspect.ismodule(module): return module elif isinstance(module, (str, unicode)): return __import__(module, globals(), locals(), ["*"]) elif module is None: return sys.modules[sys._getframe(depth).f_globals['__name__']] else: raise TypeError("Expected a module, string, or None")
def _exception_traceback(exc_info): """ Return a string containing a traceback message for the given exc_info tuple (as returned by sys.exc_info()). """ # Get a traceback message. excout = StringIO() exc_type, exc_val, exc_tb = exc_info traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) return excout.getvalue()
def run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): """ Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information. """ # Find, parse, and run all tests in the given module. finder = DocTestFinder(verbose=verbose, recurse=False) runner = DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(f, name, globs=globs): runner.run(test, compileflags=compileflags)
def DocFileSuite(*paths, **kw): """A unittest suite for one or more doctest files. The path to each doctest file is given as a string; the interpretation of that string depends on the keyword argument "module_relative". A number of options may be provided as keyword arguments: module_relative If "module_relative" is True, then the given file paths are interpreted as os-independent module-relative paths. By default, these paths are relative to the calling module's directory; but if the "package" argument is specified, then they are relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and may not be an absolute path (i.e., it may not begin with "/"). If "module_relative" is False, then the given file paths are interpreted as os-specific paths. These paths may be absolute or relative (to the current working directory). package A Python package or the name of a Python package whose directory should be used as the base directory for module relative paths. If "package" is not specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. parser A DocTestParser (or subclass) that should be used to extract tests from the files. """ suite = unittest.TestSuite() # We do this here so that _normalize_module is called at the right # level. If it were called in DocFileTest, then this function # would be the caller and we might guess the package incorrectly. if kw.get('module_relative', True): kw['package'] = _normalize_module(kw.get('package')) for path in paths: suite.addTest(DocFileTest(path, **kw)) return suite
def debug_src(src, pm=False, globs=None): """Debug a single doctest docstring, in argument `src`""" testsrc = script_from_examples(src) debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None): "Debug a test script. `src` is the script, as a string." import pdb # Note that tempfile.NamedTemporaryFile() cannot be used. As the # docs say, a file so created cannot be opened by name a second time # on modern Windows boxes, and execfile() needs to open it. srcfilename = tempfile.mktemp(".py", "doctestdebug") f = open(srcfilename, 'w') f.write(src) f.close() try: if globs: globs = globs.copy() else: globs = {} if pm: try: execfile(srcfilename, globs, globs) except: print sys.exc_info()[1] pdb.post_mortem(sys.exc_info()[2]) else: # Note that %r is vital here. '%s' instead can, e.g., cause # backslashes to get treated as metacharacters on Windows. pdb.run("execfile(%r)" % srcfilename, globs, globs) finally: os.remove(srcfilename)
def debug(module, name, pm=False): """Debug a single doctest docstring. Provide the module (or dotted name of the module) containing the test to be debugged and the name (within the module) of the object with the docstring with tests to be debugged. """ module = _normalize_module(module) testsrc = testsource(module, name) debug_script(testsrc, pm, module.__dict__)
def check_output(self, want, got, optionflags): """ Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. """ # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # The values True and False replaced 1 and 0 as the return # value for boolean comparisons in Python 2.3. if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): if (got,want) == ("True\n", "1\n"): return True if (got,want) == ("False\n", "0\n"): return True # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub('(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & ELLIPSIS: if _ellipsis_match(want, got): return True # We didn't find any match; return false. return False
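The <BLANKLINE> handling in check_output is two regex substitutions; a standalone illustration of that normalization step:

import re

BLANKLINE_MARKER = '<BLANKLINE>'
want = 'line one\n<BLANKLINE>\nline two\n'
got = 'line one\n   \nline two\n'

# Replace the marker in `want` with an empty line and strip whitespace-only
# lines in `got`, exactly as the method above does before comparing.
want_n = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), '', want)
got_n = re.sub(r'(?m)^\s*?$', '', got)
print(want_n == got_n)  # True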
def output_difference(self, example, got, optionflags): """ Return a string describing the differences between the expected output for a given example (`example`) and the actual output (`got`). `optionflags` is the set of option flags used to compare `want` and `got`. """ want = example.want # If <BLANKLINE>s are being used, then replace blank lines # with <BLANKLINE> in the actual output string. if not (optionflags & DONT_ACCEPT_BLANKLINE): got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) # Check if we should use diff. if self._do_a_fancy_diff(want, got, optionflags): # Split want & got into lines. want_lines = want.splitlines(True) # True == keep line ends got_lines = got.splitlines(True) # Use difflib to find their differences. if optionflags & REPORT_UDIFF: diff = difflib.unified_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'unified diff with -expected +actual' elif optionflags & REPORT_CDIFF: diff = difflib.context_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'context diff with expected followed by actual' elif optionflags & REPORT_NDIFF: engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) diff = list(engine.compare(want_lines, got_lines)) kind = 'ndiff with -expected +actual' else: assert 0, 'Bad diff option' # Remove trailing whitespace on diff output. diff = [line.rstrip() + '\n' for line in diff] return 'Differences (%s):\n' % kind + _indent(''.join(diff)) # If we're not using diff, then simply list the expected # output followed by the actual output. if want and got: return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) elif want: return 'Expected:\n%sGot nothing\n' % _indent(want) elif got: return 'Expected nothing\nGot:\n%s' % _indent(got) else: return 'Expected nothing\nGot nothing\n'
def hset(self, hashroot, key, value): """ hashed set """ hroot = self.root / hashroot if not hroot.isdir(): hroot.makedirs() hfile = hroot / gethashfile(key) d = self.get(hfile, {}) d.update( {key : value}) self[hfile] = d
def hdict(self, hashroot): """ Get all data contained in hashed category 'hashroot' as dict """ hfiles = self.keys(hashroot + "/*") hfiles.sort() last = len(hfiles) and hfiles[-1] or '' if last.endswith('xx'): # print "using xx" hfiles = [last] + hfiles[:-1] all = {} for f in hfiles: # print "using",f try: all.update(self[f]) except KeyError: print "Corrupt",f,"deleted - hset is not threadsafe!" del self[f] self.uncache(f) return all
def hcompress(self, hashroot): """ Compress category 'hashroot', so hset is fast again hget will fail if fast_only is True for compressed items (that were hset before hcompress). """ hfiles = self.keys(hashroot + "/*") all = {} for f in hfiles: # print "using",f all.update(self[f]) self.uncache(f) self[hashroot + '/xx'] = all for f in hfiles: p = self.root / f if p.basename() == 'xx': continue p.remove()
def keys(self, globpat = None): """ All keys in DB, or all keys matching a glob""" if globpat is None: files = self.root.walkfiles() else: files = [Path(p) for p in glob.glob(self.root/globpat)] return [self._normalized(p) for p in files if p.isfile()]
def eventFilter(self, obj, event): """ Reimplemented to handle keyboard input and to auto-hide when the text edit loses focus. """ if obj == self._text_edit: etype = event.type() if etype in( QtCore.QEvent.KeyPress, QtCore.QEvent.FocusOut ): self.cancel_completion() return super(CompletionPlain, self).eventFilter(obj, event)
def show_items(self, cursor, items): """ Shows the completion widget with 'items' at the position specified by 'cursor'. """ if not items : return self.cancel_completion() strng = text.columnize(items) self._console_widget._fill_temporary_buffer(cursor, strng, html=False)
def allow(self, record): """returns whether this record should be printed""" if not self: # nothing to filter return True return self._allow(record) and not self._deny(record)
def _any_match(matchers, record): """return the bool of whether `record` starts with any item in `matchers`""" def record_matches_key(key): return record == key or record.startswith(key + '.') return anyp(bool, map(record_matches_key, matchers))
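The dotted-prefix rule above, written with plain builtins (a standalone sketch; `anyp` in the original is assumed to behave like any()):

def record_matches(record, matchers):
    # True if record equals a matcher or lives underneath it in the dotted
    # logger hierarchy, e.g. 'foo.bar' matches 'foo' but 'foobar' does not.
    return any(record == key or record.startswith(key + '.') for key in matchers)

print(record_matches('foo.bar', ['foo']))               # True
print(record_matches('foobar', ['foo']))                # False
print(record_matches('nose.plugins', ['foo', 'nose']))  # True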
def options(self, parser, env): """Register commandline options. """ parser.add_option( "--nologcapture", action="store_false", default=not env.get(self.env_opt), dest="logcapture", help="Disable logging capture plugin. " "Logging configuration will be left intact." " [NOSE_NOLOGCAPTURE]") parser.add_option( "--logging-format", action="store", dest="logcapture_format", default=env.get('NOSE_LOGFORMAT') or self.logformat, metavar="FORMAT", help="Specify custom format to print statements. " "Uses the same format as used by standard logging handlers." " [NOSE_LOGFORMAT]") parser.add_option( "--logging-datefmt", action="store", dest="logcapture_datefmt", default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt, metavar="FORMAT", help="Specify custom date/time format to print statements. " "Uses the same format as used by standard logging handlers." " [NOSE_LOGDATEFMT]") parser.add_option( "--logging-filter", action="store", dest="logcapture_filters", default=env.get('NOSE_LOGFILTER'), metavar="FILTER", help="Specify which statements to filter in/out. " "By default, everything is captured. If the output is too" " verbose,\nuse this option to filter out needless output.\n" "Example: filter=foo will capture statements issued ONLY to\n" " foo or foo.what.ever.sub but not foobar or other logger.\n" "Specify multiple loggers with comma: filter=foo,bar,baz.\n" "If any logger name is prefixed with a minus, eg filter=-foo,\n" "it will be excluded rather than included. Default: " "exclude logging messages from nose itself (-nose)." " [NOSE_LOGFILTER]\n") parser.add_option( "--logging-clear-handlers", action="store_true", default=False, dest="logcapture_clear", help="Clear all other logging handlers") parser.add_option( "--logging-level", action="store", default='NOTSET', dest="logcapture_level", help="Set the log level to capture")
def configure(self, options, conf): """Configure plugin. """ self.conf = conf # Disable if explicitly disabled, or if logging is # configured via logging config file if not options.logcapture or conf.loggingConfig: self.enabled = False self.logformat = options.logcapture_format self.logdatefmt = options.logcapture_datefmt self.clear = options.logcapture_clear self.loglevel = options.logcapture_level if options.logcapture_filters: self.filters = options.logcapture_filters.split(',')
def formatError(self, test, err): """Add captured log messages to error output. """ # logic flow copied from Capture.formatError test.capturedLogging = records = self.formatLogRecords() if not records: return err ec, ev, tb = err return (ec, self.addCaptureToErr(ev, records), tb)
def embed(**kwargs): """Call this to embed IPython at the current point in your program. The first invocation of this will create an :class:`InteractiveShellEmbed` instance and then call it. Consecutive calls just call the already created instance. Here is a simple example:: from IPython import embed a = 10 b = 20 embed('First time') c = 30 d = 40 embed Full customization can be done by passing a :class:`Struct` in as the config argument. """ config = kwargs.get('config') header = kwargs.pop('header', u'') if config is None: config = load_default_config() config.InteractiveShellEmbed = config.TerminalInteractiveShell kwargs['config'] = config global _embedded_shell if _embedded_shell is None: _embedded_shell = InteractiveShellEmbed(**kwargs) _embedded_shell(header=header, stack_depth=2)
def mainloop(self, local_ns=None, module=None, stack_depth=0, display_banner=None, global_ns=None): """Embeds IPython into a running python program. Input: - header: An optional header message can be specified. - local_ns, module: working local namespace (a dict) and module (a module or similar object). If given as None, they are automatically taken from the scope where the shell was called, so that program variables become visible. - stack_depth: specifies how many levels in the stack to go to looking for namespaces (when local_ns or module is None). This allows an intermediate caller to make sure that this function gets the namespace from the intended level in the stack. By default (0) it will get its locals and globals from the immediate caller. Warning: it's possible to use this in a program which is being run by IPython itself (via %run), but some funny things will happen (a few globals get overwritten). In the future this will be cleaned up, as there is no fundamental reason why it can't work perfectly.""" if (global_ns is not None) and (module is None): class DummyMod(object): """A dummy module object for embedded IPython.""" pass warnings.warn("global_ns is deprecated, use module instead.", DeprecationWarning) module = DummyMod() module.__dict__ = global_ns # Get locals and globals from caller if (local_ns is None or module is None) and self.default_user_namespaces: call_frame = sys._getframe(stack_depth).f_back if local_ns is None: local_ns = call_frame.f_locals if module is None: global_ns = call_frame.f_globals module = sys.modules[global_ns['__name__']] # Save original namespace and module so we can restore them after # embedding; otherwise the shell doesn't shut down correctly. orig_user_module = self.user_module orig_user_ns = self.user_ns # Update namespaces and fire up interpreter # The global one is easy, we can just throw it in if module is not None: self.user_module = module # But the user/local one is tricky: ipython needs it to store internal # data, but we also need the locals. We'll throw our hidden variables # like _ih and get_ipython() into the local namespace, but delete them # later. if local_ns is not None: self.user_ns = local_ns self.init_user_ns() # Patch for global embedding to make sure that things don't overwrite # user globals accidentally. Thanks to Richard <rxe@renre-europe.com> # FIXME. Test this a bit more carefully (the if.. is new) # N.B. This can't now ever be called. Not sure what it was for. # And now, since it wasn't called in the previous version, I'm # commenting out these lines so they can't be called with my new changes # --TK, 2011-12-10 #if local_ns is None and module is None: # self.user_global_ns.update(__main__.__dict__) # make sure the tab-completer has the correct frame information, so it # actually completes using the frame's locals/globals self.set_completer_frame() with nested(self.builtin_trap, self.display_trap): self.interact(display_banner=display_banner) # now, purge out the local namespace of IPython's hidden variables. if local_ns is not None: for name in self.user_ns_hidden: local_ns.pop(name, None) # Restore original namespace so shell can shut down when we exit. self.user_module = orig_user_module self.user_ns = orig_user_ns
def dir2(obj): """dir2(obj) -> list of strings Extended version of the Python builtin dir(), which does a few extra checks, and supports common objects with unusual internals that confuse dir(), such as Traits and PyCrust. This version is guaranteed to return only a list of true strings, whereas dir() returns anything that objects inject into themselves, even if they are later not really valid for attribute access (many extension libraries have such bugs). """ # Start building the attribute list via dir(), and then complete it # with a few extra special-purpose calls. words = set(dir(obj)) if hasattr(obj, '__class__'): #words.add('__class__') words |= set(get_class_members(obj.__class__)) # for objects with Enthought's traits, add trait_names() list # for PyCrust-style, add _getAttributeNames() magic method list for attr in ('trait_names', '_getAttributeNames'): if hasattr(obj, attr): try: func = getattr(obj, attr) if callable(func): words |= set(func()) except: # TypeError: obj is class not instance pass # filter out non-string attributes which may be stuffed by dir() calls # and poor coding in third-party modules words = [w for w in words if isinstance(w, basestring)] return sorted(words)
def _get_all_po_filenames(locale_root, lang, po_files_path): """ Get all po filenames from locale folder and return list of them. Assumes a directory structure: <locale_root>/<lang>/<po_files_path>/<filename>. """ all_files = os.listdir(os.path.join(locale_root, lang, po_files_path)) return filter(lambda s: s.endswith('.po'), all_files)
def _get_new_csv_writers(trans_title, meta_title, trans_csv_path, meta_csv_path): """ Prepare new csv writers, write title rows and return them. """ trans_writer = UnicodeWriter(trans_csv_path) trans_writer.writerow(trans_title) meta_writer = UnicodeWriter(meta_csv_path) meta_writer.writerow(meta_title) return trans_writer, meta_writer
def _prepare_locale_dirs(languages, locale_root): """ Prepare locale dirs for writing po files. Create new directories if they don't exist. """ trans_languages = [] for i, t in enumerate(languages): lang = t.split(':')[0] trans_languages.append(lang) lang_path = os.path.join(locale_root, lang) if not os.path.exists(lang_path): os.makedirs(lang_path) return trans_languages
def _prepare_polib_files(files_dict, filename, languages, locale_root, po_files_path, header): """ Prepare polib file object for writing/reading from them. Create directories and write header if needed. For each language, ensure there's a translation file named "filename" in the correct place. Assumes (and creates) a directory structure: <locale_root>/<lang>/<po_files_path>/<filename>. """ files_dict[filename] = {} for lang in languages: file_path = os.path.join(locale_root, lang, po_files_path) if not os.path.exists(file_path): os.makedirs(file_path) if header is not None: _write_header(os.path.join(file_path, filename), lang, header) files_dict[filename][lang] = polib.pofile( os.path.join(file_path, filename), encoding="UTF-8")
def _write_entries(po_files, languages, msgid, msgstrs, metadata, comment): """ Write msgstr for every language with all needed metadata and comment. Metadata are parsed from a string into a dict, so read them only from gdocs. """ start = re.compile(r'^[\s]+') end = re.compile(r'[\s]+$') for i, lang in enumerate(languages): meta = ast.literal_eval(metadata) entry = polib.POEntry(**meta) entry.tcomment = comment entry.msgid = msgid if msgstrs[i]: start_ws = start.search(msgid) end_ws = end.search(msgid) entry.msgstr = str(start_ws.group() if start_ws else '') + \ unicode(msgstrs[i].strip()) + \ str(end_ws.group() if end_ws else '') else: entry.msgstr = '' po_files[lang].append(entry)
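The surrounding-whitespace trick above (copy the msgid's leading and trailing whitespace onto the stripped translation) in isolation, as a hypothetical helper:

import re

def copy_edge_whitespace(msgid, translation):
    # Re-apply the msgid's leading/trailing whitespace to the translation,
    # matching what _write_entries does when it builds entry.msgstr.
    start_ws = re.search(r'^\s+', msgid)
    end_ws = re.search(r'\s+$', msgid)
    return ((start_ws.group() if start_ws else '')
            + translation.strip()
            + (end_ws.group() if end_ws else ''))

print(repr(copy_edge_whitespace('  Hello \n', 'Bonjour')))  # '  Bonjour \n'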
def _write_header(po_path, lang, header): """ Write header into po file for specific lang. Metadata are read from settings file. """ po_file = open(po_path, 'w') po_file.write(header + '\n') po_file.write( 'msgid ""' + '\nmsgstr ""' + '\n"MIME-Version: ' + settings.METADATA['MIME-Version'] + r'\n"' '\n"Content-Type: ' + settings.METADATA['Content-Type'] + r'\n"' '\n"Content-Transfer-Encoding: ' + settings.METADATA['Content-Transfer-Encoding'] + r'\n"' '\n"Language: ' + lang + r'\n"' + '\n') po_file.close()
def _write_new_messages(po_file_path, trans_writer, meta_writer, msgids, msgstrs, languages): """ Write new msgids which appeared in po files with empty msgstrs values and metadata. Look for all new msgids which are diffed with msgids list provided as an argument. """ po_filename = os.path.basename(po_file_path) po_file = polib.pofile(po_file_path) new_trans = 0 for entry in po_file: if entry.msgid not in msgids: new_trans += 1 trans = [po_filename, entry.tcomment, entry.msgid, entry.msgstr] for lang in languages[1:]: trans.append(msgstrs[lang].get(entry.msgid, '')) meta = dict(entry.__dict__) meta.pop('msgid', None) meta.pop('msgstr', None) meta.pop('tcomment', None) trans_writer.writerow(trans) meta_writer.writerow([str(meta)]) return new_trans
def _get_new_msgstrs(po_file_path, msgids): """ Return a dict mapping each msgid that is not already in `msgids` to its msgstr, read from the po file at `po_file_path`. """ po_file = polib.pofile(po_file_path) msgstrs = {} for entry in po_file: if entry.msgid not in msgids: msgstrs[entry.msgid] = entry.msgstr return msgstrs
def po_to_csv_merge(languages, locale_root, po_files_path, local_trans_csv, local_meta_csv, gdocs_trans_csv, gdocs_meta_csv): """ Converts po file to csv GDocs spreadsheet readable format. Merges them if some msgid aren't in the spreadsheet. :param languages: list of language codes :param locale_root: path to locale root folder containing directories with languages :param po_files_path: path from lang directory to po file :param local_trans_csv: path where local csv with translations will be created :param local_meta_csv: path where local csv with metadata will be created :param gdocs_trans_csv: path to gdoc csv with translations """ msgids = [] trans_reader = UnicodeReader(gdocs_trans_csv) meta_reader = UnicodeReader(gdocs_meta_csv) try: trans_title = trans_reader.next() meta_title = meta_reader.next() except StopIteration: trans_title = ['file', 'comment', 'msgid'] trans_title += map(lambda s: s + ':msgstr', languages) meta_title = ['metadata'] trans_writer, meta_writer = _get_new_csv_writers( trans_title, meta_title, local_trans_csv, local_meta_csv) for trans_row, meta_row in izip_longest(trans_reader, meta_reader): msgids.append(trans_row[2]) trans_writer.writerow(trans_row) meta_writer.writerow(meta_row if meta_row else [METADATA_EMPTY]) trans_reader.close() meta_reader.close() po_files = _get_all_po_filenames(locale_root, languages[0], po_files_path) new_trans = False for po_filename in po_files: new_msgstrs = {} for lang in languages[1:]: po_file_path = os.path.join(locale_root, lang, po_files_path, po_filename) if not os.path.exists(po_file_path): open(po_file_path, 'a').close() new_msgstrs[lang] = _get_new_msgstrs(po_file_path, msgids) if len(new_msgstrs[languages[1]].keys()) > 0: new_trans = True po_file_path = os.path.join(locale_root, languages[0], po_files_path, po_filename) _write_new_messages(po_file_path, trans_writer, meta_writer, msgids, new_msgstrs, languages) trans_writer.close() meta_writer.close() return new_trans
def csv_to_po(trans_csv_path, meta_csv_path, locale_root, po_files_path, header=None): """ Converts GDocs spreadsheet generated csv file into po file. :param trans_csv_path: path to temporary file with translations :param meta_csv_path: path to temporary file with meta information :param locale_root: path to locale root folder containing directories with languages :param po_files_path: path from lang directory to po file """ pattern = "^\w+.*po$" for root, dirs, files in os.walk(locale_root): for f in filter(lambda x: re.match(pattern, x), files): os.remove(os.path.join(root, f)) # read title row and prepare descriptors for po files in each lang trans_reader = UnicodeReader(trans_csv_path) meta_reader = UnicodeReader(meta_csv_path) try: title_row = trans_reader.next() except StopIteration: # empty file return trans_languages = _prepare_locale_dirs(title_row[3:], locale_root) po_files = {} meta_reader.next() # go through every row in downloaded csv file for trans_row, meta_row in izip_longest(trans_reader, meta_reader): filename = trans_row[0].rstrip() metadata = meta_row[0].rstrip() if meta_row else METADATA_EMPTY comment = trans_row[1] msgid = trans_row[2] if filename not in po_files: _prepare_polib_files(po_files, filename, trans_languages, locale_root, po_files_path, header) _write_entries(po_files[filename], trans_languages, msgid, trans_row[3:], metadata, comment) for filename in po_files: for lang in po_files[filename]: po_files[filename][lang].save() trans_reader.close() meta_reader.close()
def subscribe_user(self, user): """ method to subscribe a user to a service """ url = self.root_url + "subscribe_user" values = {} values["username"] = user return self._query(url, values)
def send_notification(self, to=None, msg=None, label=None, title=None, uri=None): """ method to send a message to a user Parameters: to -> recipient msg -> message to send label -> application description title -> name of the notification event uri -> callback uri """ url = self.root_url + "send_notification" values = {} if to is not None: values["to"] = to if msg is not None: values["msg"] = msg if label is not None: values["label"] = label if title is not None: values["title"] = title if uri is not None: values["uri"] = uri return self._query(url, values)
def send_message(self, to=None, msg=None): """ method to send a message to a user Parameters: to -> recipient msg -> message to send """ url = self.root_url + "send_message" values = {} if to is not None: values["to"] = to if msg is not None: values["msg"] = msg return self._query(url, values)
def _query(self, url, data = None): """ query method to do HTTP POST/GET Parameters: url -> the url to POST/GET data -> header_data as a dict (only for POST) Returns: Parsed JSON data as dict or None on error """ auth = encodestring('%s:%s' % (self.user, self.secret)).replace('\n', '') if data is not None: # we have POST data if there is data values = urllib.urlencode(data) request = urllib2.Request(url, values) request.add_header("Authorization", "Basic %s" % auth) else: # do a GET otherwise request = urllib2.Request(url) request.add_header("Authorization", "Basic %s" % auth) try: response = urllib2.urlopen(request) except IOError, e: # no connection return {"status" : "error", "response_code" : e.code, "response_message" : e.msg } return json.loads(response.read())
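The Authorization header built in _query is plain HTTP Basic auth; the relevant piece in isolation (written with the same Python 2 base64.encodestring call the code uses; on Python 3 you would use base64.b64encode on bytes instead):

from base64 import encodestring

user, secret = 'alice', 's3cret'  # made-up credentials for illustration
auth = encodestring('%s:%s' % (user, secret)).replace('\n', '')
print("Basic %s" % auth)  # Basic YWxpY2U6czNjcmV0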
def init_parser(): """ function to init option parser """ usage = "usage: %prog -u user -s secret -n name [-l label] \ [-t title] [-c callback] [TEXT]" parser = OptionParser(usage, version="%prog " + notifo.__version__) parser.add_option("-u", "--user", action="store", dest="user", help="your notifo username") parser.add_option("-s", "--secret", action="store", dest="secret", help="your notifo API secret") parser.add_option("-n", "--name", action="store", dest="name", help="recipient for the notification") parser.add_option("-l", "--label", action="store", dest="label", help="label for the notification") parser.add_option("-t", "--title", action="store", dest="title", help="title of the notification") parser.add_option("-c", "--callback", action="store", dest="callback", help="callback URL to call") parser.add_option("-m", "--message", action="store_true", dest="message", default=False, help="send message instead of notification") (options, args) = parser.parse_args() return (parser, options, args)
def main(): """ main function """ # get options and arguments (parser, options, args) = init_parser() # initialize result variable result = None # check for values which are always needed if not options.user: parser.error("No user given.") if not options.secret: parser.error("No API secret given.") if not options.name: parser.error("No recipient given.") # If there is no message, we probably want to subscribe a user if len(args) < 1: result = notifo.subscribe_user(options.user, options.secret, options.name) else: params = {} params["to"] = options.name m = '' for a in args: m = "%s %s" %(m, a) params["msg"] = m if options.message == True: result = notifo.send_message(options.user, options.secret, **params) else: if options.label: params["label"] = options.label if options.title: params["title"] = options.title if options.callback: params["uri"] = options.callback result = notifo.send_notification(options.user,options.secret, **params) if result is None: print "Something went wrong. Check parameters and try again."
def rsplit1(s, sep): """The same as s.rsplit(sep, 1), but works in 2.3""" parts = s.split(sep) return sep.join(parts[:-1]), parts[-1]
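A quick check of the 2.3-compatible helper against the str.rsplit it emulates (rsplit1 repeated here so the snippet stands alone):

def rsplit1(s, sep):
    parts = s.split(sep)
    return sep.join(parts[:-1]), parts[-1]

print(rsplit1('pkg.sub.mod', '.'))    # ('pkg.sub', 'mod')
print('pkg.sub.mod'.rsplit('.', 1))   # ['pkg.sub', 'mod'] -- same pieces, as a list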
def run_python_module(modulename, args): """Run a python module, as though with ``python -m name args...``. `modulename` is the name of the module, possibly a dot-separated name. `args` is the argument array to present as sys.argv, including the first element naming the module being executed. """ openfile = None glo, loc = globals(), locals() try: try: # Search for the module - inside its parent package, if any - using # standard import mechanics. if '.' in modulename: packagename, name = rsplit1(modulename, '.') package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ else: packagename, name = None, modulename searchpath = None # "top-level search" in imp.find_module() openfile, pathname, _ = imp.find_module(name, searchpath) # Complain if this is a magic non-file module. if openfile is None and pathname is None: raise NoSource( "module does not live in a file: %r" % modulename ) # If `modulename` is actually a package, not a mere module, then we # pretend to be Python 2.7 and try running its __main__.py script. if openfile is None: packagename = modulename name = '__main__' package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ openfile, pathname, _ = imp.find_module(name, searchpath) except ImportError: _, err, _ = sys.exc_info() raise NoSource(str(err)) finally: if openfile: openfile.close() # Finally, hand the file off to run_python_file for execution. pathname = os.path.abspath(pathname) args[0] = pathname run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None): """Run a python file as if it were the main program on the command line. `filename` is the path to the file to execute, it need not be a .py file. `args` is the argument array to present as sys.argv, including the first element naming the file being executed. `package` is the name of the enclosing package, if any. """ # Create a module to serve as __main__ old_main_mod = sys.modules['__main__'] main_mod = imp.new_module('__main__') sys.modules['__main__'] = main_mod main_mod.__file__ = filename if package: main_mod.__package__ = package main_mod.__builtins__ = BUILTINS # Set sys.argv properly. old_argv = sys.argv sys.argv = args try: # Make a code object somehow. if filename.endswith(".pyc") or filename.endswith(".pyo"): code = make_code_from_pyc(filename) else: code = make_code_from_py(filename) # Execute the code object. try: exec_code_object(code, main_mod.__dict__) except SystemExit: # The user called sys.exit(). Just pass it along to the upper # layers, where it will be handled. raise except: # Something went wrong while executing the user code. # Get the exc_info, and pack them into an exception that we can # throw up to the outer loop. We peel two layers off the traceback # so that the coverage.py code doesn't appear in the final printed # traceback. typ, err, tb = sys.exc_info() raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next) finally: # Restore the old __main__ sys.modules['__main__'] = old_main_mod # Restore the old argv and path sys.argv = old_argv
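# Usage sketch for the two runners above (module names and paths are
# placeholders; both raise NoSource if the target cannot be located, and
# run_python_module overwrites args[0] with the resolved file path):
#
#   # emulate `python -m mypkg.tool --verbose`
#   run_python_module("mypkg.tool", ["mypkg.tool", "--verbose"])
#
#   # emulate `python script.py input.txt`
#   run_python_file("script.py", ["script.py", "input.txt"])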
def make_code_from_py(filename): """Get source from `filename` and make a code object of it.""" # Open the source file. try: source_file = open_source(filename) except IOError: raise NoSource("No file to run: %r" % filename) try: source = source_file.read() finally: source_file.close() # We have the source. `compile` still needs the last line to be clean, # so make sure it is, then compile a code object from it. if not source or source[-1] != '\n': source += '\n' code = compile(source, filename, "exec") return code
def make_code_from_pyc(filename): """Get a code object from a .pyc file.""" try: fpyc = open(filename, "rb") except IOError: raise NoCode("No file to run: %r" % filename) try: # First four bytes are a version-specific magic number. It has to # match or we won't run the file. magic = fpyc.read(4) if magic != imp.get_magic(): raise NoCode("Bad magic number in .pyc file") # Skip the junk in the header that we don't need. fpyc.read(4) # Skip the moddate. if sys.version_info >= (3, 3): # 3.3 added another long to the header (size), skip it. fpyc.read(4) # The rest of the file is the code object we want. code = marshal.load(fpyc) finally: fpyc.close() return code
def html_tableify(item_matrix, select=None, header=None, footer=None):
    """ Return a string containing an html table built from item_matrix. """
    if not item_matrix:
        return ''
    html_cols = []
    tds = lambda text: u'<td>' + text + u' </td>'
    trs = lambda text: u'<tr>' + text + u'</tr>'
    tds_items = [map(tds, row) for row in item_matrix]
    if select:
        row, col = select
        tds_items[row][col] = u'<td class="inverted">'\
                + item_matrix[row][col]\
                + u' </td>'  # highlight the selected item
    html_cols = map(trs, (u''.join(row) for row in tds_items))
    head = ''
    foot = ''
    if header:
        head = (u'<tr>'
                + ''.join((u'<td>' + header + u'</td>') * len(item_matrix[0]))
                + '</tr>')
    if footer:
        foot = (u'<tr>'
                + ''.join((u'<td>' + footer + u'</td>') * len(item_matrix[0]))
                + '</tr>')
    html = (u'<table class="completion" style="white-space:pre">'
            + head + (u''.join(html_cols)) + foot + u'</table>')
    return html
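# Rendering sketch for html_tableify (illustrative input, not from the
# original source):
#
#   html_tableify([[u'foo', u'bar'], [u'baz', u'qux']], select=(0, 1))
#
# returns a single <table class="completion"> string whose four <td>
# cells hold the items, with the cell at row 0, column 1 (u'bar')
# marked class="inverted".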
def current(self, value):
    """set current cursor position"""
    current = min(max(self._min, value), self._max)
    self._current = current

    if current > self._stop:
        self._stop = current
        self._start = current - self._width
    elif current < self._start:
        self._start = current
        self._stop = current + self._width

    if abs(self._start - self._min) <= self._sticky_lenght:
        self._start = self._min
    if abs(self._stop - self._max) <= self._sticky_lenght:
        self._stop = self._max
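# Behaviour sketch for the sticky clamping above (illustrative values;
# attribute names are the ones the setter already uses): with _min=0,
# _max=100, _width=10 and _sticky_lenght=2, assigning current=99 first
# slides the window to (89, 99), then the stop edge snaps to 100
# because abs(99 - 100) <= 2.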
def eventFilter(self, obj, event):
    """ Reimplemented to handle keyboard input and to auto-hide when the
        text edit loses focus.
    """
    if obj == self._text_edit:
        etype = event.type()
        if etype == QtCore.QEvent.KeyPress:
            key = event.key()
            if self._consecutive_tab == 0 and key in (QtCore.Qt.Key_Tab,):
                return False
            elif self._consecutive_tab == 1 and key in (QtCore.Qt.Key_Tab,):
                # OK, Tab pressed twice: grab focus and show the cursor
                self._consecutive_tab = self._consecutive_tab + 1
                self._update_list()
                return True
            elif self._consecutive_tab == 2:
                if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
                    self._complete_current()
                    return True
                if key in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Right):
                    self.select_right()
                    self._update_list()
                    return True
                elif key in (QtCore.Qt.Key_Down,):
                    self.select_down()
                    self._update_list()
                    return True
                elif key in (QtCore.Qt.Key_Up,):
                    self.select_up()
                    self._update_list()
                    return True
                elif key in (QtCore.Qt.Key_Left,):
                    self.select_left()
                    self._update_list()
                    return True
                elif key in (QtCore.Qt.Key_Escape,):
                    self.cancel_completion()
                    return True
                else:
                    self.cancel_completion()
            else:
                self.cancel_completion()
        elif etype == QtCore.QEvent.FocusOut:
            self.cancel_completion()
    return super(CompletionHtml, self).eventFilter(obj, event)
def cancel_completion(self):
    """Cancel the completion.

    Should be called when the completer has to be dismissed.

    This resets the internal state and clears the temporary buffer
    of the console where the completions are shown.
    """
    self._consecutive_tab = 0
    self._slice_start = 0
    self._console_widget._clear_temporary_buffer()
    self._index = (0, 0)
    if self._sliding_interval:
        self._sliding_interval = None
def _select_index(self, row, col):
    """Change the selection index, and make sure it stays in the right range.

    A little more complicated than just taking the index modulo the number
    of rows and columns, to be sure to cycle through all the elements.

    Horizontally, the elements are mapped like this:

    to r <-- a b c d e f --> to g
    to f <-- g h i j k l --> to m
    to l <-- m n o p q r --> to a

    and vertically:
    a d g j m p
    b e h k n q
    c f i l o r
    """
    nr, nc = self._size
    nr = nr - 1
    nc = nc - 1

    # case 1
    if (row > nr and col >= nc) or (row >= nr and col > nc):
        self._select_index(0, 0)
    # case 2
    elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
        self._select_index(nr, nc)
    # case 3
    elif row > nr:
        self._select_index(0, col + 1)
    # case 4
    elif row < 0:
        self._select_index(nr, col - 1)
    # case 5
    elif col > nc:
        self._select_index(row + 1, 0)
    # case 6
    elif col < 0:
        self._select_index(row - 1, nc)
    elif 0 <= row <= nr and 0 <= col <= nc:
        self._index = (row, col)
    else:
        raise NotImplementedError("you're trying to go where no completion "
                "has gone before: %d:%d (%d:%d)" % (row, col, nr, nc))
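# Wrap-around illustration for _select_index with a 3x6 grid, i.e. nr=2
# and nc=5 after the subtraction above (derived by tracing the cases, not
# taken from the original source):
#
#   _select_index(3, 2)   # -> (0, 3): off the bottom, top of next column
#   _select_index(1, 6)   # -> (2, 0): off the right, start of next row
#   _select_index(-1, 0)  # -> (2, 5): off the top at column 0, wraps to end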
def select_up(self): """move cursor up""" r, c = self._index self._select_index(r-1, c)
def select_down(self): """move cursor down""" r, c = self._index self._select_index(r+1, c)
def select_left(self): """move cursor left""" r, c = self._index self._select_index(r, c-1)
def select_right(self): """move cursor right""" r, c = self._index self._select_index(r, c+1)
def show_items(self, cursor, items):
    """ Shows the completion widget with 'items' at the position specified
        by 'cursor'.
    """
    if not items:
        return
    self._start_position = cursor.position()
    self._consecutive_tab = 1
    items_m, ci = text.compute_item_matrix(items, empty=' ')
    self._sliding_interval = SlidingInterval(len(items_m) - 1)

    self._items = items_m
    self._size = (ci['rows_numbers'], ci['columns_numbers'])
    self._old_cursor = cursor
    self._index = (0, 0)
    sjoin = lambda x: [y.ljust(w, ' ') for y, w in zip(x, ci['columns_width'])]
    self._justified_items = map(sjoin, items_m)
    self._update_list(hilight=False)
def _update_list(self, hilight=True):
    """ Update the list of completions and highlight the currently
    selected completion.
    """
    self._sliding_interval.current = self._index[0]
    head = None
    foot = None
    if self._sliding_interval.start > 0:
        head = '...'
    if self._sliding_interval.stop < self._sliding_interval._max:
        foot = '...'
    items_m = self._justified_items[
            self._sliding_interval.start:
            self._sliding_interval.stop + 1]

    self._console_widget._clear_temporary_buffer()
    if hilight:
        sel = (self._sliding_interval.nth, self._index[1])
    else:
        sel = None

    strng = html_tableify(items_m, select=sel, header=head, footer=foot)
    self._console_widget._fill_temporary_buffer(self._old_cursor, strng, html=True)
def _complete_current(self):
    """ Perform the completion with the currently selected item. """
    i = self._index
    item = self._items[i[0]][i[1]]
    item = item.strip()
    if item:
        self._current_text_cursor().insertText(item)
    self.cancel_completion()
def wordfreq(text, is_filename=False): """Return a dictionary of words and word counts in a string.""" if is_filename: with open(text) as f: text = f.read() freqs = {} for word in text.split(): lword = word.lower() freqs[lword] = freqs.get(lword, 0) + 1 return freqs
def print_wordfreq(freqs, n=10):
    """Print the n most common words and counts in the freqs dict."""
    # sorted() materializes the pairs, so this also works where zip()
    # returns an iterator
    items = sorted(zip(freqs.values(), freqs.keys()), reverse=True)
    for (count, word) in items[:n]:
        print(word, count)
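# Combined usage of the two helpers above (a sketch):
#
#   freqs = wordfreq("the quick fox jumps over the lazy dog the end")
#   print_wordfreq(freqs, n=3)
#
# prints the three most frequent lowercased words, starting with "the 3".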
def tostring(self): """Return the string representation of the job description XML.""" root = self.as_element() indent(root) txt = ET.tostring(root, encoding="utf-8") # Now remove the tokens used to order the attributes. txt = re.sub(r'_[A-Z]_','',txt) txt = '<?xml version="1.0" encoding="utf-8"?>\n' + txt return txt
def write(self, filename): """Write the XML job description to a file.""" txt = self.tostring() with open(filename, 'w') as f: f.write(txt)
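# Usage sketch (the class name "JobDescription" and its constructor are
# assumptions about the surrounding module; only the two methods above
# are taken from it):
#
#   jd = JobDescription(...)
#   xml_text = jd.tostring()   # ordering tokens already stripped
#   jd.write("job.xml")        # same serialization, written to disk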
def validate_pin(pin):
    """
    Validate the given pin against the schema.

    :param dict pin: The pin to validate.

    :raises pypebbleapi.schemas.DocumentError: If the pin is not valid.
    """
    v = _Validator(schemas.pin)
    if not v.validate(pin):
        raise schemas.DocumentError(errors=v.errors)
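# Validation sketch; the field set below follows the public Pebble
# timeline pin format, but schemas.pin is the authoritative definition:
#
#   pin = {
#       "id": "meeting-42",
#       "time": "2015-03-19T18:00:00Z",
#       "layout": {"type": "genericPin", "title": "Staff meeting"},
#   }
#   validate_pin(pin)   # returns None, or raises DocumentError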
def send_shared_pin(self, topics, pin, skip_validation=False): """ Send a shared pin for the given topics. :param list topics: The list of topics. :param dict pin: The pin. :param bool skip_validation: Whether to skip the validation. :raises pypebbleapi.schemas.DocumentError: If the validation process failed. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ if not self.api_key: raise ValueError("You need to specify an api_key.") if not skip_validation: validate_pin(pin) response = _request('PUT', url=self.url_v1('/shared/pins/' + pin['id']), user_agent=self.user_agent, api_key=self.api_key, topics_list=topics, json=pin, ) _raise_for_status(response)
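# Shared-pin sketch (the client class name "Timeline" is an assumption
# about the surrounding library; `pin` as in the validate_pin example):
#
#   timeline = Timeline(api_key="my-api-key")
#   timeline.send_shared_pin(["sports", "news"], pin)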
def delete_shared_pin(self, pin_id): """ Delete a shared pin. :param str pin_id: The id of the pin to delete. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ if not self.api_key: raise ValueError("You need to specify an api_key.") response = _request('DELETE', url=self.url_v1('/shared/pins/' + pin_id), user_agent=self.user_agent, api_key=self.api_key, ) _raise_for_status(response)
def send_user_pin(self, user_token, pin, skip_validation=False): """ Send a user pin. :param str user_token: The token of the user. :param dict pin: The pin. :param bool skip_validation: Whether to skip the validation. :raises pypebbleapi.schemas.DocumentError: If the validation process failed. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ if not skip_validation: validate_pin(pin) response = _request('PUT', url=self.url_v1('/user/pins/' + pin['id']), user_agent=self.user_agent, user_token=user_token, json=pin, ) _raise_for_status(response)
def delete_user_pin(self, user_token, pin_id): """ Delete a user pin. :param str user_token: The token of the user. :param str pin_id: The id of the pin to delete. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ response = _request('DELETE', url=self.url_v1('/user/pins/' + pin_id), user_agent=self.user_agent, user_token=user_token, ) _raise_for_status(response)
def subscribe(self, user_token, topic): """ Subscribe a user to the given topic. :param str user_token: The token of the user. :param str topic: The topic. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ response = _request('POST', url=self.url_v1('/user/subscriptions/' + topic), user_agent=self.user_agent, user_token=user_token, ) _raise_for_status(response)
def list_subscriptions(self, user_token): """ Get the list of the topics which a user is subscribed to. :param str user_token: The token of the user. :return: The list of the topics. :rtype: list :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ response = _request('GET', url=self.url_v1('/user/subscriptions'), user_agent=self.user_agent, user_token=user_token, ) _raise_for_status(response) return response.json()['topics']
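# User-token flow sketch (the token literal is a placeholder that a real
# deployment receives from the watchapp):
#
#   timeline.subscribe("a-user-token", "sports")
#   timeline.list_subscriptions("a-user-token")   # -> [u'sports', ...]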
def monitored(total: int, name=None, message=None):
    """
    Decorate a function to automatically begin and end a task on the
    progress monitor. The function must have a parameter called 'monitor'.
    """
    def decorator(f):
        nonlocal name
        monitor_index = list(inspect.signature(f).parameters.keys()).index('monitor')
        if name is None:
            name = f.__name__

        @wraps(f)
        def wrapper(*args, **kargs):
            if len(args) > monitor_index:
                monitor = args[monitor_index]
            elif 'monitor' in kargs:
                monitor = kargs['monitor']
            else:
                # fall back to a no-op monitor when the caller passes none
                monitor = kargs['monitor'] = NullMonitor()
            with monitor.task(total, name, message):
                return f(*args, **kargs)  # propagate the wrapped result
        return wrapper
    return decorator
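# Usage sketch for the decorator above (assumes a monitor object exposing
# the task(total, name, message) context manager the wrapper relies on;
# NullMonitor is the no-op fallback it already supplies):
#
#   @monitored(total=100, message="crunching")
#   def crunch(items, monitor):
#       for item in items:
#           ...   # report progress through `monitor` here
#
#   crunch(data)                      # runs under a NullMonitor
#   crunch(data, monitor=my_monitor)  # reports to a real monitor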