INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Interpret a template string. This returns a callable taking one argument -- this context -- and returning a string rendered from the template.
def template(self, string): """ Interpret a template string. This returns a callable taking one argument--this context--and returning a string rendered from the template. :param string: The template string. :returns: A callable of one argument that will return the desired string. """ # Short-circuit if the template "string" isn't actually a # string if not isinstance(string, six.string_types): return lambda ctxt: string # Create the template and return the callable tmpl = self._jinja.from_string(string) return lambda ctxt: tmpl.render(ctxt.variables)
Interpret an expression string. This returns a callable taking one argument -- this context -- and returning the result of evaluating the expression.
def expression(self, string): """ Interpret an expression string. This returns a callable taking one argument--this context--and returning the result of evaluating the expression. :param string: The expression. :returns: A callable of one argument that will return the desired expression. """ # Short-circuit if the expression "string" isn't actually a # string if not isinstance(string, six.string_types): return lambda ctxt: string # Create the expression and return the callable expr = self._jinja.compile_expression(string) return lambda ctxt: expr(ctxt.variables)
Get the output of the last command executed.
def last_error(self):
    """Get the output of the last command executed."""
    if not len(self.log):
        raise RuntimeError('Nothing executed')
    try:
        # log entries appear to be (cmd, returncode, output) tuples;
        # a nonzero returncode marks a failure -- TODO confirm against
        # the _exec implementation.
        errs = [l for l in self.log if l[1] != 0]
        return errs[-1][2]
    except IndexError:
        # odd case where there were no errors
        #TODO
        return 'no last error'
Wrapper for subprocess. check_output.
def check_output(self, cmd):
    """Wrapper for subprocess.check_output."""
    ret, output = self._exec(cmd)
    if not ret == 0:
        # any nonzero exit status counts as failure
        raise CommandError(self)
    return output
Fake the interface of subprocess.call().
def check_call(self, cmd):
    """Fake the interface of subprocess.call()."""
    ret, _ = self._exec(cmd)
    if not ret == 0:
        # any nonzero exit status counts as failure
        raise CommandError(self)
    return ret
Unpack multidict and positional args into a list appropriate for subprocess. :param param_kwargs: ParamDict storing '--param' style data. :param positional_args: flags. :param gnu: if True, long-name args are unpacked as --parameter=argument; otherwise they are unpacked as --parameter argument. :returns: list appropriate for sending to subprocess.
def unpack_pargs(positional_args, param_kwargs, gnu=False):
    """Unpack multidict and positional args into a list appropriate
    for subprocess.

    :param param_kwargs: ``ParamDict`` storing '--param' style data.
    :param positional_args: flags
    :param gnu: if True, long-name args are unpacked as:
                    --parameter=argument
                otherwise, they are unpacked as:
                    --parameter argument
    :returns: list appropriate for sending to subprocess
    """
    def _as_flag(name):
        """Turn a python identifier into a shell-style option name:
        one dash for single letters, two dashes (and underscores
        mapped to hyphens) for longer names.
        """
        if len(name) == 1:
            return '-{}'.format(name)
        return '--{}'.format(name.replace('_', '-'))

    shell_args = []
    for key in param_kwargs.keys():
        flag = _as_flag(key)
        # a multidict key may carry several values; emit one
        # flag/value pair per value
        for val in param_kwargs.getlist(key):
            if gnu:
                shell_args.append('{}={}'.format(flag, val))
            else:
                shell_args.extend([flag, val])
    if positional_args:
        for flag_name in positional_args:
            shell_args.append(_as_flag(flag_name))
    return shell_args
Find the source for filename.
def find_source(self, filename):
    """Find the source for `filename`.

    Returns two values: the actual filename, and the source.

    The source returned depends on which of these cases holds:

    * The filename seems to be a non-source file: returns None

    * The filename is a source file, and actually exists: returns None.

    * The filename is a source file, and is in a zip file or egg:
      returns the source.

    * The filename is a source file, but couldn't be found: raises
      `NoSource`.

    """
    source = None

    base, ext = os.path.splitext(filename)
    TRY_EXTS = {
        '.py': ['.py', '.pyw'],
        '.pyw': ['.pyw'],
        }
    try_exts = TRY_EXTS.get(ext)
    if not try_exts:
        # Not a Python source extension: treat as non-source.
        return filename, None

    for try_ext in try_exts:
        try_filename = base + try_ext
        if os.path.exists(try_filename):
            return try_filename, None
        # Not on disk -- it may live inside a zip file or egg.
        source = self.coverage.file_locator.get_zip_data(try_filename)
        if source:
            return try_filename, source
    raise NoSource("No source for code: '%s'" % filename)
Returns a sorted list of the arcs actually executed in the code.
def arcs_executed(self):
    """Returns a sorted list of the arcs actually executed in the code."""
    executed = self.coverage.data.executed_arcs(self.filename)
    # Map recorded line numbers through parser.first_line -- presumably
    # normalizing multi-line statements to their first line so they
    # compare against parsed arcs; TODO confirm.
    m2fl = self.parser.first_line
    executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed]
    return sorted(executed)
Returns a sorted list of the arcs in the code not executed.
def arcs_missing(self):
    """Returns a sorted list of the arcs in the code not executed."""
    possible = self.arc_possibilities()
    executed = self.arcs_executed()
    # Keep possible arcs that never ran, excluding those whose source
    # line is in self.no_branch (lines excluded from branch analysis --
    # TODO confirm semantics of no_branch).
    missing = [p for p in possible
               if p not in executed
               and p[0] not in self.no_branch]
    return sorted(missing)
Returns a sorted list of the executed arcs missing from the code.
def arcs_unpredicted(self):
    """Returns a sorted list of the executed arcs missing from the code."""
    possible = self.arc_possibilities()
    executed = self.arcs_executed()
    # Exclude arcs here which connect a line to itself.  They can occur
    # in executed data in some cases.  This is where they can cause
    # trouble, and here is where it's the least burden to remove them.
    unpredicted = [e for e in executed
                   if e not in possible
                   and e[0] != e[1]]
    return sorted(unpredicted)
Returns a list of line numbers that have more than one exit.
def branch_lines(self):
    """Returns a list of line numbers that have more than one exit."""
    # More than one exit from a line means it is a branch point.
    exit_counts = self.parser.exit_counts()
    return [l1 for l1,count in iitems(exit_counts) if count > 1]
How many total branches are there?
def total_branches(self):
    """How many total branches are there?"""
    # Only lines with more than one exit contribute branches.
    exit_counts = self.parser.exit_counts()
    return sum([count for count in exit_counts.values() if count > 1])
Return arcs that weren't executed from branch lines.
def missing_branch_arcs(self):
    """Return arcs that weren't executed from branch lines.

    Returns {l1:[l2a,l2b,...], ...}

    """
    missing = self.arcs_missing()
    branch_lines = set(self.branch_lines())
    mba = {}
    for l1, l2 in missing:
        # Only arcs originating at a branch line are interesting here.
        if l1 in branch_lines:
            if l1 not in mba:
                mba[l1] = []
            mba[l1].append(l2)
    return mba
Get stats about branches.
def branch_stats(self):
    """Get stats about branches.

    Returns a dict mapping line numbers to a tuple:
    (total_exits, taken_exits).
    """
    exit_counts = self.parser.exit_counts()
    missing_arcs = self.missing_branch_arcs()
    stats = {}
    for lnum in self.branch_lines():
        exits = exit_counts[lnum]
        try:
            missing = len(missing_arcs[lnum])
        except KeyError:
            # no missing arcs from this line: all exits were taken
            missing = 0
        stats[lnum] = (exits, exits - missing)
    return stats
Set the number of decimal places used to report percentages.
def set_precision(cls, precision):
    """Set the number of decimal places used to report percentages."""
    # Anything beyond 9 decimal places is meaningless for a percentage.
    assert 0 <= precision < 10
    cls._precision = precision
    # Thresholds used elsewhere so a nonzero value never displays as
    # "0" and an imperfect value never displays as "100".
    near_zero = 1.0 / 10 ** precision
    cls._near0 = near_zero
    cls._near100 = 100.0 - near_zero
Returns a single percentage value for coverage.
def _get_pc_covered(self): """Returns a single percentage value for coverage.""" if self.n_statements > 0: pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) / (self.n_statements + self.n_branches)) else: pc_cov = 100.0 return pc_cov
Returns the percent covered as a string without a percent sign.
def _get_pc_covered_str(self): """Returns the percent covered, as a string, without a percent sign. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". """ pc = self.pc_covered if 0 < pc < self._near0: pc = self._near0 elif self._near100 < pc < 100: pc = self._near100 else: pc = round(pc, self._precision) return "%.*f" % (self._precision, pc)
Applies cls_name to all needles found in haystack.
def highlight_text(needles, haystack, cls_name='highlighted', words=False, case=False):
    """
    Applies cls_name to all needles found in haystack.

    :param needles: strings to highlight.
    :param haystack: the text searched.
    :param cls_name: CSS class applied to each match's <span>.
    :param words: if True, only match whole words (\\b boundaries).
    :param case: if True, match case-sensitively.
    """
    if not needles:
        return haystack
    if not haystack:
        return ''
    # Build one alternation pattern over all (escaped) needles.
    if words:
        pattern = r"(%s)" % "|".join(['\\b{}\\b'.format(re.escape(n)) for n in needles])
    else:
        pattern = r"(%s)" % "|".join([re.escape(n) for n in needles])
    if case:
        regex = re.compile(pattern)
    else:
        regex = re.compile(pattern, re.I)
    # Stitch the output together: untouched text between matches,
    # each match wrapped in a span.
    i, out = 0, ""
    for m in regex.finditer(haystack):
        out += "".join([haystack[i:m.start()],
                        '<span class="%s">' % cls_name,
                        haystack[m.start():m.end()],
                        "</span>"])
        i = m.end()
    return mark_safe(out + haystack[i:])
Given an list of words this function highlights the matched text in the given string.
def highlight(string, keywords, cls_name='highlighted'):
    """
    Given an list of words, this function highlights the matched text in the
    given string.
    """
    if not keywords:
        return string
    if not string:
        return ''
    # NOTE(review): the `exclude` tokens are currently unused here.
    include, exclude = get_text_tokenizer(keywords)
    highlighted = highlight_text(include, string, cls_name)
    return highlighted
Given an list of words this function highlights the matched words in the given string.
def highlight_words(string, keywords, cls_name='highlighted'):
    """
    Given an list of words, this function highlights the matched words in the
    given string.
    """
    if not keywords:
        return string
    if not string:
        return ''
    # NOTE(review): the `exclude` tokens are currently unused here.
    include, exclude = get_text_tokenizer(keywords)
    # words=True restricts matches to whole words.
    highlighted = highlight_text(include, string, cls_name, words=True)
    return highlighted
Run a distutils setup script sandboxed in its directory
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    # NOTE: Python 2 code (`except SystemExit, v`, execfile, list-map).
    # Save everything the setup script may clobber so it can be
    # restored afterwards.
    old_dir = os.getcwd()
    save_argv = sys.argv[:]
    save_path = sys.path[:]
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    temp_dir = os.path.join(setup_dir,'temp')
    if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
    save_tmp = tempfile.tempdir
    save_modules = sys.modules.copy()
    pr_state = pkg_resources.__getstate__()
    try:
        tempfile.tempdir = temp_dir
        os.chdir(setup_dir)
        try:
            sys.argv[:] = [setup_script]+list(args)
            sys.path.insert(0, setup_dir)
            # Execute setup.py as __main__ under the directory sandbox.
            DirectorySandbox(setup_dir).run(
                lambda: execfile(
                    "setup.py",
                    {'__file__':setup_script, '__name__':'__main__'}
                )
            )
        except SystemExit, v:
            # Only re-raise non-zero exits.
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
    finally:
        pkg_resources.__setstate__(pr_state)
        sys.modules.update(save_modules)
        # remove any modules imported within the sandbox
        del_modules = [
            mod_name for mod_name in sys.modules
            if mod_name not in save_modules
            # exclude any encodings modules. See #285
            and not mod_name.startswith('encodings.')
        ]
        map(sys.modules.__delitem__, del_modules)
        os.chdir(old_dir)
        sys.path[:] = save_path
        sys.argv[:] = save_argv
        tempfile.tempdir = save_tmp
Run func under os sandboxing
def run(self, func):
    """Run 'func' under os sandboxing"""
    try:
        # Swap the module-level os functions for the sandboxed copies.
        self._copy(self)
        if _file:
            # `file` only exists on Python 2.
            __builtin__.file = self._file
        __builtin__.open = self._open
        self._active = True
        return func()
    finally:
        # Always restore the real builtins and os functions.
        self._active = False
        if _file:
            __builtin__.file = _file
        __builtin__.open = _open
        self._copy(_os)
Called for low-level os.open().
def open(self, file, flags, mode=0777):
    """Called for low-level os.open()"""
    # NOTE: 0777 is Python 2 octal syntax.
    # Only writes outside the sandbox directory are violations.
    if flags & WRITE_FLAGS and not self._ok(file):
        self._violation("os.open", file, flags, mode)
    return _os.open(file,flags,mode)
Remove a single pair of quotes from the endpoints of a string.
def unquote_ends(istr):
    """Remove a single pair of quotes from the endpoints of a string."""
    if not istr:
        return istr
    # Strip only when the string starts and ends with the *same*
    # quote character.
    for quote in ("'", '"'):
        if istr[0] == quote and istr[-1] == quote:
            return istr[1:-1]
    return istr
Similar to Perl's qw() operator, but with some more options.
def qw(words,flat=0,sep=None,maxsplit=-1):
    """Similar to Perl's qw() operator, but with some more options.

    qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)

    words can also be a list itself, and with flat=1, the output will be
    recursively flattened.

    Examples:

    >>> qw('1 2')
    ['1', '2']

    >>> qw(['a b','1 2',['m n','p q']])
    [['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]

    >>> qw(['a b','1 2',['m n','p q']],flat=1)
    ['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
    """
    # NOTE: Python 2 code (basestring; map must return a list).
    if isinstance(words, basestring):
        return [word.strip() for word in words.split(sep,maxsplit)
                if word and not word.isspace() ]
    if flat:
        # Recurse into each element with flat=1, then flatten.
        return flatten(map(qw,words,[1]*len(words)))
    return map(qw,words)
Simple-minded grep-like function. grep(pat, list) returns occurrences of pat in list, None on failure.
def grep(pat,list,case=1): """Simple minded grep-like function. grep(pat,list) returns occurrences of pat in list, None on failure. It only does simple string matching, with no support for regexps. Use the option case=0 for case-insensitive matching.""" # This is pretty crude. At least it should implement copying only references # to the original data in case it's big. Now it copies the data for output. out=[] if case: for term in list: if term.find(pat)>-1: out.append(term) else: lpat=pat.lower() for term in list: if term.lower().find(lpat)>-1: out.append(term) if len(out): return out else: return None
Return grep () on dir () + dir ( __builtins__ ).
def dgrep(pat,*opts):
    """Return grep() on dir()+dir(__builtins__).

    A very common use of grep() when working interactively.
    """
    # Searches the interactive (__main__) namespace plus builtins.
    return grep(pat,dir(__main__)+dir(__main__.__builtins__),*opts)
Indent a string a given number of spaces or tabstops.
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string a given number of spaces or tabstops.

    indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.

    Parameters
    ----------
    instr : basestring
        The string to be indented.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation.  If True, all lines will be
        aligned to the same indentation.  If False, existing indentation will
        be strictly increased.

    Returns
    -------
    str|unicode : string indented by ntabs and nspaces.
    """
    if instr is None:
        return
    prefix = '\t' * ntabs + ' ' * nspaces
    # With flatten, existing leading whitespace is consumed by the
    # pattern and replaced; otherwise the prefix is simply prepended.
    anchor = r'^\s*' if flatten else r'^'
    indented = re.sub(re.compile(anchor, re.MULTILINE), prefix, instr)
    # re.MULTILINE also matches after a trailing line end; drop the
    # spurious prefix added past the final newline.
    if indented.endswith(os.linesep + prefix):
        indented = indented[:-len(prefix)]
    return indented
Convert ( in - place ) a file to line - ends native to the current OS.
def native_line_ends(filename,backup=1):
    """Convert (in-place) a file to line-ends native to the current OS.

    If the optional backup argument is given as false, no backup of the
    original file is left.

    :param filename: path of the file to convert.
    :param backup: if falsy, the ``.bak``/``~`` copy is removed on success.
    """
    backup_suffixes = {'posix':'~','dos':'.bak','nt':'.bak','mac':'.bak'}
    bak_filename = filename + backup_suffixes[os.name]

    # Use context managers so the file handles are always closed
    # (the originals leaked an open read handle and a write handle
    # on error paths).
    with open(filename) as infile:
        original = infile.read()
    shutil.copy2(filename,bak_filename)
    try:
        with open(filename,'wb') as new:
            new.write(os.linesep.join(original.splitlines()))
            new.write(os.linesep) # ALWAYS put an eol at the end of the file
    except Exception:
        # Write failed: roll the backup copy back into place.  Catch
        # Exception (not bare except) so KeyboardInterrupt/SystemExit
        # are not swallowed.
        os.rename(bak_filename,filename)
    if not backup:
        try:
            os.remove(bak_filename)
        except OSError:
            # best-effort cleanup; the backup may already be gone
            pass
Return the input string centered in a marquee.
def marquee(txt='', width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    :Examples:

        In [16]: marquee('A test',40)
        Out[16]: '**************** A test ****************'

        In [17]: marquee('A test',40,'-')
        Out[17]: '---------------- A test ----------------'

        In [18]: marquee('A test',40,' ')
        Out[18]: '                 A test                 '
    """
    if not txt:
        # No text: just fill the whole width with the mark.
        return (mark * width)[:width]
    # Repetitions of `mark` on each side, accounting for the two spaces
    # around the text; clamped so it is never negative.
    per_side = max(0, (width - len(txt) - 2) // len(mark) // 2)
    banner = mark * per_side
    return '%s %s %s' % (banner, txt, banner)
Format a string for screen printing.
def format_screen(strng):
    """Format a string for screen printing.

    This removes some latex-type format codes."""
    # Drop trailing backslashes at end-of-line (LaTeX "paragraph
    # continue" markers).
    return re.sub(r'\\$', '', strng, flags=re.MULTILINE)
Equivalent of textwrap. dedent that ignores unindented first line.
def dedent(text):
    """Equivalent of textwrap.dedent that ignores unindented first line.

    This means it will still dedent strings like:
    '''foo
    is a bar
    '''

    For use in wrap_paragraphs.
    """
    if text.startswith('\n'):
        # text starts with blank line, don't ignore the first line
        return textwrap.dedent(text)

    head, sep, tail = text.partition('\n')
    if not sep:
        # only one line
        return textwrap.dedent(text)
    # dedent everything but the first line
    return head + '\n' + textwrap.dedent(tail)
Wrap multiple paragraphs to fit a specified width.
def wrap_paragraphs(text, ncols=80):
    """Wrap multiple paragraphs to fit a specified width.

    This is equivalent to textwrap.wrap, but with support for multiple
    paragraphs, as separated by empty lines.

    Returns
    -------
    list of complete paragraphs, wrapped to fill `ncols` columns.
    """
    blank_line_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
    indented_re = re.compile(r'\n\s+', re.MULTILINE)
    body = dedent(text).strip()
    # re.split keeps the captured separators at odd indices; the
    # paragraph text is at the even indices.
    paragraphs = blank_line_re.split(body)[::2]
    # Indentation that survives dedent is presumed to be meaningful
    # formatting, so only fill paragraphs that are flush left.
    return [p if indented_re.search(p) else textwrap.fill(p, ncols)
            for p in paragraphs]
Return the longest common substring in a list of strings. Credit: http:// stackoverflow. com/ questions/ 2892931/ longest - common - substring - from - more - than - two - strings - python
def long_substr(data):
    """Return the longest common substring in a list of strings.

    Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
    """
    if len(data) == 1:
        return data[0]
    best = ''
    if len(data) > 1 and len(data[0]) > 0:
        first = data[0]
        # Try every substring of the first string, longest-wins.
        for start in range(len(first)):
            for size in range(len(first) - start + 1):
                if size > len(best):
                    candidate = first[start:start + size]
                    if all(candidate in s for s in data):
                        best = candidate
    return best
Strip leading email quotation characters ( > ).
def strip_email_quotes(text):
    """Strip leading email quotation characters ('>').

    Removes any combination of leading '>' interspersed with whitespace that
    appears *identically* in all lines of the input text.

    Parameters
    ----------
    text : str

    Examples
    --------

    Simple uses::

        In [2]: strip_email_quotes('> > text')
        Out[2]: 'text'

        In [3]: strip_email_quotes('> > text\\n> > more')
        Out[3]: 'text\\nmore'

    Note how only the common prefix that appears in all lines is stripped::

        In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
        Out[4]: '> text\\n> more\\nmore...'

    So if any line has no quote marks ('>'), then none are stripped from any
    of them::

        In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
        Out[5]: '> > text\\n> > more\\nlast different'
    """
    lines = text.splitlines()
    matches = set()
    for line in lines:
        prefix = re.match(r'^(\s*>[ >]*)', line)
        if prefix:
            matches.add(prefix.group(1))
        else:
            # A line without a quote prefix: strip nothing anywhere.
            break
    else:
        # for/else: only runs when no line broke out above.
        # The common prefix across all lines is their longest shared
        # leading substring.
        prefix = long_substr(list(matches))
        if prefix:
            strip = len(prefix)
            text = '\n'.join([ln[strip:] for ln in lines])
    return text
Calculate optimal info to columnize a list of string
def _find_optimal(rlist , separator_size=2 , displaywidth=80):
    """Calculate optimal info to columnize a list of string"""
    # Try increasing row counts until the widest-column layout fits in
    # displaywidth.  NOTE(review): Python 2 assumed -- `map` must return
    # a list (len()/sum() are both applied); on Python 3 this breaks.
    # NOTE(review): an empty rlist leaves nrow/ncols/chk unbound and the
    # return statement raises NameError -- confirm callers never pass [].
    for nrow in range(1, len(rlist)+1) :
        chk = map(max,_chunks(rlist, nrow))
        sumlength = sum(chk)
        ncols = len(chk)
        if sumlength+separator_size*(ncols-1) <= displaywidth :
            break;
    return {'columns_numbers' : ncols,
            'optimal_separator_width':(displaywidth - sumlength)/(ncols-1) if (ncols -1) else 0,
            'rows_numbers' : nrow,
            'columns_width' : chk
           }
Return list item at index i, or default if it doesn't exist.
def _get_or_default(mylist, i, default=None): """return list item number, or default if don't exist""" if i >= len(mylist): return default else : return mylist[i]
Returns a nested list and info to columnize items
def compute_item_matrix(items, empty=None, *args, **kwargs) :
    """Returns a nested list, and info to columnize items

    Parameters
    ----------
    items :
        list of strings to columize
    empty : (default None)
        default value to fill list if needed
    separator_size : int (default=2)
        How much caracters will be used as a separation between each columns.
    displaywidth : int (default=80)
        The width of the area onto wich the columns should enter

    Returns
    -------
    Returns a tuple of (strings_matrix, dict_info)

    strings_matrix :
        nested list of string, the outer most list contains as many list as
        rows, the innermost lists have each as many element as colums. If the
        total number of elements in `items` does not equal the product of
        rows*columns, the last element of some lists are filled with `None`.

    dict_info :
        some info to make columnize easier:

        columns_numbers : number of columns
        rows_numbers : number of rows
        columns_width : list of with of each columns
        optimal_separator_width : best separator width between columns

    Exemple
    -------
    ::

        In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
           ...: compute_item_matrix(l,displaywidth=12)
        Out[1]:
            ([['aaa', 'f', 'k'],
              ['b', 'g', 'l'],
              ['cc', 'h', None],
              ['d', 'i', None],
              ['eeeee', 'j', None]],
             {'columns_numbers': 3,
              'columns_width': [5, 1, 1],
              'optimal_separator_width': 2,
              'rows_numbers': 5})
    """
    # NOTE(review): map() must yield a list here (Python 2); on Python 3
    # _find_optimal would receive a one-shot iterator -- confirm target
    # version.
    info = _find_optimal(map(len, items), *args, **kwargs)
    nrow, ncol = info['rows_numbers'], info['columns_numbers']
    # Items are laid out column-major: element (row i, col c) is
    # items[c*nrow + i], padded with `empty` past the end.
    return ([[ _get_or_default(items, c*nrow+i, default=empty) for c in range(ncol) ] for i in range(nrow) ], info)
Transform a list of strings into a single string with columns.
def columnize(items, separator=' ', displaywidth=80):
    """ Transform a list of strings into a single string with columns.

    Parameters
    ----------
    items : sequence of strings
        The strings to process.

    separator : str, optional [default is two spaces]
        The string that separates columns.

    displaywidth : int, optional [default is 80]
        Width of the display in number of characters.

    Returns
    -------
    The formatted string.
    """
    # NOTE(review): the docstring says the default separator is two spaces
    # but the default literal is a single space -- possibly a
    # whitespace-mangling artifact; confirm the intended default.
    if not items :
        return '\n'
    matrix, info = compute_item_matrix(items, separator_size=len(separator), displaywidth=displaywidth)
    # filter(None, ...) drops the padding cells from each row
    # (Python 2: filter returns a list).
    fmatrix = [filter(None, x) for x in matrix]
    sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['columns_width'])])
    return '\n'.join(map(sjoin, fmatrix))+'\n'
Return all strings matching pattern ( a regex or callable )
def grep(self, pattern, prune = False, field = None):
    """ Return all strings matching 'pattern' (a regex or callable)

    This is case-insensitive. If prune is true, return all items
    NOT matching the pattern.

    If field is specified, the match must occur in the specified
    whitespace-separated field.

    Examples::

        a.grep( lambda x: x.startswith('C') )
        a.grep('Cha.*log', prune=1)
        a.grep('chm', field=-1)
    """

    def match_target(s):
        """Pick the whitespace-separated field to match against, or the
        whole string when field is None; missing fields match nothing.
        """
        if field is None:
            return s
        parts = s.split()
        try:
            tgt = parts[field]
            return tgt
        except IndexError:
            return ""

    # String patterns become case-insensitive regex predicates;
    # anything else is assumed to already be a callable predicate.
    if isinstance(pattern, basestring):
        pred = lambda x : re.search(pattern, x, re.IGNORECASE)
    else:
        pred = pattern
    if not prune:
        return SList([el for el in self if pred(match_target(el))])
    else:
        return SList([el for el in self if not pred(match_target(el))])
Collect whitespace - separated fields from string list
def fields(self, *fields):
    """ Collect whitespace-separated fields from string list

    Allows quick awk-like usage of string lists.

    Example data (in var a, created by 'a = !ls -l')::

        -rwxrwxrwx  1 ville None      18 Dec 14  2006 ChangeLog
        drwxrwxrwx+ 6 ville None       0 Oct 24 18:05 IPython

    * a.fields(0) is ['-rwxrwxrwx', 'drwxrwxrwx+']
    * a.fields(1,0) is ['1 -rwxrwxrwx', '6 drwxrwxrwx+']
      (note the joining by space).
    * a.fields(-1) is ['ChangeLog', 'IPython']

    IndexErrors are ignored.

    Without args, fields() just split()'s the strings.
    """
    if len(fields) == 0:
        return [el.split() for el in self]

    res = SList()
    for el in [f.split() for f in self]:
        lineparts = []
        for fd in fields:
            try:
                lineparts.append(el[fd])
            except IndexError:
                # Requested field doesn't exist on this line: skip it.
                pass
        if lineparts:
            res.append(" ".join(lineparts))
    return res
sort by specified fields ( see fields () )
def sort(self,field= None, nums = False): """ sort by specified fields (see fields()) Example:: a.sort(1, nums = True) Sorts a by second field, in numerical order (so that 21 > 3) """ #decorate, sort, undecorate if field is not None: dsu = [[SList([line]).fields(field), line] for line in self] else: dsu = [[line, line] for line in self] if nums: for i in range(len(dsu)): numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()]) try: n = int(numstr) except ValueError: n = 0; dsu[i][0] = n dsu.sort() return SList([t[1] for t in dsu])
Read a Python file using the encoding declared inside the file. Parameters ---------- filename: str The path to the file to read. skip_encoding_cookie: bool If True ( the default ) and the encoding declaration is found in the first two lines that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file.
def read_py_file(filename, skip_encoding_cookie=True):
    """Read a Python file, using the encoding declared inside the file.

    Parameters
    ----------
    filename : str
        The path to the file to read.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the
        first two lines, that line will be excluded from the output -
        compiling a unicode string with an encoding declaration is a
        SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    with open(filename) as f:  # the open function defined in this module.
        if skip_encoding_cookie:
            return "".join(strip_encoding_cookie(f))
        else:
            return f.read()
Read a Python file from a URL using the encoding declared inside the file. Parameters ---------- url: str The URL from which to fetch the file. errors: str How to handle decoding errors in the file. Options are the same as for bytes. decode () but here replace is the default. skip_encoding_cookie: bool If True ( the default ) and the encoding declaration is found in the first two lines that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file.
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the
    file.

    Parameters
    ----------
    url : str
        The URL from which to fetch the file.
    errors : str
        How to handle decoding errors in the file. Options are the same as
        for bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the
        first two lines, that line will be excluded from the output -
        compiling a unicode string with an encoding declaration is a
        SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    # NOTE: urllib.urlopen is the Python 2 spelling.
    response = urllib.urlopen(url)
    # Buffer the whole body so detect_encoding can peek and we can
    # then rewind and decode from the start.
    buffer = io.BytesIO(response.read())
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return "".join(strip_encoding_cookie(text))
    else:
        return text.read()
build argv to be passed to kernel subprocess
def build_kernel_argv(self, argv=None):
    """build argv to be passed to kernel subprocess"""
    if argv is None:
        argv = sys.argv[1:]
    # Strip frontend-only flags/aliases so the kernel doesn't see them.
    self.kernel_argv = swallow_argv(argv, self.frontend_aliases, self.frontend_flags)
    # kernel should inherit default config file from frontend
    self.kernel_argv.append("--KernelApp.parent_appname='%s'"%self.name)
find the connection file and load the info if found. The current working directory and the current profile s security directory will be searched for the file if it is not given by absolute path. When attempting to connect to an existing kernel and the -- existing argument does not match an existing file it will be interpreted as a fileglob and the matching file in the current profile s security dir with the latest access time will be used. After this method is called self. connection_file contains the * full path * to the connection file never just its name.
def init_connection_file(self):
    """find the connection file, and load the info if found.

    The current working directory and the current profile's security
    directory will be searched for the file if it is not given by
    absolute path.

    When attempting to connect to an existing kernel and the `--existing`
    argument does not match an existing file, it will be interpreted as a
    fileglob, and the matching file in the current profile's security dir
    with the latest access time will be used.

    After this method is called, self.connection_file contains the *full
    path* to the connection file, never just its name.
    """
    if self.existing:
        try:
            cf = find_connection_file(self.existing)
        except Exception:
            self.log.critical("Could not find existing kernel connection file %s", self.existing)
            self.exit(1)
        self.log.info("Connecting to existing kernel: %s" % cf)
        self.connection_file = cf
    else:
        # not existing, check if we are going to write the file
        # and ensure that self.connection_file is a full path, not just
        # the shortname
        try:
            cf = find_connection_file(self.connection_file)
        except Exception:
            # file might not exist
            if self.connection_file == os.path.basename(self.connection_file):
                # just shortname, put it in security dir
                cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
            else:
                cf = self.connection_file
            self.connection_file = cf

    # should load_connection_file only be used for existing?
    # as it is now, this allows reusing ports if an existing
    # file is requested
    try:
        self.load_connection_file()
    except Exception:
        self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
        self.exit(1)
set up ssh tunnels if needed.
def init_ssh(self):
    """set up ssh tunnels, if needed."""
    if not self.sshserver and not self.sshkey:
        return

    if self.sshkey and not self.sshserver:
        # specifying just the key implies that we are connecting directly
        self.sshserver = self.ip
        self.ip = LOCALHOST

    # build connection dict for tunnels:
    info = dict(ip=self.ip,
                shell_port=self.shell_port,
                iopub_port=self.iopub_port,
                stdin_port=self.stdin_port,
                hb_port=self.hb_port
    )

    self.log.info("Forwarding connections to %s via %s"%(self.ip, self.sshserver))

    # tunnels return a new set of ports, which will be on localhost:
    self.ip = LOCALHOST
    try:
        newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
    except:
        # even catch KeyboardInterrupt
        self.log.error("Could not setup tunnels", exc_info=True)
        self.exit(1)

    self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports

    cf = self.connection_file
    base,ext = os.path.splitext(cf)
    base = os.path.basename(base)
    # NOTE(review): `base` is already a basename here, so the second
    # basename call below is redundant (but harmless).
    self.connection_file = os.path.basename(base)+'-ssh'+ext

    self.log.critical("To connect another client via this tunnel, use:")
    self.log.critical("--existing %s" % self.connection_file)
Classes which mix this class in should call: IPythonConsoleApp. initialize ( self argv )
def initialize(self, argv=None):
    """
    Classes which mix this class in should call:
        IPythonConsoleApp.initialize(self,argv)
    """
    self.init_connection_file()
    # presumably enables secure (signed) sessions by default -- confirm
    # against default_secure's definition
    default_secure(self.config)
    self.init_ssh()
    self.init_kernel_manager()
Return message as dict: return dict
def prepare_message(self, data=None):
    """
    Return message as dict

    :return dict
    """
    # Skeleton message; only keys already present here may be
    # overridden by the caller-supplied data.
    message = dict(
        protocol=self.protocol,
        node=self._node,
        chip_id=self._chip_id,
        event='',
        parameters={},
        response='',
        targets=['ALL'],
    )
    if type(data) is dict:
        overrides = {k: v for k, v in data.items() if k in message}
        message.update(overrides)
    return message
Decode json string to dict. Validate against node name ( targets ) and protocol version: return dict | None
def decode_message(self, message):
    """
    Decode json string to dict.
    Validate against node name(targets) and protocol version

    :return dict | None
    """
    try:
        decoded = json.loads(message)
        valid = self._validate_message(decoded)
    except ValueError:
        # Malformed JSON is treated the same as a failed validation.
        return None
    return decoded if valid else None
: return boolean
def _validate_message(self, message): """:return boolean""" if 'protocol' not in message or 'targets' not in message or \ type(message['targets']) is not list: return False if message['protocol'] != self.protocol: return False if self.node not in message['targets'] and 'ALL' not in message['targets']: return False return True
Pretty print the object's representation.
def pretty(obj, verbose=False, max_width=79, newline='\n'):
    """
    Return a pretty-printed representation of *obj* as a string.
    """
    out = StringIO()
    rp = RepresentationPrinter(out, verbose, max_width, newline)
    rp.pretty(obj)
    rp.flush()
    return out.getvalue()
Like pretty but print to stdout.
def pprint(obj, verbose=False, max_width=79, newline='\n'):
    """
    Like `pretty` but print directly to stdout.
    """
    rp = RepresentationPrinter(sys.stdout, verbose, max_width, newline)
    rp.pretty(obj)
    rp.flush()
    # Terminate the output and push it out immediately.
    sys.stdout.write(newline)
    sys.stdout.flush()
Get a reasonable method resolution order of a class and its superclasses for both old - style and new - style classes.
def _get_mro(obj_class): """ Get a reasonable method resolution order of a class and its superclasses for both old-style and new-style classes. """ if not hasattr(obj_class, '__mro__'): # Old-style class. Mix in object to make a fake new-style class. try: obj_class = type(obj_class.__name__, (obj_class, object), {}) except TypeError: # Old-style extension type that does not descend from object. # FIXME: try to construct a more thorough MRO. mro = [obj_class] else: mro = obj_class.__mro__[1:-1] else: mro = obj_class.__mro__ return mro
The default print function. Used if an object does not provide one and it's none of the builtin objects.
def _default_pprint(obj, p, cycle):
    """
    The default print function.  Used if an object does not provide one
    and it's none of the builtin objects.
    """
    klass = getattr(obj, '__class__', None) or type(obj)
    if getattr(klass, '__repr__', None) not in _baseclass_reprs:
        # A user-provided repr. Use it verbatim.
        p.text(repr(obj))
        return
    # No custom repr: emit an "<ClassName at 0xADDR ...>" style group.
    p.begin_group(1, '<')
    p.pretty(klass)
    p.text(' at 0x%x' % id(obj))
    if cycle:
        p.text(' ...')
    elif p.verbose:
        # Verbose mode: also list public, non-method attributes.
        first = True
        for key in dir(obj):
            if not key.startswith('_'):
                try:
                    value = getattr(obj, key)
                except AttributeError:
                    # Some objects advertise attributes in dir() that
                    # raise on access; skip those.
                    continue
                if isinstance(value, types.MethodType):
                    continue
                if not first:
                    p.text(',')
                p.breakable()
                p.text(key)
                p.text('=')
                # Indent continuation lines past "key=".
                step = len(key) + 1
                p.indentation += step
                p.pretty(value)
                p.indentation -= step
                first = False
    p.end_group(1, '>')
Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples dicts lists sets and frozensets.
def _seq_pprinter_factory(start, end, basetype): """ Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, lists, sets and frozensets. """ def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + '...' + end) step = len(start) p.begin_group(step, start) for idx, x in enumerate(obj): if idx: p.text(',') p.breakable() p.pretty(x) if len(obj) == 1 and type(obj) is tuple: # Special case for 1-item tuples. p.text(',') p.end_group(step, end) return inner
Factory that returns a pprint function used by the default pprint of dicts and dict proxies.
def _dict_pprinter_factory(start, end, basetype=None): """ Factory that returns a pprint function used by the default pprint of dicts and dict proxies. """ def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text('{...}') p.begin_group(1, start) keys = obj.keys() try: keys.sort() except Exception, e: # Sometimes the keys don't sort. pass for idx, key in enumerate(keys): if idx: p.text(',') p.breakable() p.pretty(key) p.text(': ') p.pretty(obj[key]) p.end_group(1, end) return inner
The pprint for the super type.
def _super_pprint(obj, p, cycle):
    """The pprint for the super type.

    Renders ``<super: <class>, <instance>>`` using the super object's
    __self_class__ / __self__ attributes.
    """
    # 8 == len('<super: ') so continuation lines align under the class.
    p.begin_group(8, '<super: ')
    p.pretty(obj.__self_class__)
    p.text(',')
    p.breakable()
    p.pretty(obj.__self__)
    p.end_group(8, '>')
The pprint function for regular expression patterns.
def _re_pattern_pprint(obj, p, cycle): """The pprint function for regular expression patterns.""" p.text('re.compile(') pattern = repr(obj.pattern) if pattern[:1] in 'uU': pattern = pattern[1:] prefix = 'ur' else: prefix = 'r' pattern = prefix + pattern.replace('\\\\', '\\') p.text(pattern) if obj.flags: p.text(',') p.breakable() done_one = False for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', 'UNICODE', 'VERBOSE', 'DEBUG'): if obj.flags & getattr(re, flag): if done_one: p.text('|') p.text('re.' + flag) done_one = True p.text(')')
The pprint for classes and types.
def _type_pprint(obj, p, cycle): """The pprint for classes and types.""" if obj.__module__ in ('__builtin__', 'exceptions'): name = obj.__name__ else: name = obj.__module__ + '.' + obj.__name__ p.text(name)
Base pprint for all functions and builtin functions.
def _function_pprint(obj, p, cycle): """Base pprint for all functions and builtin functions.""" if obj.__module__ in ('__builtin__', 'exceptions') or not obj.__module__: name = obj.__name__ else: name = obj.__module__ + '.' + obj.__name__ p.text('<function %s>' % name)
Base pprint for all exceptions.
def _exception_pprint(obj, p, cycle): """Base pprint for all exceptions.""" if obj.__class__.__module__ in ('exceptions', 'builtins'): name = obj.__class__.__name__ else: name = '%s.%s' % ( obj.__class__.__module__, obj.__class__.__name__ ) step = len(name) + 1 p.begin_group(step, name + '(') for idx, arg in enumerate(getattr(obj, 'args', ())): if idx: p.text(',') p.breakable() p.pretty(arg) p.end_group(step, ')')
Add a pretty printer for a given type.
def for_type(typ, func):
    """
    Register *func* as the pretty printer for *typ*, returning any
    previously registered printer (or None).
    """
    previous = _type_pprinters.get(typ, None)
    # A func of None is ignored so that callers can easily restore an
    # old printer returned by an earlier call.
    if func is not None:
        _type_pprinters[typ] = func
    return previous
Add a pretty printer for a type specified by the module and name of a type rather than the type object itself.
def for_type_by_name(type_module, type_name, func):
    """
    Register *func* as the pretty printer for the type named
    *type_name* in module *type_module*, without importing the type.
    Returns any previously registered printer (or None).
    """
    key = (type_module, type_name)
    previous = _deferred_type_pprinters.get(key, None)
    # A func of None is ignored so that callers can easily restore an
    # old printer returned by an earlier call.
    if func is not None:
        _deferred_type_pprinters[key] = func
    return previous
like begin_group/ end_group but for the with statement.
def group(self, indent=0, open='', close=''):
    """like begin_group / end_group but for the with statement."""
    self.begin_group(indent, open)
    try:
        yield
    finally:
        # Always close the group, even if the with-body raises.
        self.end_group(indent, close)
Add literal text to the output.
def text(self, obj):
    """Add literal text to the output."""
    width = len(obj)
    if self.buffer:
        # Still inside a group: append to the trailing Text chunk in
        # the buffer (starting a new one if the last chunk isn't Text)
        # rather than writing to the stream directly.
        text = self.buffer[-1]
        if not isinstance(text, Text):
            text = Text()
            self.buffer.append(text)
        text.add(obj, width)
        self.buffer_width += width
        self._break_outer_groups()
    else:
        # No pending buffered content: write straight through.
        self.output.write(obj)
        self.output_width += width
Add a breakable separator to the output. This does not mean that it will automatically break here. If no breaking on this position takes place the sep is inserted which default to one space.
def breakable(self, sep=' '):
    """
    Add a breakable separator to the output.  This does not mean that it
    will automatically break here.  If no breaking on this position takes
    place the `sep` is inserted which default to one space.
    """
    width = len(sep)
    group = self.group_stack[-1]
    if group.want_break:
        # The enclosing group has decided to break: flush buffered
        # content and start a fresh line at the current indentation.
        self.flush()
        self.output.write(self.newline)
        self.output.write(' ' * self.indentation)
        self.output_width = self.indentation
        self.buffer_width = 0
    else:
        # Defer the decision: queue a Breakable which may later be
        # turned into a line break by the group logic.
        self.buffer.append(Breakable(sep, width, self))
        self.buffer_width += width
        self._break_outer_groups()
Begin a group. If you want support for python < 2. 5 which doesn t has the with statement this is the preferred way:
def begin_group(self, indent=0, open=''):
    """
    Begin a group.  If you want support for python < 2.5 which doesn't has
    the with statement this is the preferred way:

        p.begin_group(1, '{')
        ...
        p.end_group(1, '}')

    The python 2.5 expression would be this:

        with p.group(1, '{', '}'):
            ...

    The first parameter specifies the indentation for the next line (usually
    the width of the opening text), the second the opening text.  All
    parameters are optional.
    """
    if open:
        self.text(open)
    # Track the new group on both the stack (consumed by end_group) and
    # the queue (consulted when deciding where to break lines).
    group = Group(self.group_stack[-1].depth + 1)
    self.group_stack.append(group)
    self.group_queue.enq(group)
    self.indentation += indent
End a group. See begin_group for more details.
def end_group(self, dedent=0, close=''):
    """Close the innermost open group.  See `begin_group`."""
    self.indentation -= dedent
    finished = self.group_stack.pop()
    if not finished.breakables:
        # Nothing breakable was queued for this group; drop it from the
        # break queue as well.
        self.group_queue.remove(finished)
    if close:
        self.text(close)
Flush data that is left in the buffer.
def flush(self):
    """Write out anything still sitting in the buffer."""
    for pending in self.buffer:
        self.output_width += pending.output(self.output, self.output_width)
    self.buffer.clear()
    self.buffer_width = 0
Pretty print the given object.
def pretty(self, obj):
    """Pretty print the given object.

    Dispatch order: singleton printers, then registered type printers
    walking the MRO, then deferred (by-name) printers, then a
    `_repr_pretty_` method on the object's class, and finally the
    generic `_default_pprint`.
    """
    obj_id = id(obj)
    # Cycle detection: if this object's id is already on the stack we
    # are recursing into ourselves.
    cycle = obj_id in self.stack
    self.stack.append(obj_id)
    self.begin_group()
    try:
        obj_class = getattr(obj, '__class__', None) or type(obj)
        # First try to find registered singleton printers for the type.
        try:
            printer = self.singleton_pprinters[obj_id]
        except (TypeError, KeyError):
            pass
        else:
            return printer(obj, self, cycle)
        # Next walk the mro and check for either:
        #   1) a registered printer
        #   2) a _repr_pretty_ method
        for cls in _get_mro(obj_class):
            if cls in self.type_pprinters:
                # printer registered in self.type_pprinters
                return self.type_pprinters[cls](obj, self, cycle)
            else:
                # deferred printer
                printer = self._in_deferred_types(cls)
                if printer is not None:
                    return printer(obj, self, cycle)
        else:
            # Finally look for special method names.
            # Some objects automatically create any requested
            # attribute. Try to ignore most of them by checking for
            # callability.
            if '_repr_pretty_' in obj_class.__dict__:
                meth = obj_class._repr_pretty_
                if callable(meth):
                    return meth(obj, self, cycle)
        return _default_pprint(obj, self, cycle)
    finally:
        # Always balance the group and pop the cycle-detection stack,
        # whichever return path was taken.
        self.end_group()
        self.stack.pop()
Check if the given class is specified in the deferred type registry.
def _in_deferred_types(self, cls):
    """
    Look *cls* up in the deferred (module-name, type-name) registry.

    Returns the registered printer if one exists, else None.  A hit is
    promoted into the regular type registry so future lookups hit it
    directly.
    """
    key = (getattr(cls, '__module__', None), getattr(cls, '__name__', None))
    if key in self.deferred_pprinters:
        # Move the printer over to the regular registry.
        printer = self.deferred_pprinters.pop(key)
        self.type_pprinters[cls] = printer
        return printer
    return None
Return a color table with fields for exception reporting.
def exception_colors():
    """Return a color table with fields for exception reporting.

    The table is an instance of ColorSchemeTable with schemes added for
    'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
    in.

    Examples:

    >>> ec = exception_colors()
    >>> ec.active_scheme_name
    ''
    >>> print ec.active_colors
    None

    Now we activate a color scheme:
    >>> ec.set_active_scheme('NoColor')
    >>> ec.active_scheme_name
    'NoColor'
    >>> sorted(ec.active_colors.keys())
    ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
    'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
    'val', 'valEm']
    """
    ex_colors = ColorSchemeTable()

    # Populate it with color schemes
    C = TermColors  # shorthand and local lookup
    ex_colors.add_scheme(ColorScheme(
        'NoColor',
        # The color to be used for the top line
        topline=C.NoColor,

        # The colors to be used in the traceback
        filename=C.NoColor,
        lineno=C.NoColor,
        name=C.NoColor,
        vName=C.NoColor,
        val=C.NoColor,
        em=C.NoColor,

        # Emphasized colors for the last frame of the traceback
        normalEm=C.NoColor,
        filenameEm=C.NoColor,
        linenoEm=C.NoColor,
        nameEm=C.NoColor,
        valEm=C.NoColor,

        # Colors for printing the exception
        excName=C.NoColor,
        line=C.NoColor,
        caret=C.NoColor,
        Normal=C.NoColor
    ))

    # make some schemes as instances so we can copy them for modification easily
    ex_colors.add_scheme(ColorScheme(
        'Linux',
        # The color to be used for the top line
        topline=C.LightRed,

        # The colors to be used in the traceback
        filename=C.Green,
        lineno=C.Green,
        name=C.Purple,
        vName=C.Cyan,
        val=C.Green,
        em=C.LightCyan,

        # Emphasized colors for the last frame of the traceback
        normalEm=C.LightCyan,
        filenameEm=C.LightGreen,
        linenoEm=C.LightGreen,
        nameEm=C.LightPurple,
        valEm=C.LightBlue,

        # Colors for printing the exception
        excName=C.LightRed,
        line=C.Yellow,
        caret=C.White,
        Normal=C.Normal
    ))

    # For light backgrounds, swap dark/light colors
    ex_colors.add_scheme(ColorScheme(
        'LightBG',
        # The color to be used for the top line
        topline=C.Red,

        # The colors to be used in the traceback
        filename=C.LightGreen,
        lineno=C.LightGreen,
        name=C.LightPurple,
        vName=C.Cyan,
        val=C.LightGreen,
        em=C.Cyan,

        # Emphasized colors for the last frame of the traceback
        normalEm=C.Cyan,
        filenameEm=C.Green,
        linenoEm=C.Green,
        nameEm=C.Purple,
        valEm=C.Blue,

        # Colors for printing the exception
        excName=C.Red,
        # line = C.Brown,  # brown often is displayed as yellow
        line=C.Red,
        caret=C.Normal,
        Normal=C.Normal,
    ))

    return ex_colors
As patterns () in django.
def patterns(prefix, *args):
    """As patterns() in django."""
    resolved = []
    for entry in args:
        if isinstance(entry, (list, tuple)):
            # A bare (regex, view, ...) tuple: build the pattern here.
            entry = url(prefix=prefix, *entry)
        elif isinstance(entry, RegexURLPattern):
            entry.add_prefix(prefix)
        resolved.append(entry)
    return resolved
As url () in Django.
def url(regex, view, kwargs=None, name=None, prefix=''):
    """As url() in Django.

    *view* may be an include() triple (urlconf_module, app_name,
    namespace), a dotted-path string, or a callable.
    """
    if isinstance(view, (list, tuple)):
        # For include(...) processing.
        urlconf_module, app_name, namespace = view
        return URLResolver(regex, urlconf_module, kwargs,
                           app_name=app_name, namespace=namespace)
    else:
        if isinstance(view, six.string_types):
            # Dotted view-name string: qualify with prefix, then resolve
            # it to a callable.
            if not view:
                raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
            if prefix:
                view = prefix + '.' + view
            view = get_callable(view)
        return CBVRegexURLPattern(regex, view, kwargs, name)
Prepare columns in new ods file create new sheet for metadata set columns color and width. Set formatting style info in your settings. py file in ~/. c3po/ folder.
def _prepare_ods_columns(ods, trans_title_row):
    """
    Prepare columns in new ods file, create new sheet for metadata,
    set columns color and width.
    Set formatting style info in your settings.py file in ~/.c3po/ folder.
    """
    ods.content.getSheet(0).setSheetName('Translations')
    ods.content.makeSheet('Meta options')
    # NOTE(review): presumably makeSheet switches the active sheet, so
    # the next calls style the 'Meta options' sheet -- confirm against
    # the ODS library.
    ods.content.getColumn(0).setWidth('5.0in')
    ods.content.getCell(0, 0).stringValue('metadata')\
        .setCellColor(settings.TITLE_ROW_BG_COLOR) \
        .setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)

    # Switch back to the translations sheet and style its title row.
    ods.content.getSheet(0)
    ods.content.getColumn(0).setWidth('1.5in')
    ods.content.getCell(0, 0) \
        .setCellColor(settings.TITLE_ROW_BG_COLOR) \
        .setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)

    for i, title in enumerate(trans_title_row):
        ods.content.getColumn(i).setWidth(settings.MSGSTR_COLUMN_WIDTH)
        ods.content.getCell(i, 0).stringValue(title)\
            .setCellColor(settings.TITLE_ROW_BG_COLOR) \
            .setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)
    # Column 0 (file/notes) gets its own, narrower width.
    ods.content.getColumn(0).setWidth(settings.NOTES_COLUMN_WIDTH)
Write translations from po files into ods one file. Assumes a directory structure: <locale_root >/ <lang >/ <po_files_path >/ <filename >.
def _write_trans_into_ods(ods, languages, locale_root, po_files_path,
                          po_filename, start_row):
    """
    Write translations from po files into ods one file.
    Assumes a directory structure:
    <locale_root>/<lang>/<po_files_path>/<filename>.

    languages[0] is the base language (already written elsewhere); the
    remaining languages each fill one column starting at column 4.
    """
    ods.content.getSheet(0)
    for i, lang in enumerate(languages[1:]):
        lang_po_path = os.path.join(locale_root, lang,
                                    po_files_path, po_filename)
        # Missing po file for a language simply leaves its column empty.
        if os.path.exists(lang_po_path):
            po_file = polib.pofile(lang_po_path)
            for j, entry in enumerate(po_file):
                # start from 4th column, 1st row
                row = j + start_row
                ods.content.getCell(i+4, row).stringValue(
                    _escape_apostrophe(entry.msgstr))
                # Alternate column background colours for readability.
                if i % 2 == 1:
                    ods.content.getCell(i+4, row).setCellColor(
                        settings.ODD_COLUMN_BG_COLOR)
                else:
                    ods.content.getCell(i+4, row).setCellColor(
                        settings.EVEN_COLUMN_BG_COLOR)
Write row with translations to ods file into specified sheet and row_no.
def _write_row_into_ods(ods, sheet_no, row_no, row):
    """
    Write one row of translations into sheet *sheet_no* at *row_no*
    (offset by one past the title row), alternating column colours.
    """
    ods.content.getSheet(sheet_no)
    for col_idx, value in enumerate(row):
        cell = ods.content.getCell(col_idx, row_no + 1)
        cell.stringValue(_escape_apostrophe(value))
        colour = (settings.EVEN_COLUMN_BG_COLOR if col_idx % 2
                  else settings.ODD_COLUMN_BG_COLOR)
        cell.setCellColor(colour)
Converts po file to csv GDocs spreadsheet readable format.: param languages: list of language codes: param locale_root: path to locale root folder containing directories with languages: param po_files_path: path from lang directory to po file: param temp_file_path: path where temporary files will be saved
def po_to_ods(languages, locale_root, po_files_path, temp_file_path):
    """
    Converts po file to csv GDocs spreadsheet readable format.

    :param languages: list of language codes
    :param locale_root: path to locale root folder containing directories
                        with languages
    :param po_files_path: path from lang directory to po file
    :param temp_file_path: path where temporary files will be saved
    """
    title_row = ['file', 'comment', 'msgid']
    title_row += map(lambda s: s + ':msgstr', languages)

    ods = ODS()
    _prepare_ods_columns(ods, title_row)

    # Use the first language as the reference for which po files exist.
    po_files = _get_all_po_filenames(locale_root, languages[0],
                                     po_files_path)

    i = 1  # row 0 is the title row
    for po_filename in po_files:
        po_file_path = os.path.join(locale_root, languages[0],
                                    po_files_path, po_filename)
        start_row = i
        po = polib.pofile(po_file_path)
        for entry in po:
            # Sheet 1 keeps the entry's remaining metadata (everything
            # except msgid/msgstr/tcomment) as a stringified dict.
            meta = dict(entry.__dict__)
            meta.pop('msgid', None)
            meta.pop('msgstr', None)
            meta.pop('tcomment', None)
            ods.content.getSheet(1)
            ods.content.getCell(0, i).stringValue(
                str(meta)).setCellColor(settings.EVEN_COLUMN_BG_COLOR)

            # Sheet 0 holds filename, comment, msgid and the base msgstr.
            ods.content.getSheet(0)
            ods.content.getCell(0, i) \
                .stringValue(po_filename) \
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            ods.content.getCell(1, i) \
                .stringValue(_escape_apostrophe(entry.tcomment)) \
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            ods.content.getCell(2, i) \
                .stringValue(_escape_apostrophe(entry.msgid)) \
                .setCellColor(settings.EVEN_COLUMN_BG_COLOR)
            ods.content.getCell(3, i) \
                .stringValue(_escape_apostrophe(entry.msgstr))\
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            i += 1

        # Fill in the other languages' msgstr columns for this file.
        _write_trans_into_ods(ods, languages, locale_root, po_files_path,
                              po_filename, start_row)

    ods.save(temp_file_path)
Converts csv files to one ods file: param trans_csv: path to csv file with translations: param meta_csv: path to csv file with metadata: param local_ods: path to new ods file
def csv_to_ods(trans_csv, meta_csv, local_ods):
    """
    Converts csv files to one ods file.

    :param trans_csv: path to csv file with translations
    :param meta_csv: path to csv file with metadata
    :param local_ods: path to new ods file
    """
    trans_reader = UnicodeReader(trans_csv)
    meta_reader = UnicodeReader(meta_csv)

    ods = ODS()

    # First rows are headers; only the translations header is used for
    # column setup, the metadata header is just skipped.
    # (Python 2 iterator protocol: .next())
    trans_title = trans_reader.next()
    meta_reader.next()
    _prepare_ods_columns(ods, trans_title)

    # Write translation and metadata rows in lockstep.
    for i, (trans_row, meta_row) in enumerate(izip(trans_reader,
                                                   meta_reader)):
        _write_row_into_ods(ods, 0, i, trans_row)
        _write_row_into_ods(ods, 1, i, meta_row)

    trans_reader.close()
    meta_reader.close()

    ods.save(local_ods)
Get the current clipboard s text on Windows.
def win32_clipboard_get():
    """ Get the current clipboard's text on Windows.

    Requires Mark Hammond's pywin32 extensions.

    :raises TryNext: if pywin32 is not installed.
    """
    try:
        import win32clipboard
    except ImportError:
        raise TryNext("Getting text from the clipboard requires the pywin32 "
                      "extensions: http://sourceforge.net/projects/pywin32/")
    win32clipboard.OpenClipboard()
    try:
        text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
    finally:
        # Always release the clipboard, even if GetClipboardData raises
        # (the original leaked the open clipboard on error).
        win32clipboard.CloseClipboard()
    # FIXME: convert \r\n to \n?
    return text
Get the clipboard's text on OS X.
def osx_clipboard_get():
    """ Get the clipboard's text on OS X.
    """
    proc = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
                            stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    # pbpaste emits classic-Mac '\r' line endings; normalise to '\n'.
    return stdout.replace('\r', '\n')
Get the clipboard s text using Tkinter.
def tkinter_clipboard_get():
    """ Get the clipboard's text using Tkinter.

    This is the default on systems that are not Windows or OS X. It may
    interfere with other UI toolkits and should be replaced with an
    implementation that uses that toolkit.

    :raises TryNext: if Tkinter is not available.
    """
    try:
        import Tkinter
    except ImportError:
        raise TryNext("Getting text from the clipboard on this platform "
                      "requires Tkinter.")
    root = Tkinter.Tk()
    root.withdraw()
    try:
        text = root.clipboard_get()
    finally:
        # Always tear down the hidden Tk root, even when clipboard_get
        # raises (e.g. TclError on an empty clipboard) -- the original
        # leaked the Tk instance in that case.
        root.destroy()
    return text
Returns a safe build_prefix
def _get_build_prefix():
    """ Returns a safe build_prefix

    Creates (or validates) a per-user directory under the system temp
    dir and refuses to use it if it is a symlink or owned by someone
    else, to avoid tmp-dir hijacking.
    """
    path = os.path.join(
        tempfile.gettempdir(),
        'pip_build_%s' % __get_username().replace(' ', '_')
    )
    if WINDOWS:
        """ on windows(tested on 7) temp dirs are isolated """
        return path
    try:
        os.mkdir(path)
        write_delete_marker_file(path)
    except OSError:
        # Directory already exists (or mkdir failed): verify ownership
        # before trusting it.
        file_uid = None
        try:
            # raises OSError for symlinks
            # https://github.com/pypa/pip/pull/935#discussion_r5307003
            file_uid = get_path_uid(path)
        except OSError:
            file_uid = None

        if file_uid != os.geteuid():
            msg = (
                "The temporary folder for building (%s) is either not owned by"
                " you, or is a symlink." % path
            )
            print(msg)
            print(
                "pip will not work until the temporary folder is either "
                "deleted or is a real directory owned by your user account."
            )
            raise exceptions.InstallationError(msg)
    return path
Find the subdomain rank ( tuple ) for each processor and determine the neighbor info.
def prepare_communication (self):
    """
    Find the subdomain rank (tuple) for each processor and
    determine the neighbor info.
    """
    nsd_ = self.nsd
    if nsd_ < 1:
        print('Number of space dimensions is %d, nothing to do' % nsd_)
        return

    self.subd_rank = [-1, -1, -1]
    self.subd_lo_ix = [-1, -1, -1]
    self.subd_hi_ix = [-1, -1, -1]
    self.lower_neighbors = [-1, -1, -1]
    self.upper_neighbors = [-1, -1, -1]

    num_procs = self.num_procs
    my_id = self.my_id
    num_subds = 1
    for i in range(nsd_):
        num_subds = num_subds * self.num_parts[i]
    if my_id == 0:
        print("# subds=", num_subds)
    # should check num_subds against num_procs

    offsets = [1, 0, 0]

    # find the subdomain rank
    # NOTE: the `/` divisions below rely on Python 2 integer division.
    self.subd_rank[0] = my_id % self.num_parts[0]
    if nsd_ >= 2:
        offsets[1] = self.num_parts[0]
        self.subd_rank[1] = my_id / offsets[1]
    if nsd_ == 3:
        offsets[1] = self.num_parts[0]
        offsets[2] = self.num_parts[0] * self.num_parts[1]
        self.subd_rank[1] = (my_id % offsets[2]) / self.num_parts[0]
        self.subd_rank[2] = my_id / offsets[2]

    print("my_id=%d, subd_rank: " % my_id, self.subd_rank)
    if my_id == 0:
        print("offsets=", offsets)

    # find the neighbor ids and this subdomain's index range per axis
    for i in range(nsd_):
        rank = self.subd_rank[i]
        if rank > 0:
            self.lower_neighbors[i] = my_id - offsets[i]
        if rank < self.num_parts[i] - 1:
            self.upper_neighbors[i] = my_id + offsets[i]

        # k cells per part, with the remainder m spread over the last
        # m parts (load balancing), plus one cell of overlap between
        # neighboring subdomains.
        k = self.global_num_cells[i] / self.num_parts[i]
        m = self.global_num_cells[i] % self.num_parts[i]

        ix = rank * k + max(0, rank + m - self.num_parts[i])
        self.subd_lo_ix[i] = ix

        ix = ix + k
        if rank >= (self.num_parts[i] - m):
            ix = ix + 1   # load balancing
        if rank < self.num_parts[i] - 1:
            ix = ix + 1   # one cell of overlap
        self.subd_hi_ix[i] = ix

    print("subd_rank:", self.subd_rank,
          "lower_neig:", self.lower_neighbors,
          "upper_neig:", self.upper_neighbors)
    print("subd_rank:", self.subd_rank, "subd_lo_ix:", self.subd_lo_ix,
          "subd_hi_ix:", self.subd_hi_ix)
Prepare the buffers to be used for later communications
def prepare_communication (self):
    """
    Prepare the buffers to be used for later communications.

    1D case: one scalar exchange buffer per existing neighbor.
    """
    RectPartitioner.prepare_communication (self)

    if self.lower_neighbors[0] >= 0:
        self.in_lower_buffers = [zeros(1, float)]
        self.out_lower_buffers = [zeros(1, float)]
    if self.upper_neighbors[0] >= 0:
        self.in_upper_buffers = [zeros(1, float)]
        self.out_upper_buffers = [zeros(1, float)]
Prepare the buffers to be used for later communications
def prepare_communication (self):
    """
    Prepare the buffers to be used for later communications.

    2D case: one boundary-column/row sized buffer per existing neighbor
    in each direction.
    """
    RectPartitioner.prepare_communication (self)

    self.in_lower_buffers = [[], []]
    self.out_lower_buffers = [[], []]
    self.in_upper_buffers = [[], []]
    self.out_upper_buffers = [[], []]

    # x-direction neighbors exchange whole columns (size of the y extent)
    size1 = self.subd_hi_ix[1] - self.subd_lo_ix[1] + 1
    if self.lower_neighbors[0] >= 0:
        self.in_lower_buffers[0] = zeros(size1, float)
        self.out_lower_buffers[0] = zeros(size1, float)
    if self.upper_neighbors[0] >= 0:
        self.in_upper_buffers[0] = zeros(size1, float)
        self.out_upper_buffers[0] = zeros(size1, float)

    # y-direction neighbors exchange whole rows (size of the x extent)
    size0 = self.subd_hi_ix[0] - self.subd_lo_ix[0] + 1
    if self.lower_neighbors[1] >= 0:
        self.in_lower_buffers[1] = zeros(size0, float)
        self.out_lower_buffers[1] = zeros(size0, float)
    if self.upper_neighbors[1] >= 0:
        self.in_upper_buffers[1] = zeros(size0, float)
        self.out_upper_buffers[1] = zeros(size0, float)
update the inner boundary with the same send/ recv pattern as the MPIPartitioner
def update_internal_boundary_x_y (self, solution_array):
    """Update the inner boundary with the same send/recv pattern as the
    MPIPartitioner.

    Exchanges the one-cell overlap region with the four neighbors,
    x-direction first, then y-direction.
    """
    nsd_ = self.nsd
    dtype = solution_array.dtype
    # Guard: buffers must have one entry per space dimension.
    # BUGFIX: the original used `a != x | b != y`, but `|` binds tighter
    # than `!=`, so this parsed as a chained comparison against
    # `len(...) | nsd_` instead of the intended boolean OR.
    if nsd_ != len(self.in_lower_buffers) or nsd_ != len(self.out_lower_buffers):
        print("Buffers for communicating with lower neighbors not ready")
        return
    if nsd_ != len(self.in_upper_buffers) or nsd_ != len(self.out_upper_buffers):
        print("Buffers for communicating with upper neighbors not ready")
        return

    loc_nx = self.subd_hi_ix[0] - self.subd_lo_ix[0]
    loc_ny = self.subd_hi_ix[1] - self.subd_lo_ix[1]

    lower_x_neigh = self.lower_neighbors[0]
    upper_x_neigh = self.upper_neighbors[0]
    lower_y_neigh = self.lower_neighbors[1]
    upper_y_neigh = self.upper_neighbors[1]
    trackers = []
    flags = dict(copy=False, track=False)

    # communicate in the x-direction first
    if lower_x_neigh > -1:
        if self.slice_copy:
            self.out_lower_buffers[0] = ascontiguousarray(solution_array[1, :])
        else:
            for i in xrange(0, loc_ny+1):
                self.out_lower_buffers[0][i] = solution_array[1, i]
        t = self.comm.west.send(self.out_lower_buffers[0], **flags)
        trackers.append(t)

    if upper_x_neigh > -1:
        msg = self.comm.east.recv(copy=False)
        self.in_upper_buffers[0] = frombuffer(msg, dtype=dtype)
        if self.slice_copy:
            solution_array[loc_nx, :] = self.in_upper_buffers[0]
            self.out_upper_buffers[0] = ascontiguousarray(solution_array[loc_nx-1, :])
        else:
            for i in xrange(0, loc_ny+1):
                solution_array[loc_nx, i] = self.in_upper_buffers[0][i]
                self.out_upper_buffers[0][i] = solution_array[loc_nx-1, i]
        t = self.comm.east.send(self.out_upper_buffers[0], **flags)
        trackers.append(t)

    if lower_x_neigh > -1:
        msg = self.comm.west.recv(copy=False)
        self.in_lower_buffers[0] = frombuffer(msg, dtype=dtype)
        if self.slice_copy:
            solution_array[0, :] = self.in_lower_buffers[0]
        else:
            for i in xrange(0, loc_ny+1):
                solution_array[0, i] = self.in_lower_buffers[0][i]

    # communicate in the y-direction afterwards
    if lower_y_neigh > -1:
        if self.slice_copy:
            self.out_lower_buffers[1] = ascontiguousarray(solution_array[:, 1])
        else:
            for i in xrange(0, loc_nx+1):
                self.out_lower_buffers[1][i] = solution_array[i, 1]
        t = self.comm.south.send(self.out_lower_buffers[1], **flags)
        trackers.append(t)

    if upper_y_neigh > -1:
        msg = self.comm.north.recv(copy=False)
        self.in_upper_buffers[1] = frombuffer(msg, dtype=dtype)
        if self.slice_copy:
            solution_array[:, loc_ny] = self.in_upper_buffers[1]
            self.out_upper_buffers[1] = ascontiguousarray(solution_array[:, loc_ny-1])
        else:
            for i in xrange(0, loc_nx+1):
                solution_array[i, loc_ny] = self.in_upper_buffers[1][i]
                self.out_upper_buffers[1][i] = solution_array[i, loc_ny-1]
        t = self.comm.north.send(self.out_upper_buffers[1], **flags)
        trackers.append(t)

    if lower_y_neigh > -1:
        msg = self.comm.south.recv(copy=False)
        self.in_lower_buffers[1] = frombuffer(msg, dtype=dtype)
        if self.slice_copy:
            solution_array[:, 0] = self.in_lower_buffers[1]
        else:
            for i in xrange(0, loc_nx+1):
                solution_array[i, 0] = self.in_lower_buffers[1][i]

    # wait for sends to complete (only if tracking was requested; with
    # flags['track'] == False this loop never runs):
    if flags['track']:
        for t in trackers:
            t.wait()
Rekey a dict that has been forced to use str keys where there should be ints by json.
def rekey(dikt):
    """Rekey a dict that has been forced to use str keys where there
    should be ints by json.

    Mutates *dikt* in place and also returns it.

    :raises KeyError: if a numeric key would collide with an existing one.
    """
    # Iterate over a snapshot of the keys: the loop pops and inserts
    # keys, and mutating a dict while iterating over it directly (the
    # original used iterkeys()) is unsafe.
    for k in list(dikt):
        if isinstance(k, basestring):
            ik = fk = None
            try:
                ik = int(k)
            except ValueError:
                try:
                    fk = float(k)
                except ValueError:
                    # Not numeric at all: leave the key alone.
                    continue
            if ik is not None:
                nk = ik
            else:
                nk = fk
            if nk in dikt:
                raise KeyError("already have key %r" % nk)
            dikt[nk] = dikt.pop(k)
    return dikt
extract ISO8601 dates from unpacked JSON
def extract_dates(obj):
    """extract ISO8601 dates from unpacked JSON

    Recursively walks dicts/lists/tuples, converting any string that
    matches ISO8601_PAT into a datetime.
    """
    if isinstance(obj, dict):
        obj = dict(obj)  # don't clobber
        for k, v in obj.iteritems():
            obj[k] = extract_dates(v)
    elif isinstance(obj, (list, tuple)):
        obj = [ extract_dates(o) for o in obj ]
    elif isinstance(obj, basestring):
        if ISO8601_PAT.match(obj):
            obj = datetime.strptime(obj, ISO8601)
    return obj
squash datetime objects into ISO8601 strings
def squash_dates(obj):
    """squash datetime objects into ISO8601 strings"""
    if isinstance(obj, datetime):
        return obj.strftime(ISO8601)
    if isinstance(obj, dict):
        squashed = dict(obj)  # don't clobber the caller's dict
        for key, value in squashed.iteritems():
            squashed[key] = squash_dates(value)
        return squashed
    if isinstance(obj, (list, tuple)):
        return [squash_dates(item) for item in obj]
    return obj
default function for packing datetime objects in JSON.
def date_default(obj):
    """default function for packing datetime objects in JSON."""
    if not isinstance(obj, datetime):
        raise TypeError("%r is not JSON serializable" % obj)
    return obj.strftime(ISO8601)
b64 - encodes images in a displaypub format dict Perhaps this should be handled in json_clean itself? Parameters ---------- format_dict: dict A dictionary of display data keyed by mime - type Returns ------- format_dict: dict A copy of the same dictionary but binary image data ( image/ png or image/ jpeg ) is base64 - encoded.
def encode_images(format_dict):
    """b64-encodes images in a displaypub format dict

    Perhaps this should be handled in json_clean itself?

    Parameters
    ----------
    format_dict : dict
        A dictionary of display data keyed by mime-type

    Returns
    -------
    format_dict : dict
        A copy of the same dictionary, but binary image data
        ('image/png' or 'image/jpeg') is base64-encoded.
    """
    encoded = format_dict.copy()

    png = format_dict.get('image/png')
    # Only encode raw bytes that carry the PNG magic header.
    if isinstance(png, bytes) and png[:8] == PNG:
        encoded['image/png'] = encodestring(png).decode('ascii')

    jpeg = format_dict.get('image/jpeg')
    # Only encode raw bytes that carry the JPEG magic marker.
    if isinstance(jpeg, bytes) and jpeg[:2] == JPEG:
        encoded['image/jpeg'] = encodestring(jpeg).decode('ascii')

    return encoded
Clean an object to ensure it s safe to encode in JSON. Atomic immutable objects are returned unmodified. Sets and tuples are converted to lists lists are copied and dicts are also copied.
def json_clean(obj):
    """Clean an object to ensure it's safe to encode in JSON.

    Atomic, immutable objects are returned unmodified.  Sets and tuples are
    converted to lists, lists are copied and dicts are also copied.

    Note: dicts whose keys could cause collisions upon encoding (such as a dict
    with both the number 1 and the string '1' as keys) will cause a ValueError
    to be raised.

    Parameters
    ----------
    obj : any python object

    Returns
    -------
    out : object
        A version of the input which will not cause an encoding error when
        encoded as JSON.  Note that this function does not *encode* its
        inputs, it simply sanitizes it so that there will be no encoding
        errors later.

    Examples
    --------
    >>> json_clean(4)
    4
    >>> json_clean(range(10))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> sorted(json_clean(dict(x=1, y=2)).items())
    [('x', 1), ('y', 2)]
    >>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())
    [('x', 1), ('y', 2), ('z', [1, 2, 3])]
    >>> json_clean(True)
    True
    """
    # NOTE(review): unicode, types.NoneType and iteritems below are
    # Python 2 only -- this module targets py2.
    # types that are 'atomic' and ok in json as-is.  bool doesn't need to be
    # listed explicitly because bools pass as int instances
    atomic_ok = (unicode, int, types.NoneType)

    # containers that we need to convert into lists
    container_to_list = (tuple, set, types.GeneratorType)

    if isinstance(obj, float):
        # cast out-of-range floats to their reprs, since nan/inf are not
        # valid JSON numbers
        if math.isnan(obj) or math.isinf(obj):
            return repr(obj)
        return obj

    if isinstance(obj, atomic_ok):
        return obj

    if isinstance(obj, bytes):
        # bytes are decoded (lossily, with 'replace') rather than rejected
        return obj.decode(DEFAULT_ENCODING, 'replace')

    # next_attr_name is a module-level constant -- presumably 'next' or
    # '__next__' depending on the interpreter; TODO confirm at its definition
    if isinstance(obj, container_to_list) or (
        hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
        obj = list(obj)

    if isinstance(obj, list):
        return [json_clean(x) for x in obj]

    if isinstance(obj, dict):
        # First, validate that the dict won't lose data in conversion due to
        # key collisions after stringification.  This can happen with keys like
        # True and 'true' or 1 and '1', which collide in JSON.
        nkeys = len(obj)
        nkeys_collapsed = len(set(map(str, obj)))
        if nkeys != nkeys_collapsed:
            raise ValueError('dict can not be safely converted to JSON: '
                             'key collision would lead to dropped values')
        # If all OK, proceed by making the new dict that will be json-safe
        out = {}
        for k,v in obj.iteritems():
            out[str(k)] = json_clean(v)
        return out

    # If we get here, we don't know how to handle the object, so we just get
    # its repr and return that.  This will catch lambdas, open sockets, class
    # objects, and any other complicated contraption that json can't encode
    return repr(obj)
Verify that self.install_dir is a .pth-capable dir, if needed
def check_site_dir(self):
    """Verify that self.install_dir is .pth-capable dir, if needed

    Side effects: normalizes and re-assigns self.install_dir, and sets
    self.pth_file / self.sitepy_installed according to what kind of
    directory the target turns out to be.

    Raises DistutilsError when installing a non-multi-version package
    into a directory that does not process .pth files.
    """
    instdir = normalize_path(self.install_dir)
    pth_file = os.path.join(instdir, 'easy-install.pth')

    # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
    is_site_dir = instdir in self.all_site_dirs

    if not is_site_dir and not self.multi_version:
        # No?  Then directly test whether it does .pth file processing
        is_site_dir = self.check_pth_processing()
    else:
        # make sure we can write to target dir by round-tripping a
        # throwaway test file
        testfile = self.pseudo_tempname() + '.write-test'
        test_exists = os.path.exists(testfile)
        try:
            if test_exists:
                os.unlink(testfile)
            open(testfile, 'w').close()
            os.unlink(testfile)
        except (OSError, IOError):
            self.cant_write_to_target()

    if not is_site_dir and not self.multi_version:
        # Can't install non-multi to non-site dir
        raise DistutilsError(self.no_default_version_msg())

    if is_site_dir:
        # lazily create the .pth manager the first time we need it
        if self.pth_file is None:
            self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
    else:
        self.pth_file = None

    PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
        # only PYTHONPATH dirs need a site.py, so pretend it's there
        self.sitepy_installed = True
    elif self.multi_version and not os.path.exists(pth_file):
        self.sitepy_installed = True  # don't need site.py in this case
        self.pth_file = None  # and don't create a .pth file

    # store the normalized path back so later steps see a canonical dir
    self.install_dir = instdir
Write an executable file to the scripts directory
def write_script(self, script_name, contents, mode="t", *ignored):
    """Write an executable file to the scripts directory.

    Parameters
    ----------
    script_name : str
        Name of the script file to create under self.install_dir.
    contents : str
        The full text of the script.
    mode : str
        Suffix for the open() mode ("t" for text, "b" for binary).
    *ignored
        Extra positional arguments accepted (and discarded) for
        interface compatibility with other write_script implementations.
    """
    from setuptools.command.easy_install import chmod, current_umask
    log.info("Installing %s script to %s", script_name, self.install_dir)
    target = os.path.join(self.install_dir, script_name)
    self.outfiles.append(target)
    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        # 'with' guarantees the handle is closed even if write() raises,
        # fixing the descriptor leak of the bare open()/close() pair.
        with open(target, "w" + mode) as f:
            f.write(contents)
        # 0o777 is the Python 2.6+/3.x spelling of the old 0777 octal
        # literal, which is a SyntaxError on Python 3.
        chmod(target, 0o777 - mask)
simple function that takes args, prints a short message, sleeps for a time, and returns the same args
def sleep_here(count, t):
    """simple function that takes args, prints a short message, sleeps
    for a time, and returns the same args
    """
    import sys
    import time
    # 'id' is presumably the engine id pushed into the namespace by the
    # controller -- TODO confirm; as the bare builtin, %i would fail here.
    print("hi from engine %i" % id)
    sys.stdout.flush()
    time.sleep(t)
    return (count, t)
Save the args and kwargs to get/post/put/delete for future use.
def _save_method_args(self, *args, **kwargs): """Save the args and kwargs to get/post/put/delete for future use. These arguments are not saved in the request or handler objects, but are often needed by methods such as get_stream(). """ self._method_args = args self._method_kwargs = kwargs
Set up any environment changes requested (e.g., Python path and Django settings), then run this command.
def run_from_argv(self, argv):
    """
    Set up any environment changes requested (e.g., Python path
    and Django settings), then run this command.
    """
    prog_name, subcommand = argv[0], argv[1]
    parser = self.create_parser(prog_name, subcommand)
    # remember the parsed namespace on self before dispatching
    self.arguments = parser.parse_args(argv[2:])
    handle_default_options(self.arguments)
    self.execute(**vars(self.arguments))
Create and return the ArgumentParser which will be used to parse the arguments to this command.
def create_parser(self, prog_name, subcommand):
    """
    Create and return the ``ArgumentParser`` which will be used to
    parse the arguments to this command.

    Note: the parser's program name comes from ``self.prog``, not from
    the ``prog_name`` argument, and its usage string is derived from
    ``subcommand``.
    """
    parser_kwargs = {
        'description': self.description,
        'epilog': self.epilog,
        'add_help': self.add_help,
        'prog': self.prog,
        'usage': self.get_usage(subcommand),
    }
    parser = ArgumentParser(**parser_kwargs)
    parser.add_argument('--version', action='version',
                        version=self.get_version())
    # let the concrete command contribute its own options
    self.add_arguments(parser)
    return parser