def open(self, file, flags, mode=0777):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode)
return _os.open(file,flags,mode) |
def unquote_ends(istr):
"""Remove a single pair of quotes from the endpoints of a string."""
if not istr:
return istr
if (istr[0]=="'" and istr[-1]=="'") or \
(istr[0]=='"' and istr[-1]=='"'):
return istr[1:-1]
else:
return istr |
def qw(words,flat=0,sep=None,maxsplit=-1):
"""Similar to Perl's qw() operator, but with some more options.
qw(words,flat=0,sep=None,maxsplit=-1) -> words.split(sep,maxsplit)
words can also be a list itself, and with flat=1, the output will be
recursively flattened.
Examples:
>>> qw('1 2')
['1', '2']
>>> qw(['a b','1 2',['m n','p q']])
[['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]
>>> qw(['a b','1 2',['m n','p q']],flat=1)
['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
"""
if isinstance(words, basestring):
return [word.strip() for word in words.split(sep,maxsplit)
if word and not word.isspace() ]
if flat:
return flatten(map(qw,words,[1]*len(words)))
return map(qw,words) |
def grep(pat,list,case=1):
"""Simple minded grep-like function.
grep(pat,list) returns occurrences of pat in list, None on failure.
It only does simple string matching, with no support for regexps. Use the
option case=0 for case-insensitive matching."""
# This is pretty crude. At least it should implement copying only references
# to the original data in case it's big. Now it copies the data for output.
out=[]
if case:
for term in list:
if term.find(pat)>-1: out.append(term)
else:
lpat=pat.lower()
for term in list:
if term.lower().find(lpat)>-1: out.append(term)
if len(out): return out
else: return None |
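A minimal usage sketch for grep, assuming the function above is in scope:

names = ['Alpha', 'beta', 'Gamma']
print(grep('am', names))             # -> ['Gamma']
print(grep('ALPHA', names, case=0))  # -> ['Alpha'] (case-insensitive)
print(grep('zzz', names))            # -> None (no match)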
def dgrep(pat,*opts):
"""Return grep() on dir()+dir(__builtins__).
A very common use of grep() when working interactively."""
return grep(pat,dir(__main__)+dir(__main__.__builtins__),*opts) |
def indent(instr,nspaces=4, ntabs=0, flatten=False):
"""Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
instr : basestring
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
str|unicode : string indented by ntabs and nspaces.
"""
if instr is None:
return
ind = '\t'*ntabs+' '*nspaces
if flatten:
pat = re.compile(r'^\s*', re.MULTILINE)
else:
pat = re.compile(r'^', re.MULTILINE)
outstr = re.sub(pat, ind, instr)
if outstr.endswith(os.linesep+ind):
return outstr[:-len(ind)]
else:
return outstr |
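A short sketch of the flatten option, assuming indent as defined above:

print(indent('a\n  b', nspaces=2))
# -> '  a\n    b'  (existing indentation is strictly increased)
print(indent('a\n  b', nspaces=2, flatten=True))
# -> '  a\n  b'   (existing indentation is scrubbed first)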
def native_line_ends(filename,backup=1):
"""Convert (in-place) a file to line-ends native to the current OS.
If the optional backup argument is given as false, no backup of the
original file is left. """
backup_suffixes = {'posix':'~','dos':'.bak','nt':'.bak','mac':'.bak'}
bak_filename = filename + backup_suffixes[os.name]
original = open(filename).read()
shutil.copy2(filename,bak_filename)
try:
new = open(filename,'wb')
new.write(os.linesep.join(original.splitlines()))
new.write(os.linesep) # ALWAYS put an eol at the end of the file
new.close()
except:
os.rename(bak_filename,filename)
if not backup:
try:
os.remove(bak_filename)
except:
pass |
def marquee(txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'.
:Examples:
In [16]: marquee('A test',40)
Out[16]: '**************** A test ****************'
In [17]: marquee('A test',40,'-')
Out[17]: '---------------- A test ----------------'
In [18]: marquee('A test',40,' ')
Out[18]: ' A test '
"""
if not txt:
return (mark*width)[:width]
nmark = (width-len(txt)-2)//len(mark)//2
if nmark < 0: nmark = 0
marks = mark*nmark
return '%s %s %s' % (marks,txt,marks) |
def format_screen(strng):
"""Format a string for screen printing.
This removes some latex-type format codes."""
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
strng = par_re.sub('',strng)
return strng |
def dedent(text):
"""Equivalent of textwrap.dedent that ignores unindented first line.
This means it will still dedent strings like:
'''foo
is a bar
'''
For use in wrap_paragraphs.
"""
if text.startswith('\n'):
# text starts with blank line, don't ignore the first line
return textwrap.dedent(text)
# split first line
splits = text.split('\n',1)
if len(splits) == 1:
# only one line
return textwrap.dedent(text)
first, rest = splits
# dedent everything but the first line
rest = textwrap.dedent(rest)
return '\n'.join([first, rest]) |
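A sketch contrasting this with plain textwrap.dedent, assuming the dedent above is in scope:

import textwrap

text = 'foo\n    is a bar\n'
print(textwrap.dedent(text))  # unchanged: the flush 'foo' line pins the common prefix to ''
print(dedent(text))           # 'foo\nis a bar\n' -- the first line is ignored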
def wrap_paragraphs(text, ncols=80):
"""Wrap multiple paragraphs to fit a specified width.
This is equivalent to textwrap.wrap, but with support for multiple
paragraphs, as separated by empty lines.
Returns
-------
list of complete paragraphs, wrapped to fill `ncols` columns.
"""
paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
text = dedent(text).strip()
paragraphs = paragraph_re.split(text)[::2] # every other entry is space
out_ps = []
indent_re = re.compile(r'\n\s+', re.MULTILINE)
for p in paragraphs:
# presume indentation that survives dedent is meaningful formatting,
# so don't fill unless text is flush.
if indent_re.search(p) is None:
# wrap paragraph
p = textwrap.fill(p, ncols)
out_ps.append(p)
return out_ps |
def long_substr(data):
"""Return the longest common substring in a list of strings.
Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
"""
substr = ''
if len(data) > 1 and len(data[0]) > 0:
for i in range(len(data[0])):
for j in range(len(data[0])-i+1):
if j > len(substr) and all(data[0][i:i+j] in x for x in data):
substr = data[0][i:i+j]
elif len(data) == 1:
substr = data[0]
return substr |
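A usage sketch, assuming long_substr as defined above (the second call mirrors how strip_email_quotes below uses it):

print(long_substr(['mylib_parse', 'mylib_print']))      # -> 'mylib_p'
print(long_substr(['> > text', '> > more', '> last']))  # -> '> '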
def strip_email_quotes(text):
"""Strip leading email quotation characters ('>').
Removes any combination of leading '>' interspersed with whitespace that
appears *identically* in all lines of the input text.
Parameters
----------
text : str
Examples
--------
Simple uses::
In [2]: strip_email_quotes('> > text')
Out[2]: 'text'
In [3]: strip_email_quotes('> > text\\n> > more')
Out[3]: 'text\\nmore'
Note how only the common prefix that appears in all lines is stripped::
In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
Out[4]: '> text\\n> more\\nmore...'
So if any line has no quote marks ('>') , then none are stripped from any
of them ::
In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
Out[5]: '> > text\\n> > more\\nlast different'
"""
lines = text.splitlines()
matches = set()
for line in lines:
prefix = re.match(r'^(\s*>[ >]*)', line)
if prefix:
matches.add(prefix.group(1))
else:
break
else:
prefix = long_substr(list(matches))
if prefix:
strip = len(prefix)
text = '\n'.join([ ln[strip:] for ln in lines])
return text |
def _find_optimal(rlist, separator_size=2, displaywidth=80):
"""Calculate optimal info to columnize a list of strings"""
for nrow in range(1, len(rlist)+1) :
chk = map(max,_chunks(rlist, nrow))
sumlength = sum(chk)
ncols = len(chk)
if sumlength+separator_size*(ncols-1) <= displaywidth :
break
return {'columns_numbers' : ncols,
'optimal_separator_width':(displaywidth - sumlength)/(ncols-1) if (ncols -1) else 0,
'rows_numbers' : nrow,
'columns_width' : chk
} |
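_find_optimal consumes a _chunks helper that is not shown in this excerpt; a minimal sketch of what it is assumed to do (slice the list into nrow-sized pieces, one per column):

def _chunks(l, n):
    # Yield successive n-sized chunks from l (assumed helper,
    # matching how _find_optimal consumes it above).
    for i in range(0, len(l), n):
        yield l[i:i + n]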
def _get_or_default(mylist, i, default=None):
"""return list item number, or default if don't exist"""
if i >= len(mylist):
return default
else :
return mylist[i] |
def compute_item_matrix(items, empty=None, *args, **kwargs) :
"""Returns a nested list, and info to columnize items
Parameters :
------------
items :
list of strings to columnize
empty : (default None)
default value to fill list if needed
separator_size : int (default=2)
How many characters will be used as separation between columns.
displaywidth : int (default=80)
The width of the area onto which the columns should fit
Returns :
---------
Returns a tuple of (strings_matrix, dict_info)
strings_matrix :
nested list of strings; the outermost list contains as many lists as
rows, and each innermost list has as many elements as columns. If the
total number of elements in `items` does not equal the product of
rows*columns, the last element of some lists are filled with `None`.
dict_info :
some info to make columnize easier:
columns_numbers : number of columns
rows_numbers : number of rows
columns_width : list of widths of the columns
optimal_separator_width : best separator width between columns
Example :
---------
In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
...: compute_item_matrix(l,displaywidth=12)
Out[1]:
([['aaa', 'f', 'k'],
['b', 'g', 'l'],
['cc', 'h', None],
['d', 'i', None],
['eeeee', 'j', None]],
{'columns_numbers': 3,
'columns_width': [5, 1, 1],
'optimal_separator_width': 2,
'rows_numbers': 5})
"""
info = _find_optimal(map(len, items), *args, **kwargs)
nrow, ncol = info['rows_numbers'], info['columns_numbers']
return ([[ _get_or_default(items, c*nrow+i, default=empty) for c in range(ncol) ] for i in range(nrow) ], info) |
def columnize(items, separator=' ', displaywidth=80):
""" Transform a list of strings into a single string with columns.
Parameters
----------
items : sequence of strings
The strings to process.
separator : str, optional [default is two spaces]
The string that separates columns.
displaywidth : int, optional [default is 80]
Width of the display in number of characters.
Returns
-------
The formatted string.
"""
if not items :
return '\n'
matrix, info = compute_item_matrix(items, separator_size=len(separator), displaywidth=displaywidth)
fmatrix = [filter(None, x) for x in matrix]
sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['columns_width'])])
return '\n'.join(map(sjoin, fmatrix))+'\n' |
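A usage sketch matching the compute_item_matrix docstring example above; spacing in the comments follows the computed column widths:

items = ['aaa', 'b', 'cc', 'd', 'eeeee', 'f', 'g', 'h', 'i', 'j', 'k', 'l']
print(columnize(items, displaywidth=12))
# aaa    f  k
# b      g  l
# cc     h
# d      i
# eeeee  j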
def grep(self, pattern, prune = False, field = None):
""" Return all strings matching 'pattern' (a regex or callable)
This is case-insensitive. If prune is true, return all items
NOT matching the pattern.
If field is specified, the match must occur in the specified
whitespace-separated field.
Examples::
a.grep( lambda x: x.startswith('C') )
a.grep('Cha.*log', prune=1)
a.grep('chm', field=-1)
"""
def match_target(s):
if field is None:
return s
parts = s.split()
try:
tgt = parts[field]
return tgt
except IndexError:
return ""
if isinstance(pattern, basestring):
pred = lambda x : re.search(pattern, x, re.IGNORECASE)
else:
pred = pattern
if not prune:
return SList([el for el in self if pred(match_target(el))])
else:
return SList([el for el in self if not pred(match_target(el))]) |
def fields(self, *fields):
""" Collect whitespace-separated fields from string list
Allows quick awk-like usage of string lists.
Example data (in var a, created by 'a = !ls -l')::
-rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
a.fields(0) is ['-rwxrwxrwx', 'drwxrwxrwx+']
a.fields(1,0) is ['1 -rwxrwxrwx', '6 drwxrwxrwx+']
(note the joining by space).
a.fields(-1) is ['ChangeLog', 'IPython']
IndexErrors are ignored.
Without args, fields() just split()'s the strings.
"""
if len(fields) == 0:
return [el.split() for el in self]
res = SList()
for el in [f.split() for f in self]:
lineparts = []
for fd in fields:
try:
lineparts.append(el[fd])
except IndexError:
pass
if lineparts:
res.append(" ".join(lineparts))
return res |
def sort(self,field= None, nums = False):
""" sort by specified fields (see fields())
Example::
a.sort(1, nums = True)
Sorts a by second field, in numerical order (so that 21 > 3)
"""
#decorate, sort, undecorate
if field is not None:
dsu = [[SList([line]).fields(field), line] for line in self]
else:
dsu = [[line, line] for line in self]
if nums:
for i in range(len(dsu)):
numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
try:
n = int(numstr)
except ValueError:
n = 0
dsu[i][0] = n
dsu.sort()
return SList([t[1] for t in dsu]) |
def read_py_file(filename, skip_encoding_cookie=True):
"""Read a Python file, using the encoding declared inside the file.
Parameters
----------
filename : str
The path to the file to read.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
with open(filename) as f: # the open function defined in this module.
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(f))
else:
return f.read() |
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
response = urllib.urlopen(url)
buffer = io.BytesIO(response.read())
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(text))
else:
return text.read() |
def build_kernel_argv(self, argv=None):
"""build argv to be passed to kernel subprocess"""
if argv is None:
argv = sys.argv[1:]
self.kernel_argv = swallow_argv(argv, self.frontend_aliases, self.frontend_flags)
# kernel should inherit default config file from frontend
self.kernel_argv.append("--KernelApp.parent_appname='%s'"%self.name) |
def init_connection_file(self):
"""find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name.
"""
if self.existing:
try:
cf = find_connection_file(self.existing)
except Exception:
self.log.critical("Could not find existing kernel connection file %s", self.existing)
self.exit(1)
self.log.info("Connecting to existing kernel: %s" % cf)
self.connection_file = cf
else:
# not existing, check if we are going to write the file
# and ensure that self.connection_file is a full path, not just the shortname
try:
cf = find_connection_file(self.connection_file)
except Exception:
# file might not exist
if self.connection_file == os.path.basename(self.connection_file):
# just shortname, put it in security dir
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
self.connection_file = cf
# should load_connection_file only be used for existing?
# as it is now, this allows reusing ports if an existing
# file is requested
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1) |
def init_ssh(self):
"""set up ssh tunnels, if needed."""
if not self.sshserver and not self.sshkey:
return
if self.sshkey and not self.sshserver:
# specifying just the key implies that we are connecting directly
self.sshserver = self.ip
self.ip = LOCALHOST
# build connection dict for tunnels:
info = dict(ip=self.ip,
shell_port=self.shell_port,
iopub_port=self.iopub_port,
stdin_port=self.stdin_port,
hb_port=self.hb_port
)
self.log.info("Forwarding connections to %s via %s"%(self.ip, self.sshserver))
# tunnels return a new set of ports, which will be on localhost:
self.ip = LOCALHOST
try:
newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
except:
# even catch KeyboardInterrupt
self.log.error("Could not setup tunnels", exc_info=True)
self.exit(1)
self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports
cf = self.connection_file
base,ext = os.path.splitext(cf)
base = os.path.basename(base)
self.connection_file = os.path.basename(base)+'-ssh'+ext
self.log.critical("To connect another client via this tunnel, use:")
self.log.critical("--existing %s" % self.connection_file) |
def initialize(self, argv=None):
"""
Classes which mix this class in should call:
IPythonConsoleApp.initialize(self,argv)
"""
self.init_connection_file()
default_secure(self.config)
self.init_ssh()
self.init_kernel_manager() |
def prepare_message(self, data=None):
"""
Return message as dict
:return dict
"""
message = {
'protocol': self.protocol,
'node': self._node,
'chip_id': self._chip_id,
'event': '',
'parameters': {},
'response': '',
'targets': [
'ALL'
]
}
if type(data) is dict:
for k, v in data.items():
if k in message:
message[k] = v
return message |
def decode_message(self, message):
"""
Decode json string to dict. Validate against node name(targets) and protocol version
:return dict | None
"""
try:
message = json.loads(message)
if not self._validate_message(message):
message = None
except ValueError:
message = None
return message |
def _validate_message(self, message):
""":return boolean"""
if 'protocol' not in message or 'targets' not in message or \
type(message['targets']) is not list:
return False
if message['protocol'] != self.protocol:
return False
if self.node not in message['targets'] and 'ALL' not in message['targets']:
return False
return True |
def pretty(obj, verbose=False, max_width=79, newline='\n'):
"""
Pretty print the object's representation.
"""
stream = StringIO()
printer = RepresentationPrinter(stream, verbose, max_width, newline)
printer.pretty(obj)
printer.flush()
return stream.getvalue() |
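A sketch of the _repr_pretty_ protocol that pretty() ultimately dispatches to (see the pretty method further below); the Interval class is purely illustrative:

class Interval(object):
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def _repr_pretty_(self, p, cycle):
        # p is the RepresentationPrinter; cycle is True on self-reference
        if cycle:
            p.text('Interval(...)')
        else:
            p.text('Interval(%r, %r)' % (self.lo, self.hi))

print(pretty(Interval(1, 5)))   # -> Interval(1, 5)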
def pprint(obj, verbose=False, max_width=79, newline='\n'):
"""
Like `pretty` but print to stdout.
"""
printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
sys.stdout.flush() |
def _get_mro(obj_class):
""" Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, '__mro__'):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
return mro |
def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = getattr(obj, '__class__', None) or type(obj)
if getattr(klass, '__repr__', None) not in _baseclass_reprs:
# A user-provided repr.
p.text(repr(obj))
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>') |
def _seq_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, lists, sets and frozensets.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
step = len(start)
p.begin_group(step, start)
for idx, x in enumerate(obj):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
if len(obj) == 1 and type(obj) is tuple:
# Special case for 1-item tuples.
p.text(',')
p.end_group(step, end)
return inner |
def _dict_pprinter_factory(start, end, basetype=None):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text('{...}')
p.begin_group(1, start)
keys = obj.keys()
try:
keys.sort()
except Exception:
# Sometimes the keys don't sort.
pass
for idx, key in enumerate(keys):
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(obj[key])
p.end_group(1, end)
return inner |
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__self_class__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>') |
def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
p.text('re.compile(')
pattern = repr(obj.pattern)
if pattern[:1] in 'uU':
pattern = pattern[1:]
prefix = 'ur'
else:
prefix = 'r'
pattern = prefix + pattern.replace('\\\\', '\\')
p.text(pattern)
if obj.flags:
p.text(',')
p.breakable()
done_one = False
for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
'UNICODE', 'VERBOSE', 'DEBUG'):
if obj.flags & getattr(re, flag):
if done_one:
p.text('|')
p.text('re.' + flag)
done_one = True
p.text(')') |
def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
if obj.__module__ in ('__builtin__', 'exceptions'):
name = obj.__name__
else:
name = obj.__module__ + '.' + obj.__name__
p.text(name) |
def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
if obj.__module__ in ('__builtin__', 'exceptions') or not obj.__module__:
name = obj.__name__
else:
name = obj.__module__ + '.' + obj.__name__
p.text('<function %s>' % name) |
def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
if obj.__class__.__module__ in ('exceptions', 'builtins'):
name = obj.__class__.__name__
else:
name = '%s.%s' % (
obj.__class__.__module__,
obj.__class__.__name__
)
step = len(name) + 1
p.begin_group(step, name + '(')
for idx, arg in enumerate(getattr(obj, 'args', ())):
if idx:
p.text(',')
p.breakable()
p.pretty(arg)
p.end_group(step, ')') |
def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
return oldfunc |
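A sketch of registering a custom printer with for_type and restoring the old one; the choice of uuid.UUID is illustrative, and pretty() is assumed importable from this module:

import uuid

def _uuid_pprint(obj, p, cycle):
    # Illustrative printer: render UUIDs compactly.
    p.text('UUID<%s>' % obj.hex)

old = for_type(uuid.UUID, _uuid_pprint)  # returns the previous printer, if any
print(pretty(uuid.UUID(int=1)))          # -> UUID<00000000000000000000000000000001>
for_type(uuid.UUID, old)                 # a no-op when old is None, by design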
def for_type_by_name(type_module, type_name, func):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
key = (type_module, type_name)
oldfunc = _deferred_type_pprinters.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_deferred_type_pprinters[key] = func
return oldfunc |
def group(self, indent=0, open='', close=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, open)
try:
yield
finally:
self.end_group(indent, close) |
def text(self, obj):
"""Add literal text to the output."""
width = len(obj)
if self.buffer:
text = self.buffer[-1]
if not isinstance(text, Text):
text = Text()
self.buffer.append(text)
text.add(obj, width)
self.buffer_width += width
self._break_outer_groups()
else:
self.output.write(obj)
self.output_width += width |
def breakable(self, sep=' '):
"""
Add a breakable separator to the output. This does not mean that it
will automatically break here. If no breaking on this position takes
place, the `sep` is inserted, which defaults to one space.
"""
width = len(sep)
group = self.group_stack[-1]
if group.want_break:
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
else:
self.buffer.append(Breakable(sep, width, self))
self.buffer_width += width
self._break_outer_groups() |
def begin_group(self, indent=0, open=''):
"""
Begin a group. If you want support for python < 2.5, which doesn't have
the with statement, this is the preferred way:
p.begin_group(1, '{')
...
p.end_group(1, '}')
The python 2.5 expression would be this:
with p.group(1, '{', '}'):
...
The first parameter specifies the indentation for the next line (usually
the width of the opening text), the second the opening text. All
parameters are optional.
"""
if open:
self.text(open)
group = Group(self.group_stack[-1].depth + 1)
self.group_stack.append(group)
self.group_queue.enq(group)
self.indentation += indent |
def end_group(self, dedent=0, close=''):
"""End a group. See `begin_group` for more details."""
self.indentation -= dedent
group = self.group_stack.pop()
if not group.breakables:
self.group_queue.remove(group)
if close:
self.text(close) |
def flush(self):
"""Flush data that is left in the buffer."""
for data in self.buffer:
self.output_width += data.output(self.output, self.output_width)
self.buffer.clear()
self.buffer_width = 0 |
def pretty(self, obj):
"""Pretty print the given object."""
obj_id = id(obj)
cycle = obj_id in self.stack
self.stack.append(obj_id)
self.begin_group()
try:
obj_class = getattr(obj, '__class__', None) or type(obj)
# First try to find registered singleton printers for the type.
try:
printer = self.singleton_pprinters[obj_id]
except (TypeError, KeyError):
pass
else:
return printer(obj, self, cycle)
# Next walk the mro and check for either:
# 1) a registered printer
# 2) a _repr_pretty_ method
for cls in _get_mro(obj_class):
if cls in self.type_pprinters:
# printer registered in self.type_pprinters
return self.type_pprinters[cls](obj, self, cycle)
else:
# deferred printer
printer = self._in_deferred_types(cls)
if printer is not None:
return printer(obj, self, cycle)
else:
# Finally look for special method names.
# Some objects automatically create any requested
# attribute. Try to ignore most of them by checking for
# callability.
if '_repr_pretty_' in obj_class.__dict__:
meth = obj_class._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
self.stack.pop() |
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Returns the printer from the registry if it exists, and None if the
class is not in the registry. Successful matches will be moved to the
regular type registry for future use.
"""
mod = getattr(cls, '__module__', None)
name = getattr(cls, '__name__', None)
key = (mod, name)
printer = None
if key in self.deferred_pprinters:
# Move the printer over to the regular registry.
printer = self.deferred_pprinters.pop(key)
self.type_pprinters[cls] = printer
return printer |
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print ec.active_colors
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
))
return ex_colors |
def patterns(prefix, *args):
"""As patterns() in django."""
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list |
def url(regex, view, kwargs=None, name=None, prefix=''):
"""As url() in Django."""
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return URLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
view = get_callable(view)
return CBVRegexURLPattern(regex, view, kwargs, name) |
def _prepare_ods_columns(ods, trans_title_row):
"""
Prepare columns in new ods file, create new sheet for metadata,
set columns color and width. Set formatting style info in your
settings.py file in ~/.c3po/ folder.
"""
ods.content.getSheet(0).setSheetName('Translations')
ods.content.makeSheet('Meta options')
ods.content.getColumn(0).setWidth('5.0in')
ods.content.getCell(0, 0).stringValue('metadata')\
.setCellColor(settings.TITLE_ROW_BG_COLOR) \
.setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)
ods.content.getSheet(0)
ods.content.getColumn(0).setWidth('1.5in')
ods.content.getCell(0, 0) \
.setCellColor(settings.TITLE_ROW_BG_COLOR) \
.setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)
for i, title in enumerate(trans_title_row):
ods.content.getColumn(i).setWidth(settings.MSGSTR_COLUMN_WIDTH)
ods.content.getCell(i, 0).stringValue(title)\
.setCellColor(settings.TITLE_ROW_BG_COLOR) \
.setBold(True).setFontColor(settings.TITLE_ROW_FONT_COLOR)
ods.content.getColumn(0).setWidth(settings.NOTES_COLUMN_WIDTH) |
def _write_trans_into_ods(ods, languages, locale_root,
po_files_path, po_filename, start_row):
"""
Write translations from po files into one ods file.
Assumes a directory structure:
<locale_root>/<lang>/<po_files_path>/<filename>.
"""
ods.content.getSheet(0)
for i, lang in enumerate(languages[1:]):
lang_po_path = os.path.join(locale_root, lang,
po_files_path, po_filename)
if os.path.exists(lang_po_path):
po_file = polib.pofile(lang_po_path)
for j, entry in enumerate(po_file):
# start from 4th column, 1st row
row = j+start_row
ods.content.getCell(i+4, row).stringValue(
_escape_apostrophe(entry.msgstr))
if i % 2 == 1:
ods.content.getCell(i+4, row).setCellColor(
settings.ODD_COLUMN_BG_COLOR)
else:
ods.content.getCell(i+4, row).setCellColor(
settings.EVEN_COLUMN_BG_COLOR) |
def _write_row_into_ods(ods, sheet_no, row_no, row):
"""
Write row with translations to ods file into specified sheet and row_no.
"""
ods.content.getSheet(sheet_no)
for j, col in enumerate(row):
cell = ods.content.getCell(j, row_no+1)
cell.stringValue(_escape_apostrophe(col))
if j % 2 == 1:
cell.setCellColor(settings.EVEN_COLUMN_BG_COLOR)
else:
cell.setCellColor(settings.ODD_COLUMN_BG_COLOR) |
def po_to_ods(languages, locale_root, po_files_path, temp_file_path):
"""
Converts po files into one ods file readable as a GDocs spreadsheet.
:param languages: list of language codes
:param locale_root: path to locale root folder containing directories
with languages
:param po_files_path: path from lang directory to po file
:param temp_file_path: path where temporary files will be saved
"""
title_row = ['file', 'comment', 'msgid']
title_row += map(lambda s: s + ':msgstr', languages)
ods = ODS()
_prepare_ods_columns(ods, title_row)
po_files = _get_all_po_filenames(locale_root, languages[0], po_files_path)
i = 1
for po_filename in po_files:
po_file_path = os.path.join(locale_root, languages[0],
po_files_path, po_filename)
start_row = i
po = polib.pofile(po_file_path)
for entry in po:
meta = dict(entry.__dict__)
meta.pop('msgid', None)
meta.pop('msgstr', None)
meta.pop('tcomment', None)
ods.content.getSheet(1)
ods.content.getCell(0, i).stringValue(
str(meta)).setCellColor(settings.EVEN_COLUMN_BG_COLOR)
ods.content.getSheet(0)
ods.content.getCell(0, i) \
.stringValue(po_filename) \
.setCellColor(settings.ODD_COLUMN_BG_COLOR)
ods.content.getCell(1, i) \
.stringValue(_escape_apostrophe(entry.tcomment)) \
.setCellColor(settings.ODD_COLUMN_BG_COLOR)
ods.content.getCell(2, i) \
.stringValue(_escape_apostrophe(entry.msgid)) \
.setCellColor(settings.EVEN_COLUMN_BG_COLOR)
ods.content.getCell(3, i) \
.stringValue(_escape_apostrophe(entry.msgstr))\
.setCellColor(settings.ODD_COLUMN_BG_COLOR)
i += 1
_write_trans_into_ods(ods, languages, locale_root,
po_files_path, po_filename, start_row)
ods.save(temp_file_path) |
def csv_to_ods(trans_csv, meta_csv, local_ods):
"""
Converts csv files to one ods file
:param trans_csv: path to csv file with translations
:param meta_csv: path to csv file with metadata
:param local_ods: path to new ods file
"""
trans_reader = UnicodeReader(trans_csv)
meta_reader = UnicodeReader(meta_csv)
ods = ODS()
trans_title = trans_reader.next()
meta_reader.next()
_prepare_ods_columns(ods, trans_title)
for i, (trans_row, meta_row) in enumerate(izip(trans_reader, meta_reader)):
_write_row_into_ods(ods, 0, i, trans_row)
_write_row_into_ods(ods, 1, i, meta_row)
trans_reader.close()
meta_reader.close()
ods.save(local_ods) |
def win32_clipboard_get():
""" Get the current clipboard's text on Windows.
Requires Mark Hammond's pywin32 extensions.
"""
try:
import win32clipboard
except ImportError:
raise TryNext("Getting text from the clipboard requires the pywin32 "
"extensions: http://sourceforge.net/projects/pywin32/")
win32clipboard.OpenClipboard()
text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
# FIXME: convert \r\n to \n?
win32clipboard.CloseClipboard()
return text |
def osx_clipboard_get():
""" Get the clipboard's text on OS X.
"""
p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
stdout=subprocess.PIPE)
text, stderr = p.communicate()
# Text comes in with old Mac \r line endings. Change them to \n.
text = text.replace('\r', '\n')
return text |
def tkinter_clipboard_get():
""" Get the clipboard's text using Tkinter.
This is the default on systems that are not Windows or OS X. It may
interfere with other UI toolkits and should be replaced with an
implementation that uses that toolkit.
"""
try:
import Tkinter
except ImportError:
raise TryNext("Getting text from the clipboard on this platform "
"requires Tkinter.")
root = Tkinter.Tk()
root.withdraw()
text = root.clipboard_get()
root.destroy()
return text |
def _get_build_prefix():
""" Returns a safe build_prefix """
path = os.path.join(
tempfile.gettempdir(),
'pip_build_%s' % __get_username().replace(' ', '_')
)
if WINDOWS:
""" on windows(tested on 7) temp dirs are isolated """
return path
try:
os.mkdir(path)
write_delete_marker_file(path)
except OSError:
file_uid = None
try:
# raises OSError for symlinks
# https://github.com/pypa/pip/pull/935#discussion_r5307003
file_uid = get_path_uid(path)
except OSError:
file_uid = None
if file_uid != os.geteuid():
msg = (
"The temporary folder for building (%s) is either not owned by"
" you, or is a symlink." % path
)
print(msg)
print(
"pip will not work until the temporary folder is either "
"deleted or is a real directory owned by your user account."
)
raise exceptions.InstallationError(msg)
return path |
def prepare_communication (self):
"""
Find the subdomain rank (tuple) for each processor and
determine the neighbor info.
"""
nsd_ = self.nsd
if nsd_<1:
print('Number of space dimensions is %d, nothing to do' %nsd_)
return
self.subd_rank = [-1,-1,-1]
self.subd_lo_ix = [-1,-1,-1]
self.subd_hi_ix = [-1,-1,-1]
self.lower_neighbors = [-1,-1,-1]
self.upper_neighbors = [-1,-1,-1]
num_procs = self.num_procs
my_id = self.my_id
num_subds = 1
for i in range(nsd_):
num_subds = num_subds*self.num_parts[i]
if my_id==0:
print("# subds=", num_subds)
# should check num_subds against num_procs
offsets = [1, 0, 0]
# find the subdomain rank
self.subd_rank[0] = my_id%self.num_parts[0]
if nsd_>=2:
offsets[1] = self.num_parts[0]
self.subd_rank[1] = my_id/offsets[1]
if nsd_==3:
offsets[1] = self.num_parts[0]
offsets[2] = self.num_parts[0]*self.num_parts[1]
self.subd_rank[1] = (my_id%offsets[2])/self.num_parts[0]
self.subd_rank[2] = my_id/offsets[2]
print("my_id=%d, subd_rank: "%my_id, self.subd_rank)
if my_id==0:
print("offsets=", offsets)
# find the neighbor ids
for i in range(nsd_):
rank = self.subd_rank[i]
if rank>0:
self.lower_neighbors[i] = my_id-offsets[i]
if rank<self.num_parts[i]-1:
self.upper_neighbors[i] = my_id+offsets[i]
k = self.global_num_cells[i]/self.num_parts[i]
m = self.global_num_cells[i]%self.num_parts[i]
ix = rank*k+max(0,rank+m-self.num_parts[i])
self.subd_lo_ix[i] = ix
ix = ix+k
if rank>=(self.num_parts[i]-m):
ix = ix+1 # load balancing
if rank<self.num_parts[i]-1:
ix = ix+1 # one cell of overlap
self.subd_hi_ix[i] = ix
print("subd_rank:",self.subd_rank,\
"lower_neig:", self.lower_neighbors, \
"upper_neig:", self.upper_neighbors)
print("subd_rank:",self.subd_rank,"subd_lo_ix:", self.subd_lo_ix, \
"subd_hi_ix:", self.subd_hi_ix) |
def prepare_communication (self):
"""
Prepare the buffers to be used for later communications
"""
RectPartitioner.prepare_communication (self)
if self.lower_neighbors[0]>=0:
self.in_lower_buffers = [zeros(1, float)]
self.out_lower_buffers = [zeros(1, float)]
if self.upper_neighbors[0]>=0:
self.in_upper_buffers = [zeros(1, float)]
self.out_upper_buffers = [zeros(1, float)] |
def prepare_communication (self):
"""
Prepare the buffers to be used for later communications
"""
RectPartitioner.prepare_communication (self)
self.in_lower_buffers = [[], []]
self.out_lower_buffers = [[], []]
self.in_upper_buffers = [[], []]
self.out_upper_buffers = [[], []]
size1 = self.subd_hi_ix[1]-self.subd_lo_ix[1]+1
if self.lower_neighbors[0]>=0:
self.in_lower_buffers[0] = zeros(size1, float)
self.out_lower_buffers[0] = zeros(size1, float)
if self.upper_neighbors[0]>=0:
self.in_upper_buffers[0] = zeros(size1, float)
self.out_upper_buffers[0] = zeros(size1, float)
size0 = self.subd_hi_ix[0]-self.subd_lo_ix[0]+1
if self.lower_neighbors[1]>=0:
self.in_lower_buffers[1] = zeros(size0, float)
self.out_lower_buffers[1] = zeros(size0, float)
if self.upper_neighbors[1]>=0:
self.in_upper_buffers[1] = zeros(size0, float)
self.out_upper_buffers[1] = zeros(size0, float) |
def update_internal_boundary_x_y (self, solution_array):
"""update the inner boundary with the same send/recv pattern as the MPIPartitioner"""
nsd_ = self.nsd
dtype = solution_array.dtype
if nsd_ != len(self.in_lower_buffers) or nsd_ != len(self.out_lower_buffers):
print("Buffers for communicating with lower neighbors not ready")
return
if nsd_ != len(self.in_upper_buffers) or nsd_ != len(self.out_upper_buffers):
print("Buffers for communicating with upper neighbors not ready")
return
loc_nx = self.subd_hi_ix[0]-self.subd_lo_ix[0]
loc_ny = self.subd_hi_ix[1]-self.subd_lo_ix[1]
lower_x_neigh = self.lower_neighbors[0]
upper_x_neigh = self.upper_neighbors[0]
lower_y_neigh = self.lower_neighbors[1]
upper_y_neigh = self.upper_neighbors[1]
trackers = []
flags = dict(copy=False, track=False)
# communicate in the x-direction first
if lower_x_neigh>-1:
if self.slice_copy:
self.out_lower_buffers[0] = ascontiguousarray(solution_array[1,:])
else:
for i in xrange(0,loc_ny+1):
self.out_lower_buffers[0][i] = solution_array[1,i]
t = self.comm.west.send(self.out_lower_buffers[0], **flags)
trackers.append(t)
if upper_x_neigh>-1:
msg = self.comm.east.recv(copy=False)
self.in_upper_buffers[0] = frombuffer(msg, dtype=dtype)
if self.slice_copy:
solution_array[loc_nx,:] = self.in_upper_buffers[0]
self.out_upper_buffers[0] = ascontiguousarray(solution_array[loc_nx-1,:])
else:
for i in xrange(0,loc_ny+1):
solution_array[loc_nx,i] = self.in_upper_buffers[0][i]
self.out_upper_buffers[0][i] = solution_array[loc_nx-1,i]
t = self.comm.east.send(self.out_upper_buffers[0], **flags)
trackers.append(t)
if lower_x_neigh>-1:
msg = self.comm.west.recv(copy=False)
self.in_lower_buffers[0] = frombuffer(msg, dtype=dtype)
if self.slice_copy:
solution_array[0,:] = self.in_lower_buffers[0]
else:
for i in xrange(0,loc_ny+1):
solution_array[0,i] = self.in_lower_buffers[0][i]
# communicate in the y-direction afterwards
if lower_y_neigh>-1:
if self.slice_copy:
self.out_lower_buffers[1] = ascontiguousarray(solution_array[:,1])
else:
for i in xrange(0,loc_nx+1):
self.out_lower_buffers[1][i] = solution_array[i,1]
t = self.comm.south.send(self.out_lower_buffers[1], **flags)
trackers.append(t)
if upper_y_neigh>-1:
msg = self.comm.north.recv(copy=False)
self.in_upper_buffers[1] = frombuffer(msg, dtype=dtype)
if self.slice_copy:
solution_array[:,loc_ny] = self.in_upper_buffers[1]
self.out_upper_buffers[1] = ascontiguousarray(solution_array[:,loc_ny-1])
else:
for i in xrange(0,loc_nx+1):
solution_array[i,loc_ny] = self.in_upper_buffers[1][i]
self.out_upper_buffers[1][i] = solution_array[i,loc_ny-1]
t = self.comm.north.send(self.out_upper_buffers[1], **flags)
trackers.append(t)
if lower_y_neigh>-1:
msg = self.comm.south.recv(copy=False)
self.in_lower_buffers[1] = frombuffer(msg, dtype=dtype)
if self.slice_copy:
solution_array[:,0] = self.in_lower_buffers[1]
else:
for i in xrange(0,loc_nx+1):
solution_array[i,0] = self.in_lower_buffers[1][i]
# wait for sends to complete:
if flags['track']:
for t in trackers:
t.wait() |
def rekey(dikt):
"""Rekey a dict that has been forced to use str keys where there should be
ints by json."""
for k in dikt.iterkeys():
if isinstance(k, basestring):
ik=fk=None
try:
ik = int(k)
except ValueError:
try:
fk = float(k)
except ValueError:
continue
if ik is not None:
nk = ik
else:
nk = fk
if nk in dikt:
raise KeyError("already have key %r"%nk)
dikt[nk] = dikt.pop(k)
return dikt |
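A usage sketch for rekey, in the Python 2 style of the code above:

d = {'0': 'a', '2.5': 'b', 'name': 'c'}
print(rekey(d))   # -> {0: 'a', 2.5: 'b', 'name': 'c'}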
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k,v in obj.iteritems():
obj[k] = extract_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [ extract_dates(o) for o in obj ]
elif isinstance(obj, basestring):
if ISO8601_PAT.match(obj):
obj = datetime.strptime(obj, ISO8601)
return obj |
def squash_dates(obj):
"""squash datetime objects into ISO8601 strings"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k,v in obj.iteritems():
obj[k] = squash_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [ squash_dates(o) for o in obj ]
elif isinstance(obj, datetime):
obj = obj.strftime(ISO8601)
return obj |
def date_default(obj):
"""default function for packing datetime objects in JSON."""
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
else:
raise TypeError("%r is not JSON serializable"%obj) |
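A sketch of plugging date_default into the stdlib json module; the ISO8601 value shown is an assumption about the module-level constant the functions above reference:

import json
from datetime import datetime

ISO8601 = '%Y-%m-%dT%H:%M:%S.%f'   # assumed value of the module-level constant used above

msg = {'sent': datetime(2012, 1, 1, 12, 0, 0)}
print(json.dumps(msg, default=date_default))
# -> {"sent": "2012-01-01T12:00:00.000000"}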
def encode_images(format_dict):
"""b64-encodes images in a displaypub format dict
Perhaps this should be handled in json_clean itself?
Parameters
----------
format_dict : dict
A dictionary of display data keyed by mime-type
Returns
-------
format_dict : dict
A copy of the same dictionary,
but binary image data ('image/png' or 'image/jpeg')
is base64-encoded.
"""
encoded = format_dict.copy()
pngdata = format_dict.get('image/png')
if isinstance(pngdata, bytes) and pngdata[:8] == PNG:
encoded['image/png'] = encodestring(pngdata).decode('ascii')
jpegdata = format_dict.get('image/jpeg')
if isinstance(jpegdata, bytes) and jpegdata[:2] == JPEG:
encoded['image/jpeg'] = encodestring(jpegdata).decode('ascii')
return encoded |
def json_clean(obj):
"""Clean an object to ensure it's safe to encode in JSON.
Atomic, immutable objects are returned unmodified. Sets and tuples are
converted to lists, lists are copied and dicts are also copied.
Note: dicts whose keys could cause collisions upon encoding (such as a dict
with both the number 1 and the string '1' as keys) will cause a ValueError
to be raised.
Parameters
----------
obj : any python object
Returns
-------
out : object
A version of the input which will not cause an encoding error when
encoded as JSON. Note that this function does not *encode* its inputs,
it simply sanitizes it so that there will be no encoding errors later.
Examples
--------
>>> json_clean(4)
4
>>> json_clean(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> sorted(json_clean(dict(x=1, y=2)).items())
[('x', 1), ('y', 2)]
>>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())
[('x', 1), ('y', 2), ('z', [1, 2, 3])]
>>> json_clean(True)
True
"""
# types that are 'atomic' and ok in json as-is. bool doesn't need to be
# listed explicitly because bools pass as int instances
atomic_ok = (unicode, int, types.NoneType)
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
if isinstance(obj, float):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return obj
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
return obj.decode(DEFAULT_ENCODING, 'replace')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError('dict can not be safely converted to JSON: '
'key collision would lead to dropped values')
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k,v in obj.iteritems():
out[str(k)] = json_clean(v)
return out
# If we get here, we don't know how to handle the object, so we just get
# its repr and return that. This will catch lambdas, open sockets, class
# objects, and any other complicated contraption that json can't encode
return repr(obj) |
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir |
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0777-mask) |
def sleep_here(count, t):
"""simple function that takes args, prints a short message, sleeps for a time, and returns the same args"""
import time,sys
print("hi from engine %i" % id)
sys.stdout.flush()
time.sleep(t)
return count,t |
def _save_method_args(self, *args, **kwargs):
"""Save the args and kwargs to get/post/put/delete for future use.
These arguments are not saved in the request or handler objects, but
are often needed by methods such as get_stream().
"""
self._method_args = args
self._method_kwargs = kwargs |
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
self.arguments = parser.parse_args(argv[2:])
handle_default_options(self.arguments)
options = vars(self.arguments)
self.execute(**options) |
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = ArgumentParser(
description=self.description,
epilog=self.epilog,
add_help=self.add_help,
prog=self.prog,
usage=self.get_usage(subcommand),
)
parser.add_argument('--version', action='version', version=self.get_version())
self.add_arguments(parser)
return parser |
def connect(self, south_peer=None, west_peer=None):
"""connect to peers. `peers` will be a 3-tuples, of the form:
(location, north_addr, east_addr)
as produced by
"""
if south_peer is not None:
location, url, _ = south_peer
self.south.connect(disambiguate_url(url, location))
if west_peer is not None:
location, _, url = west_peer
self.west.connect(disambiguate_url(url, location)) |
def _convert_pyx_sources_to_c(self):
"convert .pyx extensions to .c"
def pyx_to_c(source):
if source.endswith('.pyx'):
source = source[:-4] + '.c'
return source
self.sources = map(pyx_to_c, self.sources) |
def main(connection_file):
"""watch iopub channel, and print messages"""
ctx = zmq.Context.instance()
with open(connection_file) as f:
cfg = json.loads(f.read())
location = cfg['location']
reg_url = cfg['url']
session = Session(key=str_to_bytes(cfg['exec_key']))
query = ctx.socket(zmq.DEALER)
query.connect(disambiguate_url(cfg['url'], location))
session.send(query, "connection_request")
idents,msg = session.recv(query, mode=0)
c = msg['content']
iopub_url = disambiguate_url(c['iopub'], location)
sub = ctx.socket(zmq.SUB)
# This will subscribe to all messages:
sub.setsockopt(zmq.SUBSCRIBE, b'')
# replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout
# 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes
# to everything from engine 1, but there is no way to subscribe to
# just stdout from everyone.
# multiple calls to subscribe will add subscriptions, e.g. to subscribe to
# engine 1's stderr and engine 2's stdout:
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')
sub.connect(iopub_url)
while True:
try:
idents,msg = session.recv(sub, mode=0)
except KeyboardInterrupt:
return
# ident always length 1 here
topic = idents[0]
if msg['msg_type'] == 'stream':
# stdout/stderr
# stream names are in msg['content']['name'], if you want to handle
# them differently
print("%s: %s" % (topic, msg['content']['data']))
elif msg['msg_type'] == 'pyerr':
# Python traceback
c = msg['content']
print(topic + ':')
for line in c['traceback']:
# indent lines
print(' ' + line) |
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if isinstance(caller, partial):
return partial(decorator, caller)
# otherwise assume caller is a function
first = inspect.getargspec(caller)[0][0] # first arg
evaldict = caller.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (caller.__name__, first),
'return decorator(_call_, %s)' % first,
evaldict, undecorated=caller, __wrapped__=caller,
doc=caller.__doc__, module=caller.__module__) |
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
) |
def catch_config_error(method, app, *args, **kwargs):
"""Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
On a TraitError (generally caused by bad config), this will print the trait's
message, and exit the app.
For use on init methods, to prevent invoking excepthook on invalid input.
"""
try:
return method(app, *args, **kwargs)
except (TraitError, ArgumentError) as e:
app.print_description()
app.print_help()
app.print_examples()
app.log.fatal("Bad config encountered during initialization:")
app.log.fatal(str(e))
app.log.debug("Config at the time: %s", app.config)
app.exit(1) |
def boolean_flag(name, configurable, set_help='', unset_help=''):
"""Helper for building basic --trait, --no-trait flags.
Parameters
----------
name : str
The name of the flag.
configurable : str
The 'Class.trait' string of the trait to be set/unset with the flag
set_help : unicode
help string for --name flag
unset_help : unicode
help string for --no-name flag
Returns
-------
cfg : dict
A dict with two keys: 'name', and 'no-name', for setting and unsetting
the trait, respectively.
"""
# default helpstrings
set_help = set_help or "set %s=True"%configurable
unset_help = unset_help or "set %s=False"%configurable
cls,trait = configurable.split('.')
setter = {cls : {trait : True}}
unsetter = {cls : {trait : False}}
return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)} |
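A sketch of the dict boolean_flag returns; the trait name is illustrative:

flags = boolean_flag('banner', 'TerminalIPythonApp.display_banner',
                     "Display a banner on startup.",
                     "Don't display a banner on startup.")
# flags == {
#     'banner':    ({'TerminalIPythonApp': {'display_banner': True}},
#                   "Display a banner on startup."),
#     'no-banner': ({'TerminalIPythonApp': {'display_banner': False}},
#                   "Don't display a banner on startup."),
# }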
def _log_level_changed(self, name, old, new):
"""Adjust the log level when log_level is set."""
if isinstance(new, basestring):
new = getattr(logging, new)
self.log_level = new
self.log.setLevel(new) |
def _log_default(self):
"""Start logging for this application.
The default is to log to stdout using a StreamHandler. The log level
starts at logging.WARN, but this can be adjusted by setting the
``log_level`` attribute.
"""
log = logging.getLogger(self.__class__.__name__)
log.setLevel(self.log_level)
if sys.executable.endswith('pythonw.exe'):
# this should really go to a file, but file-logging is only
# hooked up in parallel applications
_log_handler = logging.StreamHandler(open(os.devnull, 'w'))
else:
_log_handler = logging.StreamHandler()
_log_formatter = logging.Formatter(self.log_format)
_log_handler.setFormatter(_log_formatter)
log.addHandler(_log_handler)
return log |
def _flags_changed(self, name, old, new):
"""ensure flags dict is valid"""
for key,value in new.iteritems():
assert len(value) == 2, "Bad flag: %r:%s"%(key,value)
assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s"%(key,value)
assert isinstance(value[1], basestring), "Bad flag: %r:%s"%(key,value) |
def print_alias_help(self):
"""Print the alias part of the help."""
if not self.aliases:
return
lines = []
classdict = {}
for cls in self.classes:
# include all parents (up to, but excluding Configurable) in available names
for c in cls.mro()[:-3]:
classdict[c.__name__] = c
for alias, longname in self.aliases.iteritems():
classname, traitname = longname.split('.',1)
cls = classdict[classname]
trait = cls.class_traits(config=True)[traitname]
help = cls.class_get_trait_help(trait).splitlines()
# reformat first line
help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
if len(alias) == 1:
help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
lines.extend(help)
# lines.append('')
print os.linesep.join(lines) |
def print_flag_help(self):
"""Print the flag part of the help."""
if not self.flags:
return
lines = []
for m, (cfg,help) in self.flags.iteritems():
prefix = '--' if len(m) > 1 else '-'
lines.append(prefix+m)
lines.append(indent(dedent(help.strip())))
# lines.append('')
print os.linesep.join(lines) |
def print_subcommands(self):
"""Print the subcommand part of the help."""
if not self.subcommands:
return
lines = ["Subcommands"]
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.subcommand_description):
lines.append(p)
lines.append('')
for subc, (cls, help) in self.subcommands.iteritems():
lines.append(subc)
if help:
lines.append(indent(dedent(help.strip())))
lines.append('')
print os.linesep.join(lines) |
def print_help(self, classes=False):
"""Print the help for each Configurable class in self.classes.
If classes=False (the default), only flags and aliases are printed.
"""
self.print_subcommands()
self.print_options()
if classes:
if self.classes:
print "Class parameters"
print "----------------"
print
for p in wrap_paragraphs(self.keyvalue_description):
print p
print
for cls in self.classes:
cls.class_print_help()
print
else:
print "To see all available configurables, use `--help-all`"
print |
def print_examples(self):
"""Print usage and examples.
This usage string goes at the end of the command line help string
and should contain examples of the application's usage.
"""
if self.examples:
print "Examples"
print "--------"
print
print indent(dedent(self.examples.strip()))
print |
def update_config(self, config):
"""Fire the traits events when the config is updated."""
# Save a copy of the current config.
newconfig = deepcopy(self.config)
# Merge the new config into the current one.
newconfig._merge(config)
# Save the combined config as self.config, which triggers the traits
# events.
self.config = newconfig |
def initialize_subcommand(self, subc, argv=None):
"""Initialize a subcommand with argv."""
subapp,help = self.subcommands.get(subc)
if isinstance(subapp, basestring):
subapp = import_item(subapp)
# clear existing instances
self.__class__.clear_instance()
# instantiate
self.subapp = subapp.instance()
# and initialize subapp
self.subapp.initialize(argv) |
def flatten_flags(self):
"""flatten flags and aliases, so cl-args override as expected.
This prevents issues such as an alias pointing to InteractiveShell,
but a config file setting the same trait in TerminalInteractiveShell
getting inappropriate priority over the command-line arg.
Only aliases with exactly one descendant in the class list
will be promoted.
"""
# build a tree of classes in our list that inherit from a particular
# it will be a dict by parent classname of classes in our list
# that are descendents
mro_tree = defaultdict(list)
for cls in self.classes:
clsname = cls.__name__
for parent in cls.mro()[1:-3]:
# exclude cls itself and Configurable,HasTraits,object
mro_tree[parent.__name__].append(clsname)
# flatten aliases, which have the form:
# { 'alias' : 'Class.trait' }
aliases = {}
for alias, cls_trait in self.aliases.iteritems():
cls,trait = cls_trait.split('.',1)
children = mro_tree[cls]
if len(children) == 1:
# exactly one descendent, promote alias
cls = children[0]
aliases[alias] = '.'.join([cls,trait])
# flatten flags, which are of the form:
# { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
flags = {}
for key, (flagdict, help) in self.flags.iteritems():
newflag = {}
for cls, subdict in flagdict.iteritems():
children = mro_tree[cls]
# exactly one descendent, promote flag section
if len(children) == 1:
cls = children[0]
newflag[cls] = subdict
flags[key] = (newflag, help)
return flags, aliases |
def parse_command_line(self, argv=None):
"""Parse the command line arguments."""
argv = sys.argv[1:] if argv is None else argv
if argv and argv[0] == 'help':
# turn `ipython help notebook` into `ipython notebook -h`
argv = argv[1:] + ['-h']
if self.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
# it's a subcommand, and *not* a flag or class parameter
return self.initialize_subcommand(subc, subargv)
if '-h' in argv or '--help' in argv or '--help-all' in argv:
self.print_description()
self.print_help('--help-all' in argv)
self.print_examples()
self.exit(0)
if '--version' in argv or '-V' in argv:
self.print_version()
self.exit(0)
# flatten flags&aliases, so cl-args get appropriate priority:
flags,aliases = self.flatten_flags()
loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
flags=flags)
config = loader.load_config()
self.update_config(config)
# store unparsed args in extra_args
self.extra_args = loader.extra_args |
def load_config_file(self, filename, path=None):
"""Load a .py based config file by filename and path."""
loader = PyFileConfigLoader(filename, path=path)
try:
config = loader.load_config()
except ConfigFileNotFound:
# problem finding the file, raise
raise
except Exception:
# try to get the full filename, but it will be empty in the
# unlikely event that the error raised before filefind finished
filename = loader.full_filename or filename
# problem while running the file
self.log.error("Exception while loading config file %s",
filename, exc_info=True)
else:
self.log.debug("Loaded config file: %s", loader.full_filename)
self.update_config(config) |
def generate_config_file(self):
"""generate default config file from Configurables"""
lines = ["# Configuration file for %s."%self.name]
lines.append('')
lines.append('c = get_config()')
lines.append('')
for cls in self.classes:
lines.append(cls.class_config_section())
return '\n'.join(lines) |
def downsample(array, k):
"""Choose k random elements of array."""
length = array.shape[0]
indices = random.sample(xrange(length), k)
return array[indices] |
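A usage sketch, assuming numpy plus the random/xrange names the Python 2 code above relies on:

import numpy as np

data = np.arange(100).reshape(50, 2)
print(downsample(data, 5))   # 5 rows chosen uniformly at random, without replacement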