partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
test
Collector._installation_trace
Called on new threads, installs the real tracer.
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def _installation_trace(self, frame_unused, event_unused, arg_unused): """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function sys.settrace(None) # Install the real tracer. fn = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: fn = fn(frame_unused, event_unused, arg_unused) # Return the new trace function to continue tracing in this scope. return fn
def _installation_trace(self, frame_unused, event_unused, arg_unused): """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function sys.settrace(None) # Install the real tracer. fn = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: fn = fn(frame_unused, event_unused, arg_unused) # Return the new trace function to continue tracing in this scope. return fn
[ "Called", "on", "new", "threads", "installs", "the", "real", "tracer", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L245-L256
[ "def", "_installation_trace", "(", "self", ",", "frame_unused", ",", "event_unused", ",", "arg_unused", ")", ":", "# Remove ourselves as the trace function", "sys", ".", "settrace", "(", "None", ")", "# Install the real tracer.", "fn", "=", "self", ".", "_start_tracer", "(", ")", "# Invoke the real trace function with the current event, to be sure", "# not to lose an event.", "if", "fn", ":", "fn", "=", "fn", "(", "frame_unused", ",", "event_unused", ",", "arg_unused", ")", "# Return the new trace function to continue tracing in this scope.", "return", "fn" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
Collector.start
Start collecting trace information.
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def start(self): """Start collecting trace information.""" if self._collectors: self._collectors[-1].pause() self._collectors.append(self) #print("Started: %r" % self._collectors, file=sys.stderr) # Check to see whether we had a fullcoverage tracer installed. traces0 = [] if hasattr(sys, "gettrace"): fn0 = sys.gettrace() if fn0: tracer0 = getattr(fn0, '__self__', None) if tracer0: traces0 = getattr(tracer0, 'traces', []) # Install the tracer on this thread. fn = self._start_tracer() for args in traces0: (frame, event, arg), lineno = args try: fn(frame, event, arg, lineno=lineno) except TypeError: raise Exception( "fullcoverage must be run with the C trace function." ) # Install our installation tracer in threading, to jump start other # threads. threading.settrace(self._installation_trace)
def start(self): """Start collecting trace information.""" if self._collectors: self._collectors[-1].pause() self._collectors.append(self) #print("Started: %r" % self._collectors, file=sys.stderr) # Check to see whether we had a fullcoverage tracer installed. traces0 = [] if hasattr(sys, "gettrace"): fn0 = sys.gettrace() if fn0: tracer0 = getattr(fn0, '__self__', None) if tracer0: traces0 = getattr(tracer0, 'traces', []) # Install the tracer on this thread. fn = self._start_tracer() for args in traces0: (frame, event, arg), lineno = args try: fn(frame, event, arg, lineno=lineno) except TypeError: raise Exception( "fullcoverage must be run with the C trace function." ) # Install our installation tracer in threading, to jump start other # threads. threading.settrace(self._installation_trace)
[ "Start", "collecting", "trace", "information", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L258-L288
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_collectors", ":", "self", ".", "_collectors", "[", "-", "1", "]", ".", "pause", "(", ")", "self", ".", "_collectors", ".", "append", "(", "self", ")", "#print(\"Started: %r\" % self._collectors, file=sys.stderr)", "# Check to see whether we had a fullcoverage tracer installed.", "traces0", "=", "[", "]", "if", "hasattr", "(", "sys", ",", "\"gettrace\"", ")", ":", "fn0", "=", "sys", ".", "gettrace", "(", ")", "if", "fn0", ":", "tracer0", "=", "getattr", "(", "fn0", ",", "'__self__'", ",", "None", ")", "if", "tracer0", ":", "traces0", "=", "getattr", "(", "tracer0", ",", "'traces'", ",", "[", "]", ")", "# Install the tracer on this thread.", "fn", "=", "self", ".", "_start_tracer", "(", ")", "for", "args", "in", "traces0", ":", "(", "frame", ",", "event", ",", "arg", ")", ",", "lineno", "=", "args", "try", ":", "fn", "(", "frame", ",", "event", ",", "arg", ",", "lineno", "=", "lineno", ")", "except", "TypeError", ":", "raise", "Exception", "(", "\"fullcoverage must be run with the C trace function.\"", ")", "# Install our installation tracer in threading, to jump start other", "# threads.", "threading", ".", "settrace", "(", "self", ".", "_installation_trace", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
Collector.stop
Stop collecting trace information.
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def stop(self): """Stop collecting trace information.""" #print >>sys.stderr, "Stopping: %r" % self._collectors assert self._collectors assert self._collectors[-1] is self self.pause() self.tracers = [] # Remove this Collector from the stack, and resume the one underneath # (if any). self._collectors.pop() if self._collectors: self._collectors[-1].resume()
def stop(self): """Stop collecting trace information.""" #print >>sys.stderr, "Stopping: %r" % self._collectors assert self._collectors assert self._collectors[-1] is self self.pause() self.tracers = [] # Remove this Collector from the stack, and resume the one underneath # (if any). self._collectors.pop() if self._collectors: self._collectors[-1].resume()
[ "Stop", "collecting", "trace", "information", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L290-L303
[ "def", "stop", "(", "self", ")", ":", "#print >>sys.stderr, \"Stopping: %r\" % self._collectors", "assert", "self", ".", "_collectors", "assert", "self", ".", "_collectors", "[", "-", "1", "]", "is", "self", "self", ".", "pause", "(", ")", "self", ".", "tracers", "=", "[", "]", "# Remove this Collector from the stack, and resume the one underneath", "# (if any).", "self", ".", "_collectors", ".", "pop", "(", ")", "if", "self", ".", "_collectors", ":", "self", ".", "_collectors", "[", "-", "1", "]", ".", "resume", "(", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
Collector.pause
Pause tracing, but be prepared to `resume`.
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def pause(self): """Pause tracing, but be prepared to `resume`.""" for tracer in self.tracers: tracer.stop() stats = tracer.get_stats() if stats: print("\nCoverage.py tracer stats:") for k in sorted(stats.keys()): print("%16s: %s" % (k, stats[k])) threading.settrace(None)
def pause(self): """Pause tracing, but be prepared to `resume`.""" for tracer in self.tracers: tracer.stop() stats = tracer.get_stats() if stats: print("\nCoverage.py tracer stats:") for k in sorted(stats.keys()): print("%16s: %s" % (k, stats[k])) threading.settrace(None)
[ "Pause", "tracing", "but", "be", "prepared", "to", "resume", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L305-L314
[ "def", "pause", "(", "self", ")", ":", "for", "tracer", "in", "self", ".", "tracers", ":", "tracer", ".", "stop", "(", ")", "stats", "=", "tracer", ".", "get_stats", "(", ")", "if", "stats", ":", "print", "(", "\"\\nCoverage.py tracer stats:\"", ")", "for", "k", "in", "sorted", "(", "stats", ".", "keys", "(", ")", ")", ":", "print", "(", "\"%16s: %s\"", "%", "(", "k", ",", "stats", "[", "k", "]", ")", ")", "threading", ".", "settrace", "(", "None", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
Collector.resume
Resume tracing after a `pause`.
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() threading.settrace(self._installation_trace)
def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() threading.settrace(self._installation_trace)
[ "Resume", "tracing", "after", "a", "pause", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L316-L320
[ "def", "resume", "(", "self", ")", ":", "for", "tracer", "in", "self", ".", "tracers", ":", "tracer", ".", "start", "(", ")", "threading", ".", "settrace", "(", "self", ".", "_installation_trace", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
Collector.get_line_data
Return the line data collected. Data is { filename: { lineno: None, ...}, ...}
virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py
def get_line_data(self): """Return the line data collected. Data is { filename: { lineno: None, ...}, ...} """ if self.branch: # If we were measuring branches, then we have to re-build the dict # to show line data. line_data = {} for f, arcs in self.data.items(): line_data[f] = ldf = {} for l1, _ in list(arcs.keys()): if l1: ldf[l1] = None return line_data else: return self.data
def get_line_data(self): """Return the line data collected. Data is { filename: { lineno: None, ...}, ...} """ if self.branch: # If we were measuring branches, then we have to re-build the dict # to show line data. line_data = {} for f, arcs in self.data.items(): line_data[f] = ldf = {} for l1, _ in list(arcs.keys()): if l1: ldf[l1] = None return line_data else: return self.data
[ "Return", "the", "line", "data", "collected", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/collector.py#L322-L339
[ "def", "get_line_data", "(", "self", ")", ":", "if", "self", ".", "branch", ":", "# If we were measuring branches, then we have to re-build the dict", "# to show line data.", "line_data", "=", "{", "}", "for", "f", ",", "arcs", "in", "self", ".", "data", ".", "items", "(", ")", ":", "line_data", "[", "f", "]", "=", "ldf", "=", "{", "}", "for", "l1", ",", "_", "in", "list", "(", "arcs", ".", "keys", "(", ")", ")", ":", "if", "l1", ":", "ldf", "[", "l1", "]", "=", "None", "return", "line_data", "else", ":", "return", "self", ".", "data" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
new_code_cell
Create a new code cell with input and output
environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py
def new_code_cell(code=None, prompt_number=None): """Create a new code cell with input and output""" cell = NotebookNode() cell.cell_type = u'code' if code is not None: cell.code = unicode(code) if prompt_number is not None: cell.prompt_number = int(prompt_number) return cell
def new_code_cell(code=None, prompt_number=None): """Create a new code cell with input and output""" cell = NotebookNode() cell.cell_type = u'code' if code is not None: cell.code = unicode(code) if prompt_number is not None: cell.prompt_number = int(prompt_number) return cell
[ "Create", "a", "new", "code", "cell", "with", "input", "and", "output" ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py#L44-L52
[ "def", "new_code_cell", "(", "code", "=", "None", ",", "prompt_number", "=", "None", ")", ":", "cell", "=", "NotebookNode", "(", ")", "cell", ".", "cell_type", "=", "u'code'", "if", "code", "is", "not", "None", ":", "cell", ".", "code", "=", "unicode", "(", "code", ")", "if", "prompt_number", "is", "not", "None", ":", "cell", ".", "prompt_number", "=", "int", "(", "prompt_number", ")", "return", "cell" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
new_text_cell
Create a new text cell.
environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py
def new_text_cell(text=None): """Create a new text cell.""" cell = NotebookNode() if text is not None: cell.text = unicode(text) cell.cell_type = u'text' return cell
def new_text_cell(text=None): """Create a new text cell.""" cell = NotebookNode() if text is not None: cell.text = unicode(text) cell.cell_type = u'text' return cell
[ "Create", "a", "new", "text", "cell", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py#L55-L61
[ "def", "new_text_cell", "(", "text", "=", "None", ")", ":", "cell", "=", "NotebookNode", "(", ")", "if", "text", "is", "not", "None", ":", "cell", ".", "text", "=", "unicode", "(", "text", ")", "cell", ".", "cell_type", "=", "u'text'", "return", "cell" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
new_notebook
Create a notebook by name, id and a list of worksheets.
environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py
def new_notebook(cells=None): """Create a notebook by name, id and a list of worksheets.""" nb = NotebookNode() if cells is not None: nb.cells = cells else: nb.cells = [] return nb
def new_notebook(cells=None): """Create a notebook by name, id and a list of worksheets.""" nb = NotebookNode() if cells is not None: nb.cells = cells else: nb.cells = [] return nb
[ "Create", "a", "notebook", "by", "name", "id", "and", "a", "list", "of", "worksheets", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py#L64-L71
[ "def", "new_notebook", "(", "cells", "=", "None", ")", ":", "nb", "=", "NotebookNode", "(", ")", "if", "cells", "is", "not", "None", ":", "nb", ".", "cells", "=", "cells", "else", ":", "nb", ".", "cells", "=", "[", "]", "return", "nb" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
eq_
Shorthand for 'assert a == b, "%r != %r" % (a, b)
environment/lib/python2.7/site-packages/nose/tools/trivial.py
def eq_(a, b, msg=None): """Shorthand for 'assert a == b, "%r != %r" % (a, b) """ if not a == b: raise AssertionError(msg or "%r != %r" % (a, b))
def eq_(a, b, msg=None): """Shorthand for 'assert a == b, "%r != %r" % (a, b) """ if not a == b: raise AssertionError(msg or "%r != %r" % (a, b))
[ "Shorthand", "for", "assert", "a", "==", "b", "%r", "!", "=", "%r", "%", "(", "a", "b", ")" ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/tools/trivial.py#L25-L29
[ "def", "eq_", "(", "a", ",", "b", ",", "msg", "=", "None", ")", ":", "if", "not", "a", "==", "b", ":", "raise", "AssertionError", "(", "msg", "or", "\"%r != %r\"", "%", "(", "a", ",", "b", ")", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
collect_exceptions
check a result dict for errors, and raise CompositeError if any exist. Passthrough otherwise.
environment/lib/python2.7/site-packages/IPython/parallel/error.py
def collect_exceptions(rdict_or_list, method='unspecified'): """check a result dict for errors, and raise CompositeError if any exist. Passthrough otherwise.""" elist = [] if isinstance(rdict_or_list, dict): rlist = rdict_or_list.values() else: rlist = rdict_or_list for r in rlist: if isinstance(r, RemoteError): en, ev, etb, ei = r.ename, r.evalue, r.traceback, r.engine_info # Sometimes we could have CompositeError in our list. Just take # the errors out of them and put them in our new list. This # has the effect of flattening lists of CompositeErrors into one # CompositeError if en=='CompositeError': for e in ev.elist: elist.append(e) else: elist.append((en, ev, etb, ei)) if len(elist)==0: return rdict_or_list else: msg = "one or more exceptions from call to method: %s" % (method) # This silliness is needed so the debugger has access to the exception # instance (e in this case) try: raise CompositeError(msg, elist) except CompositeError as e: raise e
def collect_exceptions(rdict_or_list, method='unspecified'): """check a result dict for errors, and raise CompositeError if any exist. Passthrough otherwise.""" elist = [] if isinstance(rdict_or_list, dict): rlist = rdict_or_list.values() else: rlist = rdict_or_list for r in rlist: if isinstance(r, RemoteError): en, ev, etb, ei = r.ename, r.evalue, r.traceback, r.engine_info # Sometimes we could have CompositeError in our list. Just take # the errors out of them and put them in our new list. This # has the effect of flattening lists of CompositeErrors into one # CompositeError if en=='CompositeError': for e in ev.elist: elist.append(e) else: elist.append((en, ev, etb, ei)) if len(elist)==0: return rdict_or_list else: msg = "one or more exceptions from call to method: %s" % (method) # This silliness is needed so the debugger has access to the exception # instance (e in this case) try: raise CompositeError(msg, elist) except CompositeError as e: raise e
[ "check", "a", "result", "dict", "for", "errors", "and", "raise", "CompositeError", "if", "any", "exist", ".", "Passthrough", "otherwise", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/error.py#L293-L322
[ "def", "collect_exceptions", "(", "rdict_or_list", ",", "method", "=", "'unspecified'", ")", ":", "elist", "=", "[", "]", "if", "isinstance", "(", "rdict_or_list", ",", "dict", ")", ":", "rlist", "=", "rdict_or_list", ".", "values", "(", ")", "else", ":", "rlist", "=", "rdict_or_list", "for", "r", "in", "rlist", ":", "if", "isinstance", "(", "r", ",", "RemoteError", ")", ":", "en", ",", "ev", ",", "etb", ",", "ei", "=", "r", ".", "ename", ",", "r", ".", "evalue", ",", "r", ".", "traceback", ",", "r", ".", "engine_info", "# Sometimes we could have CompositeError in our list. Just take", "# the errors out of them and put them in our new list. This", "# has the effect of flattening lists of CompositeErrors into one", "# CompositeError", "if", "en", "==", "'CompositeError'", ":", "for", "e", "in", "ev", ".", "elist", ":", "elist", ".", "append", "(", "e", ")", "else", ":", "elist", ".", "append", "(", "(", "en", ",", "ev", ",", "etb", ",", "ei", ")", ")", "if", "len", "(", "elist", ")", "==", "0", ":", "return", "rdict_or_list", "else", ":", "msg", "=", "\"one or more exceptions from call to method: %s\"", "%", "(", "method", ")", "# This silliness is needed so the debugger has access to the exception", "# instance (e in this case)", "try", ":", "raise", "CompositeError", "(", "msg", ",", "elist", ")", "except", "CompositeError", "as", "e", ":", "raise", "e" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
CompositeError.render_traceback
render one or all of my tracebacks to a list of lines
environment/lib/python2.7/site-packages/IPython/parallel/error.py
def render_traceback(self, excid=None): """render one or all of my tracebacks to a list of lines""" lines = [] if excid is None: for (en,ev,etb,ei) in self.elist: lines.append(self._get_engine_str(ei)) lines.extend((etb or 'No traceback available').splitlines()) lines.append('') else: try: en,ev,etb,ei = self.elist[excid] except: raise IndexError("an exception with index %i does not exist"%excid) else: lines.append(self._get_engine_str(ei)) lines.extend((etb or 'No traceback available').splitlines()) return lines
def render_traceback(self, excid=None): """render one or all of my tracebacks to a list of lines""" lines = [] if excid is None: for (en,ev,etb,ei) in self.elist: lines.append(self._get_engine_str(ei)) lines.extend((etb or 'No traceback available').splitlines()) lines.append('') else: try: en,ev,etb,ei = self.elist[excid] except: raise IndexError("an exception with index %i does not exist"%excid) else: lines.append(self._get_engine_str(ei)) lines.extend((etb or 'No traceback available').splitlines()) return lines
[ "render", "one", "or", "all", "of", "my", "tracebacks", "to", "a", "list", "of", "lines" ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/error.py#L262-L279
[ "def", "render_traceback", "(", "self", ",", "excid", "=", "None", ")", ":", "lines", "=", "[", "]", "if", "excid", "is", "None", ":", "for", "(", "en", ",", "ev", ",", "etb", ",", "ei", ")", "in", "self", ".", "elist", ":", "lines", ".", "append", "(", "self", ".", "_get_engine_str", "(", "ei", ")", ")", "lines", ".", "extend", "(", "(", "etb", "or", "'No traceback available'", ")", ".", "splitlines", "(", ")", ")", "lines", ".", "append", "(", "''", ")", "else", ":", "try", ":", "en", ",", "ev", ",", "etb", ",", "ei", "=", "self", ".", "elist", "[", "excid", "]", "except", ":", "raise", "IndexError", "(", "\"an exception with index %i does not exist\"", "%", "excid", ")", "else", ":", "lines", ".", "append", "(", "self", ".", "_get_engine_str", "(", "ei", ")", ")", "lines", ".", "extend", "(", "(", "etb", "or", "'No traceback available'", ")", ".", "splitlines", "(", ")", ")", "return", "lines" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
process_startup
Call this at Python startup to perhaps measure coverage. If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file to use. There are two ways to configure your Python installation to invoke this function when Python starts: #. Create or append to sitecustomize.py to add these lines:: import coverage coverage.process_startup() #. Create a .pth file in your Python installation containing:: import coverage; coverage.process_startup()
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def process_startup(): """Call this at Python startup to perhaps measure coverage. If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file to use. There are two ways to configure your Python installation to invoke this function when Python starts: #. Create or append to sitecustomize.py to add these lines:: import coverage coverage.process_startup() #. Create a .pth file in your Python installation containing:: import coverage; coverage.process_startup() """ cps = os.environ.get("COVERAGE_PROCESS_START") if cps: cov = coverage(config_file=cps, auto_data=True) cov.start() cov._warn_no_data = False cov._warn_unimported_source = False
def process_startup(): """Call this at Python startup to perhaps measure coverage. If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file to use. There are two ways to configure your Python installation to invoke this function when Python starts: #. Create or append to sitecustomize.py to add these lines:: import coverage coverage.process_startup() #. Create a .pth file in your Python installation containing:: import coverage; coverage.process_startup() """ cps = os.environ.get("COVERAGE_PROCESS_START") if cps: cov = coverage(config_file=cps, auto_data=True) cov.start() cov._warn_no_data = False cov._warn_unimported_source = False
[ "Call", "this", "at", "Python", "startup", "to", "perhaps", "measure", "coverage", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L750-L775
[ "def", "process_startup", "(", ")", ":", "cps", "=", "os", ".", "environ", ".", "get", "(", "\"COVERAGE_PROCESS_START\"", ")", "if", "cps", ":", "cov", "=", "coverage", "(", "config_file", "=", "cps", ",", "auto_data", "=", "True", ")", "cov", ".", "start", "(", ")", "cov", ".", "_warn_no_data", "=", "False", "cov", ".", "_warn_unimported_source", "=", "False" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._canonical_dir
Return the canonical directory of the module or file `morf`.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
def _canonical_dir(self, morf): """Return the canonical directory of the module or file `morf`.""" return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
[ "Return", "the", "canonical", "directory", "of", "the", "module", "or", "file", "morf", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L209-L211
[ "def", "_canonical_dir", "(", "self", ",", "morf", ")", ":", "return", "os", ".", "path", ".", "split", "(", "CodeUnit", "(", "morf", ",", "self", ".", "file_locator", ")", ".", "filename", ")", "[", "0", "]" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._source_for_file
Return the source file for `filename`.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] elif filename.endswith("$py.class"): # jython filename = filename[:-9] + ".py" return filename
def _source_for_file(self, filename): """Return the source file for `filename`.""" if not filename.endswith(".py"): if filename[-4:-1] == ".py": filename = filename[:-1] elif filename.endswith("$py.class"): # jython filename = filename[:-9] + ".py" return filename
[ "Return", "the", "source", "file", "for", "filename", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L213-L220
[ "def", "_source_for_file", "(", "self", ",", "filename", ")", ":", "if", "not", "filename", ".", "endswith", "(", "\".py\"", ")", ":", "if", "filename", "[", "-", "4", ":", "-", "1", "]", "==", "\".py\"", ":", "filename", "=", "filename", "[", ":", "-", "1", "]", "elif", "filename", ".", "endswith", "(", "\"$py.class\"", ")", ":", "# jython", "filename", "=", "filename", "[", ":", "-", "9", "]", "+", "\".py\"", "return", "filename" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._should_trace_with_reason
Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a pair of values: the first indicates whether the file should be traced: it's a canonicalized filename if it should be traced, None if it should not. The second value is a string, the resason for the decision.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _should_trace_with_reason(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a pair of values: the first indicates whether the file should be traced: it's a canonicalized filename if it should be traced, None if it should not. The second value is a string, the resason for the decision. """ if not filename: # Empty string is pretty useless return None, "empty string isn't a filename" if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return None, "not a real filename" self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. 
if self.source_match: if not self.source_match.match(canonical): return None, "falls outside the --source trees" elif self.include_match: if not self.include_match.match(canonical): return None, "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(canonical): return None, "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(canonical): return None, "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(canonical): return None, "is inside an --omit pattern" return canonical, "because we love you"
def _should_trace_with_reason(self, filename, frame): """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a pair of values: the first indicates whether the file should be traced: it's a canonicalized filename if it should be traced, None if it should not. The second value is a string, the reason for the decision. """ if not filename: # Empty string is pretty useless return None, "empty string isn't a filename" if filename.startswith('<'): # Lots of non-file execution is represented with artificial # filenames like "<string>", "<doctest readme.txt[0]>", or # "<exec_function>". Don't ever trace these executions, since we # can't do anything with the data later anyway. return None, "not a real filename" self._check_for_packages() # Compiled Python files have two filenames: frame.f_code.co_filename is # the filename at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. dunder_file = frame.f_globals.get('__file__') if dunder_file: filename = self._source_for_file(dunder_file) # Jython reports the .class file to the tracer, use the source file. if filename.endswith("$py.class"): filename = filename[:-9] + ".py" canonical = self.file_locator.canonical_filename(filename) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. 
if self.source_match: if not self.source_match.match(canonical): return None, "falls outside the --source trees" elif self.include_match: if not self.include_match.match(canonical): return None, "falls outside the --include trees" else: # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(canonical): return None, "is in the stdlib" # We exclude the coverage code itself, since a little of it will be # measured otherwise. if self.cover_match and self.cover_match.match(canonical): return None, "is part of coverage.py" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(canonical): return None, "is inside an --omit pattern" return canonical, "because we love you"
[ "Decide", "whether", "to", "trace", "execution", "in", "filename", "with", "a", "reason", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L222-L288
[ "def", "_should_trace_with_reason", "(", "self", ",", "filename", ",", "frame", ")", ":", "if", "not", "filename", ":", "# Empty string is pretty useless", "return", "None", ",", "\"empty string isn't a filename\"", "if", "filename", ".", "startswith", "(", "'<'", ")", ":", "# Lots of non-file execution is represented with artificial", "# filenames like \"<string>\", \"<doctest readme.txt[0]>\", or", "# \"<exec_function>\". Don't ever trace these executions, since we", "# can't do anything with the data later anyway.", "return", "None", ",", "\"not a real filename\"", "self", ".", "_check_for_packages", "(", ")", "# Compiled Python files have two filenames: frame.f_code.co_filename is", "# the filename at the time the .pyc was compiled. The second name is", "# __file__, which is where the .pyc was actually loaded from. Since", "# .pyc files can be moved after compilation (for example, by being", "# installed), we look for __file__ in the frame and prefer it to the", "# co_filename value.", "dunder_file", "=", "frame", ".", "f_globals", ".", "get", "(", "'__file__'", ")", "if", "dunder_file", ":", "filename", "=", "self", ".", "_source_for_file", "(", "dunder_file", ")", "# Jython reports the .class file to the tracer, use the source file.", "if", "filename", ".", "endswith", "(", "\"$py.class\"", ")", ":", "filename", "=", "filename", "[", ":", "-", "9", "]", "+", "\".py\"", "canonical", "=", "self", ".", "file_locator", ".", "canonical_filename", "(", "filename", ")", "# If the user specified source or include, then that's authoritative", "# about the outer bound of what to measure and we don't have to apply", "# any canned exclusions. 
If they didn't, then we have to exclude the", "# stdlib and coverage.py directories.", "if", "self", ".", "source_match", ":", "if", "not", "self", ".", "source_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"falls outside the --source trees\"", "elif", "self", ".", "include_match", ":", "if", "not", "self", ".", "include_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"falls outside the --include trees\"", "else", ":", "# If we aren't supposed to trace installed code, then check if this", "# is near the Python standard library and skip it if so.", "if", "self", ".", "pylib_match", "and", "self", ".", "pylib_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is in the stdlib\"", "# We exclude the coverage code itself, since a little of it will be", "# measured otherwise.", "if", "self", ".", "cover_match", "and", "self", ".", "cover_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is part of coverage.py\"", "# Check the file against the omit pattern.", "if", "self", ".", "omit_match", "and", "self", ".", "omit_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is inside an --omit pattern\"", "return", "canonical", ",", "\"because we love you\"" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._should_trace
Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns just the decision.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns just the decision. """ canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = "Not tracing %r: %s" % (filename, reason) else: msg = "Tracing %r" % (filename,) self.debug.write(msg) return canonical
def _should_trace(self, filename, frame): """Decide whether to trace execution in `filename`. Calls `_should_trace_with_reason`, and returns just the decision. """ canonical, reason = self._should_trace_with_reason(filename, frame) if self.debug.should('trace'): if not canonical: msg = "Not tracing %r: %s" % (filename, reason) else: msg = "Tracing %r" % (filename,) self.debug.write(msg) return canonical
[ "Decide", "whether", "to", "trace", "execution", "in", "filename", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L290-L303
[ "def", "_should_trace", "(", "self", ",", "filename", ",", "frame", ")", ":", "canonical", ",", "reason", "=", "self", ".", "_should_trace_with_reason", "(", "filename", ",", "frame", ")", "if", "self", ".", "debug", ".", "should", "(", "'trace'", ")", ":", "if", "not", "canonical", ":", "msg", "=", "\"Not tracing %r: %s\"", "%", "(", "filename", ",", "reason", ")", "else", ":", "msg", "=", "\"Tracing %r\"", "%", "(", "filename", ",", ")", "self", ".", "debug", ".", "write", "(", "msg", ")", "return", "canonical" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._warn
Use `msg` as a warning.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg)
def _warn(self, msg): """Use `msg` as a warning.""" self._warnings.append(msg) sys.stderr.write("Coverage.py warning: %s\n" % msg)
[ "Use", "msg", "as", "a", "warning", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L305-L308
[ "def", "_warn", "(", "self", ",", "msg", ")", ":", "self", ".", "_warnings", ".", "append", "(", "msg", ")", "sys", ".", "stderr", ".", "write", "(", "\"Coverage.py warning: %s\\n\"", "%", "msg", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._check_for_packages
Update the source_match matcher with latest imported packages.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): # This is actually a package, return the directory. pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn("Module %s has no Python source." % pkg) for pkg in found: self.source_pkgs.remove(pkg)
def _check_for_packages(self): """Update the source_match matcher with latest imported packages.""" # Our self.source_pkgs attribute is a list of package names we want to # measure. Each time through here, we see if we've imported any of # them yet. If so, we add its file to source_match, and we don't have # to look for that package any more. if self.source_pkgs: found = [] for pkg in self.source_pkgs: try: mod = sys.modules[pkg] except KeyError: continue found.append(pkg) try: pkg_file = mod.__file__ except AttributeError: pkg_file = None else: d, f = os.path.split(pkg_file) if f.startswith('__init__'): # This is actually a package, return the directory. pkg_file = d else: pkg_file = self._source_for_file(pkg_file) pkg_file = self.file_locator.canonical_filename(pkg_file) if not os.path.exists(pkg_file): pkg_file = None if pkg_file: self.source.append(pkg_file) self.source_match.add(pkg_file) else: self._warn("Module %s has no Python source." % pkg) for pkg in found: self.source_pkgs.remove(pkg)
[ "Update", "the", "source_match", "matcher", "with", "latest", "imported", "packages", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L310-L348
[ "def", "_check_for_packages", "(", "self", ")", ":", "# Our self.source_pkgs attribute is a list of package names we want to", "# measure. Each time through here, we see if we've imported any of", "# them yet. If so, we add its file to source_match, and we don't have", "# to look for that package any more.", "if", "self", ".", "source_pkgs", ":", "found", "=", "[", "]", "for", "pkg", "in", "self", ".", "source_pkgs", ":", "try", ":", "mod", "=", "sys", ".", "modules", "[", "pkg", "]", "except", "KeyError", ":", "continue", "found", ".", "append", "(", "pkg", ")", "try", ":", "pkg_file", "=", "mod", ".", "__file__", "except", "AttributeError", ":", "pkg_file", "=", "None", "else", ":", "d", ",", "f", "=", "os", ".", "path", ".", "split", "(", "pkg_file", ")", "if", "f", ".", "startswith", "(", "'__init__'", ")", ":", "# This is actually a package, return the directory.", "pkg_file", "=", "d", "else", ":", "pkg_file", "=", "self", ".", "_source_for_file", "(", "pkg_file", ")", "pkg_file", "=", "self", ".", "file_locator", ".", "canonical_filename", "(", "pkg_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "pkg_file", ")", ":", "pkg_file", "=", "None", "if", "pkg_file", ":", "self", ".", "source", ".", "append", "(", "pkg_file", ")", "self", ".", "source_match", ".", "add", "(", "pkg_file", ")", "else", ":", "self", ".", "_warn", "(", "\"Module %s has no Python source.\"", "%", "pkg", ")", "for", "pkg", "in", "found", ":", "self", ".", "source_pkgs", ".", "remove", "(", "pkg", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.start
Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. """ if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self.auto_data: self.load() # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. if self.debug.should('config'): self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write("Debugging info:") self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True
def start(self): """Start measuring code coverage. Coverage measurement actually occurs in functions called after `start` is invoked. Statements in the same scope as `start` won't be measured. Once you invoke `start`, you must also call `stop` eventually, or your process might not shut down cleanly. """ if self.run_suffix: # Calling start() means we're running code, so use the run_suffix # as the data_suffix when we eventually save the data. self.data_suffix = self.run_suffix if self.auto_data: self.load() # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) else: if self.cover_dir: self.cover_match = TreeMatcher([self.cover_dir]) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: self.include_match = FnmatchMatcher(self.include) if self.omit: self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. if self.debug.should('config'): self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) self.debug.write_formatted_info(config_info) if self.debug.should('sys'): self.debug.write("Debugging info:") self.debug.write_formatted_info(self.sysinfo()) self.collector.start() self._started = True self._measured = True
[ "Start", "measuring", "code", "coverage", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L363-L405
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "run_suffix", ":", "# Calling start() means we're running code, so use the run_suffix", "# as the data_suffix when we eventually save the data.", "self", ".", "data_suffix", "=", "self", ".", "run_suffix", "if", "self", ".", "auto_data", ":", "self", ".", "load", "(", ")", "# Create the matchers we need for _should_trace", "if", "self", ".", "source", "or", "self", ".", "source_pkgs", ":", "self", ".", "source_match", "=", "TreeMatcher", "(", "self", ".", "source", ")", "else", ":", "if", "self", ".", "cover_dir", ":", "self", ".", "cover_match", "=", "TreeMatcher", "(", "[", "self", ".", "cover_dir", "]", ")", "if", "self", ".", "pylib_dirs", ":", "self", ".", "pylib_match", "=", "TreeMatcher", "(", "self", ".", "pylib_dirs", ")", "if", "self", ".", "include", ":", "self", ".", "include_match", "=", "FnmatchMatcher", "(", "self", ".", "include", ")", "if", "self", ".", "omit", ":", "self", ".", "omit_match", "=", "FnmatchMatcher", "(", "self", ".", "omit", ")", "# The user may want to debug things, show info if desired.", "if", "self", ".", "debug", ".", "should", "(", "'config'", ")", ":", "self", ".", "debug", ".", "write", "(", "\"Configuration values:\"", ")", "config_info", "=", "sorted", "(", "self", ".", "config", ".", "__dict__", ".", "items", "(", ")", ")", "self", ".", "debug", ".", "write_formatted_info", "(", "config_info", ")", "if", "self", ".", "debug", ".", "should", "(", "'sys'", ")", ":", "self", ".", "debug", ".", "write", "(", "\"Debugging info:\"", ")", "self", ".", "debug", ".", "write_formatted_info", "(", "self", ".", "sysinfo", "(", ")", ")", "self", ".", "collector", ".", "start", "(", ")", "self", ".", "_started", "=", "True", "self", ".", "_measured", "=", "True" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._atexit
Clean up on process shutdown.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self.auto_data: self.save()
def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() if self.auto_data: self.save()
[ "Clean", "up", "on", "process", "shutdown", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L412-L417
[ "def", "_atexit", "(", "self", ")", ":", "if", "self", ".", "_started", ":", "self", ".", "stop", "(", ")", "if", "self", ".", "auto_data", ":", "self", ".", "save", "(", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.exclude
Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale()
def exclude(self, regex, which='exclude'): """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale()
[ "Exclude", "source", "lines", "from", "execution", "consideration", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L434-L451
[ "def", "exclude", "(", "self", ",", "regex", ",", "which", "=", "'exclude'", ")", ":", "excl_list", "=", "getattr", "(", "self", ".", "config", ",", "which", "+", "\"_list\"", ")", "excl_list", ".", "append", "(", "regex", ")", "self", ".", "_exclude_regex_stale", "(", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._exclude_regex
Return a compiled regex for the given exclusion list.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which]
def _exclude_regex(self, which): """Return a compiled regex for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which]
[ "Return", "a", "compiled", "regex", "for", "the", "given", "exclusion", "list", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L457-L462
[ "def", "_exclude_regex", "(", "self", ",", "which", ")", ":", "if", "which", "not", "in", "self", ".", "_exclude_re", ":", "excl_list", "=", "getattr", "(", "self", ".", "config", ",", "which", "+", "\"_list\"", ")", "self", ".", "_exclude_re", "[", "which", "]", "=", "join_regex", "(", "excl_list", ")", "return", "self", ".", "_exclude_re", "[", "which", "]" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.save
Save the collected coverage data to the data file.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. extra = "" if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = "." + test_name data_suffix = "%s%s.%s.%06d" % ( socket.gethostname(), extra, os.getpid(), random.randint(0, 999999) ) self._harvest_data() self.data.write(suffix=data_suffix)
def save(self): """Save the collected coverage data to the data file.""" data_suffix = self.data_suffix if data_suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. extra = "" if _TEST_NAME_FILE: f = open(_TEST_NAME_FILE) test_name = f.read() f.close() extra = "." + test_name data_suffix = "%s%s.%s.%06d" % ( socket.gethostname(), extra, os.getpid(), random.randint(0, 999999) ) self._harvest_data() self.data.write(suffix=data_suffix)
[ "Save", "the", "collected", "coverage", "data", "to", "the", "data", "file", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L473-L493
[ "def", "save", "(", "self", ")", ":", "data_suffix", "=", "self", ".", "data_suffix", "if", "data_suffix", "is", "True", ":", "# If data_suffix was a simple true value, then make a suffix with", "# plenty of distinguishing information. We do this here in", "# `save()` at the last minute so that the pid will be correct even", "# if the process forks.", "extra", "=", "\"\"", "if", "_TEST_NAME_FILE", ":", "f", "=", "open", "(", "_TEST_NAME_FILE", ")", "test_name", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "extra", "=", "\".\"", "+", "test_name", "data_suffix", "=", "\"%s%s.%s.%06d\"", "%", "(", "socket", ".", "gethostname", "(", ")", ",", "extra", ",", "os", ".", "getpid", "(", ")", ",", "random", ".", "randint", "(", "0", ",", "999999", ")", ")", "self", ".", "_harvest_data", "(", ")", "self", ".", "data", ".", "write", "(", "suffix", "=", "data_suffix", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.combine
Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases)
def combine(self): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. """ aliases = None if self.config.paths: aliases = PathAliases(self.file_locator) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) self.data.combine_parallel_data(aliases=aliases)
[ "Combine", "together", "a", "number", "of", "similarly", "-", "named", "coverage", "data", "files", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L495-L510
[ "def", "combine", "(", "self", ")", ":", "aliases", "=", "None", "if", "self", ".", "config", ".", "paths", ":", "aliases", "=", "PathAliases", "(", "self", ".", "file_locator", ")", "for", "paths", "in", "self", ".", "config", ".", "paths", ".", "values", "(", ")", ":", "result", "=", "paths", "[", "0", "]", "for", "pattern", "in", "paths", "[", "1", ":", "]", ":", "aliases", ".", "add", "(", "pattern", ",", "result", ")", "self", ".", "data", ".", "combine_parallel_data", "(", "aliases", "=", "aliases", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._harvest_data
Get the collected data and reset the collector. Also warn about various problems collecting data.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(py_file) self._measured = False
def _harvest_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. """ if not self._measured: return self.data.add_line_data(self.collector.get_line_data()) self.data.add_arc_data(self.collector.get_arc_data()) self.collector.reset() # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: self._warn("Module %s was never imported." % pkg) # Find out if we got any data. summary = self.data.summary() if not summary and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): py_file = self.file_locator.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back # in as unexecuted. continue self.data.touch_file(py_file) self._measured = False
[ "Get", "the", "collected", "data", "and", "reset", "the", "collector", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L512-L548
[ "def", "_harvest_data", "(", "self", ")", ":", "if", "not", "self", ".", "_measured", ":", "return", "self", ".", "data", ".", "add_line_data", "(", "self", ".", "collector", ".", "get_line_data", "(", ")", ")", "self", ".", "data", ".", "add_arc_data", "(", "self", ".", "collector", ".", "get_arc_data", "(", ")", ")", "self", ".", "collector", ".", "reset", "(", ")", "# If there are still entries in the source_pkgs list, then we never", "# encountered those packages.", "if", "self", ".", "_warn_unimported_source", ":", "for", "pkg", "in", "self", ".", "source_pkgs", ":", "self", ".", "_warn", "(", "\"Module %s was never imported.\"", "%", "pkg", ")", "# Find out if we got any data.", "summary", "=", "self", ".", "data", ".", "summary", "(", ")", "if", "not", "summary", "and", "self", ".", "_warn_no_data", ":", "self", ".", "_warn", "(", "\"No data was collected.\"", ")", "# Find files that were never executed at all.", "for", "src", "in", "self", ".", "source", ":", "for", "py_file", "in", "find_python_files", "(", "src", ")", ":", "py_file", "=", "self", ".", "file_locator", ".", "canonical_filename", "(", "py_file", ")", "if", "self", ".", "omit_match", "and", "self", ".", "omit_match", ".", "match", "(", "py_file", ")", ":", "# Turns out this file was omitted, so don't pull it back", "# in as unexecuted.", "continue", "self", ".", "data", ".", "touch_file", "(", "py_file", ")", "self", ".", "_measured", "=", "False" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.analysis
Like `analysis2` but doesn't return excluded line numbers.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf
def analysis(self, morf): """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf
[ "Like", "analysis2", "but", "doesn", "t", "return", "excluded", "line", "numbers", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L551-L554
[ "def", "analysis", "(", "self", ",", "morf", ")", ":", "f", ",", "s", ",", "_", ",", "m", ",", "mf", "=", "self", ".", "analysis2", "(", "morf", ")", "return", "f", ",", "s", ",", "m", ",", "mf" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.analysis2
Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), )
def analysis2(self, morf): """Analyze a module. `morf` is a module or a filename. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The filename for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), )
[ "Analyze", "a", "module", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L556-L580
[ "def", "analysis2", "(", "self", ",", "morf", ")", ":", "analysis", "=", "self", ".", "_analyze", "(", "morf", ")", "return", "(", "analysis", ".", "filename", ",", "sorted", "(", "analysis", ".", "statements", ")", ",", "sorted", "(", "analysis", ".", "excluded", ")", ",", "sorted", "(", "analysis", ".", "missing", ")", ",", "analysis", ".", "missing_formatted", "(", ")", ",", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage._analyze
Analyze a single morf or code unit. Returns an `Analysis` object.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it)
def _analyze(self, it): """Analyze a single morf or code unit. Returns an `Analysis` object. """ self._harvest_data() if not isinstance(it, CodeUnit): it = code_unit_factory(it, self.file_locator)[0] return Analysis(self, it)
[ "Analyze", "a", "single", "morf", "or", "code", "unit", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L582-L592
[ "def", "_analyze", "(", "self", ",", "it", ")", ":", "self", ".", "_harvest_data", "(", ")", "if", "not", "isinstance", "(", "it", ",", "CodeUnit", ")", ":", "it", "=", "code_unit_factory", "(", "it", ",", "self", ".", "file_locator", ")", "[", "0", "]", "return", "Analysis", "(", "self", ",", "it", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.report
Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file)
def report(self, morfs=None, show_missing=True, ignore_errors=None, file=None, # pylint: disable=W0622 omit=None, include=None ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. `include` is a list of filename patterns. Modules whose filenames match those patterns will be included in the report. Modules matching `omit` will not be included in the report. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, show_missing=show_missing, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file)
[ "Write", "a", "summary", "report", "to", "file", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L594-L616
[ "def", "report", "(", "self", ",", "morfs", "=", "None", ",", "show_missing", "=", "True", ",", "ignore_errors", "=", "None", ",", "file", "=", "None", ",", "# pylint: disable=W0622", "omit", "=", "None", ",", "include", "=", "None", ")", ":", "self", ".", "_harvest_data", "(", ")", "self", ".", "config", ".", "from_args", "(", "ignore_errors", "=", "ignore_errors", ",", "omit", "=", "omit", ",", "include", "=", "include", ",", "show_missing", "=", "show_missing", ",", ")", "reporter", "=", "SummaryReporter", "(", "self", ",", "self", ".", "config", ")", "return", "reporter", ".", "report", "(", "morfs", ",", "outfile", "=", "file", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.annotate
Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory)
def annotate(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See `coverage.report()` for other arguments. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) reporter = AnnotateReporter(self, self.config) reporter.report(morfs, directory=directory)
[ "Annotate", "a", "list", "of", "modules", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L618-L635
[ "def", "annotate", "(", "self", ",", "morfs", "=", "None", ",", "directory", "=", "None", ",", "ignore_errors", "=", "None", ",", "omit", "=", "None", ",", "include", "=", "None", ")", ":", "self", ".", "_harvest_data", "(", ")", "self", ".", "config", ".", "from_args", "(", "ignore_errors", "=", "ignore_errors", ",", "omit", "=", "omit", ",", "include", "=", "include", ")", "reporter", "=", "AnnotateReporter", "(", "self", ",", "self", ".", "config", ")", "reporter", ".", "report", "(", "morfs", ",", "directory", "=", "directory", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.html_report
Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs)
def html_report(self, morfs=None, directory=None, ignore_errors=None, omit=None, include=None, extra_css=None, title=None): """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, ) reporter = HtmlReporter(self, self.config) return reporter.report(morfs)
[ "Generate", "an", "HTML", "report", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L637-L662
[ "def", "html_report", "(", "self", ",", "morfs", "=", "None", ",", "directory", "=", "None", ",", "ignore_errors", "=", "None", ",", "omit", "=", "None", ",", "include", "=", "None", ",", "extra_css", "=", "None", ",", "title", "=", "None", ")", ":", "self", ".", "_harvest_data", "(", ")", "self", ".", "config", ".", "from_args", "(", "ignore_errors", "=", "ignore_errors", ",", "omit", "=", "omit", ",", "include", "=", "include", ",", "html_dir", "=", "directory", ",", "extra_css", "=", "extra_css", ",", "html_title", "=", "title", ",", ")", "reporter", "=", "HtmlReporter", "(", "self", ",", "self", ".", "config", ")", "return", "reporter", ".", "report", "(", "morfs", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.xml_report
Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered. """ self._harvest_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, ) file_to_close = None delete_file = False if self.config.xml_output: if self.config.xml_output == '-': outfile = sys.stdout else: outfile = open(self.config.xml_output, "w") file_to_close = outfile try: try: reporter = XmlReporter(self, self.config) return reporter.report(morfs, outfile=outfile) except CoverageException: delete_file = True raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output)
[ "Generate", "an", "XML", "report", "of", "coverage", "results", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L664-L702
[ "def", "xml_report", "(", "self", ",", "morfs", "=", "None", ",", "outfile", "=", "None", ",", "ignore_errors", "=", "None", ",", "omit", "=", "None", ",", "include", "=", "None", ")", ":", "self", ".", "_harvest_data", "(", ")", "self", ".", "config", ".", "from_args", "(", "ignore_errors", "=", "ignore_errors", ",", "omit", "=", "omit", ",", "include", "=", "include", ",", "xml_output", "=", "outfile", ",", ")", "file_to_close", "=", "None", "delete_file", "=", "False", "if", "self", ".", "config", ".", "xml_output", ":", "if", "self", ".", "config", ".", "xml_output", "==", "'-'", ":", "outfile", "=", "sys", ".", "stdout", "else", ":", "outfile", "=", "open", "(", "self", ".", "config", ".", "xml_output", ",", "\"w\"", ")", "file_to_close", "=", "outfile", "try", ":", "try", ":", "reporter", "=", "XmlReporter", "(", "self", ",", "self", ".", "config", ")", "return", "reporter", ".", "report", "(", "morfs", ",", "outfile", "=", "outfile", ")", "except", "CoverageException", ":", "delete_file", "=", "True", "raise", "finally", ":", "if", "file_to_close", ":", "file_to_close", ".", "close", "(", ")", "if", "delete_file", ":", "file_be_gone", "(", "self", ".", "config", ".", "xml_output", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
coverage.sysinfo
Return a list of (key, value) pairs showing internal information.
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = "unknown" info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([ ("%s = %s" % (k, v)) for k, v in iitems(os.environ) if re.search(r"^COV|^PY", k) ])), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
def sysinfo(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod import platform, re try: implementation = platform.python_implementation() except AttributeError: implementation = "unknown" info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), ('cover_dir', self.cover_dir), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), ('data_path', self.data.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', implementation), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), ('environment', sorted([ ("%s = %s" % (k, v)) for k, v in iitems(os.environ) if re.search(r"^COV|^PY", k) ])), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] if self.source_match: info.append(('source_match', self.source_match.info())) if self.include_match: info.append(('include_match', self.include_match.info())) if self.omit_match: info.append(('omit_match', self.omit_match.info())) if self.cover_match: info.append(('cover_match', self.cover_match.info())) if self.pylib_match: info.append(('pylib_match', self.pylib_match.info())) return info
[ "Return", "a", "list", "of", "(", "key", "value", ")", "pairs", "showing", "internal", "information", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L704-L747
[ "def", "sysinfo", "(", "self", ")", ":", "import", "coverage", "as", "covmod", "import", "platform", ",", "re", "try", ":", "implementation", "=", "platform", ".", "python_implementation", "(", ")", "except", "AttributeError", ":", "implementation", "=", "\"unknown\"", "info", "=", "[", "(", "'version'", ",", "covmod", ".", "__version__", ")", ",", "(", "'coverage'", ",", "covmod", ".", "__file__", ")", ",", "(", "'cover_dir'", ",", "self", ".", "cover_dir", ")", ",", "(", "'pylib_dirs'", ",", "self", ".", "pylib_dirs", ")", ",", "(", "'tracer'", ",", "self", ".", "collector", ".", "tracer_name", "(", ")", ")", ",", "(", "'config_files'", ",", "self", ".", "config", ".", "attempted_config_files", ")", ",", "(", "'configs_read'", ",", "self", ".", "config", ".", "config_files", ")", ",", "(", "'data_path'", ",", "self", ".", "data", ".", "filename", ")", ",", "(", "'python'", ",", "sys", ".", "version", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", ",", "(", "'platform'", ",", "platform", ".", "platform", "(", ")", ")", ",", "(", "'implementation'", ",", "implementation", ")", ",", "(", "'executable'", ",", "sys", ".", "executable", ")", ",", "(", "'cwd'", ",", "os", ".", "getcwd", "(", ")", ")", ",", "(", "'path'", ",", "sys", ".", "path", ")", ",", "(", "'environment'", ",", "sorted", "(", "[", "(", "\"%s = %s\"", "%", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "iitems", "(", "os", ".", "environ", ")", "if", "re", ".", "search", "(", "r\"^COV|^PY\"", ",", "k", ")", "]", ")", ")", ",", "(", "'command_line'", ",", "\" \"", ".", "join", "(", "getattr", "(", "sys", ",", "'argv'", ",", "[", "'???'", "]", ")", ")", ")", ",", "]", "if", "self", ".", "source_match", ":", "info", ".", "append", "(", "(", "'source_match'", ",", "self", ".", "source_match", ".", "info", "(", ")", ")", ")", "if", "self", ".", "include_match", ":", "info", ".", "append", "(", "(", "'include_match'", ",", "self", ".", "include_match", ".", "info", "(", ")", ")", ")", "if", 
"self", ".", "omit_match", ":", "info", ".", "append", "(", "(", "'omit_match'", ",", "self", ".", "omit_match", ".", "info", "(", ")", ")", ")", "if", "self", ".", "cover_match", ":", "info", ".", "append", "(", "(", "'cover_match'", ",", "self", ".", "cover_match", ".", "info", "(", ")", ")", ")", "if", "self", ".", "pylib_match", ":", "info", ".", "append", "(", "(", "'pylib_match'", ",", "self", ".", "pylib_match", ".", "info", "(", ")", ")", ")", "return", "info" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
display
Display a Python object in all frontends. By default all representations will be computed and sent to the frontends. Frontends can decide which representation is used and how. Parameters ---------- objs : tuple of objects The Python objects to display. include : list or tuple, optional A list of format type strings (MIME types) to include in the format data dict. If this is set *only* the format types included in this list will be computed. exclude : list or tuple, optional A list of format type string (MIME types) to exclue in the format data dict. If this is set all format types will be computed, except for those included in this argument.
environment/lib/python2.7/site-packages/IPython/core/display.py
def display(*objs, **kwargs): """Display a Python object in all frontends. By default all representations will be computed and sent to the frontends. Frontends can decide which representation is used and how. Parameters ---------- objs : tuple of objects The Python objects to display. include : list or tuple, optional A list of format type strings (MIME types) to include in the format data dict. If this is set *only* the format types included in this list will be computed. exclude : list or tuple, optional A list of format type string (MIME types) to exclue in the format data dict. If this is set all format types will be computed, except for those included in this argument. """ include = kwargs.get('include') exclude = kwargs.get('exclude') from IPython.core.interactiveshell import InteractiveShell inst = InteractiveShell.instance() format = inst.display_formatter.format publish = inst.display_pub.publish for obj in objs: format_dict = format(obj, include=include, exclude=exclude) publish('IPython.core.display.display', format_dict)
def display(*objs, **kwargs): """Display a Python object in all frontends. By default all representations will be computed and sent to the frontends. Frontends can decide which representation is used and how. Parameters ---------- objs : tuple of objects The Python objects to display. include : list or tuple, optional A list of format type strings (MIME types) to include in the format data dict. If this is set *only* the format types included in this list will be computed. exclude : list or tuple, optional A list of format type string (MIME types) to exclue in the format data dict. If this is set all format types will be computed, except for those included in this argument. """ include = kwargs.get('include') exclude = kwargs.get('exclude') from IPython.core.interactiveshell import InteractiveShell inst = InteractiveShell.instance() format = inst.display_formatter.format publish = inst.display_pub.publish for obj in objs: format_dict = format(obj, include=include, exclude=exclude) publish('IPython.core.display.display', format_dict)
[ "Display", "a", "Python", "object", "in", "all", "frontends", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L35-L64
[ "def", "display", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "include", "=", "kwargs", ".", "get", "(", "'include'", ")", "exclude", "=", "kwargs", ".", "get", "(", "'exclude'", ")", "from", "IPython", ".", "core", ".", "interactiveshell", "import", "InteractiveShell", "inst", "=", "InteractiveShell", ".", "instance", "(", ")", "format", "=", "inst", ".", "display_formatter", ".", "format", "publish", "=", "inst", ".", "display_pub", ".", "publish", "for", "obj", "in", "objs", ":", "format_dict", "=", "format", "(", "obj", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ")", "publish", "(", "'IPython.core.display.display'", ",", "format_dict", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_pretty
Display the pretty (default) representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw text data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_pretty(*objs, **kwargs): """Display the pretty (default) representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw text data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_pretty(obj) else: display(*objs, include=['text/plain'])
def display_pretty(*objs, **kwargs): """Display the pretty (default) representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw text data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_pretty(obj) else: display(*objs, include=['text/plain'])
[ "Display", "the", "pretty", "(", "default", ")", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L67-L84
[ "def", "display_pretty", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_pretty", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_html
Display the HTML representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw HTML data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_html(*objs, **kwargs): """Display the HTML representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw HTML data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_html(obj) else: display(*objs, include=['text/plain','text/html'])
def display_html(*objs, **kwargs): """Display the HTML representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw HTML data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_html(obj) else: display(*objs, include=['text/plain','text/html'])
[ "Display", "the", "HTML", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L87-L104
[ "def", "display_html", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_html", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'text/html'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_svg
Display the SVG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw svg data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_svg(*objs, **kwargs): """Display the SVG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw svg data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_svg(obj) else: display(*objs, include=['text/plain','image/svg+xml'])
def display_svg(*objs, **kwargs): """Display the SVG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw svg data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_svg(obj) else: display(*objs, include=['text/plain','image/svg+xml'])
[ "Display", "the", "SVG", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L107-L124
[ "def", "display_svg", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_svg", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'image/svg+xml'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_png
Display the PNG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw png data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_png(*objs, **kwargs): """Display the PNG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw png data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_png(obj) else: display(*objs, include=['text/plain','image/png'])
def display_png(*objs, **kwargs): """Display the PNG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw png data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_png(obj) else: display(*objs, include=['text/plain','image/png'])
[ "Display", "the", "PNG", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L127-L144
[ "def", "display_png", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_png", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'image/png'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_jpeg
Display the JPEG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw JPEG data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_jpeg(*objs, **kwargs): """Display the JPEG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw JPEG data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_jpeg(obj) else: display(*objs, include=['text/plain','image/jpeg'])
def display_jpeg(*objs, **kwargs): """Display the JPEG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw JPEG data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_jpeg(obj) else: display(*objs, include=['text/plain','image/jpeg'])
[ "Display", "the", "JPEG", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L147-L164
[ "def", "display_jpeg", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_jpeg", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'image/jpeg'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_latex
Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_latex(*objs, **kwargs): """Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_latex(obj) else: display(*objs, include=['text/plain','text/latex'])
def display_latex(*objs, **kwargs): """Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_latex(obj) else: display(*objs, include=['text/plain','text/latex'])
[ "Display", "the", "LaTeX", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L167-L184
[ "def", "display_latex", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_latex", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'text/latex'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_json
Display the JSON representation of an object. Note that not many frontends support displaying JSON. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw json data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_json(*objs, **kwargs): """Display the JSON representation of an object. Note that not many frontends support displaying JSON. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw json data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_json(obj) else: display(*objs, include=['text/plain','application/json'])
def display_json(*objs, **kwargs): """Display the JSON representation of an object. Note that not many frontends support displaying JSON. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw json data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_json(obj) else: display(*objs, include=['text/plain','application/json'])
[ "Display", "the", "JSON", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L187-L206
[ "def", "display_json", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_json", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'application/json'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
display_javascript
Display the Javascript representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw javascript data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
environment/lib/python2.7/site-packages/IPython/core/display.py
def display_javascript(*objs, **kwargs): """Display the Javascript representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw javascript data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_javascript(obj) else: display(*objs, include=['text/plain','application/javascript'])
def display_javascript(*objs, **kwargs): """Display the Javascript representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw javascript data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False] """ raw = kwargs.pop('raw',False) if raw: for obj in objs: publish_javascript(obj) else: display(*objs, include=['text/plain','application/javascript'])
[ "Display", "the", "Javascript", "representation", "of", "an", "object", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L209-L226
[ "def", "display_javascript", "(", "*", "objs", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "if", "raw", ":", "for", "obj", "in", "objs", ":", "publish_javascript", "(", "obj", ")", "else", ":", "display", "(", "*", "objs", ",", "include", "=", "[", "'text/plain'", ",", "'application/javascript'", "]", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
clear_output
Clear the output of the current cell receiving output. Optionally, each of stdout/stderr or other non-stream data (e.g. anything produced by display()) can be excluded from the clear event. By default, everything is cleared. Parameters ---------- stdout : bool [default: True] Whether to clear stdout. stderr : bool [default: True] Whether to clear stderr. other : bool [default: True] Whether to clear everything else that is not stdout/stderr (e.g. figures,images,HTML, any result of display()).
environment/lib/python2.7/site-packages/IPython/core/display.py
def clear_output(stdout=True, stderr=True, other=True): """Clear the output of the current cell receiving output. Optionally, each of stdout/stderr or other non-stream data (e.g. anything produced by display()) can be excluded from the clear event. By default, everything is cleared. Parameters ---------- stdout : bool [default: True] Whether to clear stdout. stderr : bool [default: True] Whether to clear stderr. other : bool [default: True] Whether to clear everything else that is not stdout/stderr (e.g. figures,images,HTML, any result of display()). """ from IPython.core.interactiveshell import InteractiveShell if InteractiveShell.initialized(): InteractiveShell.instance().display_pub.clear_output( stdout=stdout, stderr=stderr, other=other, ) else: from IPython.utils import io if stdout: print('\033[2K\r', file=io.stdout, end='') io.stdout.flush() if stderr: print('\033[2K\r', file=io.stderr, end='') io.stderr.flush()
def clear_output(stdout=True, stderr=True, other=True): """Clear the output of the current cell receiving output. Optionally, each of stdout/stderr or other non-stream data (e.g. anything produced by display()) can be excluded from the clear event. By default, everything is cleared. Parameters ---------- stdout : bool [default: True] Whether to clear stdout. stderr : bool [default: True] Whether to clear stderr. other : bool [default: True] Whether to clear everything else that is not stdout/stderr (e.g. figures,images,HTML, any result of display()). """ from IPython.core.interactiveshell import InteractiveShell if InteractiveShell.initialized(): InteractiveShell.instance().display_pub.clear_output( stdout=stdout, stderr=stderr, other=other, ) else: from IPython.utils import io if stdout: print('\033[2K\r', file=io.stdout, end='') io.stdout.flush() if stderr: print('\033[2K\r', file=io.stderr, end='') io.stderr.flush()
[ "Clear", "the", "output", "of", "the", "current", "cell", "receiving", "output", ".", "Optionally", "each", "of", "stdout", "/", "stderr", "or", "other", "non", "-", "stream", "data", "(", "e", ".", "g", ".", "anything", "produced", "by", "display", "()", ")", "can", "be", "excluded", "from", "the", "clear", "event", ".", "By", "default", "everything", "is", "cleared", ".", "Parameters", "----------", "stdout", ":", "bool", "[", "default", ":", "True", "]", "Whether", "to", "clear", "stdout", ".", "stderr", ":", "bool", "[", "default", ":", "True", "]", "Whether", "to", "clear", "stderr", ".", "other", ":", "bool", "[", "default", ":", "True", "]", "Whether", "to", "clear", "everything", "else", "that", "is", "not", "stdout", "/", "stderr", "(", "e", ".", "g", ".", "figures", "images", "HTML", "any", "result", "of", "display", "()", ")", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L503-L533
[ "def", "clear_output", "(", "stdout", "=", "True", ",", "stderr", "=", "True", ",", "other", "=", "True", ")", ":", "from", "IPython", ".", "core", ".", "interactiveshell", "import", "InteractiveShell", "if", "InteractiveShell", ".", "initialized", "(", ")", ":", "InteractiveShell", ".", "instance", "(", ")", ".", "display_pub", ".", "clear_output", "(", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "other", "=", "other", ",", ")", "else", ":", "from", "IPython", ".", "utils", "import", "io", "if", "stdout", ":", "print", "(", "'\\033[2K\\r'", ",", "file", "=", "io", ".", "stdout", ",", "end", "=", "''", ")", "io", ".", "stdout", ".", "flush", "(", ")", "if", "stderr", ":", "print", "(", "'\\033[2K\\r'", ",", "file", "=", "io", ".", "stderr", ",", "end", "=", "''", ")", "io", ".", "stderr", ".", "flush", "(", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
DisplayObject.reload
Reload the raw data from file or URL.
environment/lib/python2.7/site-packages/IPython/core/display.py
def reload(self): """Reload the raw data from file or URL.""" if self.filename is not None: with open(self.filename, self._read_flags) as f: self.data = f.read() elif self.url is not None: try: import urllib2 response = urllib2.urlopen(self.url) self.data = response.read() # extract encoding from header, if there is one: encoding = None for sub in response.headers['content-type'].split(';'): sub = sub.strip() if sub.startswith('charset'): encoding = sub.split('=')[-1].strip() break # decode data, if an encoding was specified if encoding: self.data = self.data.decode(encoding, 'replace') except: self.data = None
def reload(self): """Reload the raw data from file or URL.""" if self.filename is not None: with open(self.filename, self._read_flags) as f: self.data = f.read() elif self.url is not None: try: import urllib2 response = urllib2.urlopen(self.url) self.data = response.read() # extract encoding from header, if there is one: encoding = None for sub in response.headers['content-type'].split(';'): sub = sub.strip() if sub.startswith('charset'): encoding = sub.split('=')[-1].strip() break # decode data, if an encoding was specified if encoding: self.data = self.data.decode(encoding, 'replace') except: self.data = None
[ "Reload", "the", "raw", "data", "from", "file", "or", "URL", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/display.py#L267-L288
[ "def", "reload", "(", "self", ")", ":", "if", "self", ".", "filename", "is", "not", "None", ":", "with", "open", "(", "self", ".", "filename", ",", "self", ".", "_read_flags", ")", "as", "f", ":", "self", ".", "data", "=", "f", ".", "read", "(", ")", "elif", "self", ".", "url", "is", "not", "None", ":", "try", ":", "import", "urllib2", "response", "=", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "self", ".", "data", "=", "response", ".", "read", "(", ")", "# extract encoding from header, if there is one:", "encoding", "=", "None", "for", "sub", "in", "response", ".", "headers", "[", "'content-type'", "]", ".", "split", "(", "';'", ")", ":", "sub", "=", "sub", ".", "strip", "(", ")", "if", "sub", ".", "startswith", "(", "'charset'", ")", ":", "encoding", "=", "sub", ".", "split", "(", "'='", ")", "[", "-", "1", "]", ".", "strip", "(", ")", "break", "# decode data, if an encoding was specified", "if", "encoding", ":", "self", ".", "data", "=", "self", ".", "data", ".", "decode", "(", "encoding", ",", "'replace'", ")", "except", ":", "self", ".", "data", "=", "None" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
pip_version_check
Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix of the pip script path.
virtualEnvironment/lib/python2.7/site-packages/pip/utils/outdated.py
def pip_version_check(session): """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix of the pip script path. """ import pip # imported here to prevent circular imports pypi_version = None try: state = load_selfcheck_statefile() current_time = datetime.datetime.utcnow() # Determine if we need to refresh the state if "last_check" in state.state and "pypi_version" in state.state: last_check = datetime.datetime.strptime( state.state["last_check"], SELFCHECK_DATE_FMT ) if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] # Refresh the version if we need to or just see if we need to warn if pypi_version is None: resp = session.get( PyPI.pip_json_url, headers={"Accept": "application/json"}, ) resp.raise_for_status() pypi_version = resp.json()["info"]["version"] # save that we've performed a check state.save(pypi_version, current_time) pip_version = pkg_resources.parse_version(pip.__version__) # Determine if our pypi_version is older if pip_version < pkg_resources.parse_version(pypi_version): logger.warning( "You are using pip version %s, however version %s is " "available.\nYou should consider upgrading via the " "'pip install --upgrade pip' command." % (pip.__version__, pypi_version) ) except Exception: logger.debug( "There was an error checking the latest version of pip", exc_info=True, )
def pip_version_check(session): """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix of the pip script path. """ import pip # imported here to prevent circular imports pypi_version = None try: state = load_selfcheck_statefile() current_time = datetime.datetime.utcnow() # Determine if we need to refresh the state if "last_check" in state.state and "pypi_version" in state.state: last_check = datetime.datetime.strptime( state.state["last_check"], SELFCHECK_DATE_FMT ) if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] # Refresh the version if we need to or just see if we need to warn if pypi_version is None: resp = session.get( PyPI.pip_json_url, headers={"Accept": "application/json"}, ) resp.raise_for_status() pypi_version = resp.json()["info"]["version"] # save that we've performed a check state.save(pypi_version, current_time) pip_version = pkg_resources.parse_version(pip.__version__) # Determine if our pypi_version is older if pip_version < pkg_resources.parse_version(pypi_version): logger.warning( "You are using pip version %s, however version %s is " "available.\nYou should consider upgrading via the " "'pip install --upgrade pip' command." % (pip.__version__, pypi_version) ) except Exception: logger.debug( "There was an error checking the latest version of pip", exc_info=True, )
[ "Check", "for", "an", "update", "for", "pip", "." ]
tnkteja/myhelp
python
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/pip/utils/outdated.py#L99-L149
[ "def", "pip_version_check", "(", "session", ")", ":", "import", "pip", "# imported here to prevent circular imports", "pypi_version", "=", "None", "try", ":", "state", "=", "load_selfcheck_statefile", "(", ")", "current_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "# Determine if we need to refresh the state", "if", "\"last_check\"", "in", "state", ".", "state", "and", "\"pypi_version\"", "in", "state", ".", "state", ":", "last_check", "=", "datetime", ".", "datetime", ".", "strptime", "(", "state", ".", "state", "[", "\"last_check\"", "]", ",", "SELFCHECK_DATE_FMT", ")", "if", "total_seconds", "(", "current_time", "-", "last_check", ")", "<", "7", "*", "24", "*", "60", "*", "60", ":", "pypi_version", "=", "state", ".", "state", "[", "\"pypi_version\"", "]", "# Refresh the version if we need to or just see if we need to warn", "if", "pypi_version", "is", "None", ":", "resp", "=", "session", ".", "get", "(", "PyPI", ".", "pip_json_url", ",", "headers", "=", "{", "\"Accept\"", ":", "\"application/json\"", "}", ",", ")", "resp", ".", "raise_for_status", "(", ")", "pypi_version", "=", "resp", ".", "json", "(", ")", "[", "\"info\"", "]", "[", "\"version\"", "]", "# save that we've performed a check", "state", ".", "save", "(", "pypi_version", ",", "current_time", ")", "pip_version", "=", "pkg_resources", ".", "parse_version", "(", "pip", ".", "__version__", ")", "# Determine if our pypi_version is older", "if", "pip_version", "<", "pkg_resources", ".", "parse_version", "(", "pypi_version", ")", ":", "logger", ".", "warning", "(", "\"You are using pip version %s, however version %s is \"", "\"available.\\nYou should consider upgrading via the \"", "\"'pip install --upgrade pip' command.\"", "%", "(", "pip", ".", "__version__", ",", "pypi_version", ")", ")", "except", "Exception", ":", "logger", ".", "debug", "(", "\"There was an error checking the latest version of pip\"", ",", "exc_info", "=", "True", ",", ")" ]
fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb
test
_find_cmd
Find the full path to a command using which.
environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py
def _find_cmd(cmd): """Find the full path to a command using which.""" path = sp.Popen(['/usr/bin/env', 'which', cmd], stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] return py3compat.bytes_to_str(path)
def _find_cmd(cmd): """Find the full path to a command using which.""" path = sp.Popen(['/usr/bin/env', 'which', cmd], stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] return py3compat.bytes_to_str(path)
[ "Find", "the", "full", "path", "to", "a", "command", "using", "which", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py#L35-L40
[ "def", "_find_cmd", "(", "cmd", ")", ":", "path", "=", "sp", ".", "Popen", "(", "[", "'/usr/bin/env'", ",", "'which'", ",", "cmd", "]", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "return", "py3compat", ".", "bytes_to_str", "(", "path", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
ProcessHandler.getoutput_pexpect
Run a command and return its stdout/stderr as a string. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- output : str A string containing the combination of stdout and stderr from the subprocess, in whatever order the subprocess originally wrote to its file descriptors (so the order of the information in this string is the correct order as would be seen if running the command in a terminal).
environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py
def getoutput_pexpect(self, cmd): """Run a command and return its stdout/stderr as a string. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- output : str A string containing the combination of stdout and stderr from the subprocess, in whatever order the subprocess originally wrote to its file descriptors (so the order of the information in this string is the correct order as would be seen if running the command in a terminal). """ try: return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') except KeyboardInterrupt: print('^C', file=sys.stderr, end='')
def getoutput_pexpect(self, cmd): """Run a command and return its stdout/stderr as a string. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- output : str A string containing the combination of stdout and stderr from the subprocess, in whatever order the subprocess originally wrote to its file descriptors (so the order of the information in this string is the correct order as would be seen if running the command in a terminal). """ try: return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') except KeyboardInterrupt: print('^C', file=sys.stderr, end='')
[ "Run", "a", "command", "and", "return", "its", "stdout", "/", "stderr", "as", "a", "string", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py#L98-L117
[ "def", "getoutput_pexpect", "(", "self", ",", "cmd", ")", ":", "try", ":", "return", "pexpect", ".", "run", "(", "self", ".", "sh", ",", "args", "=", "[", "'-c'", ",", "cmd", "]", ")", ".", "replace", "(", "'\\r\\n'", ",", "'\\n'", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'^C'", ",", "file", "=", "sys", ".", "stderr", ",", "end", "=", "''", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
ProcessHandler.system
Execute a command in a subshell. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- int : child's exitstatus
environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py
def system(self, cmd): """Execute a command in a subshell. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- int : child's exitstatus """ # Get likely encoding for the output. enc = DEFAULT_ENCODING # Patterns to match on the output, for pexpect. We read input and # allow either a short timeout or EOF patterns = [pexpect.TIMEOUT, pexpect.EOF] # the index of the EOF pattern in the list. # even though we know it's 1, this call means we don't have to worry if # we change the above list, and forget to change this value: EOF_index = patterns.index(pexpect.EOF) # The size of the output stored so far in the process output buffer. # Since pexpect only appends to this buffer, each time we print we # record how far we've printed, so that next time we only print *new* # content from the buffer. out_size = 0 try: # Since we're not really searching the buffer for text patterns, we # can set pexpect's search window to be tiny and it won't matter. # We only search for the 'patterns' timeout or EOF, which aren't in # the text itself. #child = pexpect.spawn(pcmd, searchwindowsize=1) if hasattr(pexpect, 'spawnb'): child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U else: child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect flush = sys.stdout.flush while True: # res is the index of the pattern that caused the match, so we # know whether we've finished (if we matched EOF) or not res_idx = child.expect_list(patterns, self.read_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') flush() if res_idx==EOF_index: break # Update the pointer to what we've already printed out_size = len(child.before) except KeyboardInterrupt: # We need to send ^C to the process. The ascii code for '^C' is 3 # (the character is known as ETX for 'End of Text', see # curses.ascii.ETX). child.sendline(chr(3)) # Read and print any more output the program might produce on its # way out. 
try: out_size = len(child.before) child.expect_list(patterns, self.terminate_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') sys.stdout.flush() except KeyboardInterrupt: # Impatient users tend to type it multiple times pass finally: # Ensure the subprocess really is terminated child.terminate(force=True) # add isalive check, to ensure exitstatus is set: child.isalive() return child.exitstatus
def system(self, cmd): """Execute a command in a subshell. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- int : child's exitstatus """ # Get likely encoding for the output. enc = DEFAULT_ENCODING # Patterns to match on the output, for pexpect. We read input and # allow either a short timeout or EOF patterns = [pexpect.TIMEOUT, pexpect.EOF] # the index of the EOF pattern in the list. # even though we know it's 1, this call means we don't have to worry if # we change the above list, and forget to change this value: EOF_index = patterns.index(pexpect.EOF) # The size of the output stored so far in the process output buffer. # Since pexpect only appends to this buffer, each time we print we # record how far we've printed, so that next time we only print *new* # content from the buffer. out_size = 0 try: # Since we're not really searching the buffer for text patterns, we # can set pexpect's search window to be tiny and it won't matter. # We only search for the 'patterns' timeout or EOF, which aren't in # the text itself. #child = pexpect.spawn(pcmd, searchwindowsize=1) if hasattr(pexpect, 'spawnb'): child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U else: child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect flush = sys.stdout.flush while True: # res is the index of the pattern that caused the match, so we # know whether we've finished (if we matched EOF) or not res_idx = child.expect_list(patterns, self.read_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') flush() if res_idx==EOF_index: break # Update the pointer to what we've already printed out_size = len(child.before) except KeyboardInterrupt: # We need to send ^C to the process. The ascii code for '^C' is 3 # (the character is known as ETX for 'End of Text', see # curses.ascii.ETX). child.sendline(chr(3)) # Read and print any more output the program might produce on its # way out. 
try: out_size = len(child.before) child.expect_list(patterns, self.terminate_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') sys.stdout.flush() except KeyboardInterrupt: # Impatient users tend to type it multiple times pass finally: # Ensure the subprocess really is terminated child.terminate(force=True) # add isalive check, to ensure exitstatus is set: child.isalive() return child.exitstatus
[ "Execute", "a", "command", "in", "a", "subshell", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py#L119-L187
[ "def", "system", "(", "self", ",", "cmd", ")", ":", "# Get likely encoding for the output.", "enc", "=", "DEFAULT_ENCODING", "# Patterns to match on the output, for pexpect. We read input and", "# allow either a short timeout or EOF", "patterns", "=", "[", "pexpect", ".", "TIMEOUT", ",", "pexpect", ".", "EOF", "]", "# the index of the EOF pattern in the list.", "# even though we know it's 1, this call means we don't have to worry if", "# we change the above list, and forget to change this value:", "EOF_index", "=", "patterns", ".", "index", "(", "pexpect", ".", "EOF", ")", "# The size of the output stored so far in the process output buffer.", "# Since pexpect only appends to this buffer, each time we print we", "# record how far we've printed, so that next time we only print *new*", "# content from the buffer.", "out_size", "=", "0", "try", ":", "# Since we're not really searching the buffer for text patterns, we", "# can set pexpect's search window to be tiny and it won't matter.", "# We only search for the 'patterns' timeout or EOF, which aren't in", "# the text itself.", "#child = pexpect.spawn(pcmd, searchwindowsize=1)", "if", "hasattr", "(", "pexpect", ",", "'spawnb'", ")", ":", "child", "=", "pexpect", ".", "spawnb", "(", "self", ".", "sh", ",", "args", "=", "[", "'-c'", ",", "cmd", "]", ")", "# Pexpect-U", "else", ":", "child", "=", "pexpect", ".", "spawn", "(", "self", ".", "sh", ",", "args", "=", "[", "'-c'", ",", "cmd", "]", ")", "# Vanilla Pexpect", "flush", "=", "sys", ".", "stdout", ".", "flush", "while", "True", ":", "# res is the index of the pattern that caused the match, so we", "# know whether we've finished (if we matched EOF) or not", "res_idx", "=", "child", ".", "expect_list", "(", "patterns", ",", "self", ".", "read_timeout", ")", "print", "(", "child", ".", "before", "[", "out_size", ":", "]", ".", "decode", "(", "enc", ",", "'replace'", ")", ",", "end", "=", "''", ")", "flush", "(", ")", "if", "res_idx", "==", "EOF_index", ":", "break", 
"# Update the pointer to what we've already printed", "out_size", "=", "len", "(", "child", ".", "before", ")", "except", "KeyboardInterrupt", ":", "# We need to send ^C to the process. The ascii code for '^C' is 3", "# (the character is known as ETX for 'End of Text', see", "# curses.ascii.ETX).", "child", ".", "sendline", "(", "chr", "(", "3", ")", ")", "# Read and print any more output the program might produce on its", "# way out.", "try", ":", "out_size", "=", "len", "(", "child", ".", "before", ")", "child", ".", "expect_list", "(", "patterns", ",", "self", ".", "terminate_timeout", ")", "print", "(", "child", ".", "before", "[", "out_size", ":", "]", ".", "decode", "(", "enc", ",", "'replace'", ")", ",", "end", "=", "''", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "except", "KeyboardInterrupt", ":", "# Impatient users tend to type it multiple times", "pass", "finally", ":", "# Ensure the subprocess really is terminated", "child", ".", "terminate", "(", "force", "=", "True", ")", "# add isalive check, to ensure exitstatus is set:", "child", ".", "isalive", "(", ")", "return", "child", ".", "exitstatus" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
forward_read_events
Forward read events from an FD over a socket. This method wraps a file in a socket pair, so it can be polled for read events by select (specifically zmq.eventloop.ioloop)
environment/lib/python2.7/site-packages/IPython/parallel/apps/win32support.py
def forward_read_events(fd, context=None): """Forward read events from an FD over a socket. This method wraps a file in a socket pair, so it can be polled for read events by select (specifically zmq.eventloop.ioloop) """ if context is None: context = zmq.Context.instance() push = context.socket(zmq.PUSH) push.setsockopt(zmq.LINGER, -1) pull = context.socket(zmq.PULL) addr='inproc://%s'%uuid.uuid4() push.bind(addr) pull.connect(addr) forwarder = ForwarderThread(push, fd) forwarder.start() return pull
def forward_read_events(fd, context=None): """Forward read events from an FD over a socket. This method wraps a file in a socket pair, so it can be polled for read events by select (specifically zmq.eventloop.ioloop) """ if context is None: context = zmq.Context.instance() push = context.socket(zmq.PUSH) push.setsockopt(zmq.LINGER, -1) pull = context.socket(zmq.PULL) addr='inproc://%s'%uuid.uuid4() push.bind(addr) pull.connect(addr) forwarder = ForwarderThread(push, fd) forwarder.start() return pull
[ "Forward", "read", "events", "from", "an", "FD", "over", "a", "socket", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/win32support.py#L53-L69
[ "def", "forward_read_events", "(", "fd", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "context", "=", "zmq", ".", "Context", ".", "instance", "(", ")", "push", "=", "context", ".", "socket", "(", "zmq", ".", "PUSH", ")", "push", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "-", "1", ")", "pull", "=", "context", ".", "socket", "(", "zmq", ".", "PULL", ")", "addr", "=", "'inproc://%s'", "%", "uuid", ".", "uuid4", "(", ")", "push", ".", "bind", "(", "addr", ")", "pull", ".", "connect", "(", "addr", ")", "forwarder", "=", "ForwarderThread", "(", "push", ",", "fd", ")", "forwarder", ".", "start", "(", ")", "return", "pull" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
ForwarderThread.run
Loop through lines in self.fd, and send them over self.sock.
environment/lib/python2.7/site-packages/IPython/parallel/apps/win32support.py
def run(self): """Loop through lines in self.fd, and send them over self.sock.""" line = self.fd.readline() # allow for files opened in unicode mode if isinstance(line, unicode): send = self.sock.send_unicode else: send = self.sock.send while line: send(line) line = self.fd.readline() # line == '' means EOF self.fd.close() self.sock.close()
def run(self): """Loop through lines in self.fd, and send them over self.sock.""" line = self.fd.readline() # allow for files opened in unicode mode if isinstance(line, unicode): send = self.sock.send_unicode else: send = self.sock.send while line: send(line) line = self.fd.readline() # line == '' means EOF self.fd.close() self.sock.close()
[ "Loop", "through", "lines", "in", "self", ".", "fd", "and", "send", "them", "over", "self", ".", "sock", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/win32support.py#L38-L51
[ "def", "run", "(", "self", ")", ":", "line", "=", "self", ".", "fd", ".", "readline", "(", ")", "# allow for files opened in unicode mode", "if", "isinstance", "(", "line", ",", "unicode", ")", ":", "send", "=", "self", ".", "sock", ".", "send_unicode", "else", ":", "send", "=", "self", ".", "sock", ".", "send", "while", "line", ":", "send", "(", "line", ")", "line", "=", "self", ".", "fd", ".", "readline", "(", ")", "# line == '' means EOF", "self", ".", "fd", ".", "close", "(", ")", "self", ".", "sock", ".", "close", "(", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
find_launcher_class
Return a launcher for a given clsname and kind. Parameters ========== clsname : str The full name of the launcher class, either with or without the module path, or an abbreviation (MPI, SSH, SGE, PBS, LSF, WindowsHPC). kind : str Either 'EngineSet' or 'Controller'.
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py
def find_launcher_class(clsname, kind): """Return a launcher for a given clsname and kind. Parameters ========== clsname : str The full name of the launcher class, either with or without the module path, or an abbreviation (MPI, SSH, SGE, PBS, LSF, WindowsHPC). kind : str Either 'EngineSet' or 'Controller'. """ if '.' not in clsname: # not a module, presume it's the raw name in apps.launcher if kind and kind not in clsname: # doesn't match necessary full class name, assume it's # just 'PBS' or 'MPI' prefix: clsname = clsname + kind + 'Launcher' clsname = 'IPython.parallel.apps.launcher.'+clsname klass = import_item(clsname) return klass
def find_launcher_class(clsname, kind): """Return a launcher for a given clsname and kind. Parameters ========== clsname : str The full name of the launcher class, either with or without the module path, or an abbreviation (MPI, SSH, SGE, PBS, LSF, WindowsHPC). kind : str Either 'EngineSet' or 'Controller'. """ if '.' not in clsname: # not a module, presume it's the raw name in apps.launcher if kind and kind not in clsname: # doesn't match necessary full class name, assume it's # just 'PBS' or 'MPI' prefix: clsname = clsname + kind + 'Launcher' clsname = 'IPython.parallel.apps.launcher.'+clsname klass = import_item(clsname) return klass
[ "Return", "a", "launcher", "for", "a", "given", "clsname", "and", "kind", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py#L112-L132
[ "def", "find_launcher_class", "(", "clsname", ",", "kind", ")", ":", "if", "'.'", "not", "in", "clsname", ":", "# not a module, presume it's the raw name in apps.launcher", "if", "kind", "and", "kind", "not", "in", "clsname", ":", "# doesn't match necessary full class name, assume it's", "# just 'PBS' or 'MPI' prefix:", "clsname", "=", "clsname", "+", "kind", "+", "'Launcher'", "clsname", "=", "'IPython.parallel.apps.launcher.'", "+", "clsname", "klass", "=", "import_item", "(", "clsname", ")", "return", "klass" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
IPClusterStop.start
Start the app for the stop subcommand.
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py
def start(self): """Start the app for the stop subcommand.""" try: pid = self.get_pid_from_file() except PIDFileError: self.log.critical( 'Could not read pid file, cluster is probably not running.' ) # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.remove_pid_file() self.exit(ALREADY_STOPPED) if not self.check_pid(pid): self.log.critical( 'Cluster [pid=%r] is not running.' % pid ) self.remove_pid_file() # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.exit(ALREADY_STOPPED) elif os.name=='posix': sig = self.signal self.log.info( "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig) ) try: os.kill(pid, sig) except OSError: self.log.error("Stopping cluster failed, assuming already dead.", exc_info=True) self.remove_pid_file() elif os.name=='nt': try: # kill the whole tree p = check_call(['taskkill', '-pid', str(pid), '-t', '-f'], stdout=PIPE,stderr=PIPE) except (CalledProcessError, OSError): self.log.error("Stopping cluster failed, assuming already dead.", exc_info=True) self.remove_pid_file()
def start(self): """Start the app for the stop subcommand.""" try: pid = self.get_pid_from_file() except PIDFileError: self.log.critical( 'Could not read pid file, cluster is probably not running.' ) # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.remove_pid_file() self.exit(ALREADY_STOPPED) if not self.check_pid(pid): self.log.critical( 'Cluster [pid=%r] is not running.' % pid ) self.remove_pid_file() # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.exit(ALREADY_STOPPED) elif os.name=='posix': sig = self.signal self.log.info( "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig) ) try: os.kill(pid, sig) except OSError: self.log.error("Stopping cluster failed, assuming already dead.", exc_info=True) self.remove_pid_file() elif os.name=='nt': try: # kill the whole tree p = check_call(['taskkill', '-pid', str(pid), '-t', '-f'], stdout=PIPE,stderr=PIPE) except (CalledProcessError, OSError): self.log.error("Stopping cluster failed, assuming already dead.", exc_info=True) self.remove_pid_file()
[ "Start", "the", "app", "for", "the", "stop", "subcommand", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py#L186-L226
[ "def", "start", "(", "self", ")", ":", "try", ":", "pid", "=", "self", ".", "get_pid_from_file", "(", ")", "except", "PIDFileError", ":", "self", ".", "log", ".", "critical", "(", "'Could not read pid file, cluster is probably not running.'", ")", "# Here I exit with a unusual exit status that other processes", "# can watch for to learn how I existed.", "self", ".", "remove_pid_file", "(", ")", "self", ".", "exit", "(", "ALREADY_STOPPED", ")", "if", "not", "self", ".", "check_pid", "(", "pid", ")", ":", "self", ".", "log", ".", "critical", "(", "'Cluster [pid=%r] is not running.'", "%", "pid", ")", "self", ".", "remove_pid_file", "(", ")", "# Here I exit with a unusual exit status that other processes", "# can watch for to learn how I existed.", "self", ".", "exit", "(", "ALREADY_STOPPED", ")", "elif", "os", ".", "name", "==", "'posix'", ":", "sig", "=", "self", ".", "signal", "self", ".", "log", ".", "info", "(", "\"Stopping cluster [pid=%r] with [signal=%r]\"", "%", "(", "pid", ",", "sig", ")", ")", "try", ":", "os", ".", "kill", "(", "pid", ",", "sig", ")", "except", "OSError", ":", "self", ".", "log", ".", "error", "(", "\"Stopping cluster failed, assuming already dead.\"", ",", "exc_info", "=", "True", ")", "self", ".", "remove_pid_file", "(", ")", "elif", "os", ".", "name", "==", "'nt'", ":", "try", ":", "# kill the whole tree", "p", "=", "check_call", "(", "[", "'taskkill'", ",", "'-pid'", ",", "str", "(", "pid", ")", ",", "'-t'", ",", "'-f'", "]", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "except", "(", "CalledProcessError", ",", "OSError", ")", ":", "self", ".", "log", ".", "error", "(", "\"Stopping cluster failed, assuming already dead.\"", ",", "exc_info", "=", "True", ")", "self", ".", "remove_pid_file", "(", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
IPClusterEngines.build_launcher
import and instantiate a Launcher based on importstring
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py
def build_launcher(self, clsname, kind=None): """import and instantiate a Launcher based on importstring""" try: klass = find_launcher_class(clsname, kind) except (ImportError, KeyError): self.log.fatal("Could not import launcher class: %r"%clsname) self.exit(1) launcher = klass( work_dir=u'.', config=self.config, log=self.log, profile_dir=self.profile_dir.location, cluster_id=self.cluster_id, ) return launcher
def build_launcher(self, clsname, kind=None): """import and instantiate a Launcher based on importstring""" try: klass = find_launcher_class(clsname, kind) except (ImportError, KeyError): self.log.fatal("Could not import launcher class: %r"%clsname) self.exit(1) launcher = klass( work_dir=u'.', config=self.config, log=self.log, profile_dir=self.profile_dir.location, cluster_id=self.cluster_id, ) return launcher
[ "import", "and", "instantiate", "a", "Launcher", "based", "on", "importstring" ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py#L331-L343
[ "def", "build_launcher", "(", "self", ",", "clsname", ",", "kind", "=", "None", ")", ":", "try", ":", "klass", "=", "find_launcher_class", "(", "clsname", ",", "kind", ")", "except", "(", "ImportError", ",", "KeyError", ")", ":", "self", ".", "log", ".", "fatal", "(", "\"Could not import launcher class: %r\"", "%", "clsname", ")", "self", ".", "exit", "(", "1", ")", "launcher", "=", "klass", "(", "work_dir", "=", "u'.'", ",", "config", "=", "self", ".", "config", ",", "log", "=", "self", ".", "log", ",", "profile_dir", "=", "self", ".", "profile_dir", ".", "location", ",", "cluster_id", "=", "self", ".", "cluster_id", ",", ")", "return", "launcher" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
IPClusterEngines.start
Start the app for the engines subcommand.
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py
def start(self): """Start the app for the engines subcommand.""" self.log.info("IPython cluster: started") # First see if the cluster is already running # Now log and daemonize self.log.info( 'Starting engines with [daemon=%r]' % self.daemonize ) # TODO: Get daemonize working on Windows or as a Windows Server. if self.daemonize: if os.name=='posix': daemonize() dc = ioloop.DelayedCallback(self.start_engines, 0, self.loop) dc.start() # Now write the new pid file AFTER our new forked pid is active. # self.write_pid_file() try: self.loop.start() except KeyboardInterrupt: pass except zmq.ZMQError as e: if e.errno == errno.EINTR: pass else: raise
def start(self): """Start the app for the engines subcommand.""" self.log.info("IPython cluster: started") # First see if the cluster is already running # Now log and daemonize self.log.info( 'Starting engines with [daemon=%r]' % self.daemonize ) # TODO: Get daemonize working on Windows or as a Windows Server. if self.daemonize: if os.name=='posix': daemonize() dc = ioloop.DelayedCallback(self.start_engines, 0, self.loop) dc.start() # Now write the new pid file AFTER our new forked pid is active. # self.write_pid_file() try: self.loop.start() except KeyboardInterrupt: pass except zmq.ZMQError as e: if e.errno == errno.EINTR: pass else: raise
[ "Start", "the", "app", "for", "the", "engines", "subcommand", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py#L411-L437
[ "def", "start", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "\"IPython cluster: started\"", ")", "# First see if the cluster is already running", "# Now log and daemonize", "self", ".", "log", ".", "info", "(", "'Starting engines with [daemon=%r]'", "%", "self", ".", "daemonize", ")", "# TODO: Get daemonize working on Windows or as a Windows Server.", "if", "self", ".", "daemonize", ":", "if", "os", ".", "name", "==", "'posix'", ":", "daemonize", "(", ")", "dc", "=", "ioloop", ".", "DelayedCallback", "(", "self", ".", "start_engines", ",", "0", ",", "self", ".", "loop", ")", "dc", ".", "start", "(", ")", "# Now write the new pid file AFTER our new forked pid is active.", "# self.write_pid_file()", "try", ":", "self", ".", "loop", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "zmq", ".", "ZMQError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "pass", "else", ":", "raise" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
IPClusterStart.start
Start the app for the start subcommand.
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py
def start(self): """Start the app for the start subcommand.""" # First see if the cluster is already running try: pid = self.get_pid_from_file() except PIDFileError: pass else: if self.check_pid(pid): self.log.critical( 'Cluster is already running with [pid=%s]. ' 'use "ipcluster stop" to stop the cluster.' % pid ) # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.exit(ALREADY_STARTED) else: self.remove_pid_file() # Now log and daemonize self.log.info( 'Starting ipcluster with [daemon=%r]' % self.daemonize ) # TODO: Get daemonize working on Windows or as a Windows Server. if self.daemonize: if os.name=='posix': daemonize() dc = ioloop.DelayedCallback(self.start_controller, 0, self.loop) dc.start() dc = ioloop.DelayedCallback(self.start_engines, 1000*self.delay, self.loop) dc.start() # Now write the new pid file AFTER our new forked pid is active. self.write_pid_file() try: self.loop.start() except KeyboardInterrupt: pass except zmq.ZMQError as e: if e.errno == errno.EINTR: pass else: raise finally: self.remove_pid_file()
def start(self): """Start the app for the start subcommand.""" # First see if the cluster is already running try: pid = self.get_pid_from_file() except PIDFileError: pass else: if self.check_pid(pid): self.log.critical( 'Cluster is already running with [pid=%s]. ' 'use "ipcluster stop" to stop the cluster.' % pid ) # Here I exit with a unusual exit status that other processes # can watch for to learn how I existed. self.exit(ALREADY_STARTED) else: self.remove_pid_file() # Now log and daemonize self.log.info( 'Starting ipcluster with [daemon=%r]' % self.daemonize ) # TODO: Get daemonize working on Windows or as a Windows Server. if self.daemonize: if os.name=='posix': daemonize() dc = ioloop.DelayedCallback(self.start_controller, 0, self.loop) dc.start() dc = ioloop.DelayedCallback(self.start_engines, 1000*self.delay, self.loop) dc.start() # Now write the new pid file AFTER our new forked pid is active. self.write_pid_file() try: self.loop.start() except KeyboardInterrupt: pass except zmq.ZMQError as e: if e.errno == errno.EINTR: pass else: raise finally: self.remove_pid_file()
[ "Start", "the", "app", "for", "the", "start", "subcommand", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipclusterapp.py#L535-L580
[ "def", "start", "(", "self", ")", ":", "# First see if the cluster is already running", "try", ":", "pid", "=", "self", ".", "get_pid_from_file", "(", ")", "except", "PIDFileError", ":", "pass", "else", ":", "if", "self", ".", "check_pid", "(", "pid", ")", ":", "self", ".", "log", ".", "critical", "(", "'Cluster is already running with [pid=%s]. '", "'use \"ipcluster stop\" to stop the cluster.'", "%", "pid", ")", "# Here I exit with a unusual exit status that other processes", "# can watch for to learn how I existed.", "self", ".", "exit", "(", "ALREADY_STARTED", ")", "else", ":", "self", ".", "remove_pid_file", "(", ")", "# Now log and daemonize", "self", ".", "log", ".", "info", "(", "'Starting ipcluster with [daemon=%r]'", "%", "self", ".", "daemonize", ")", "# TODO: Get daemonize working on Windows or as a Windows Server.", "if", "self", ".", "daemonize", ":", "if", "os", ".", "name", "==", "'posix'", ":", "daemonize", "(", ")", "dc", "=", "ioloop", ".", "DelayedCallback", "(", "self", ".", "start_controller", ",", "0", ",", "self", ".", "loop", ")", "dc", ".", "start", "(", ")", "dc", "=", "ioloop", ".", "DelayedCallback", "(", "self", ".", "start_engines", ",", "1000", "*", "self", ".", "delay", ",", "self", ".", "loop", ")", "dc", ".", "start", "(", ")", "# Now write the new pid file AFTER our new forked pid is active.", "self", ".", "write_pid_file", "(", ")", "try", ":", "self", ".", "loop", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "zmq", ".", "ZMQError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "pass", "else", ":", "raise", "finally", ":", "self", ".", "remove_pid_file", "(", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
get_app_wx
Create a new wx app or return an exiting one.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def get_app_wx(*args, **kwargs): """Create a new wx app or return an exiting one.""" import wx app = wx.GetApp() if app is None: if not kwargs.has_key('redirect'): kwargs['redirect'] = False app = wx.PySimpleApp(*args, **kwargs) return app
def get_app_wx(*args, **kwargs): """Create a new wx app or return an exiting one.""" import wx app = wx.GetApp() if app is None: if not kwargs.has_key('redirect'): kwargs['redirect'] = False app = wx.PySimpleApp(*args, **kwargs) return app
[ "Create", "a", "new", "wx", "app", "or", "return", "an", "exiting", "one", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L75-L83
[ "def", "get_app_wx", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "wx", "app", "=", "wx", ".", "GetApp", "(", ")", "if", "app", "is", "None", ":", "if", "not", "kwargs", ".", "has_key", "(", "'redirect'", ")", ":", "kwargs", "[", "'redirect'", "]", "=", "False", "app", "=", "wx", ".", "PySimpleApp", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "app" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
is_event_loop_running_wx
Is the wx event loop running.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def is_event_loop_running_wx(app=None): """Is the wx event loop running.""" if app is None: app = get_app_wx() if hasattr(app, '_in_event_loop'): return app._in_event_loop else: return app.IsMainLoopRunning()
def is_event_loop_running_wx(app=None): """Is the wx event loop running.""" if app is None: app = get_app_wx() if hasattr(app, '_in_event_loop'): return app._in_event_loop else: return app.IsMainLoopRunning()
[ "Is", "the", "wx", "event", "loop", "running", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L85-L92
[ "def", "is_event_loop_running_wx", "(", "app", "=", "None", ")", ":", "if", "app", "is", "None", ":", "app", "=", "get_app_wx", "(", ")", "if", "hasattr", "(", "app", ",", "'_in_event_loop'", ")", ":", "return", "app", ".", "_in_event_loop", "else", ":", "return", "app", ".", "IsMainLoopRunning", "(", ")" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
start_event_loop_wx
Start the wx event loop in a consistent manner.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def start_event_loop_wx(app=None): """Start the wx event loop in a consistent manner.""" if app is None: app = get_app_wx() if not is_event_loop_running_wx(app): app._in_event_loop = True app.MainLoop() app._in_event_loop = False else: app._in_event_loop = True
def start_event_loop_wx(app=None): """Start the wx event loop in a consistent manner.""" if app is None: app = get_app_wx() if not is_event_loop_running_wx(app): app._in_event_loop = True app.MainLoop() app._in_event_loop = False else: app._in_event_loop = True
[ "Start", "the", "wx", "event", "loop", "in", "a", "consistent", "manner", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L94-L103
[ "def", "start_event_loop_wx", "(", "app", "=", "None", ")", ":", "if", "app", "is", "None", ":", "app", "=", "get_app_wx", "(", ")", "if", "not", "is_event_loop_running_wx", "(", "app", ")", ":", "app", ".", "_in_event_loop", "=", "True", "app", ".", "MainLoop", "(", ")", "app", ".", "_in_event_loop", "=", "False", "else", ":", "app", ".", "_in_event_loop", "=", "True" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
get_app_qt4
Create a new qt4 app or return an existing one.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def get_app_qt4(*args, **kwargs): """Create a new qt4 app or return an existing one.""" from IPython.external.qt_for_kernel import QtGui app = QtGui.QApplication.instance() if app is None: if not args: args = ([''],) app = QtGui.QApplication(*args, **kwargs) return app
def get_app_qt4(*args, **kwargs): """Create a new qt4 app or return an existing one.""" from IPython.external.qt_for_kernel import QtGui app = QtGui.QApplication.instance() if app is None: if not args: args = ([''],) app = QtGui.QApplication(*args, **kwargs) return app
[ "Create", "a", "new", "qt4", "app", "or", "return", "an", "existing", "one", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L109-L117
[ "def", "get_app_qt4", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "IPython", ".", "external", ".", "qt_for_kernel", "import", "QtGui", "app", "=", "QtGui", ".", "QApplication", ".", "instance", "(", ")", "if", "app", "is", "None", ":", "if", "not", "args", ":", "args", "=", "(", "[", "''", "]", ",", ")", "app", "=", "QtGui", ".", "QApplication", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "app" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
is_event_loop_running_qt4
Is the qt4 event loop running.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def is_event_loop_running_qt4(app=None): """Is the qt4 event loop running.""" if app is None: app = get_app_qt4(['']) if hasattr(app, '_in_event_loop'): return app._in_event_loop else: # Does qt4 provide a other way to detect this? return False
def is_event_loop_running_qt4(app=None): """Is the qt4 event loop running.""" if app is None: app = get_app_qt4(['']) if hasattr(app, '_in_event_loop'): return app._in_event_loop else: # Does qt4 provide a other way to detect this? return False
[ "Is", "the", "qt4", "event", "loop", "running", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L119-L127
[ "def", "is_event_loop_running_qt4", "(", "app", "=", "None", ")", ":", "if", "app", "is", "None", ":", "app", "=", "get_app_qt4", "(", "[", "''", "]", ")", "if", "hasattr", "(", "app", ",", "'_in_event_loop'", ")", ":", "return", "app", ".", "_in_event_loop", "else", ":", "# Does qt4 provide a other way to detect this?", "return", "False" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
start_event_loop_qt4
Start the qt4 event loop in a consistent manner.
environment/lib/python2.7/site-packages/IPython/lib/guisupport.py
def start_event_loop_qt4(app=None): """Start the qt4 event loop in a consistent manner.""" if app is None: app = get_app_qt4(['']) if not is_event_loop_running_qt4(app): app._in_event_loop = True app.exec_() app._in_event_loop = False else: app._in_event_loop = True
def start_event_loop_qt4(app=None): """Start the qt4 event loop in a consistent manner.""" if app is None: app = get_app_qt4(['']) if not is_event_loop_running_qt4(app): app._in_event_loop = True app.exec_() app._in_event_loop = False else: app._in_event_loop = True
[ "Start", "the", "qt4", "event", "loop", "in", "a", "consistent", "manner", "." ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/guisupport.py#L129-L138
[ "def", "start_event_loop_qt4", "(", "app", "=", "None", ")", ":", "if", "app", "is", "None", ":", "app", "=", "get_app_qt4", "(", "[", "''", "]", ")", "if", "not", "is_event_loop_running_qt4", "(", "app", ")", ":", "app", ".", "_in_event_loop", "=", "True", "app", ".", "exec_", "(", ")", "app", ".", "_in_event_loop", "=", "False", "else", ":", "app", ".", "_in_event_loop", "=", "True" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
build_py.check_package
Check namespace packages' __init__ for declare_namespace
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/build_py.py
def check_package(self, package, package_dir): """Check namespace packages' __init__ for declare_namespace""" try: return self.packages_checked[package] except KeyError: pass init_py = _build_py.check_package(self, package, package_dir) self.packages_checked[package] = init_py if not init_py or not self.distribution.namespace_packages: return init_py for pkg in self.distribution.namespace_packages: if pkg==package or pkg.startswith(package+'.'): break else: return init_py f = open(init_py,'rbU') if 'declare_namespace'.encode() not in f.read(): from distutils import log log.warn( "WARNING: %s is a namespace package, but its __init__.py does\n" "not declare_namespace(); setuptools 0.7 will REQUIRE this!\n" '(See the setuptools manual under "Namespace Packages" for ' "details.)\n", package ) f.close() return init_py
def check_package(self, package, package_dir): """Check namespace packages' __init__ for declare_namespace""" try: return self.packages_checked[package] except KeyError: pass init_py = _build_py.check_package(self, package, package_dir) self.packages_checked[package] = init_py if not init_py or not self.distribution.namespace_packages: return init_py for pkg in self.distribution.namespace_packages: if pkg==package or pkg.startswith(package+'.'): break else: return init_py f = open(init_py,'rbU') if 'declare_namespace'.encode() not in f.read(): from distutils import log log.warn( "WARNING: %s is a namespace package, but its __init__.py does\n" "not declare_namespace(); setuptools 0.7 will REQUIRE this!\n" '(See the setuptools manual under "Namespace Packages" for ' "details.)\n", package ) f.close() return init_py
[ "Check", "namespace", "packages", "__init__", "for", "declare_namespace" ]
cloud9ers/gurumate
python
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/build_py.py#L197-L226
[ "def", "check_package", "(", "self", ",", "package", ",", "package_dir", ")", ":", "try", ":", "return", "self", ".", "packages_checked", "[", "package", "]", "except", "KeyError", ":", "pass", "init_py", "=", "_build_py", ".", "check_package", "(", "self", ",", "package", ",", "package_dir", ")", "self", ".", "packages_checked", "[", "package", "]", "=", "init_py", "if", "not", "init_py", "or", "not", "self", ".", "distribution", ".", "namespace_packages", ":", "return", "init_py", "for", "pkg", "in", "self", ".", "distribution", ".", "namespace_packages", ":", "if", "pkg", "==", "package", "or", "pkg", ".", "startswith", "(", "package", "+", "'.'", ")", ":", "break", "else", ":", "return", "init_py", "f", "=", "open", "(", "init_py", ",", "'rbU'", ")", "if", "'declare_namespace'", ".", "encode", "(", ")", "not", "in", "f", ".", "read", "(", ")", ":", "from", "distutils", "import", "log", "log", ".", "warn", "(", "\"WARNING: %s is a namespace package, but its __init__.py does\\n\"", "\"not declare_namespace(); setuptools 0.7 will REQUIRE this!\\n\"", "'(See the setuptools manual under \"Namespace Packages\" for '", "\"details.)\\n\"", ",", "package", ")", "f", ".", "close", "(", ")", "return", "init_py" ]
075dc74d1ee62a8c6b7a8bf2b271364f01629d1e
test
Canvas.blank_canvas
Return a blank canvas to annotate. :param width: xdim (int) :param height: ydim (int) :returns: :class:`jicbioimage.illustrate.Canvas`
jicbioimage/illustrate/__init__.py
def blank_canvas(width, height): """Return a blank canvas to annotate. :param width: xdim (int) :param height: ydim (int) :returns: :class:`jicbioimage.illustrate.Canvas` """ canvas = np.zeros((height, width, 3), dtype=np.uint8) return canvas.view(Canvas)
def blank_canvas(width, height): """Return a blank canvas to annotate. :param width: xdim (int) :param height: ydim (int) :returns: :class:`jicbioimage.illustrate.Canvas` """ canvas = np.zeros((height, width, 3), dtype=np.uint8) return canvas.view(Canvas)
[ "Return", "a", "blank", "canvas", "to", "annotate", "." ]
JIC-CSB/jicbioimage.illustrate
python
https://github.com/JIC-CSB/jicbioimage.illustrate/blob/d88ddf81ee3eb3949677e2ef746af8169ce88092/jicbioimage/illustrate/__init__.py#L55-L63
[ "def", "blank_canvas", "(", "width", ",", "height", ")", ":", "canvas", "=", "np", ".", "zeros", "(", "(", "height", ",", "width", ",", "3", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "return", "canvas", ".", "view", "(", "Canvas", ")" ]
d88ddf81ee3eb3949677e2ef746af8169ce88092
test
Canvas.draw_cross
Draw a cross on the canvas. :param position: (row, col) tuple :param color: RGB tuple :param radius: radius of the cross (int)
jicbioimage/illustrate/__init__.py
def draw_cross(self, position, color=(255, 0, 0), radius=4): """Draw a cross on the canvas. :param position: (row, col) tuple :param color: RGB tuple :param radius: radius of the cross (int) """ y, x = position for xmod in np.arange(-radius, radius+1, 1): xpos = x + xmod if xpos < 0: continue # Negative indices will draw on the opposite side. if xpos >= self.shape[1]: continue # Out of bounds. self[int(y), int(xpos)] = color for ymod in np.arange(-radius, radius+1, 1): ypos = y + ymod if ypos < 0: continue # Negative indices will draw on the opposite side. if ypos >= self.shape[0]: continue # Out of bounds. self[int(ypos), int(x)] = color
def draw_cross(self, position, color=(255, 0, 0), radius=4): """Draw a cross on the canvas. :param position: (row, col) tuple :param color: RGB tuple :param radius: radius of the cross (int) """ y, x = position for xmod in np.arange(-radius, radius+1, 1): xpos = x + xmod if xpos < 0: continue # Negative indices will draw on the opposite side. if xpos >= self.shape[1]: continue # Out of bounds. self[int(y), int(xpos)] = color for ymod in np.arange(-radius, radius+1, 1): ypos = y + ymod if ypos < 0: continue # Negative indices will draw on the opposite side. if ypos >= self.shape[0]: continue # Out of bounds. self[int(ypos), int(x)] = color
[ "Draw", "a", "cross", "on", "the", "canvas", "." ]
JIC-CSB/jicbioimage.illustrate
python
https://github.com/JIC-CSB/jicbioimage.illustrate/blob/d88ddf81ee3eb3949677e2ef746af8169ce88092/jicbioimage/illustrate/__init__.py#L65-L86
[ "def", "draw_cross", "(", "self", ",", "position", ",", "color", "=", "(", "255", ",", "0", ",", "0", ")", ",", "radius", "=", "4", ")", ":", "y", ",", "x", "=", "position", "for", "xmod", "in", "np", ".", "arange", "(", "-", "radius", ",", "radius", "+", "1", ",", "1", ")", ":", "xpos", "=", "x", "+", "xmod", "if", "xpos", "<", "0", ":", "continue", "# Negative indices will draw on the opposite side.", "if", "xpos", ">=", "self", ".", "shape", "[", "1", "]", ":", "continue", "# Out of bounds.", "self", "[", "int", "(", "y", ")", ",", "int", "(", "xpos", ")", "]", "=", "color", "for", "ymod", "in", "np", ".", "arange", "(", "-", "radius", ",", "radius", "+", "1", ",", "1", ")", ":", "ypos", "=", "y", "+", "ymod", "if", "ypos", "<", "0", ":", "continue", "# Negative indices will draw on the opposite side.", "if", "ypos", ">=", "self", ".", "shape", "[", "0", "]", ":", "continue", "# Out of bounds.", "self", "[", "int", "(", "ypos", ")", ",", "int", "(", "x", ")", "]", "=", "color" ]
d88ddf81ee3eb3949677e2ef746af8169ce88092
test
Canvas.draw_line
Draw a line between pos1 and pos2 on the canvas. :param pos1: position 1 (row, col) tuple :param pos2: position 2 (row, col) tuple :param color: RGB tuple
jicbioimage/illustrate/__init__.py
def draw_line(self, pos1, pos2, color=(255, 0, 0)): """Draw a line between pos1 and pos2 on the canvas. :param pos1: position 1 (row, col) tuple :param pos2: position 2 (row, col) tuple :param color: RGB tuple """ r1, c1 = tuple([int(round(i, 0)) for i in pos1]) r2, c2 = tuple([int(round(i, 0)) for i in pos2]) rr, cc = skimage.draw.line(r1, c1, r2, c2) self[rr, cc] = color
def draw_line(self, pos1, pos2, color=(255, 0, 0)): """Draw a line between pos1 and pos2 on the canvas. :param pos1: position 1 (row, col) tuple :param pos2: position 2 (row, col) tuple :param color: RGB tuple """ r1, c1 = tuple([int(round(i, 0)) for i in pos1]) r2, c2 = tuple([int(round(i, 0)) for i in pos2]) rr, cc = skimage.draw.line(r1, c1, r2, c2) self[rr, cc] = color
[ "Draw", "a", "line", "between", "pos1", "and", "pos2", "on", "the", "canvas", "." ]
JIC-CSB/jicbioimage.illustrate
python
https://github.com/JIC-CSB/jicbioimage.illustrate/blob/d88ddf81ee3eb3949677e2ef746af8169ce88092/jicbioimage/illustrate/__init__.py#L88-L98
[ "def", "draw_line", "(", "self", ",", "pos1", ",", "pos2", ",", "color", "=", "(", "255", ",", "0", ",", "0", ")", ")", ":", "r1", ",", "c1", "=", "tuple", "(", "[", "int", "(", "round", "(", "i", ",", "0", ")", ")", "for", "i", "in", "pos1", "]", ")", "r2", ",", "c2", "=", "tuple", "(", "[", "int", "(", "round", "(", "i", ",", "0", ")", ")", "for", "i", "in", "pos2", "]", ")", "rr", ",", "cc", "=", "skimage", ".", "draw", ".", "line", "(", "r1", ",", "c1", ",", "r2", ",", "c2", ")", "self", "[", "rr", ",", "cc", "]", "=", "color" ]
d88ddf81ee3eb3949677e2ef746af8169ce88092
test
Canvas.text_at
Write text at x, y top left corner position. By default the x and y coordinates represent the top left hand corner of the text. The text can be centered vertically and horizontally by using setting the ``center`` option to ``True``. :param text: text to write :param position: (row, col) tuple :param color: RGB tuple :param size: font size :param antialias: whether or not the text should be antialiased :param center: whether or not the text should be centered on the input coordinate
jicbioimage/illustrate/__init__.py
def text_at(self, text, position, color=(255, 255, 255), size=12, antialias=False, center=False): """Write text at x, y top left corner position. By default the x and y coordinates represent the top left hand corner of the text. The text can be centered vertically and horizontally by using setting the ``center`` option to ``True``. :param text: text to write :param position: (row, col) tuple :param color: RGB tuple :param size: font size :param antialias: whether or not the text should be antialiased :param center: whether or not the text should be centered on the input coordinate """ def antialias_value(value, normalisation): return int(round(value * normalisation)) def antialias_rgb(color, normalisation): return tuple([antialias_value(v, normalisation) for v in color]) def set_color(xpos, ypos, color): try: self[ypos, xpos] = color except IndexError: pass y, x = position font = PIL.ImageFont.truetype(DEFAULT_FONT_PATH, size=size) mask = font.getmask(text) width, height = mask.size if center: x = x - (width // 2) y = y - (height // 2) for ystep in range(height): for xstep in range(width): normalisation = mask[ystep * width + xstep] / 255. if antialias: if normalisation != 0: rgb_color = antialias_rgb(color, normalisation) set_color(x + xstep, y+ystep, rgb_color) else: if normalisation > .5: set_color(x + xstep, y + ystep, color)
def text_at(self, text, position, color=(255, 255, 255), size=12, antialias=False, center=False): """Write text at x, y top left corner position. By default the x and y coordinates represent the top left hand corner of the text. The text can be centered vertically and horizontally by using setting the ``center`` option to ``True``. :param text: text to write :param position: (row, col) tuple :param color: RGB tuple :param size: font size :param antialias: whether or not the text should be antialiased :param center: whether or not the text should be centered on the input coordinate """ def antialias_value(value, normalisation): return int(round(value * normalisation)) def antialias_rgb(color, normalisation): return tuple([antialias_value(v, normalisation) for v in color]) def set_color(xpos, ypos, color): try: self[ypos, xpos] = color except IndexError: pass y, x = position font = PIL.ImageFont.truetype(DEFAULT_FONT_PATH, size=size) mask = font.getmask(text) width, height = mask.size if center: x = x - (width // 2) y = y - (height // 2) for ystep in range(height): for xstep in range(width): normalisation = mask[ystep * width + xstep] / 255. if antialias: if normalisation != 0: rgb_color = antialias_rgb(color, normalisation) set_color(x + xstep, y+ystep, rgb_color) else: if normalisation > .5: set_color(x + xstep, y + ystep, color)
[ "Write", "text", "at", "x", "y", "top", "left", "corner", "position", "." ]
JIC-CSB/jicbioimage.illustrate
python
https://github.com/JIC-CSB/jicbioimage.illustrate/blob/d88ddf81ee3eb3949677e2ef746af8169ce88092/jicbioimage/illustrate/__init__.py#L108-L152
[ "def", "text_at", "(", "self", ",", "text", ",", "position", ",", "color", "=", "(", "255", ",", "255", ",", "255", ")", ",", "size", "=", "12", ",", "antialias", "=", "False", ",", "center", "=", "False", ")", ":", "def", "antialias_value", "(", "value", ",", "normalisation", ")", ":", "return", "int", "(", "round", "(", "value", "*", "normalisation", ")", ")", "def", "antialias_rgb", "(", "color", ",", "normalisation", ")", ":", "return", "tuple", "(", "[", "antialias_value", "(", "v", ",", "normalisation", ")", "for", "v", "in", "color", "]", ")", "def", "set_color", "(", "xpos", ",", "ypos", ",", "color", ")", ":", "try", ":", "self", "[", "ypos", ",", "xpos", "]", "=", "color", "except", "IndexError", ":", "pass", "y", ",", "x", "=", "position", "font", "=", "PIL", ".", "ImageFont", ".", "truetype", "(", "DEFAULT_FONT_PATH", ",", "size", "=", "size", ")", "mask", "=", "font", ".", "getmask", "(", "text", ")", "width", ",", "height", "=", "mask", ".", "size", "if", "center", ":", "x", "=", "x", "-", "(", "width", "//", "2", ")", "y", "=", "y", "-", "(", "height", "//", "2", ")", "for", "ystep", "in", "range", "(", "height", ")", ":", "for", "xstep", "in", "range", "(", "width", ")", ":", "normalisation", "=", "mask", "[", "ystep", "*", "width", "+", "xstep", "]", "/", "255.", "if", "antialias", ":", "if", "normalisation", "!=", "0", ":", "rgb_color", "=", "antialias_rgb", "(", "color", ",", "normalisation", ")", "set_color", "(", "x", "+", "xstep", ",", "y", "+", "ystep", ",", "rgb_color", ")", "else", ":", "if", "normalisation", ">", ".5", ":", "set_color", "(", "x", "+", "xstep", ",", "y", "+", "ystep", ",", "color", ")" ]
d88ddf81ee3eb3949677e2ef746af8169ce88092
test
AnnotatedImage.from_grayscale
Return a canvas from a grayscale image. :param im: single channel image :channels_on: channels to populate with input image :returns: :class:`jicbioimage.illustrate.Canvas`
jicbioimage/illustrate/__init__.py
def from_grayscale(im, channels_on=(True, True, True)): """Return a canvas from a grayscale image. :param im: single channel image :channels_on: channels to populate with input image :returns: :class:`jicbioimage.illustrate.Canvas` """ xdim, ydim = im.shape canvas = np.zeros((xdim, ydim, 3), dtype=np.uint8) for i, include in enumerate(channels_on): if include: canvas[:, :, i] = im return canvas.view(AnnotatedImage)
def from_grayscale(im, channels_on=(True, True, True)): """Return a canvas from a grayscale image. :param im: single channel image :channels_on: channels to populate with input image :returns: :class:`jicbioimage.illustrate.Canvas` """ xdim, ydim = im.shape canvas = np.zeros((xdim, ydim, 3), dtype=np.uint8) for i, include in enumerate(channels_on): if include: canvas[:, :, i] = im return canvas.view(AnnotatedImage)
[ "Return", "a", "canvas", "from", "a", "grayscale", "image", "." ]
JIC-CSB/jicbioimage.illustrate
python
https://github.com/JIC-CSB/jicbioimage.illustrate/blob/d88ddf81ee3eb3949677e2ef746af8169ce88092/jicbioimage/illustrate/__init__.py#L159-L171
[ "def", "from_grayscale", "(", "im", ",", "channels_on", "=", "(", "True", ",", "True", ",", "True", ")", ")", ":", "xdim", ",", "ydim", "=", "im", ".", "shape", "canvas", "=", "np", ".", "zeros", "(", "(", "xdim", ",", "ydim", ",", "3", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "for", "i", ",", "include", "in", "enumerate", "(", "channels_on", ")", ":", "if", "include", ":", "canvas", "[", ":", ",", ":", ",", "i", "]", "=", "im", "return", "canvas", ".", "view", "(", "AnnotatedImage", ")" ]
d88ddf81ee3eb3949677e2ef746af8169ce88092
test
get_uuid
Returns a unique ID of a given length. User `version=2` for cross-systems uniqueness.
toolware/utils/generic.py
def get_uuid(length=32, version=1): """ Returns a unique ID of a given length. User `version=2` for cross-systems uniqueness. """ if version == 1: return uuid.uuid1().hex[:length] else: return uuid.uuid4().hex[:length]
def get_uuid(length=32, version=1): """ Returns a unique ID of a given length. User `version=2` for cross-systems uniqueness. """ if version == 1: return uuid.uuid1().hex[:length] else: return uuid.uuid4().hex[:length]
[ "Returns", "a", "unique", "ID", "of", "a", "given", "length", ".", "User", "version", "=", "2", "for", "cross", "-", "systems", "uniqueness", "." ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L17-L25
[ "def", "get_uuid", "(", "length", "=", "32", ",", "version", "=", "1", ")", ":", "if", "version", "==", "1", ":", "return", "uuid", ".", "uuid1", "(", ")", ".", "hex", "[", ":", "length", "]", "else", ":", "return", "uuid", ".", "uuid4", "(", ")", ".", "hex", "[", ":", "length", "]" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
get_dict_to_encoded_url
Converts a dict to an encoded URL. Example: given data = {'a': 1, 'b': 2}, it returns 'a=1&b=2'
toolware/utils/generic.py
def get_dict_to_encoded_url(data): """ Converts a dict to an encoded URL. Example: given data = {'a': 1, 'b': 2}, it returns 'a=1&b=2' """ unicode_data = dict([(k, smart_str(v)) for k, v in data.items()]) encoded = urllib.urlencode(unicode_data) return encoded
def get_dict_to_encoded_url(data): """ Converts a dict to an encoded URL. Example: given data = {'a': 1, 'b': 2}, it returns 'a=1&b=2' """ unicode_data = dict([(k, smart_str(v)) for k, v in data.items()]) encoded = urllib.urlencode(unicode_data) return encoded
[ "Converts", "a", "dict", "to", "an", "encoded", "URL", ".", "Example", ":", "given", "data", "=", "{", "a", ":", "1", "b", ":", "2", "}", "it", "returns", "a", "=", "1&b", "=", "2" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L61-L68
[ "def", "get_dict_to_encoded_url", "(", "data", ")", ":", "unicode_data", "=", "dict", "(", "[", "(", "k", ",", "smart_str", "(", "v", ")", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "]", ")", "encoded", "=", "urllib", ".", "urlencode", "(", "unicode_data", ")", "return", "encoded" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
get_encoded_url_to_dict
Converts an encoded URL to a dict. Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2}
toolware/utils/generic.py
def get_encoded_url_to_dict(string): """ Converts an encoded URL to a dict. Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2} """ data = urllib.parse.parse_qsl(string, keep_blank_values=True) data = dict(data) return data
def get_encoded_url_to_dict(string): """ Converts an encoded URL to a dict. Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2} """ data = urllib.parse.parse_qsl(string, keep_blank_values=True) data = dict(data) return data
[ "Converts", "an", "encoded", "URL", "to", "a", "dict", ".", "Example", ":", "given", "string", "=", "a", "=", "1&b", "=", "2", "it", "returns", "{", "a", ":", "1", "b", ":", "2", "}" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L71-L78
[ "def", "get_encoded_url_to_dict", "(", "string", ")", ":", "data", "=", "urllib", ".", "parse", ".", "parse_qsl", "(", "string", ",", "keep_blank_values", "=", "True", ")", "data", "=", "dict", "(", "data", ")", "return", "data" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
get_unique_key_from_get
Build a unique key from get data
toolware/utils/generic.py
def get_unique_key_from_get(get_dict): """ Build a unique key from get data """ site = Site.objects.get_current() key = get_dict_to_encoded_url(get_dict) cache_key = '{}_{}'.format(site.domain, key) return hashlib.md5(cache_key).hexdigest()
def get_unique_key_from_get(get_dict): """ Build a unique key from get data """ site = Site.objects.get_current() key = get_dict_to_encoded_url(get_dict) cache_key = '{}_{}'.format(site.domain, key) return hashlib.md5(cache_key).hexdigest()
[ "Build", "a", "unique", "key", "from", "get", "data" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L93-L100
[ "def", "get_unique_key_from_get", "(", "get_dict", ")", ":", "site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "key", "=", "get_dict_to_encoded_url", "(", "get_dict", ")", "cache_key", "=", "'{}_{}'", ".", "format", "(", "site", ".", "domain", ",", "key", ")", "return", "hashlib", ".", "md5", "(", "cache_key", ")", ".", "hexdigest", "(", ")" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
tobin
Given a decimal number, returns a string bitfield of length = len Example: given deci_num = 1 and len = 10, it return 0000000001
toolware/utils/generic.py
def tobin(deci_num, len=32): """ Given a decimal number, returns a string bitfield of length = len Example: given deci_num = 1 and len = 10, it return 0000000001 """ bitstr = "".join(map(lambda y: str((deci_num >> y) & 1), range(len - 1, -1, -1))) return bitstr
def tobin(deci_num, len=32): """ Given a decimal number, returns a string bitfield of length = len Example: given deci_num = 1 and len = 10, it return 0000000001 """ bitstr = "".join(map(lambda y: str((deci_num >> y) & 1), range(len - 1, -1, -1))) return bitstr
[ "Given", "a", "decimal", "number", "returns", "a", "string", "bitfield", "of", "length", "=", "len", "Example", ":", "given", "deci_num", "=", "1", "and", "len", "=", "10", "it", "return", "0000000001" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L111-L117
[ "def", "tobin", "(", "deci_num", ",", "len", "=", "32", ")", ":", "bitstr", "=", "\"\"", ".", "join", "(", "map", "(", "lambda", "y", ":", "str", "(", "(", "deci_num", ">>", "y", ")", "&", "1", ")", ",", "range", "(", "len", "-", "1", ",", "-", "1", ",", "-", "1", ")", ")", ")", "return", "bitstr" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
is_valid_email
Validates and email address. Note: valid emails must follow the <name>@<domain><.extension> patterns.
toolware/utils/generic.py
def is_valid_email(email): """ Validates and email address. Note: valid emails must follow the <name>@<domain><.extension> patterns. """ try: validate_email(email) except ValidationError: return False if simple_email_re.match(email): return True return False
def is_valid_email(email): """ Validates and email address. Note: valid emails must follow the <name>@<domain><.extension> patterns. """ try: validate_email(email) except ValidationError: return False if simple_email_re.match(email): return True return False
[ "Validates", "and", "email", "address", ".", "Note", ":", "valid", "emails", "must", "follow", "the", "<name", ">" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L120-L131
[ "def", "is_valid_email", "(", "email", ")", ":", "try", ":", "validate_email", "(", "email", ")", "except", "ValidationError", ":", "return", "False", "if", "simple_email_re", ".", "match", "(", "email", ")", ":", "return", "True", "return", "False" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
get_domain
Returns domain name portion of a URL
toolware/utils/generic.py
def get_domain(url): """ Returns domain name portion of a URL """ if 'http' not in url.lower(): url = 'http://{}'.format(url) return urllib.parse.urlparse(url).hostname
def get_domain(url): """ Returns domain name portion of a URL """ if 'http' not in url.lower(): url = 'http://{}'.format(url) return urllib.parse.urlparse(url).hostname
[ "Returns", "domain", "name", "portion", "of", "a", "URL" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L134-L138
[ "def", "get_domain", "(", "url", ")", ":", "if", "'http'", "not", "in", "url", ".", "lower", "(", ")", ":", "url", "=", "'http://{}'", ".", "format", "(", "url", ")", "return", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", ".", "hostname" ]
973f3e003dc38b812897dab88455bee37dcaf931
test
get_url_args
Returns a dictionary from a URL params
toolware/utils/generic.py
def get_url_args(url): """ Returns a dictionary from a URL params """ url_data = urllib.parse.urlparse(url) arg_dict = urllib.parse.parse_qs(url_data.query) return arg_dict
def get_url_args(url): """ Returns a dictionary from a URL params """ url_data = urllib.parse.urlparse(url) arg_dict = urllib.parse.parse_qs(url_data.query) return arg_dict
[ "Returns", "a", "dictionary", "from", "a", "URL", "params" ]
un33k/django-toolware
python
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L141-L145
[ "def", "get_url_args", "(", "url", ")", ":", "url_data", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "arg_dict", "=", "urllib", ".", "parse", ".", "parse_qs", "(", "url_data", ".", "query", ")", "return", "arg_dict" ]
973f3e003dc38b812897dab88455bee37dcaf931
train
train
Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir example tree structure) Structure: <train_dir>/ ├── <person1>/ │ ├── <somename1>.jpeg │ ├── <somename2>.jpeg │ ├── ... ├── <person2>/ │ ├── <somename1>.jpeg │ └── <somename2>.jpeg └── ... :param model_save_path: (optional) path to save model on disk :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified :param knn_algo: (optional) underlying data structure to support knn.default is ball_tree :param verbose: verbosity of training :return: returns knn classifier that was trained on the given data.
examples/face_recognition_knn.py
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False): """ Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir example tree structure) Structure: <train_dir>/ ├── <person1>/ │ ├── <somename1>.jpeg │ ├── <somename2>.jpeg │ ├── ... ├── <person2>/ │ ├── <somename1>.jpeg │ └── <somename2>.jpeg └── ... :param model_save_path: (optional) path to save model on disk :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified :param knn_algo: (optional) underlying data structure to support knn.default is ball_tree :param verbose: verbosity of training :return: returns knn classifier that was trained on the given data. """ X = [] y = [] # Loop through each person in the training set for class_dir in os.listdir(train_dir): if not os.path.isdir(os.path.join(train_dir, class_dir)): continue # Loop through each training image for the current person for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)): image = face_recognition.load_image_file(img_path) face_bounding_boxes = face_recognition.face_locations(image) if len(face_bounding_boxes) != 1: # If there are no people (or too many people) in a training image, skip the image. 
if verbose: print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face")) else: # Add face encoding for current image to the training set X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0]) y.append(class_dir) # Determine how many neighbors to use for weighting in the KNN classifier if n_neighbors is None: n_neighbors = int(round(math.sqrt(len(X)))) if verbose: print("Chose n_neighbors automatically:", n_neighbors) # Create and train the KNN classifier knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance') knn_clf.fit(X, y) # Save the trained KNN classifier if model_save_path is not None: with open(model_save_path, 'wb') as f: pickle.dump(knn_clf, f) return knn_clf
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False): """ Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir example tree structure) Structure: <train_dir>/ ├── <person1>/ │ ├── <somename1>.jpeg │ ├── <somename2>.jpeg │ ├── ... ├── <person2>/ │ ├── <somename1>.jpeg │ └── <somename2>.jpeg └── ... :param model_save_path: (optional) path to save model on disk :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified :param knn_algo: (optional) underlying data structure to support knn.default is ball_tree :param verbose: verbosity of training :return: returns knn classifier that was trained on the given data. """ X = [] y = [] # Loop through each person in the training set for class_dir in os.listdir(train_dir): if not os.path.isdir(os.path.join(train_dir, class_dir)): continue # Loop through each training image for the current person for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)): image = face_recognition.load_image_file(img_path) face_bounding_boxes = face_recognition.face_locations(image) if len(face_bounding_boxes) != 1: # If there are no people (or too many people) in a training image, skip the image. 
if verbose: print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face")) else: # Add face encoding for current image to the training set X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0]) y.append(class_dir) # Determine how many neighbors to use for weighting in the KNN classifier if n_neighbors is None: n_neighbors = int(round(math.sqrt(len(X)))) if verbose: print("Chose n_neighbors automatically:", n_neighbors) # Create and train the KNN classifier knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance') knn_clf.fit(X, y) # Save the trained KNN classifier if model_save_path is not None: with open(model_save_path, 'wb') as f: pickle.dump(knn_clf, f) return knn_clf
[ "Trains", "a", "k", "-", "nearest", "neighbors", "classifier", "for", "face", "recognition", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L46-L108
[ "def", "train", "(", "train_dir", ",", "model_save_path", "=", "None", ",", "n_neighbors", "=", "None", ",", "knn_algo", "=", "'ball_tree'", ",", "verbose", "=", "False", ")", ":", "X", "=", "[", "]", "y", "=", "[", "]", "# Loop through each person in the training set", "for", "class_dir", "in", "os", ".", "listdir", "(", "train_dir", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "train_dir", ",", "class_dir", ")", ")", ":", "continue", "# Loop through each training image for the current person", "for", "img_path", "in", "image_files_in_folder", "(", "os", ".", "path", ".", "join", "(", "train_dir", ",", "class_dir", ")", ")", ":", "image", "=", "face_recognition", ".", "load_image_file", "(", "img_path", ")", "face_bounding_boxes", "=", "face_recognition", ".", "face_locations", "(", "image", ")", "if", "len", "(", "face_bounding_boxes", ")", "!=", "1", ":", "# If there are no people (or too many people) in a training image, skip the image.", "if", "verbose", ":", "print", "(", "\"Image {} not suitable for training: {}\"", ".", "format", "(", "img_path", ",", "\"Didn't find a face\"", "if", "len", "(", "face_bounding_boxes", ")", "<", "1", "else", "\"Found more than one face\"", ")", ")", "else", ":", "# Add face encoding for current image to the training set", "X", ".", "append", "(", "face_recognition", ".", "face_encodings", "(", "image", ",", "known_face_locations", "=", "face_bounding_boxes", ")", "[", "0", "]", ")", "y", ".", "append", "(", "class_dir", ")", "# Determine how many neighbors to use for weighting in the KNN classifier", "if", "n_neighbors", "is", "None", ":", "n_neighbors", "=", "int", "(", "round", "(", "math", ".", "sqrt", "(", "len", "(", "X", ")", ")", ")", ")", "if", "verbose", ":", "print", "(", "\"Chose n_neighbors automatically:\"", ",", "n_neighbors", ")", "# Create and train the KNN classifier", "knn_clf", "=", "neighbors", ".", "KNeighborsClassifier", "(", "n_neighbors", "=", 
"n_neighbors", ",", "algorithm", "=", "knn_algo", ",", "weights", "=", "'distance'", ")", "knn_clf", ".", "fit", "(", "X", ",", "y", ")", "# Save the trained KNN classifier", "if", "model_save_path", "is", "not", "None", ":", "with", "open", "(", "model_save_path", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "knn_clf", ",", "f", ")", "return", "knn_clf" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
predict
Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned.
examples/face_recognition_knn.py
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6): """ Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned. """ if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS: raise Exception("Invalid image path: {}".format(X_img_path)) if knn_clf is None and model_path is None: raise Exception("Must supply knn classifier either thourgh knn_clf or model_path") # Load a trained KNN model (if one was passed in) if knn_clf is None: with open(model_path, 'rb') as f: knn_clf = pickle.load(f) # Load image file and find face locations X_img = face_recognition.load_image_file(X_img_path) X_face_locations = face_recognition.face_locations(X_img) # If no faces are found in the image, return an empty result. 
if len(X_face_locations) == 0: return [] # Find encodings for faces in the test iamge faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations) # Use the KNN model to find the best matches for the test face closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1) are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))] # Predict classes and remove classifications that aren't within the threshold return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6): """ Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned. """ if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS: raise Exception("Invalid image path: {}".format(X_img_path)) if knn_clf is None and model_path is None: raise Exception("Must supply knn classifier either thourgh knn_clf or model_path") # Load a trained KNN model (if one was passed in) if knn_clf is None: with open(model_path, 'rb') as f: knn_clf = pickle.load(f) # Load image file and find face locations X_img = face_recognition.load_image_file(X_img_path) X_face_locations = face_recognition.face_locations(X_img) # If no faces are found in the image, return an empty result. 
if len(X_face_locations) == 0: return [] # Find encodings for faces in the test iamge faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations) # Use the KNN model to find the best matches for the test face closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1) are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))] # Predict classes and remove classifications that aren't within the threshold return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
[ "Recognizes", "faces", "in", "given", "image", "using", "a", "trained", "KNN", "classifier" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L111-L150
[ "def", "predict", "(", "X_img_path", ",", "knn_clf", "=", "None", ",", "model_path", "=", "None", ",", "distance_threshold", "=", "0.6", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "X_img_path", ")", "or", "os", ".", "path", ".", "splitext", "(", "X_img_path", ")", "[", "1", "]", "[", "1", ":", "]", "not", "in", "ALLOWED_EXTENSIONS", ":", "raise", "Exception", "(", "\"Invalid image path: {}\"", ".", "format", "(", "X_img_path", ")", ")", "if", "knn_clf", "is", "None", "and", "model_path", "is", "None", ":", "raise", "Exception", "(", "\"Must supply knn classifier either thourgh knn_clf or model_path\"", ")", "# Load a trained KNN model (if one was passed in)", "if", "knn_clf", "is", "None", ":", "with", "open", "(", "model_path", ",", "'rb'", ")", "as", "f", ":", "knn_clf", "=", "pickle", ".", "load", "(", "f", ")", "# Load image file and find face locations", "X_img", "=", "face_recognition", ".", "load_image_file", "(", "X_img_path", ")", "X_face_locations", "=", "face_recognition", ".", "face_locations", "(", "X_img", ")", "# If no faces are found in the image, return an empty result.", "if", "len", "(", "X_face_locations", ")", "==", "0", ":", "return", "[", "]", "# Find encodings for faces in the test iamge", "faces_encodings", "=", "face_recognition", ".", "face_encodings", "(", "X_img", ",", "known_face_locations", "=", "X_face_locations", ")", "# Use the KNN model to find the best matches for the test face", "closest_distances", "=", "knn_clf", ".", "kneighbors", "(", "faces_encodings", ",", "n_neighbors", "=", "1", ")", "are_matches", "=", "[", "closest_distances", "[", "0", "]", "[", "i", "]", "[", "0", "]", "<=", "distance_threshold", "for", "i", "in", "range", "(", "len", "(", "X_face_locations", ")", ")", "]", "# Predict classes and remove classifications that aren't within the threshold", "return", "[", "(", "pred", ",", "loc", ")", "if", "rec", "else", "(", "\"unknown\"", ",", "loc", ")", "for", "pred", ",", "loc", ",", "rec", 
"in", "zip", "(", "knn_clf", ".", "predict", "(", "faces_encodings", ")", ",", "X_face_locations", ",", "are_matches", ")", "]" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
show_prediction_labels_on_image
Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return:
examples/face_recognition_knn.py
def show_prediction_labels_on_image(img_path, predictions): """ Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return: """ pil_image = Image.open(img_path).convert("RGB") draw = ImageDraw.Draw(pil_image) for name, (top, right, bottom, left) in predictions: # Draw a box around the face using the Pillow module draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255)) # There's a bug in Pillow where it blows up with non-UTF-8 text # when using the default bitmap font name = name.encode("UTF-8") # Draw a label with a name below the face text_width, text_height = draw.textsize(name) draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255)) draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255)) # Remove the drawing library from memory as per the Pillow docs del draw # Display the resulting image pil_image.show()
def show_prediction_labels_on_image(img_path, predictions): """ Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return: """ pil_image = Image.open(img_path).convert("RGB") draw = ImageDraw.Draw(pil_image) for name, (top, right, bottom, left) in predictions: # Draw a box around the face using the Pillow module draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255)) # There's a bug in Pillow where it blows up with non-UTF-8 text # when using the default bitmap font name = name.encode("UTF-8") # Draw a label with a name below the face text_width, text_height = draw.textsize(name) draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255)) draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255)) # Remove the drawing library from memory as per the Pillow docs del draw # Display the resulting image pil_image.show()
[ "Shows", "the", "face", "recognition", "results", "visually", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L153-L181
[ "def", "show_prediction_labels_on_image", "(", "img_path", ",", "predictions", ")", ":", "pil_image", "=", "Image", ".", "open", "(", "img_path", ")", ".", "convert", "(", "\"RGB\"", ")", "draw", "=", "ImageDraw", ".", "Draw", "(", "pil_image", ")", "for", "name", ",", "(", "top", ",", "right", ",", "bottom", ",", "left", ")", "in", "predictions", ":", "# Draw a box around the face using the Pillow module", "draw", ".", "rectangle", "(", "(", "(", "left", ",", "top", ")", ",", "(", "right", ",", "bottom", ")", ")", ",", "outline", "=", "(", "0", ",", "0", ",", "255", ")", ")", "# There's a bug in Pillow where it blows up with non-UTF-8 text", "# when using the default bitmap font", "name", "=", "name", ".", "encode", "(", "\"UTF-8\"", ")", "# Draw a label with a name below the face", "text_width", ",", "text_height", "=", "draw", ".", "textsize", "(", "name", ")", "draw", ".", "rectangle", "(", "(", "(", "left", ",", "bottom", "-", "text_height", "-", "10", ")", ",", "(", "right", ",", "bottom", ")", ")", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ")", ",", "outline", "=", "(", "0", ",", "0", ",", "255", ")", ")", "draw", ".", "text", "(", "(", "left", "+", "6", ",", "bottom", "-", "text_height", "-", "5", ")", ",", "name", ",", "fill", "=", "(", "255", ",", "255", ",", "255", ",", "255", ")", ")", "# Remove the drawing library from memory as per the Pillow docs", "del", "draw", "# Display the resulting image", "pil_image", ".", "show", "(", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
_rect_to_css
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order :param rect: a dlib 'rect' object :return: a plain tuple representation of the rect in (top, right, bottom, left) order
face_recognition/api.py
def _rect_to_css(rect): """ Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order :param rect: a dlib 'rect' object :return: a plain tuple representation of the rect in (top, right, bottom, left) order """ return rect.top(), rect.right(), rect.bottom(), rect.left()
def _rect_to_css(rect): """ Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order :param rect: a dlib 'rect' object :return: a plain tuple representation of the rect in (top, right, bottom, left) order """ return rect.top(), rect.right(), rect.bottom(), rect.left()
[ "Convert", "a", "dlib", "rect", "object", "to", "a", "plain", "tuple", "in", "(", "top", "right", "bottom", "left", ")", "order" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L32-L39
[ "def", "_rect_to_css", "(", "rect", ")", ":", "return", "rect", ".", "top", "(", ")", ",", "rect", ".", "right", "(", ")", ",", "rect", ".", "bottom", "(", ")", ",", "rect", ".", "left", "(", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
_trim_css_to_bounds
Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain tuple representation of the rect in (top, right, bottom, left) order
face_recognition/api.py
def _trim_css_to_bounds(css, image_shape): """ Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain tuple representation of the rect in (top, right, bottom, left) order """ return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
def _trim_css_to_bounds(css, image_shape): """ Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain tuple representation of the rect in (top, right, bottom, left) order """ return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
[ "Make", "sure", "a", "tuple", "in", "(", "top", "right", "bottom", "left", ")", "order", "is", "within", "the", "bounds", "of", "the", "image", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L52-L60
[ "def", "_trim_css_to_bounds", "(", "css", ",", "image_shape", ")", ":", "return", "max", "(", "css", "[", "0", "]", ",", "0", ")", ",", "min", "(", "css", "[", "1", "]", ",", "image_shape", "[", "1", "]", ")", ",", "min", "(", "css", "[", "2", "]", ",", "image_shape", "[", "0", "]", ")", ",", "max", "(", "css", "[", "3", "]", ",", "0", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
face_distance
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compare: A face encoding to compare against :return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
face_recognition/api.py
def face_distance(face_encodings, face_to_compare): """ Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compare: A face encoding to compare against :return: A numpy ndarray with the distance for each face in the same order as the 'faces' array """ if len(face_encodings) == 0: return np.empty((0)) return np.linalg.norm(face_encodings - face_to_compare, axis=1)
def face_distance(face_encodings, face_to_compare): """ Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compare: A face encoding to compare against :return: A numpy ndarray with the distance for each face in the same order as the 'faces' array """ if len(face_encodings) == 0: return np.empty((0)) return np.linalg.norm(face_encodings - face_to_compare, axis=1)
[ "Given", "a", "list", "of", "face", "encodings", "compare", "them", "to", "a", "known", "face", "encoding", "and", "get", "a", "euclidean", "distance", "for", "each", "comparison", "face", ".", "The", "distance", "tells", "you", "how", "similar", "the", "faces", "are", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L63-L75
[ "def", "face_distance", "(", "face_encodings", ",", "face_to_compare", ")", ":", "if", "len", "(", "face_encodings", ")", "==", "0", ":", "return", "np", ".", "empty", "(", "(", "0", ")", ")", "return", "np", ".", "linalg", ".", "norm", "(", "face_encodings", "-", "face_to_compare", ",", "axis", "=", "1", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
load_image_file
Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as numpy array
face_recognition/api.py
def load_image_file(file, mode='RGB'): """ Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as numpy array """ im = PIL.Image.open(file) if mode: im = im.convert(mode) return np.array(im)
def load_image_file(file, mode='RGB'): """ Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as numpy array """ im = PIL.Image.open(file) if mode: im = im.convert(mode) return np.array(im)
[ "Loads", "an", "image", "file", "(", ".", "jpg", ".", "png", "etc", ")", "into", "a", "numpy", "array" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L78-L89
[ "def", "load_image_file", "(", "file", ",", "mode", "=", "'RGB'", ")", ":", "im", "=", "PIL", ".", "Image", ".", "open", "(", "file", ")", "if", "mode", ":", "im", "=", "im", ".", "convert", "(", "mode", ")", "return", "np", ".", "array", "(", "im", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
_raw_face_locations
Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of dlib 'rect' objects of found face locations
face_recognition/api.py
def _raw_face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of dlib 'rect' objects of found face locations """ if model == "cnn": return cnn_face_detector(img, number_of_times_to_upsample) else: return face_detector(img, number_of_times_to_upsample)
def _raw_face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of dlib 'rect' objects of found face locations """ if model == "cnn": return cnn_face_detector(img, number_of_times_to_upsample) else: return face_detector(img, number_of_times_to_upsample)
[ "Returns", "an", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L92-L105
[ "def", "_raw_face_locations", "(", "img", ",", "number_of_times_to_upsample", "=", "1", ",", "model", "=", "\"hog\"", ")", ":", "if", "model", "==", "\"cnn\"", ":", "return", "cnn_face_detector", "(", "img", ",", "number_of_times_to_upsample", ")", "else", ":", "return", "face_detector", "(", "img", ",", "number_of_times_to_upsample", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
face_locations
Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of tuples of found face locations in css (top, right, bottom, left) order
face_recognition/api.py
def face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of tuples of found face locations in css (top, right, bottom, left) order """ if model == "cnn": return [_trim_css_to_bounds(_rect_to_css(face.rect), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, "cnn")] else: return [_trim_css_to_bounds(_rect_to_css(face), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, model)]
def face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of tuples of found face locations in css (top, right, bottom, left) order """ if model == "cnn": return [_trim_css_to_bounds(_rect_to_css(face.rect), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, "cnn")] else: return [_trim_css_to_bounds(_rect_to_css(face), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, model)]
[ "Returns", "an", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L108-L121
[ "def", "face_locations", "(", "img", ",", "number_of_times_to_upsample", "=", "1", ",", "model", "=", "\"hog\"", ")", ":", "if", "model", "==", "\"cnn\"", ":", "return", "[", "_trim_css_to_bounds", "(", "_rect_to_css", "(", "face", ".", "rect", ")", ",", "img", ".", "shape", ")", "for", "face", "in", "_raw_face_locations", "(", "img", ",", "number_of_times_to_upsample", ",", "\"cnn\"", ")", "]", "else", ":", "return", "[", "_trim_css_to_bounds", "(", "_rect_to_css", "(", "face", ")", ",", "img", ".", "shape", ")", "for", "face", "in", "_raw_face_locations", "(", "img", ",", "number_of_times_to_upsample", ",", "model", ")", "]" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
batch_face_locations
Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren't using a GPU, you don't need this function. :param img: A list of images (each as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param batch_size: How many images to include in each GPU processing batch. :return: A list of tuples of found face locations in css (top, right, bottom, left) order
face_recognition/api.py
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128): """ Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren't using a GPU, you don't need this function. :param img: A list of images (each as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param batch_size: How many images to include in each GPU processing batch. :return: A list of tuples of found face locations in css (top, right, bottom, left) order """ def convert_cnn_detections_to_css(detections): return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections] raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size) return list(map(convert_cnn_detections_to_css, raw_detections_batched))
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128): """ Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren't using a GPU, you don't need this function. :param img: A list of images (each as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param batch_size: How many images to include in each GPU processing batch. :return: A list of tuples of found face locations in css (top, right, bottom, left) order """ def convert_cnn_detections_to_css(detections): return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections] raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size) return list(map(convert_cnn_detections_to_css, raw_detections_batched))
[ "Returns", "an", "2d", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image", "using", "the", "cnn", "face", "detector", "If", "you", "are", "using", "a", "GPU", "this", "can", "give", "you", "much", "faster", "results", "since", "the", "GPU", "can", "process", "batches", "of", "images", "at", "once", ".", "If", "you", "aren", "t", "using", "a", "GPU", "you", "don", "t", "need", "this", "function", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L135-L151
[ "def", "batch_face_locations", "(", "images", ",", "number_of_times_to_upsample", "=", "1", ",", "batch_size", "=", "128", ")", ":", "def", "convert_cnn_detections_to_css", "(", "detections", ")", ":", "return", "[", "_trim_css_to_bounds", "(", "_rect_to_css", "(", "face", ".", "rect", ")", ",", "images", "[", "0", "]", ".", "shape", ")", "for", "face", "in", "detections", "]", "raw_detections_batched", "=", "_raw_face_locations_batched", "(", "images", ",", "number_of_times_to_upsample", ",", "batch_size", ")", "return", "list", "(", "map", "(", "convert_cnn_detections_to_css", ",", "raw_detections_batched", ")", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
face_landmarks
Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model: Optional - which model to use. "large" (default) or "small" which only returns 5 points but is faster. :return: A list of dicts of face feature locations (eyes, nose, etc)
face_recognition/api.py
def face_landmarks(face_image, face_locations=None, model="large"): """ Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model: Optional - which model to use. "large" (default) or "small" which only returns 5 points but is faster. :return: A list of dicts of face feature locations (eyes, nose, etc) """ landmarks = _raw_face_landmarks(face_image, face_locations, model) landmarks_as_tuples = [[(p.x, p.y) for p in landmark.parts()] for landmark in landmarks] # For a definition of each point index, see https://cdn-images-1.medium.com/max/1600/1*AbEg31EgkbXSQehuNJBlWg.png if model == 'large': return [{ "chin": points[0:17], "left_eyebrow": points[17:22], "right_eyebrow": points[22:27], "nose_bridge": points[27:31], "nose_tip": points[31:36], "left_eye": points[36:42], "right_eye": points[42:48], "top_lip": points[48:55] + [points[64]] + [points[63]] + [points[62]] + [points[61]] + [points[60]], "bottom_lip": points[54:60] + [points[48]] + [points[60]] + [points[67]] + [points[66]] + [points[65]] + [points[64]] } for points in landmarks_as_tuples] elif model == 'small': return [{ "nose_tip": [points[4]], "left_eye": points[2:4], "right_eye": points[0:2], } for points in landmarks_as_tuples] else: raise ValueError("Invalid landmarks model type. Supported models are ['small', 'large'].")
def face_landmarks(face_image, face_locations=None, model="large"): """ Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model: Optional - which model to use. "large" (default) or "small" which only returns 5 points but is faster. :return: A list of dicts of face feature locations (eyes, nose, etc) """ landmarks = _raw_face_landmarks(face_image, face_locations, model) landmarks_as_tuples = [[(p.x, p.y) for p in landmark.parts()] for landmark in landmarks] # For a definition of each point index, see https://cdn-images-1.medium.com/max/1600/1*AbEg31EgkbXSQehuNJBlWg.png if model == 'large': return [{ "chin": points[0:17], "left_eyebrow": points[17:22], "right_eyebrow": points[22:27], "nose_bridge": points[27:31], "nose_tip": points[31:36], "left_eye": points[36:42], "right_eye": points[42:48], "top_lip": points[48:55] + [points[64]] + [points[63]] + [points[62]] + [points[61]] + [points[60]], "bottom_lip": points[54:60] + [points[48]] + [points[60]] + [points[67]] + [points[66]] + [points[65]] + [points[64]] } for points in landmarks_as_tuples] elif model == 'small': return [{ "nose_tip": [points[4]], "left_eye": points[2:4], "right_eye": points[0:2], } for points in landmarks_as_tuples] else: raise ValueError("Invalid landmarks model type. Supported models are ['small', 'large'].")
[ "Given", "an", "image", "returns", "a", "dict", "of", "face", "feature", "locations", "(", "eyes", "nose", "etc", ")", "for", "each", "face", "in", "the", "image" ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L168-L200
[ "def", "face_landmarks", "(", "face_image", ",", "face_locations", "=", "None", ",", "model", "=", "\"large\"", ")", ":", "landmarks", "=", "_raw_face_landmarks", "(", "face_image", ",", "face_locations", ",", "model", ")", "landmarks_as_tuples", "=", "[", "[", "(", "p", ".", "x", ",", "p", ".", "y", ")", "for", "p", "in", "landmark", ".", "parts", "(", ")", "]", "for", "landmark", "in", "landmarks", "]", "# For a definition of each point index, see https://cdn-images-1.medium.com/max/1600/1*AbEg31EgkbXSQehuNJBlWg.png", "if", "model", "==", "'large'", ":", "return", "[", "{", "\"chin\"", ":", "points", "[", "0", ":", "17", "]", ",", "\"left_eyebrow\"", ":", "points", "[", "17", ":", "22", "]", ",", "\"right_eyebrow\"", ":", "points", "[", "22", ":", "27", "]", ",", "\"nose_bridge\"", ":", "points", "[", "27", ":", "31", "]", ",", "\"nose_tip\"", ":", "points", "[", "31", ":", "36", "]", ",", "\"left_eye\"", ":", "points", "[", "36", ":", "42", "]", ",", "\"right_eye\"", ":", "points", "[", "42", ":", "48", "]", ",", "\"top_lip\"", ":", "points", "[", "48", ":", "55", "]", "+", "[", "points", "[", "64", "]", "]", "+", "[", "points", "[", "63", "]", "]", "+", "[", "points", "[", "62", "]", "]", "+", "[", "points", "[", "61", "]", "]", "+", "[", "points", "[", "60", "]", "]", ",", "\"bottom_lip\"", ":", "points", "[", "54", ":", "60", "]", "+", "[", "points", "[", "48", "]", "]", "+", "[", "points", "[", "60", "]", "]", "+", "[", "points", "[", "67", "]", "]", "+", "[", "points", "[", "66", "]", "]", "+", "[", "points", "[", "65", "]", "]", "+", "[", "points", "[", "64", "]", "]", "}", "for", "points", "in", "landmarks_as_tuples", "]", "elif", "model", "==", "'small'", ":", "return", "[", "{", "\"nose_tip\"", ":", "[", "points", "[", "4", "]", "]", ",", "\"left_eye\"", ":", "points", "[", "2", ":", "4", "]", ",", "\"right_eye\"", ":", "points", "[", "0", ":", "2", "]", ",", "}", "for", "points", "in", "landmarks_as_tuples", "]", "else", ":", "raise", "ValueError", 
"(", "\"Invalid landmarks model type. Supported models are ['small', 'large'].\"", ")" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
face_encodings
Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image)
face_recognition/api.py
def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) """ raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small") return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) """ raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small") return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
[ "Given", "an", "image", "return", "the", "128", "-", "dimension", "face", "encoding", "for", "each", "face", "in", "the", "image", "." ]
ageitgey/face_recognition
python
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L203-L213
[ "def", "face_encodings", "(", "face_image", ",", "known_face_locations", "=", "None", ",", "num_jitters", "=", "1", ")", ":", "raw_landmarks", "=", "_raw_face_landmarks", "(", "face_image", ",", "known_face_locations", ",", "model", "=", "\"small\"", ")", "return", "[", "np", ".", "array", "(", "face_encoder", ".", "compute_face_descriptor", "(", "face_image", ",", "raw_landmark_set", ",", "num_jitters", ")", ")", "for", "raw_landmark_set", "in", "raw_landmarks", "]" ]
c96b010c02f15e8eeb0f71308c641179ac1f19bb
train
_parse_datatype_string
Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted string and case-insensitive strings. >>> _parse_datatype_string("int ") IntegerType >>> _parse_datatype_string("INT ") IntegerType >>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ") StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true))) >>> _parse_datatype_string("a DOUBLE, b STRING") StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true))) >>> _parse_datatype_string("a: array< short>") StructType(List(StructField(a,ArrayType(ShortType,true),true))) >>> _parse_datatype_string(" map<string , string > ") MapType(StringType,StringType,true) >>> # Error cases >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:...
python/pyspark/sql/types.py
def _parse_datatype_string(s): """ Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted string and case-insensitive strings. >>> _parse_datatype_string("int ") IntegerType >>> _parse_datatype_string("INT ") IntegerType >>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ") StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true))) >>> _parse_datatype_string("a DOUBLE, b STRING") StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true))) >>> _parse_datatype_string("a: array< short>") StructType(List(StructField(a,ArrayType(ShortType,true),true))) >>> _parse_datatype_string(" map<string , string > ") MapType(StringType,StringType,true) >>> # Error cases >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... 
""" sc = SparkContext._active_spark_context def from_ddl_schema(type_str): return _parse_datatype_json_string( sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json()) def from_ddl_datatype(type_str): return _parse_datatype_json_string( sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json()) try: # DDL format, "fieldname datatype, fieldname datatype". return from_ddl_schema(s) except Exception as e: try: # For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc. return from_ddl_datatype(s) except: try: # For backwards compatibility, "fieldname: datatype, fieldname: datatype" case. return from_ddl_datatype("struct<%s>" % s.strip()) except: raise e
def _parse_datatype_string(s): """ Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted string and case-insensitive strings. >>> _parse_datatype_string("int ") IntegerType >>> _parse_datatype_string("INT ") IntegerType >>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ") StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true))) >>> _parse_datatype_string("a DOUBLE, b STRING") StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true))) >>> _parse_datatype_string("a: array< short>") StructType(List(StructField(a,ArrayType(ShortType,true),true))) >>> _parse_datatype_string(" map<string , string > ") MapType(StringType,StringType,true) >>> # Error cases >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... 
""" sc = SparkContext._active_spark_context def from_ddl_schema(type_str): return _parse_datatype_json_string( sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json()) def from_ddl_datatype(type_str): return _parse_datatype_json_string( sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json()) try: # DDL format, "fieldname datatype, fieldname datatype". return from_ddl_schema(s) except Exception as e: try: # For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc. return from_ddl_datatype(s) except: try: # For backwards compatibility, "fieldname: datatype, fieldname: datatype" case. return from_ddl_datatype("struct<%s>" % s.strip()) except: raise e
[ "Parses", "the", "given", "data", "type", "string", "to", "a", ":", "class", ":", "DataType", ".", "The", "data", "type", "string", "format", "equals", "to", ":", "class", ":", "DataType", ".", "simpleString", "except", "that", "top", "level", "struct", "type", "can", "omit", "the", "struct<", ">", "and", "atomic", "types", "use", "typeName", "()", "as", "their", "format", "e", ".", "g", ".", "use", "byte", "instead", "of", "tinyint", "for", ":", "class", ":", "ByteType", ".", "We", "can", "also", "use", "int", "as", "a", "short", "name", "for", ":", "class", ":", "IntegerType", ".", "Since", "Spark", "2", ".", "3", "this", "also", "supports", "a", "schema", "in", "a", "DDL", "-", "formatted", "string", "and", "case", "-", "insensitive", "strings", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L758-L820
[ "def", "_parse_datatype_string", "(", "s", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "def", "from_ddl_schema", "(", "type_str", ")", ":", "return", "_parse_datatype_json_string", "(", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "types", ".", "StructType", ".", "fromDDL", "(", "type_str", ")", ".", "json", "(", ")", ")", "def", "from_ddl_datatype", "(", "type_str", ")", ":", "return", "_parse_datatype_json_string", "(", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "api", ".", "python", ".", "PythonSQLUtils", ".", "parseDataType", "(", "type_str", ")", ".", "json", "(", ")", ")", "try", ":", "# DDL format, \"fieldname datatype, fieldname datatype\".", "return", "from_ddl_schema", "(", "s", ")", "except", "Exception", "as", "e", ":", "try", ":", "# For backwards compatibility, \"integer\", \"struct<fieldname: datatype>\" and etc.", "return", "from_ddl_datatype", "(", "s", ")", "except", ":", "try", ":", "# For backwards compatibility, \"fieldname: datatype, fieldname: datatype\" case.", "return", "from_ddl_datatype", "(", "\"struct<%s>\"", "%", "s", ".", "strip", "(", ")", ")", "except", ":", "raise", "e" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_int_size_to_type
Return the Catalyst datatype from the size of integers.
python/pyspark/sql/types.py
def _int_size_to_type(size): """ Return the Catalyst datatype from the size of integers. """ if size <= 8: return ByteType if size <= 16: return ShortType if size <= 32: return IntegerType if size <= 64: return LongType
def _int_size_to_type(size): """ Return the Catalyst datatype from the size of integers. """ if size <= 8: return ByteType if size <= 16: return ShortType if size <= 32: return IntegerType if size <= 64: return LongType
[ "Return", "the", "Catalyst", "datatype", "from", "the", "size", "of", "integers", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L944-L955
[ "def", "_int_size_to_type", "(", "size", ")", ":", "if", "size", "<=", "8", ":", "return", "ByteType", "if", "size", "<=", "16", ":", "return", "ShortType", "if", "size", "<=", "32", ":", "return", "IntegerType", "if", "size", "<=", "64", ":", "return", "LongType" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_infer_type
Infer the DataType from obj
python/pyspark/sql/types.py
def _infer_type(obj): """Infer the DataType from obj """ if obj is None: return NullType() if hasattr(obj, '__UDT__'): return obj.__UDT__ dataType = _type_mappings.get(type(obj)) if dataType is DecimalType: # the precision and scale of `obj` may be different from row to row. return DecimalType(38, 18) elif dataType is not None: return dataType() if isinstance(obj, dict): for key, value in obj.items(): if key is not None and value is not None: return MapType(_infer_type(key), _infer_type(value), True) return MapType(NullType(), NullType(), True) elif isinstance(obj, list): for v in obj: if v is not None: return ArrayType(_infer_type(obj[0]), True) return ArrayType(NullType(), True) elif isinstance(obj, array): if obj.typecode in _array_type_mappings: return ArrayType(_array_type_mappings[obj.typecode](), False) else: raise TypeError("not supported type: array(%s)" % obj.typecode) else: try: return _infer_schema(obj) except TypeError: raise TypeError("not supported type: %s" % type(obj))
def _infer_type(obj): """Infer the DataType from obj """ if obj is None: return NullType() if hasattr(obj, '__UDT__'): return obj.__UDT__ dataType = _type_mappings.get(type(obj)) if dataType is DecimalType: # the precision and scale of `obj` may be different from row to row. return DecimalType(38, 18) elif dataType is not None: return dataType() if isinstance(obj, dict): for key, value in obj.items(): if key is not None and value is not None: return MapType(_infer_type(key), _infer_type(value), True) return MapType(NullType(), NullType(), True) elif isinstance(obj, list): for v in obj: if v is not None: return ArrayType(_infer_type(obj[0]), True) return ArrayType(NullType(), True) elif isinstance(obj, array): if obj.typecode in _array_type_mappings: return ArrayType(_array_type_mappings[obj.typecode](), False) else: raise TypeError("not supported type: array(%s)" % obj.typecode) else: try: return _infer_schema(obj) except TypeError: raise TypeError("not supported type: %s" % type(obj))
[ "Infer", "the", "DataType", "from", "obj" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1003-L1038
[ "def", "_infer_type", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "NullType", "(", ")", "if", "hasattr", "(", "obj", ",", "'__UDT__'", ")", ":", "return", "obj", ".", "__UDT__", "dataType", "=", "_type_mappings", ".", "get", "(", "type", "(", "obj", ")", ")", "if", "dataType", "is", "DecimalType", ":", "# the precision and scale of `obj` may be different from row to row.", "return", "DecimalType", "(", "38", ",", "18", ")", "elif", "dataType", "is", "not", "None", ":", "return", "dataType", "(", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "obj", ".", "items", "(", ")", ":", "if", "key", "is", "not", "None", "and", "value", "is", "not", "None", ":", "return", "MapType", "(", "_infer_type", "(", "key", ")", ",", "_infer_type", "(", "value", ")", ",", "True", ")", "return", "MapType", "(", "NullType", "(", ")", ",", "NullType", "(", ")", ",", "True", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "for", "v", "in", "obj", ":", "if", "v", "is", "not", "None", ":", "return", "ArrayType", "(", "_infer_type", "(", "obj", "[", "0", "]", ")", ",", "True", ")", "return", "ArrayType", "(", "NullType", "(", ")", ",", "True", ")", "elif", "isinstance", "(", "obj", ",", "array", ")", ":", "if", "obj", ".", "typecode", "in", "_array_type_mappings", ":", "return", "ArrayType", "(", "_array_type_mappings", "[", "obj", ".", "typecode", "]", "(", ")", ",", "False", ")", "else", ":", "raise", "TypeError", "(", "\"not supported type: array(%s)\"", "%", "obj", ".", "typecode", ")", "else", ":", "try", ":", "return", "_infer_schema", "(", "obj", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"not supported type: %s\"", "%", "type", "(", "obj", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_infer_schema
Infer the schema from dict/namedtuple/object
python/pyspark/sql/types.py
def _infer_schema(row, names=None): """Infer the schema from dict/namedtuple/object""" if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, (tuple, list)): if hasattr(row, "__fields__"): # Row items = zip(row.__fields__, tuple(row)) elif hasattr(row, "_fields"): # namedtuple items = zip(row._fields, tuple(row)) else: if names is None: names = ['_%d' % i for i in range(1, len(row) + 1)] elif len(names) < len(row): names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1)) items = zip(names, row) elif hasattr(row, "__dict__"): # object items = sorted(row.__dict__.items()) else: raise TypeError("Can not infer schema for type: %s" % type(row)) fields = [StructField(k, _infer_type(v), True) for k, v in items] return StructType(fields)
def _infer_schema(row, names=None): """Infer the schema from dict/namedtuple/object""" if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, (tuple, list)): if hasattr(row, "__fields__"): # Row items = zip(row.__fields__, tuple(row)) elif hasattr(row, "_fields"): # namedtuple items = zip(row._fields, tuple(row)) else: if names is None: names = ['_%d' % i for i in range(1, len(row) + 1)] elif len(names) < len(row): names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1)) items = zip(names, row) elif hasattr(row, "__dict__"): # object items = sorted(row.__dict__.items()) else: raise TypeError("Can not infer schema for type: %s" % type(row)) fields = [StructField(k, _infer_type(v), True) for k, v in items] return StructType(fields)
[ "Infer", "the", "schema", "from", "dict", "/", "namedtuple", "/", "object" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1041-L1065
[ "def", "_infer_schema", "(", "row", ",", "names", "=", "None", ")", ":", "if", "isinstance", "(", "row", ",", "dict", ")", ":", "items", "=", "sorted", "(", "row", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "row", ",", "(", "tuple", ",", "list", ")", ")", ":", "if", "hasattr", "(", "row", ",", "\"__fields__\"", ")", ":", "# Row", "items", "=", "zip", "(", "row", ".", "__fields__", ",", "tuple", "(", "row", ")", ")", "elif", "hasattr", "(", "row", ",", "\"_fields\"", ")", ":", "# namedtuple", "items", "=", "zip", "(", "row", ".", "_fields", ",", "tuple", "(", "row", ")", ")", "else", ":", "if", "names", "is", "None", ":", "names", "=", "[", "'_%d'", "%", "i", "for", "i", "in", "range", "(", "1", ",", "len", "(", "row", ")", "+", "1", ")", "]", "elif", "len", "(", "names", ")", "<", "len", "(", "row", ")", ":", "names", ".", "extend", "(", "'_%d'", "%", "i", "for", "i", "in", "range", "(", "len", "(", "names", ")", "+", "1", ",", "len", "(", "row", ")", "+", "1", ")", ")", "items", "=", "zip", "(", "names", ",", "row", ")", "elif", "hasattr", "(", "row", ",", "\"__dict__\"", ")", ":", "# object", "items", "=", "sorted", "(", "row", ".", "__dict__", ".", "items", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Can not infer schema for type: %s\"", "%", "type", "(", "row", ")", ")", "fields", "=", "[", "StructField", "(", "k", ",", "_infer_type", "(", "v", ")", ",", "True", ")", "for", "k", ",", "v", "in", "items", "]", "return", "StructType", "(", "fields", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_has_nulltype
Return whether there is NullType in `dt` or not
python/pyspark/sql/types.py
def _has_nulltype(dt): """ Return whether there is NullType in `dt` or not """ if isinstance(dt, StructType): return any(_has_nulltype(f.dataType) for f in dt.fields) elif isinstance(dt, ArrayType): return _has_nulltype((dt.elementType)) elif isinstance(dt, MapType): return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType) else: return isinstance(dt, NullType)
def _has_nulltype(dt): """ Return whether there is NullType in `dt` or not """ if isinstance(dt, StructType): return any(_has_nulltype(f.dataType) for f in dt.fields) elif isinstance(dt, ArrayType): return _has_nulltype((dt.elementType)) elif isinstance(dt, MapType): return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType) else: return isinstance(dt, NullType)
[ "Return", "whether", "there", "is", "NullType", "in", "dt", "or", "not" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1068-L1077
[ "def", "_has_nulltype", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "StructType", ")", ":", "return", "any", "(", "_has_nulltype", "(", "f", ".", "dataType", ")", "for", "f", "in", "dt", ".", "fields", ")", "elif", "isinstance", "(", "dt", ",", "ArrayType", ")", ":", "return", "_has_nulltype", "(", "(", "dt", ".", "elementType", ")", ")", "elif", "isinstance", "(", "dt", ",", "MapType", ")", ":", "return", "_has_nulltype", "(", "dt", ".", "keyType", ")", "or", "_has_nulltype", "(", "dt", ".", "valueType", ")", "else", ":", "return", "isinstance", "(", "dt", ",", "NullType", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_create_converter
Create a converter to drop the names of fields in obj
python/pyspark/sql/types.py
def _create_converter(dataType): """Create a converter to drop the names of fields in obj """ if not _need_converter(dataType): return lambda x: x if isinstance(dataType, ArrayType): conv = _create_converter(dataType.elementType) return lambda row: [conv(v) for v in row] elif isinstance(dataType, MapType): kconv = _create_converter(dataType.keyType) vconv = _create_converter(dataType.valueType) return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items()) elif isinstance(dataType, NullType): return lambda x: None elif not isinstance(dataType, StructType): return lambda x: x # dataType must be StructType names = [f.name for f in dataType.fields] converters = [_create_converter(f.dataType) for f in dataType.fields] convert_fields = any(_need_converter(f.dataType) for f in dataType.fields) def convert_struct(obj): if obj is None: return if isinstance(obj, (tuple, list)): if convert_fields: return tuple(conv(v) for v, conv in zip(obj, converters)) else: return tuple(obj) if isinstance(obj, dict): d = obj elif hasattr(obj, "__dict__"): # object d = obj.__dict__ else: raise TypeError("Unexpected obj type: %s" % type(obj)) if convert_fields: return tuple([conv(d.get(name)) for name, conv in zip(names, converters)]) else: return tuple([d.get(name) for name in names]) return convert_struct
def _create_converter(dataType): """Create a converter to drop the names of fields in obj """ if not _need_converter(dataType): return lambda x: x if isinstance(dataType, ArrayType): conv = _create_converter(dataType.elementType) return lambda row: [conv(v) for v in row] elif isinstance(dataType, MapType): kconv = _create_converter(dataType.keyType) vconv = _create_converter(dataType.valueType) return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items()) elif isinstance(dataType, NullType): return lambda x: None elif not isinstance(dataType, StructType): return lambda x: x # dataType must be StructType names = [f.name for f in dataType.fields] converters = [_create_converter(f.dataType) for f in dataType.fields] convert_fields = any(_need_converter(f.dataType) for f in dataType.fields) def convert_struct(obj): if obj is None: return if isinstance(obj, (tuple, list)): if convert_fields: return tuple(conv(v) for v, conv in zip(obj, converters)) else: return tuple(obj) if isinstance(obj, dict): d = obj elif hasattr(obj, "__dict__"): # object d = obj.__dict__ else: raise TypeError("Unexpected obj type: %s" % type(obj)) if convert_fields: return tuple([conv(d.get(name)) for name, conv in zip(names, converters)]) else: return tuple([d.get(name) for name in names]) return convert_struct
[ "Create", "a", "converter", "to", "drop", "the", "names", "of", "fields", "in", "obj" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1133-L1180
[ "def", "_create_converter", "(", "dataType", ")", ":", "if", "not", "_need_converter", "(", "dataType", ")", ":", "return", "lambda", "x", ":", "x", "if", "isinstance", "(", "dataType", ",", "ArrayType", ")", ":", "conv", "=", "_create_converter", "(", "dataType", ".", "elementType", ")", "return", "lambda", "row", ":", "[", "conv", "(", "v", ")", "for", "v", "in", "row", "]", "elif", "isinstance", "(", "dataType", ",", "MapType", ")", ":", "kconv", "=", "_create_converter", "(", "dataType", ".", "keyType", ")", "vconv", "=", "_create_converter", "(", "dataType", ".", "valueType", ")", "return", "lambda", "row", ":", "dict", "(", "(", "kconv", "(", "k", ")", ",", "vconv", "(", "v", ")", ")", "for", "k", ",", "v", "in", "row", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "dataType", ",", "NullType", ")", ":", "return", "lambda", "x", ":", "None", "elif", "not", "isinstance", "(", "dataType", ",", "StructType", ")", ":", "return", "lambda", "x", ":", "x", "# dataType must be StructType", "names", "=", "[", "f", ".", "name", "for", "f", "in", "dataType", ".", "fields", "]", "converters", "=", "[", "_create_converter", "(", "f", ".", "dataType", ")", "for", "f", "in", "dataType", ".", "fields", "]", "convert_fields", "=", "any", "(", "_need_converter", "(", "f", ".", "dataType", ")", "for", "f", "in", "dataType", ".", "fields", ")", "def", "convert_struct", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "if", "isinstance", "(", "obj", ",", "(", "tuple", ",", "list", ")", ")", ":", "if", "convert_fields", ":", "return", "tuple", "(", "conv", "(", "v", ")", "for", "v", ",", "conv", "in", "zip", "(", "obj", ",", "converters", ")", ")", "else", ":", "return", "tuple", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "d", "=", "obj", "elif", "hasattr", "(", "obj", ",", "\"__dict__\"", ")", ":", "# object", "d", "=", "obj", ".", "__dict__", "else", ":", "raise", "TypeError", "(", "\"Unexpected obj type: %s\"", "%", "type", 
"(", "obj", ")", ")", "if", "convert_fields", ":", "return", "tuple", "(", "[", "conv", "(", "d", ".", "get", "(", "name", ")", ")", "for", "name", ",", "conv", "in", "zip", "(", "names", ",", "converters", ")", "]", ")", "else", ":", "return", "tuple", "(", "[", "d", ".", "get", "(", "name", ")", "for", "name", "in", "names", "]", ")", "return", "convert_struct" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_make_type_verifier
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. This verifier also checks the value of obj against datatype and raises a ValueError if it's not within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is not checked, so it will become infinity when cast to Java float if it overflows. >>> _make_type_verifier(StructType([]))(None) >>> _make_type_verifier(StringType())("") >>> _make_type_verifier(LongType())(0) >>> _make_type_verifier(ArrayType(ShortType()))(list(range(3))) >>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({}) >>> _make_type_verifier(StructType([]))(()) >>> _make_type_verifier(StructType([]))([]) >>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> # Check if numeric values are within the allowed range. >>> _make_type_verifier(ByteType())(12) >>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier( ... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1}) Traceback (most recent call last): ... ValueError:... >>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False) >>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:...
python/pyspark/sql/types.py
def _make_type_verifier(dataType, nullable=True, name=None): """ Make a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. This verifier also checks the value of obj against datatype and raises a ValueError if it's not within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is not checked, so it will become infinity when cast to Java float if it overflows. >>> _make_type_verifier(StructType([]))(None) >>> _make_type_verifier(StringType())("") >>> _make_type_verifier(LongType())(0) >>> _make_type_verifier(ArrayType(ShortType()))(list(range(3))) >>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({}) >>> _make_type_verifier(StructType([]))(()) >>> _make_type_verifier(StructType([]))([]) >>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> # Check if numeric values are within the allowed range. >>> _make_type_verifier(ByteType())(12) >>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier( ... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1}) Traceback (most recent call last): ... ValueError:... >>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False) >>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... 
""" if name is None: new_msg = lambda msg: msg new_name = lambda n: "field %s" % n else: new_msg = lambda msg: "%s: %s" % (name, msg) new_name = lambda n: "field %s in %s" % (n, name) def verify_nullability(obj): if obj is None: if nullable: return True else: raise ValueError(new_msg("This field is not nullable, but got None")) else: return False _type = type(dataType) def assert_acceptable_types(obj): assert _type in _acceptable_types, \ new_msg("unknown datatype: %s for object %r" % (dataType, obj)) def verify_acceptable_types(obj): # subclass of them can not be fromInternal in JVM if type(obj) not in _acceptable_types[_type]: raise TypeError(new_msg("%s can not accept object %r in type %s" % (dataType, obj, type(obj)))) if isinstance(dataType, StringType): # StringType can work with any types verify_value = lambda _: _ elif isinstance(dataType, UserDefinedType): verifier = _make_type_verifier(dataType.sqlType(), name=name) def verify_udf(obj): if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType): raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType))) verifier(dataType.toInternal(obj)) verify_value = verify_udf elif isinstance(dataType, ByteType): def verify_byte(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -128 or obj > 127: raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj)) verify_value = verify_byte elif isinstance(dataType, ShortType): def verify_short(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -32768 or obj > 32767: raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj)) verify_value = verify_short elif isinstance(dataType, IntegerType): def verify_integer(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -2147483648 or obj > 2147483647: raise ValueError( new_msg("object of IntegerType out of range, got: %s" % obj)) verify_value = verify_integer elif isinstance(dataType, ArrayType): element_verifier = 
_make_type_verifier( dataType.elementType, dataType.containsNull, name="element in array %s" % name) def verify_array(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for i in obj: element_verifier(i) verify_value = verify_array elif isinstance(dataType, MapType): key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name) value_verifier = _make_type_verifier( dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name) def verify_map(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for k, v in obj.items(): key_verifier(k) value_verifier(v) verify_value = verify_map elif isinstance(dataType, StructType): verifiers = [] for f in dataType.fields: verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name)) verifiers.append((f.name, verifier)) def verify_struct(obj): assert_acceptable_types(obj) if isinstance(obj, dict): for f, verifier in verifiers: verifier(obj.get(f)) elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False): # the order in obj could be different than dataType.fields for f, verifier in verifiers: verifier(obj[f]) elif isinstance(obj, (tuple, list)): if len(obj) != len(verifiers): raise ValueError( new_msg("Length of object (%d) does not match with " "length of fields (%d)" % (len(obj), len(verifiers)))) for v, (_, verifier) in zip(obj, verifiers): verifier(v) elif hasattr(obj, "__dict__"): d = obj.__dict__ for f, verifier in verifiers: verifier(d.get(f)) else: raise TypeError(new_msg("StructType can not accept object %r in type %s" % (obj, type(obj)))) verify_value = verify_struct else: def verify_default(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) verify_value = verify_default def verify(obj): if not verify_nullability(obj): verify_value(obj) return verify
def _make_type_verifier(dataType, nullable=True, name=None): """ Make a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. This verifier also checks the value of obj against datatype and raises a ValueError if it's not within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is not checked, so it will become infinity when cast to Java float if it overflows. >>> _make_type_verifier(StructType([]))(None) >>> _make_type_verifier(StringType())("") >>> _make_type_verifier(LongType())(0) >>> _make_type_verifier(ArrayType(ShortType()))(list(range(3))) >>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({}) >>> _make_type_verifier(StructType([]))(()) >>> _make_type_verifier(StructType([]))([]) >>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> # Check if numeric values are within the allowed range. >>> _make_type_verifier(ByteType())(12) >>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier( ... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1}) Traceback (most recent call last): ... ValueError:... >>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False) >>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... 
""" if name is None: new_msg = lambda msg: msg new_name = lambda n: "field %s" % n else: new_msg = lambda msg: "%s: %s" % (name, msg) new_name = lambda n: "field %s in %s" % (n, name) def verify_nullability(obj): if obj is None: if nullable: return True else: raise ValueError(new_msg("This field is not nullable, but got None")) else: return False _type = type(dataType) def assert_acceptable_types(obj): assert _type in _acceptable_types, \ new_msg("unknown datatype: %s for object %r" % (dataType, obj)) def verify_acceptable_types(obj): # subclass of them can not be fromInternal in JVM if type(obj) not in _acceptable_types[_type]: raise TypeError(new_msg("%s can not accept object %r in type %s" % (dataType, obj, type(obj)))) if isinstance(dataType, StringType): # StringType can work with any types verify_value = lambda _: _ elif isinstance(dataType, UserDefinedType): verifier = _make_type_verifier(dataType.sqlType(), name=name) def verify_udf(obj): if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType): raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType))) verifier(dataType.toInternal(obj)) verify_value = verify_udf elif isinstance(dataType, ByteType): def verify_byte(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -128 or obj > 127: raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj)) verify_value = verify_byte elif isinstance(dataType, ShortType): def verify_short(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -32768 or obj > 32767: raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj)) verify_value = verify_short elif isinstance(dataType, IntegerType): def verify_integer(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -2147483648 or obj > 2147483647: raise ValueError( new_msg("object of IntegerType out of range, got: %s" % obj)) verify_value = verify_integer elif isinstance(dataType, ArrayType): element_verifier = 
_make_type_verifier( dataType.elementType, dataType.containsNull, name="element in array %s" % name) def verify_array(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for i in obj: element_verifier(i) verify_value = verify_array elif isinstance(dataType, MapType): key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name) value_verifier = _make_type_verifier( dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name) def verify_map(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for k, v in obj.items(): key_verifier(k) value_verifier(v) verify_value = verify_map elif isinstance(dataType, StructType): verifiers = [] for f in dataType.fields: verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name)) verifiers.append((f.name, verifier)) def verify_struct(obj): assert_acceptable_types(obj) if isinstance(obj, dict): for f, verifier in verifiers: verifier(obj.get(f)) elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False): # the order in obj could be different than dataType.fields for f, verifier in verifiers: verifier(obj[f]) elif isinstance(obj, (tuple, list)): if len(obj) != len(verifiers): raise ValueError( new_msg("Length of object (%d) does not match with " "length of fields (%d)" % (len(obj), len(verifiers)))) for v, (_, verifier) in zip(obj, verifiers): verifier(v) elif hasattr(obj, "__dict__"): d = obj.__dict__ for f, verifier in verifiers: verifier(d.get(f)) else: raise TypeError(new_msg("StructType can not accept object %r in type %s" % (obj, type(obj)))) verify_value = verify_struct else: def verify_default(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) verify_value = verify_default def verify(obj): if not verify_nullability(obj): verify_value(obj) return verify
[ "Make", "a", "verifier", "that", "checks", "the", "type", "of", "obj", "against", "dataType", "and", "raises", "a", "TypeError", "if", "they", "do", "not", "match", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1202-L1391
[ "def", "_make_type_verifier", "(", "dataType", ",", "nullable", "=", "True", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "new_msg", "=", "lambda", "msg", ":", "msg", "new_name", "=", "lambda", "n", ":", "\"field %s\"", "%", "n", "else", ":", "new_msg", "=", "lambda", "msg", ":", "\"%s: %s\"", "%", "(", "name", ",", "msg", ")", "new_name", "=", "lambda", "n", ":", "\"field %s in %s\"", "%", "(", "n", ",", "name", ")", "def", "verify_nullability", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "if", "nullable", ":", "return", "True", "else", ":", "raise", "ValueError", "(", "new_msg", "(", "\"This field is not nullable, but got None\"", ")", ")", "else", ":", "return", "False", "_type", "=", "type", "(", "dataType", ")", "def", "assert_acceptable_types", "(", "obj", ")", ":", "assert", "_type", "in", "_acceptable_types", ",", "new_msg", "(", "\"unknown datatype: %s for object %r\"", "%", "(", "dataType", ",", "obj", ")", ")", "def", "verify_acceptable_types", "(", "obj", ")", ":", "# subclass of them can not be fromInternal in JVM", "if", "type", "(", "obj", ")", "not", "in", "_acceptable_types", "[", "_type", "]", ":", "raise", "TypeError", "(", "new_msg", "(", "\"%s can not accept object %r in type %s\"", "%", "(", "dataType", ",", "obj", ",", "type", "(", "obj", ")", ")", ")", ")", "if", "isinstance", "(", "dataType", ",", "StringType", ")", ":", "# StringType can work with any types", "verify_value", "=", "lambda", "_", ":", "_", "elif", "isinstance", "(", "dataType", ",", "UserDefinedType", ")", ":", "verifier", "=", "_make_type_verifier", "(", "dataType", ".", "sqlType", "(", ")", ",", "name", "=", "name", ")", "def", "verify_udf", "(", "obj", ")", ":", "if", "not", "(", "hasattr", "(", "obj", ",", "'__UDT__'", ")", "and", "obj", ".", "__UDT__", "==", "dataType", ")", ":", "raise", "ValueError", "(", "new_msg", "(", "\"%r is not an instance of type %r\"", "%", "(", "obj", ",", "dataType", ")", ")", ")", "verifier", "(", 
"dataType", ".", "toInternal", "(", "obj", ")", ")", "verify_value", "=", "verify_udf", "elif", "isinstance", "(", "dataType", ",", "ByteType", ")", ":", "def", "verify_byte", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "if", "obj", "<", "-", "128", "or", "obj", ">", "127", ":", "raise", "ValueError", "(", "new_msg", "(", "\"object of ByteType out of range, got: %s\"", "%", "obj", ")", ")", "verify_value", "=", "verify_byte", "elif", "isinstance", "(", "dataType", ",", "ShortType", ")", ":", "def", "verify_short", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "if", "obj", "<", "-", "32768", "or", "obj", ">", "32767", ":", "raise", "ValueError", "(", "new_msg", "(", "\"object of ShortType out of range, got: %s\"", "%", "obj", ")", ")", "verify_value", "=", "verify_short", "elif", "isinstance", "(", "dataType", ",", "IntegerType", ")", ":", "def", "verify_integer", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "if", "obj", "<", "-", "2147483648", "or", "obj", ">", "2147483647", ":", "raise", "ValueError", "(", "new_msg", "(", "\"object of IntegerType out of range, got: %s\"", "%", "obj", ")", ")", "verify_value", "=", "verify_integer", "elif", "isinstance", "(", "dataType", ",", "ArrayType", ")", ":", "element_verifier", "=", "_make_type_verifier", "(", "dataType", ".", "elementType", ",", "dataType", ".", "containsNull", ",", "name", "=", "\"element in array %s\"", "%", "name", ")", "def", "verify_array", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "for", "i", "in", "obj", ":", "element_verifier", "(", "i", ")", "verify_value", "=", "verify_array", "elif", "isinstance", "(", "dataType", ",", "MapType", ")", ":", "key_verifier", "=", "_make_type_verifier", "(", "dataType", ".", "keyType", ",", "False", ",", 
"name", "=", "\"key of map %s\"", "%", "name", ")", "value_verifier", "=", "_make_type_verifier", "(", "dataType", ".", "valueType", ",", "dataType", ".", "valueContainsNull", ",", "name", "=", "\"value of map %s\"", "%", "name", ")", "def", "verify_map", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", ":", "key_verifier", "(", "k", ")", "value_verifier", "(", "v", ")", "verify_value", "=", "verify_map", "elif", "isinstance", "(", "dataType", ",", "StructType", ")", ":", "verifiers", "=", "[", "]", "for", "f", "in", "dataType", ".", "fields", ":", "verifier", "=", "_make_type_verifier", "(", "f", ".", "dataType", ",", "f", ".", "nullable", ",", "name", "=", "new_name", "(", "f", ".", "name", ")", ")", "verifiers", ".", "append", "(", "(", "f", ".", "name", ",", "verifier", ")", ")", "def", "verify_struct", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "for", "f", ",", "verifier", "in", "verifiers", ":", "verifier", "(", "obj", ".", "get", "(", "f", ")", ")", "elif", "isinstance", "(", "obj", ",", "Row", ")", "and", "getattr", "(", "obj", ",", "\"__from_dict__\"", ",", "False", ")", ":", "# the order in obj could be different than dataType.fields", "for", "f", ",", "verifier", "in", "verifiers", ":", "verifier", "(", "obj", "[", "f", "]", ")", "elif", "isinstance", "(", "obj", ",", "(", "tuple", ",", "list", ")", ")", ":", "if", "len", "(", "obj", ")", "!=", "len", "(", "verifiers", ")", ":", "raise", "ValueError", "(", "new_msg", "(", "\"Length of object (%d) does not match with \"", "\"length of fields (%d)\"", "%", "(", "len", "(", "obj", ")", ",", "len", "(", "verifiers", ")", ")", ")", ")", "for", "v", ",", "(", "_", ",", "verifier", ")", "in", "zip", "(", "obj", ",", "verifiers", ")", ":", "verifier", "(", "v", ")", "elif", "hasattr", "(", "obj", ",", "\"__dict__\"", 
")", ":", "d", "=", "obj", ".", "__dict__", "for", "f", ",", "verifier", "in", "verifiers", ":", "verifier", "(", "d", ".", "get", "(", "f", ")", ")", "else", ":", "raise", "TypeError", "(", "new_msg", "(", "\"StructType can not accept object %r in type %s\"", "%", "(", "obj", ",", "type", "(", "obj", ")", ")", ")", ")", "verify_value", "=", "verify_struct", "else", ":", "def", "verify_default", "(", "obj", ")", ":", "assert_acceptable_types", "(", "obj", ")", "verify_acceptable_types", "(", "obj", ")", "verify_value", "=", "verify_default", "def", "verify", "(", "obj", ")", ":", "if", "not", "verify_nullability", "(", "obj", ")", ":", "verify_value", "(", "obj", ")", "return", "verify" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
to_arrow_type
Convert Spark data type to pyarrow type
python/pyspark/sql/types.py
def to_arrow_type(dt): """ Convert Spark data type to pyarrow type """ import pyarrow as pa if type(dt) == BooleanType: arrow_type = pa.bool_() elif type(dt) == ByteType: arrow_type = pa.int8() elif type(dt) == ShortType: arrow_type = pa.int16() elif type(dt) == IntegerType: arrow_type = pa.int32() elif type(dt) == LongType: arrow_type = pa.int64() elif type(dt) == FloatType: arrow_type = pa.float32() elif type(dt) == DoubleType: arrow_type = pa.float64() elif type(dt) == DecimalType: arrow_type = pa.decimal128(dt.precision, dt.scale) elif type(dt) == StringType: arrow_type = pa.string() elif type(dt) == BinaryType: arrow_type = pa.binary() elif type(dt) == DateType: arrow_type = pa.date32() elif type(dt) == TimestampType: # Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read arrow_type = pa.timestamp('us', tz='UTC') elif type(dt) == ArrayType: if type(dt.elementType) in [StructType, TimestampType]: raise TypeError("Unsupported type in conversion to Arrow: " + str(dt)) arrow_type = pa.list_(to_arrow_type(dt.elementType)) elif type(dt) == StructType: if any(type(field.dataType) == StructType for field in dt): raise TypeError("Nested StructType not supported in conversion to Arrow") fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in dt] arrow_type = pa.struct(fields) else: raise TypeError("Unsupported type in conversion to Arrow: " + str(dt)) return arrow_type
def to_arrow_type(dt): """ Convert Spark data type to pyarrow type """ import pyarrow as pa if type(dt) == BooleanType: arrow_type = pa.bool_() elif type(dt) == ByteType: arrow_type = pa.int8() elif type(dt) == ShortType: arrow_type = pa.int16() elif type(dt) == IntegerType: arrow_type = pa.int32() elif type(dt) == LongType: arrow_type = pa.int64() elif type(dt) == FloatType: arrow_type = pa.float32() elif type(dt) == DoubleType: arrow_type = pa.float64() elif type(dt) == DecimalType: arrow_type = pa.decimal128(dt.precision, dt.scale) elif type(dt) == StringType: arrow_type = pa.string() elif type(dt) == BinaryType: arrow_type = pa.binary() elif type(dt) == DateType: arrow_type = pa.date32() elif type(dt) == TimestampType: # Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read arrow_type = pa.timestamp('us', tz='UTC') elif type(dt) == ArrayType: if type(dt.elementType) in [StructType, TimestampType]: raise TypeError("Unsupported type in conversion to Arrow: " + str(dt)) arrow_type = pa.list_(to_arrow_type(dt.elementType)) elif type(dt) == StructType: if any(type(field.dataType) == StructType for field in dt): raise TypeError("Nested StructType not supported in conversion to Arrow") fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in dt] arrow_type = pa.struct(fields) else: raise TypeError("Unsupported type in conversion to Arrow: " + str(dt)) return arrow_type
[ "Convert", "Spark", "data", "type", "to", "pyarrow", "type" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1581-L1622
[ "def", "to_arrow_type", "(", "dt", ")", ":", "import", "pyarrow", "as", "pa", "if", "type", "(", "dt", ")", "==", "BooleanType", ":", "arrow_type", "=", "pa", ".", "bool_", "(", ")", "elif", "type", "(", "dt", ")", "==", "ByteType", ":", "arrow_type", "=", "pa", ".", "int8", "(", ")", "elif", "type", "(", "dt", ")", "==", "ShortType", ":", "arrow_type", "=", "pa", ".", "int16", "(", ")", "elif", "type", "(", "dt", ")", "==", "IntegerType", ":", "arrow_type", "=", "pa", ".", "int32", "(", ")", "elif", "type", "(", "dt", ")", "==", "LongType", ":", "arrow_type", "=", "pa", ".", "int64", "(", ")", "elif", "type", "(", "dt", ")", "==", "FloatType", ":", "arrow_type", "=", "pa", ".", "float32", "(", ")", "elif", "type", "(", "dt", ")", "==", "DoubleType", ":", "arrow_type", "=", "pa", ".", "float64", "(", ")", "elif", "type", "(", "dt", ")", "==", "DecimalType", ":", "arrow_type", "=", "pa", ".", "decimal128", "(", "dt", ".", "precision", ",", "dt", ".", "scale", ")", "elif", "type", "(", "dt", ")", "==", "StringType", ":", "arrow_type", "=", "pa", ".", "string", "(", ")", "elif", "type", "(", "dt", ")", "==", "BinaryType", ":", "arrow_type", "=", "pa", ".", "binary", "(", ")", "elif", "type", "(", "dt", ")", "==", "DateType", ":", "arrow_type", "=", "pa", ".", "date32", "(", ")", "elif", "type", "(", "dt", ")", "==", "TimestampType", ":", "# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read", "arrow_type", "=", "pa", ".", "timestamp", "(", "'us'", ",", "tz", "=", "'UTC'", ")", "elif", "type", "(", "dt", ")", "==", "ArrayType", ":", "if", "type", "(", "dt", ".", "elementType", ")", "in", "[", "StructType", ",", "TimestampType", "]", ":", "raise", "TypeError", "(", "\"Unsupported type in conversion to Arrow: \"", "+", "str", "(", "dt", ")", ")", "arrow_type", "=", "pa", ".", "list_", "(", "to_arrow_type", "(", "dt", ".", "elementType", ")", ")", "elif", "type", "(", "dt", ")", "==", "StructType", ":", "if", "any", "(", "type", "(", 
"field", ".", "dataType", ")", "==", "StructType", "for", "field", "in", "dt", ")", ":", "raise", "TypeError", "(", "\"Nested StructType not supported in conversion to Arrow\"", ")", "fields", "=", "[", "pa", ".", "field", "(", "field", ".", "name", ",", "to_arrow_type", "(", "field", ".", "dataType", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "for", "field", "in", "dt", "]", "arrow_type", "=", "pa", ".", "struct", "(", "fields", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported type in conversion to Arrow: \"", "+", "str", "(", "dt", ")", ")", "return", "arrow_type" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
to_arrow_schema
Convert a schema from Spark to Arrow
python/pyspark/sql/types.py
def to_arrow_schema(schema): """ Convert a schema from Spark to Arrow """ import pyarrow as pa fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in schema] return pa.schema(fields)
def to_arrow_schema(schema): """ Convert a schema from Spark to Arrow """ import pyarrow as pa fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in schema] return pa.schema(fields)
[ "Convert", "a", "schema", "from", "Spark", "to", "Arrow" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1625-L1631
[ "def", "to_arrow_schema", "(", "schema", ")", ":", "import", "pyarrow", "as", "pa", "fields", "=", "[", "pa", ".", "field", "(", "field", ".", "name", ",", "to_arrow_type", "(", "field", ".", "dataType", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "for", "field", "in", "schema", "]", "return", "pa", ".", "schema", "(", "fields", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
from_arrow_type
Convert pyarrow type to Spark data type.
python/pyspark/sql/types.py
def from_arrow_type(at): """ Convert pyarrow type to Spark data type. """ import pyarrow.types as types if types.is_boolean(at): spark_type = BooleanType() elif types.is_int8(at): spark_type = ByteType() elif types.is_int16(at): spark_type = ShortType() elif types.is_int32(at): spark_type = IntegerType() elif types.is_int64(at): spark_type = LongType() elif types.is_float32(at): spark_type = FloatType() elif types.is_float64(at): spark_type = DoubleType() elif types.is_decimal(at): spark_type = DecimalType(precision=at.precision, scale=at.scale) elif types.is_string(at): spark_type = StringType() elif types.is_binary(at): spark_type = BinaryType() elif types.is_date32(at): spark_type = DateType() elif types.is_timestamp(at): spark_type = TimestampType() elif types.is_list(at): if types.is_timestamp(at.value_type): raise TypeError("Unsupported type in conversion from Arrow: " + str(at)) spark_type = ArrayType(from_arrow_type(at.value_type)) elif types.is_struct(at): if any(types.is_struct(field.type) for field in at): raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at)) return StructType( [StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in at]) else: raise TypeError("Unsupported type in conversion from Arrow: " + str(at)) return spark_type
def from_arrow_type(at): """ Convert pyarrow type to Spark data type. """ import pyarrow.types as types if types.is_boolean(at): spark_type = BooleanType() elif types.is_int8(at): spark_type = ByteType() elif types.is_int16(at): spark_type = ShortType() elif types.is_int32(at): spark_type = IntegerType() elif types.is_int64(at): spark_type = LongType() elif types.is_float32(at): spark_type = FloatType() elif types.is_float64(at): spark_type = DoubleType() elif types.is_decimal(at): spark_type = DecimalType(precision=at.precision, scale=at.scale) elif types.is_string(at): spark_type = StringType() elif types.is_binary(at): spark_type = BinaryType() elif types.is_date32(at): spark_type = DateType() elif types.is_timestamp(at): spark_type = TimestampType() elif types.is_list(at): if types.is_timestamp(at.value_type): raise TypeError("Unsupported type in conversion from Arrow: " + str(at)) spark_type = ArrayType(from_arrow_type(at.value_type)) elif types.is_struct(at): if any(types.is_struct(field.type) for field in at): raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at)) return StructType( [StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in at]) else: raise TypeError("Unsupported type in conversion from Arrow: " + str(at)) return spark_type
[ "Convert", "pyarrow", "type", "to", "Spark", "data", "type", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1634-L1674
[ "def", "from_arrow_type", "(", "at", ")", ":", "import", "pyarrow", ".", "types", "as", "types", "if", "types", ".", "is_boolean", "(", "at", ")", ":", "spark_type", "=", "BooleanType", "(", ")", "elif", "types", ".", "is_int8", "(", "at", ")", ":", "spark_type", "=", "ByteType", "(", ")", "elif", "types", ".", "is_int16", "(", "at", ")", ":", "spark_type", "=", "ShortType", "(", ")", "elif", "types", ".", "is_int32", "(", "at", ")", ":", "spark_type", "=", "IntegerType", "(", ")", "elif", "types", ".", "is_int64", "(", "at", ")", ":", "spark_type", "=", "LongType", "(", ")", "elif", "types", ".", "is_float32", "(", "at", ")", ":", "spark_type", "=", "FloatType", "(", ")", "elif", "types", ".", "is_float64", "(", "at", ")", ":", "spark_type", "=", "DoubleType", "(", ")", "elif", "types", ".", "is_decimal", "(", "at", ")", ":", "spark_type", "=", "DecimalType", "(", "precision", "=", "at", ".", "precision", ",", "scale", "=", "at", ".", "scale", ")", "elif", "types", ".", "is_string", "(", "at", ")", ":", "spark_type", "=", "StringType", "(", ")", "elif", "types", ".", "is_binary", "(", "at", ")", ":", "spark_type", "=", "BinaryType", "(", ")", "elif", "types", ".", "is_date32", "(", "at", ")", ":", "spark_type", "=", "DateType", "(", ")", "elif", "types", ".", "is_timestamp", "(", "at", ")", ":", "spark_type", "=", "TimestampType", "(", ")", "elif", "types", ".", "is_list", "(", "at", ")", ":", "if", "types", ".", "is_timestamp", "(", "at", ".", "value_type", ")", ":", "raise", "TypeError", "(", "\"Unsupported type in conversion from Arrow: \"", "+", "str", "(", "at", ")", ")", "spark_type", "=", "ArrayType", "(", "from_arrow_type", "(", "at", ".", "value_type", ")", ")", "elif", "types", ".", "is_struct", "(", "at", ")", ":", "if", "any", "(", "types", ".", "is_struct", "(", "field", ".", "type", ")", "for", "field", "in", "at", ")", ":", "raise", "TypeError", "(", "\"Nested StructType not supported in conversion from Arrow: \"", "+", "str", "(", "at", ")", ")", 
"return", "StructType", "(", "[", "StructField", "(", "field", ".", "name", ",", "from_arrow_type", "(", "field", ".", "type", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "for", "field", "in", "at", "]", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported type in conversion from Arrow: \"", "+", "str", "(", "at", ")", ")", "return", "spark_type" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
from_arrow_schema
Convert schema from Arrow to Spark.
python/pyspark/sql/types.py
def from_arrow_schema(arrow_schema): """ Convert schema from Arrow to Spark. """ return StructType( [StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in arrow_schema])
def from_arrow_schema(arrow_schema): """ Convert schema from Arrow to Spark. """ return StructType( [StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in arrow_schema])
[ "Convert", "schema", "from", "Arrow", "to", "Spark", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1677-L1682
[ "def", "from_arrow_schema", "(", "arrow_schema", ")", ":", "return", "StructType", "(", "[", "StructField", "(", "field", ".", "name", ",", "from_arrow_type", "(", "field", ".", "type", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "for", "field", "in", "arrow_schema", "]", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_check_series_localize_timestamps
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is returned. :param s: pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series that have been converted to tz-naive
python/pyspark/sql/types.py
def _check_series_localize_timestamps(s, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is returned. :param s: pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series that have been converted to tz-naive """ from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() from pandas.api.types import is_datetime64tz_dtype tz = timezone or _get_local_timezone() # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if is_datetime64tz_dtype(s.dtype): return s.dt.tz_convert(tz).dt.tz_localize(None) else: return s
def _check_series_localize_timestamps(s, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is returned. :param s: pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series that have been converted to tz-naive """ from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() from pandas.api.types import is_datetime64tz_dtype tz = timezone or _get_local_timezone() # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if is_datetime64tz_dtype(s.dtype): return s.dt.tz_convert(tz).dt.tz_localize(None) else: return s
[ "Convert", "timezone", "aware", "timestamps", "to", "timezone", "-", "naive", "in", "the", "specified", "timezone", "or", "local", "timezone", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1700-L1720
[ "def", "_check_series_localize_timestamps", "(", "s", ",", "timezone", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "from", "pandas", ".", "api", ".", "types", "import", "is_datetime64tz_dtype", "tz", "=", "timezone", "or", "_get_local_timezone", "(", ")", "# TODO: handle nested timestamps, such as ArrayType(TimestampType())?", "if", "is_datetime64tz_dtype", "(", "s", ".", "dtype", ")", ":", "return", "s", ".", "dt", ".", "tz_convert", "(", "tz", ")", ".", "dt", ".", "tz_localize", "(", "None", ")", "else", ":", "return", "s" ]
618d6bff71073c8c93501ab7392c3cc579730f0b