_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33500
GroupMixin.descendants
train
def descendants(self, include_clip=True):
    """
    Return a generator to iterate over all descendant layers.

    Example::

        # Iterate over all layers
        for layer in psd.descendants():
            print(layer)

        # Iterate over all layers in reverse order
        for layer in reversed(list(psd.descendants())):
            print(layer)

    :param include_clip: include clipping layers.
    """
    for child in self:
        yield child
        # Depth-first recursion into groups.
        if child.is_group():
            yield from child.descendants(include_clip)
        # Optionally emit the clipping layers attached to this child.
        if include_clip and hasattr(child, 'clip_layers'):
            yield from child.clip_layers
python
{ "resource": "" }
q33501
Artboard.compose
train
def compose(self, bbox=None, **kwargs):
    """
    Compose the artboard.

    See :py:func:`~psd_tools.compose` for available extra arguments.

    :param bbox: Viewport tuple (left, top, right, bottom).
    :return: :py:class:`PIL.Image`, or `None` if there is no pixel.
    """
    from psd_tools.api.composer import compose
    # Fall back to the artboard's own bounding box when none is given.
    viewport = bbox if bbox else self.bbox
    return compose(self, bbox=viewport, **kwargs)
python
{ "resource": "" }
q33502
SmartObjectLayer.smart_object
train
def smart_object(self):
    """
    Associated smart object.

    :return: :py:class:`~psd_tools.api.smart_object.SmartObject`.
    """
    # Lazily build the wrapper on first access and cache it.
    if not hasattr(self, '_smart_object'):
        self._smart_object = SmartObject(self)
    return self._smart_object
python
{ "resource": "" }
q33503
ShapeLayer.stroke
train
def stroke(self):
    """Property for strokes."""
    # Compute once; the result (possibly None) is cached on the instance.
    if not hasattr(self, '_stroke'):
        self._stroke = None
        data = self.tagged_blocks.get_data('VECTOR_STROKE_DATA')
        if data:
            self._stroke = Stroke(data)
    return self._stroke
python
{ "resource": "" }
q33504
read_fmt
train
def read_fmt(fmt, fp):
    """
    Reads data from ``fp`` according to ``fmt``.
    """
    full_fmt = str('>' + fmt)  # PSD data is big-endian
    expected = struct.calcsize(full_fmt)
    data = fp.read(expected)
    assert len(data) == expected, 'read=%d, expected=%d' % (
        len(data), expected
    )
    return struct.unpack(full_fmt, data)
python
{ "resource": "" }
q33505
write_fmt
train
def write_fmt(fp, fmt, *args):
    """
    Writes data to ``fp`` according to ``fmt``.
    """
    full_fmt = str('>' + fmt)  # PSD data is big-endian
    expected = struct.calcsize(full_fmt)
    written = write_bytes(fp, struct.pack(full_fmt, *args))
    assert written == expected, 'written=%d, expected=%d' % (
        written, expected
    )
    return written
python
{ "resource": "" }
q33506
write_bytes
train
def write_bytes(fp, data):
    """
    Write bytes to the file object and returns bytes written.

    :return: written byte size
    """
    start = fp.tell()
    fp.write(data)
    # Measure via tell() so short writes are caught by the assertion.
    written = fp.tell() - start
    assert written == len(data), 'written=%d, expected=%d' % (
        written, len(data)
    )
    return written
python
{ "resource": "" }
q33507
read_length_block
train
def read_length_block(fp, fmt='I', padding=1):
    """
    Read a block of data with a length marker at the beginning.

    :param fp: file-like
    :param fmt: format of the length marker
    :return: bytes object
    """
    size = read_fmt(fmt, fp)[0]
    data = fp.read(size)
    assert len(data) == size, (len(data), size)
    # Consume any alignment padding that follows the payload.
    read_padding(fp, size, padding)
    return data
python
{ "resource": "" }
q33508
write_length_block
train
def write_length_block(fp, writer, fmt='I', padding=1, **kwargs):
    """
    Writes a block of data with a length marker at the beginning.

    Example::

        with io.BytesIO() as fp:
            write_length_block(fp, lambda f: f.write(b'\x00\x00'))

    :param fp: file-like
    :param writer: function object that takes file-like object as an argument
    :param fmt: format of the length marker
    :param padding: divisor for padding not included in length marker
    :return: written byte size
    """
    # Reserve a slot for the length, write the payload, then back-fill
    # the slot with the payload size and append alignment padding.
    marker = reserve_position(fp, fmt)
    total = writer(fp, **kwargs)
    total += write_position(fp, marker, total, fmt)
    total += write_padding(fp, total, padding)
    return total
python
{ "resource": "" }
q33509
reserve_position
train
def reserve_position(fp, fmt='I'):
    """
    Reserves the current position for write.

    Use with `write_position`.

    :param fp: file-like object
    :param fmt: format of the reserved position
    :return: the position
    """
    marker = fp.tell()
    # Skip over the slot; it gets back-filled later by ``write_position``.
    fp.seek(struct.calcsize(str('>' + fmt)), 1)
    return marker
python
{ "resource": "" }
q33510
write_position
train
def write_position(fp, position, value, fmt='I'):
    """
    Writes a value to the specified position.

    :param fp: file-like object
    :param position: position of the value marker
    :param value: value to write
    :param fmt: format of the value
    :return: written byte size
    """
    saved = fp.tell()
    fp.seek(position)
    written = write_bytes(fp, struct.pack(str('>' + fmt), value))
    fp.seek(saved)  # restore the caller's position
    return written
python
{ "resource": "" }
q33511
read_padding
train
def read_padding(fp, size, divisor=2):
    """
    Read padding bytes for the given byte size.

    :param fp: file-like object
    :param size: size of the preceding data
    :param divisor: divisor of the byte alignment
    :return: padding bytes read
    """
    extra = size % divisor
    if not extra:
        return b''
    return fp.read(divisor - extra)
python
{ "resource": "" }
q33512
write_padding
train
def write_padding(fp, size, divisor=2):
    """
    Writes padding bytes given the currently written size.

    :param fp: file-like object
    :param size: byte size written so far
    :param divisor: divisor of the byte alignment
    :return: written byte size
    """
    extra = size % divisor
    if not extra:
        return 0
    # '%dx' packs N zero bytes.
    return write_bytes(fp, struct.pack('%dx' % (divisor - extra)))
python
{ "resource": "" }
q33513
is_readable
train
def is_readable(fp, size=1):
    """
    Check if the file-like object is readable.

    :param fp: file-like object
    :param size: byte size
    :return: bool
    """
    peeked = len(fp.read(size))
    # Rewind so the caller's position is unchanged.
    fp.seek(-peeked, 1)
    return peeked == size
python
{ "resource": "" }
q33514
read_be_array
train
def read_be_array(fmt, count, fp):
    """
    Reads an array from a file with big-endian data.
    """
    arr = array.array(str(fmt))
    raw = fp.read(count * arr.itemsize)
    # ``fromstring`` is the pre-Python-3.2 spelling of ``frombytes``.
    if hasattr(arr, 'frombytes'):
        arr.frombytes(raw)
    else:
        arr.fromstring(raw)
    return fix_byteorder(arr)
python
{ "resource": "" }
q33515
be_array_from_bytes
train
def be_array_from_bytes(fmt, data):
    """
    Reads an array from bytestring with big-endian data.
    """
    # Build the array in native order, then swap to big-endian semantics.
    return fix_byteorder(array.array(str(fmt), data))
python
{ "resource": "" }
q33516
be_array_to_bytes
train
def be_array_to_bytes(arr):
    """
    Writes an array to bytestring with big-endian data.
    """
    swapped = fix_byteorder(arr)
    # ``tostring`` is the pre-Python-3.2 spelling of ``tobytes``.
    if hasattr(arr, 'tobytes'):
        return swapped.tobytes()
    return swapped.tostring()
python
{ "resource": "" }
q33517
new_registry
train
def new_registry(attribute=None):
    """
    Returns an empty dict and a @register decorator.
    """
    mapping = {}

    def register(key):
        def decorator(target):
            mapping[key] = target
            # Optionally tag the registered object with its key.
            if attribute:
                setattr(target, attribute, key)
            return target
        return decorator

    return mapping, register
python
{ "resource": "" }
q33518
stop
train
def stop():
    """Stop the server, invalidating any viewer URLs.

    This allows any previously-referenced data arrays to be garbage collected
    if there are no other references to them.
    """
    global global_server
    if global_server is None:
        return
    ioloop = global_server.ioloop

    def shutdown():
        # Runs on the ioloop thread; stop then close it.
        ioloop.stop()
        ioloop.close()

    global_server.ioloop.add_callback(shutdown)
    global_server = None
python
{ "resource": "" }
q33519
defer_callback
train
def defer_callback(callback, *args, **kwargs):
    """Register `callback` to run in the server event loop thread."""
    start()  # make sure the global server and its event loop exist
    run = lambda: callback(*args, **kwargs)
    global_server.ioloop.add_callback(run)
python
{ "resource": "" }
q33520
compute_near_isotropic_downsampling_scales
train
def compute_near_isotropic_downsampling_scales(
        size,
        voxel_size,
        dimensions_to_downsample,
        max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
        max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
        max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
    """Compute a list of successive downsampling factors."""
    num_dims = len(voxel_size)
    cur_scale = np.ones((num_dims, ), dtype=int)
    scales = [tuple(cur_scale)]
    while (len(scales) < max_scales and
           np.prod(cur_scale) < max_downsampling and
           (size / cur_scale).max() > max_downsampled_size):
        # Halve resolution along the dimension whose effective voxel size
        # is currently the smallest.
        cur_voxel_size = cur_scale * voxel_size
        candidates = cur_voxel_size[dimensions_to_downsample]
        smallest_dim = dimensions_to_downsample[np.argmin(candidates)]
        cur_scale[smallest_dim] *= 2
        target = cur_voxel_size[smallest_dim] * 2
        for dim in dimensions_to_downsample:
            if dim == smallest_dim:
                continue
            dim_voxel = cur_voxel_size[dim]
            # Also double this dimension if that lands closer to the target.
            if abs(dim_voxel - target) > abs(dim_voxel * 2 - target):
                cur_scale[dim] *= 2
        scales.append(tuple(cur_scale))
    return scales
python
{ "resource": "" }
q33521
compute_two_dimensional_near_isotropic_downsampling_scales
train
def compute_two_dimensional_near_isotropic_downsampling_scales(
        size,
        voxel_size,
        max_scales=float('inf'),
        max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
        max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
    """Compute a list of successive downsampling factors for 2-d tiles."""
    max_scales = min(max_scales, 10)

    # First compute a set of 2-d downsamplings for XY, XZ, and YZ with a high
    # number of max_scales, and ignoring other criteria.
    scales_transpose = [
        compute_near_isotropic_downsampling_scales(
            size=size,
            voxel_size=voxel_size,
            dimensions_to_downsample=plane,
            max_scales=max_scales,
            max_downsampling=float('inf'),
            max_downsampled_size=0,
        ) for plane in [[0, 1], [0, 2], [1, 2]]
    ]

    # Truncate all list of scales to the same length, once the stopping
    # criteria is reached for all values of dimensions_to_downsample.
    scales = [((1, ) * 3, ) * 3]
    size = np.array(size)

    def scale_satisfies_criteria(scale):
        return (np.prod(scale) < max_downsampling and
                (size / scale).max() > max_downsampled_size)

    for i in range(1, max_scales):
        cur_scales = tuple(scales_transpose[d][i] for d in range(3))
        if all(not scale_satisfies_criteria(scale) for scale in cur_scales):
            break
        scales.append(cur_scales)
    return scales
python
{ "resource": "" }
q33522
json_encoder_default
train
def json_encoder_default(obj):
    """JSON encoder function that handles some numpy types."""
    # Integers outside the JavaScript safe range are sent as strings,
    # as are all numpy integers.
    out_of_range = isinstance(obj, numbers.Integral) and not (
        min_safe_integer <= obj <= max_safe_integer)
    if out_of_range or isinstance(obj, np.integer):
        return str(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, (np.ndarray, set, frozenset)):
        return list(obj)
    raise TypeError
python
{ "resource": "" }
q33523
StateHandler._on_state_changed
train
def _on_state_changed(self):
    """Invoked when the viewer state changes."""
    raw_state, generation = self.state.raw_state_and_generation
    if generation == self._last_generation:
        return  # no actual change; nothing to send
    self._last_generation = generation
    self._send_update(raw_state, generation)
python
{ "resource": "" }
q33524
future_then_immediate
train
def future_then_immediate(future, func):
    """Returns a future that maps the result of `future` by `func`.

    If `future` succeeds, sets the result of the returned future to
    `func(future.result())`.  If `future` fails or `func` raises an exception,
    the exception is stored in the returned future.

    If `future` has not yet finished, `func` is invoked by the same thread
    that finishes it.  Otherwise, it is invoked immediately in the same thread
    that calls `future_then_immediate`.
    """
    mapped = concurrent.futures.Future()

    def propagate(completed):
        try:
            mapped.set_result(func(completed.result()))
        except Exception as exc:
            # Both a failed input future and a failing `func` land here.
            mapped.set_exception(exc)

    future.add_done_callback(propagate)
    return mapped
python
{ "resource": "" }
q33525
downsample_with_averaging
train
def downsample_with_averaging(array, factor):
    """Downsample `array` by `factor` using averaging.

    :param array: input ndarray.
    :param factor: per-axis downsampling factor (sequence of ints).
    @return: The downsampled array, of the same type as `array`.
    """
    factor = tuple(factor)
    output_shape = tuple(
        int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
    temp = np.zeros(output_shape, dtype=np.float32)
    # Per-cell count of contributing samples; edge cells may receive
    # fewer than prod(factor) samples.
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    counts = np.zeros(output_shape, dtype=int)
    for offset in np.ndindex(factor):
        part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
        indexing_expr = tuple(np.s_[:s] for s in part.shape)
        temp[indexing_expr] += part
        counts[indexing_expr] += 1
    # BUG FIX: np.cast was removed in NumPy 2.0; astype is the supported API.
    return (temp / counts).astype(array.dtype)
python
{ "resource": "" }
q33526
downsample_with_striding
train
def downsample_with_striding(array, factor):
    """Downsample `array` by `factor` using striding.

    @return: The downsampled array, of the same type as `array`.
    """
    # Keep every factor-th sample along each axis.
    selector = tuple(np.s_[::step] for step in factor)
    return array[selector]
python
{ "resource": "" }
q33527
EquivalenceMap._get_representative
train
def _get_representative(self, obj):
    """Finds and returns the root of the set containing `obj`."""
    if obj not in self._parents:
        # First sighting of `obj`: make it a singleton set.
        self._parents[obj] = obj
        self._weights[obj] = 1
        self._prev_next[obj] = [obj, obj]
        self._min_values[obj] = obj
        return obj
    # Walk parent links up to the root.
    path = [obj]
    root = self._parents[obj]
    while root != path[-1]:
        path.append(root)
        root = self._parents[root]
    # Path compression: point every visited node directly at the root.
    for ancestor in path:
        self._parents[ancestor] = root
    return root
python
{ "resource": "" }
q33528
EquivalenceMap.members
train
def members(self, x):
    """Yields the members of the equivalence class containing `x`."""
    if x not in self._parents:
        # Unknown element: it is the sole member of its own class.
        yield x
        return
    current = x
    while True:
        yield current
        current = self._prev_next[current][1]  # follow the "next" link
        if current == x:
            break
python
{ "resource": "" }
q33529
EquivalenceMap.sets
train
def sets(self):
    """Returns the equivalence classes as a set of sets."""
    by_root = {}
    # Group every known element under its class representative.
    for member in self._parents:
        by_root.setdefault(self[member], set()).add(member)
    return frozenset(frozenset(group) for group in six.viewvalues(by_root))
python
{ "resource": "" }
q33530
EquivalenceMap.to_json
train
def to_json(self):
    """Returns the equivalence classes a sorted list of sorted lists."""
    # Sort members within each class, then sort the classes themselves.
    return sorted(sorted(group) for group in self.sets())
python
{ "resource": "" }
q33531
EquivalenceMap.delete_set
train
def delete_set(self, x):
    """Removes the equivalence class containing `x`."""
    if x not in self._parents:
        return
    # Snapshot the members first, since deletion breaks the linked list.
    for member in list(self.members(x)):
        del self._parents[member]
        del self._weights[member]
        del self._prev_next[member]
        del self._min_values[member]
python
{ "resource": "" }
q33532
EquivalenceMap.isolate_element
train
def isolate_element(self, x):
    """Isolates `x` from its equivalence class."""
    remaining = [v for v in self.members(x) if v != x]
    self.delete_set(x)
    # Re-link the other members into a class of their own.
    self.union(*remaining)
python
{ "resource": "" }
q33533
quaternion_slerp
train
def quaternion_slerp(a, b, t):
    """Spherical linear interpolation for unit quaternions.

    This is based on the implementation in the gl-matrix package:
    https://github.com/toji/gl-matrix
    """
    if a is None:
        a = unit_quaternion()
    if b is None:
        b = unit_quaternion()
    cosom = np.dot(a, b)
    if cosom < 0.0:
        # Flip one endpoint so we interpolate along the shorter arc.
        cosom = -cosom
        b = -b
    if (1.0 - cosom) > 0.000001:
        # Standard slerp.
        omega = math.acos(cosom)
        sinom = math.sin(omega)
        w0 = math.sin((1.0 - t) * omega) / sinom
        w1 = math.sin(t * omega) / sinom
    else:
        # Endpoints nearly parallel: fall back to linear interpolation.
        w0 = 1.0 - t
        w1 = t
    return w0 * a + w1 * b
python
{ "resource": "" }
q33534
GreedyMulticut.remove_edge_from_heap
train
def remove_edge_from_heap(self, segment_ids):
    """Remove an edge from the heap."""
    self._initialize_heap()
    key = normalize_edge(segment_ids)
    if key in self.edge_map:
        # Lazy deletion: invalidate the heap entry rather than removing it.
        self.edge_map[key][0] = None
        self.num_valid_edges -= 1
python
{ "resource": "" }
q33535
TrackableState.txn
train
def txn(self, overwrite=False, lock=True):
    """Context manager for a state modification transaction."""
    if lock:
        self._lock.acquire()
    try:
        new_state, existing_generation = self.state_and_generation
        # Give the caller a private copy to mutate.
        new_state = copy.deepcopy(new_state)
        yield new_state
        if overwrite:
            # Skip the compare-and-swap check; replace unconditionally.
            existing_generation = None
        self.set_state(new_state, existing_generation=existing_generation)
    finally:
        if lock:
            self._lock.release()
python
{ "resource": "" }
q33536
LocalVolume.invalidate
train
def invalidate(self):
    """Mark the data invalidated.  Clients will refetch the volume."""
    with self._mesh_generator_lock:
        # Drop any cached or in-flight mesh generator so it is rebuilt
        # on the next request.
        self._mesh_generator_pending = None
        self._mesh_generator = None
        self._dispatch_changed_callbacks()
python
{ "resource": "" }
q33537
save_md
train
def save_md(p, *vsheets):
    'pipe tables compatible with org-mode'
    with p.open_text(mode='w') as fp:
        for vs in vsheets:
            # Multiple sheets are separated by a heading with the sheet name.
            if len(vsheets) > 1:
                fp.write('# %s\n\n' % vs.name)
            cols = vs.visibleCols
            # Header row, then the separator row.
            fp.write('|' + '|'.join(
                '%-*s' % (col.width or options.default_width,
                          markdown_escape(col.name))
                for col in cols) + '|\n')
            fp.write('|' + '+'.join(
                markdown_colhdr(col) for col in cols) + '|\n')
            for row in Progress(vs.rows, 'saving'):
                fp.write('|' + '|'.join(
                    '%-*s' % (col.width or options.default_width,
                              markdown_escape(col.getDisplayValue(row)))
                    for col in cols) + '|\n')
            fp.write('\n')
    status('%s save finished' % p)
python
{ "resource": "" }
q33538
load_pyobj
train
def load_pyobj(name, pyobj):
    'Return Sheet object of appropriate type for given sources in `args`.'
    if isinstance(pyobj, (list, tuple)):
        if getattr(pyobj, '_fields', None):  # list of namedtuple
            return SheetNamedTuple(name, pyobj)
        return SheetList(name, pyobj)
    if isinstance(pyobj, dict):
        return SheetDict(name, pyobj)
    # Everything is an `object`, so this branch catches all remaining types.
    if isinstance(pyobj, object):
        return SheetObject(name, pyobj)
    error("cannot load '%s' as pyobj" % type(pyobj).__name__)
python
{ "resource": "" }
q33539
PyobjColumns
train
def PyobjColumns(obj):
    'Return columns for each public attribute on an object.'
    return [
        ColumnAttr(attr, type(getattr(obj, attr)))
        for attr in getPublicAttrs(obj)
    ]
python
{ "resource": "" }
q33540
DictKeyColumns
train
def DictKeyColumns(d):
    'Return a list of Column objects from dictionary keys.'
    # Iterating the dict directly yields its keys.
    return [ColumnItem(key, key, type=deduceType(d[key])) for key in d]
python
{ "resource": "" }
q33541
SheetList
train
def SheetList(name, src, **kwargs):
    'Creates a Sheet from a list of homogenous dicts or namedtuples.'
    if not src:
        status('no content in ' + name)
        return
    # Dispatch on the type of the first element.
    sample = src[0]
    if isinstance(sample, dict):
        return ListOfDictSheet(name, source=src, **kwargs)
    if isinstance(sample, tuple) and getattr(sample, '_fields', None):
        # looks like a namedtuple
        return ListOfNamedTupleSheet(name, source=src, **kwargs)
    # simple list
    return ListOfPyobjSheet(name, source=src, **kwargs)
python
{ "resource": "" }
q33542
SheetFreqTable.reload
train
def reload(self):
    'Generate histrow for each row and then reverse-sort by length.'
    self.rows = []
    # Numeric binning is disabled for now; all columns are binned discretely.
    self.discreteBinning()
    # Everything is binned, so it is now safe to cache values per column.
    for c in self.nonKeyVisibleCols:
        c._cachedValues = collections.OrderedDict()
python
{ "resource": "" }
q33543
saveToClipboard
train
def saveToClipboard(sheet, rows, filetype=None):
    'copy rows from sheet to system clipboard'
    filetype = filetype or options.save_filetype
    # Save a shallow copy of the sheet restricted to the given rows.
    sheetcopy = copy(sheet)
    sheetcopy.rows = rows
    status('copying rows to clipboard')
    clipboard().save(sheetcopy, filetype)
python
{ "resource": "" }
q33544
_Clipboard.copy
train
def copy(self, value):
    'Copy a cell to the system clipboard.'
    # Write the value to a named temp file, then feed that file to the
    # platform clipboard command on stdin.
    with tempfile.NamedTemporaryFile() as temp:
        with open(temp.name, 'w', encoding=options.encoding) as fp:
            fp.write(str(value))
        p = subprocess.Popen(
            self.command,
            stdin=open(temp.name, 'r', encoding=options.encoding),
            stdout=subprocess.DEVNULL)
        p.communicate()
python
{ "resource": "" }
q33545
_Clipboard.save
train
def save(self, vs, filetype):
    'Copy rows to the system clipboard.'
    # NamedTemporaryFile picks the filename and deletes it on context exit.
    with tempfile.NamedTemporaryFile(suffix='.'+filetype) as temp:
        saveSheets(temp.name, vs)
        sync(1)  # wait for the async save to finish before reading the file
        p = subprocess.Popen(
            self.command,
            stdin=open(temp.name, 'r', encoding=options.encoding),
            stdout=subprocess.DEVNULL,
            close_fds=True)
        p.communicate()
python
{ "resource": "" }
q33546
LogSheet.amendPrevious
train
def amendPrevious(self, targethash):
    'amend targethash with current index, then rebase newer commits on top'
    prevBranch = loggit_all('rev-parse', '--symbolic-full-name',
                            '--abbrev-ref', 'HEAD').strip()
    # commit index to viewed branch
    ret = loggit_all('commit', '-m', 'MERGE '+targethash)
    newChanges = loggit_all('rev-parse', 'HEAD').strip()
    # stash everything else
    ret += loggit_all('stash', 'save', '--keep-index')
    with GitUndo('stash', 'pop'):
        tmpBranch = randomBranchName()
        # create/switch to tmp branch
        ret += loggit_all('checkout', '-b', tmpBranch)
        with GitUndo('checkout', prevBranch), GitUndo('branch', '-D', tmpBranch):
            # tmpbranch now at targethash
            ret += loggit_all('reset', '--hard', targethash)
            # pick new change from original branch
            ret += loggit_all('cherry-pick', '-n', newChanges)
            # recommit to fix targethash (which will change)
            ret += loggit_all('commit', '--amend', '--no-edit')
            # replay the rest
            ret += loggit_all('rebase', '--onto', tmpBranch, 'HEAD@{1}', prevBranch)
    return ret.splitlines()
python
{ "resource": "" }
q33547
save_html
train
def save_html(p, *vsheets):
    'Save vsheets as HTML tables in a single file'
    with open(p.resolve(), 'w', encoding='ascii',
              errors='xmlcharrefreplace') as fp:
        for sheet in vsheets:
            sheetname = html.escape(sheet.name)
            # BUG FIX: the original wrote '<h2 ...>%s</h2>'.format(...),
            # mixing %-style and str.format, so the literal text "%s" was
            # emitted and the sheet name was silently dropped.
            fp.write('<h2 class="sheetname">{sheetname}</h2>\n'.format(
                sheetname=sheetname))
            fp.write('<table id="{sheetname}">\n'.format(sheetname=sheetname))
            # headers
            fp.write('<tr>')
            for col in sheet.visibleCols:
                contents = html.escape(col.name)
                fp.write('<th>{colname}</th>'.format(colname=contents))
            fp.write('</tr>\n')
            # rows
            for r in Progress(sheet.rows, 'saving'):
                fp.write('<tr>')
                for col in sheet.visibleCols:
                    fp.write('<td>')
                    fp.write(html.escape(col.getDisplayValue(r)))
                    fp.write('</td>')
                fp.write('</tr>\n')
            fp.write('</table>')
    status('%s save finished' % p)
python
{ "resource": "" }
q33548
tsv_trdict
train
def tsv_trdict(vs):
    'returns string.translate dictionary for replacing tabs and newlines'
    if not options.safety_first:
        return {}
    delim = options.get('delimiter', vs)
    safe_newline = options.get('tsv_safe_newline', vs)
    return {
        ord(delim): options.get('tsv_safe_tab', vs),  # \t
        10: safe_newline,  # \n
        13: safe_newline,  # \r
    }
python
{ "resource": "" }
q33549
save_tsv_header
train
def save_tsv_header(p, vs):
    'Write tsv header for Sheet `vs` to Path `p`.'
    trdict = tsv_trdict(vs)
    delim = options.delimiter
    with p.open_text(mode='w') as fp:
        colhdr = delim.join(
            col.name.translate(trdict) for col in vs.visibleCols) + '\n'
        # Only write the header if it contains anything but whitespace.
        if colhdr.strip():
            fp.write(colhdr)
python
{ "resource": "" }
q33550
save_tsv
train
def save_tsv(p, vs):
    'Write sheet to file `fn` as TSV.'
    delim = options.get('delimiter', vs)
    trdict = tsv_trdict(vs)
    # Header is written in 'w' mode; rows are appended after it.
    save_tsv_header(p, vs)
    with p.open_text(mode='a') as fp:
        for dispvals in genAllValues(vs.rows, vs.visibleCols, trdict,
                                     format=True):
            fp.write(delim.join(dispvals) + '\n')
    status('%s save finished' % p)
python
{ "resource": "" }
q33551
append_tsv_row
train
def append_tsv_row(vs, row):
    'Append `row` to vs.source, creating file with correct headers if necessary. For internal use only.'
    if not vs.source.exists():
        # Create the parent directory and header on first write.
        with contextlib.suppress(FileExistsError):
            parent = vs.source.parent.resolve()
            if parent:
                os.makedirs(parent)
        save_tsv_header(vs.source, vs)
    with vs.source.open_text(mode='a') as fp:
        fp.write('\t'.join(
            col.getDisplayValue(row) for col in vs.visibleCols) + '\n')
python
{ "resource": "" }
q33552
TsvSheet.reload_sync
train
def reload_sync(self):
    'Perform synchronous loading of TSV file, discarding header lines.'
    header_lines = options.get('header', self)
    delim = options.get('delimiter', self)
    with self.source.open_text() as fp:
        # get one line anyway to determine number of columns
        lines = list(getlines(fp, int(header_lines) or 1))
        headers = [L.split(delim) for L in lines]
        if header_lines <= 0:
            # No header: make anonymous columns, one per field.
            self.columns = [ColumnItem('', i)
                            for i in range(len(headers[0]))]
        else:
            self.columns = [
                ColumnItem('\\n'.join(x), i)
                for i, x in enumerate(zip(*headers[:header_lines]))
            ]
        lines = lines[header_lines:]  # in case of header_lines == 0
        self._rowtype = namedlist('tsvobj', [c.name for c in self.columns])
        self.recalc()
        self.rows = []
        with Progress(total=self.source.filesize) as prog:
            for L in itertools.chain(lines, getlines(fp)):
                row = L.split(delim)
                ncols = self._rowtype.length()  # current number of cols
                if len(row) > ncols:
                    # add unnamed columns to the type not found in the header
                    # NOTE(review): index len(row)+i looks like it should be
                    # ncols+i — verify against the row-type field count.
                    newcols = [ColumnItem('', len(row)+i, width=8)
                               for i in range(len(row)-ncols)]
                    self._rowtype = namedlist(
                        self._rowtype.__name__,
                        list(self._rowtype._fields) + ['_' for c in newcols])
                    for c in newcols:
                        self.addColumn(c)
                elif len(row) < ncols:
                    # extend rows that are missing entries
                    row.extend([None]*(ncols-len(row)))
                self.addRow(self._rowtype(row))
                prog.addProgress(len(L))
python
{ "resource": "" }
q33553
load_csv
train
def load_csv(vs):
    'Convert from CSV, first handling header row specially.'
    with vs.source.open_text() as fp:
        for i in range(options.skip):
            wrappedNext(fp)  # discard initial lines
        if options.safety_first:
            rdr = csv.reader(removeNulls(fp), **csvoptions())
        else:
            rdr = csv.reader(fp, **csvoptions())
        vs.rows = []
        # headers first, to setup columns before adding rows
        headers = [wrappedNext(rdr) for i in range(int(options.header))]
        if headers:
            # columns ideally reflect the max number of fields over all rows
            vs.columns = ArrayNamedColumns(
                '\\n'.join(x) for x in zip(*headers))
        else:
            # No header: take the first data row to size the columns.
            r = wrappedNext(rdr)
            vs.addRow(r)
            vs.columns = ArrayColumns(len(vs.rows[0]))
        if not vs.columns:
            vs.columns = [ColumnItem(0)]
        vs.recalc()  # make columns usable
        with Progress(total=vs.source.filesize) as prog:
            try:
                samplelen = 0
                for i in range(options_num_first_rows):  # for progress below
                    row = wrappedNext(rdr)
                    vs.addRow(row)
                    samplelen += sum(len(x) for x in row)
                samplelen //= options_num_first_rows  # avg len of first n rows
                while True:
                    vs.addRow(wrappedNext(rdr))
                    prog.addProgress(samplelen)
            except StopIteration:
                pass  # as expected
    vs.recalc()
    return vs
python
{ "resource": "" }
q33554
save_csv
train
def save_csv(p, sheet):
    'Save as single CSV file, handling column names as first line.'
    with p.open_text(mode='w') as fp:
        writer = csv.writer(fp, **csvoptions())
        colnames = [col.name for col in sheet.visibleCols]
        # Only write a header row if at least one column has a name.
        if any(colnames):
            writer.writerow(colnames)
        for r in Progress(sheet.rows, 'saving'):
            writer.writerow(
                [col.getDisplayValue(r) for col in sheet.visibleCols])
python
{ "resource": "" }
q33555
currency_multiplier
train
def currency_multiplier(src_currency, dest_currency):
    'returns the multiplier converting an amount of src_currency into dest_currency'
    if src_currency == dest_currency:
        return 1.0
    rates = currency_rates()
    # rates[X] is presumably the USD value of one unit of X — TODO confirm.
    usd_mult = 1.0 if src_currency == 'USD' else rates[src_currency]
    if dest_currency == 'USD':
        return usd_mult
    # BUG FIX: the original returned 1.0 whenever src_currency was 'USD',
    # ignoring a non-USD dest_currency; apply the dest rate consistently.
    return usd_mult / rates[dest_currency]
python
{ "resource": "" }
q33556
moveVisibleCol
train
def moveVisibleCol(sheet, fromVisColIdx, toVisColIdx):
    'Move visible column to another visible index in sheet.'
    # BUG FIX: clamp to the last valid index (nVisibleCols-1); clamping to
    # nVisibleCols allowed indexing one past the end of visibleCols.
    toVisColIdx = min(max(toVisColIdx, 0), sheet.nVisibleCols - 1)
    fromColIdx = sheet.columns.index(sheet.visibleCols[fromVisColIdx])
    toColIdx = sheet.columns.index(sheet.visibleCols[toVisColIdx])
    moveListItem(sheet.columns, fromColIdx, toColIdx)
    return toVisColIdx
python
{ "resource": "" }
q33557
moveListItem
train
def moveListItem(L, fromidx, toidx):
    "Move element within list `L` and return element's new index."
    item = L.pop(fromidx)
    L.insert(toidx, item)
    return toidx
python
{ "resource": "" }
q33558
urlcache
train
def urlcache(url, cachesecs=24*60*60):
    'Returns Path object to local cache of url contents.'
    p = Path(os.path.join(options.visidata_dir, 'cache',
                          urllib.parse.quote(url, safe='')))
    if p.exists():
        age = time.time() - p.stat().st_mtime
        if age < cachesecs:
            return p  # cache entry is still fresh
    if not p.parent.exists():
        os.makedirs(p.parent.resolve(), exist_ok=True)
    assert p.parent.is_dir(), p.parent
    req = urllib.request.Request(url,
                                 headers={'User-Agent': __version_info__})
    with urllib.request.urlopen(req) as fp:
        ret = fp.read().decode('utf-8').strip()
        with p.open_text(mode='w') as fpout:
            fpout.write(ret)
    return p
python
{ "resource": "" }
q33559
fillNullValues
train
def fillNullValues(col, rows):
    'Fill null cells in col with the previous non-null value'
    lastval = None
    nullfunc = isNullFunc()
    n = 0
    rowsToFill = list(rows)
    for r in Progress(col.sheet.rows, 'filling'):  # loop over all rows
        try:
            val = col.getValue(r)
        except Exception as e:
            val = e
        if nullfunc(val) and r in rowsToFill:
            # BUG FIX: the original tested `if lastval:`, which skipped
            # filling whenever the previous value was falsy (0, '', False)
            # and could also use a null carried over from a non-target row.
            if not nullfunc(lastval):
                col.setValue(r, lastval)
                n += 1
        else:
            lastval = val
    col.recalc()
    status("filled %d values" % n)
python
{ "resource": "" }
q33560
saveSheets
train
def saveSheets(fn, *vsheets, confirm_overwrite=False):
    'Save sheet `vs` with given filename `fn`.'
    givenpath = Path(fn)

    # determine filetype to save as
    filetype = ''
    basename, ext = os.path.splitext(fn)
    if ext:
        filetype = ext[1:]
    filetype = filetype or options.save_filetype

    if len(vsheets) > 1:
        # forcibly specify save individual files into directory by
        # ending path with /
        if not fn.endswith('/'):
            savefunc = getGlobals().get('multisave_' + filetype, None)
            if savefunc:
                # use specific multisave function
                return savefunc(givenpath, *vsheets)
        # more than one sheet; either no specific multisave for save
        # filetype, or path ends with /: save as individual files in the
        # givenpath directory
        if not givenpath.exists():
            try:
                os.makedirs(givenpath.resolve(), exist_ok=True)
            except FileExistsError:
                pass
        assert givenpath.is_dir(), \
            filetype + ' cannot save multiple sheets to non-dir'
        # get save function to call
        savefunc = (getGlobals().get('save_' + filetype)
                    or fail('no function save_'+filetype))
        if givenpath.exists():
            if confirm_overwrite:
                confirm('%s already exists. overwrite? ' % fn)
        status('saving %s sheets to %s' % (len(vsheets), givenpath.fqpn))
        for vs in vsheets:
            p = Path(os.path.join(givenpath.fqpn, vs.name+'.'+filetype))
            savefunc(p, vs)
    else:
        # get save function to call
        savefunc = (getGlobals().get('save_' + filetype)
                    or fail('no function save_'+filetype))
        if givenpath.exists():
            if confirm_overwrite:
                confirm('%s already exists. overwrite? ' % fn)
        status('saving to %s as %s' % (givenpath.fqpn, filetype))
        savefunc(givenpath, vsheets[0])
python
{ "resource": "" }
q33561
open_txt
train
def open_txt(p):
    'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'
    with p.open_text() as fp:
        # Peek at the first line: TSV files often carry a .txt extension.
        if options.delimiter in next(fp):
            return open_tsv(p)
        return TextSheet(p.name, p)
python
{ "resource": "" }
q33562
loadInternalSheet
train
def loadInternalSheet(klass, p, **kwargs):
    'Load internal sheet of given klass.  Internal sheets are always tsv.'
    vs = klass(p.name, source=p, **kwargs)
    options._set('encoding', 'utf8', vs)
    if p.exists():
        # Temporarily push the sheet so reload runs in sheet context.
        vd.sheets.insert(0, vs)
        vs.reload.__wrapped__(vs)
        vd.sheets.pop(0)
    return vs
python
{ "resource": "" }
q33563
namedlist
train
def namedlist(objname, fieldnames):
    'like namedtuple but editable'
    class NamedListTemplate(list):
        __name__ = objname
        _fields = fieldnames

        def __init__(self, L=None, **kwargs):
            if L is None:
                L = [None] * len(fieldnames)
            super().__init__(L)
            for k, v in kwargs.items():
                setattr(self, k, v)

        @classmethod
        def length(cls):
            return len(cls._fields)

    # Add a read/write property per field so elements are addressable
    # by name as well as by index.
    for i, attrname in enumerate(fieldnames):
        setattr(NamedListTemplate, attrname,
                property(operator.itemgetter(i), itemsetter(i)))
    return NamedListTemplate
python
{ "resource": "" }
q33564
Host.get_by_ip
train
def get_by_ip(cls, ip):
    'Returns Host instance for the given ip address.'
    hosts = cls.hosts_by_ip.get(ip)
    if hosts is None:
        # First lookup for this address: create and cache the entry.
        hosts = cls.hosts_by_ip[ip] = [Host(ip)]
    return hosts
python
{ "resource": "" }
q33565
threadProfileCode
train
def threadProfileCode(func, *args, **kwargs):
    'Toplevel thread profile wrapper.'
    with ThreadProfiler(threading.current_thread()) as prof:
        try:
            prof.thread.status = threadProfileCode.__wrapped__(
                func, *args, **kwargs)
        except EscapeException as e:
            # A user abort becomes the thread status rather than re-raising.
            prof.thread.status = e
python
{ "resource": "" }
q33566
combineColumns
train
def combineColumns(cols):
    'Return Column object formed by joining fields in given columns.'
    name = '+'.join(c.name for c in cols)
    # Bind cols/ch as defaults so the lambda captures current values.
    return Column(
        name,
        getter=lambda col, row, cols=cols, ch=' ': ch.join(
            c.getDisplayValue(row) for c in cols))
python
{ "resource": "" }
q33567
cancelThread
train
def cancelThread(*threads, exception=EscapeException):
    '''Raise `exception` asynchronously on each of the given threads.

    Uses the CPython C-API PyThreadState_SetAsyncExc, which schedules the
    exception to be raised the next time the target thread runs bytecode.
    '''
    for t in threads:
        nstates = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(t.ident), ctypes.py_object(exception))
        if nstates > 1:
            # Per the CPython docs, a return > 1 means more than one thread
            # state was modified; revert by raising NULL in that thread.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(t.ident), None)
python
{ "resource": "" }
q33568
git_all
train
def git_all(*args, git=maybeloggit, **kwargs):
    'Return entire output of git command.'
    try:
        result = git(*args, _err_to_out=True, _decode_errors='replace', **kwargs)
        raw = result.stdout
    except sh.ErrorReturnCode as e:
        # nonzero exit: report the code and use whatever output was captured
        status('exit_code=%s' % e.exit_code)
        raw = e.stdout
    return raw.decode('utf-8')
python
{ "resource": "" }
q33569
git_lines
train
def git_lines(*args, git=maybeloggit, **kwargs):
    'Generator of stdout lines from given git command'
    err = io.StringIO()  # capture stderr separately from the streamed stdout
    try:
        for line in git('--no-pager', _err=err, *args, _decode_errors='replace', _iter=True, _bg_exc=False, **kwargs):
            yield line[:-1]  # remove EOL
    except sh.ErrorReturnCode as e:
        status('exit_code=%s' % e.exit_code)

    # a couple of stderr lines go to the status bar; more get their own sheet
    errlines = err.getvalue().splitlines()
    if len(errlines) < 3:
        for line in errlines:
            status(line)
    else:
        vd().push(TextSheet('git ' + ' '.join(args), errlines))
python
{ "resource": "" }
q33570
git_iter
train
def git_iter(sep, *args, git=maybeloggit, **kwargs):
    'Generator of chunks of stdout from given git command, delineated by sep character'
    bufsize = 512
    err = io.StringIO()  # capture stderr separately from the streamed stdout

    chunks = []  # pieces of the current record; joined when sep is seen
    try:
        for data in git('--no-pager', *args, _decode_errors='replace', _out_bufsize=bufsize, _iter=True, _err=err, **kwargs):
            while True:
                i = data.find(sep)
                if i < 0:
                    break
                # emit everything accumulated up to the separator as one record
                chunks.append(data[:i])
                data = data[i+1:]
                yield ''.join(chunks)
                chunks.clear()
            chunks.append(data)  # unterminated remainder carries into next read
    except sh.ErrorReturnCode as e:
        status('exit_code=%s' % e.exit_code)

    # flush a trailing record that was not terminated by sep
    r = ''.join(chunks)
    if r:
        yield r

    # a couple of stderr lines go to the status bar; more get their own sheet
    errlines = err.getvalue().splitlines()
    if len(errlines) < 3:
        for line in errlines:
            status(line)
    else:
        vd().push(TextSheet('git ' + ' '.join(args), errlines))
python
{ "resource": "" }
q33571
InvertedCanvas.scaleY
train
def scaleY(self, canvasY):
    'returns plotter y coordinate, with y-axis inverted'
    # flip the base-class mapping about plotviewBox.ymax (+4 offset as coded)
    return self.plotviewBox.ymax - super().scaleY(canvasY) + 4
python
{ "resource": "" }
q33572
Path.resolve
train
def resolve(self):
    'Resolve pathname shell variables and ~userdir'
    # expand ~user first, then $VAR / ${VAR} references
    expanded = os.path.expanduser(self.fqpn)
    return os.path.expandvars(expanded)
python
{ "resource": "" }
q33573
Plotter.getPixelAttrRandom
train
def getPixelAttrRandom(self, x, y):
    'weighted-random choice of attr at this pixel.'
    # each attr appears once per row plotted there, so denser attrs win more often
    candidates = []
    for attr, rows in self.pixels[y][x].items():
        if attr and attr not in self.hiddenAttrs:
            candidates.extend([attr] * len(rows))
    if not candidates:
        return 0
    return random.choice(candidates)
python
{ "resource": "" }
q33574
Plotter.getPixelAttrMost
train
def getPixelAttrMost(self, x, y):
    'most common attr at this pixel.'
    r = self.pixels[y][x]
    # sort visible attrs by number of rows plotted; last entry is the most common
    c = sorted((len(rows), attr, rows) for attr, rows in list(r.items()) if attr and attr not in self.hiddenAttrs)
    if not c:
        return 0
    _, attr, rows = c[-1]
    # highlight the pixel when any of its underlying source rows is selected
    if isinstance(self.source, BaseSheet) and anySelected(self.source, rows):
        attr = CursesAttr(attr, 8).update_attr(colors.color_graph_selected, 10).attr
    return attr
python
{ "resource": "" }
q33575
Plotter.rowsWithin
train
def rowsWithin(self, bbox):
    'return list of deduped rows within bbox'
    # keyed by id() so unhashable rows can still be deduplicated
    seen = {}
    for row_y in range(bbox.ymin, bbox.ymax + 1):
        for col_x in range(bbox.xmin, bbox.xmax + 1):
            for attr, rows in self.pixels[row_y][col_x].items():
                if attr in self.hiddenAttrs:
                    continue
                for row in rows:
                    seen[id(row)] = row
    return list(seen.values())
python
{ "resource": "" }
q33576
Canvas.setCursorSize
train
def setCursorSize(self, p):
    'sets width based on diagonal corner p'
    box = BoundingBox(self.cursorBox.xmin, self.cursorBox.ymin, p.x, p.y)
    # never let the cursor shrink below one character cell
    box.w = max(box.w, self.canvasCharWidth)
    box.h = max(box.h, self.canvasCharHeight)
    self.cursorBox = box
python
{ "resource": "" }
q33577
Canvas.fixPoint
train
def fixPoint(self, plotterPoint, canvasPoint):
    'adjust visibleBox.xymin so that canvasPoint is plotted at plotterPoint'
    # offset of the plotter point from the plotview origin, in plotter units
    dx = plotterPoint.x - self.plotviewBox.xmin
    dy = plotterPoint.y - self.plotviewBox.ymin
    self.visibleBox.xmin = canvasPoint.x - self.canvasW(dx)
    self.visibleBox.ymin = canvasPoint.y - self.canvasH(dy)
    self.refresh()
python
{ "resource": "" }
q33578
Canvas.zoomTo
train
def zoomTo(self, bbox):
    'set visible area to bbox, maintaining aspectRatio if applicable'
    self.fixPoint(self.plotviewBox.xymin, bbox.xymin)
    xratio = bbox.w / self.canvasBox.w
    yratio = bbox.h / self.canvasBox.h
    # zoom far enough out to fit the larger of the two dimensions
    self.zoomlevel = max(xratio, yratio)
python
{ "resource": "" }
q33579
Canvas.checkCursor
train
def checkCursor(self):
    'override Sheet.checkCursor'
    box = self.cursorBox
    if box:
        # clamp the cursor up to 3/4 of a character cell in each dimension
        if box.h < self.canvasCharHeight:
            box.h = self.canvasCharHeight * 3 / 4
        if box.w < self.canvasCharWidth:
            box.w = self.canvasCharWidth * 3 / 4
    return False
python
{ "resource": "" }
q33580
Canvas.scaleX
train
def scaleX(self, x):
    'returns plotter x coordinate'
    # translate canvas x into plotter units relative to the visible origin
    offset = (x - self.visibleBox.xmin) * self.xScaler
    return round(self.plotviewBox.xmin + offset)
python
{ "resource": "" }
q33581
Canvas.scaleY
train
def scaleY(self, y):
    'returns plotter y coordinate'
    # translate canvas y into plotter units relative to the visible origin
    offset = (y - self.visibleBox.ymin) * self.yScaler
    return round(self.plotviewBox.ymin + offset)
python
{ "resource": "" }
q33582
Canvas.render
train
def render(self, h, w):
    'resets plotter, cancels previous render threads, spawns a new render'
    self.needsRefresh = False
    # stop any in-flight async plot before starting a fresh one
    stale = [t for t in self.currentThreads if t.name == 'plotAll_async']
    cancelThread(*stale)
    self.labels.clear()
    self.resetCanvasDimensions(h, w)
    self.render_async()
python
{ "resource": "" }
q33583
Canvas.render_sync
train
def render_sync(self):
    'plots points and lines and text onto the Plotter'
    self.setZoom()
    bb = self.visibleBox
    xmin, ymin, xmax, ymax = bb.xmin, bb.ymin, bb.xmax, bb.ymax
    # hoist scale factors and origins out of the per-vertex loop
    xfactor, yfactor = self.xScaler, self.yScaler
    plotxmin, plotymin = self.plotviewBox.xmin, self.plotviewBox.ymin

    for vertexes, attr, row in Progress(self.polylines, 'rendering'):
        if len(vertexes) == 1:  # single point
            x1, y1 = vertexes[0]
            x1, y1 = float(x1), float(y1)
            # plot only if the point is inside the visible region
            if xmin <= x1 <= xmax and ymin <= y1 <= ymax:
                x = plotxmin+(x1-xmin)*xfactor
                y = plotymin+(y1-ymin)*yfactor
                self.plotpixel(round(x), round(y), attr, row)
            continue

        # polyline: clip each segment to the visible box, then scale and plot
        prev_x, prev_y = vertexes[0]
        for x, y in vertexes[1:]:
            r = clipline(prev_x, prev_y, x, y, xmin, ymin, xmax, ymax)
            if r:
                x1, y1, x2, y2 = r
                x1 = plotxmin+float(x1-xmin)*xfactor
                y1 = plotymin+float(y1-ymin)*yfactor
                x2 = plotxmin+float(x2-xmin)*xfactor
                y2 = plotymin+float(y2-ymin)*yfactor
                self.plotline(x1, y1, x2, y2, attr, row)
            # advance from the unclipped endpoint so adjacent segments connect
            prev_x, prev_y = x, y

    for x, y, text, attr, row in Progress(self.gridlabels, 'labeling'):
        self.plotlabel(self.scaleX(x), self.scaleY(y), text, attr, row)
python
{ "resource": "" }
q33584
nextColRegex
train
def nextColRegex(sheet, colregex):
    'Go to first visible column after the cursor matching `colregex`.'
    ncols = len(sheet.visibleCols)
    pivot = sheet.cursorVisibleColIndex
    # wrap-around order: columns after the cursor first, then start..cursor
    search_order = itertools.chain(range(pivot + 1, ncols), range(0, pivot + 1))
    for idx in search_order:
        if re.search(colregex, sheet.visibleCols[idx].name, regex_flags()):
            return idx
    fail('no column name matches /%s/' % colregex)
python
{ "resource": "" }
q33585
searchRegex
train
def searchRegex(vd, sheet, moveCursor=False, reverse=False, **kwargs):
    'Set row index if moveCursor; otherwise generate row indexes of matches.'
    def findMatchingColumn(sheet, row, columns, func):
        'Find column for which func matches the displayed value in this row'
        for c in columns:
            if func(c.getDisplayValue(row)):
                return c

    # merge this call's arguments into the sticky search context,
    # so a later search-next reuses the same regex/columns/direction
    vd.searchContext.update(kwargs)

    regex = kwargs.get("regex")
    if regex:
        vd.searchContext["regex"] = re.compile(regex, regex_flags()) or error('invalid regex: %s' % regex)

    regex = vd.searchContext.get("regex") or fail("no regex")

    # 'columns' may be a sentinel string, a single Column, or a sequence
    columns = vd.searchContext.get("columns")
    if columns == "cursorCol":
        columns = [sheet.cursorCol]
    elif columns == "visibleCols":
        columns = tuple(sheet.visibleCols)
    elif isinstance(columns, Column):
        columns = [columns]

    if not columns:
        error('bad columns')

    searchBackward = vd.searchContext.get("backward")
    if reverse:
        searchBackward = not searchBackward

    matchingRowIndexes = 0
    # rotate_range starts just past the cursor and wraps around the whole sheet
    for r in rotate_range(len(sheet.rows), sheet.cursorRowIndex, reverse=searchBackward):
        c = findMatchingColumn(sheet, sheet.rows[r], columns, regex.search)
        if c:
            if moveCursor:
                # first match wins: move the cursor there and stop
                sheet.cursorRowIndex = r
                sheet.cursorVisibleColIndex = sheet.visibleCols.index(c)
                return
            else:
                matchingRowIndexes += 1
                yield r

    status('%s matches for /%s/' % (matchingRowIndexes, regex.pattern))
python
{ "resource": "" }
q33586
clipstr
train
def clipstr(s, dispw):
    '''Return clipped string and width in terminal display characters.

    Note: width may differ from len(s) if East Asian chars are 'fullwidth'.'''
    w = 0          # running display width
    ret = ''       # accumulated output string
    ambig_width = options.disp_ambig_width
    for c in s:
        if c != ' ' and unicodedata.category(c) in ('Cc', 'Zs', 'Zl'):  # control char, space, line sep
            c = options.disp_oddspace

        if c:
            c = c[0]  # multi-char disp_oddspace just uses the first char
            ret += c
            eaw = unicodedata.east_asian_width(c)
            if eaw == 'A':  # ambiguous
                w += ambig_width
            elif eaw in 'WF':  # wide/full
                w += 2
            elif not unicodedata.combining(c):
                w += 1

        if w > dispw-len(options.disp_truncator)+1:
            # NOTE(review): drops the final 2 chars regardless of their display
            # width -- presumably to make room for the truncator; confirm
            # behavior with fullwidth tail characters.
            ret = ret[:-2] + options.disp_truncator  # replace final char with ellipsis
            w += len(options.disp_truncator)
            break

    return ret, w
python
{ "resource": "" }
q33587
cursesMain
train
def cursesMain(_scr, sheetlist):
    'Populate VisiData object with sheets from a given list.'
    colors.setup()

    for sheet in sheetlist:
        vd().push(sheet)  # first push does a reload

    status('Ctrl+H opens help')
    return vd().run(_scr)
python
{ "resource": "" }
q33588
SettingsMgr.iter
train
def iter(self, obj=None):
    'Iterate through all keys considering context of obj. If obj is None, uses the context of the top sheet.'
    if obj is None and vd:
        obj = vd.sheet

    # yield ((key, context), value) for every setting that applies to obj;
    # ordering follows _mappings(obj) -- presumably most-specific first; confirm
    for o in self._mappings(obj):
        for k in self.keys():
            for o2 in self[k]:
                if o == o2:
                    yield (k, o), self[k][o2]
python
{ "resource": "" }
q33589
VisiData.status
train
def status(self, *args, priority=0):
    'Add status message to be shown until next action.'
    key = (priority, args)
    self.statuses[key] = self.statuses.get(key, 0) + 1

    # collapse an immediate repeat into a count on the last history entry
    if self.statusHistory:
        last = self.statusHistory[-1]
        if last[0] == priority and last[1] == args:
            last[2] += 1
            return True

    self.statusHistory.append([priority, args, 1])
    return True
python
{ "resource": "" }
q33590
VisiData.callHook
train
def callHook(self, hookname, *args, **kwargs):
    'Call all functions registered with `addHook` for the given hookname.'
    results = []
    for hook in self.hooks[hookname]:
        try:
            results.append(hook(*args, **kwargs))
        except Exception as e:
            # one failing hook should not prevent the rest from running
            exceptionCaught(e)
    return results
python
{ "resource": "" }
q33591
VisiData.checkForFinishedThreads
train
def checkForFinishedThreads(self):
    'Mark terminated threads with endTime.'
    for t in self.unfinishedThreads:
        if t.is_alive():
            continue
        t.endTime = time.process_time()
        # don't overwrite a status the thread set for itself
        if getattr(t, 'status', None) is None:
            t.status = 'ended'
python
{ "resource": "" }
q33592
VisiData.sync
train
def sync(self, expectedThreads=0):
    'Wait for all but expectedThreads async threads to finish.'
    # poll instead of join: unfinishedThreads is recomputed each iteration
    while True:
        if len(self.unfinishedThreads) <= expectedThreads:
            break
        time.sleep(.3)
        self.checkForFinishedThreads()
python
{ "resource": "" }
q33593
VisiData.editText
train
def editText(self, y, x, w, record=True, **kwargs):
    'Wrap global editText with `preedit` and `postedit` hooks.'
    # preedit may supply a replayed value; [None] or no hook means "ask the user"
    v = self.callHook('preedit') if record else None
    if not v or v[0] is None:
        with EnableCursor():
            v = editText(self.scr, y, x, w, **kwargs)
    else:
        v = v[0]

    if kwargs.get('display', True):
        status('"%s"' % v)
    # let postedit hooks (presumably command-log recording) see the final value
    self.callHook('postedit', v) if record else None
    return v
python
{ "resource": "" }
q33594
VisiData.input
train
def input(self, prompt, type='', defaultLast=False, **kwargs):
    'Get user input, with history of `type`, defaulting to last history item if no input and defaultLast is True.'
    if not type:
        return self._inputLine(prompt, **kwargs)

    history = list(self.lastInputs[type].keys())
    ret = self._inputLine(prompt, history=history, **kwargs)
    if ret:
        self.lastInputs[type][ret] = ret  # remember for next time
    elif defaultLast:
        history or fail("no previous input")
        ret = history[-1]
    return ret
python
{ "resource": "" }
q33595
VisiData._inputLine
train
def _inputLine(self, prompt, **kwargs):
    '''Add prompt to bottom of screen and get line of input from user.

    Sets self.inInput for the duration of the edit.
    '''
    self.inInput = True
    # draw the right status first so we know how much width remains for the prompt
    rstatuslen = self.drawRightStatus(self.scr, self.sheets[0])
    attr = 0
    promptlen = clipdraw(self.scr, self.windowHeight-1, 0, prompt, attr, w=self.windowWidth-rstatuslen-1)
    try:
        ret = self.editText(self.windowHeight-1, promptlen,
                self.windowWidth-promptlen-rstatuslen-2,
                attr=colors.color_edit_cell,
                unprintablechar=options.disp_unprintable,
                truncchar=options.disp_truncator,
                **kwargs)
    finally:
        # bugfix: clear the flag even if the edit raises (e.g. user cancels),
        # so inInput never sticks at True
        self.inInput = False
    return ret
python
{ "resource": "" }
q33596
VisiData.getkeystroke
train
def getkeystroke(self, scr, vs=None):
    'Get keystroke and display it on status bar.'
    k = None
    try:
        k = scr.get_wch()
        self.drawRightStatus(scr, vs or self.sheets[0])  # continue to display progress %
    except curses.error:
        return ''  # curses timeout

    if isinstance(k, str):
        if ord(k) >= 32 and ord(k) != 127:  # 127 == DEL or ^?
            return k  # ordinary printable character
        k = ord(k)
    # special or control key: return its curses name (e.g. 'KEY_UP', '^A')
    return curses.keyname(k).decode('utf-8')
python
{ "resource": "" }
q33597
VisiData.exceptionCaught
train
def exceptionCaught(self, exc=None, **kwargs):
    'Maintain list of most recent errors and return most recent one.'
    if isinstance(exc, ExpectedException):  # already reported, don't log
        return
    self.lastErrors.append(stacktrace())
    if kwargs.get('status', True):
        status(self.lastErrors[-1][-1], priority=2)  # last line of latest error
    if options.debug:
        # NOTE(review): bare re-raise assumes this is called from inside an
        # `except` block -- confirm all callers satisfy that
        raise
python
{ "resource": "" }
q33598
VisiData.drawLeftStatus
train
def drawLeftStatus(self, scr, vs):
    'Draw left side of status bar.'
    cattr = CursesAttr(colors.color_status)
    attr = cattr.attr
    error_attr = cattr.update_attr(colors.color_error, 1).attr
    warn_attr = cattr.update_attr(colors.color_warning, 2).attr
    sep = options.disp_status_sep

    try:
        lstatus = vs.leftStatus()
        maxwidth = options.disp_lstatus_max
        if maxwidth > 0:
            lstatus = middleTruncate(lstatus, maxwidth//2)

        y = self.windowHeight-1
        x = clipdraw(scr, y, 0, lstatus, attr)
        # make the sheet-name region clickable: left-click opens sheets,
        # right-click renames the sheet
        self.onMouse(scr, y, 0, 1, x,
                BUTTON1_PRESSED='sheets',
                BUTTON3_PRESSED='rename-sheet',
                BUTTON3_CLICKED='rename-sheet')

        one = False  # whether any status message has been drawn yet
        # highest-priority messages first
        for (pri, msgparts), n in sorted(self.statuses.items(), key=lambda k: -k[0][0]):
            if x > self.windowWidth:
                break
            if one:  # any messages already: draw the separator first
                x += clipdraw(scr, y, x, sep, attr, self.windowWidth)
            one = True

            msg = composeStatus(msgparts, n)

            if pri == 3: msgattr = error_attr
            elif pri == 2: msgattr = warn_attr
            elif pri == 1: msgattr = warn_attr
            else: msgattr = attr
            x += clipdraw(scr, y, x, msg, msgattr, self.windowWidth)
    except Exception as e:
        self.exceptionCaught(e)
python
{ "resource": "" }
q33599
VisiData.drawRightStatus
train
def drawRightStatus(self, scr, vs):
    'Draw right side of status bar. Return length displayed.'
    rightx = self.windowWidth-1

    ret = 0
    # each 'rstatus' hook yields (text, coloroption) or a falsy placeholder
    for rstatcolor in self.callHook('rstatus', vs):
        if rstatcolor:
            try:
                rstatus, coloropt = rstatcolor
                rstatus = ' '+rstatus
                attr = colors.get_color(coloropt).attr
                # rtl=True draws right-to-left, anchored at rightx
                statuslen = clipdraw(scr, self.windowHeight-1, rightx, rstatus, attr, rtl=True)
                rightx -= statuslen
                ret += statuslen
            except Exception as e:
                self.exceptionCaught(e)

    if scr:
        curses.doupdate()
    return ret
python
{ "resource": "" }