_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33600
VisiData.rightStatus
train
def rightStatus(self, sheet):
    'Compose right side of status bar.'
    nrows = len(sheet)
    if not sheet.currentThreads:
        # idle: row count and row type
        return '%9d %s' % (nrows, sheet.rowtype), 'color_status'
    # busy: row count, percent complete, and the current gerund (if any)
    gerund = ''
    if sheet.progresses:
        gerund = ' ' + sheet.progresses[0].gerund
    return '%9d %2d%%%s' % (nrows, sheet.progressPct, gerund), 'color_status'
python
{ "resource": "" }
q33601
VisiData.run
train
def run(self, scr):
    'Manage execution of keystrokes and subsequent redrawing of screen.'
    global sheet
    scr.timeout(int(options.curses_timeout))
    with suppress(curses.error):
        curses.curs_set(0)

    self.scr = scr
    numTimeouts = 0

    self.keystrokes = ''
    while True:
        if not self.sheets:
            # if no more sheets, exit
            return

        sheet = self.sheets[0]
        threading.current_thread().sheet = sheet

        try:
            sheet.draw(scr)
        except Exception as e:
            self.exceptionCaught(e)

        self.drawLeftStatus(scr, sheet)
        self.drawRightStatus(scr, sheet)  # visible during this getkeystroke

        keystroke = self.getkeystroke(scr, sheet)

        if keystroke:  # wait until next keystroke to clear statuses and previous keystrokes
            numTimeouts = 0
            if not self.prefixWaiting:
                self.keystrokes = ''

            self.statuses.clear()

            if keystroke == 'KEY_MOUSE':
                self.keystrokes = ''
                clicktype = ''
                try:
                    devid, x, y, z, bstate = curses.getmouse()
                    sheet.mouseX, sheet.mouseY = x, y
                    if bstate & curses.BUTTON_CTRL:
                        clicktype += "CTRL-"
                        bstate &= ~curses.BUTTON_CTRL
                    if bstate & curses.BUTTON_ALT:
                        clicktype += "ALT-"
                        bstate &= ~curses.BUTTON_ALT
                    if bstate & curses.BUTTON_SHIFT:
                        clicktype += "SHIFT-"
                        bstate &= ~curses.BUTTON_SHIFT

                    keystroke = clicktype + curses.mouseEvents.get(bstate, str(bstate))

                    f = self.getMouse(scr, x, y, keystroke)
                    if f:
                        if isinstance(f, str):
                            for cmd in f.split():
                                sheet.exec_keystrokes(cmd)
                        else:
                            f(y, x, keystroke)

                        self.keystrokes = keystroke
                        keystroke = ''
                except curses.error:
                    pass
                except Exception as e:
                    # BUGFIX: was a bare `exceptionCaught(e)` (unqualified name);
                    # every other handler in this method uses the bound method.
                    self.exceptionCaught(e)

            self.keystrokes += keystroke

        self.drawRightStatus(scr, sheet)  # visible for commands that wait for input

        if not keystroke:  # timeout instead of keypress
            pass
        elif keystroke == '^Q':
            return self.lastErrors and '\n'.join(self.lastErrors[-1])
        elif bindkeys._get(self.keystrokes):
            sheet.exec_keystrokes(self.keystrokes)
            self.prefixWaiting = False
        elif keystroke in self.allPrefixes:
            self.keystrokes = ''.join(sorted(set(self.keystrokes)))  # prefix order/quantity does not matter
            self.prefixWaiting = True
        else:
            status('no command for "%s"' % (self.keystrokes))
            self.prefixWaiting = False

        self.checkForFinishedThreads()
        self.callHook('predraw')
        catchapply(sheet.checkCursor)

        # no idle redraw unless background threads are running
        time.sleep(0)  # yield to other threads which may not have started yet
        if vd.unfinishedThreads:
            # BUGFIX: curses_timeout is a string option; the initial call above
            # converts with int() but these two calls did not.
            scr.timeout(int(options.curses_timeout))
        else:
            numTimeouts += 1
            if numTimeouts > 1:
                scr.timeout(-1)
            else:
                scr.timeout(int(options.curses_timeout))
python
{ "resource": "" }
q33602
VisiData.push
train
def push(self, vs):
    'Move given sheet `vs` to index 0 of list `sheets`.'
    if not vs:
        return None  # nothing to push

    vs.vd = self
    already = vs in self.sheets
    if already:
        self.sheets.remove(vs)
    self.sheets.insert(0, vs)
    if not already and not vs.loaded:
        # first push of an unloaded sheet: load it and set up Columns
        vs.reload()
        vs.recalc()
    if vs.precious and vs not in vs.vd.allSheets:
        vs.vd.allSheets[vs] = vs.name
    return vs
python
{ "resource": "" }
q33603
BaseSheet.exec_command
train
def exec_command(self, cmd, args='', vdglobals=None, keystrokes=None):
    "Execute `cmd` tuple with `vdglobals` as globals and this sheet's attributes as locals.  Returns True if user cancelled."
    global sheet
    sheet = vd.sheets[0]

    if not cmd:
        debug('no command "%s"' % keystrokes)
        return True

    if isinstance(cmd, CommandLog):
        cmd.replay()
        return False

    escaped = False
    err = ''

    if vdglobals is None:
        vdglobals = getGlobals()

    if not self.vd:
        self.vd = vd()

    self.sheet = self

    try:
        self.vd.callHook('preexec', self, cmd, '', keystrokes)
        exec(cmd.execstr, vdglobals, LazyMap(self))
    except EscapeException as e:  # user aborted
        status('aborted')
        escaped = True
    except Exception as e:
        debug(cmd.execstr)
        err = self.vd.exceptionCaught(e)
        escaped = True

    try:
        self.vd.callHook('postexec', self.vd.sheets[0] if self.vd.sheets else None, escaped, err)
    except Exception as e:
        # BUGFIX: was `except Exception:` but then referenced `e`, which is
        # unbound here (Python 3 deletes the exception variable after each
        # handler), raising NameError whenever the postexec hook failed.
        self.vd.exceptionCaught(e)

    catchapply(self.checkCursor)

    self.vd.refresh()
    return escaped
python
{ "resource": "" }
q33604
Sheet.column
train
def column(self, colregex):
    'Return first column whose Column.name matches colregex (or None).'
    flags = regex_flags()
    matches = (c for c in self.columns if re.search(colregex, c.name, flags))
    return next(matches, None)
python
{ "resource": "" }
q33605
Sheet.deleteSelected
train
def deleteSelected(self):
    'Delete every currently-selected row.'
    ndeleted = self.deleteBy(self.isSelected)
    nselected = len(self._selectedRows)
    self._selectedRows.clear()
    if ndeleted == nselected:
        return
    # sanity check: deleteBy should have removed exactly the selected rows
    error('expected %s' % nselected)
python
{ "resource": "" }
q33606
Sheet.visibleRows
train
def visibleRows(self):  # onscreen rows
    'Return the slice of rows currently onscreen.'
    start = self.topRowIndex
    return self.rows[start:start + self.nVisibleRows]
python
{ "resource": "" }
q33607
Sheet.visibleCols
train
def visibleCols(self):  # non-hidden cols
    'Key columns followed by every unhidden, non-key column.'
    cols = list(self.keyCols)
    for c in self.columns:
        if not c.hidden and not c.keycol:
            cols.append(c)
    return cols
python
{ "resource": "" }
q33608
Sheet.nonKeyVisibleCols
train
def nonKeyVisibleCols(self):
    'List of unhidden non-key columns.'
    # NOTE: docstring was previously two fused fragments
    # ("All columns which are not keysList of unhidden non-key columns.")
    return [c for c in self.columns if not c.hidden and c not in self.keyCols]
python
{ "resource": "" }
q33609
Sheet.statusLine
train
def statusLine(self):
    'String of row and column stats.'
    nsel = len(self._selectedRows)
    nvis = len(self.visibleCols)
    return 'row %d/%d (%d selected) col %d/%d (%d visible)' % (
        self.cursorRowIndex, self.nRows, nsel,
        self.cursorColIndex, self.nCols, nvis)
python
{ "resource": "" }
q33610
Sheet.toggle
train
def toggle(self, rows):
    'Flip the selection state of each row in `rows`.'
    prog = Progress(rows, 'toggling', total=len(self.rows))
    for r in prog:
        # unselectRow returns falsy when the row was not selected;
        # in that case, select it instead
        if not self.unselectRow(r):
            self.selectRow(r)
python
{ "resource": "" }
q33611
Sheet.select
train
def select(self, rows, status=True, progress=True):
    "Bulk select given rows. Don't show progress if progress=False; don't show status if status=False."
    before = len(self._selectedRows)
    clearing = options.bulk_select_clear
    if clearing:
        self._selectedRows.clear()
    source = Progress(rows, 'selecting') if progress else rows
    for r in source:
        self.selectRow(r)
    if not status:
        return
    after = len(self._selectedRows)
    if clearing:
        msg = 'selected %s %s%s' % (after, self.rowtype, ' instead' if before > 0 else '')
    else:
        msg = 'selected %s%s %s' % (after - before, ' more' if before > 0 else '', self.rowtype)
    vd.status(msg)
python
{ "resource": "" }
q33612
Sheet.unselect
train
def unselect(self, rows, status=True, progress=True):
    "Unselect given rows. Don't show progress if progress=False; don't show status if status=False."
    before = len(self._selectedRows)
    source = Progress(rows, 'unselecting') if progress else rows
    for r in source:
        self.unselectRow(r)
    if status:
        removed = before - len(self._selectedRows)
        vd().status('unselected %s/%s %s' % (removed, before, self.rowtype))
python
{ "resource": "" }
q33613
Sheet.selectByIdx
train
def selectByIdx(self, rowIdxs):
    'Select given row indexes, without progress bar.'
    picked = (self.rows[i] for i in rowIdxs)
    self.select(picked, progress=False)
python
{ "resource": "" }
q33614
Sheet.unselectByIdx
train
def unselectByIdx(self, rowIdxs):
    'Unselect given row indexes, without progress bar.'
    picked = (self.rows[i] for i in rowIdxs)
    self.unselect(picked, progress=False)
python
{ "resource": "" }
q33615
Sheet.gatherBy
train
def gatherBy(self, func):
    'Yield each row for which func(row) is true, starting from the cursor and wrapping around.'
    nrows = len(self.rows)
    for idx in rotate_range(nrows, self.cursorRowIndex):
        try:
            row = self.rows[idx]
            if func(row):
                yield row
        except Exception:
            # best-effort gather: a row whose predicate raises is skipped
            pass
python
{ "resource": "" }
q33616
Sheet.pageLeft
train
def pageLeft(self):
    '''Redraw page one screen to the left.

    Note: keep the column cursor in the same general relative position:

     - if it is on the furthest right column, then it should stay on the
       furthest right column if possible

     - likewise on the left or in the middle

    So really both the `leftIndex` and the `cursorIndex` should move in
    tandem until things are correct.'''

    targetIdx = self.leftVisibleColIndex  # for rightmost column
    firstNonKeyVisibleColIndex = self.visibleCols.index(self.nonKeyVisibleCols[0])
    # walk left one column at a time until the old left edge becomes the right edge
    while self.rightVisibleColIndex != targetIdx and self.leftVisibleColIndex > firstNonKeyVisibleColIndex:
        self.cursorVisibleColIndex -= 1
        self.leftVisibleColIndex -= 1
        self.calcColLayout()  # recompute rightVisibleColIndex

    # in case that rightmost column is last column, try to squeeze maximum real estate from screen
    if self.rightVisibleColIndex == self.nVisibleCols-1:
        # try to move further left while right column is still full width
        while self.leftVisibleColIndex > 0:
            rightcol = self.visibleCols[self.rightVisibleColIndex]
            if rightcol.width > self.visibleColLayout[self.rightVisibleColIndex][1]:
                # went too far: rightmost column got truncated, so back up one step
                self.cursorVisibleColIndex += 1
                self.leftVisibleColIndex += 1
                break
            else:
                self.cursorVisibleColIndex -= 1
                self.leftVisibleColIndex -= 1
                self.calcColLayout()  # recompute layout after each shift
python
{ "resource": "" }
q33617
Sheet.addColumn
train
def addColumn(self, col, index=None):
    'Insert `col` at `index` (default: after all columns) and return it.'
    if not col:
        return None
    pos = len(self.columns) if index is None else index
    col.sheet = self
    self.columns.insert(pos, col)
    return col
python
{ "resource": "" }
q33618
Sheet.rowkey
train
def rowkey(self, row):
    'Return the key tuple for `row`: one typed value per key column.'
    values = (c.getTypedValueOrException(row) for c in self.keyCols)
    return tuple(values)
python
{ "resource": "" }
q33619
Sheet.checkCursor
train
def checkCursor(self):
    'Keep cursor in bounds of data and screen.'
    # keep cursor within actual available rowset
    if self.nRows == 0 or self.cursorRowIndex <= 0:
        self.cursorRowIndex = 0
    elif self.cursorRowIndex >= self.nRows:
        self.cursorRowIndex = self.nRows-1

    if self.cursorVisibleColIndex <= 0:
        self.cursorVisibleColIndex = 0
    elif self.cursorVisibleColIndex >= self.nVisibleCols:
        self.cursorVisibleColIndex = self.nVisibleCols-1

    if self.topRowIndex <= 0:
        self.topRowIndex = 0
    elif self.topRowIndex > self.nRows-1:
        self.topRowIndex = self.nRows-1

    # (x,y) is relative cell within screen viewport
    x = self.cursorVisibleColIndex - self.leftVisibleColIndex
    y = self.cursorRowIndex - self.topRowIndex + 1  # header

    # check bounds, scroll if necessary
    if y < 1:
        self.topRowIndex = self.cursorRowIndex
    elif y > self.nVisibleRows:
        self.topRowIndex = self.cursorRowIndex-self.nVisibleRows+1

    if x <= 0:
        self.leftVisibleColIndex = self.cursorVisibleColIndex
    else:
        while True:
            if self.leftVisibleColIndex == self.cursorVisibleColIndex:  # not much more we can do
                break
            self.calcColLayout()
            mincolidx, maxcolidx = min(self.visibleColLayout.keys()), max(self.visibleColLayout.keys())
            if self.cursorVisibleColIndex < mincolidx:
                # BUGFIX: was `mincolid` (undefined name) -> NameError whenever
                # the cursor column fell left of the computed layout.
                self.leftVisibleColIndex -= max((self.cursorVisibleColIndex - mincolidx)//2, 1)
                continue
            elif self.cursorVisibleColIndex > maxcolidx:
                # NOTE(review): operands look swapped (difference is negative here,
                # so this always steps by 1 via max(...)) — preserved as-is.
                self.leftVisibleColIndex += max((maxcolidx - self.cursorVisibleColIndex)//2, 1)
                continue

            cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
            if cur_x+cur_w < self.vd.windowWidth:  # current columns fit entirely on screen
                break
            self.leftVisibleColIndex += 1
python
{ "resource": "" }
q33620
Sheet.calcColLayout
train
def calcColLayout(self):
    'Set right-most visible column, based on calculation.'
    # reserved width for the "more" indicators on either side of a column
    minColWidth = len(options.disp_more_left)+len(options.disp_more_right)
    sepColWidth = len(options.disp_column_sep)
    winWidth = self.vd.windowWidth
    # visibleColLayout maps vcolidx -> [x offset, onscreen width]
    self.visibleColLayout = {}
    x = 0
    vcolidx = 0
    for vcolidx in range(0, self.nVisibleCols):
        col = self.visibleCols[vcolidx]
        if col.width is None and len(self.visibleRows) > 0:
            # handle delayed column width-finding: size from the visible rows
            col.width = col.getMaxWidth(self.visibleRows)+minColWidth
            if vcolidx != self.nVisibleCols-1:  # let last column fill up the max width
                col.width = min(col.width, options.default_width)
        width = col.width if col.width is not None else options.default_width
        if col in self.keyCols:
            width = max(width, 1)  # keycols must all be visible
        if col in self.keyCols or vcolidx >= self.leftVisibleColIndex:  # visible columns
            # clamp the last column's width to whatever screen space remains
            self.visibleColLayout[vcolidx] = [x, min(width, winWidth-x)]
            x += width+sepColWidth
        if x > winWidth-1:
            # screen is full; vcolidx is the last (possibly partial) column
            break
    self.rightVisibleColIndex = vcolidx
python
{ "resource": "" }
q33621
Sheet.drawColHeader
train
def drawColHeader(self, scr, y, vcolidx):
    'Compose and draw column header for given vcolidx.'
    col = self.visibleCols[vcolidx]

    # hdrattr highlights whole column header
    # sepattr is for header separators and indicators
    sepattr = colors.color_column_sep

    hdrattr = self.colorize(col, None)
    if vcolidx == self.cursorVisibleColIndex:
        hdrattr = hdrattr.update_attr(colors.color_current_hdr, 2)

    C = options.disp_column_sep
    # use the key-column separator after the last key column and at the right edge
    if (self.keyCols and col is self.keyCols[-1]) or vcolidx == self.rightVisibleColIndex:
        C = options.disp_keycol_sep

    x, colwidth = self.visibleColLayout[vcolidx]

    # ANameTC: [A]more-left indicator, [Name], [T]ype icon, [C]olumn separator
    T = getType(col.type).icon
    if T is None:  # still allow icon to be explicitly non-displayed ''
        T = '?'
    N = ' ' + col.name  # save room at front for LeftMore
    if len(N) > colwidth-1:
        # truncate the name and append the truncator glyph
        N = N[:colwidth-len(options.disp_truncator)] + options.disp_truncator
    clipdraw(scr, y, x, N, hdrattr.attr, colwidth)
    clipdraw(scr, y, x+colwidth-len(T), T, hdrattr.attr, len(T))
    # right-click on the header renames the column
    vd.onMouse(scr, y, x, 1, colwidth, BUTTON3_RELEASED='rename-col')

    # draw the "more columns to the left" indicator on the leftmost non-key column
    if vcolidx == self.leftVisibleColIndex and col not in self.keyCols and self.nonKeyVisibleCols.index(col) > 0:
        A = options.disp_more_left
        scr.addstr(y, x, A, sepattr)

    if C and x+colwidth+len(C) < self.vd.windowWidth:
        scr.addstr(y, x+colwidth, C, sepattr)
python
{ "resource": "" }
q33622
Sheet.editCell
train
def editCell(self, vcolidx=None, rowidx=None, **kwargs):
    'Call `editText` at its place on the screen.  Returns the new value, properly typed'
    if vcolidx is None:
        vcolidx = self.cursorVisibleColIndex
    x, w = self.visibleColLayout.get(vcolidx, (0, 0))

    col = self.visibleCols[vcolidx]
    if rowidx is None:
        rowidx = self.cursorRowIndex
    if rowidx < 0:  # header
        y = 0
        currentValue = col.name
    else:
        y = self.rowLayout.get(rowidx, 0)
        # BUGFIX: previously read self.rows[self.cursorRowIndex], which ignored
        # an explicitly-passed rowidx and edited the wrong row's value.
        currentValue = col.getDisplayValue(self.rows[rowidx])

    editargs = dict(value=currentValue,
                    fillchar=options.disp_edit_fill,
                    truncchar=options.disp_truncator)
    editargs.update(kwargs)  # update with user-specified args
    r = self.vd.editText(y, x, w, **editargs)
    if rowidx >= 0:  # if not header
        r = col.type(r)  # convert input to column type, let exceptions be raised

    return r
python
{ "resource": "" }
q33623
Column.recalc
train
def recalc(self, sheet=None):
    'Reset the value cache, optionally reattach to `sheet`, and reify the name.'
    cache = self._cachedValues
    if cache:
        cache.clear()
    if sheet:
        self.sheet = sheet
    self.name = self._name
python
{ "resource": "" }
q33624
Column.format
train
def format(self, typedval):
    'Return displayable string of `typedval` according to `Column.fmtstr`'
    if typedval is None:
        return None

    # containers display as their length only
    if isinstance(typedval, (list, tuple)):
        return '[%s]' % len(typedval)
    if isinstance(typedval, dict):
        return '{%s}' % len(typedval)

    if isinstance(typedval, bytes):
        # decode bytes per the configured encoding before formatting
        typedval = typedval.decode(options.encoding, options.encoding_errors)

    formatter = getType(self.type).formatter
    return formatter(self.fmtstr, typedval)
python
{ "resource": "" }
q33625
Column.getTypedValue
train
def getTypedValue(self, row):
    'Returns the properly-typed value for the given row at this column.'
    raw = wrapply(self.getValue, row)
    return wrapply(self.type, raw)
python
{ "resource": "" }
q33626
Column.getTypedValueOrException
train
def getTypedValueOrException(self, row):
    'Returns the properly-typed value for the given row at this column, or an Exception object.'
    raw = wrapply(self.getValue, row)
    return wrapply(self.type, raw)
python
{ "resource": "" }
q33627
Column.getTypedValueNoExceptions
train
def getTypedValueNoExceptions(self, row):
    '''Returns the properly-typed value for the given row at this column.
       Returns the type's default value if either the getter or the type
       conversion fails.'''
    # NOTE(review): body is identical to getTypedValue/getTypedValueOrException;
    # the "default value on failure" behavior described above must come from
    # wrapply itself — confirm against wrapply's implementation.
    return wrapply(self.type, wrapply(self.getValue, row))
python
{ "resource": "" }
q33628
Column.getCell
train
def getCell(self, row, width=None):
    'Return DisplayWrapper for displayable cell value.'
    cellval = wrapply(self.getValue, row)      # raw value (or wrapped exception)
    typedval = wrapply(self.type, cellval)     # typed value (or wrapped exception)

    if isinstance(typedval, TypedWrapper):
        if isinstance(cellval, TypedExceptionWrapper):  # calc failed
            exc = cellval.exception
            if cellval.forwarded:
                dispval = str(cellval)  # traceback.format_exception_only(type(exc), exc)[-1].strip()
            else:
                dispval = options.disp_error_val
            return DisplayWrapper(cellval.val, error=exc.stacktrace,
                                  display=dispval,
                                  note=options.note_getter_exc,
                                  notecolor='color_error')
        elif typedval.val is None:  # early out for strict None
            return DisplayWrapper(None, display='',  # force empty display for None
                                  note=options.disp_note_none,
                                  notecolor='color_note_type')
        elif isinstance(typedval, TypedExceptionWrapper):  # calc succeeded, type failed
            return DisplayWrapper(typedval.val, display=str(cellval),
                                  error=typedval.exception.stacktrace,
                                  note=options.note_type_exc,
                                  notecolor='color_warning')
        else:
            # wrapped but not an exception: display the raw typed value with a warning note
            return DisplayWrapper(typedval.val, display=str(typedval.val),
                                  note=options.note_type_exc,
                                  notecolor='color_warning')
    elif isinstance(typedval, threading.Thread):
        # value still being computed asynchronously
        return DisplayWrapper(None,
                              display=options.disp_pending,
                              note=options.note_pending,
                              notecolor='color_note_pending')

    dw = DisplayWrapper(cellval)

    try:
        dw.display = self.format(typedval) or ''

        if width and isNumeric(self):
            # right-justify numeric cells, leaving one column for the note
            dw.display = dw.display.rjust(width-1)

        # annotate cells with raw value type in anytype columns, except for strings
        if self.type is anytype and type(cellval) is not str:
            typedesc = typemap.get(type(cellval), None)
            dw.note = typedesc.icon if typedesc else options.note_unknown_type
            dw.notecolor = 'color_note_type'

    except Exception as e:  # formatting failure
        e.stacktrace = stacktrace()
        dw.error = e
        try:
            dw.display = str(cellval)
        except Exception as e:
            dw.display = str(e)
        dw.note = options.note_format_exc
        dw.notecolor = 'color_warning'

    return dw
python
{ "resource": "" }
q33629
Column.setValueSafe
train
def setValueSafe(self, row, value):
    'Call setValue, reporting (but never raising) any exception.'
    try:
        return self.setValue(row, value)
    except Exception as exc:
        exceptionCaught(exc)
python
{ "resource": "" }
q33630
Column.setValues
train
def setValues(self, rows, *values):
    'Assign `values` (cycled as needed) across the given `rows` in this column.'
    cycled = itertools.cycle(values)
    for row, val in zip(rows, cycled):
        self.setValueSafe(row, val)
    self.recalc()
    msg = 'set %d cells to %d values' % (len(rows), len(values))
    return status(msg)
python
{ "resource": "" }
q33631
Column.getMaxWidth
train
def getMaxWidth(self, rows):
    'Return the maximum display length of any cell in this column, or of its header.'
    namelen = len(self.name)
    if not rows:
        return namelen
    widest = max(len(self.getDisplayValue(r)) for r in rows)
    # +2 padding; the header length is a floor on the width
    return max(widest, namelen) + 2
python
{ "resource": "" }
q33632
Column.toggleWidth
train
def toggleWidth(self, width):
    'Change column width to either given `width` or default value.'
    if self.width == width:
        # already at the requested width: snap back to the default
        self.width = int(options.default_width)
    else:
        self.width = width
python
{ "resource": "" }
q33633
ColorMaker.resolve_colors
train
def resolve_colors(self, colorstack):
    'Returns the curses attribute for the colorstack, a list of color option names sorted highest-precedence color first.'
    attr = CursesAttr()
    for coloropt in colorstack:
        attr = attr.update_attr(self.get_color(coloropt))
    return attr
python
{ "resource": "" }
q33634
addAggregators
train
def addAggregators(cols, aggrnames):
    'Attach the aggregator(s) registered under each name in aggrnames to every column in cols.'
    for aggrname in aggrnames:
        found = aggregators.get(aggrname)
        if not isinstance(found, list):
            found = [found]
        for aggr in found:
            for c in cols:
                if not hasattr(c, 'aggregators'):
                    c.aggregators = []
                if aggr and aggr not in c.aggregators:
                    c.aggregators += [aggr]
python
{ "resource": "" }
q33635
CommandLog.removeSheet
train
def removeSheet(self, vs):
    'Remove all traces of sheets named vs.name from the cmdlog.'
    name = vs.name
    self.rows = [r for r in self.rows if r.sheet != name]
    status('removed "%s" from cmdlog' % name)
python
{ "resource": "" }
q33636
CommandLog.delay
train
def delay(self, factor=1):
    'returns True if delay satisfied'
    # when paused, wait on the semaphore indefinitely (step/resume releases it)
    timeout = None if self.paused else options.replay_wait*factor
    acquired = CommandLog.semaphore.acquire(timeout=timeout)
    return acquired or not self.paused
python
{ "resource": "" }
q33637
CommandLog.replayOne
train
def replayOne(self, r):
    'Replay the command in one given row.'
    CommandLog.currentReplayRow = r
    longname = getattr(r, 'longname', None)
    if longname == 'set-option':
        # option changes are applied directly, not executed as a sheet command
        try:
            options.set(r.row, r.input, options._opts.getobj(r.col))
            escaped = False
        except Exception as e:
            exceptionCaught(e)
            escaped = True
    else:
        vs = self.moveToReplayContext(r)
        vd().keystrokes = r.keystrokes
        # <=v1.2 used keystrokes in longname column; getCommand fetches both
        escaped = vs.exec_command(vs.getCommand(longname if longname else r.keystrokes), keystrokes=r.keystrokes)

    CommandLog.currentReplayRow = None

    if escaped:  # escape during replay aborts replay
        warning('replay aborted')
    return escaped
python
{ "resource": "" }
q33638
CommandLog.replay_sync
train
def replay_sync(self, live=False):
    'Replay all commands in log.'
    self.cursorRowIndex = 0
    CommandLog.currentReplay = self
    with Progress(total=len(self.rows)) as prog:
        while self.cursorRowIndex < len(self.rows):
            if CommandLog.currentReplay is None:
                # cleared externally (cancel) — stop replaying
                status('replay canceled')
                return

            vd().statuses.clear()
            try:
                if self.replayOne(self.cursorRow):
                    # replayOne returned escaped=True: user aborted
                    self.cancel()
                    return
            except Exception as e:
                self.cancel()
                exceptionCaught(e)
                status('replay canceled')
                return

            self.cursorRowIndex += 1
            prog.addProgress(1)

            sync(1 if live else 0)  # expect this thread also if playing live
            # block until the inter-command delay (or a manual step) is satisfied
            while not self.delay():
                pass

    status('replay complete')
    CommandLog.currentReplay = None
python
{ "resource": "" }
q33639
CommandLog.setLastArgs
train
def setLastArgs(self, args):
    'Set user input on last command, if not already set.'
    # only set if not already set (second input usually confirmation)
    row = self.currentActiveRow
    if row is not None and not row.input:
        row.input = args
python
{ "resource": "" }
q33640
encode_chunk
train
def encode_chunk(dataframe):
    """Return a file-like object of CSV-encoded rows.

    Args:
        dataframe (pandas.DataFrame): A chunk of a dataframe to encode
    """
    # stdlib io replaces the former six.StringIO/six.BytesIO shims;
    # on Python 3 they are the same classes.
    import io

    csv_buffer = io.StringIO()
    dataframe.to_csv(
        csv_buffer,
        index=False,
        header=False,
        encoding="utf-8",
        float_format="%.15g",
        date_format="%Y-%m-%d %H:%M:%S.%f",
    )

    # Convert to a BytesIO buffer so that unicode text is properly handled.
    # See: https://github.com/pydata/pandas-gbq/issues/106
    body = csv_buffer.getvalue()
    if isinstance(body, bytes):
        body = body.decode("utf-8")
    body = body.encode("utf-8")
    return io.BytesIO(body)
python
{ "resource": "" }
q33641
_bqschema_to_nullsafe_dtypes
train
def _bqschema_to_nullsafe_dtypes(schema_fields): """Specify explicit dtypes based on BigQuery schema. This function only specifies a dtype when the dtype allows nulls. Otherwise, use pandas's default dtype choice. See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html #missing-data-casting-rules-and-indexing """ # If you update this mapping, also update the table at # `docs/source/reading.rst`. dtype_map = { "FLOAT": np.dtype(float), # pandas doesn't support timezone-aware dtype in DataFrame/Series # constructors. It's more idiomatic to localize after construction. # https://github.com/pandas-dev/pandas/issues/25843 "TIMESTAMP": "datetime64[ns]", "TIME": "datetime64[ns]", "DATE": "datetime64[ns]", "DATETIME": "datetime64[ns]", } dtypes = {} for field in schema_fields: name = str(field["name"]) if field["mode"].upper() == "REPEATED": continue dtype = dtype_map.get(field["type"].upper()) if dtype: dtypes[name] = dtype return dtypes
python
{ "resource": "" }
q33642
_cast_empty_df_dtypes
train
def _cast_empty_df_dtypes(schema_fields, df): """Cast any columns in an empty dataframe to correct type. In an empty dataframe, pandas cannot choose a dtype unless one is explicitly provided. The _bqschema_to_nullsafe_dtypes() function only provides dtypes when the dtype safely handles null values. This means that empty int64 and boolean columns are incorrectly classified as ``object``. """ if not df.empty: raise ValueError( "DataFrame must be empty in order to cast non-nullsafe dtypes" ) dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64} for field in schema_fields: column = str(field["name"]) if field["mode"].upper() == "REPEATED": continue dtype = dtype_map.get(field["type"].upper()) if dtype: df[column] = df[column].astype(dtype) return df
python
{ "resource": "" }
q33643
_localize_df
train
def _localize_df(schema_fields, df): """Localize any TIMESTAMP columns to tz-aware type. In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the dtype in Series/DataFrame construction, so localize those columns after the DataFrame is constructed. """ for field in schema_fields: column = str(field["name"]) if field["mode"].upper() == "REPEATED": continue if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None: df[column] = df[column].dt.tz_localize("UTC") return df
python
{ "resource": "" }
q33644
read_gbq
train
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery into a pandas DataFrame.

    Executes *query* via the google-cloud-bigquery client library and returns
    the results. See the "How to authenticate with Google BigQuery" guide for
    authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user.
    auth_local_webserver : boolean, default False
        Use the local webserver flow instead of the console flow when
        getting user credentials.
    dialect : str, default 'standard'
        SQL syntax dialect, either ``'legacy'`` or ``'standard'``.
    location : str, optional
        Location where the query job should run; must match that of any
        datasets used in the query.
    configuration : dict, optional
        Query config parameters for job processing,
        e.g. ``{'query': {'useQueryCache': False}}``.
    credentials : google.auth.credentials.Credentials, optional
        Credentials overriding the default ones.
    use_bqstorage_api : bool, default False
        Use the BigQuery Storage API to download query results quickly
        (requires google-cloud-bigquery-storage and fastavro).
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0; use the logging module instead.
    private_key : str, deprecated
        Deprecated in pandas-gbq 0.8.0; use ``credentials`` instead.

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context
    # fall back to the module-level context's dialect, then to "standard"
    if dialect is None:
        dialect = context.dialect

    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )

    final_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )

    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
python
{ "resource": "" }
q33645
GbqConnector.schema
train
def schema(self, dataset_id, table_id):
    """Retrieve the schema of a BigQuery table.

    Obtain from BigQuery the field names and field types for the table
    identified by the parameters.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table

    Returns
    -------
    list of dicts
        Fields representing the schema
    """
    table_ref = self.client.dataset(dataset_id).table(table_id)
    try:
        remote_fields = [
            field.to_api_repr()
            for field in self.client.get_table(table_ref).schema
        ]
        # Normalize case so downstream schema comparisons are reliable.
        for field in remote_fields:
            field["type"] = field["type"].upper()
            field["mode"] = field["mode"].upper()
        return remote_fields
    except self.http_error as ex:
        self.process_http_error(ex)
python
{ "resource": "" }
q33646
GbqConnector._clean_schema_fields
train
def _clean_schema_fields(self, fields): """Return a sanitized version of the schema for comparisons.""" fields_sorted = sorted(fields, key=lambda field: field["name"]) # Ignore mode and description when comparing schemas. return [ {"name": field["name"], "type": field["type"]} for field in fields_sorted ]
python
{ "resource": "" }
q33647
GbqConnector.verify_schema
train
def verify_schema(self, dataset_id, table_id, schema):
    """Indicate whether the local and remote schemas match exactly.

    Compare the BigQuery table identified in the parameters with the
    schema passed in. Field order is not considered.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table
    schema : list(dict)
        Schema for comparison. Each item should have a 'name' and a 'type'

    Returns
    -------
    bool
        Whether the schemas match
    """
    remote = self._clean_schema_fields(self.schema(dataset_id, table_id))
    local = self._clean_schema_fields(schema["fields"])
    return remote == local
python
{ "resource": "" }
q33648
GbqConnector.schema_is_subset
train
def schema_is_subset(self, dataset_id, table_id, schema):
    """Indicate whether the schema to be uploaded is a subset.

    Compare the BigQuery table identified in the parameters with the
    schema passed in and indicate whether every local field is present
    remotely. Field order is not considered.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table
    schema : list(dict)
        Schema for comparison. Each item should have a 'name' and a 'type'

    Returns
    -------
    bool
        Whether the passed schema is a subset
    """
    remote = self._clean_schema_fields(self.schema(dataset_id, table_id))
    local = self._clean_schema_fields(schema["fields"])
    return all(field in remote for field in local)
python
{ "resource": "" }
q33649
_Table.exists
train
def exists(self, table_id):
    """Check if a table exists in Google BigQuery.

    Parameters
    ----------
    table_id : str
        Name of the table to be verified

    Returns
    -------
    boolean
        True if the table exists, otherwise False
    """
    from google.api_core.exceptions import NotFound

    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.get_table(table_ref)
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
    else:
        return True
python
{ "resource": "" }
q33650
_Table.create
train
def create(self, table_id, schema):
    """
    Create a table in Google BigQuery given a table and schema

    Parameters
    ----------
    table : str
        Name of table to be written
    schema : str
        Use the generate_bq_schema to generate your table schema from a
        dataframe.
    """
    from google.cloud.bigquery import SchemaField
    from google.cloud.bigquery import Table

    # Refuse to overwrite an existing table.
    if self.exists(table_id):
        raise TableCreationError(
            "Table {0} already " "exists".format(table_id)
        )
    # Lazily create the containing dataset if it does not exist yet.
    if not _Dataset(self.project_id, credentials=self.credentials).exists(
        self.dataset_id
    ):
        _Dataset(
            self.project_id,
            credentials=self.credentials,
            location=self.location,
        ).create(self.dataset_id)
    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    table = Table(table_ref)
    # Manually create the schema objects, adding NULLABLE mode
    # as a workaround for
    # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
    # NOTE(review): this mutates the caller's ``schema`` dict in place.
    for field in schema["fields"]:
        if "mode" not in field:
            field["mode"] = "NULLABLE"
    table.schema = [
        SchemaField.from_api_repr(field) for field in schema["fields"]
    ]
    try:
        self.client.create_table(table)
    except self.http_error as ex:
        self.process_http_error(ex)
python
{ "resource": "" }
q33651
_Table.delete
train
def delete(self, table_id):
    """Delete a table in Google BigQuery.

    Parameters
    ----------
    table_id : str
        Name of the table to be deleted

    Raises
    ------
    NotFoundException
        If the table does not exist according to ``exists()``.
    """
    from google.api_core.exceptions import NotFound

    if not self.exists(table_id):
        raise NotFoundException("Table does not exist")

    ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.delete_table(ref)
    except NotFound:
        # The table vanished between the existence check and the delete;
        # an already-deleted table is treated as success.
        pass
    except self.http_error as ex:
        self.process_http_error(ex)
python
{ "resource": "" }
q33652
_Dataset.exists
train
def exists(self, dataset_id):
    """Check if a dataset exists in Google BigQuery.

    Parameters
    ----------
    dataset_id : str
        Name of the dataset to be verified

    Returns
    -------
    boolean
        True if the dataset exists, otherwise False
    """
    from google.api_core.exceptions import NotFound

    try:
        self.client.get_dataset(self.client.dataset(dataset_id))
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
    else:
        return True
python
{ "resource": "" }
q33653
_Dataset.create
train
def create(self, dataset_id):
    """Create a dataset in Google BigQuery.

    Parameters
    ----------
    dataset_id : str
        Name of the dataset to be written

    Raises
    ------
    DatasetCreationError
        If a dataset with that name already exists.
    """
    from google.cloud.bigquery import Dataset

    if self.exists(dataset_id):
        raise DatasetCreationError(
            "Dataset {0} already " "exists".format(dataset_id)
        )

    dataset = Dataset(self.client.dataset(dataset_id))
    if self.location is not None:
        dataset.location = self.location

    try:
        self.client.create_dataset(dataset)
    except self.http_error as ex:
        self.process_http_error(ex)
python
{ "resource": "" }
q33654
update_schema
train
def update_schema(schema_old, schema_new):
    """Merge an old BigQuery schema with a new one.

    Where a field name appears in both, the new field replaces the old.
    New fields not present in the old schema are appended, in order.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old
    """
    merged = list(schema_old["fields"])
    # Map existing field names to their position for in-place replacement.
    index_by_name = {field["name"]: i for i, field in enumerate(merged)}

    for field in schema_new["fields"]:
        position = index_by_name.get(field["name"])
        if position is None:
            merged.append(field)
        else:
            merged[position] = field

    return {"fields": merged}
python
{ "resource": "" }
q33655
AutoUsernameMixin.clean
train
def clean(self):
    """Keep ``username`` in sync with the related ``user``.

    Raises a ``ValidationError`` when neither a user nor a username is set.
    """
    if self.user:
        self.username = self.user.username
        return
    if not self.username:
        raise ValidationError({
            'username': _NOT_BLANK_MESSAGE,
            'user': _NOT_BLANK_MESSAGE
        })
python
{ "resource": "" }
q33656
AutoGroupnameMixin.clean
train
def clean(self):
    """Keep ``groupname`` in sync with the related ``group``.

    Raises a ``ValidationError`` when neither a group nor a groupname is set.
    """
    super().clean()
    if self.group:
        self.groupname = self.group.name
        return
    if not self.groupname:
        raise ValidationError({
            'groupname': _NOT_BLANK_MESSAGE,
            'group': _NOT_BLANK_MESSAGE
        })
python
{ "resource": "" }
q33657
AbstractRadiusGroup.get_default_queryset
train
def get_default_queryset(self):
    """Return the default groups, excluding the current instance.

    Overridable by openwisp-radius and other 3rd party apps.
    """
    manager = self.__class__.objects
    return manager.filter(default=True).exclude(pk=self.pk)
python
{ "resource": "" }
q33658
AuthorizeView.get_user
train
def get_user(self, request):
    """Return the active ``User`` named in the request, or ``None``."""
    username = request.data.get('username')
    try:
        return User.objects.get(username=username, is_active=True)
    except User.DoesNotExist:
        return None
python
{ "resource": "" }
q33659
AuthorizeView.authenticate_user
train
def authenticate_user(self, request, user):
    """Return ``True`` if the supplied password value is a valid user
    password or a valid user token.

    Can be overridden to implement more complex checks.
    """
    if user.check_password(request.data.get('password')):
        return True
    return self.check_user_token(request, user)
python
{ "resource": "" }
q33660
AuthorizeView.check_user_token
train
def check_user_token(self, request, user):
    """Validate the password field as a user auth token.

    If the user has no password set and has at least one social account,
    this is probably a social login: the password field carries the
    user's personal auth token instead of a password.

    :return: ``True`` when a matching token exists (deleting it first
        when ``DISPOSABLE_USER_TOKEN`` is enabled), ``False`` otherwise.
    """
    if not app_settings.REST_USER_TOKEN_ENABLED:
        return False
    try:
        token = Token.objects.get(
            user=user,
            key=request.data.get('password')
        )
    except Token.DoesNotExist:
        return False
    # The original implementation returned from a ``finally`` block,
    # which silently suppressed any exception raised by
    # ``token.delete()``; this structure keeps failures visible.
    if app_settings.DISPOSABLE_USER_TOKEN:
        token.delete()
    return True
python
{ "resource": "" }
q33661
PostAuthView.post
train
def post(self, request, *args, **kwargs):
    """Create the object, then blank the response body.

    FreeRADIUS is instructed to avoid processing the response body by
    setting the response data to ``None``.
    """
    result = self.create(request, *args, **kwargs)
    result.data = None  # tell FreeRADIUS to skip body processing
    return result
python
{ "resource": "" }
q33662
RedirectCaptivePageView.authorize
train
def authorize(self, request, *args, **kwargs):
    """Authorization logic.

    :raises PermissionDenied: Unless the requesting user is
        authenticated and owns at least one social account.
    """
    user = request.user
    allowed = user.is_authenticated and user.socialaccount_set.exists()
    if not allowed:
        raise PermissionDenied()
python
{ "resource": "" }
q33663
RedirectCaptivePageView.get_redirect_url
train
def get_redirect_url(self, request):
    """Refresh the user's token and return the captive page URL."""
    captive_page = request.GET.get('cp')
    user = request.user
    # Invalidate any previous token before issuing a fresh one.
    Token.objects.filter(user=user).delete()
    token = Token.objects.create(user=user)
    return '{0}?username={1}&token={2}'.format(
        captive_page, user.username, token.key
    )
python
{ "resource": "" }
q33664
get_install_requires
train
def get_install_requires():
    """Parse requirements.txt; ignore links, exclude comments and blanks.

    :return: list of requirement lines (newline included), suitable for
        setuptools' ``install_requires``.
    """
    requirements = []
    # Context manager closes the file (the original leaked the handle).
    with open('requirements.txt') as f:
        for line in f:
            stripped = line.strip()
            # Skip comments, blank lines and URL/VCS links. The original
            # compared ``line == ''``, which never matched because each
            # line still carried its trailing newline.
            if (not stripped
                    or stripped.startswith('#')
                    or stripped.startswith('http')
                    or stripped.startswith('git')):
                continue
            requirements.append(line)
    return requirements
python
{ "resource": "" }
q33665
AbstractUserAdmin.get_inline_instances
train
def get_inline_instances(self, request, obj=None):
    """Add ``RadiusUserGroupInline`` only when editing an existing object."""
    inlines = super().get_inline_instances(request, obj)
    if obj:
        inlines.append(RadiusUserGroupInline(self.model, self.admin_site))
    return inlines
python
{ "resource": "" }
q33666
construct_stable_id
train
def construct_stable_id(
    parent_context,
    polymorphic_type,
    relative_char_offset_start,
    relative_char_offset_end,
):
    """Construct a stable ID for a Context.

    The ID is derived from the parent Context and character offsets
    expressed relative to the parent's own document-level offsets.
    """
    doc_id, _, parent_start, _ = split_stable_id(parent_context.stable_id)
    start = parent_start + relative_char_offset_start
    end = parent_start + relative_char_offset_end
    return f"{doc_id}::{polymorphic_type}:{start}:{end}"
python
{ "resource": "" }
q33667
vizlib_unary_features
train
def vizlib_unary_features(span):
    """Visual-related features for a single span.

    Yields nothing when the span's sentence carries no visual information.
    """
    if not span.sentence.is_visual():
        return
    for lemma in get_visual_aligned_lemmas(span):
        yield f"ALIGNED_{lemma}", DEF_VALUE
    # De-duplicate page numbers before emitting page features.
    for page in set(span.get_attrib_tokens("page")):
        yield f"PAGE_[{page}]", DEF_VALUE
python
{ "resource": "" }
q33668
vizlib_binary_features
train
def vizlib_binary_features(span1, span2):
    """Visual-related features for a pair of spans."""
    pair = (span1, span2)
    # (predicate, feature label) pairs, checked in a fixed order.
    checks = (
        (same_page, "SAME_PAGE"),
        (is_horz_aligned, "HORZ_ALIGNED"),
        (is_vert_aligned, "VERT_ALIGNED"),
        (is_vert_aligned_left, "VERT_ALIGNED_LEFT"),
        (is_vert_aligned_right, "VERT_ALIGNED_RIGHT"),
        (is_vert_aligned_center, "VERT_ALIGNED_CENTER"),
    )
    for predicate, label in checks:
        if predicate(pair):
            yield label, DEF_VALUE
python
{ "resource": "" }
q33669
MentionNgrams.apply
train
def apply(self, doc):
    """Generate MentionNgrams from a Document by parsing its Sentences.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionNgrams.apply() must be of type Document"
        )
    for sentence in doc.sentences:
        yield from Ngrams.apply(self, sentence)
python
{ "resource": "" }
q33670
MentionFigures.apply
train
def apply(self, doc):
    """Generate MentionFigures from a Document by parsing its Figures.

    Only figures whose URL ends with one of ``self.types`` are yielded;
    when ``self.types`` is ``None`` every figure is yielded.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionFigures.apply() must be of type Document"
        )
    for figure in doc.figures:
        if self.types is not None and not any(
            figure.url.lower().endswith(t) for t in self.types
        ):
            continue
        yield TemporaryFigureMention(figure)
python
{ "resource": "" }
q33671
MentionSentences.apply
train
def apply(self, doc):
    """Generate MentionSentences from a Document by parsing its Sentences.

    Each sentence is wrapped whole: the span covers character 0 through
    the last character of the sentence text.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionSentences.apply() must be of type Document"
        )
    for sentence in doc.sentences:
        last = len(sentence.text) - 1
        yield TemporarySpanMention(
            char_start=0, char_end=last, sentence=sentence
        )
python
{ "resource": "" }
q33672
MentionParagraphs.apply
train
def apply(self, doc):
    """Generate MentionParagraphs from a Document by parsing its Paragraphs.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionParagraphs.apply() must be of type Document"
        )
    yield from (
        TemporaryParagraphMention(paragraph) for paragraph in doc.paragraphs
    )
python
{ "resource": "" }
q33673
MentionCaptions.apply
train
def apply(self, doc):
    """Generate MentionCaptions from a Document by parsing its Captions.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionCaptions.apply() must be of type Document"
        )
    yield from (
        TemporaryCaptionMention(caption) for caption in doc.captions
    )
python
{ "resource": "" }
q33674
MentionCells.apply
train
def apply(self, doc):
    """Generate MentionCells from a Document by parsing its Cells.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionCells.apply() must be of type Document"
        )
    yield from (TemporaryCellMention(cell) for cell in doc.cells)
python
{ "resource": "" }
q33675
MentionTables.apply
train
def apply(self, doc):
    """Generate MentionTables from a Document by parsing its Tables.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionTables.apply() must be of type Document"
        )
    yield from (TemporaryTableMention(table) for table in doc.tables)
python
{ "resource": "" }
q33676
MentionSections.apply
train
def apply(self, doc):
    """Generate MentionSections from a Document by parsing its Sections.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionSections.apply() must be of type Document"
        )
    yield from (
        TemporarySectionMention(section) for section in doc.sections
    )
python
{ "resource": "" }
q33677
MentionExtractor.apply
train
def apply(self, docs, clear=True, parallelism=None, progress_bar=True):
    """Run the MentionExtractor.

    :Example: To extract mentions from a set of training documents using
        4 cores::

            mention_extractor.apply(train_docs, parallelism=4)

    :param docs: Set of documents to extract from.
    :param clear: Whether or not to clear the existing Mentions
        beforehand.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the
        MentionExtractor if it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # Pure delegation: all extraction work happens in the base class.
    super(MentionExtractor, self).apply(
        docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar
    )
python
{ "resource": "" }
q33678
MentionExtractor.clear
train
def clear(self): """Delete Mentions of each class in the extractor from the given split.""" # Create set of candidate_subclasses associated with each mention_subclass cand_subclasses = set() for mentions, tablename in [ (_[1][0], _[1][1]) for _ in candidate_subclasses.values() ]: for mention in mentions: if mention in self.mention_classes: cand_subclasses.add(tablename) # First, clear all the Mentions. This will cascade and remove the # mention_subclasses and corresponding candidate_subclasses. for mention_class in self.mention_classes: logger.info(f"Clearing table: {mention_class.__tablename__}") self.session.query(Mention).filter_by( type=mention_class.__tablename__ ).delete(synchronize_session="fetch") # Next, clear the Candidates. This is done manually because we have # no cascading relationship from candidate_subclass to Candidate. for cand_subclass in cand_subclasses: logger.info(f"Cascading to clear table: {cand_subclass}") self.session.query(Candidate).filter_by(type=cand_subclass).delete( synchronize_session="fetch" )
python
{ "resource": "" }
q33679
MentionExtractor.clear_all
train
def clear_all(self):
    """Delete all Mentions — and consequently all Candidates — from the
    database."""
    logger.info("Clearing ALL Mentions.")
    # Candidates cannot exist without Mentions, so both tables are wiped.
    for table in (Mention, Candidate):
        self.session.query(table).delete(synchronize_session="fetch")
    logger.info("Cleared ALL Mentions (and Candidates).")
python
{ "resource": "" }
q33680
MentionExtractor.get_mentions
train
def get_mentions(self, docs=None, sort=False):
    """Return a list of lists of the mentions associated with this extractor.

    Each inner list contains the Mentions for one of the mention classes
    associated with the MentionExtractor.

    :param docs: If provided, return Mentions from these documents only.
        Else, return all Mentions.
    :param sort: If sort is True, then return all Mentions sorted by
        stable_id.
    :type sort: bool
    :return: Mentions for each mention_class.
    :rtype: List of lists.
    """
    # The original duplicated the query loop in both branches; only the
    # optional document filter differs, so compute it up front.
    doc_ids = None
    if docs:
        docs = docs if isinstance(docs, (list, tuple)) else [docs]
        # Get cands from all splits
        doc_ids = [doc.id for doc in docs]

    result = []
    for mention_class in self.mention_classes:
        query = self.session.query(mention_class)
        if doc_ids is not None:
            query = query.filter(mention_class.document_id.in_(doc_ids))
        mentions = query.order_by(mention_class.id).all()
        if sort:
            mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
        result.append(mentions)
    return result
python
{ "resource": "" }
q33681
MentionExtractorUDF.apply
train
def apply(self, doc, clear, **kwargs): """Extract mentions from the given Document. :param doc: A document to process. :param clear: Whether or not to clear the existing database entries. """ # Reattach doc with the current session or DetachedInstanceError happens doc = self.session.merge(doc) # Iterate over each mention class for i, mention_class in enumerate(self.mention_classes): tc_to_insert = defaultdict(list) # Generate TemporaryContexts that are children of the context using # the mention_space and filtered by the Matcher self.child_context_set.clear() for tc in self.matchers[i].apply(self.mention_spaces[i].apply(doc)): rec = tc._load_id_or_insert(self.session) if rec: tc_to_insert[tc._get_table()].append(rec) self.child_context_set.add(tc) # Bulk insert temporary contexts for table, records in tc_to_insert.items(): stmt = insert(table.__table__).values(records) self.session.execute(stmt) # Generates and persists mentions mention_args = {"document_id": doc.id} for child_context in self.child_context_set: # Assemble mention arguments for arg_name in mention_class.__argnames__: mention_args[arg_name + "_id"] = child_context.id # Checking for existence if not clear: q = select([mention_class.id]) for key, value in list(mention_args.items()): q = q.where(getattr(mention_class, key) == value) mention_id = self.session.execute(q).first() if mention_id is not None: continue # Add Mention to session yield mention_class(**mention_args)
python
{ "resource": "" }
q33682
SimpleTokenizer.parse
train
def parse(self, contents):
    """Parse the document.

    Splits ``contents`` on ``self.delim`` and whitespace-tokenizes each
    non-empty chunk. (The original kept a loop counter ``i`` that was
    never used; it has been removed.)

    :param contents: The text contents of the document.
    :rtype: a *generator* of tokenized text dicts with placeholder
        linguistic annotations.
    """
    for text in contents.split(self.delim):
        # Skip chunks that are empty or whitespace-only.
        if not text.strip():
            continue
        words = text.split()
        # Offset of each word within the normalized (single-spaced) text.
        char_offsets = [0] + [
            int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
        ]
        text = " ".join(words)
        yield {
            "text": text,
            "words": words,
            "pos_tags": [""] * len(words),
            "ner_tags": [""] * len(words),
            "lemmas": [""] * len(words),
            "dep_parents": [0] * len(words),
            "dep_labels": [""] * len(words),
            "char_offsets": char_offsets,
            "abs_char_offsets": char_offsets,
        }
python
{ "resource": "" }
q33683
strlib_unary_features
train
def strlib_unary_features(span):
    """Structural-related features for a single span.

    Yields nothing when the span's sentence has no structural information.
    """
    if not span.sentence.is_structural():
        return

    yield f"TAG_{get_tag(span)}", DEF_VALUE
    for attr in get_attributes(span):
        yield f"HTML_ATTR_{attr}", DEF_VALUE
    yield f"PARENT_TAG_{get_parent_tag(span)}", DEF_VALUE

    prev_tags = get_prev_sibling_tags(span)
    if prev_tags:
        yield f"PREV_SIB_TAG_{prev_tags[-1]}", DEF_VALUE
        yield f"NODE_POS_{len(prev_tags) + 1}", DEF_VALUE
    else:
        yield "FIRST_NODE", DEF_VALUE

    next_tags = get_next_sibling_tags(span)
    if next_tags:
        yield f"NEXT_SIB_TAG_{next_tags[0]}", DEF_VALUE
    else:
        yield "LAST_NODE", DEF_VALUE

    yield f"ANCESTOR_CLASS_[{' '.join(get_ancestor_class_names(span))}]", DEF_VALUE
    yield f"ANCESTOR_TAG_[{' '.join(get_ancestor_tag_names(span))}]", DEF_VALUE
    yield f"ANCESTOR_ID_[{' '.join(get_ancestor_id_names(span))}]", DEF_VALUE
python
{ "resource": "" }
q33684
build_node
train
def build_node(type, name, content):
    """Wrap up content into an HTML node.

    :param type: content type (e.g., doc, section, text, figure)
    :param name: content name (e.g., the name of the section)
    :param content: the actual content
    :return: new string with the content in HTML format, or ``None`` for
        an unrecognized type
    """
    templates = {
        "doc": f"<html>{content}</html>",
        "section": f"<section name='{name}'>{content}</section>",
        "text": f"<p name='{name}'>{content}</p>",
        "figure": f"<img name='{name}' src='{content}'/>",
    }
    return templates.get(type)
python
{ "resource": "" }
q33685
_to_span
train
def _to_span(x, idx=0):
    """Convert a Candidate, Mention, or Span to a span.

    For a Candidate, ``idx`` selects which mention's context is returned.
    """
    if isinstance(x, Candidate):
        return x[idx].context
    if isinstance(x, Mention):
        return x.context
    if isinstance(x, TemporarySpanMention):
        return x
    raise ValueError(f"{type(x)} is an invalid argument type")
python
{ "resource": "" }
q33686
_to_spans
train
def _to_spans(x):
    """Convert a Candidate, Mention, or Span to a list of spans."""
    if isinstance(x, Candidate):
        return [_to_span(m) for m in x]
    if isinstance(x, Mention):
        return [x.context]
    if isinstance(x, TemporarySpanMention):
        return [x]
    raise ValueError(f"{type(x)} is an invalid argument type")
python
{ "resource": "" }
q33687
get_matches
train
def get_matches(lf, candidate_set, match_values=(1, -1)):
    """Return a list of candidates that are matched by a particular LF.

    A simple helper function to see how many matches (non-zero by
    default) an LF gets.

    :param lf: The labeling function to apply to the candidate_set.
    :param candidate_set: The set of candidates to evaluate.
    :param match_values: Optional iterable of label values considered a
        match; ``(1, -1)`` by default. (The original used a mutable list
        as the default argument, a classic Python pitfall.)
    :rtype: a list of candidates
    """
    logger = logging.getLogger(__name__)
    matches = [c for c in candidate_set if lf(c) in match_values]
    logger.info(f"{len(matches)} matches")
    return matches
python
{ "resource": "" }
q33688
Featurizer.update
train
def update(self, docs=None, split=0, parallelism=None, progress_bar=True):
    """Update the features of the specified candidates.

    :param docs: If provided, apply features to all the candidates in
        these documents.
    :param split: If docs is None, apply features to the candidates in
        this particular split.
    :type split: int
    :param parallelism: How many threads to use for extraction. This
        will override the parallelism value used to initialize the
        Featurizer if it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # An update is apply() with train=True (extend the feature key set)
    # and clear=False (keep previously computed features).
    self.apply(
        docs=docs,
        split=split,
        train=True,
        clear=False,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
python
{ "resource": "" }
q33689
Featurizer.apply
train
def apply(
    self,
    docs=None,
    split=0,
    train=False,
    clear=True,
    parallelism=None,
    progress_bar=True,
):
    """Apply features to the specified candidates.

    :param docs: If provided, apply features to all the candidates in
        these documents.
    :param split: If docs is None, apply features to the candidates in
        this particular split.
    :type split: int
    :param train: Whether or not to update the global key set of
        features and the features of candidates.
    :type train: bool
    :param clear: Whether or not to clear the features table before
        applying features.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This
        will override the parallelism value used to initialize the
        Featurizer if it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # The original duplicated the super().apply(...) call and commit in
    # both branches; only the doc list and split value differ.
    if docs:
        # Call apply on the specified docs for all splits
        split = ALL_SPLITS
    else:
        # Only grab the docs containing candidates from the given split.
        docs = get_docs_from_split(self.session, self.candidate_classes, split)
    super(Featurizer, self).apply(
        docs,
        split=split,
        train=train,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
    # Needed to sync the bulk operations
    self.session.commit()
python
{ "resource": "" }
q33690
Featurizer.drop_keys
train
def drop_keys(self, keys, candidate_classes=None): """Drop the specified keys from FeatureKeys. :param keys: A list of FeatureKey names to delete. :type keys: list, tuple :param candidate_classes: A list of the Candidates to drop the key for. If None, drops the keys for all candidate classes associated with this Featurizer. :type candidate_classes: list, tuple """ # Make sure keys is iterable keys = keys if isinstance(keys, (list, tuple)) else [keys] # Make sure candidate_classes is iterable if candidate_classes: candidate_classes = ( candidate_classes if isinstance(candidate_classes, (list, tuple)) else [candidate_classes] ) # Ensure only candidate classes associated with the featurizer # are used. candidate_classes = [ _.__tablename__ for _ in candidate_classes if _ in self.candidate_classes ] if len(candidate_classes) == 0: logger.warning( "You didn't specify valid candidate classes for this featurizer." ) return # If unspecified, just use all candidate classes else: candidate_classes = [_.__tablename__ for _ in self.candidate_classes] # build dict for use by utils key_map = dict() for key in keys: key_map[key] = set(candidate_classes) drop_keys(self.session, FeatureKey, key_map)
python
{ "resource": "" }
q33691
Featurizer.clear
train
def clear(self, train=False, split=0): """Delete Features of each class from the database. :param train: Whether or not to clear the FeatureKeys :type train: bool :param split: Which split of candidates to clear features from. :type split: int """ # Clear Features for the candidates in the split passed in. logger.info(f"Clearing Features (split {split})") sub_query = ( self.session.query(Candidate.id).filter(Candidate.split == split).subquery() ) query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query)) query.delete(synchronize_session="fetch") # Delete all old annotation keys if train: logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...") drop_all_keys(self.session, FeatureKey, self.candidate_classes)
python
{ "resource": "" }
q33692
Featurizer.clear_all
train
def clear_all(self):
    """Delete all Features and all FeatureKeys from the database."""
    logger.info("Clearing ALL Features and FeatureKeys.")
    for table in (Feature, FeatureKey):
        self.session.query(table).delete(synchronize_session="fetch")
python
{ "resource": "" }
q33693
_merge
train
def _merge(x, y): """Merge two nested dictionaries. Overwrite values in x with values in y.""" merged = {**x, **y} xkeys = x.keys() for key in xkeys: if isinstance(x[key], dict) and key in y: merged[key] = _merge(x[key], y[key]) return merged
python
{ "resource": "" }
q33694
get_config
train
def get_config(path=None):
    """Search for a ``.fonduer-config.yaml`` file in ``path`` and its parents.

    :param path: Directory to start searching from. Defaults to the
        current working directory *at call time*. (The original default
        ``path=os.getcwd()`` was evaluated once at import time, so later
        ``chdir`` calls were silently ignored.)
    :return: The default config merged with the first config file found,
        or the defaults if none is found within the search depth.
    """
    if path is None:
        path = os.getcwd()
    config = default
    tries = 0
    current_dir = path
    while current_dir and tries < MAX_CONFIG_SEARCH_DEPTH:
        potential_path = os.path.join(current_dir, ".fonduer-config.yaml")
        if os.path.exists(potential_path):
            with open(potential_path, "r") as f:
                config = _merge(config, yaml.safe_load(f))
            logger.debug(f"Loading Fonduer config from {potential_path}.")
            break

        new_dir = os.path.split(current_dir)[0]
        if current_dir == new_dir:
            # Reached the filesystem root without finding a config file.
            logger.debug("Unable to find config file. Using defaults.")
            break
        current_dir = new_dir
        tries += 1

    return config
python
{ "resource": "" }
q33695
TemporaryContext._load_id_or_insert
train
def _load_id_or_insert(self, session):
    """Load the id of the temporary context if it exists or return insert args.

    As a side effect, this also inserts the Context object for the
    stableid.

    :return: The record of the temporary context to insert, or ``None``
        when the context already existed (in which case ``self.id`` is
        simply populated).
    :rtype: dict
    """
    if self.id is None:
        stable_id = self.get_stable_id()
        # Check if exists
        id = session.execute(
            select([Context.id]).where(Context.stable_id == stable_id)
        ).first()
        # If not, insert the base Context row and hand back the args for
        # the type-specific table; the caller performs that bulk insert.
        if id is None:
            self.id = session.execute(
                Context.__table__.insert(),
                {"type": self._get_table().__tablename__, "stable_id": stable_id},
            ).inserted_primary_key[0]
            insert_args = self._get_insert_args()
            insert_args["id"] = self.id
            return insert_args
        else:
            self.id = id[0]
python
{ "resource": "" }
q33696
LogisticRegression._build_model
train
def _build_model(self): """ Build model. """ if "input_dim" not in self.settings: raise ValueError("Model parameter input_dim cannot be None.") self.linear = nn.Linear( self.settings["input_dim"], self.cardinality, self.settings["bias"] )
python
{ "resource": "" }
q33697
Parser.apply
train
def apply(
    self, doc_loader, pdf_path=None, clear=True, parallelism=None, progress_bar=True
):
    """Run the Parser.

    :param doc_loader: An iteratable of ``Documents`` to parse. Typically,
        one of Fonduer's document preprocessors.
    :param pdf_path: The path to the PDF documents, if any. This path will
        override the one used in initialization, if provided.
    :param clear: Whether or not to clear existing database entries
        before parsing. (The original docstring mentioned a "labels
        table" and "LFs" — apparently a copy-paste from the Labeler.)
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Parser if
        it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # Pure delegation: all parsing work happens in the base class.
    super(Parser, self).apply(
        doc_loader,
        pdf_path=pdf_path,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
python
{ "resource": "" }
q33698
Parser.get_last_documents
train
def get_last_documents(self):
    """Return the most recently parsed list of ``Documents``.

    :rtype: A list of the most recently parsed ``Documents`` ordered by
        name.
    """
    query = self.session.query(Document)
    query = query.filter(Document.name.in_(self.last_docs))
    return query.order_by(Document.name).all()
python
{ "resource": "" }
q33699
Parser.get_documents
train
def get_documents(self):
    """Return all the parsed ``Documents`` in the database.

    :rtype: A list of all ``Documents`` in the database ordered by name.
    """
    query = self.session.query(Document).order_by(Document.name)
    return query.all()
python
{ "resource": "" }