code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
|---|---|---|---|---|---|
if self._h5Dataset.dtype.fields is None:
    return tuple()  # regular field
else:
    fieldName = self.nodeName
    fieldDtype = self._h5Dataset.dtype.fields[fieldName][0]
    return fieldDtype.shape
|
def _subArrayShape(self)
|
Returns the shape of the sub-array.
An empty tuple is returned for regular fields, which have no sub-array.
| 6.26987
| 4.778297
| 1.312156
|
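A small standalone illustration of the sub-array lookup above, using plain NumPy (the field names are hypothetical):

```python
import numpy as np

# A structured dtype with a regular field 'x' and a sub-array field 'v'.
dt = np.dtype([('x', np.float64), ('v', np.float64, (3,))])
print(dt.fields['x'][0].shape)  # () - regular field, no sub-array
print(dt.fields['v'][0].shape)  # (3,) - the sub-array shape
```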
fieldName = self.nodeName
return str(self._h5Dataset.dtype.fields[fieldName][0])
|
def elementTypeName(self)
|
String representation of the element type.
| 14.969012
| 15.60624
| 0.959168
|
nSubDims = len(self._subArrayShape)
subArrayDims = ['SubDim{}'.format(dimNr) for dimNr in range(nSubDims)]
return dimNamesFromDataset(self._h5Dataset) + subArrayDims
|
def dimensionNames(self)
|
Returns a list with the dimension names of the underlying HDF-5 dataset
| 8.529355
| 8.132982
| 1.048736
|
unit = dataSetUnit(self._h5Dataset)
fieldNames = self._h5Dataset.dtype.names
# If the unit attribute is a list with the same length as the number of fields,
# return the unit for the field that equals self.nodeName.
if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
    idx = fieldNames.index(self.nodeName)
    return unit[idx]
else:
    return unit
|
def unit(self)
|
Returns the unit of the RTI by calling dataSetUnit on the underlying dataset
| 6.725479
| 5.779034
| 1.163772
|
value = dataSetMissingValue(self._h5Dataset)
fieldNames = self._h5Dataset.dtype.names
# If the missing value attribute is a list with the same length as the number of fields,
# return the missing value for the field that equals self.nodeName.
if hasattr(value, '__len__') and len(value) == len(fieldNames):
    idx = fieldNames.index(self.nodeName)
    return value[idx]
else:
    return value
|
def missingDataValue(self)
|
Returns the value to indicate missing data. None if no missing-data value is specified.
| 5.641942
| 5.934113
| 0.950764
|
if self._h5Dataset.attrs.get('CLASS', None) == b'DIMENSION_SCALE':
    return RtiIconFactory.DIMENSION
else:
    return RtiIconFactory.ARRAY
|
def iconGlyph(self)
|
Shows an Array icon for regular datasets but a dimension icon for dimension scales
| 17.4256
| 10.171397
| 1.713196
|
assert self.canFetchChildren(), "canFetchChildren must be True"
childItems = []
# Add fields
if self._isStructured:
    for fieldName in self._h5Dataset.dtype.names:
        childItems.append(H5pyFieldRti(self._h5Dataset, nodeName=fieldName,
                                       fileName=self.fileName))
return childItems
|
def _fetchAllChildren(self)
|
Fetches all fields that this variable contains.
Only variables with a structured data type can have fields.
| 8.279633
| 7.253924
| 1.1414
|
assert self._h5Group is not None, "dataset undefined (file not opened?)"
assert self.canFetchChildren(), "canFetchChildren must be True"
childItems = []
for childName, h5Child in self._h5Group.items():
    if isinstance(h5Child, h5py.Group):
        childItems.append(H5pyGroupRti(h5Child, nodeName=childName,
                                       fileName=self.fileName))
    elif isinstance(h5Child, h5py.Dataset):
        if len(h5Child.shape) == 0:
            childItems.append(H5pyScalarRti(h5Child, nodeName=childName,
                                            fileName=self.fileName))
        else:
            childItems.append(H5pyDatasetRti(h5Child, nodeName=childName,
                                             fileName=self.fileName))
    elif isinstance(h5Child, h5py.Datatype):
        # logger.debug("Ignored DataType item: {}".format(childName))
        pass
    else:
        logger.warn("Ignored {}. It has an unexpected HDF-5 type: {}"
                    .format(childName, type(h5Child)))
return childItems
|
def _fetchAllChildren(self)
|
Fetches all sub groups and variables that this group contains.
| 3.014423
| 2.990801
| 1.007898
|
logger.info("Opening: {}".format(self._fileName))
self._h5Group = h5py.File(self._fileName, 'r')
|
def _openResources(self)
|
Opens the root Dataset.
| 5.308073
| 4.408213
| 1.204133
|
logger.info("Closing: {}".format(self._fileName))
self._h5Group.close()
self._h5Group = None
|
def _closeResources(self)
|
Closes the root Dataset.
| 7.311059
| 5.945304
| 1.22972
|
check_class(pgImagePlot2d.slicedArray, ArrayWithMask) # sanity check
if crossPlot is None:
    array = pgImagePlot2d.slicedArray  # the whole image
elif crossPlot == 'horizontal':
    if pgImagePlot2d.crossPlotRow is not None:
        array = pgImagePlot2d.slicedArray.asMaskedArray()[pgImagePlot2d.crossPlotRow, :]
    else:
        array = pgImagePlot2d.slicedArray  # fall back on complete sliced array
elif crossPlot == 'vertical':
    if pgImagePlot2d.crossPlotCol is not None:
        array = pgImagePlot2d.slicedArray.asMaskedArray()[:, pgImagePlot2d.crossPlotCol]
    else:
        array = pgImagePlot2d.slicedArray  # fall back on complete sliced array
else:
    raise ValueError("crossPlot must be: None, 'horizontal' or 'vertical', got: {}"
                     .format(crossPlot))
return maskedNanPercentile(array, (percentage, 100 - percentage))
|
def calcPgImagePlot2dDataRange(pgImagePlot2d, percentage, crossPlot)
|
Calculates the range from the inspector's sliced array. Discards `percentage` of the minimum
and `percentage` of the maximum values of the inspector.slicedArray.
:param pgImagePlot2d: the inspector whose sliced array the range will be calculated from.
:param percentage: percentage of the data that will be discarded at each extreme.
:param crossPlot: if None, the range will be calculated from the entire sliced array;
if "horizontal" or "vertical", the range will be calculated from the data under the
horizontal or vertical cross hairs.
If the cursor is outside the image, there is no valid data under the cross hair and
the range will be determined from the complete sliced array as a fall-back.
| 3.0431
| 2.925664
| 1.04014
|
rangeFunctions = OrderedDict({} if initialItems is None else initialItems)
# If crossPlot is "horizontal" or "vertical", make functions that determine the range from
# the data at the cross hair.
if crossPlot:
    rangeFunctions['cross all data'] = partial(calcPgImagePlot2dDataRange, pgImagePlot2d,
                                               0.0, crossPlot)
    for percentage in [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]:
        label = "cross discard {}%".format(percentage)
        rangeFunctions[label] = partial(calcPgImagePlot2dDataRange, pgImagePlot2d,
                                        percentage, crossPlot)
# Always add functions that determine the range from the complete sliced array.
rangeFunctions['image all data'] = partial(calcPgImagePlot2dDataRange, pgImagePlot2d,
                                           0.0, None)
for percentage in [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]:
    label = "image discard {}%".format(percentage)
    rangeFunctions[label] = partial(calcPgImagePlot2dDataRange, pgImagePlot2d,
                                    percentage, None)
return rangeFunctions
|
def crossPlotAutoRangeMethods(pgImagePlot2d, crossPlot, initialItems=None)
|
Creates an ordered dict with autorange methods for a PgImagePlot2d inspector.
:param pgImagePlot2d: the inspector whose sliced array the range methods will work on.
:param crossPlot: if None, the range will be calculated from the entire sliced array;
if "horizontal" or "vertical", the range will be calculated from the data under the
horizontal or vertical cross hairs.
:param initialItems: will be passed on to the OrderedDict constructor.
| 3.291
| 3.01818
| 1.090392
|
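A small usage sketch. Since `partial` never actually calls the range function, a plain placeholder object is enough to show which keys the OrderedDict will contain:

```python
# Hypothetical illustration of the resulting key layout.
methods = crossPlotAutoRangeMethods(object(), 'horizontal')
print(list(methods)[:3])   # ['cross all data', 'cross discard 0.1%', 'cross discard 0.2%']
print(list(methods)[-2:])  # ['image discard 10%', 'image discard 20%']
```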
verCrossViewBox = self.pgImagePlot2d.verCrossPlotItem.getViewBox()
verCrossViewBox.sigRangeChangedManually.disconnect(self.yAxisRangeCti.setAutoRangeOff)
horCrossViewBox = self.pgImagePlot2d.horCrossPlotItem.getViewBox()
horCrossViewBox.sigRangeChangedManually.disconnect(self.xAxisRangeCti.setAutoRangeOff)
self.pgImagePlot2d.verCrossPlotItem.sigAxisReset.disconnect(self.setVerCrossPlotAutoRangeOn)
self.pgImagePlot2d.horCrossPlotItem.sigAxisReset.disconnect(self.setHorCrossPlotAutoRangeOn)
self.pgImagePlot2d.imagePlotItem.sigAxisReset.disconnect(self.setImagePlotAutoRangeOn)
|
def _closeResources(self)
|
Disconnects signals.
Is called by self.finalize when the cti is deleted.
| 3.444259
| 3.251886
| 1.059158
|
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.yAxisRangeCti, axisNumber)
|
def setImagePlotAutoRangeOn(self, axisNumber)
|
Sets the image plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (both X and Y axes).
| 14.0741
| 24.592123
| 0.572301
|
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber)
|
def setHorCrossPlotAutoRangeOn(self, axisNumber)
|
Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (both X and Y axes).
| 17.665815
| 32.276234
| 0.547332
|
setXYAxesAutoRangeOn(self, self.verCrossPlotRangeCti, self.yAxisRangeCti, axisNumber)
|
def setVerCrossPlotAutoRangeOn(self, axisNumber)
|
Sets the vertical cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (both X and Y axes).
| 18.476223
| 31.544998
| 0.58571
|
logger.debug("Clearing inspector contents")
self.titleLabel.setText('')
# Don't clear the imagePlotItem, the imageItem is only added in the constructor.
self.imageItem.clear()
self.imagePlotItem.setLabel('left', '')
self.imagePlotItem.setLabel('bottom', '')
# Set the histogram range and levels to finite values to prevent further errors if this
# function was called after an exception in self.drawContents
self.histLutItem.setHistogramRange(0, 100)
self.histLutItem.setLevels(0, 100)
self.crossPlotRow, self.crossPlotCol = None, None
self.probeLabel.setText('')
self.crossLineHorizontal.setVisible(False)
self.crossLineVertical.setVisible(False)
self.crossLineHorShadow.setVisible(False)
self.crossLineVerShadow.setVisible(False)
self.horCrossPlotItem.clear()
self.verCrossPlotItem.clear()
|
def _clearContents(self)
|
Clears the contents when no valid data is available
| 5.276397
| 5.283185
| 0.998715
|
extStr = ';'.join(['*' + ext for ext in self.extensions])
return '{} ({})'.format(self.name, extStr)
|
def getFileDialogFilter(self)
|
Returns a filter that can be used to construct file dialog filters,
for example: 'Text File (*.txt;*.text)'
| 5.594306
| 4.898471
| 1.142052
|
dct = super(RtiRegItem, self).asDict()
dct['extensions'] = self.extensions
return dct
|
def asDict(self)
|
Returns a dictionary for serialization.
| 8.789522
| 7.755455
| 1.133334
|
check_is_a_string(extension)
check_class(rtiRegItem, RtiRegItem)
logger.debug(" Registering extension {!r} for {}".format(extension, rtiRegItem))
# TODO: type checking
if extension in self._extensionMap:
    logger.info("Overriding extension {!r}: old={}, new={}"
                .format(extension, self._extensionMap[extension], rtiRegItem))
self._extensionMap[extension] = rtiRegItem
|
def _registerExtension(self, extension, rtiRegItem)
|
Links a file name extension to a repository tree item.
| 3.594364
| 3.614984
| 0.994296
|
super(RtiRegistry, self).registerItem(regItem)
for ext in regItem.extensions:
    self._registerExtension(ext, regItem)
|
def registerItem(self, regItem)
|
Adds a ClassRegItem object to the registry.
| 6.256945
| 5.470646
| 1.143731
|
extensions = extensions if extensions is not None else []
check_is_a_sequence(extensions)
extensions = [prepend_point_to_extension(ext) for ext in extensions]
regRti = RtiRegItem(fullName, fullClassName, extensions, pythonPath=pythonPath)
self.registerItem(regRti)
|
def registerRti(self, fullName, fullClassName, extensions=None, pythonPath='')
|
Registers a repository tree item (RTI) class in the registry.
Maintains a list of file extensions that open the RTI by default.
| 5.555225
| 5.260395
| 1.056047
|
filters = []
for regRti in self.items:
    filters.append(regRti.getFileDialogFilter())
return ';;'.join(filters)
|
def getFileDialogFilter(self)
|
Returns a filter that can be used in open file dialogs,
for example: 'All files (*);;Txt (*.txt;*.text);;netCDF(*.nc;*.nc4)'
| 7.599126
| 7.276706
| 1.044308
|
return [
    RtiRegItem('HDF-5 file',
               'argos.repo.rtiplugins.hdf5.H5pyFileRti',
               extensions=['hdf5', 'h5', 'h5e', 'he5', 'nc']),  # hdf extension is for HDF-4
    RtiRegItem('MATLAB file',
               'argos.repo.rtiplugins.scipyio.MatlabFileRti',
               extensions=['mat']),
    RtiRegItem('NetCDF file',
               'argos.repo.rtiplugins.ncdf.NcdfFileRti',
               #extensions=['nc', 'nc3', 'nc4']),
               extensions=['nc', 'nc4']),
               #extensions=[]),
    RtiRegItem('NumPy binary file',
               'argos.repo.rtiplugins.numpyio.NumpyBinaryFileRti',
               extensions=['npy']),
    RtiRegItem('NumPy compressed file',
               'argos.repo.rtiplugins.numpyio.NumpyCompressedFileRti',
               extensions=['npz']),
    RtiRegItem('NumPy text file',
               'argos.repo.rtiplugins.numpyio.NumpyTextFileRti',
               #extensions=['txt', 'text']),
               extensions=['dat']),
    RtiRegItem('IDL save file',
               'argos.repo.rtiplugins.scipyio.IdlSaveFileRti',
               extensions=['sav']),
    RtiRegItem('Pandas CSV file',
               'argos.repo.rtiplugins.pandasio.PandasCsvFileRti',
               extensions=['csv']),
    RtiRegItem('Pillow image',
               'argos.repo.rtiplugins.pillowio.PillowFileRti',
               extensions=['bmp', 'eps', 'im', 'gif', 'jpg', 'jpeg', 'msp', 'pcx',
                           'png', 'ppm', 'spi', 'tif', 'tiff', 'xbm', 'xv']),
    RtiRegItem('Wav file',
               'argos.repo.rtiplugins.scipyio.WavFileRti',
               extensions=['wav'])]
|
def getDefaultItems(self)
|
Returns a list with the default plugins in the repo tree item registry.
| 2.841186
| 2.680583
| 1.059913
|
if is_a_string(moduleInfo):
    moduleInfo = mi.ImportedModuleInfo(moduleInfo)
line = "{:15s}: {}".format(moduleInfo.name, moduleInfo.verboseVersion)
self.editor.appendPlainText(line)
QtWidgets.QApplication.instance().processEvents()
|
def _addModuleInfo(self, moduleInfo)
|
Adds a line with module info to the editor
:param moduleInfo: can either be a string or a module info class.
In the first case, an object is instantiated as ImportedModuleInfo(moduleInfo).
| 6.683242
| 5.861951
| 1.140105
|
logger.debug("Adding dependency info to the AboutDialog")
self.progressLabel.setText("Retrieving package info...")
self.editor.clear()
self._addModuleInfo(mi.PythonModuleInfo())
self._addModuleInfo(mi.QtModuleInfo())
modules = ['numpy', 'scipy', 'pandas', 'pyqtgraph']
for module in modules:
    self._addModuleInfo(module)
self._addModuleInfo(mi.PillowInfo())
self._addModuleInfo(mi.H5pyModuleInfo())
self._addModuleInfo(mi.NetCDF4ModuleInfo())
self.progressLabel.setText("")
logger.debug("Finished adding dependency info to the AboutDialog")
|
def addDependencyInfo(self)
|
Adds version info about the installed dependencies
| 4.0704
| 3.865454
| 1.05302
|
value = float(data)
if math.isnan(value):
    raise ValueError("FloatCti can't store NaNs")
if math.isinf(value):
    if value > 0:
        logger.warn("Replacing inf by the largest representable float")
        value = sys.float_info.max
    else:
        logger.warn("Replacing -inf by the smallest representable float")
        value = -sys.float_info.max
return value
|
def _enforceDataType(self, data)
|
Converts to float so that this CTI always stores that type.
Replaces +/- infinity with the largest/smallest representable float.
Raises a ValueError if data is a NaN.
| 3.469845
| 2.767306
| 1.253871
|
if self.specialValueText is not None and data == self.minValue:
    return self.specialValueText
else:
    return "{}{:.{}f}{}".format(self.prefix, data, self.decimals, self.suffix)
|
def _dataToString(self, data)
|
Conversion function used to convert the (default) data to the display value.
| 4.669314
| 4.068434
| 1.147693
|
return ("min = {}, max = {}, step = {}, decimals = {}, specVal = {}"
.format(self.minValue, self.maxValue, self.stepSize,
self.decimals, self.specialValueText))
|
def debugInfo(self)
|
Returns the string with debugging information
| 5.645739
| 5.224012
| 1.080729
|
return FloatCtiEditor(self, delegate, parent=parent)
|
def createEditor(self, delegate, parent, option)
|
Creates a FloatCtiEditor.
For the parameters see the AbstractCti constructor documentation.
| 41.696453
| 5.493588
| 7.590021
|
self.spinBox.valueChanged.disconnect(self.commitChangedValue)
super(FloatCtiEditor, self).finalize()
|
def finalize(self)
|
Called at clean up. Is used to disconnect signals.
| 15.930601
| 12.556487
| 1.268715
|
return ("enabled = {}, min = {}, max = {}, precision = {}, specVal = {}"
.format(self.enabled, self.minValue, self.maxValue,
self.precision, self.specialValueText))
|
def debugInfo(self)
|
Returns the string with debugging information
| 6.024349
| 5.624241
| 1.07114
|
return SnFloatCtiEditor(self, delegate, self.precision, parent=parent)
|
def createEditor(self, delegate, parent, option)
|
Creates an SnFloatCtiEditor.
For the parameters see the AbstractCti constructor documentation.
| 60.587166
| 16.610678
| 3.647483
|
self.spinBox.valueChanged.disconnect(self.commitChangedValue)
super(SnFloatCtiEditor, self).finalize()
|
def finalize(self)
|
Called at clean up. Is used to disconnect signals.
| 25.151215
| 19.327431
| 1.301322
|
_, extension = os.path.splitext(fileName)
if os.path.isdir(fileName):
    rtiRegItem = None
    cls = DirectoryRti
else:
    try:
        rtiRegItem = globalRtiRegistry().getRtiRegItemByExtension(extension)
    except KeyError:
        logger.debug("No file RTI registered for extension: {}".format(extension))
        rtiRegItem = None
        cls = UnknownFileRti
    else:
        cls = rtiRegItem.getClass(tryImport=True)  # cls can be None
return cls, rtiRegItem
|
def detectRtiFromFileName(fileName)
|
Determines the type of RepoTreeItem to use given a file name.
Uses a DirectoryRti for directories and an UnknownFileRti if the file
extension doesn't match one of the registered RTI extensions.
Returns a (cls, regItem) tuple. Both the cls and the regItem can be None.
If the file is a directory, (DirectoryRti, None) is returned.
If the file extension is not in the registry, (UnknownFileRti, None) is returned.
If the cls cannot be imported, (None, regItem) is returned and regItem.exception will be set.
Otherwise (cls, regItem) will be returned.
| 4.552017
| 3.435193
| 1.325113
|
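A hedged usage sketch; the path is hypothetical and the resolved class depends on which RTI plugins are registered:

```python
cls, regItem = detectRtiFromFileName('/data/sample.nc')  # hypothetical path
if cls is None:
    # The registered plugin could not be imported; the reason is stored on the item.
    print(regItem.exception)
else:
    print(cls.__name__)  # e.g. H5pyFileRti (the default registry maps 'nc' to HDF-5)
```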
cls, rtiRegItem = detectRtiFromFileName(fileName)
if cls is None:
    logger.warn("Unable to import plugin {}: {}"
                .format(rtiRegItem.fullName, rtiRegItem.exception))
    rti = UnknownFileRti.createFromFileName(fileName)
    rti.setException(rtiRegItem.exception)
else:
    rti = cls.createFromFileName(fileName)
assert rti, "Sanity check failed (createRtiFromFileName). Please report this bug."
return rti
|
def createRtiFromFileName(fileName)
|
Determines the type of RepoTreeItem to use given a file name and creates it.
Uses a DirectoryRti for directories and an UnknownFileRti if the file
extension doesn't match one of the registered RTI extensions.
| 5.414099
| 4.718029
| 1.147534
|
childItems = []
fileNames = os.listdir(self._fileName)
absFileNames = [os.path.join(self._fileName, fn) for fn in fileNames]
# Add subdirectories
for fileName, absFileName in zip(fileNames, absFileNames):
    if os.path.isdir(absFileName) and not fileName.startswith('.'):
        childItems.append(DirectoryRti(fileName=absFileName, nodeName=fileName))
# Add regular files
for fileName, absFileName in zip(fileNames, absFileNames):
    if os.path.isfile(absFileName) and not fileName.startswith('.'):
        childItem = createRtiFromFileName(absFileName)
        childItems.append(childItem)
return childItems
|
def _fetchAllChildren(self)
|
Gets all sub directories and files within the current directory.
Does not fetch hidden files.
| 2.812634
| 2.642434
| 1.06441
|
numCols = self.model().columnCount()
startCol = 0 if startCol is None else max(startCol, 0)
stopCol = numCols if stopCol is None else min(stopCol, numCols)
row = 0
for col in range(startCol, stopCol):
    indexWidget = self.indexWidget(self.model().index(row, col))
    if indexWidget:
        contentsWidth = indexWidget.sizeHint().width()
    else:
        contentsWidth = self.header().sectionSizeHint(col)
    self.header().resizeSection(col, contentsWidth)
|
def resizeColumnsToContents(self, startCol=None, stopCol=None)
|
Resizes all columns to the contents
| 2.460438
| 2.52024
| 0.976271
|
# The cache is invalid after the prefix, postfix and other properties
# have been set. I disabled it because sizeHint isn't called that often.
#if self._cachedSizeHint is not None:
# return self._cachedSizeHint
orgSizeHint = super(CollectorSpinBox, self).sizeHint()
self.ensurePolished()
d = self
fm = QtGui.QFontMetrics(self.fontMetrics())
# This was h = d.edit.sizeHint().height(), but that didn't work. In the end we set the
# height to the height calculated from the parent.
h = orgSizeHint.height()
w = 0
# QLatin1Char seems not to be implemented.
# Using regular string literals and hope for the best
s = d.prefix() + d.textFromValue(d.minimum()) + d.suffix() + ' '
# We disabled truncating the string here!!
#s = s[:18]
w = max(w, fm.width(s))
s = d.prefix() + d.textFromValue(d.maximum()) + d.suffix() + ' '
# We disabled truncating the string here!!
#s = s[:18]
w = max(w, fm.width(s))
if len(d.specialValueText()):
    s = d.specialValueText()
    w = max(w, fm.width(s))
w += 2 # cursor blinking space
opt = QtWidgets.QStyleOptionSpinBox()
self.initStyleOption(opt)
hint = QtCore.QSize(w, h)
extra = QtCore.QSize(35, 6)
opt.rect.setSize(hint + extra)
extra += hint - self.style().subControlRect(QtWidgets.QStyle.CC_SpinBox, opt,
                                            QtWidgets.QStyle.SC_SpinBoxEditField, self).size()
# get closer to final result by repeating the calculation
opt.rect.setSize(hint + extra)
extra += hint - self.style().subControlRect(QtWidgets.QStyle.CC_SpinBox, opt,
                                            QtWidgets.QStyle.SC_SpinBoxEditField, self).size()
hint += extra
opt.rect = self.rect()
result = (self.style().sizeFromContents(QtWidgets.QStyle.CT_SpinBox, opt, hint, self)
          .expandedTo(QtWidgets.QApplication.globalStrut()))
self._cachedSizeHint = result
# Use the height ancestor's sizeHint
result.setHeight(orgSizeHint.height())
return result
|
def sizeHint(self)
|
Reimplemented from the C++ Qt source of QAbstractSpinBox.sizeHint, but without
truncating to a maximum of 18 characters.
| 4.762993
| 4.54979
| 1.04686
|
logger.debug("ConfigItemDelegate.createEditor, parent: {!r}".format(parent.objectName()))
assert index.isValid(), "sanity check failed: invalid index"
cti = index.model().getItem(index)
editor = cti.createEditor(self, parent, option)
return editor
|
def createEditor(self, parent, option, index)
|
Returns the widget used to change data from the model and can be reimplemented to
customize editing behavior.
Reimplemented from QStyledItemDelegate.
| 7.506052
| 8.115538
| 0.924899
|
# We take the config value via the model to be consistent with setModelData
data = index.model().data(index, Qt.EditRole)
editor.setData(data)
|
def setEditorData(self, editor, index)
|
Provides the widget with data to manipulate.
Calls the setEditorValue of the config tree item at the index.
:type editor: QWidget
:type index: QModelIndex
Reimplemented from QStyledItemDelegate.
| 8.923145
| 10.486884
| 0.850886
|
try:
    data = editor.getData()
except InvalidInputError as ex:
    logger.warn(ex)
else:
    # The value is set via the model so that signals are emitted
    logger.debug("ConfigItemDelegate.setModelData: {}".format(data))
    model.setData(index, data, Qt.EditRole)
|
def setModelData(self, editor, model, index)
|
Gets data from the editor widget and stores it in the specified model at the item index.
Does this by calling getEditorValue of the config tree item at the index.
:type editor: QWidget
:type model: ConfigTreeModel
:type index: QModelIndex
Reimplemented from QStyledItemDelegate.
| 7.037435
| 6.232437
| 1.129163
|
cti = index.model().getItem(index)
if cti.checkState is None:
    displayRect = option.rect
else:
    checkBoxRect = widgetSubCheckBoxRect(editor, option)
    offset = checkBoxRect.x() + checkBoxRect.width()
    displayRect = option.rect
    displayRect.adjust(offset, 0, 0, 0)
editor.setGeometry(displayRect)
|
def updateEditorGeometry(self, editor, option, index)
|
Ensures that the editor is displayed correctly with respect to the item view.
| 5.822433
| 5.506116
| 1.057448
|
# It would be nicer if this method was part of ConfigItemDelegate since createEditor also
# lives there. However, QAbstractItemView.closeEditor is sometimes called directly,
# without the QAbstractItemDelegate.closeEditor signal being emitted, e.g. when the
# currentItem changes. Therefore we cannot connect the QAbstractItemDelegate.closeEditor
# signal to a slot in the ConfigItemDelegate.
configItemDelegate = self.itemDelegate()
configItemDelegate.finalizeEditor(editor)
super(ConfigTreeView, self).closeEditor(editor, hint)
|
def closeEditor(self, editor, hint)
|
Finalizes, closes and releases the given editor.
| 9.229598
| 8.738379
| 1.056214
|
configModel = self.model()
if index is None:
    #index = configTreeModel.createIndex()
    index = QtCore.QModelIndex()
if index.isValid():
    if expanded is None:
        item = configModel.getItem(index)
        self.setExpanded(index, item.expanded)
    else:
        self.setExpanded(index, expanded)
for rowNr in range(configModel.rowCount(index)):
    childIndex = configModel.index(rowNr, configModel.COL_NODE_NAME, parentIndex=index)
    self.expandBranch(index=childIndex, expanded=expanded)
|
def expandBranch(self, index=None, expanded=None)
|
Expands or collapses the node at the index and all its descendants.
If expanded is True the nodes will be expanded, if False they will be collapsed, and if
expanded is None the expanded attribute of each item is used.
If index is None, the invisible root will be used (i.e. the complete forest will
be expanded).
| 3.006371
| 3.078737
| 0.976495
|
try:
    return ncVar.__dict__
except Exception as ex:
    # Due to some internal error netCDF4 may raise an AttributeError or KeyError,
    # depending on its version.
    logger.warn("Unable to read the attributes from {}. Reason: {}"
                .format(ncVar.name, ex))
    return {}
|
def ncVarAttributes(ncVar)
|
Returns the attributes of an ncdf variable
| 6.758387
| 7.385989
| 0.915028
|
attributes = ncVarAttributes(ncVar)
if not attributes:
    return ''  # a premature optimization :-)
for key in ('unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'):
    if key in attributes:
        # In Python 3 the attributes are byte strings so we must decode them.
        # This is a bug in h5py, see https://github.com/h5py/h5py/issues/379
        return attributes[key]
return ''
|
def ncVarUnit(ncVar)
|
Returns the unit of the ncVar by looking in the attributes.
It searches in the attributes for one of the following keys:
'unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'. If these are not found, the empty
string is returned.
| 6.289361
| 5.21989
| 1.204884
|
attributes = ncVarAttributes(ncVar)
if not attributes:
    return None  # a premature optimization :-)
for key in ('missing_value', 'MissingValue', 'missingValue', 'FillValue', '_FillValue'):
    if key in attributes:
        missingDataValue = attributes[key]
        return missingDataValue
return None
|
def variableMissingValue(ncVar)
|
Returns the missing-data value given a NetCDF variable.
Looks for one of the following attributes: missing_value, MissingValue, missingValue,
FillValue, _FillValue. Returns None if none of these attributes are found.
| 5.605927
| 4.729665
| 1.185269
|
fieldName = self.nodeName
return str(self._ncVar.dtype.fields[fieldName][0])
|
def elementTypeName(self)
|
String representation of the element type.
| 17.200563
| 17.787407
| 0.967008
|
unit = ncVarUnit(self._ncVar)
fieldNames = self._ncVar.dtype.names
# If the unit attribute is a list with the same length as the number of fields,
# return the unit for the field that equals self.nodeName.
if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
    idx = fieldNames.index(self.nodeName)
    return unit[idx]
else:
    return unit
|
def unit(self)
|
Returns the unit attribute of the underlying ncdf variable.
If the unit has a length (e.g. is a list) and has precisely one element per field,
the unit for this field is returned.
| 6.305983
| 5.479737
| 1.150782
|
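A small standalone sketch of the per-field lookup logic above; the variable and field names are hypothetical:

```python
fieldNames = ('x', 't')  # fields of a hypothetical structured variable
unit = ['m', 's']        # one unit per field

nodeName = 'x'           # the field this RTI represents
if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
    print(unit[fieldNames.index(nodeName)])  # -> 'm'
else:
    print(unit)          # scalar unit: applies to the whole variable
```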
nSubDims = len(self._subArrayShape)
subArrayDims = ['SubDim{}'.format(dimNr) for dimNr in range(nSubDims)]
return list(self._ncVar.dimensions + tuple(subArrayDims))
|
def dimensionNames(self)
|
Returns a list with the dimension names of the underlying NCDF variable
| 8.079344
| 6.52619
| 1.237988
|
value = variableMissingValue(self._ncVar)
fieldNames = self._ncVar.dtype.names
# If the missing value attribute is a list with the same length as the number of fields,
# return the missing value for the field that equals self.nodeName.
if hasattr(value, '__len__') and len(value) == len(fieldNames):
    idx = fieldNames.index(self.nodeName)
    return value[idx]
else:
    return value
|
def missingDataValue(self)
|
Returns the value to indicate missing data. None if no missing-data value is specified.
| 5.620538
| 5.661608
| 0.992746
|
dtype = self._ncVar.dtype
if type(dtype) == type:
    # Handle the unexpected case that dtype is a regular Python type
    # (happens e.g. in the /PROCESSOR/processing_configuration of the Trop LX files)
    return dtype.__name__
return '<structured>' if dtype.names else str(dtype)
|
def elementTypeName(self)
|
String representation of the element type.
| 20.554327
| 21.214437
| 0.968884
|
assert self.canFetchChildren(), "canFetchChildren must be True"
childItems = []
# Add fields
if self._isStructured:
    for fieldName in self._ncVar.dtype.names:
        childItems.append(NcdfFieldRti(self._ncVar, nodeName=fieldName, fileName=self.fileName))
return childItems
|
def _fetchAllChildren(self)
|
Fetches all fields that this variable contains.
Only variables with a structured data type can have fields.
| 9.295331
| 8.11647
| 1.145243
|
assert self._ncGroup is not None, "dataset undefined (file not opened?)"
assert self.canFetchChildren(), "canFetchChildren must be True"
childItems = []
# Add dimensions
for dimName, ncDim in self._ncGroup.dimensions.items():
    childItems.append(NcdfDimensionRti(ncDim, nodeName=dimName, fileName=self.fileName))
# Add groups
for groupName, ncGroup in self._ncGroup.groups.items():
    childItems.append(NcdfGroupRti(ncGroup, nodeName=groupName, fileName=self.fileName))
# Add variables
for varName, ncVar in self._ncGroup.variables.items():
    childItems.append(NcdfVariableRti(ncVar, nodeName=varName, fileName=self.fileName))
return childItems
|
def _fetchAllChildren(self)
|
Fetches all sub groups and variables that this group contains.
| 3.075901
| 2.939137
| 1.046532
|
logger.info("Opening: {}".format(self._fileName))
self._ncGroup = Dataset(self._fileName)
|
def _openResources(self)
|
Opens the root Dataset.
| 11.510435
| 8.956869
| 1.285096
|
logger.info("Closing: {}".format(self._fileName))
self._ncGroup.close()
self._ncGroup = None
|
def _closeResources(self)
|
Closes the root Dataset.
| 9.209746
| 7.29631
| 1.262247
|
try:
    rate, data = scipy.io.wavfile.read(self._fileName, mmap=True)
except Exception as ex:
    logger.warning(ex)
    logger.warning("Unable to read wav with memory mapping. Trying without now.")
    rate, data = scipy.io.wavfile.read(self._fileName, mmap=False)
self._array = data
self.attributes['rate'] = rate
|
def _openResources(self)
|
Uses scipy.io.wavfile.read to open the underlying file.
| 4.875093
| 4.709641
| 1.03513
|
childItems = []
if self._array.ndim == 2:
    _nRows, nCols = self._array.shape if self._array is not None else (0, 0)
    for col in range(nCols):
        colItem = SliceRti(self._array[:, col], nodeName="channel-{}".format(col),
                           fileName=self.fileName, iconColor=self.iconColor,
                           attributes=self.attributes)
        childItems.append(colItem)
return childItems
|
def _fetchAllChildren(self)
|
Adds a SliceRti per column as children so that the channels can be inspected easily
| 5.936289
| 4.825693
| 1.230142
|
logger.debug("TextInspector._drawContents: {}".format(self))
self._clearContents()
slicedArray = self.collector.getSlicedArray()
if slicedArray is None:
    return
# Sanity check, the slicedArray should be zero-dimensional. It can be used as a scalar.
# In fact, using an index (e.g. slicedArray[0]) will raise an exception.
assert slicedArray.data.ndim == 0, \
    "Expected zero-dimensional array. Got: {}".format(slicedArray.ndim)
# Valid data from here...
maskedArr = slicedArray.asMaskedArray() # So that we call mask[()] for boolean masks
slicedScalar = maskedArr[()] # Convert to Numpy scalar
isMasked = maskedArr.mask[()]
text = to_string(slicedScalar, masked=isMasked, maskFormat='--',
                 decode_bytes=self.config.encodingCti.configValue)
self.editor.setPlainText(text)
self.editor.setWordWrapMode(self.config.wordWrapCti.configValue)
# Update the editor font from the font config item (will call self.editor.setFont)
self.config.updateTarget()
|
def _drawContents(self, reason=None, initiator=None)
|
Converts the (zero-dimensional) sliced array to string and puts it in the text editor.
The reason and initiator parameters are ignored.
See AbstractInspector.updateContents for their description.
| 9.190729
| 7.841139
| 1.172117
|
if mouseClickEvent.button() == QtCore.Qt.MiddleButton:
    mouseClickEvent.accept()
    argosPgPlotItem.emitResetAxisSignal(axisNumber)
|
def middleMouseClickEvent(argosPgPlotItem, axisNumber, mouseClickEvent)
|
Emits sigAxisReset when the middle mouse button is clicked on an axis of the plot item.
| 3.708608
| 3.099664
| 1.196455
|
logger.debug("Finalizing: {}".format(self))
super(ArgosPgPlotItem, self).close()
|
def close(self)
|
Is called before destruction. Can be used to clean up resources.
Could be called 'finalize' but PlotItem already has a close method, so we reuse that.
| 25.602489
| 16.178513
| 1.5825
|
contextMenu = QtWidgets.QMenu()
for action in self.actions():
    contextMenu.addAction(action)
contextMenu.exec_(event.screenPos())
|
def contextMenuEvent(self, event)
|
Shows the context menu at the cursor position.
We need to take the event-based approach because ArgosPgPlotItem derives from
QGraphicsWidget, and not from QWidget, and therefore doesn't have the
customContextMenuRequested signal.
| 3.131955
| 3.403414
| 0.920239
|
assert axisNumber in VALID_AXES_NUMBERS, \
    "Axis Nr should be one of {}, got {}".format(VALID_AXES_NUMBERS, axisNumber)
# Hide 'auto-scale (A)' button
logger.debug("ArgosPgPlotItem.autoBtnClicked, mode:{}".format(self.autoBtn.mode))
if self.autoBtn.mode == 'auto':
    self.autoBtn.hide()
else:
    # Does this occur?
    msg = "Unexpected autobutton mode: {}".format(self.autoBtn.mode)
    if DEBUGGING:
        raise ValueError(msg)
    else:
        logger.warn(msg)
logger.debug("Emitting sigAxisReset({}) for {!r}".format(axisNumber, self))
self.sigAxisReset.emit(axisNumber)
|
def emitResetAxisSignal(self, axisNumber)
|
Emits sigAxisReset with the axisNumber as parameter.
axisNumber should be 0 for X, 1 for Y, and 2 for both axes.
| 6.011051
| 6.031516
| 0.996607
|
base_local = diff.diff(base, local, check_modified=check_modified)
base_remote = diff.diff(base, remote, check_modified=check_modified)
merge = diff.diff(base_local, base_remote)
return merge
|
def merge(local, base, remote, check_modified=False)
|
Generate unmerged series of changes (including conflicts).
By diffing the two diffs, we find *changes* that are
on the local branch, the remote branch, or both.
We arbitrarily choose the "local" branch to be the "before"
and the "remote" branch to be the "after" in the diff algorithm.
Therefore:
If a change is "deleted", that means that it occurs only on
the local branch. If a change is "added" that means it occurs only on
the remote branch. If a change is "unchanged", that means it occurs
in both branches. Either the same addition or same deletion occurred in
both branches, or the cell was not changed in either branch.
Parameters
----------
local : list
    A sequence representing the items on the local branch.
base : list
    A sequence representing the items on the base branch.
remote : list
    A sequence representing the items on the remote branch.
Returns
-------
result : A diff result comparing the changes on the local and remote
    branches.
| 2.566117
| 2.786595
| 0.920879
|
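A hedged sketch of the diff-of-diffs idea, using tiny hypothetical branches; the layout of the change dicts follows the ``diff`` docstring further below:

```python
base = ['a', 'b']
local = ['a', 'b', 'c']   # 'c' was added on the local branch
remote = ['x', 'a', 'b']  # 'x' was added on the remote branch

result = merge(local, base, remote)
# Each change dict carries a 'state': 'deleted' marks local-only changes,
# 'added' marks remote-only changes, 'unchanged' marks changes in both branches.
```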
data = current.read(json_data, 'ipynb')
json_data.close()
return data
|
def parse(self, json_data)
|
Parse a notebook .ipynb file.
Parameters
----------
json_data : file
    A file handle for an .ipynb file.
Returns
-------
nb : An IPython Notebook data structure.
| 17.203438
| 16.292124
| 1.055936
|
nb1_cells = nb1['worksheets'][0]['cells']
nb2_cells = nb2['worksheets'][0]['cells']
diffed_nb = cells_diff(nb1_cells, nb2_cells, check_modified=check_modified)
line_diffs = diff_modified_items(diffed_nb)
cell_list = list()
for i, item in enumerate(diffed_nb):
    cell = diff_result_to_cell(item)
    if i in line_diffs:
        cell['metadata']['extra-diff-data'] = line_diffs[i]
    cell_list.append(cell)
nb1['worksheets'][0]['cells'] = cell_list
nb1['metadata']['nbdiff-type'] = 'diff'
return nb1
|
def notebook_diff(nb1, nb2, check_modified=True)
|
Unify two notebooks into a single notebook with diff metadata.
The result of this function is a valid notebook that can be loaded
by the IPython Notebook front-end. This function adds additional
cell metadata that the front-end Javascript uses to render the diffs.
Parameters
----------
nb1 : dict
    An IPython Notebook to use as the baseline version.
nb2 : dict
    An IPython Notebook to compare against the baseline.
check_modified : bool
    Whether or not to detect cell modification.
Returns
-------
nb : A valid notebook containing diff metadata.
| 2.690333
| 2.857923
| 0.94136
|
'''diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.'''
state = item['state']
if state == 'modified':
    new_cell = item['modifiedvalue'].data
    old_cell = item['originalvalue'].data
    new_cell['metadata']['state'] = state
    new_cell['metadata']['original'] = old_cell
    cell = new_cell
else:
    cell = item['value'].data
    cell['metadata']['state'] = state
return cell
|
def diff_result_to_cell(item)
|
diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.
| 4.712427
| 2.920159
| 1.613757
|
'''Diff two arrays of cells.'''
before_comps = [
    CellComparator(cell, check_modified=check_modified)
    for cell in before_cells
]
after_comps = [
    CellComparator(cell, check_modified=check_modified)
    for cell in after_cells
]
diff_result = diff(
    before_comps,
    after_comps,
    check_modified=check_modified
)
return diff_result
|
def cells_diff(before_cells, after_cells, check_modified=False)
|
Diff two arrays of cells.
| 2.396444
| 2.405316
| 0.996311
|
'''Diff the words in two strings.
This is intended for use in diffing prose and other forms of text
where line breaks have little semantic value.
Parameters
----------
before_words : str
    A string to be used as the baseline version.
after_words : str
    A string to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
'''
before_comps = before_words.split()
after_comps = after_words.split()
diff_result = diff(
    before_comps,
    after_comps
)
return diff_result
|
def words_diff(before_words, after_words)
|
Diff the words in two strings.
This is intended for use in diffing prose and other forms of text
where line breaks have little semantic value.
Parameters
----------
before_words : str
    A string to be used as the baseline version.
after_words : str
    A string to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
| 4.469241
| 1.743552
| 2.563296
|
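A hedged usage sketch; the dict layout follows the ``diff`` docstring below, and the relative order of the deleted/added pair may vary:

```python
for item in words_diff('the quick brown fox', 'the slow brown fox'):
    print(item['state'], item['value'])
# e.g.:
#   unchanged the
#   deleted quick
#   added slow
#   unchanged brown
#   unchanged fox
```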
'''Diff the lines in two strings.
Parameters
----------
before_lines : iterable
    Iterable containing lines used as the baseline version.
after_lines : iterable
    Iterable containing lines to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
'''
before_comps = [
    LineComparator(line, check_modified=check_modified)
    for line in before_lines
]
after_comps = [
    LineComparator(line, check_modified=check_modified)
    for line in after_lines
]
diff_result = diff(
    before_comps,
    after_comps,
    check_modified=check_modified
)
return diff_result
|
def lines_diff(before_lines, after_lines, check_modified=False)
|
Diff the lines in two strings.
Parameters
----------
before_lines : iterable
    Iterable containing lines used as the baseline version.
after_lines : iterable
    Iterable containing lines to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
| 2.729498
| 1.787683
| 1.526836
|
# The grid will be empty if `before` or `after` are
# empty; this will violate the assumptions made in the rest
# of this function.
# If this is the case, we know what the result of the diff is
# anyways: the contents of the other, non-empty input.
if len(before) == 0:
    return [
        {'state': 'added', 'value': v}
        for v in after
    ]
elif len(after) == 0:
    return [
        {'state': 'deleted', 'value': v}
        for v in before
    ]
grid = create_grid(before, after)
nrows = len(grid[0])
ncols = len(grid)
dps = diff_points(grid)
result = []
for kind, col, row in dps:
    if kind == 'unchanged':
        value = before[col]
        result.append({
            'state': kind,
            'value': value,
        })
    elif kind == 'deleted':
        assert col < ncols
        value = before[col]
        result.append({
            'state': kind,
            'value': value,
        })
    elif kind == 'added':
        assert row < nrows
        value = after[row]
        result.append({
            'state': kind,
            'value': value,
        })
    elif check_modified and kind == 'modified':
        result.append({
            'state': kind,
            'originalvalue': before[col],
            'modifiedvalue': after[row],
        })
    elif (not check_modified) and kind == 'modified':
        result.append({
            'state': 'deleted',
            'value': before[col],
        })
        result.append({
            'state': 'added',
            'value': after[row],
        })
    else:
        raise Exception('We should not be here.')
return result
|
def diff(before, after, check_modified=False)
|
Diff two sequences of comparable objects.
The result of this function is a list of dictionaries containing
values in ``before`` or ``after`` with a ``state`` of either
'unchanged', 'added', 'deleted', or 'modified'.
>>> import pprint
>>> result = diff(['a', 'b', 'c'], ['b', 'c', 'd'])
>>> pprint.pprint(result)
[{'state': 'deleted', 'value': 'a'},
 {'state': 'unchanged', 'value': 'b'},
 {'state': 'unchanged', 'value': 'c'},
 {'state': 'added', 'value': 'd'}]
Parameters
----------
before : iterable
    An iterable containing values to be used as the baseline version.
after : iterable
    An iterable containing values to be compared against the baseline.
check_modified : bool
    Whether or not to check for modifiedness.
Returns
-------
diff_items : A list of dictionaries containing diff information.
| 2.617243
| 2.56349
| 1.020969
|
'''
return true if exactly equal or if equal but modified,
otherwise return false
return type: BooleanPlus
'''
eqLine = line1 == line2
if eqLine:
    return BooleanPlus(True, False)
else:
    unchanged_count = self.count_similar_words(line1, line2)
    similarity_percent = (
        (2.0 * unchanged_count) /
        (len(line1.split()) + len(line2.split()))
    )
    if similarity_percent >= 0.50:
        return BooleanPlus(True, True)
    return BooleanPlus(False, False)
|
def equal(self, line1, line2)
|
Return true if the lines are exactly equal or equal-but-modified (at least 50% of the
words unchanged), otherwise return false.
return type: BooleanPlus
| 4.719894
| 3.101152
| 1.521981
|
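A worked example of the similarity score above, assuming count_similar_words counts the words that occur in both lines:

```python
line1 = 'a b c d'
line2 = 'a b x d'
unchanged_count = 3                                   # 'a', 'b' and 'd'
similarity_percent = 2.0 * unchanged_count / (4 + 4)  # = 0.75
# 0.75 >= 0.50, so the lines compare as equal-but-modified: BooleanPlus(True, True)
```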
'''
return true if exactly equal or if equal but modified,
otherwise return false
return type: BooleanPlus
'''
eqlanguage = cell1["language"] == cell2["language"]
eqinput = cell1["input"] == cell2["input"]
eqoutputs = self.equaloutputs(cell1["outputs"], cell2["outputs"])
if eqlanguage and eqinput and eqoutputs:
    return BooleanPlus(True, False)
elif not self.check_modified:
    return BooleanPlus(False, False)
input1 = u"".join(cell1['input'])
input2 = u"".join(cell2['input'])
similarity_percent = Levenshtein.ratio(input1, input2)
if similarity_percent >= 0.65:
    return BooleanPlus(True, True)
return BooleanPlus(False, False)
|
def compare_cells(self, cell1, cell2)
|
Return true if the cells are exactly equal or equal-but-modified (input similarity of at
least 0.65 by Levenshtein ratio), otherwise return false.
return type: BooleanPlus
| 4.318552
| 2.979958
| 1.449199
|
def outer(func: Callable[..., T_Retval],
          executor: Union[Executor, str] = None) -> Callable[..., Awaitable[T_Retval]]:
    def wrapper(*args, **kwargs):
        try:
            loop = get_event_loop()
        except RuntimeError:
            # Event loop not available -- we're in a worker thread
            return func(*args, **kwargs)

        # Resolve the executor resource name to an Executor instance
        if isinstance(executor, str):
            try:
                ctx = next(obj for obj in args[:2] if isinstance(obj, Context))
            except StopIteration:
                raise RuntimeError('the callable needs to be called with a Context as the '
                                   'first or second positional argument')

            _executor = ctx.require_resource(Executor, executor)
        else:
            _executor = executor

        callback = partial(func, *args, **kwargs)
        return loop.run_in_executor(_executor, callback)

    assert check_argument_types()
    assert not inspect.iscoroutinefunction(func), \
        'Cannot wrap coroutine functions to be run in an executor'
    return wraps(func)(wrapper)

if isinstance(func_or_executor, (str, Executor)):
    return partial(outer, executor=func_or_executor)
else:
    return outer(func_or_executor)
|
def executor(func_or_executor: Union[Executor, str, Callable[..., T_Retval]]) -> \
Union[WrappedCallable, Callable[..., WrappedCallable]]
|
Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is used.
Otherwise, the argument must be a PEP 3148 compliant thread pool executor or the name of an
:class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is ignored and the
wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the event loop
thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor instance or
the name of an :class:`~concurrent.futures.Executor` resource
| 3.08847
| 3.069871
| 1.006058
|
if not isclass(obj):
    obj = type(obj)
if obj.__module__ == 'builtins':
    return obj.__name__
else:
    return '{}.{}'.format(obj.__module__, obj.__qualname__)
|
def qualified_name(obj) -> str
|
Return the qualified name (e.g. package.module.Type) for the given object.
If ``obj`` is not a class, the returned name will match its type instead.
| 2.880706
| 2.637843
| 1.092069
|
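A short usage sketch:

```python
from collections import OrderedDict

qualified_name(OrderedDict)    # -> 'collections.OrderedDict'
qualified_name(OrderedDict())  # -> 'collections.OrderedDict' (falls back to the type)
qualified_name(int)            # -> 'int' (builtins stay unqualified)
```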
if func.__module__ == 'builtins':
    return func.__name__
else:
    return '{}.{}'.format(func.__module__, func.__qualname__)
|
def callable_name(func: Callable) -> str
|
Return the qualified name (e.g. package.module.func) for the given callable.
| 3.022295
| 2.234175
| 1.352756
|
assert check_argument_types()
copied = original.copy() if original else {}
if overrides:
    for key, value in overrides.items():
        if '.' in key:
            key, rest = key.split('.', 1)
            value = {rest: value}
        orig_value = copied.get(key)
        if isinstance(orig_value, dict) and isinstance(value, dict):
            copied[key] = merge_config(orig_value, value)
        else:
            copied[key] = value
return copied
|
def merge_config(original: Optional[Dict[str, Any]],
overrides: Optional[Dict[str, Any]]) -> Dict[str, Any]
|
Return a copy of the ``original`` configuration dictionary, with overrides from ``overrides``
applied.
This is similar to what :meth:`dict.update` does, but when a dictionary is about to be
replaced with another dictionary, it instead merges the contents.
If a key in ``overrides`` is a dotted path (i.e. ``foo.bar.baz: value``), it is assumed to be a
shorthand for ``foo: {bar: {baz: value}}``.
:param original: a configuration dictionary (or ``None``)
:param overrides: a dictionary containing overriding values to the configuration (or ``None``)
:return: the merge result
| 2.168804
| 2.318868
| 0.935286
|
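A small worked example (hypothetical configuration keys):

```python
original = {'db': {'host': 'localhost', 'port': 5432}}
overrides = {'db.port': 5433, 'debug': True}

merge_config(original, overrides)
# -> {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
```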
if not isinstance(obj, str):
    return obj
if ':' in obj:
    return resolve_reference(obj)
value = self._entrypoints.get(obj)
if value is None:
    raise LookupError('no such entry point in {}: {}'.format(self.namespace, obj))
if isinstance(value, EntryPoint):
    value = self._entrypoints[obj] = value.load()
return value
|
def resolve(self, obj)
|
Resolve a reference to an entry point or a variable in a module.
If ``obj`` is a ``module:varname`` reference to an object, :func:`resolve_reference` is
used to resolve it. If it is a string of any other kind, the named entry point is loaded
from this container's namespace. Otherwise, ``obj`` is returned as is.
:param obj: an entry point identifier, an object reference or an arbitrary object
:return: the loaded entry point, resolved object or the unchanged input value
:raises LookupError: if ``obj`` was a string but the named entry point was not found
| 3.36481
| 2.891599
| 1.16365
|
assert check_argument_types()
assert self.base_class, 'base class has not been defined'
plugin_class = self.resolve(type)
if not issubclass(plugin_class, self.base_class):
    raise TypeError('{} is not a subclass of {}'.format(
        qualified_name(plugin_class), qualified_name(self.base_class)))
return plugin_class(**constructor_kwargs)
|
def create_object(self, type: Union[type, str], **constructor_kwargs)
|
Instantiate a plugin.
The entry points in this namespace must point to subclasses of the ``base_class`` parameter
passed to this container.
:param type: an entry point identifier, a ``module:varname`` reference to a class, or an
actual class object
:param constructor_kwargs: keyword arguments passed to the constructor of the plugin class
:return: the plugin instance
| 3.706187
| 3.279716
| 1.130033
|
values = []
for name, value in self._entrypoints.items():
    if isinstance(value, EntryPoint):
        value = self._entrypoints[name] = value.load()
    values.append(value)
return values
|
def all(self) -> List[Any]
|
Load all entry points (if not already loaded) in this namespace and return the resulting
objects as a list.
| 3.495057
| 2.955084
| 1.182727
|
assert check_argument_types()
if not isinstance(alias, str) or not alias:
    raise TypeError('component_alias must be a nonempty string')
if alias in self.child_components:
    raise ValueError('there is already a child component named "{}"'.format(alias))
config['type'] = type or alias
# Allow the external configuration to override the constructor arguments
override_config = self.component_configs.get(alias) or {}
config = merge_config(config, override_config)
component = component_types.create_object(**config)
self.child_components[alias] = component
|
def add_component(self, alias: str, type: Union[str, type] = None, **config)
|
Add a child component.
This will instantiate a component class, as specified by the ``type`` argument.
If the second argument is omitted, the value of ``alias`` is used as its value.
The locally given configuration can be overridden by component configuration parameters
supplied to the constructor (via the ``components`` argument).
When configuration values are provided both as keyword arguments to this method and
component configuration through the ``components`` constructor argument, the configurations
are merged together using :func:`~asphalt.core.util.merge_config` in a way that the
configuration values from the ``components`` argument override the keyword arguments to
this method.
:param alias: a name for the component instance, unique within this container
:param type: entry point name or :class:`Component` subclass or a ``module:varname``
reference to one
:param config: keyword arguments passed to the component's constructor
| 4.046843
| 3.969369
| 1.019518
|
for alias in self.component_configs:
    if alias not in self.child_components:
        self.add_component(alias)
tasks = [component.start(ctx) for component in self.child_components.values()]
if tasks:
    await asyncio.gather(*tasks)
|
async def start(self, ctx: Context)
|
Creates child components that have been configured but not yet created, then calls their
:meth:`~Component.start` methods in separate tasks and waits until they have completed.
| 4.643358
| 3.331848
| 1.393629
|
def outer_wrapper(func: Callable):
    @wraps(func)
    def inner_wrapper(*args, **kwargs):
        try:
            ctx = next(arg for arg in args[:2] if isinstance(arg, Context))
        except StopIteration:
            raise RuntimeError('the first positional argument to {}() has to be a Context '
                               'instance'.format(callable_name(func))) from None

        executor = ctx.require_resource(Executor, resource_name)
        return asyncio_extras.call_in_executor(func, *args, executor=executor, **kwargs)

    return inner_wrapper

if isinstance(arg, str):
    resource_name = arg
    return outer_wrapper

return asyncio_extras.threadpool(arg)
|
def executor(arg: Union[Executor, str, Callable] = None)
|
Decorate a function so that it runs in an :class:`~concurrent.futures.Executor`.
If a resource name is given, the first argument must be a :class:`~.Context`.
Usage::
@executor
def should_run_in_executor():
...
With a resource name::
@executor('resourcename')
def should_run_in_executor(ctx):
...
:param arg: a callable to decorate, an :class:`~concurrent.futures.Executor` instance, the
resource name of one or ``None`` to use the event loop's default executor
:return: the wrapped function
| 4.161759
| 3.693585
| 1.126753
|
@wraps(func)
async def wrapper(*args, **kwargs) -> None:
    async def teardown_callback(exception: Optional[Exception]):
        try:
            await generator.asend(exception)
        except StopAsyncIteration:
            pass
        finally:
            await generator.aclose()

    try:
        ctx = next(arg for arg in args[:2] if isinstance(arg, Context))
    except StopIteration:
        raise RuntimeError('the first positional argument to {}() has to be a Context '
                           'instance'.format(callable_name(func))) from None

    generator = func(*args, **kwargs)
    try:
        await generator.asend(None)
    except StopAsyncIteration:
        pass
    except BaseException:
        await generator.aclose()
        raise
    else:
        ctx.add_teardown_callback(teardown_callback, True)

if iscoroutinefunction(func):
    func = async_generator(func)
elif not isasyncgenfunction(func):
    raise TypeError('{} must be an async generator function'.format(callable_name(func)))

return wrapper
|
def context_teardown(func: Callable)
|
Wrap an async generator function to execute the rest of the function at context teardown.
This function returns an async function, which, when called, starts the wrapped async
generator. The wrapped async function is run until the first ``yield`` statement
(``await async_generator.yield_()`` on Python 3.5). When the context is being torn down, the
exception that ended the context, if any, is sent to the generator.
For example::
class SomeComponent(Component):
@context_teardown
async def start(self, ctx: Context):
service = SomeService()
ctx.add_resource(service)
exception = yield
service.stop()
:param func: an async generator function
:return: an async function
| 2.745526
| 2.553587
| 1.075164
|
contexts = []
ctx = self # type: Optional[Context]
while ctx is not None:
    contexts.append(ctx)
    ctx = ctx.parent
return contexts
|
def context_chain(self) -> List['Context']
|
Return a list of contexts starting from this one, its parent and so on.
| 3.235307
| 2.561376
| 1.263113
|
assert check_argument_types()
self._check_closed()
self._teardown_callbacks.append((callback, pass_exception))
|
def add_teardown_callback(self, callback: Callable, pass_exception: bool = False) -> None
|
Add a callback to be called when this context closes.
This is intended for cleanup of resources, and the list of callbacks is processed in the
reverse order in which they were added, so the last added callback will be called first.
The callback may return an awaitable. If it does, the awaitable is awaited on before
calling any further callbacks.
:param callback: a callable that is called with either no arguments or with the exception
that ended this context, based on the value of ``pass_exception``
:param pass_exception: ``True`` to pass the callback the exception that ended this context
(or ``None`` if the context ended cleanly)
| 5.67509
| 6.827844
| 0.831169
|
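A minimal sketch of the reverse (LIFO) ordering, assuming an already-open Context instance (see ``close`` in the next row):

```python
async def demo(ctx):  # ctx: an open Context instance
    ctx.add_teardown_callback(lambda: print('added first, called last'))
    ctx.add_teardown_callback(lambda: print('added second, called first'))
    await ctx.close()  # prints the second message, then the first
```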
self._check_closed()
self._closed = True
exceptions = []
for callback, pass_exception in reversed(self._teardown_callbacks):
    try:
        retval = callback(exception) if pass_exception else callback()
        if isawaitable(retval):
            await retval
    except Exception as e:
        exceptions.append(e)
del self._teardown_callbacks
if exceptions:
    raise TeardownError(exceptions)
|
async def close(self, exception: BaseException = None) -> None
|
Close this context and call any necessary resource teardown callbacks.
If a teardown callback returns an awaitable, the return value is awaited on before calling
any further teardown callbacks.
All callbacks will be processed, even if some of them raise exceptions. If at least one
callback raised an error, this method will raise a :exc:`~.TeardownError` at the end.
After this method has been called, resources can no longer be requested or published on
this context.
:param exception: the exception, if any, that caused this context to be closed
:raises .TeardownError: if one or more teardown callbacks raise an exception
| 4.144661
| 3.3111
| 1.251747
|
assert check_argument_types()
self._check_closed()
if isinstance(types, type):
    types = (types,)
elif not types:
    types = (type(value),)
if value is None:
    raise ValueError('"value" must not be None')
if not resource_name_re.fullmatch(name):
    raise ValueError('"name" must be a nonempty string consisting only of alphanumeric '
                     'characters and underscores')
if context_attr and getattr_static(self, context_attr, None) is not None:
    raise ResourceConflict('this context already has an attribute {!r}'.format(
        context_attr))
for resource_type in types:
    if (resource_type, name) in self._resources:
        raise ResourceConflict(
            'this context already contains a resource of type {} using the name {!r}'.
            format(qualified_name(resource_type), name))
resource = ResourceContainer(value, tuple(types), name, context_attr, False)
for type_ in resource.types:
    self._resources[(type_, name)] = resource
if context_attr:
    setattr(self, context_attr, value)
# Notify listeners that a new resource has been made available
self.resource_added.dispatch(types, name, False)
|
def add_resource(self, value, name: str = 'default', context_attr: str = None,
types: Union[type, Sequence[type]] = ()) -> None
|
Add a resource to this context.
This will cause a ``resource_added`` event to be dispatched.
:param value: the actual resource value
:param name: name of this resource (unique among all its registered types within a single
context)
:param context_attr: name of the context attribute this resource will be accessible as
:param types: type(s) to register the resource as (omit to use the type of ``value``)
:raises asphalt.core.context.ResourceConflict: if the resource conflicts with an existing
one in any way
| 3.305208
| 3.00962
| 1.098214
|
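An illustrative sketch of ``add_resource`` (``ctx`` is assumed to be an open
``Context``)::

    def register(ctx):
        # registered as type str under the default name
        ctx.add_resource('hello')
        # registered under an explicit name and exposed as a context attribute
        ctx.add_resource(42, name='answer', context_attr='answer', types=int)
        assert ctx.answer == 42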
assert check_argument_types()
self._check_closed()
if not resource_name_re.fullmatch(name):
    raise ValueError('"name" must be a nonempty string consisting only of alphanumeric '
                     'characters and underscores')
if iscoroutinefunction(factory_callback):
    raise TypeError('"factory_callback" must not be a coroutine function')
if not types:
    raise ValueError('"types" must not be empty')
if isinstance(types, type):
    resource_types = (types,)  # type: Tuple[type, ...]
else:
    resource_types = tuple(types)
# Check for a conflicting context attribute
if context_attr in self._resource_factories_by_context_attr:
    raise ResourceConflict(
        'this context already contains a resource factory for the context attribute {!r}'.
        format(context_attr))
# Check for conflicts with existing resource factories
for type_ in resource_types:
    if (type_, name) in self._resource_factories:
        raise ResourceConflict('this context already contains a resource factory for the '
                               'type {}'.format(qualified_name(type_)))
# Add the resource factory to the appropriate lookup tables
resource = ResourceContainer(factory_callback, resource_types, name, context_attr, True)
for type_ in resource_types:
    self._resource_factories[(type_, name)] = resource
if context_attr:
    self._resource_factories_by_context_attr[context_attr] = resource
# Notify listeners that a new resource has been made available
self.resource_added.dispatch(resource_types, name, True)
|
def add_resource_factory(self, factory_callback: factory_callback_type,
types: Union[type, Sequence[Type]], name: str = 'default',
context_attr: str = None) -> None
|
Add a resource factory to this context.
This will cause a ``resource_added`` event to be dispatched.
A resource factory is a callable that generates a "contextual" resource when it is
requested, either through any of the methods :meth:`get_resource`, :meth:`require_resource`
or :meth:`request_resource`, or by accessing its context attribute.
When a new resource is created in this manner, it is always bound to the context through
which it was requested, regardless of where in the chain the factory itself was added.
:param factory_callback: a (non-coroutine) callable that takes a context instance as
argument and returns the created resource object
:param types: one or more types to register the generated resource as on the target context
:param name: name of the resource that will be created in the target context
:param context_attr: name of the context attribute the created resource will be accessible
as
:raises asphalt.core.context.ResourceConflict: if there is an existing resource factory for
the given type/name combinations or the given context variable
| 2.81977
| 2.695191
| 1.046223
|
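A sketch of registering a factory (``ctx`` is assumed to be an open ``Context``);
because the factory is called per requesting context, each context in the chain gets
its own ``dict`` on first access::

    def register_factory(ctx):
        ctx.add_resource_factory(lambda context: {}, types=dict,
                                 context_attr='cache')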
assert check_argument_types()
self._check_closed()
key = (type, name)
# First check if there's already a matching resource in this context
resource = self._resources.get(key)
if resource is not None:
    return resource.value_or_factory
# Next, check if there's a resource factory available on the context chain
resource = next((ctx._resource_factories[key] for ctx in self.context_chain
                 if key in ctx._resource_factories), None)
if resource is not None:
    return resource.generate_value(self)
# Finally, check parents for a matching resource
return next((ctx._resources[key].value_or_factory for ctx in self.context_chain
             if key in ctx._resources), None)
|
def get_resource(self, type: Type[T_Resource], name: str = 'default') -> Optional[T_Resource]
|
Look up a resource in the chain of contexts.
:param type: type of the requested resource
:param name: name of the requested resource
:return: the requested resource, or ``None`` if none was available
| 3.592915
| 3.587275
| 1.001572
|
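A lookup sketch for ``get_resource``; the ``cache`` resource name is hypothetical::

    def lookup(ctx):
        cache = ctx.get_resource(dict, 'cache')
        if cache is None:
            # nothing matched anywhere in the context chain
            cache = {}
        return cache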
assert check_argument_types()
# Collect all the matching resources from this context
resources = {container.name: container.value_or_factory
             for container in self._resources.values()
             if not container.is_factory and type in container.types
             }  # type: Dict[str, T_Resource]
# Next, find all matching resource factories in the context chain and generate resources
resources.update({container.name: container.generate_value(self)
                  for ctx in self.context_chain
                  for container in ctx._resources.values()
                  if container.is_factory and type in container.types
                  and container.name not in resources})
# Finally, add the resource values from the parent contexts
resources.update({container.name: container.value_or_factory
                  for ctx in self.context_chain[1:]
                  for container in ctx._resources.values()
                  if not container.is_factory and type in container.types
                  and container.name not in resources})
return set(resources.values())
|
def get_resources(self, type: Type[T_Resource]) -> Set[T_Resource]
|
Retrieve all the resources of the given type in this context and its parents.
Any matching resource factories are also triggered if necessary.
:param type: type of the resources to get
:return: a set of all found resources of the given type
| 3.205971
| 3.147249
| 1.018658
|
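A sketch that collects every ``int`` resource reachable from ``ctx``, triggering any
matching factories along the way::

    def total(ctx):
        return sum(ctx.get_resources(int))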
resource = self.get_resource(type, name)
if resource is None:
    raise ResourceNotFound(type, name)
return resource
|
def require_resource(self, type: Type[T_Resource], name: str = 'default') -> T_Resource
|
Look up a resource in the chain of contexts and raise an exception if it is not found.
This is like :meth:`get_resource` except that instead of returning ``None`` when a resource
is not found, it will raise :exc:`~asphalt.core.context.ResourceNotFound`.
:param type: type of the requested resource
:param name: name of the requested resource
:return: the requested resource
:raises asphalt.core.context.ResourceNotFound: if a resource of the given type and name was
not found
| 3.249115
| 4.805256
| 0.676158
|
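A sketch with an explicit fallback; the import location of ``ResourceNotFound`` is
assumed from the docstring above::

    from asphalt.core.context import ResourceNotFound

    def lookup(ctx):
        try:
            return ctx.require_resource(int, 'answer')
        except ResourceNotFound:
            # neither a resource nor a factory matched the type/name pair
            return 0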
# First try to locate an existing resource in this context and its parents
value = self.get_resource(type, name)
if value is not None:
    return value
# Wait until a matching resource or resource factory is available
signals = [ctx.resource_added for ctx in self.context_chain]
await wait_event(
    signals, lambda event: event.resource_name == name and type in event.resource_types)
return self.require_resource(type, name)
|
async def request_resource(self, type: Type[T_Resource], name: str = 'default') -> T_Resource
|
Look up a resource in the chain of contexts.
This is like :meth:`get_resource` except that if the resource is not already available, it
will wait for one to become available.
:param type: type of the requested resource
:param name: name of the requested resource
:return: the requested resource
| 5.98525
| 5.442045
| 1.099816
|
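A sketch of waiting for a resource that another task is expected to add later::

    async def wait_for_answer(ctx):
        # suspends until a matching resource or factory appears in the chain
        return await ctx.request_resource(int, 'answer')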
return asyncio_extras.call_async(self.loop, func, *args, **kwargs)
|
def call_async(self, func: Callable, *args, **kwargs)
|
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
| 8.19594
| 10.774311
| 0.760693
|
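A sketch of calling into the event loop from a worker thread; ``fetch`` is a
hypothetical coroutine function::

    def worker(ctx):
        # blocks this worker thread until fetch() has run in the loop thread
        return ctx.call_async(fetch)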
assert check_argument_types()
if isinstance(executor, str):
    executor = self.require_resource(Executor, executor)
return asyncio_extras.call_in_executor(func, *args, executor=executor, **kwargs)
|
def call_in_executor(self, func: Callable, *args, executor: Union[Executor, str] = None,
**kwargs) -> Awaitable
|
Call the given callable in an executor.
:param func: the callable to call
:param args: positional arguments to call the callable with
:param executor: either an :class:`~concurrent.futures.Executor` instance, the resource
name of one or ``None`` to use the event loop's default executor
:param kwargs: keyword arguments to call the callable with
:return: an awaitable that resolves to the return value of the call
| 5.035954
| 5.200617
| 0.968338
|
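A sketch that offloads a CPU-bound call to the event loop's default executor::

    import hashlib

    async def checksum(ctx, payload: bytes):
        # the awaitable resolves to the return value of hashlib.sha256(payload)
        digest = await ctx.call_in_executor(hashlib.sha256, payload)
        return digest.hexdigest()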
assert check_argument_types()
if isinstance(executor, str):
    executor = self.require_resource(Executor, executor)
return asyncio_extras.threadpool(executor)
|
def threadpool(self, executor: Union[Executor, str] = None)
|
Return an asynchronous context manager that runs the block in a (thread pool) executor.
:param executor: either an :class:`~concurrent.futures.Executor` instance, the resource
name of one or ``None`` to use the event loop's default executor
:return: an asynchronous context manager
| 8.535048
| 8.77817
| 0.972304
|
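A sketch of running blocking code without stalling the event loop; the file path is
hypothetical::

    async def read_config(ctx):
        async with ctx.threadpool():
            # executes in the (thread pool) executor, not in the loop thread
            with open('/etc/myapp.conf') as f:
                return f.read()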
@async_generator
async def streamer():
    try:
        while True:
            event = await queue.get()
            if filter is None or filter(event):
                await yield_(event)
    finally:
        cleanup()

def cleanup():
    nonlocal queue
    if queue is not None:
        for signal in signals:
            signal.disconnect(queue.put_nowait)
        queue = None

assert check_argument_types()
queue = Queue(max_queue_size)  # type: Queue[T_Event]
for signal in signals:
    signal.connect(queue.put_nowait)
gen = [streamer()]  # this is to allow the reference count to drop to 0
weakref.finalize(gen[0], cleanup)
return gen.pop()
|
def stream_events(signals: Sequence[Signal], filter: Callable[[T_Event], bool] = None, *,
max_queue_size: int = 0) -> AsyncIterator[T_Event]
|
Return an async generator that yields events from the given signals.
Only events that pass the filter callable (if one has been given) are yielded from the
generator; if no filter was given, every event is yielded.
:param signals: the signals to get events from
:param filter: a callable that takes an event object as an argument and returns ``True`` if
the event should pass, ``False`` if not
:param max_queue_size: maximum size of the queue, after which it will start to drop events
| 3.643099
| 3.970056
| 0.917644
|
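A consumption sketch; ``some_signal`` is a hypothetical ``Signal`` instance::

    async def consume(some_signal):
        async for event in stream_events([some_signal]):
            print('received:', event)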
if sys.version_info >= (3, 5, 3):
    assert check_argument_types()
async with aclosing(stream_events(signals, filter)) as events:
    return await events.asend(None)
|
async def wait_event(signals: Sequence['Signal[T_Event]'],
filter: Callable[[T_Event], bool] = None) -> T_Event
|
Wait until any of the given signals dispatches an event that satisfies the filter (if any).
If no filter has been given, the first event dispatched from the signal is returned.
:param signals: the signals to get events from
:param filter: a callable that takes an event object as an argument and returns ``True`` if
the event should pass, ``False`` if not
:return: the event that was dispatched
| 5.65603
| 6.80205
| 0.831518
|
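A sketch that waits for the first event whose hypothetical ``name`` attribute equals
``'ready'``::

    async def wait_until_ready(some_signal):
        return await wait_event([some_signal],
                                lambda event: getattr(event, 'name', None) == 'ready')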
assert check_argument_types()
if self.listeners is None:
    self.listeners = []
if callback not in self.listeners:
    self.listeners.append(callback)
return callback
|
def connect(self, callback: Callable[[T_Event], Any]) -> Callable[[T_Event], Any]
|
Connect a callback to this signal.
Each callable can only be connected once. Duplicate registrations are ignored.
If you need to pass extra arguments to the callback, you can use :func:`functools.partial`
to wrap the callable.
:param callback: a callable that will receive an event object as its only argument.
:return: the value of the ``callback`` argument
| 3.470501
| 4.391695
| 0.790242
|
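A sketch of connecting a listener; ``some_signal`` is a hypothetical ``Signal``
instance, and the duplicate ``connect()`` call is a deliberate no-op::

    def log_event(event):
        print('event dispatched:', event)

    some_signal.connect(log_event)
    some_signal.connect(log_event)  # ignored: already connected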