sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def rdlevenshtein_norm(source, target):
    """Calculate the normalized restricted Damerau-Levenshtein distance
    (a.k.a. the normalized optimal string alignment distance) between two
    string arguments.

    :param source: First string.
    :param target: Second string.
    :return: Float in the range [0.0, 1.0], with 1.0 signifying the maximum
        distance between strings with these lengths and 0.0 meaning the
        strings are identical.
    """
    # The maximum possible distance is the length of the longer string, so
    # it is used to normalize the result. Guard the degenerate case of two
    # empty strings, which would otherwise divide by zero.
    longest = max(len(source), len(target))
    if longest == 0:
        return 0.0
    # Compute the restricted Damerau-Levenshtein distance via the helper.
    distance = _levenshtein_compute(source, target, True)
    return float(distance) / longest
|
Calculates the normalized restricted Damerau-Levenshtein distance
(a.k.a. the normalized optimal string alignment distance) between two
string arguments. The result will be a float in the range [0.0, 1.0], with
1.0 signifying the maximum distance between strings with these lengths
|
entailment
|
def _levenshtein_compute(source, target, rd_flag):
    """Compute the Levenshtein distance
    (https://en.wikipedia.org/wiki/Levenshtein_distance)
    or, when ``rd_flag`` is true, the restricted Damerau-Levenshtein
    distance
    (https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
    between two Unicode strings, using the Wagner-Fischer algorithm
    (https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).

    Both distances are defined recursively: the distance between two
    strings is the cost of adjusting the last one or two characters plus
    the distance between the prefixes that exclude those characters (e.g.
    the distance between "tester" and "tested" is 1 + the distance between
    "teste" and "teste"). Wagner-Fischer keeps that idea but avoids
    redundant work by tabulating the distances between all prefix pairs in
    a matrix that is filled in iteratively.
    """
    n_src = len(source)
    n_tgt = len(target)
    # The table is (n_src + 1) x (n_tgt + 1) so the empty prefixes "" are
    # included. Column 0 is the cost of deleting every character of a
    # source prefix; row 0 is the cost of inserting every character of a
    # target prefix. The conditional in the comprehension seeds both.
    table = [[(row if col == 0 else col) for col in range(n_tgt + 1)]
             for row in range(n_src + 1)]
    # Fill in the rest of the table with the distances for the remaining
    # prefix combinations.
    for row in range(1, n_src + 1):
        for col in range(1, n_tgt + 1):
            # The options for the last character pair are: drop the source
            # character (deletion), drop the target character (insertion),
            # or drop both (substitution). A matching pair makes the
            # substitution free.
            match_cost = 0 if source[row - 1] == target[col - 1] else 1
            candidates = [
                table[row - 1][col] + 1,                  # deletion
                table[row][col - 1] + 1,                  # insertion
                table[row - 1][col - 1] + match_cost,     # substitution
            ]
            # Restricted Damerau-Levenshtein adds a fourth option:
            # transposing the current and previous characters. It can be
            # seen as a double substitution, with the same free case when
            # the current characters already match.
            if (rd_flag and row > 1 and col > 1
                    and source[row - 1] == target[col - 2]
                    and source[row - 2] == target[col - 1]):
                candidates.append(table[row - 2][col - 2] + match_cost)
            table[row][col] = min(candidates)
    # The largest prefixes are the strings themselves, so the bottom-right
    # cell holds the requested distance.
    return table[n_src][n_tgt]
|
Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
|
entailment
|
def main():
    """The main entry point.

    Validates the interpreter version, assembles the setuptools keyword
    arguments for the crispy package, and invokes ``setup``.
    """
    # Refuse to install on interpreters older than the supported minimums.
    if sys.version_info < (2, 7):
        sys.exit('crispy requires at least Python 2.7')
    elif sys.version_info[0] == 3 and sys.version_info < (3, 4):
        sys.exit('crispy requires at least Python 3.4')
    # Package metadata and layout handed verbatim to setuptools.setup().
    kwargs = dict(
        name='crispy',
        version=get_version(),
        description='Core-Level Spectroscopy Simulations in Python',
        long_description=get_readme(),
        license='MIT',
        author='Marius Retegan',
        author_email='marius.retegan@esrf.eu',
        url='https://github.com/mretegan/crispy',
        download_url='https://github.com/mretegan/crispy/releases',
        keywords='gui, spectroscopy, simulation, synchrotron, science',
        install_requires=get_requirements(),
        platforms=[
            'MacOS :: MacOS X',
            'Microsoft :: Windows',
            'POSIX :: Linux',
        ],
        packages=[
            'crispy',
            'crispy.gui',
            'crispy.gui.uis',
            'crispy.gui.icons',
            'crispy.modules',
            'crispy.modules.quanty',
            'crispy.modules.orca',
            'crispy.utils',
        ],
        # Non-Python resources shipped with the package.
        package_data={
            'crispy.gui.uis': [
                '*.ui',
                'quanty/*.ui',
            ],
            'crispy.gui.icons': [
                '*.svg',
            ],
            'crispy.modules.quanty': [
                'parameters/*.json.gz',
                'templates/*.lua',
            ],
        },
        classifiers=[
            'Development Status :: 4 - Beta',
            'Environment :: X11 Applications :: Qt',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: MIT License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Topic :: Scientific/Engineering :: Visualization',
        ]
    )
    # At the moment pip/setuptools doesn't play nice with shebang paths
    # containing white spaces.
    # See: https://github.com/pypa/pip/issues/2783
    # https://github.com/xonsh/xonsh/issues/879
    # The most straight forward workaround is to have a .bat script to run
    # crispy on Windows.
    if 'win32' in sys.platform:
        kwargs['scripts'] = ['scripts/crispy.bat']
    else:
        kwargs['scripts'] = ['scripts/crispy']
    setup(**kwargs)
|
The main entry point.
|
entailment
|
def loadFromDisk(self, calculation):
    """
    Read the spectra from the files generated by Quanty and store them
    as a list of spectrum objects.

    :param calculation: Finished calculation; provides the base file name,
        experiment type, axis parameters, and Gaussian broadenings read
        by this method.
    """
    # Map the displayed spectrum names to the file-name suffixes Quanty
    # uses when writing the .spec files.
    suffixes = {
        'Isotropic': 'iso',
        'Circular Dichroism (R-L)': 'cd',
        'Right Polarized (R)': 'r',
        'Left Polarized (L)': 'l',
        'Linear Dichroism (V-H)': 'ld',
        'Vertical Polarized (V)': 'v',
        'Horizontal Polarized (H)': 'h',
    }
    self.raw = list()
    for spectrumName in self.toPlot:
        suffix = suffixes[spectrumName]
        path = '{}_{}.spec'.format(calculation.baseName, suffix)
        # Let OSError/IOError propagate to the caller; the previous
        # try/except block only re-raised the same exception. The unused
        # "rows, columns = data.shape" unpacking was also dropped.
        data = np.loadtxt(path, skiprows=5)
        if calculation.experiment in ['XAS', 'XPS', 'XES']:
            # One-dimensional spectra.
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            if calculation.experiment == 'XES':
                # The emission axis is stored reversed.
                x = np.linspace(xMin, xMax, xNPoints + 1)
                x = x[::-1]
                y = data[:, 2]
                # Normalize to a maximum absolute intensity of one.
                y = y / np.abs(y.max())
            else:
                x = np.linspace(xMin, xMax, xNPoints + 1)
                y = data[:, 2::2].flatten()
            spectrum = Spectrum1D(x, y)
            spectrum.name = spectrumName
            # Suffixes longer than two characters are words; shorter ones
            # are acronyms.
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            if calculation.experiment in ['XAS', ]:
                spectrum.xLabel = 'Absorption Energy (eV)'
            elif calculation.experiment in ['XPS', ]:
                spectrum.xLabel = 'Binding Energy (eV)'
            elif calculation.experiment in ['XES', ]:
                spectrum.xLabel = 'Emission Energy (eV)'
            spectrum.yLabel = 'Intensity (a.u.)'
            self.broadenings = {'gaussian': (calculation.xGaussian, ), }
        else:
            # Two-dimensional spectra (e.g. RIXS planes).
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            yMin = calculation.yMin
            yMax = calculation.yMax
            yNPoints = calculation.yNPoints
            x = np.linspace(xMin, xMax, xNPoints + 1)
            y = np.linspace(yMin, yMax, yNPoints + 1)
            z = data[:, 2::2]
            spectrum = Spectrum2D(x, y, z)
            spectrum.name = spectrumName
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            spectrum.xLabel = 'Incident Energy (eV)'
            spectrum.yLabel = 'Energy Transfer (eV)'
            self.broadenings = {'gaussian': (calculation.xGaussian,
                                             calculation.yGaussian), }
        self.raw.append(spectrum)
    # Process the spectra once they were read from disk.
    self.process()
|
Read the spectra from the files generated by Quanty and store them
as a list of spectrum objects.
|
entailment
|
def populateWidget(self):
    """
    Populate the widget using data stored in the state
    object. The order in which the individual widgets are populated
    follows their arrangement.

    The models are recreated every time the function is called.
    This might seem to be an overkill, but in practice it is very fast.
    Don't try to move the model creation outside this function; it is not
    worth the effort, and there is nothing to gain from it.
    """
    # General calculation settings.
    self.elementComboBox.setItems(self.state._elements, self.state.element)
    self.chargeComboBox.setItems(self.state._charges, self.state.charge)
    self.symmetryComboBox.setItems(
        self.state._symmetries, self.state.symmetry)
    self.experimentComboBox.setItems(
        self.state._experiments, self.state.experiment)
    self.edgeComboBox.setItems(self.state._edges, self.state.edge)
    self.temperatureLineEdit.setValue(self.state.temperature)
    self.magneticFieldLineEdit.setValue(self.state.magneticField)
    # X-axis parameters and incident beam geometry/polarizations.
    self.axesTabWidget.setTabText(0, str(self.state.xLabel))
    self.xMinLineEdit.setValue(self.state.xMin)
    self.xMaxLineEdit.setValue(self.state.xMax)
    self.xNPointsLineEdit.setValue(self.state.xNPoints)
    self.xLorentzianLineEdit.setList(self.state.xLorentzian)
    self.xGaussianLineEdit.setValue(self.state.xGaussian)
    self.k1LineEdit.setVector(self.state.k1)
    self.eps11LineEdit.setVector(self.state.eps11)
    self.eps12LineEdit.setVector(self.state.eps12)
    if self.state.experiment in ['RIXS', ]:
        # RIXS needs a second (energy transfer) axis: restore the hidden
        # y-axis tab if only one tab is currently shown.
        if self.axesTabWidget.count() == 1:
            tab = self.axesTabWidget.findChild(QWidget, 'yTab')
            self.axesTabWidget.addTab(tab, tab.objectName())
        self.axesTabWidget.setTabText(1, self.state.yLabel)
        self.yMinLineEdit.setValue(self.state.yMin)
        self.yMaxLineEdit.setValue(self.state.yMax)
        self.yNPointsLineEdit.setValue(self.state.yNPoints)
        self.yLorentzianLineEdit.setList(self.state.yLorentzian)
        self.yGaussianLineEdit.setValue(self.state.yGaussian)
        self.k2LineEdit.setVector(self.state.k2)
        self.eps21LineEdit.setVector(self.state.eps21)
        self.eps22LineEdit.setVector(self.state.eps22)
        # Switch the polarization labels to the sigma/pi notation.
        text = self.eps11Label.text()
        text = re.sub('>[vσ]', '>σ', text)
        self.eps11Label.setText(text)
        text = self.eps12Label.text()
        text = re.sub('>[hπ]', '>π', text)
        self.eps12Label.setText(text)
    else:
        # No energy transfer axis; use the vertical/horizontal notation.
        self.axesTabWidget.removeTab(1)
        text = self.eps11Label.text()
        text = re.sub('>[vσ]', '>v', text)
        self.eps11Label.setText(text)
        text = self.eps12Label.text()
        text = re.sub('>[hπ]', '>h', text)
        self.eps12Label.setText(text)
    # Create the spectra selection model.
    self.spectraModel = SpectraModel(parent=self)
    self.spectraModel.setModelData(
        self.state.spectra.toCalculate,
        self.state.spectra.toCalculateChecked)
    self.spectraModel.checkStateChanged.connect(
        self.updateSpectraCheckState)
    self.spectraListView.setModel(self.spectraModel)
    self.spectraListView.selectionModel().setCurrentIndex(
        self.spectraModel.index(0, 0), QItemSelectionModel.Select)
    # Hamiltonian scaling parameters.
    self.fkLineEdit.setValue(self.state.fk)
    self.gkLineEdit.setValue(self.state.gk)
    self.zetaLineEdit.setValue(self.state.zeta)
    # Create the Hamiltonian model.
    self.hamiltonianModel = HamiltonianModel(parent=self)
    self.hamiltonianModel.setModelData(self.state.hamiltonianData)
    self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
    if self.syncParametersCheckBox.isChecked():
        self.hamiltonianModel.setSyncState(True)
    else:
        self.hamiltonianModel.setSyncState(False)
    self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
    self.hamiltonianModel.itemCheckStateChanged.connect(
        self.updateHamiltonianNodeCheckState)
    # Assign the Hamiltonian model to the Hamiltonian terms view.
    self.hamiltonianTermsView.setModel(self.hamiltonianModel)
    self.hamiltonianTermsView.selectionModel().setCurrentIndex(
        self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
    self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
        self.selectedHamiltonianTermChanged)
    # Assign the Hamiltonian model to the Hamiltonian parameters view.
    self.hamiltonianParametersView.setModel(self.hamiltonianModel)
    self.hamiltonianParametersView.expandAll()
    self.hamiltonianParametersView.resizeAllColumnsToContents()
    self.hamiltonianParametersView.setColumnWidth(0, 130)
    self.hamiltonianParametersView.setRootIndex(
        self.hamiltonianTermsView.currentIndex())
    self.nPsisLineEdit.setValue(self.state.nPsis)
    self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
    self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
    # The number of configurations is editable only when a ligands
    # hybridization term is enabled.
    self.nConfigurationsLineEdit.setEnabled(False)
    name = '{}-Ligands Hybridization'.format(self.state.block)
    for termName in self.state.hamiltonianData:
        if name in termName:
            termState = self.state.hamiltonianState[termName]
            if termState == 0:
                continue
            else:
                self.nConfigurationsLineEdit.setEnabled(True)
    if not hasattr(self, 'resultsModel'):
        # Create the results model (only once per widget lifetime).
        self.resultsModel = ResultsModel(parent=self)
        self.resultsModel.itemNameChanged.connect(
            self.updateCalculationName)
        self.resultsModel.itemCheckStateChanged.connect(
            self.updatePlotWidget)
        self.resultsModel.dataChanged.connect(self.updatePlotWidget)
        self.resultsModel.dataChanged.connect(self.updateResultsView)
        # Assign the results model to the results view.
        self.resultsView.setModel(self.resultsModel)
        self.resultsView.selectionModel().selectionChanged.connect(
            self.selectedResultsChanged)
        self.resultsView.resizeColumnsToContents()
        self.resultsView.horizontalHeader().setSectionsMovable(False)
        self.resultsView.horizontalHeader().setSectionsClickable(False)
        if sys.platform == 'darwin':
            self.resultsView.horizontalHeader().setMaximumHeight(17)
        # Add a context menu to the view.
        self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
        self.resultsView.customContextMenuRequested[QPoint].connect(
            self.showResultsContextMenu)
    if not hasattr(self, 'resultDetailsDialog'):
        self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
    self.updateMainWindowTitle(self.state.baseName)
|
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangement.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it.
|
entailment
|
def updateResultsView(self, index):
    """
    Update the selection to contain only the result specified by
    the index. This should be the last index of the model. Finally update
    the context menu.

    The selectionChanged signal is used to trigger the update of
    the Quanty dock widget and result details dialog.

    :param index: Index of the last item of the model.
    :type index: QModelIndex
    """
    view = self.resultsView
    # Clear any previous selection and select the full row of the item.
    selectionFlags = QItemSelectionModel.Clear
    selectionFlags |= QItemSelectionModel.Rows
    selectionFlags |= QItemSelectionModel.Select
    view.selectionModel().select(index, selectionFlags)
    view.resizeColumnsToContents()
    view.setFocus()
|
Update the selection to contain only the result specified by
the index. This should be the last index of the model. Finally update
the context menu.
The selectionChanged signal is used to trigger the update of
the Quanty dock widget and result details dialog.
:param index: Index of the last item of the model.
:type index: QModelIndex
|
entailment
|
def updatePlotWidget(self):
    """Redraw the plotting widget from the currently checked results.

    Updating the plotting widget should not require any information
    about the current state of the widget.
    """
    plotWidget = self.getPlotWidget()
    plotWidget.reset()
    checkedResults = self.resultsModel.getCheckedItems()
    for result in checkedResults:
        if isinstance(result, ExperimentalData):
            # Experimental data has a single 'Expt' spectrum and generic
            # axis labels.
            spectrum = result.spectra['Expt']
            spectrum.legend = '{}-{}'.format(result.index, 'Expt')
            spectrum.xLabel = 'X'
            spectrum.yLabel = 'Y'
            spectrum.plot(plotWidget=plotWidget)
            continue
        # RIXS results are only drawn when they are the sole selection.
        if len(checkedResults) > 1 and result.experiment in ['RIXS', ]:
            continue
        for spectrum in result.spectra.processed:
            spectrum.legend = '{}-{}'.format(
                result.index, spectrum.shortName)
            if spectrum.name in result.spectra.toPlotChecked:
                spectrum.plot(plotWidget=plotWidget)
|
Updating the plotting widget should not require any information
about the current state of the widget.
|
entailment
|
def row(self):
    """Return the row of the child.

    The row is this item's position in its parent's list of children;
    an item without a parent is placed at row 0.
    """
    if self.parent is None:
        return 0
    # list.index locates this child among its siblings.
    siblings = self.parent.getChildren()
    return siblings.index(self)
|
Return the row of the child.
|
entailment
|
def index(self, row, column, parent=QModelIndex()):
    """Return the index of the item in the model specified by the
    given row, column, and parent index.
    """
    # A missing or invalid parent index refers to the root item.
    if parent is not None and not parent.isValid():
        parentItem = self.rootItem
    else:
        parentItem = self.item(parent)
    childItem = parentItem.child(row)
    # Rows without a corresponding child yield an invalid index.
    if not childItem:
        return QModelIndex()
    return self.createIndex(row, column, childItem)
|
Return the index of the item in the model specified by the
given row, column, and parent index.
|
entailment
|
def parent(self, index):
    """Return the index of the parent for a given index of the
    child. Unfortunately, the name of the method has to be parent,
    even though a more verbose name like parentIndex would avoid
    confusion about what parent actually is - an index or an item.
    """
    parentItem = self.item(index).parent
    # The root item is not addressable via a valid index.
    if parentItem == self.rootItem:
        return QModelIndex()
    return self.createIndex(parentItem.row(), 0, parentItem)
|
Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
|
entailment
|
def rowCount(self, parentIndex):
    """Return the number of rows under the given parent. When the
    parentIndex is valid, rowCount() returns the number of children
    of the parent item; an invalid parentIndex refers to the root.
    """
    # Only items in the first column can have children.
    if parentIndex.column() > 0:
        return 0
    parentItem = (self.item(parentIndex) if parentIndex.isValid()
                  else self.rootItem)
    return parentItem.childCount()
|
Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
|
entailment
|
def data(self, index, role):
    """Return role specific data for the item referred by
    index.column().

    :param index: Index of the item.
    :param role: Qt item role (Display, Edit, CheckState, TextAlignment).
    :return: Role-dependent value, or None for unsupported roles.
    """
    if not index.isValid():
        return
    item = self.item(index)
    column = index.column()
    value = item.getItemData(column)
    if role == Qt.DisplayRole:
        try:
            if column == 1:
                # Display small values using scientific notation.
                # NOTE(review): the numeric format is applied to the raw
                # value, not float(value); if value is a numeric string,
                # format() raises ValueError and the raw value is
                # returned unformatted below — confirm this is intended.
                if abs(float(value)) < 1e-3 and float(value) != 0.0:
                    return '{0:8.1e}'.format(value)
                else:
                    return '{0:8.3f}'.format(value)
            else:
                return '{0:8.2f}'.format(value)
        except ValueError:
            # Non-numeric values (e.g. labels) are shown as-is.
            return value
    elif role == Qt.EditRole:
        try:
            # Here the value is converted first, so editing always sees a
            # consistently formatted number when conversion succeeds.
            value = float(value)
            if abs(value) < 1e-3 and value != 0.0:
                return str('{0:8.1e}'.format(value))
            else:
                return str('{0:8.3f}'.format(value))
        except ValueError:
            return str(value)
    elif role == Qt.CheckStateRole:
        # Only top-level items in the first column expose a check box.
        if item.parent == self.rootItem and column == 0:
            return item.getCheckState()
    elif role == Qt.TextAlignmentRole:
        # Numeric columns are right-aligned.
        if column > 0:
            return Qt.AlignRight
|
Return role specific data for the item referred by
index.column().
|
entailment
|
def setData(self, index, value, role):
    """Set the role data for the item at index to value.

    :param index: Index of the edited item.
    :param value: New value (string for edits, Qt check state otherwise).
    :param role: Qt.EditRole or Qt.CheckStateRole.
    :return: True if the data was changed, False otherwise.
    """
    if not index.isValid():
        return False
    item = self.item(index)
    column = index.column()
    if role == Qt.EditRole:
        items = list()
        items.append(item)
        if self.sync:
            # When synchronization is on, mirror the edit onto the
            # identically named children of the parent's siblings.
            parentIndex = self.parent(index)
            # Iterate over the siblings of the parent index.
            for sibling in self.siblings(parentIndex):
                siblingNode = self.item(sibling)
                for child in siblingNode.children:
                    if child.getItemData(0) == item.getItemData(0):
                        items.append(child)
        # NOTE(review): the loop variable shadows the outer ``item``;
        # harmless here since ``item`` is not used afterwards, but worth
        # renaming in a future cleanup.
        for item in items:
            columnData = str(item.getItemData(column))
            if columnData and columnData != value:
                try:
                    item.setItemData(column, float(value))
                except ValueError:
                    # Reject non-numeric input.
                    return False
            else:
                # NOTE(review): an unchanged (or empty) value aborts the
                # whole update, skipping any remaining synced items —
                # confirm this short-circuit is intended.
                return False
    elif role == Qt.CheckStateRole:
        item.setCheckState(value)
        if value == Qt.Unchecked or value == Qt.Checked:
            state = value
            self.itemCheckStateChanged.emit(index, state)
    self.dataChanged.emit(index, index)
    return True
|
Set the role data for the item at index to value.
|
entailment
|
def flags(self, index):
    """Return the active flags for the given index. Add editable
    flag to items other than the first column.
    """
    result = Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable
    item = self.item(index)
    # Leaf items are editable everywhere except in the first column.
    if index.column() > 0 and not item.childCount():
        result |= Qt.ItemIsEditable
    return result
|
Return the active flags for the given index. Add editable
flag to items other than the first column.
|
entailment
|
def _getModelData(self, modelData, parentItem=None):
    """Return the data contained in the model.

    Walks the tree depth-first, mirroring it into the ``modelData``
    mapping: branch items become nested ordered dictionaries, leaf items
    become either a two-element list or a bare value.
    """
    parentItem = self.rootItem if parentItem is None else parentItem
    for child in parentItem.getChildren():
        key = child.getItemData(0)
        if child.childCount():
            # Recurse into branch items.
            modelData[key] = odict()
            self._getModelData(modelData[key], child)
        elif isinstance(child.getItemData(2), float):
            # Leaf with a second float column: store both values.
            modelData[key] = [child.getItemData(1), child.getItemData(2)]
        else:
            modelData[key] = child.getItemData(1)
|
Return the data contained in the model.
|
entailment
|
def getNodesCheckState(self, parentItem=None):
    """Return the check state (disabled, tristate, enabled) of all items
    belonging to a parent.
    """
    if parentItem is None:
        parentItem = self.rootItem
    states = odict()
    # Preserve the child order in the returned mapping.
    for child in parentItem.getChildren():
        states[child.itemData[0]] = child.getCheckState()
    return states
|
Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
|
entailment
|
def calc_hexversion(major=0, minor=0, micro=0, releaselevel='dev', serial=0):
    """Calculate the hexadecimal version number from the tuple version_info:

    :param major: integer
    :param minor: integer
    :param micro: integer
    :param releaselevel: integer, or string looked up in RELEASE_LEVEL_VALUE
    :param serial: integer
    :return: integer, always increasing with revision numbers
    """
    # Non-numeric release levels (e.g. 'dev', 'alpha') are mapped to an
    # integer code; unknown strings fall back to 0.
    try:
        releaselevel = int(releaselevel)
    except ValueError:
        releaselevel = RELEASE_LEVEL_VALUE.get(releaselevel, 0)
    # Pack the components into one integer, mirroring sys.hexversion:
    # major in bits 24+, minor in 16-23, micro in 8-15, release level in
    # 4-7, serial in 0-3. (The original "* 1 << n" contained a no-op
    # multiplication; plain shifts are equivalent.)
    hex_version = int(serial)
    hex_version |= releaselevel << 4
    hex_version |= int(micro) << 8
    hex_version |= int(minor) << 16
    hex_version |= int(major) << 24
    return hex_version
|
Calculate the hexadecimal version number from the tuple version_info:
:param major: integer
:param minor: integer
:param micro: integer
:param relev: integer or string
:param serial: integer
:return: integerm always increasing with revision numbers
|
entailment
|
def _contextMenu(self, pos):
    """Handle plot area customContextMenuRequested signal.

    :param QPoint pos: Mouse position relative to plot area
    """
    # Build the context menu with the available actions.
    menu = QMenu(self)
    menu.addAction(self._zoomBackAction)
    # exec_ needs a global position, while the signal delivers one that is
    # relative to the PlotWidget's plot area, so convert it first.
    plotArea = self.getWidgetHandle()
    menu.exec_(plotArea.mapToGlobal(pos))
|
Handle plot area customContextMenuRequested signal.
:param QPoint pos: Mouse position relative to plot area
|
entailment
|
def convolve_fft(array, kernel):
    """
    Convolve an array with a kernel using FFT.
    Implementation based on the convolve_fft function from astropy.
    https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py

    :param array: N-dimensional input data.
    :param kernel: Convolution kernel with the same number of dimensions
        as the input array.
    :return: Real-valued convolution result with the same shape as the
        input array.
    :raise ValueError: If the array and kernel dimensionalities differ.
    """
    # BUG FIX: the ``np.complex`` alias was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin ``complex`` (complex128) is equivalent.
    array = np.asarray(array, dtype=complex)
    kernel = np.asarray(kernel, dtype=complex)
    if array.ndim != kernel.ndim:
        raise ValueError("Image and kernel must have same number of "
                         "dimensions")
    array_shape = array.shape
    kernel_shape = kernel.shape
    # Zero-pad to the combined size so the circular FFT convolution does
    # not wrap around (i.e. obtain a linear convolution).
    new_shape = np.array(array_shape) + np.array(kernel_shape)
    array_slices = []
    kernel_slices = []
    for (new_dimsize, array_dimsize, kernel_dimsize) in zip(
            new_shape, array_shape, kernel_shape):
        center = new_dimsize - (new_dimsize + 1) // 2
        array_slices += [slice(center - array_dimsize // 2,
                               center + (array_dimsize + 1) // 2)]
        kernel_slices += [slice(center - kernel_dimsize // 2,
                                center + (kernel_dimsize + 1) // 2)]
    array_slices = tuple(array_slices)
    kernel_slices = tuple(kernel_slices)
    # Embed data and kernel in the centers of the padded arrays.
    if not np.all(new_shape == array_shape):
        big_array = np.zeros(new_shape, dtype=complex)
        big_array[array_slices] = array
    else:
        big_array = array
    if not np.all(new_shape == kernel_shape):
        big_kernel = np.zeros(new_shape, dtype=complex)
        big_kernel[kernel_slices] = kernel
    else:
        big_kernel = kernel
    # Multiply in Fourier space; ifftshift aligns the kernel center with
    # the origin so the output is not translated.
    array_fft = np.fft.fftn(big_array)
    kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))
    rifft = np.fft.ifftn(array_fft * kernel_fft)
    # Crop back to the original region; for real inputs the result is real
    # up to round-off, so only the real part is returned.
    return rifft[array_slices].real
|
Convolve an array with a kernel using FFT.
Implementation based on the convolve_fft function from astropy.
https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py
|
entailment
|
def diagonalize(self):
    '''Diagonalize the tensor.

    The eigendecomposition is performed on the symmetric part of the
    tensor; the stored eigenvalues are then recomputed by projecting the
    full tensor onto the eigenvector basis.
    '''
    symmetric_part = (self.tensor.transpose() + self.tensor) / 2.0
    self.eigvals, self.eigvecs = np.linalg.eig(symmetric_part)
    projected = np.dot(
        np.dot(self.eigvecs.transpose(), self.tensor), self.eigvecs)
    self.eigvals = np.diag(projected)
|
Diagonalize the tensor.
|
entailment
|
def euler_angles_and_eigenframes(self):
    '''Calculate the Euler angles only if the rotation matrix
    (eigenframe) has positive determinant.'''
    # All eight sign combinations of the three eigenvectors.
    signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1],
                      [1, 1, -1], [-1, -1, 1], [-1, 1, -1],
                      [1, -1, -1], [-1, -1, -1]])
    eulangs = []
    eigframes = []
    for sign in signs:
        frame = np.dot(self.eigvecs, np.diag(sign))
        # Keep only frames with positive determinant (proper rotations,
        # up to the 1e-4 tolerance).
        if np.linalg.det(frame) > 1e-4:
            eigframes.append(np.array(frame))
            angles = transformations.euler_from_matrix(frame, axes='szyz')
            eulangs.append(np.array(angles))
    self.eigframes = np.array(eigframes)
    # The sign has to be inverted to be consistent with ORCA and EasySpin.
    self.eulangs = -np.array(eulangs)
|
Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant.
|
entailment
|
def _skip_lines(self, n):
    '''Advance the output iterator by ``n`` lines and return the last
    line read (also stored in ``self.line``).'''
    for _ in range(n):
        self.line = next(self.output)
    return self.line
|
Skip a number of lines from the output.
|
entailment
|
def _parse_tensor(self, indices=False):
    '''Parse a 3x3 tensor from the output.

    When ``indices`` is true, one extra header line is consumed first and
    each data line carries a leading row index that is skipped.
    '''
    if indices:
        self.line = self._skip_lines(1)
    # Column offset accounts for the optional leading row index.
    offset = 1 if indices else 0
    tensor = np.zeros((3, 3))
    for row in range(3):
        tokens = self.line.split()
        for col in range(3):
            tensor[row][col] = float(tokens[offset + col])
        self.line = self._skip_lines(1)
    return tensor
|
Parse a tensor.
|
entailment
|
def parse(self):
    '''Iterate over the lines and extract the required data.

    Populates attributes on the instance as the corresponding sections of
    the output are encountered: charge, multiplicity, atom names and
    coordinates, Mulliken populations, and the ``eprnmr``/``mrci`` result
    dictionaries.
    '''
    for self.line in self.output:
        # Parse general data: charge, multiplicity, coordinates, etc.
        self.index = 0
        if self.line[1:13] == 'Total Charge':
            tokens = self.line.split()
            self.charge = int(tokens[-1])
        # BUG FIX: the original tested ``(a or b) == 'Multiplicity'``,
        # which compares only the first non-empty slice. Check both
        # possible positions of the keyword instead.
        if 'Multiplicity' in (self.line[1:13], self.line[0:12]):
            tokens = self.line.split()
            self.multiplicity = int(tokens[-1])
        if self.line[0:33] == 'CARTESIAN COORDINATES (ANGSTROEM)':
            if not hasattr(self, 'names'):
                self.names = dict()
            if not hasattr(self, 'coords'):
                self.coords = dict()
            self.line = self._skip_lines(2)
            names = list()
            coords = list()
            # Read atom lines until the first blank line.
            while self.line.strip():
                tokens = self.line.split()
                names.append(tokens[0])
                x = float(tokens[1])
                y = float(tokens[2])
                z = float(tokens[3])
                coords.append((x, y, z))
                self.line = next(self.output)
            self.names = np.array(names)
            self.coords[self.index] = np.array(coords)
        if self.line[22:50] == 'MULLIKEN POPULATION ANALYSIS':
            if not hasattr(self, 'populations'):
                self.populations = dict()
            self.line = self._skip_lines(6)
            populations = list()
            while self.line.strip() and 'Sum' not in self.line:
                tokens = self.line.split()
                populations.append((float(tokens[-2]), float(tokens[-1])))
                self.line = next(self.output)
            # BUG FIX: the 'mulliken' sub-dictionary was never created,
            # so this assignment raised a KeyError on first use.
            self.populations.setdefault('mulliken', dict())[self.index] = np.array(populations)  # noqa
        # Parse data from the EPR/NMR module.
        if self.line[37:44] == 'EPR/NMR':
            self.eprnmr = dict()
        if self.line[0:19] == 'ELECTRONIC G-MATRIX':
            self.line = self._skip_lines(4)
            # BUG FIX: nested result dictionaries are now created on
            # demand; the original indexed them before they existed.
            self.eprnmr.setdefault('g', dict())['tensor'] = self._parse_tensor()  # noqa
        if self.line[0:27] == 'ZERO-FIELD-SPLITTING TENSOR':
            self.line = self._skip_lines(4)
            self.eprnmr.setdefault('zfs', dict())['tensor'] = self._parse_tensor()  # noqa
        if self.line[1:8] == 'Nucleus':
            tokens = self.line.split()
            nucleus = int(re.findall(r'\d+', tokens[1])[0])
            while 'Raw HFC' not in self.line:
                self.line = self._skip_lines(1)
            self.line = self._skip_lines(2)
            hfc = self.eprnmr.setdefault('hfc', dict()).setdefault(nucleus, dict())  # noqa
            hfc['tensor'] = self._parse_tensor()
            self.line = self._skip_lines(1)
            hfc['fc'] = self._parse_components()
            hfc['sd'] = self._parse_components()
            self.line = self._skip_lines(1)
            hfc['orb'] = self._parse_components()
            hfc['dia'] = self._parse_components()
        # Parse data from the MRCI module.
        if self.line[36:43] == 'M R C I':
            self.mrci = dict()
        if self.line[1:19] == 'SPIN-SPIN COUPLING':
            self.line = self._skip_lines(4)
            self.mrci.setdefault('zfs', dict()).setdefault('ssc', dict())['tensor'] = self._parse_tensor()  # noqa
        if self.line[1:30] == '2ND ORDER SPIN-ORBIT COUPLING':
            while 'Second' not in self.line:
                self.line = self._skip_lines(1)
            self.line = self._skip_lines(1)
            soc = self.mrci.setdefault('zfs', dict()).setdefault('soc', dict())  # noqa
            second = soc.setdefault('second_order', dict())
            second.setdefault('0', dict())['tensor'] = self._parse_tensor()
            self.line = self._skip_lines(2)
            second.setdefault('m', dict())['tensor'] = self._parse_tensor()
            self.line = self._skip_lines(2)
            second.setdefault('p', dict())['tensor'] = self._parse_tensor()
        if self.line[1:42] == 'EFFECTIVE HAMILTONIAN SPIN-ORBIT COUPLING':
            self.line = self._skip_lines(4)
            self.mrci.setdefault('zfs', dict()).setdefault('soc', dict()).setdefault('heff', dict())['tensor'] = self._parse_tensor()  # noqa
|
Iterate over the lines and extract the required data.
|
entailment
|
def __validate(self, target, value, oldvalue, initiator):
    """ Method executed when the event 'set' is triggered.

    :param target: Object triggered
    :param value: New value
    :param oldvalue: Previous value
    :param initiator: Column modified
    :return: The accepted value, or the previous value when the change is
        rejected silently.
    :raise ValidateError: If the value fails validation and
        ``throw_exception`` is set.
    """
    # Unchanged values never need validation.
    if value == oldvalue:
        return value
    if self.allow_null and value is None:
        return value
    if self.check_value(value):
        return value
    if self.throw_exception:
        if self.message:
            # BUG FIX: format into a local variable instead of overwriting
            # self.message, which destroyed the template's placeholders
            # for every subsequent validation failure.
            message = self.message.format(
                field=self.field, new_value=value, old_value=oldvalue,
                key=initiator.key)
            raise ValidateError(message)
        raise ValidateError('Value %s from column %s is not valid' % (value, initiator.key))
    # Reject the change silently by restoring the previous value.
    return oldvalue
|
Method executed when the event 'set' is triggered.
:param target: Object triggered
:param value: New value
:param oldvalue: Previous value
:param initiator: Column modified
:return: :raise ValidateError:
|
entailment
|
def __create_event(self):
    """Attach an SQLAlchemy 'set' listener on the column, if not yet attached.

    :rtype : object
    """
    # Avoid registering the same listener twice.
    if event.contains(self.field, 'set', self.__validate):
        return
    event.listen(self.field, 'set', self.__validate, retval=True)
|
Create an SQLAlchemy event listening the 'set' in a particular column.
:rtype : object
|
entailment
|
def stop(self):
    """Detach the 'set' listener so values are no longer validated."""
    listener_attached = event.contains(self.field, 'set', self.__validate)
    if listener_attached:
        event.remove(self.field, 'set', self.__validate)
|
Remove the listener to stop the validation
|
entailment
|
def start(self):
    """Re-attach the validation listener if it is not currently active."""
    if event.contains(self.field, 'set', self.__validate):
        return  # already listening; nothing to do
    self.__create_event()
|
Restart the listener
|
entailment
|
def nhapDaiHan(self, cucSo, gioiTinh):
    """Assign the 10-year major period (đại hạn) start age to every palace.

    Args:
        cucSo: cục number of the chart (starting age at the Mệnh palace)
        gioiTinh: gender, used to determine the traversal direction

    Returns:
        self, to allow chaining.
    """
    for cung in self.thapNhiCung:
        # Start age = cục number + 10 years per palace step away from
        # the Mệnh palace (step direction depends on gender).
        soBuoc = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)
        cung.daiHan(cucSo + 10 * soBuoc)
    return self
|
Nhap dai han
Args:
cucSo (TYPE): Description
gioiTinh (TYPE): Description
Returns:
TYPE: Description
|
entailment
|
def solve_gfl(data, edges=None, weights=None,
              minlam=0.2, maxlam=1000.0, numlam=30,
              alpha=0.2, inflate=2., converge=1e-6,
              maxsteps=1000000, lam=None, verbose=0,
              missing_val=None, full_path=False,
              loss='normal'):
    '''A very easy-to-use version of GFL solver that just requires the data and
    the edges.

    Parameters
    ----------
    data : ndarray or (ndarray, ndarray)
        Observations to smooth. For loss='binomial' this is a
        (successes, trials) pair of arrays; otherwise a single array.
    edges : array-like of (int, int), optional
        Graph edges over the flattened data. If None, a grid graph of the
        same shape as the data is used.
    weights : optional
        Per-edge weights forwarded to the trail solver.
    lam : float, optional
        If given, solve at this fixed penalty; otherwise a grid search over
        [minlam, maxlam] with numlam points selects the best value.
    missing_val : optional
        Data value marking missing observations; edges touching them are
        dropped and the corresponding data points are excluded.
    full_path : bool, optional
        When grid-searching, return the whole solution path dict instead of
        only the best fit.
    loss : str
        One of 'normal', 'logistic', or 'binomial'.

    Returns
    -------
    ndarray (or the solution-path dict when full_path=True)
        The smoothed estimate; nodes not touched by any edge keep their
        raw data values.
    '''
    # Flatten the observations up front; the trail solver works on 1-D data.
    if loss == 'binomial':
        flat_data = data[0].flatten()
        nonmissing_flat_data = flat_data, data[1].flatten()
    else:
        flat_data = data.flatten()
        nonmissing_flat_data = flat_data
    # Build the default edge set BEFORE the empty-edge check below.
    # (Bug fix: the original dereferenced edges.shape first, so the
    # documented default of edges=None crashed with AttributeError.)
    if edges is None:
        default_shape = data[0].shape if loss == 'binomial' else data.shape
        if verbose:
            print('Using default edge set of a grid of same shape as the data: {0}'.format(default_shape))
        edges = hypercube_edges(default_shape)
    # Fix no edge cases: with no graph structure there is nothing to fuse.
    if len(edges) < 1:
        return data
    # Keep the initial (unfiltered) edge set so that nodes left disconnected
    # can be restored to their observed values at the end.
    init_edges = np.asarray(edges)
    if missing_val is not None:
        if verbose:
            print('Removing all data points whose data value is {0}'.format(missing_val))
        edges = [(e1, e2) for (e1, e2) in edges if flat_data[e1] != missing_val and flat_data[e2] != missing_val]
        if loss == 'binomial':
            nonmissing_flat_data = flat_data[flat_data != missing_val], nonmissing_flat_data[1][flat_data != missing_val]
        else:
            nonmissing_flat_data = flat_data[flat_data != missing_val]
    if verbose:
        print('Decomposing graph into trails')
    ########### Setup the graph
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)
    if verbose:
        print('Setting up trail solver')
    ########### Setup the solver
    if loss == 'normal':
        solver = TrailSolver(alpha, inflate, maxsteps, converge)
    elif loss == 'logistic':
        solver = LogisticTrailSolver(alpha, inflate, maxsteps, converge)
    elif loss == 'binomial':
        solver = BinomialTrailSolver(alpha, inflate, maxsteps, converge)
    else:
        raise NotImplementedError('Loss must be normal, logistic, or binomial')
    # Set the data and pre-cache any necessary structures
    solver.set_data(nonmissing_flat_data, edges, ntrails, trails, breakpoints, weights=weights)
    if verbose:
        print('Solving')
    ########### Run the solver
    if lam:
        # Fixed lambda
        beta = solver.solve(lam)
    else:
        # Grid search to find the best lambda
        beta = solver.solution_path(minlam, maxlam, numlam, verbose=max(0, verbose - 1))
        if not full_path:
            beta = beta['best']
    ########### Fix disconnected nodes: any node not incident to an edge keeps
    # its observed value instead of the solver's unconstrained estimate.
    mask = np.ones_like(beta)
    mask[init_edges[:, 0]] = 0
    mask[init_edges[:, 1]] = 0
    beta[mask > 0] = data[mask > 0]
    return beta
|
A very easy-to-use version of GFL solver that just requires the data and
the edges.
|
entailment
|
def ngayThangNam(nn, tt, nnnn, duongLich=True, timeZone=7):
    """Validate a date and optionally convert it from solar to lunar.

    Args:
        nn: day (1-31)
        tt: month (1-12)
        nnnn: year
        duongLich (bool, optional): True when the input is a solar date,
            in which case it is converted to the lunar calendar via S2L.
        timeZone (int, optional): UTC offset, +7 for Vietnam.

    Returns:
        list: [day, month, year, leap-month flag]

    Raises:
        Exception: when the day or month is out of range.
    """
    hopLe = 0 < nn < 32 and 0 < tt < 13
    if not hopLe:
        raise Exception("Ngày, tháng, năm không chính xác.")
    thangNhuan = 0
    if duongLich is True:
        # Solar input: convert to the lunar calendar first.
        nn, tt, nnnn, thangNhuan = S2L(nn, tt, nnnn, timeZone=timeZone)
    return [nn, tt, nnnn, thangNhuan]
|
Summary
Args:
nn (TYPE): ngay
tt (TYPE): thang
nnnn (TYPE): nam
duongLich (bool, optional): bool
timeZone (int, optional): +7 Vietnam
Returns:
TYPE: Description
Raises:
Exception: Description
|
entailment
|
def canChiNgay(nn, tt, nnnn, duongLich=True, timeZone=7, thangNhuan=False):
    """Compute the Can (stem) and Chi (branch) indices of a day.

    Args:
        nn (int): day
        tt (int): month
        nnnn (int): year
        duongLich (bool, optional): True for a solar date, False for lunar
        timeZone (int, optional): UTC offset
        thangNhuan (bool, optional): whether the lunar month is a leap month

    Returns:
        list: [canNgay, chiNgay] — 1-based stem and branch indices.
    """
    if duongLich is False:
        # Lunar input: convert to the solar calendar first.
        nn, tt, nnnn = L2S(nn, tt, nnnn, thangNhuan, timeZone)
    soNgayJulius = jdFromDate(nn, tt, nnnn)
    # Stems cycle every 10 days and branches every 12; the constant offsets
    # align the cycles with the Julian day number.
    return [(soNgayJulius + 9) % 10 + 1, (soNgayJulius + 1) % 12 + 1]
|
Summary
Args:
nn (int): ngày
tt (int): tháng
nnnn (int): năm
duongLich (bool, optional): True nếu là dương lịch, False âm lịch
timeZone (int, optional): Múi giờ
thangNhuan (bool, optional): Có phải là tháng nhuận không?
Returns:
TYPE: Description
|
entailment
|
def ngayThangNamCanChi(nn, tt, nnnn, duongLich=True, timeZone=7):
"""chuyển đổi năm, tháng âm/dương lịch sang Can, Chi trong tiếng Việt.
Không tính đến can ngày vì phải chuyển đổi qua lịch Julius.
Hàm tìm can ngày là hàm canChiNgay(nn, tt, nnnn, duongLich=True,\
timeZone=7, thangNhuan=False)
Args:
nn (int): Ngày
tt (int): Tháng
nnnn (int): Năm
Returns:
TYPE: Description
"""
if duongLich is True:
[nn, tt, nnnn, thangNhuan] = \
ngayThangNam(nn, tt, nnnn, timeZone=timeZone)
# Can của tháng
canThang = (nnnn * 12 + tt + 3) % 10 + 1
# Can chi của năm
canNamSinh = (nnnn + 6) % 10 + 1
chiNam = (nnnn + 8) % 12 + 1
return [canThang, canNamSinh, chiNam]
|
chuyển đổi năm, tháng âm/dương lịch sang Can, Chi trong tiếng Việt.
Không tính đến can ngày vì phải chuyển đổi qua lịch Julius.
Hàm tìm can ngày là hàm canChiNgay(nn, tt, nnnn, duongLich=True,\
timeZone=7, thangNhuan=False)
Args:
nn (int): Ngày
tt (int): Tháng
nnnn (int): Năm
Returns:
TYPE: Description
|
entailment
|
def nguHanh(tenHanh):
    """Look up one of the five elements (ngũ hành) by name or abbreviation.

    Args:
        tenHanh (string): element name — Kim hoặc K, Moc hoặc M,
            Thuy hoặc T, Hoa hoặc H, Tho hoặc O

    Returns:
        Dictionary: element id, full element name, cục number, css class

    Raises:
        Exception: when tenHanh is not a recognized element.
    """
    # Full lookup table keyed by the unaccented element name.
    bangHanh = {
        "Kim": {"id": 1, "tenHanh": "Kim", "cuc": 4, "tenCuc": "Kim tứ Cục",
                "css": "hanhKim"},
        "Moc": {"id": 2, "tenHanh": "Mộc", "cuc": 3, "tenCuc": "Mộc tam Cục",
                "css": "hanhMoc"},
        "Thuy": {"id": 3, "tenHanh": "Thủy", "cuc": 2, "tenCuc": "Thủy nhị Cục",
                 "css": "hanhThuy"},
        "Hoa": {"id": 4, "tenHanh": "Hỏa", "cuc": 6, "tenCuc": "Hỏa lục Cục",
                "css": "hanhHoa"},
        "Tho": {"id": 5, "tenHanh": "Thổ", "cuc": 5, "tenCuc": "Thổ ngũ Cục",
                "css": "hanhTho"},
    }
    # Single-letter abbreviations map onto the full names above.
    vietTat = {"K": "Kim", "M": "Moc", "T": "Thuy", "H": "Hoa", "O": "Tho"}
    khoa = vietTat.get(tenHanh, tenHanh)
    if khoa in bangHanh:
        return bangHanh[khoa]
    raise Exception(
        "Tên Hành phải thuộc Kim (K), Mộc (M), Thủy (T), \
            Hỏa (H) hoặc Thổ (O)")
|
Args:
tenHanh (string): Tên Hành trong ngũ hành, Kim hoặc K, Moc hoặc M,
Thuy hoặc T, Hoa hoặc H, Tho hoặc O
Returns:
Dictionary: ID của Hành, tên đầy đủ của Hành, số Cục của Hành
Raises:
Exception: Description
|
entailment
|
def nguHanhNapAm(diaChi, thienCan, xuatBanMenh=False):
    """Sử dụng Ngũ Hành nạp âm để tính Hành của năm.

    Args:
        diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
        thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
        xuatBanMenh (bool, optional): when True, return the full bản mệnh
            name instead of the single-letter element code.

    Returns:
        Trả về chữ viết tắt Hành của năm (K, T, H, O, M), or the full bản
        mệnh string when xuatBanMenh is True. Returns None for a valid index
        pair that does not map to an element code.

    Raises:
        Exception: when (diaChi, thienCan) is not a valid index pair.
    """
    banMenh = {
        "K1": "HẢI TRUNG KIM",
        "T1": "GIÁNG HẠ THỦY",
        "H1": "TÍCH LỊCH HỎA",
        "O1": "BÍCH THƯỢNG THỔ",
        "M1": "TANG ÐỐ MỘC",
        "T2": "ÐẠI KHÊ THỦY",
        "H2": "LƯ TRUNG HỎA",
        "O2": "THÀNH ÐẦU THỔ",
        "M2": "TÒNG BÁ MỘC",
        "K2": "KIM BẠCH KIM",
        "H3": "PHÚ ÐĂNG HỎA",
        "O3": "SA TRUNG THỔ",
        "M3": "ÐẠI LÂM MỘC",
        "K3": "BẠCH LẠP KIM",
        "T3": "TRƯỜNG LƯU THỦY",
        "K4": "SA TRUNG KIM",
        "T4": "THIÊN HÀ THỦY",
        "H4": "THIÊN THƯỢNG HỎA",
        "O4": "LỘ BÀN THỔ",
        "M4": "DƯƠNG LIỄU MỘC",
        "T5": "TRUYỀN TRUNG THỦY",
        "H5": "SƠN HẠ HỎA",
        "O5": "ÐẠI TRẠCH THỔ",
        "M5": "THẠCH LỰU MỘC",
        "K5": "KIẾM PHONG KIM",
        "H6": "SƠN ÐẦU HỎA",
        "O6": "ỐC THƯỢNG THỔ",
        "M6": "BÌNH ÐỊA MỘC",
        "K6": "XOA XUYẾN KIM",
        "T6": "ÐẠI HẢI THỦY"}
    # Row 0 and column 0 are headers; False marks invalid stem/branch pairs.
    matranNapAm = [
        [0, "G", "Ất", "Bính", "Đinh", "Mậu", "Kỷ", "Canh", "Tân", "N", "Q"],
        [1, "K1", False, "T1", False, "H1", False, "O1", False, "M1", False],
        [2, False, "K1", False, "T1", False, "H1", False, "O1", False, "M1"],
        [3, "T2", False, "H2", False, "O2", False, "M2", False, "K2", False],
        [4, False, "T2", False, "H2", False, "O2", False, "M2", False, "K2"],
        [5, "H3", False, "O3", False, "M3", False, "K3", False, "T3", False],
        [6, False, "H3", False, "O3", False, "M3", False, "K3", False, "T3"],
        [7, "K4", False, "T4", False, "H4", False, "O4", False, "M4", False],
        [8, False, "K4", False, "T4", False, "H4", False, "O4", False, "M4"],
        [9, "T5", False, "H5", False, "O5", False, "M5", False, "K5", False],
        [10, False, "T5", False, "H5", False, "O5", False, "M5", False, "K5"],
        [11, "H6", False, "O6", False, "M6", False, "K6", False, "T6", False],
        [12, False, "H6", False, "O6", False, "M6", False, "K6", False, "T6"]
    ]
    try:
        nh = matranNapAm[diaChi][thienCan]
        # Bug fix: narrowed the original bare `except:` to the exceptions the
        # lookup can actually raise (bad index type / out of range / a False
        # cell indexed by nh[0]), so e.g. KeyboardInterrupt is not swallowed.
        if nh[0] in ["K", "M", "T", "H", "O"]:
            if xuatBanMenh is True:
                return banMenh[nh]
            else:
                return nh[0]
    except (IndexError, KeyError, TypeError):
        raise Exception(nguHanhNapAm.__doc__)
|
Sử dụng Ngũ Hành nạp âm để tính Hành của năm.
Args:
diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
Returns:
Trả về chữ viết tắt Hành của năm (K, T, H, O, M)
|
entailment
|
def timTuVi(cuc, ngaySinhAmLich):
    """Tìm vị trí của sao Tử vi (find the palace of the Tử Vi star).

    Args:
        cuc (int): cục number; must be one of 2, 3, 4, 5, 6
        ngaySinhAmLich (int): lunar day of birth

    Returns:
        The palace position returned by dichCung.

    Raises:
        Exception: if cuc is not in {2, 3, 4, 5, 6}.
    """
    cungDan = 3  # Vị trí cung Dần ban đầu là 3
    cucBanDau = cuc
    if cuc not in [2, 3, 4, 5, 6]:  # Tránh trường hợp infinite loop
        raise Exception("Số cục phải là 2, 3, 4, 5, 6")
    # Step forward by the cục number until reaching/passing the birth day,
    # shifting the Dần anchor one palace per step.
    while cuc < ngaySinhAmLich:
        cuc += cucBanDau
        cungDan += 1  # Dịch vị trí cung Dần
    saiLech = cuc - ngaySinhAmLich
    # Bug fix: was `saiLech % 2 is 1` — an identity test on an int, which
    # only works because of CPython's small-int caching and is a
    # SyntaxWarning on Python 3.8+. Use an equality test instead.
    if saiLech % 2 == 1:
        saiLech = -saiLech  # Nếu sai lệch là chẵn thì tiến, lẻ thì lùi
    return dichCung(cungDan, saiLech)
|
Tìm vị trí của sao Tử vi
Args:
cuc (TYPE): Description
ngaySinhAmLich (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
|
entailment
|
def nt2aa(ntseq):
    """Translate a nucleotide sequence into an amino acid sequence.

    Parameters
    ----------
    ntseq : str
        Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase)

    Returns
    -------
    aaseq : str
        Amino acid sequence

    Example
    --------
    >>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
    'CAWSVAPDRGGYTF'
    """
    base_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
    # 64-entry table indexed by base0 + 4*base1 + 16*base2.
    aa_dict = 'KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF'
    amino_acids = []
    for pos in range(0, len(ntseq) - 2, 3):  # only complete codons
        codon_index = (base_index[ntseq[pos]]
                       + 4 * base_index[ntseq[pos + 1]]
                       + 16 * base_index[ntseq[pos + 2]])
        amino_acids.append(aa_dict[codon_index])
    return ''.join(amino_acids)
|
Translate a nucleotide sequence into an amino acid sequence.
Parameters
----------
ntseq : str
Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase)
Returns
-------
aaseq : str
Amino acid sequence
Example
--------
>>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'CAWSVAPDRGGYTF'
|
entailment
|
def nt2codon_rep(ntseq):
    """Represent nucleotide sequence by sequence of codon symbols.

    'Translates' the nucleotide sequence into a symbolic representation of
    'amino acids' where each codon gets its own unique character symbol. These
    characters should be reserved only for representing the 64 individual
    codons --- note that this means it is important that this function matches
    the corresponding function in the preprocess script and that any custom
    alphabet does not use these symbols. Defining symbols for each individual
    codon allows for Pgen computation of inframe nucleotide sequences.

    Parameters
    ----------
    ntseq : str
        A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be
        'translated' into the codon - symbol representation. Can be either
        uppercase or lowercase, but only composed of A, C, G, or T.

    Returns
    -------
    codon_rep : str
        The codon - symbolic representation of ntseq. Note that if
        len(ntseq) == 3L --> len(codon_rep) == L
    """
    base_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
    #Use single characters not in use to represent each individual codon --- this function is called in constructing the codon dictionary
    codon_rep = '\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
    symbols = []
    for pos in range(0, len(ntseq) - 2, 3):  # only complete codons
        codon_index = (base_index[ntseq[pos]]
                       + 4 * base_index[ntseq[pos + 1]]
                       + 16 * base_index[ntseq[pos + 2]])
        symbols.append(codon_rep[codon_index])
    return ''.join(symbols)
|
Represent nucleotide sequence by sequence of codon symbols.
'Translates' the nucleotide sequence into a symbolic representation of
'amino acids' where each codon gets its own unique character symbol. These
characters should be reserved only for representing the 64 individual
codons --- note that this means it is important that this function matches
the corresponding function in the preprocess script and that any custom
alphabet does not use these symbols. Defining symbols for each individual
codon allows for Pgen computation of inframe nucleotide sequences.
Parameters
----------
ntseq : str
A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be
'translated' into the codon - symbol representation. Can be either
uppercase or lowercase, but only composed of A, C, G, or T.
Returns
-------
codon_rep : str
The codon - symbolic representation of ntseq. Note that if
len(ntseq) == 3L --> len(codon_rep) == L
Example
--------
>>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f'
|
entailment
|
def cutR_seq(seq, cutR, max_palindrome):
    """Cut genomic sequence from the right.

    Parameters
    ----------
    seq : str
        Nucleotide sequence to be cut from the right
    cutR : int
        cutR - max_palindrome = how many nucleotides to cut from the right.
        Negative cutR implies complementary palindromic insertions.
    max_palindrome : int
        Length of the maximum palindromic insertion.

    Returns
    -------
    seq : str
        Nucleotide sequence after being cut from the right

    Examples
    --------
    >>> cutR_seq('TGCGCCAGCAGTGAGTC', 0, 4)
    'TGCGCCAGCAGTGAGTCGACT'
    >>> cutR_seq('TGCGCCAGCAGTGAGTC', 8, 4)
    'TGCGCCAGCAGTG'
    """
    revcomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}  #can include lower case if wanted
    n_del = cutR - max_palindrome
    if n_del < 0:
        # Negative deletion count: append the reverse complement of the
        # last |n_del| nucleotides as a palindromic insertion.
        tail = seq[n_del:]
        return seq + ''.join(revcomp[nt] for nt in reversed(tail))
    # Otherwise, delete n_del nucleotides from the right end.
    return seq[:len(seq) - n_del]
|
Cut genomic sequence from the right.
Parameters
----------
seq : str
Nucleotide sequence to be cut from the right
cutR : int
cutR - max_palindrome = how many nucleotides to cut from the right.
Negative cutR implies complementary palindromic insertions.
max_palindrome : int
Length of the maximum palindromic insertion.
Returns
-------
seq : str
Nucleotide sequence after being cut from the right
Examples
--------
>>> cutR_seq('TGCGCCAGCAGTGAGTC', 0, 4)
'TGCGCCAGCAGTGAGTCGACT'
>>> cutR_seq('TGCGCCAGCAGTGAGTC', 8, 4)
'TGCGCCAGCAGTG'
|
entailment
|
def cutL_seq(seq, cutL, max_palindrome):
    """Cut genomic sequence from the left.

    Parameters
    ----------
    seq : str
        Nucleotide sequence to be cut from the left
    cutL : int
        cutL - max_palindrome = how many nucleotides to cut from the left.
        Negative cutL implies complementary palindromic insertions.
    max_palindrome : int
        Length of the maximum palindromic insertion.

    Returns
    -------
    seq : str
        Nucleotide sequence after being cut from the left

    Examples
    --------
    >>> cutL_seq('TGAACACTGAAGCTTTCTTT', 8, 4)
    'CACTGAAGCTTTCTTT'
    >>> cutL_seq('TGAACACTGAAGCTTTCTTT', 0, 4)
    'TTCATGAACACTGAAGCTTTCTTT'
    """
    revcomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}  #can include lower case if wanted
    n_del = cutL - max_palindrome
    if n_del < 0:
        # Negative deletion count: prepend the reverse complement of the
        # first |n_del| nucleotides as a palindromic insertion.
        head = seq[:-n_del]
        return ''.join(revcomp[nt] for nt in reversed(head)) + seq
    # Otherwise, delete n_del nucleotides from the left end.
    return seq[n_del:]
|
Cut genomic sequence from the left.
Parameters
----------
seq : str
Nucleotide sequence to be cut from the right
cutL : int
cutL - max_palindrome = how many nucleotides to cut from the left.
Negative cutL implies complementary palindromic insertions.
max_palindrome : int
Length of the maximum palindromic insertion.
Returns
-------
seq : str
Nucleotide sequence after being cut from the left
Examples
--------
>>> cutL_seq('TGAACACTGAAGCTTTCTTT', 8, 4)
'CACTGAAGCTTTCTTT'
>>> cutL_seq('TGAACACTGAAGCTTTCTTT', 0, 4)
'TTCATGAACACTGAAGCTTTCTTT'
|
entailment
|
def construct_codons_dict(alphabet_file = None):
    """Generate the codons_dict dictionary of 'amino acid' symbols.

    syntax of custom alphabet_files:
    char: list,of,amino,acids,or,codons,separated,by,commas

    Parameters
    ----------
    alphabet_file : str
        File name for a custom alphabet definition. If no file is provided, the
        default alphabet is used, i.e. standard amino acids, undetermined amino
        acids (B, J, X, and Z), and single codon symbols.

    Returns
    -------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.
    """
    #Some symbols can't be used in the CDR3 sequences in order to allow for
    #regular expression parsing and general manipulation.
    protected_symbols = [' ', '\t', '\n', '\x0b', '\x0c', '\r', ':', ',', ';', '[', ']', '{', '}', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    strip_chars = ''.join(protected_symbols)  #hoisted: used for every strip below
    #construct list of all 64 codons
    codons = [i + j + k for i in 'ACGT' for j in 'ACGT' for k in 'ACGT']
    codons_dict = {}
    #add standard amino acids symbols to the dict (i.e. 'ACDEFGHIKLMNPQRSTVWY*').
    #these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict.setdefault(nt2aa(codon), []).append(codon)
    #add single codon symbols to allow for inframe ntseq pgen computation
    #these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict[nt2codon_rep(codon)] = [codon]
    #Check to see if custom alphabet file is supplied, else use default alphabet
    #Include standard ambigious amino acids.
    #these symbols CAN be overwritten by custom alphabet files
    expanded_alphabet = {}
    expanded_alphabet['B'] = ['D','N']
    expanded_alphabet['J'] = ['I', 'L']
    expanded_alphabet['X'] = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
    expanded_alphabet['Z'] = ['E', 'Q']
    if alphabet_file is not None: #Use custom alphabet file definitions
        #assumed syntax of a line is:
        #s: a1, a2, a3, a4, a5, ..., aN
        #where s is a single character symbol that isn't reserved, and all
        #of the a's are either amino acid symbols or codons. Whitespaces
        #will be stripped as will brackets if the a's are presented as a
        #list.
        alphabet_f = open(alphabet_file, 'r')
        for line in alphabet_f:
            c_symbol = line.split(':', 1)[0].strip(strip_chars)
            #Note there shouldn't be any additional colons -- this is a protected symbol.
            c_aa_codon_list_str = line.split(':', 1)[1]
            expanded_alphabet[c_symbol] = [x.strip(strip_chars) for x in c_aa_codon_list_str.split(',')]
        alphabet_f.close()
    for symbol in expanded_alphabet.keys():
        #Double check that the symbol isn't already used (important particularly for the single codon representation)
        #print calls are parenthesized (single argument) so the code runs on
        #both Python 2 and Python 3.
        if symbol in codons_dict.keys():
            print(symbol + " is already used as an 'amino acid' symbol for codons: ")
            print(codons_dict[symbol])
            continue
        elif not len(symbol) == 1: #Check that the custom symbol is a single character
            print("Can't use " + symbol + " as a custom 'amino acid' definitions as such symbols must be single characters.")
            continue
        elif symbol in protected_symbols: #This elif shouldn't trigger due to the stripping of protected symbols.
            print(symbol + " is a protected character")
            #Bug fix: previously fell through and processed the protected
            #symbol anyway; skip it like the other rejected symbols.
            continue
        current_codon_collection = set()
        for x in expanded_alphabet[symbol]:
            if x in codons_dict.keys(): #Check if reference to an amino acid or other amino acid symbol
                current_codon_collection = current_codon_collection.union(codons_dict[x]) #If so, add those codons to the new collection
            elif x.upper() in codons: #Check if specifying a single codon
                current_codon_collection.add(x.upper()) #If so, add the codon to the new collection
            elif len(x) == 0: #fully stripped away
                continue
            else: #If not, don't recognize the addition and continue.
                print('Unfamiliar amino acid symbol or codon: ' + x)
                continue
        codons_dict[symbol] = list(current_codon_collection)
    return codons_dict
|
Generate the sub_codons_right dictionary of codon suffixes.
syntax of custom alphabet_files:
char: list,of,amino,acids,or,codons,separated,by,commas
Parameters
----------
alphabet_file : str
File name for a custom alphabet definition. If no file is provided, the
default alphabet is used, i.e. standard amino acids, undetermined amino
acids (B, J, X, and Z), and single codon symbols.
Returns
-------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
|
entailment
|
def generate_sub_codons_left(codons_dict):
    """Generate the sub_codons_left dictionary of codon prefixes.

    Parameters
    ----------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.

    Returns
    -------
    sub_codons_left : dict
        Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
        each codon in an 'amino acid' grouping
    """
    sub_codons_left = {}
    for aa, codon_list in codons_dict.items():
        # Collect the distinct 1-nt and 2-nt prefixes of every codon.
        prefixes = {c[0] for c in codon_list} | {c[:2] for c in codon_list}
        sub_codons_left[aa] = list(prefixes)
    return sub_codons_left
|
Generate the sub_codons_left dictionary of codon prefixes.
Parameters
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
Returns
-------
sub_codons_left : dict
Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
each codon in an 'amino acid' grouping
|
entailment
|
def generate_sub_codons_right(codons_dict):
    """Generate the sub_codons_right dictionary of codon suffixes.

    Parameters
    ----------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.

    Returns
    -------
    sub_codons_right : dict
        Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
        each codon in an 'amino acid' grouping.
    """
    sub_codons_right = {}
    for aa, codon_list in codons_dict.items():
        # Collect the distinct 1-nt and 2-nt suffixes of every codon.
        suffixes = {c[-1] for c in codon_list} | {c[-2:] for c in codon_list}
        sub_codons_right[aa] = list(suffixes)
    return sub_codons_right
|
Generate the sub_codons_right dictionary of codon suffixes.
Parameters
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
Returns
-------
sub_codons_right : dict
Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
each codon in an 'amino acid' grouping.
|
entailment
|
def determine_seq_type(seq, aa_alphabet):
    """Determine the type of a sequence.

    Parameters
    ----------
    seq : str
        Sequence to be typed.
    aa_alphabet : str
        String of all characters recoginized as 'amino acids'. (i.e. the keys
        of codons_dict: aa_alphabet = ''.join(codons_dict.keys()) )

    Returns
    -------
    seq_type : str or None
        The type of sequence ('ntseq', 'aaseq', 'regex') or None when seq
        contains characters outside all three alphabets.

    Example
    --------
    >>> determine_seq_type('TGTGCCAGCAGTTCCGAAGGGGCGGGAGGGCCCTCCCTGAGAGGTCATGAGCAGTTCTTC', aa_alphabet)
    'ntseq'
    >>> determine_seq_type('CSARDX[TV]GNX{0,}', aa_alphabet)
    'regex'
    """
    if all(x in 'ACGTacgt' for x in seq):
        return 'ntseq'
    if all(x in aa_alphabet for x in seq):
        return 'aaseq'
    # Bug fix: the original tested all([x in ...]) over a one-element list
    # of a (truthy) constant string -- always True -- so every sequence
    # fell through to 'regex'. Check each character of seq instead.
    if all(x in aa_alphabet + '[]{}0123456789,' for x in seq):
        return 'regex'
    return None
|
Determine the type of a sequence.
Parameters
----------
seq : str
Sequence to be typed.
aa_alphabet : str
String of all characters recoginized as 'amino acids'. (i.e. the keys
of codons_dict: aa_alphabet = ''.join(codons_dict.keys()) )
Returns
-------
seq_type : str
The type of sequence (ntseq, aaseq, regex, None) seq is.
Example
--------
>>> determine_seq_type('TGTGCCAGCAGTTCCGAAGGGGCGGGAGGGCCCTCCCTGAGAGGTCATGAGCAGTTCTTC', aa_alphabet)
'ntseq'
>>> determine_seq_type('CSARDX[TV]GNX{0,}', aa_alphabet)
    'regex'
|
entailment
|
def calc_steady_state_dist(R):
    """Calculate the steady state dist of a 4 state markov transition matrix.

    Parameters
    ----------
    R : ndarray
        Markov transition matrix

    Returns
    -------
    p_ss : ndarray
        Steady state probability distribution, or -1 when no eigenvalue
        within 1e-8 of 1 is found among the first four.
    """
    # The steady state is the (normalized) eigenvector of eigenvalue 1.
    eigvals, eigvecs = np.linalg.eig(R)
    for idx in range(4):
        if np.abs(eigvals[idx] - 1) >= 1e-8:
            continue
        vec = eigvecs[:, idx]
        return np.real(vec / np.sum(vec))
    return -1
|
Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
|
entailment
|
def rnd_ins_seq(ins_len, C_R, CP_first_nt):
    """Generate a random insertion nucleotide sequence of length ins_len.

    Draws the sequence identity (for a set length) from the distribution
    defined by the dinucleotide markov model of transition matrix R.

    Parameters
    ----------
    ins_len : int
        Length of nucleotide sequence to be inserted.
    C_R : ndarray
        (4, 4) array of the cumulative transition probabilities defined by the
        Markov transition matrix R
    CP_first_nt : ndarray
        (4,) array of the cumulative probabilities for the first inserted
        nucleotide

    Returns
    -------
    seq : str
        Randomly generated insertion sequence of length ins_len.
    """
    num2nt = 'ACGT'
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    if ins_len == 0:
        return ''
    # First nucleotide is drawn from its own (cumulative) distribution.
    seq = num2nt[CP_first_nt.searchsorted(np.random.random())]
    # Each following nucleotide is conditioned on the previous one via C_R.
    for _ in range(ins_len - 1):
        prev_idx = nt2num[seq[-1]]
        seq += num2nt[C_R[prev_idx, :].searchsorted(np.random.random())]
    return seq
|
Generate a random insertion nucleotide sequence of length ins_len.
Draws the sequence identity (for a set length) from the distribution
defined by the dinucleotide markov model of transition matrix R.
Parameters
----------
ins_len : int
Length of nucleotide sequence to be inserted.
C_R : ndarray
(4, 4) array of the cumulative transition probabilities defined by the
Markov transition matrix R
CP_first_nt : ndarray
(4,) array of the cumulative probabilities for the first inserted
nucleotide
Returns
-------
seq : str
Randomly generated insertion sequence of length ins_len.
Examples
--------
>>> rnd_ins_seq(7, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD'])
'GATGGAC'
>>> rnd_ins_seq(7, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD'])
'ACCCCCG'
>>> rnd_ins_seq(3, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD'])
'GCC'
|
entailment
|
def gen_rnd_prod_CDR3(self, conserved_J_residues = 'FVW'):
    """Generate a productive CDR3 seq from a Monte Carlo draw of the model.

    Parameters
    ----------
    conserved_J_residues : str, optional
        Conserved amino acid residues defining the CDR3 on the J side (normally
        F, V, and/or W)

    Returns
    -------
    ntseq : str
        Productive CDR3 nucleotide sequence
    aaseq : str
        CDR3 amino acid sequence (aaseq = nt2aa(ntseq))
    V_choice : int
        Index of V allele chosen to generate the CDR3 seq
    J_choice : int
        Index of J allele chosen to generate the CDR3 seq
    """
    # Rejection-sample recombination events until a productive sequence is
    # drawn; the loop only exits via the return below.
    # (Bug fix: was `while ~coding_pass:` -- bitwise NOT of a bool, which is
    # always truthy, with a flag that was never updated.)
    while True:
        recomb_events = self.choose_random_recomb_events()
        V_seq = self.cutV_genomic_CDR3_segs[recomb_events['V']]
        #This both checks that the position of the conserved C is
        #identified and that the V isn't fully deleted out of the CDR3
        #region
        if len(V_seq) <= max(recomb_events['delV'], 0):
            continue
        D_seq = self.cutD_genomic_CDR3_segs[recomb_events['D']]
        J_seq = self.cutJ_genomic_CDR3_segs[recomb_events['J']]
        #We check that the D and J aren't deleted more than allowed. Note
        #the generative model really should reflect this structure already
        if len(D_seq) < (recomb_events['delDl'] + recomb_events['delDr']) or len(J_seq) < recomb_events['delJ']:
            continue
        V_seq = V_seq[:len(V_seq) - recomb_events['delV']]
        D_seq = D_seq[recomb_events['delDl']:len(D_seq)-recomb_events['delDr']]
        J_seq = J_seq[recomb_events['delJ']:]
        # Reject out-of-frame recombinations early.
        if (len(V_seq)+ len(D_seq) + len(J_seq) + recomb_events['insVD'] + recomb_events['insDJ']) % 3 != 0:
            continue
        insVD_seq = rnd_ins_seq(recomb_events['insVD'], self.C_Rvd, self.C_first_nt_bias_insVD)
        insDJ_seq = rnd_ins_seq(recomb_events['insDJ'], self.C_Rdj, self.C_first_nt_bias_insDJ)[::-1] #have to reverse the DJ seq
        #Translate to amino acid sequence, see if productive
        ntseq = V_seq + insVD_seq + D_seq + insDJ_seq + J_seq
        aaseq = nt2aa(ntseq)
        if '*' not in aaseq and aaseq[0]=='C' and aaseq[-1] in conserved_J_residues:
            return ntseq, aaseq, recomb_events['V'], recomb_events['J']
|
Generate a productive CDR3 seq from a Monte Carlo draw of the model.
Parameters
----------
conserved_J_residues : str, optional
Conserved amino acid residues defining the CDR3 on the J side (normally
F, V, and/or W)
Returns
-------
ntseq : str
Productive CDR3 nucleotide sequence
aaseq : str
CDR3 amino acid sequence (aaseq = nt2aa(ntseq))
V_choice : int
Index of V allele chosen to generate the CDR3 seq
J_choice : int
Index of J allele chosen to generate the CDR3 seq
|
entailment
|
def choose_random_recomb_events(self):
    """Sample the genomic model for VDJ recombination events.

    Returns
    -------
    recomb_events : dict
        Dictionary of the VDJ recombination events. These are
        integers determining gene choice, deletions, and number of insertions.

    Example
    --------
    >>> sequence_generation.choose_random_recomb_events()
    {'D': 0, 'J': 13, 'V': 36, 'delDl': 2, 'delDr': 13, 'delJ': 10, 'delV': 5, 'insDJ': 6, 'insVD': 9}
    """
    recomb_events = {}
    recomb_events['V'] = self.CPV.searchsorted(np.random.random())
    #For 2D arrays make sure to take advantage of a mod expansion to find indicies.
    #Bug fix: use floor division (//) -- plain / was Python-2 integer
    #division; on Python 3 it yields floats that are invalid array indices.
    DJ_choice = self.CPDJ.searchsorted(np.random.random())
    recomb_events['D'] = DJ_choice // self.num_J_genes
    recomb_events['J'] = DJ_choice % self.num_J_genes
    #Refer to the correct slices for the dependent distributions
    recomb_events['delV'] = self.given_V_CPdelV[recomb_events['V'], :].searchsorted(np.random.random())
    recomb_events['delJ'] = self.given_J_CPdelJ[recomb_events['J'], :].searchsorted(np.random.random())
    delDldelDr_choice = self.given_D_CPdelDldelDr[recomb_events['D'], :].searchsorted(np.random.random())
    recomb_events['delDl'] = delDldelDr_choice // self.num_delDr_poss
    recomb_events['delDr'] = delDldelDr_choice % self.num_delDr_poss
    recomb_events['insVD'] = self.CinsVD.searchsorted(np.random.random())
    recomb_events['insDJ'] = self.CinsDJ.searchsorted(np.random.random())
    return recomb_events
|
Sample the genomic model for VDJ recombination events.
Returns
-------
recomb_events : dict
Dictionary of the VDJ recombination events. These are
integers determining gene choice, deletions, and number of insertions.
Example
--------
>>> sequence_generation.choose_random_recomb_events()
{'D': 0, 'J': 13, 'V': 36, 'delDl': 2, 'delDr': 13, 'delJ': 10, 'delV': 5, 'insDJ': 6, 'insVD': 9}
|
entailment
|
def gen_rnd_prod_CDR3(self, conserved_J_residues='FVW'):
    """Generate a productive CDR3 seq from a Monte Carlo draw of the model.

    Parameters
    ----------
    conserved_J_residues : str, optional
        Conserved amino acid residues defining the CDR3 on the J side (normally
        F, V, and/or W)

    Returns
    -------
    ntseq : str
        Productive CDR3 nucleotide sequence
    aaseq : str
        CDR3 amino acid sequence (aaseq = nt2aa(ntseq))
    V_choice : int
        Index of V allele chosen to generate the CDR3 seq
    J_choice : int
        Index of J allele chosen to generate the CDR3 seq
    """
    # Rejection-sample until a productive sequence is drawn. The original
    # `while ~coding_pass:` relied on bitwise NOT of a bool always being
    # truthy (~False == -1, ~True == -2); an explicit infinite loop with
    # the in-loop return states the intent directly.
    while True:
        recomb_events = self.choose_random_recomb_events()
        V_seq = self.cutV_genomic_CDR3_segs[recomb_events['V']]
        # This both checks that the position of the conserved C is
        # identified and that the V isn't fully deleted out of the CDR3
        # region
        if len(V_seq) <= max(recomb_events['delV'], 0):
            continue
        J_seq = self.cutJ_genomic_CDR3_segs[recomb_events['J']]
        # We check that J isn't deleted more than allowed. Note the
        # generative model really should reflect this structure already
        if len(J_seq) < recomb_events['delJ']:
            continue
        V_seq = V_seq[:len(V_seq) - recomb_events['delV']]
        J_seq = J_seq[recomb_events['delJ']:]
        # Only in-frame recombinations can translate to a productive CDR3
        if (len(V_seq) + len(J_seq) + recomb_events['insVJ']) % 3 != 0:
            continue
        insVJ_seq = rnd_ins_seq(recomb_events['insVJ'], self.C_Rvj, self.C_first_nt_bias_insVJ)
        # Translate to amino acid sequence, see if productive
        ntseq = V_seq + insVJ_seq + J_seq
        aaseq = nt2aa(ntseq)
        if '*' not in aaseq and aaseq[0] == 'C' and aaseq[-1] in conserved_J_residues:
            return ntseq, aaseq, recomb_events['V'], recomb_events['J']
|
Generate a productive CDR3 seq from a Monte Carlo draw of the model.
Parameters
----------
conserved_J_residues : str, optional
Conserved amino acid residues defining the CDR3 on the J side (normally
F, V, and/or W)
Returns
-------
ntseq : str
Productive CDR3 nucleotide sequence
aaseq : str
CDR3 amino acid sequence (aaseq = nt2aa(ntseq))
V_choice : int
Index of V allele chosen to generate the CDR3 seq
J_choice : int
Index of J allele chosen to generate the CDR3 seq
|
entailment
|
def choose_random_recomb_events(self):
    """Sample the genomic model for VJ recombination events.

    Draws each event from its (cumulative) model distribution via inverse
    transform sampling with searchsorted.

    Returns
    -------
    recomb_events : dict
        Dictionary of the VJ recombination events. These are
        integers determining gene choice, deletions, and number of insertions.

    Example
    --------
    >>> sequence_generation.choose_random_recomb_events()
    {'J': 13, 'V': 36, 'delJ': 10, 'delV': 5, 'insVJ': 3}
    """
    recomb_events = {}
    # The joint V/J choice is stored flattened; recover the pair with
    # floor division and mod. Use // so the index stays an integer on
    # Python 3 (true division would yield a float).
    VJ_choice = self.CPVJ.searchsorted(np.random.random())
    recomb_events['V'] = VJ_choice // self.num_J_genes
    recomb_events['J'] = VJ_choice % self.num_J_genes
    # Refer to the correct slices for the dependent distributions
    recomb_events['delV'] = self.given_V_CPdelV[recomb_events['V'], :].searchsorted(np.random.random())
    recomb_events['delJ'] = self.given_J_CPdelJ[recomb_events['J'], :].searchsorted(np.random.random())
    recomb_events['insVJ'] = self.CPinsVJ.searchsorted(np.random.random())
    return recomb_events
|
Sample the genomic model for VDJ recombination events.
Returns
-------
recomb_events : dict
Dictionary of the VDJ recombination events. These are
integers determining gene choice, deletions, and number of insertions.
Example
--------
>>> sequence_generation.choose_random_recomb_events()
{'J': 13, 'V': 36, 'delJ': 10, 'delV': 5, 'insVJ': 3}
|
entailment
|
def update_rates(self):
    """
    Fetch the backend's current exchange rates and persist them,
    creating or updating the RateSource row and one Rate row per currency.
    """
    src, _created = RateSource.objects.get_or_create(name=self.get_source_name())
    src.base_currency = self.get_base_currency()
    src.save()
    latest = self.get_rates()
    for ccy in latest:
        try:
            entry = Rate.objects.get(source=src, currency=ccy)
        except Rate.DoesNotExist:
            # First time we see this currency for this source
            entry = Rate(source=src, currency=ccy)
        entry.value = latest[ccy]
        entry.save()
|
Creates or updates rates for a source
|
entailment
|
def sample_gtf(data, D, k, likelihood='gaussian', prior='laplace',
               lambda_hyperparams=None, lam_walk_stdev=0.01, lam0=1.,
               dp_hyperparameter=None, w_hyperparameters=None,
               iterations=7000, burn=2000, thin=10,
               robust=False, empirical=False,
               verbose=False):
    '''Generate samples from the generalized graph trend filtering distribution
    via a modified Swendsen-Wang slice sampling algorithm.

    Parameters
    ----------
    data : tuple or array
        Likelihood-dependent observations: (y, w) for 'gaussian',
        (trials, successes) for 'binomial', a single count array for 'poisson'.
    D : matrix
        Graph difference (oriented incidence) matrix.
    k : int
        Trend filtering order; Delta^(k) is derived from D.
    likelihood : str
        One of 'gaussian', 'binomial', 'poisson'.
    prior : str
        One of 'laplace', 'laplacegamma', 'doublepareto', 'doublepareto2',
        'cauchy'. NOTE(review): not every prior is wired up for every
        likelihood; an unsupported combination leaves the sample arrays
        as zeros rather than raising.

    Returns
    -------
    (beta_samples, lam_samples) : tuple
        beta_samples is (sample_size x n); lam_samples is length sample_size.
    '''
    Dk = get_delta(D, k)
    dk_rows, dk_rowbreaks, dk_cols, dk_vals = decompose_delta(Dk)
    # Unpack the data according to the likelihood model
    if likelihood == 'gaussian':
        y, w = data
    elif likelihood == 'binomial':
        trials, successes = data
    elif likelihood == 'poisson':
        obs = data
    else:
        raise Exception('Unknown likelihood type: {0}'.format(likelihood))
    # Fill in per-prior default hyperparameters (use `is None`, not `== None`)
    if prior == 'laplace':
        if lambda_hyperparams is None:
            lambda_hyperparams = (1., 1.)
    elif prior == 'laplacegamma':
        if lambda_hyperparams is None:
            lambda_hyperparams = (1., 1.)
        if dp_hyperparameter is None:
            dp_hyperparameter = 1.
    elif prior == 'doublepareto' or prior == 'doublepareto2':
        if lambda_hyperparams is None:
            lambda_hyperparams = (1.0, 1.0)
        if dp_hyperparameter is None:
            dp_hyperparameter = 0.1
    elif prior == 'cauchy':
        if lambda_hyperparams is None:
            lambda_hyperparams = (1.0, 1.0)
    else:
        raise Exception('Unknown prior type: {0}.'.format(prior))
    if robust and w_hyperparameters is None:
        w_hyperparameters = (1., 1.)
    # Run the Gibbs sampler. sample_size must be an integer for the array
    # shapes below, so use floor division (true division breaks np.zeros
    # on Python 3).
    sample_size = (iterations - burn) // thin
    beta_samples = np.zeros((sample_size, D.shape[1]), dtype='double')
    lam_samples = np.zeros(sample_size, dtype='double')
    if likelihood == 'gaussian':
        if prior == 'laplace':
            gflbayes_gaussian_laplace(len(y), y, w,
                                      dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                      lambda_hyperparams[0], lambda_hyperparams[1],
                                      iterations, burn, thin,
                                      double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'laplacegamma':
            if robust:
                gflbayes_gaussian_laplace_gamma_robust(len(y), y, w,
                                                       dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                                       lambda_hyperparams[0], lambda_hyperparams[1],
                                                       dp_hyperparameter,
                                                       w_hyperparameters[0], w_hyperparameters[1],
                                                       iterations, burn, thin,
                                                       double_matrix_to_c_pointer(beta_samples), lam_samples)
            else:
                gflbayes_gaussian_laplace_gamma(len(y), y, w,
                                                dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                                lambda_hyperparams[0], lambda_hyperparams[1],
                                                dp_hyperparameter,
                                                iterations, burn, thin,
                                                double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'doublepareto':
            gflbayes_gaussian_doublepareto(len(y), y, w,
                                           dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                           lambda_hyperparams[0], lambda_hyperparams[1],
                                           lam_walk_stdev, lam0, dp_hyperparameter,
                                           iterations, burn, thin,
                                           double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'doublepareto2':
            gflbayes_gaussian_doublepareto2(len(y), y, w,
                                            dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                            lambda_hyperparams[0], lambda_hyperparams[1],
                                            dp_hyperparameter,
                                            iterations, burn, thin,
                                            double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'cauchy':
            gflbayes_gaussian_cauchy(len(y), y, w,
                                     dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                     lambda_hyperparams[0], lambda_hyperparams[1],
                                     lam_walk_stdev, lam0,
                                     iterations, burn, thin,
                                     double_matrix_to_c_pointer(beta_samples), lam_samples)
    elif likelihood == 'binomial':
        if prior == 'laplace':
            gflbayes_binomial_laplace(len(trials), trials, successes,
                                      dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                      lambda_hyperparams[0], lambda_hyperparams[1],
                                      iterations, burn, thin,
                                      double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'doublepareto':
            gflbayes_binomial_doublepareto(len(trials), trials, successes,
                                           dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                           lambda_hyperparams[0], lambda_hyperparams[1],
                                           lam_walk_stdev, lam0, dp_hyperparameter,
                                           iterations, burn, thin,
                                           double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'laplacegamma':
            if empirical:
                gflbayes_empirical_binomial_laplace_gamma(len(trials), trials, successes,
                                                          dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                                          lam0,
                                                          iterations, burn, thin,
                                                          double_matrix_to_c_pointer(beta_samples), lam_samples)
            else:
                gflbayes_binomial_laplace_gamma(len(trials), trials, successes,
                                                dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                                lambda_hyperparams[0], lambda_hyperparams[1],
                                                dp_hyperparameter,
                                                iterations, burn, thin,
                                                double_matrix_to_c_pointer(beta_samples), lam_samples)
    elif likelihood == 'poisson':
        if prior == 'laplace':
            gflbayes_poisson_laplace(len(obs), obs,
                                     dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                     lambda_hyperparams[0], lambda_hyperparams[1],
                                     iterations, burn, thin,
                                     double_matrix_to_c_pointer(beta_samples), lam_samples)
        elif prior == 'doublepareto':
            gflbayes_poisson_doublepareto(len(obs), obs,
                                          dk_rows, dk_rowbreaks, dk_cols, dk_vals,
                                          lambda_hyperparams[0], lambda_hyperparams[1],
                                          lam_walk_stdev, lam0, dp_hyperparameter,
                                          iterations, burn, thin,
                                          double_matrix_to_c_pointer(beta_samples), lam_samples)
    else:
        # Unreachable (likelihood validated above); kept as a guard.
        raise Exception('Unknown likelihood type: {0}'.format(likelihood))
    return (beta_samples, lam_samples)
|
Generate samples from the generalized graph trend filtering distribution via a modified Swendsen-Wang slice sampling algorithm.
Options for likelihood: gaussian, binomial, poisson. Options for prior: laplace, doublepareto.
|
entailment
|
def compute_regex_CDR3_template_pgen(self, regex_seq, V_usage_mask_in=None, J_usage_mask_in=None, print_warnings=True, raise_overload_warning=True):
    """Sum Pgen over every CDR3 sequence matching the regular expression.

    Expands the (limited-vocabulary) regular expression regex_seq into its
    full list of CDR3 amino acid sequences and totals their generation
    probabilities, conditioned on the V alleles in V_usage_mask_in and the
    J alleles in J_usage_mask_in. Because every matching sequence is
    enumerated and evaluated individually, THIS CAN BE SLOW — defining a
    custom alphabet symbol for an undetermined position (e.g. ^ = [AGR])
    collapses many sequences into one and is much faster.
    (Examples are TCRB sequences/model)

    Parameters
    ----------
    regex_seq : str
        The regular expression string that represents the CDR3 sequences to be
        listed then their Pgens computed and summed.
    V_usage_mask_in : str or list
        An object to indicate which V alleles should be considered. The default
        input is None which returns the list of all productive V alleles.
    J_usage_mask_in : str or list
        An object to indicate which J alleles should be considered. The default
        input is None which returns the list of all productive J alleles.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.
    raise_overload_warning : bool
        A flag to warn of more than 10000 seqs corresponding to the regex_seq

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence

    Examples
    --------
    >>> generation_probability.compute_regex_CDR3_template_pgen('CASS[AGR]SARPEQFF')
    8.1090898050318022e-10
    >>> generation_probability.compute_regex_CDR3_template_pgen('CASSAX{0,5}SARPEQFF')
    6.8468778040965569e-10
    """
    V_mask, J_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
    candidate_seqs = self.list_seqs_from_regex(regex_seq, print_warnings, raise_overload_warning)
    # Empty strings contribute nothing; everything else accumulates.
    return sum(self.compute_CDR3_pgen(seq, V_mask, J_mask)
               for seq in candidate_seqs if len(seq) > 0)
|
Compute Pgen for all seqs consistent with regular expression regex_seq.
Computes Pgen for a (limited vocabulary) regular expression of CDR3
amino acid sequences, conditioned on the V genes/alleles indicated in
V_usage_mask_in and the J genes/alleles in J_usage_mask_in. Please note
that this function will list out all the sequences that correspond to the
regular expression and then calculate the Pgen of each sequence in
succession. THIS CAN BE SLOW. Consider defining a custom alphabet to
represent any undetermined amino acids as this will greatly speed up the
computations. For example, if the symbol ^ is defined as [AGR] in a custom
alphabet, then instead of running
compute_regex_CDR3_template_pgen('CASS[AGR]SARPEQFF', ppp),
which will compute Pgen for 3 sequences, the single sequence
'CASS^SARPEQFF' can be considered. (Examples are TCRB sequences/model)
Parameters
----------
regex_seq : str
The regular expression string that represents the CDR3 sequences to be
listed then their Pgens computed and summed.
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_regex_CDR3_template_pgen('CASS[AGR]SARPEQFF')
8.1090898050318022e-10
>>> generation_probability.compute_regex_CDR3_template_pgen('CASSAX{0,5}SARPEQFF')
6.8468778040965569e-10
|
entailment
|
def compute_aa_CDR3_pgen(self, CDR3_seq, V_usage_mask_in=None, J_usage_mask_in=None, print_warnings=True):
    """Compute Pgen for the amino acid sequence CDR3_seq.

    Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
    J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' -- the standard amino acids,
        plus any custom symbols for an expanded codon alphabet (note the
        standard ambiguous amino acids -- B, J, X, and Z -- are included by
        default).
    V_usage_mask_in : str or list
        An object to indicate which V alleles should be considered. The default
        input is None which returns the list of all productive V alleles.
    J_usage_mask_in : str or list
        An object to indicate which J alleles should be considered. The default
        input is None which returns the list of all productive J alleles.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence

    Examples
    --------
    >>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF')
    1.5756106696284584e-10
    >>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF', 'TRBV30*01', 'TRBJ1-2*01')
    1.203646865765782e-10
    >>> generation_probability.compute_aa_CDR3_pgen('CAWXXXXXXXGYTF')
    7.8102586432014974e-05
    """
    if len(CDR3_seq) == 0:
        return 0
    for aa in CDR3_seq:
        # Check to make sure all symbols are accounted for (membership
        # directly on the dict -- no need to materialize .keys()).
        if aa not in self.codons_dict:
            if print_warnings:
                print('Invalid amino acid CDR3 sequence --- unfamiliar symbol: ' + aa)
            return 0
    V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
    return self.compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
|
Compute Pgen for the amino acid sequence CDR3_seq.
Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' -- the standard amino acids,
plus any custom symbols for an expanded codon alphabet (note the
standard ambiguous amino acids -- B, J, X, and Z -- are included by
default).
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF')
1.5756106696284584e-10
>>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF', 'TRBV30*01', 'TRBJ1-2*01')
1.203646865765782e-10
>>> generation_probability.compute_aa_CDR3_pgen('CAWXXXXXXXGYTF')
7.8102586432014974e-05
|
entailment
|
def compute_hamming_dist_1_pgen(self, CDR3_seq, V_usage_mask_in=None, J_usage_mask_in=None, print_warnings=True):
    """Compute Pgen of all seqs hamming dist 1 (in amino acids) from CDR3_seq.

    Please note that this function will list out all the
    sequences that are hamming distance 1 from the base sequence and then
    calculate the Pgen of each sequence in succession. THIS CAN BE SLOW
    as it computes Pgen for L+1 sequences where L = len(CDR3_seq). (Examples
    are TCRB sequences/model)

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of amino acids (ONLY the standard amino acids).
        Pgens for all sequences of hamming distance 1 (in amino acid sequence)
        are summed.
    V_usage_mask_in : str or list
        An object to indicate which V alleles should be considered. The default
        input is None which returns the list of all productive V alleles.
    J_usage_mask_in : str or list
        An object to indicate which J alleles should be considered. The default
        input is None which returns the list of all productive J alleles.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.

    Returns
    -------
    pgen : float
        The sum of generation probabilities (Pgens) of the sequences at most
        hamming distance 1 (in amino acids) from CDR3_seq.
    """
    # Make sure that the symbol X is defined as the fully undetermined amino
    # acid: X ~ ACDEFGHIKLMNPQRSTVWY
    V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
    if len(CDR3_seq) == 0:
        return 0
    for aa in CDR3_seq:
        if aa not in 'ACDEFGHIKLMNPQRSTVWY':
            # Check to make sure all symbols are accounted for
            if print_warnings:
                print('Invalid amino acid CDR3 sequence --- unfamiliar symbol: ' + aa)
            return 0
    # Each X-substituted sequence counts the base sequence once, so the
    # base Pgen is over-counted L-1 times and is subtracted back out.
    tot_pgen = 0
    for i in range(len(CDR3_seq)):
        tot_pgen += self.compute_CDR3_pgen(CDR3_seq[:i] + 'X' + CDR3_seq[i+1:], V_usage_mask, J_usage_mask)
    tot_pgen += -(len(CDR3_seq) - 1) * self.compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
    return tot_pgen
|
Compute Pgen of all seqs hamming dist 1 (in amino acids) from CDR3_seq.
Please note that this function will list out all the
sequences that are hamming distance 1 from the base sequence and then
calculate the Pgen of each sequence in succession. THIS CAN BE SLOW
as it computes Pgen for L+1 sequences where L = len(CDR3_seq). (Examples
are TCRB sequences/model)
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of amino acids (ONLY the standard amino acids).
Pgens for all sequences of hamming distance 1 (in amino acid sequence)
are summed.
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float
The sum of generation probabilities (Pgens) of the sequences at most
hamming distance 1 (in amino acids) from CDR3_seq.
|
entailment
|
def compute_nt_CDR3_pgen(self, CDR3_ntseq, V_usage_mask_in=None, J_usage_mask_in=None, print_warnings=True):
    """Compute Pgen for the inframe nucleotide sequence CDR3_ntseq.

    Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
    J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)

    Parameters
    ----------
    CDR3_ntseq : str
        Inframe nucleotide sequence composed of ONLY A, C, G, or T (either
        uppercase or lowercase).
    V_usage_mask_in : str or list
        An object to indicate which V alleles should be considered. The default
        input is None which returns the list of all productive V alleles.
    J_usage_mask_in : str or list
        An object to indicate which J alleles should be considered. The default
        input is None which returns the list of all productive J alleles.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.

    Returns
    -------
    pgen : float64
        The generation probability (Pgen) of the sequence

    Examples
    --------
    >>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
    3.2674893012379071e-12
    >>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC', 'TRBV30*01', 'TRBJ1-2*01')
    2.3986503758867323e-12
    """
    if len(CDR3_ntseq) % 3 != 0:
        # Make sure sequence is inframe
        if print_warnings:
            print('Invalid nucleotide CDR3 sequence --- out of frame sequence')
        return 0
    elif len(CDR3_ntseq) == 0:
        return 0
    else:
        for nt in CDR3_ntseq:
            if nt not in 'ACGTacgt':
                if print_warnings:
                    print('Invalid nucleotide CDR3 sequence --- unfamiliar nucleotide: ' + nt)
                return 0
    V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
    return self.compute_CDR3_pgen(nt2codon_rep(CDR3_ntseq), V_usage_mask, J_usage_mask)
|
Compute Pgen for the inframe nucleotide sequence CDR3_ntseq.
Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)
Parameters
----------
CDR3_ntseq : str
Inframe nucleotide sequence composed of ONLY A, C, G, or T (either
uppercase or lowercase).
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float64
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
3.2674893012379071e-12
>>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC', 'TRBV30*01', 'TRBJ1-2*01')
2.3986503758867323e-12
|
entailment
|
def format_usage_masks(self, V_usage_mask_in, J_usage_mask_in, print_warnings=True):
    """Format raw usage masks into lists of indices.

    Usage masks allows the Pgen computation to be conditioned on the V and J
    gene/allele identities. The inputted masks are lists of strings, or a
    single string, of the names of the genes or alleles to be conditioned on.
    The default mask includes all productive V or J genes.

    Parameters
    ----------
    V_usage_mask_in : str or list
        An object to indicate which V alleles should be considered. The default
        input is None which returns the list of all productive V alleles.
    J_usage_mask_in : str or list
        An object to indicate which J alleles should be considered. The default
        input is None which returns the list of all productive J alleles.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.

    Returns
    -------
    V_usage_mask : list of integers
        Indices of the V alleles to be considered in the Pgen computation
    J_usage_mask : list of integers
        Indices of the J alleles to be considered in the Pgen computation

    Examples
    --------
    >>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01')
    ([34], [0])
    >>> generation_probability.format_usage_masks('TRBV27*01', '')
    ([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13])
    >>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01')
    ([34, 18], [0])
    """
    # Format the V usage mask
    if V_usage_mask_in is None:
        # Default case, use all productive V genes with non-zero probability
        V_usage_mask = self.d_V_usage_mask
    elif isinstance(V_usage_mask_in, list):
        # Union the index sets of every recognized name; skip unknown names.
        e_V_usage_mask = set()
        for v in V_usage_mask_in:
            try:
                e_V_usage_mask = e_V_usage_mask.union(self.V_mask_mapping[v])
            except KeyError:
                if print_warnings:
                    print('Unfamiliar V gene/allele: ' + v)
        if len(e_V_usage_mask) == 0:
            if print_warnings:
                print('No recognized V genes/alleles. Using default V_usage_mask')
            V_usage_mask = self.d_V_usage_mask
        else:
            V_usage_mask = list(e_V_usage_mask)
    else:
        try:
            V_usage_mask = self.V_mask_mapping[V_usage_mask_in]
        except KeyError:
            # Fall back to the default here as the mask would be empty
            if print_warnings:
                print('Unfamiliar V usage mask: ' + str(V_usage_mask_in) + ', please check the allowed V alleles. Using default V_usage_mask')
            V_usage_mask = self.d_V_usage_mask
    # Format the J usage mask
    if J_usage_mask_in is None:
        # Default case, use all productive J genes with non-zero probability
        J_usage_mask = self.d_J_usage_mask
    elif isinstance(J_usage_mask_in, list):
        e_J_usage_mask = set()
        for j in J_usage_mask_in:
            try:
                e_J_usage_mask = e_J_usage_mask.union(self.J_mask_mapping[j])
            except KeyError:
                if print_warnings:
                    print('Unfamiliar J gene/allele: ' + j)
        if len(e_J_usage_mask) == 0:
            if print_warnings:
                print('No recognized J genes/alleles. Using default J_usage_mask')
            J_usage_mask = self.d_J_usage_mask
        else:
            J_usage_mask = list(e_J_usage_mask)
    else:
        try:
            J_usage_mask = self.J_mask_mapping[J_usage_mask_in]
        except KeyError:
            # Fall back to the default here as the mask would be empty
            if print_warnings:
                print('Unfamiliar J usage mask: ' + str(J_usage_mask_in) + ', please check the allowed J alleles. Using default J_usage_mask')
            J_usage_mask = self.d_J_usage_mask
    return V_usage_mask, J_usage_mask
|
Format raw usage masks into lists of indices.
Usage masks allows the Pgen computation to be conditioned on the V and J
gene/allele identities. The inputted masks are lists of strings, or a
single string, of the names of the genes or alleles to be conditioned on.
The default mask includes all productive V or J genes.
Parameters
----------
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
V_usage_mask : list of integers
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list of integers
Indices of the J alleles to be considered in the Pgen computation
Examples
--------
>>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01')
([34], [0])
>>> generation_probability.format_usage_masks('TRBV27*01', '')
([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13])
>>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01')
([34, 18], [0])
|
entailment
|
def list_seqs_from_regex(self, regex_seq, print_warnings=True, raise_overload_warning=True):
    """List sequences that match regular expression template.

    This function parses a limited regular expression vocabulary, and
    lists all the sequences consistent with the regular expression. Supported
    regex syntax: [] and {}. Cannot have two {} in a row. Note we can't use
    Kline star (*) as this is the symbol for a stop codon --- use {}.

    Parameters
    ----------
    regex_seq : str
        The regular expression string that represents the sequences to be
        listed.
    print_warnings : bool
        Determines whether warnings are printed or not. Default ON.
    raise_overload_warning : bool
        A flag to warn of more than 10000 seqs corresponding to the regex_seq

    Returns
    -------
    CDR3_seqs : list
        A list of CDR3 sequences that correspond to the regex_seq

    Examples
    --------
    >>> generation_probability.list_seqs_from_regex('CASS[AGR]SARPEQFF')
    ['CASSGSARPEQFF', 'CASSRSARPEQFF', 'CASSASARPEQFF']
    >>> generation_probability.list_seqs_from_regex('CASSAX{0,5}SARPEQFF')
    ['CASSASARPEQFF',
    'CASSAXXXXSARPEQFF',
    'CASSAXXSARPEQFF',
    'CASSAXXXXXSARPEQFF',
    'CASSAXXXSARPEQFF',
    'CASSAXSARPEQFF']
    """
    aa_symbols = ''.join(self.codons_dict)
    default_max_reps = 40
    # Check to make sure that expression is of the right form/symbols.
    # Identify bracket expressions ([...] classes and {m,n} repeats).
    bracket_pattern = r'\[[' + aa_symbols + r']*?\]|\{\d+,{0,1}\d*\}'
    bracket_ex = re.findall(bracket_pattern, regex_seq)
    split_seq = re.split(bracket_pattern, regex_seq)
    # Check that all remaining characters are in the codon dict
    for aa in ''.join(split_seq):
        if aa not in aa_symbols:
            if print_warnings:
                print('Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary or the regex syntax')
            return []
    # Interleave literal runs with bracket expressions, dropping empty
    # literals. // keeps the indices integral on Python 3.
    regex_list = [split_seq[i // 2] if i % 2 == 0 else bracket_ex[i // 2]
                  for i in range(len(bracket_ex) + len(split_seq))
                  if not (i % 2 == 0 and len(split_seq[i // 2]) == 0)]
    max_num_seqs = 1
    # First pass (right to left): validate, normalize {m,n} into
    # [min, max, symbols] triples, and estimate the expansion size.
    for l, ex in enumerate(regex_list[::-1]):
        i = len(regex_list) - l - 1
        if ex[0] == '[':  # bracket expression
            # check characters
            for aa in ex.strip('[]'):
                if aa not in aa_symbols:
                    if print_warnings:
                        print('Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary')
                    return []
            max_num_seqs *= len(ex) - 2
        elif ex[0] == '{':  # curly bracket
            if i == 0:
                if print_warnings:
                    print("Can't have {} expression at start of sequence")
                return []
            elif isinstance(regex_list[i-1], list):
                if print_warnings:
                    print("Two {} expressions in a row is not supported")
                return []
            elif regex_list[i-1][0] == '[':
                # Repeat applies to the whole preceding character class
                syms = regex_list[i-1].strip('[]')
                regex_list[i-1] = ''
            else:
                # Repeat applies only to the last literal symbol
                syms = regex_list[i-1][-1]
                regex_list[i-1] = regex_list[i-1][:-1]
            if ',' not in ex:
                new_expression = [int(ex.strip('{}')), int(ex.strip('{}')), syms]
                max_num_seqs *= len(syms)**new_expression[0]
            else:
                try:
                    new_expression = [int(ex.strip('{}').split(',')[0]), int(ex.strip('{}').split(',')[1]), syms]
                except ValueError:  # No max limit --- use default
                    new_expression = [int(ex.strip('{}').split(',')[0]), default_max_reps, syms]
                if new_expression[0] > new_expression[1]:
                    if print_warnings:
                        print('Check regex syntax --- should be {min,max}')
                    return []
                max_num_seqs *= sum([len(syms)**n for n in range(new_expression[0], new_expression[1]+1)]) // len(syms)
            regex_list[i] = new_expression
    if max_num_seqs > 10000 and raise_overload_warning:
        if print_warnings:
            # raw_input is Python 2 only; fall back to input on Python 3.
            try:
                _prompt = raw_input
            except NameError:
                _prompt = input
            answer = _prompt('Warning large number of sequences (estimated ' + str(max_num_seqs) + ' seqs) match the regular expression. Possible memory and time issues. Continue? (y/n)')
            if not answer == 'y':
                print('Canceling...')
                return []
        else:
            return []
    # Second pass (right to left): expand into the explicit sequence list.
    CDR3_seqs = ['']
    for l, ex in enumerate(regex_list[::-1]):
        i = len(regex_list) - l - 1
        if isinstance(ex, list):  # curly bracket case
            c_seqs = ['']
            f_seqs = []
            for j in range(ex[1] + 1):
                if j in range(ex[0], ex[1] + 1):
                    f_seqs += c_seqs
                c_seqs = [aa + c_seq for aa in ex[2] for c_seq in c_seqs]
            CDR3_seqs = [f_seq + CDR3_seq for f_seq in f_seqs for CDR3_seq in CDR3_seqs]
        elif len(ex) == 0:
            pass
        elif ex[0] == '[':  # square bracket case
            CDR3_seqs = [aa + CDR3_seq for aa in ex.strip('[]') for CDR3_seq in CDR3_seqs]
        else:
            CDR3_seqs = [ex + CDR3_seq for CDR3_seq in CDR3_seqs]
    return list(set(CDR3_seqs))
|
List sequences that match regular expression template.
This function parses a limited regular expression vocabulary, and
lists all the sequences consistent with the regular expression. Supported
regex syntax: [] and {}. Cannot have two {} in a row. Note we can't use
Kline star (*) as this is the symbol for a stop codon --- use {}.
Parameters
----------
regex_seq : str
The regular expression string that represents the sequences to be
listed.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
CDR3_seqs : list
A list of CDR3 sequences that correspond to the regex_seq
Examples
--------
>>> generation_probability.list_seqs_from_regex('CASS[AGR]SARPEQFF')
['CASSGSARPEQFF', 'CASSRSARPEQFF', 'CASSASARPEQFF']
>>> generation_probability.list_seqs_from_regex('CASSAX{0,5}SARPEQFF')
['CASSASARPEQFF',
'CASSAXXXXSARPEQFF',
'CASSAXXSARPEQFF',
'CASSAXXXXXSARPEQFF',
'CASSAXXXSARPEQFF',
'CASSAXSARPEQFF']
|
entailment
|
def max_nt_to_aa_alignment_left(self, CDR3_seq, ntseq):
    """Find maximum match between CDR3_seq and ntseq from the left.

    Returns the length of the longest nucleotide prefix of ntseq
    (contiguous from the left, i.e. the 5' end) that is consistent with
    the 'amino acid' sequence CDR3_seq.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    ntseq : str
        Genomic (V locus) nucleotide sequence to match.

    Returns
    -------
    max_alignment : int
        Maximum length (in nucleotides) nucleotide sequence that matches the
        CDR3 'amino acid' sequence.

    Example
    --------
    >>> generation_probability.max_nt_to_aa_alignment_left('CASSSEGAGGPSLRGHEQFF', 'TGTGCCAGCAGTTTATCGATA')
    13
    """
    max_alignment = 0
    if len(ntseq) == 0:
        return 0
    #Consume whole codons while each one is consistent with the matching
    #'amino acid'. NOTE: floor division (//) keeps integer indexing under
    #both Python 2 and Python 3 (plain / yields a float index on Py3).
    #A short (<3 nt) trailing slice can never match a codon, so the loop
    #terminates at the end of ntseq; the return below handles running off
    #the end of CDR3_seq.
    while ntseq[max_alignment:max_alignment+3] in self.codons_dict[CDR3_seq[max_alignment//3]]:
        max_alignment += 3
        if max_alignment//3 == len(CDR3_seq): #aligned through the full CDR3
            return max_alignment
    #Try to extend the alignment into the next, partially matched codon
    #using the allowed codon prefixes for that 'amino acid'.
    last_codon = ntseq[max_alignment:max_alignment+3]
    codon_frag = ''
    for nt in last_codon:
        codon_frag += nt
        if codon_frag in self.sub_codons_left[CDR3_seq[max_alignment//3]]:
            max_alignment += 1
        else:
            break
    return max_alignment
|
Find maximum match between CDR3_seq and ntseq from the left.
This function returns the length of the maximum length nucleotide
subsequence of ntseq contiguous from the left (or 5' end) that is
consistent with the 'amino acid' sequence CDR3_seq.
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
ntseq : str
Genomic (V locus) nucleotide sequence to match.
Returns
-------
max_alignment : int
Maximum length (in nucleotides) nucleotide sequence that matches the
CDR3 'amino acid' sequence.
Example
--------
>>> generation_probability.max_nt_to_aa_alignment_left('CASSSEGAGGPSLRGHEQFF', 'TGTGCCAGCAGTTTATCGATA')
13
|
entailment
|
def max_nt_to_aa_alignment_right(self, CDR3_seq, ntseq):
    """Find maximum match between CDR3_seq and ntseq from the right.

    Returns the length of the longest nucleotide suffix of ntseq
    (contiguous from the right, i.e. the 3' end) that is consistent with
    the 'amino acid' sequence CDR3_seq.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    ntseq : str
        Genomic (J locus) nucleotide sequence to match.

    Returns
    -------
    max_alignment : int
        Maximum length (in nucleotides) nucleotide sequence that matches the
        CDR3 'amino acid' sequence.

    Example
    --------
    >>> generation_probability.max_nt_to_aa_alignment_right('CASSSEGAGGPSLRGHEQFF', 'TTCATGAACACTGAAGCTTTCTTT')
    6
    """
    #Work on reversed strings so the match runs left-to-right; codon slices
    #are re-reversed before the lookup so codons_dict keys stay 5'->3'.
    r_CDR3_seq = CDR3_seq[::-1]
    r_ntseq = ntseq[::-1]
    max_alignment = 0
    if len(ntseq) == 0:
        return 0
    #NOTE: floor division (//) keeps integer indexing under both Python 2
    #and Python 3 (plain / yields a float index on Py3). A short (<3 nt)
    #trailing slice can never match a codon, so the loop terminates.
    while r_ntseq[max_alignment:max_alignment+3][::-1] in self.codons_dict[r_CDR3_seq[max_alignment//3]]:
        max_alignment += 3
        if max_alignment//3 == len(CDR3_seq): #aligned through the full CDR3
            return max_alignment
    #Try to extend the alignment into the next, partially matched codon
    #using the allowed codon suffixes (read from 5') for that 'amino acid'.
    r_last_codon = r_ntseq[max_alignment:max_alignment+3]
    codon_frag = ''
    for nt in r_last_codon:
        codon_frag = nt + codon_frag
        if codon_frag in self.sub_codons_right[r_CDR3_seq[max_alignment//3]]:
            max_alignment += 1
        else:
            break
    return max_alignment
|
Find maximum match between CDR3_seq and ntseq from the right.
This function returns the length of the maximum length nucleotide
subsequence of ntseq contiguous from the right (or 3' end) that is
consistent with the 'amino acid' sequence CDR3_seq
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
ntseq : str
Genomic (J locus) nucleotide sequence to match.
Returns
-------
max_alignment : int
Maximum length (in nucleotides) nucleotide sequence that matches the
CDR3 'amino acid' sequence.
Example
--------
>>> generation_probability.max_nt_to_aa_alignment_right('CASSSEGAGGPSLRGHEQFF', 'TTCATGAACACTGAAGCTTTCTTT')
6
|
entailment
|
def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pgen for CDR3 'amino acid' sequence CDR3_seq from VDJ model.

    The computation is conditioned on the (already formatted) V alleles in
    V_usage_mask and J alleles in J_usage_mask. (Examples are TCRB
    sequences/model.)

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence

    Examples
    --------
    >>> compute_CDR3_pgen('CAWSVAPDRGGYTF', ppp, [42], [1])
    1.203646865765782e-10
    >>> compute_CDR3_pgen(nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC'), ppp, [42], [1])
    2.3986503758867323e-12
    >>> compute_CDR3_pgen('\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f', ppp, [42], [1])
    2.3986503758867323e-12
    """
    #Left half: V genomic contribution P(V, delV), then VD (N1) insertions.
    Pi_V, max_V_align = self.compute_Pi_V(CDR3_seq, V_usage_mask)
    Pi_L = self.compute_Pi_L(CDR3_seq, Pi_V, max_V_align)
    #Right half: J genomic contribution conditioned on D (P(D, J, delJ)),
    #then DJ (N2) insertions, then the D genomic contribution
    #(P(delDl, delDr | D)).
    Pi_J_given_D, max_J_align = self.compute_Pi_J_given_D(CDR3_seq, J_usage_mask)
    Pi_JinsDJ_given_D = self.compute_Pi_JinsDJ_given_D(CDR3_seq, Pi_J_given_D, max_J_align)
    Pi_R = self.compute_Pi_R(CDR3_seq, Pi_JinsDJ_given_D)
    #Stitch the two halves together at every possible junction position.
    return sum(np.dot(Pi_L[:, pos], Pi_R[:, pos + 1])
               for pos in range(len(CDR3_seq)*3 - 1))
|
Compute Pgen for CDR3 'amino acid' sequence CDR3_seq from VDJ model.
Conditioned on the already formatted V genes/alleles indicated in
V_usage_mask and the J genes/alleles in J_usage_mask.
(Examples are TCRB sequences/model)
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> compute_CDR3_pgen('CAWSVAPDRGGYTF', ppp, [42], [1])
1.203646865765782e-10
>>> compute_CDR3_pgen(nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC'), ppp, [42], [1])
2.3986503758867323e-12
>>> compute_CDR3_pgen('\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f', ppp, [42], [1])
2.3986503758867323e-12
|
entailment
|
def compute_Pi_V(self, CDR3_seq, V_usage_mask):
    """Compute Pi_V.

    Returns the Pi array from the model factors of the V genomic
    contributions, P(V)*P(delV|V). This corresponds to V_{x_1}.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation

    Uses instance attributes: cutV_genomic_CDR3_segs (V genomic nt seqs
    trimmed to the conserved C, palindromic insertions included),
    PVdelV_nt_pos_vec (per-allele P(V)*P(delV|V) for the first/last
    position of each codon), and PVdelV_2nd_nt_pos_per_aa_vec (same for
    middle-of-codon positions, keyed by 'amino acid').

    Returns
    -------
    Pi_V : ndarray
        (4, 3L) array corresponding to V_{x_1}.
    max_V_align : int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask (0 if the mask yields no usable allele).
    """
    #Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and
    #thus are max_palindrome nts longer than the template. Furthermore, the
    #genomic sequence should be pruned to start at the conserved C.
    Pi_V = np.zeros((4, len(CDR3_seq)*3)) #Aggregate weight for each nt possibility and position
    alignment_lengths = []
    for V_in in V_usage_mask:
        try:
            cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
        except IndexError:
            print('Check provided V usage mask. Contains indicies out of allowed range.')
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
        alignment_lengths += [current_alignment_length]
        current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
        if current_alignment_length > 0:
            #For first and last nt in a codon use PVdelV_nt_pos_vec
            current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
            #Middle nt of each codon depends on the 'amino acid'.
            #Floor division (//) keeps integer indexing under Py2 and Py3.
            for pos in range(1, current_alignment_length, 3):
                current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos//3]][:, pos]
            Pi_V[:, :current_alignment_length] += current_Pi_V[:, :current_alignment_length]
    #Guard: max() of an empty list raises if the mask was empty or entirely
    #out of range -- return 0 alignment (Pi_V is all zeros) instead.
    return Pi_V, max(alignment_lengths + [0])
|
Compute Pi_V.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V)*P(delV|V). This corresponds to V_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(V)*P(delV|V) into the correct form for
a Pi array or V_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V_{x_1} given the 'amino acid'.
Returns
-------
Pi_V : ndarray
(4, 3L) array corresponding to V_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
|
entailment
|
def compute_Pi_L(self, CDR3_seq, Pi_V, max_V_align):
    """Compute Pi_L.

    Returns the Pi array combining the V genomic contribution,
    P(V)*P(delV|V), with the VD (N1) insertions,
    first_nt_bias_insVD(m_1)PinsVD(\ell_{VD})\prod_{i=2}^{\ell_{VD}}Rvd(m_i|m_{i-1}).
    This corresponds to V_{x_1}{M^{x_1}}_{x_2}.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_V : ndarray
        (4, 3L) array corresponding to V_{x_1}.
    max_V_align : int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.

    Uses instance attributes: PinsVD (insertion length distribution),
    first_nt_bias_insVD and zero_nt_bias_insVD ((4,) identity distributions
    of the first inserted nt and of the nt BEFORE the insertion,
    zero_nt_bias_insVD = Rvd^{-1}first_nt_bias_insVD), and the per-'amino
    acid' (4, 4) transfer matrices Tvd (full codon), Svd/Dvd (insertion
    ending in the first/second codon position), lTvd/lDvd (insertion
    starting in the first position).

    Returns
    -------
    Pi_L : ndarray
        (4, 3L) array corresponding to V_{x_1}{M^{x_1}}_{x_2}.
    """
    max_insertions = len(self.PinsVD) - 1 #PinsVD should zero-pad the last few spots
    Pi_L = np.zeros((4, len(CDR3_seq)*3))
    #NOTE: all index divisions below use // so integer indexing is preserved
    #under both Python 2 and Python 3.
    #Case 1: start position is the first nt in a codon
    for init_pos in range(0, max_V_align, 3):
        #Zero insertions
        Pi_L[:, init_pos] += self.PinsVD[0]*Pi_V[:, init_pos]
        #One insertion
        Pi_L[:, init_pos+1] += self.PinsVD[1]*np.dot(self.lDvd[CDR3_seq[init_pos//3]], Pi_V[:, init_pos])
        #Two insertions and compute the base nt vec for the standard loop
        current_base_nt_vec = np.dot(self.lTvd[CDR3_seq[init_pos//3]], Pi_V[:, init_pos])
        Pi_L[0, init_pos+2] += self.PinsVD[2]*np.sum(current_base_nt_vec)
        base_ins = 2
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins += 3
    #Case 2: start position is the second nt in a codon
    for init_pos in range(1, max_V_align, 3):
        #Zero insertions
        Pi_L[:, init_pos] += self.PinsVD[0]*Pi_V[:, init_pos]
        #One insertion --- first compute the p vec by pairwise mult with the
        #first-nt identity distribution
        current_base_nt_vec = np.multiply(Pi_V[:, init_pos], self.first_nt_bias_insVD)
        Pi_L[0, init_pos+1] += self.PinsVD[1]*np.sum(current_base_nt_vec)
        base_ins = 1
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins += 3
    #Case 3: start position is the last nt in a codon
    for init_pos in range(2, max_V_align, 3):
        #Zero insertions
        Pi_L[0, init_pos] += self.PinsVD[0]*Pi_V[0, init_pos]
        #zero_nt_bias_insVD (NOT the steady-state distribution) seeds the
        #Markov chain before the first inserted nt
        current_base_nt_vec = self.zero_nt_bias_insVD*Pi_V[0, init_pos]
        base_ins = 0
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins += 3
    return Pi_L
|
Compute Pi_L.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V)*P(delV|V), and the VD (N1) insertions,
first_nt_bias_insVD(m_1)PinsVD(\ell_{VD})\prod_{i=2}^{\ell_{VD}}Rvd(m_i|m_{i-1}).
This corresponds to V_{x_1}{M^{x_1}}_{x_2}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
Pi_V : ndarray
(4, 3L) array corresponding to V_{x_1}.
max_V_align : int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
self.PinsVD : ndarray
Probability distribution of the VD (N1) insertion sequence length
self.first_nt_bias_insVD : ndarray
(4,) array of the probability distribution of the identity of the
first nucleotide insertion for the VD junction.
self.zero_nt_bias_insVD : ndarray
(4,) array of the probability distribution of the identity of
the nucleotide BEFORE the VD insertion.
zero_nt_bias_insVD = Rvd^{-1}first_nt_bias_insVD
self.Tvd : dict
Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
'amino acid'.
self.Svd : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion ending in the first position.
self.Dvd : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion ending in the second position.
self.lTvd : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion starting in the first position.
self.lDvd : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
VD insertion starting in the first position and ending in the second
position of the same codon.
Returns
-------
Pi_L : ndarray
(4, 3L) array corresponding to V_{x_1}{M^{x_1}}_{x_2}.
|
entailment
|
def compute_Pi_J_given_D(self, CDR3_seq, J_usage_mask):
    """Compute Pi_J conditioned on D.

    Returns the Pi array from the model factors of the D and J genomic
    contributions, P(D, J)*P(delJ|J) = P(D|J)P(J)P(delJ|J). This
    corresponds to J(D)^{x_4}.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Uses instance attributes: cutJ_genomic_CDR3_segs (J genomic nt seqs
    trimmed to the conserved 3' residue, palindromic insertions included),
    PD_given_J (P(D|J)), PJdelJ_nt_pos_vec (per-allele P(J)*P(delJ|J) for
    the first/last position of each codon), and
    PJdelJ_2nd_nt_pos_per_aa_vec (same for middle-of-codon positions,
    keyed by 'amino acid').

    Returns
    -------
    Pi_J_given_D : list
        List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
    max_J_align : int
        Maximum alignment of the CDR3_seq to any genomic J allele allowed by
        J_usage_mask (0 if the mask yields no usable allele).
    """
    #Note, the cutJ_genomic_CDR3_segs INCLUDE the palindromic insertions and
    #thus are max_palindrome nts longer than the template. Furthermore, the
    #genomic sequence should be pruned to end at the conserved J residue.
    num_D_genes = self.PD_given_J.shape[0]
    Pi_J_given_D = [np.zeros((4, len(CDR3_seq)*3)) for i in range(num_D_genes)] #Aggregate weight per nt possibility and position
    alignment_lengths = []
    for J_in in J_usage_mask:
        try:
            cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
        except IndexError:
            #This is the J mask (the previous message incorrectly said 'V usage mask')
            print('Check provided J usage mask. Contains indicies out of allowed range.')
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
        alignment_lengths += [current_alignment_length]
        current_Pi_J = np.zeros((4, len(CDR3_seq)*3))
        if current_alignment_length > 0:
            #For first and last nt in a codon use PJdelJ_nt_pos_vec
            current_Pi_J[:, -current_alignment_length:] = self.PJdelJ_nt_pos_vec[J_in][:, -current_alignment_length:]
            #Middle nt of each codon depends on the 'amino acid'.
            #Floor division (//) floors toward -inf for negative pos, exactly
            #matching Python 2's integer / on these negative indices.
            for pos in range(-2, -current_alignment_length-1, -3):
                current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos//3]][:, pos]
            #Distribute the J contribution over D alleles weighted by P(D|J)
            for D_in, pd_given_j in enumerate(self.PD_given_J[:, J_in]):
                Pi_J_given_D[D_in][:, -current_alignment_length:] += pd_given_j*current_Pi_J[:, -current_alignment_length:]
    #Guard: max() of an empty list raises if the mask was empty or entirely
    #out of range -- return 0 alignment (all-zero arrays) instead.
    return Pi_J_given_D, max(alignment_lengths + [0])
|
Compute Pi_J conditioned on D.
This function returns the Pi array from the model factors of the D and J
genomic contributions, P(D, J)*P(delJ|J) = P(D|J)P(J)P(delJ|J). This
corresponds to J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation.
self.cutJ_genomic_CDR3_segs : list
List of all the J genomic nucleotide sequences trimmed to begin at the
conserved 3' residue (F/W) and with the maximum number of palindromic
insertions appended.
self.PD_given_J : ndarray
Probability distribution of D conditioned on J, i.e. P(D|J).
self.PJdelJ_nt_pos_vec : list of ndarrays
For each J allele, format P(J)*P(delJ|J) into the correct form for
a Pi array or J(D)^{x_4}. This is only done for the first and last
position in each codon.
self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
For each J allele, and each 'amino acid', format P(J)*P(delJ|J) for
positions in the middle of a codon into the correct form for a Pi
array or J(D)^{x_4} given the 'amino acid'.
Returns
-------
Pi_J_given_D : list
List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
max_J_align: int
Maximum alignment of the CDR3_seq to any genomic J allele allowed by
J_usage_mask.
|
entailment
|
def compute_Pi_JinsDJ_given_D(self, CDR3_seq, Pi_J_given_D, max_J_align):
    """Compute Pi_JinsDJ conditioned on D.

    Returns the Pi array combining the J genomic contribution,
    P(D,J)*P(delJ|J), with the DJ (N2) insertions,
    first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1}),
    conditioned on D identity. This corresponds to {N^{x_3}}_{x_4}J(D)^{x_4}.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_J_given_D : list
        List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
    max_J_align : int
        Maximum alignment of the CDR3_seq to any genomic J allele allowed by
        J_usage_mask.

    Uses instance attributes: PinsDJ (insertion length distribution),
    first_nt_bias_insDJ and zero_nt_bias_insDJ ((4,) identity distributions
    of the first inserted nt and of the nt BEFORE the insertion; as the
    Markov model at the DJ junction runs 3' to 5', that is the position
    AFTER the insertions reading left to right), and the per-'amino acid'
    (4, 4) transfer matrices Tdj (full codon), Sdj/Ddj (insertion ending in
    the first/second codon position), rTdj/rDdj (insertion starting in the
    first position).

    Returns
    -------
    Pi_JinsDJ_given_D : list
        List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
    """
    max_insertions = len(self.PinsDJ) - 1 #PinsDJ should zero-pad the last few spots
    Pi_JinsDJ_given_D = [np.zeros((4, len(CDR3_seq)*3)) for i in range(len(Pi_J_given_D))]
    #NOTE: all index divisions below use //, which floors toward -inf on
    #negative positions -- exactly matching Python 2's integer / here.
    for D_in in range(len(Pi_J_given_D)):
        #Case 1: start position is the first nt in a codon (from the right)
        for init_pos in range(-1, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos]
            #One insertion
            Pi_JinsDJ_given_D[D_in][:, init_pos-1] += self.PinsDJ[1]*np.dot(self.rDdj[CDR3_seq[init_pos//3]], Pi_J_given_D[D_in][:, init_pos])
            #Two insertions and compute the base nt vec for the standard loop
            current_base_nt_vec = np.dot(self.rTdj[CDR3_seq[init_pos//3]], Pi_J_given_D[D_in][:, init_pos])
            Pi_JinsDJ_given_D[D_in][0, init_pos-2] += self.PinsDJ[2]*np.sum(current_base_nt_vec)
            base_ins = 2
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins += 3
        #Case 2: start position is the second nt in a codon
        for init_pos in range(-2, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos]
            #One insertion --- first compute the p vec by pairwise mult with
            #the first-nt identity distribution
            current_base_nt_vec = np.multiply(Pi_J_given_D[D_in][:, init_pos], self.first_nt_bias_insDJ)
            Pi_JinsDJ_given_D[D_in][0, init_pos-1] += self.PinsDJ[1]*np.sum(current_base_nt_vec)
            base_ins = 1
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins += 3
        #Case 3: start position is the last nt in a codon
        for init_pos in range(-3, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][0, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][0, init_pos]
            #zero_nt_bias_insDJ (NOT the steady-state distribution) seeds the
            #Markov chain before the first inserted nt
            current_base_nt_vec = self.zero_nt_bias_insDJ*Pi_J_given_D[D_in][0, init_pos]
            base_ins = 0
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins += 3
    return Pi_JinsDJ_given_D
|
Compute Pi_JinsDJ conditioned on D.
This function returns the Pi array from the model factors of the J genomic
contributions, P(D,J)*P(delJ|J), and the DJ (N2) insertions,
first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1})
conditioned on D identity. This corresponds to {N^{x_3}}_{x_4}J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
Pi_J_given_D : ndarray
List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
max_J_align : int
Maximum alignment of the CDR3_seq to any genomic J allele allowed by
J_usage_mask.
self.PinsDJ : ndarray
Probability distribution of the DJ (N2) insertion sequence length
self.first_nt_bias_insDJ : ndarray
(4,) array of the probability distribution of the identity of the
first nucleotide insertion for the DJ junction.
self.zero_nt_bias_insDJ : ndarray
(4,) array of the probability distribution of the identity of
the nucleotide BEFORE the DJ insertion. Note, as the Markov model
at the DJ junction goes 3' to 5' this is the position AFTER the
insertions reading left to right.
self.Tdj : dict
Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
'amino acid'.
self.Sdj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the DJ insertion ending in the first position.
self.Ddj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion ending in the second position.
self.rTdj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the DJ insertion starting in the first position.
self.rDdj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
DJ insertion starting in the first position and ending in the second
position of the same codon.
Returns
-------
Pi_JinsDJ_given_D : list
List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
|
entailment
|
def compute_Pi_R(self, CDR3_seq, Pi_JinsDJ_given_D):
"""Compute Pi_R.
This function returns the Pi array from the model factors of the D and J
genomic contributions, P(D, J)*P(delJ|J)P(delDl, delDr |D) and
the DJ (N2) insertions,
first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1}).
This corresponds to \sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
Pi_JinsDJ_given_D : list
List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
self.cutD_genomic_CDR3_segs : list of strings
List of all the D genomic nucleotide sequences with the maximum number
of palindromic insertions appended on both ends.
self.PD_given_J : ndarray
Probability distribution of D conditioned on J, i.e. P(D|J).
self.PD_nt_pos_vec : list of ndarrays
For each D allele, format P(delDl, delDr|D) into the correct form
for a Pi array as if each position were the first in a codon.
self.PD_2nd_nt_pos_per_aa_vec : list of dicts
For each D allele, and each 'amino acid', format P(delDl, delDr|D)
for positions in the middle of a codon into the correct form for a
Pi array as if each position were the middle of a codon
corresponding to the 'amino acid'.
self.min_delDl_given_DdelDr : list of lists
minimum delDl for each delDr, D combination.
self.max_delDl_given_DdelDr : list of lists
maximum delDl for each delDr, D combination.
self.PdelDldelDr_given_D : ndarray
Joint probability distribution of the D deletions given the D allele,
i.e. P(delDl, delDr |D)
self.zeroD_given_D : list of floats
The probability that a given D allele is fully deleted away.
self.codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
self.sub_codons_right : dict
Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
each codon in an 'amino acid' grouping
Returns
-------
Pi_L : ndarray
(4, 3L) array corresponding to
\sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.
"""
#Need to consider all D alignments from all possible positions and right deletions.
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
#n_aaseq = [aa_dict[aa] for aa in CDR3_seq]
Pi_R = np.zeros((4, len(CDR3_seq)*3))
min_pos = -len(CDR3_seq)*3
num_dell_pos, num_delr_pos, num_D_genes = self.PdelDldelDr_given_D.shape
for D_in, cutD_gen_seg in enumerate(self.cutD_genomic_CDR3_segs):
l_D_seg = len(cutD_gen_seg)
#start position is first nt in a codon
for init_pos in range(-1,-len(CDR3_seq)*3-1,-3):
Pi_R[:, init_pos] += Pi_JinsDJ_given_D[D_in][:, init_pos]*self.zeroD_given_D[D_in]
second_pos_dict = {'A': np.zeros(4), 'C': np.zeros(4), 'G': np.zeros(4), 'T': np.zeros(4)}
codon_prefix_dict = {}
for last_nt in 'ACGT':
for second_nt in 'ACGT':
codon_prefix_dict[last_nt + second_nt] = np.zeros(4)
#for first_nt in ['ACGT'[nt] for nt in range(4) if Pi_JinsDJ_given_D[D_in][nt, init_pos] > 0]:
for first_nt in 'ACGT':
if last_nt + second_nt + first_nt in self.codons_dict[CDR3_seq[init_pos/3]]: #possible allowed codon
second_pos_dict[second_nt][nt2num[last_nt]] += Pi_JinsDJ_given_D[D_in][nt2num[first_nt], init_pos] #base weight for middle pos nt
codon_prefix_dict[last_nt + second_nt][0] += Pi_JinsDJ_given_D[D_in][nt2num[first_nt], init_pos] #base weight for last pos nt
for nt1 in 'ACGT':
if np.sum(second_pos_dict[nt1]) == 0:
second_pos_dict.pop(nt1, None)
for nt2 in 'ACGT':
if np.sum(codon_prefix_dict[nt1+nt2])== 0:
codon_prefix_dict.pop(nt1+nt2, None)
# if len(second_pos_dict)> 0:
# print second_pos_dict
# return -1
for delDr in range(num_delr_pos):
if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
continue
#Check if first nt from the D segment is okay
if cutD_gen_seg[l_D_seg - delDr - 1] in second_pos_dict.keys():
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if l_D_seg - delDr - 1 <= self.max_delDl_given_DdelDr[D_in][delDr]:
Pi_R[:, init_pos - 1] += self.PdelDldelDr_given_D[l_D_seg - delDr - 1, delDr, D_in]*second_pos_dict[cutD_gen_seg[l_D_seg - delDr - 1]]
else:
continue #not okay, reject the alignment
#Check if the second nt from the D segment is okay
if cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr] in codon_prefix_dict.keys():
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if l_D_seg - delDr - 2 <= self.max_delDl_given_DdelDr[D_in][delDr]:
Pi_R[0, init_pos - 2] += self.PdelDldelDr_given_D[l_D_seg - delDr - 2, delDr, D_in]*codon_prefix_dict[cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr]][0]
base_prob = codon_prefix_dict[cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr]][0]
else:
continue #no longer aligned, move to next delDr
#Enter main loop
for pos in range(init_pos - 3, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
#note delDl = D_pos
D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
current_PdelDldelDr = 0
else:
current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
#Position is the first nt in codon
if pos%3 == 2:
#check alignment
if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
else:
break #no longer aligned -- exit loop
#Position is the second nt in codon
elif pos%3 == 1:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos/3]][ :, D_pos]
else:
break #no longer aligned --- exit loop
#Position is the last nt in codon
else:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos/3]]:
Pi_R[0, pos] += current_PdelDldelDr*base_prob
else:
break #no longer aligned --- exit loop
#start position is second nt in a codon
for init_pos in range(-2,-len(CDR3_seq)*3-1,-3):
Pi_R[:, init_pos] += Pi_JinsDJ_given_D[D_in][:, init_pos]*self.zeroD_given_D[D_in]
allowed_final_nts = ['ACGT'[nt] for nt in range(4) if Pi_JinsDJ_given_D[D_in][nt, init_pos] > 0]
for delDr in range(num_delr_pos):
if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
continue
#check first nt of the D region (last in the codon)
if cutD_gen_seg[l_D_seg - delDr - 1] in allowed_final_nts: #first nt match
base_prob = Pi_JinsDJ_given_D[D_in][nt2num[cutD_gen_seg[l_D_seg - delDr - 1]], init_pos]
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if l_D_seg - delDr - 1 <= self.max_delDl_given_DdelDr[D_in][delDr]:
Pi_R[0, init_pos-1] += self.PdelDldelDr_given_D[l_D_seg - delDr - 1, delDr, D_in]*base_prob
else:
continue #no alignment
#Enter main loop
for pos in range(init_pos - 2, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
#note delDl = D_pos
D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
current_PdelDldelDr = 0
else:
current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
#Position is the first nt in codon
if pos%3 == 2:
#check alignment
if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
else:
break #no longer aligned -- exit loop
#Position is the second nt in codon
elif pos%3 == 1:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos/3]][ :, D_pos]
else:
break #no longer aligned --- exit loop
#Position is the last nt in codon
else:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos/3]]:
Pi_R[0, pos] += current_PdelDldelDr*base_prob
else:
break #no longer aligned --- exit loop
#start position is last nt in a codon
for init_pos in range(-3,-len(CDR3_seq)*3-1,-3):
Pi_R[0, init_pos] += Pi_JinsDJ_given_D[D_in][0, init_pos]*self.zeroD_given_D[D_in]
for delDr in range(num_delr_pos):
if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
continue
base_prob = Pi_JinsDJ_given_D[D_in][0, init_pos]
for pos in range(init_pos - 1, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
#note delDl = D_pos
D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
#The dell pos may be out of range of the PdelDldelDr_given_D -- check!
if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
current_PdelDldelDr = 0
else:
current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
#Position is the first nt in codon
if pos%3 == 2:
#check alignment
if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
else:
break #no longer aligned -- exit loop
#Position is the second nt in codon
elif pos%3 == 1:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos/3]]:
Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos/3]][ :, D_pos]
else:
break #no longer aligned --- exit loop
#Position is the last nt in codon
else:
#check alignment
if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos/3]]:
Pi_R[0, pos] += current_PdelDldelDr*base_prob
else:
break #no longer aligned --- exit loop
return Pi_R
|
Compute Pi_R.
This function returns the Pi array from the model factors of the D and J
genomic contributions, P(D, J)*P(delJ|J)P(delDl, delDr |D) and
the DJ (N2) insertions,
first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1}).
This corresponds to \sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
Pi_JinsDJ_given_D : list
List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
self.cutD_genomic_CDR3_segs : list of strings
List of all the D genomic nucleotide sequences with the maximum number
of palindromic insertions appended on both ends.
self.PD_given_J : ndarray
Probability distribution of D conditioned on J, i.e. P(D|J).
self.PD_nt_pos_vec : list of ndarrays
For each D allele, format P(delDl, delDr|D) into the correct form
for a Pi array as if each position were the first in a codon.
self.PD_2nd_nt_pos_per_aa_vec : list of dicts
For each D allele, and each 'amino acid', format P(delDl, delDr|D)
for positions in the middle of a codon into the correct form for a
Pi array as if each position were the middle of a codon
corresponding to the 'amino acid'.
self.min_delDl_given_DdelDr : list of lists
minimum delDl for each delDr, D combination.
self.max_delDl_given_DdelDr : list of lists
maximum delDl for each delDr, D combination.
self.PdelDldelDr_given_D : ndarray
Joint probability distribution of the D deletions given the D allele,
i.e. P(delDl, delDr |D)
self.zeroD_given_D : list of floats
The probability that a given D allele is fully deleted away.
self.codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
self.sub_codons_right : dict
Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
each codon in an 'amino acid' grouping
Returns
-------
Pi_L : ndarray
(4, 3L) array corresponding to
\sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.
|
entailment
|
def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pgen for CDR3 'amino acid' sequence CDR3_seq from a VJ model.

    The computation is conditioned on the (already formatted) V gene/allele
    indices given in V_usage_mask and the J gene/allele indices given in
    J_usage_mask.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation.
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence.

    Examples
    --------
    >>> compute_CDR3_pgen('CAVKIQGAQKLVF', ppp, [72], [56])
    4.1818202431143785e-07
    >>> compute_CDR3_pgen(nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC'), ppp, [42], [1])
    1.3971676613008565e-08
    >>> compute_CDR3_pgen('\xbb\xb6\xbe\x80\xbc\xa1\x8a\x96\xa1\xa0\xad\x8e\xbf', ppp, [72], [56])
    1.3971676613008565e-08
    """
    # J-side genomic contribution P(delJ | J); also drops J genes with no
    # usable alignment (reduced mask r_J_usage_mask).
    Pi_J, r_J_usage_mask = self.compute_Pi_J(CDR3_seq, J_usage_mask)
    # V-side genomic contribution P(V, J, delV), conditioned on the
    # surviving J genes.
    Pi_V_given_J, max_V_align = self.compute_Pi_V_given_J(CDR3_seq, V_usage_mask, r_J_usage_mask)
    # Fold the VJ insertion factors (R and PinsVJ) into the V-side
    # contribution.
    Pi_V_insVJ_given_J = self.compute_Pi_V_insVJ_given_J(CDR3_seq, Pi_V_given_J, max_V_align)
    # Contract the left (V + insertions) and right (J) Pi arrays at every
    # junction position, summed over the surviving J genes.
    num_nt_pos = len(CDR3_seq) * 3
    pgen = 0.
    for left_pi, right_pi in zip(Pi_V_insVJ_given_J, Pi_J):
        for pos in range(num_nt_pos - 1):
            pgen += np.dot(left_pi[:, pos], right_pi[:, pos + 1])
    return pgen
|
Compute Pgen for CDR3 'amino acid' sequence CDR3_seq from VJ model.
Conditioned on the already formatted V genes/alleles indicated in
V_usage_mask and the J genes/alleles in J_usage_mask.
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> compute_CDR3_pgen('CAVKIQGAQKLVF', ppp, [72], [56])
4.1818202431143785e-07
>>> compute_CDR3_pgen(nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC'), ppp, [42], [1])
1.3971676613008565e-08
>>> compute_CDR3_pgen('\xbb\xb6\xbe\x80\xbc\xa1\x8a\x96\xa1\xa0\xad\x8e\xbf', ppp, [72], [56])
1.3971676613008565e-08
|
entailment
|
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
"""Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
"""
#Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
#furthermore, the genomic sequence should be pruned to start at the conserved C
Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask] #Holds the aggregate weight for each nt possiblity and position
alignment_lengths = []
for V_in in V_usage_mask:
try:
cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
except IndexError:
print 'Check provided V usage mask. Contains indicies out of allowed range.'
continue
current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
alignment_lengths += [current_alignment_length]
current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
if current_alignment_length > 0:
#For first and last nt in a codon use PVdelV_nt_pos_vec
current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec
current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos/3]][:, pos]
for j, J_in in enumerate(J_usage_mask):
Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]
return Pi_V_given_J, max(alignment_lengths)
|
Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
|
entailment
|
def compute_Pi_V_insVJ_given_J(self, CDR3_seq, Pi_V_given_J, max_V_align):
    """Compute Pi_V_insVJ conditioned on J.
    This function returns the Pi array from the model factors of the V genomic
    contributions, P(V, J)*P(delV|V), and the VJ (N) insertions,
    first_nt_bias_insVJ(m_1)PinsVJ(\ell_{VJ})\prod_{i=2}^{\ell_{VJ}}Rvj(m_i|m_{i-1}).
    This corresponds to V(J)_{x_1}{M^{x_1}}_{x_2}.
    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'
    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_V_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
    max_V_align : int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.
    self.PinsVJ : ndarray
        Probability distribution of the VJ insertion sequence length
    self.first_nt_bias_insVJ : ndarray
        (4,) array of the probability distribution of the identity of the
        first nucleotide insertion for the VJ junction.
    self.zero_nt_bias_insVJ : ndarray
        (4,) array of the probability distribution of the identity of
        the nucleotide BEFORE the VJ insertion.
        zero_nt_bias_insVJ = Rvj^{-1}first_nt_bias_insVJ
    self.Tvj : dict
        Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
        'amino acid'.
    self.Svj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VJ insertion ending in the first position.
    self.Dvj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VJ insertion ending in the second position.
    self.lTvj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VJ insertion starting in the first position.
    self.lDvj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        VJ insertion starting in the first position and ending in the second
        position of the same codon.
    Returns
    -------
    Pi_V_insVJ_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}{M^{x_1}}_{x_2}.

    Notes
    -----
    NOTE(review): the indexing CDR3_seq[init_pos/3] relies on Python 2
    integer division (init_pos >= 0 here); under Python 3 '/' would yield a
    float index -- confirm before porting.
    """
    #max_insertions = 30 #len(PinsVJ) - 1 should zeropad the last few spots
    max_insertions = len(self.PinsVJ) - 1
    # One accumulator per J gene, same (4, 3L) layout as Pi_V_given_J.
    Pi_V_insVJ_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in range(len(Pi_V_given_J))]
    for j in range(len(Pi_V_given_J)):
        # The three loops below handle the three codon phases at which the
        # V contribution can end; the insertion recursion differs in how the
        # first one or two inserted nucleotides complete the current codon.
        #start position is first nt in a codon
        for init_pos in range(0, max_V_align, 3):
            #Zero insertions
            Pi_V_insVJ_given_J[j][:, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][:, init_pos]
            #One insertion
            Pi_V_insVJ_given_J[j][:, init_pos+1] += self.PinsVJ[1]*np.dot(self.lDvj[CDR3_seq[init_pos/3]], Pi_V_given_J[j][:, init_pos])
            #Two insertions and compute the base nt vec for the standard loop
            current_base_nt_vec = np.dot(self.lTvj[CDR3_seq[init_pos/3]], Pi_V_given_J[j][:, init_pos])
            Pi_V_insVJ_given_J[j][0, init_pos+2] += self.PinsVJ[2]*np.sum(current_base_nt_vec)
            base_ins = 2
            #Loop over all other insertions using base_nt_vec
            # Each full codon of insertions extends base_ins by 3; S/D/T
            # transfer matrices cover the 1-, 2-, and 3-nt extensions.
            for aa in CDR3_seq[init_pos/3 + 1: init_pos/3 + max_insertions/3]:
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+1] += self.PinsVJ[base_ins + 1]*np.dot(self.Svj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+2] += self.PinsVJ[base_ins + 2]*np.dot(self.Dvj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tvj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][0, init_pos+base_ins+3] += self.PinsVJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
        #start position is second nt in a codon
        for init_pos in range(1, max_V_align, 3):
            #Zero insertions
            Pi_V_insVJ_given_J[j][:, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][:, init_pos]
            #One insertion --- we first compute our p vec by pairwise mult with the ss distr
            current_base_nt_vec = np.multiply(Pi_V_given_J[j][:, init_pos], self.first_nt_bias_insVJ)
            Pi_V_insVJ_given_J[j][0, init_pos+1] += self.PinsVJ[1]*np.sum(current_base_nt_vec)
            base_ins = 1
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos/3 + 1: init_pos/3 + max_insertions/3]:
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+1] += self.PinsVJ[base_ins + 1]*np.dot(self.Svj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+2] += self.PinsVJ[base_ins + 2]*np.dot(self.Dvj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tvj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][0, init_pos+base_ins+3] += self.PinsVJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
        #start position is last nt in a codon
        for init_pos in range(2, max_V_align, 3):
            #Zero insertions
            Pi_V_insVJ_given_J[j][0, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][0, init_pos]
            #current_base_nt_vec = first_nt_bias_insVJ*Pi_V_given_J[j][0, init_pos] #Okay for steady state
            # zero_nt_bias_insVJ is used (rather than first_nt_bias_insVJ) so
            # the transfer-matrix recursion starts from the distribution of
            # the nt BEFORE the insertion (see docstring).
            current_base_nt_vec = self.zero_nt_bias_insVJ*Pi_V_given_J[j][0, init_pos]
            base_ins = 0
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos/3 + 1: init_pos/3 + max_insertions/3]:
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+1] += self.PinsVJ[base_ins + 1]*np.dot(self.Svj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][:, init_pos+base_ins+2] += self.PinsVJ[base_ins + 2]*np.dot(self.Dvj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tvj[aa], current_base_nt_vec)
                Pi_V_insVJ_given_J[j][0, init_pos+base_ins+3] += self.PinsVJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
    return Pi_V_insVJ_given_J
|
Compute Pi_V_insVJ conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V), and the VJ (N) insertions,
first_nt_bias_insVJ(m_1)PinsVJ(\ell_{VJ})\prod_{i=2}^{\ell_{VJ}}Rvj(m_i|m_{i-1}).
This corresponds to V(J)_{x_1}{M^{x_1}}_{x_2}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
Pi_V_given_J : ndarray
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align : int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
self.PinsVJ : ndarray
Probability distribution of the VJ insertion sequence length
self.first_nt_bias_insVJ : ndarray
(4,) array of the probability distribution of the identity of the
first nucleotide insertion for the VJ junction.
self.zero_nt_bias_insVJ : ndarray
(4,) array of the probability distribution of the identity of
the nucleotide BEFORE the VJ insertion.
zero_nt_bias_insVJ = Rvj^{-1}first_nt_bias_insVJ
self.Tvj : dict
Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
'amino acid'.
self.Svj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion ending in the first position.
self.Dvj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion ending in the second position.
self.lTvj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
the VD insertion starting in the first position.
self.lDvj : dict
Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
VD insertion starting in the first position and ending in the second
position of the same codon.
Returns
-------
Pi_V_insVJ_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}{M^{x_1}}_{x_2}.
|
entailment
|
def compute_Pi_J(self, CDR3_seq, J_usage_mask):
    """Compute Pi_J.
    This function returns the Pi array from the model factors of the J genomic
    contributions, P(delJ|J). This corresponds to J(D)^{x_4}.
    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'
    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation
    self.cutJ_genomic_CDR3_segs : list of strings
        List of all the J genomic nucleotide sequences trimmed to begin at the
        conserved 3' residue (F/W) and with the maximum number of palindromic
        insertions appended.
    self.PJdelJ_nt_pos_vec : list of ndarrays
        For each J allele, format P(delJ|J) into the correct form for a Pi
        array or J^{x_2}. This is only done for the first and last position
        in each codon.
    self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
        For each J allele, and each 'amino acid', format P(delJ|J) for
        positions in the middle of a codon into the correct form for a Pi
        array or J^{x_2} given the 'amino acid'.
    Returns
    -------
    Pi_J : ndarray
        (4, 3L) array corresponding to J^{x_4}.
    r_J_usage_mask: list
        Reduced J_usage mask. J genes/alleles with no contribution (bad
        alignment) are removed from the mask. This is done to speed up the
        computation on the V side (which must be done conditioned on the J).
    """
    #Note, the cutJ_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
    #furthermore, the genomic sequence should be pruned to start at a conserved region on the J side
    Pi_J = [] #Holds the aggregate weight for each nt possibility and position
    r_J_usage_mask = []
    for j, J_in in enumerate(J_usage_mask):
        try:
            cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
        except IndexError:
            # Out-of-range index in the usage mask: warn and skip this entry.
            print 'Check provided J usage mask. Contains indicies out of allowed range.'
            continue
        # Length (in nts, from the right end of CDR3_seq) over which this J
        # segment is compatible with the CDR3 'amino acid' sequence.
        current_alignment_length = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
        #alignment_lengths += [current_alignment_length]
        current_Pi_J = np.zeros((4, len(CDR3_seq)*3))
        if current_alignment_length > 0:
            #For first and last nt in a codon use PJdelJ_nt_pos_vec
            current_Pi_J[:, -current_alignment_length:] = self.PJdelJ_nt_pos_vec[J_in][:, -current_alignment_length:]
            for pos in range(-2, -current_alignment_length-1, -3): #for middle nt use PJdelJ_2nd_nt_pos_per_aa_vec
                # NOTE(review): pos is negative; pos/3 relies on Python 2
                # floor division (e.g. -2/3 == -1) to select the 'amino acid'
                # counted from the right -- confirm before porting to Python 3.
                current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos/3]][:, pos]
        # Keep only J genes that produced a nonzero contribution.
        if np.sum(current_Pi_J) > 0:
            Pi_J.append(current_Pi_J)
            r_J_usage_mask.append(J_in)
    return Pi_J, r_J_usage_mask
|
Compute Pi_J.
This function returns the Pi array from the model factors of the J genomic
contributions, P(delJ|J). This corresponds to J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutJ_genomic_CDR3_segs : list of strings
List of all the J genomic nucleotide sequences trimmed to begin at the
conserved 3' residue (F/W) and with the maximum number of palindromic
insertions appended.
self.PJdelJ_nt_pos_vec : list of ndarrays
For each J allele, format P(delJ|J) into the correct form for a Pi
array or J^{x_2}. This is only done for the first and last position
in each codon.
self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
For each J allele, and each 'amino acid', format P(delJ|J) for
positions in the middle of a codon into the correct form for a Pi
array or J^{x_2} given the 'amino acid'.
Returns
-------
Pi_J : ndarray
(4, 3L) array corresponding to J^{x_4}.
r_J_usage_mask: list
Reduced J_usage mask. J genes/alleles with no contribution (bad
alignment) are removed from the mask. This is done to speed up the
computation on the V side (which must be done conditioned on the J).
|
entailment
|
def solve(self, lam):
    '''Solve the GFL (Gaussian loss) at a single fixed lambda.

    Passes the current self.beta and dual variable self.u buffers to the
    C solver, appends the number of solver steps taken to self.steps, and
    returns the updated self.beta.
    '''
    D = self.Dk
    steps_taken = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
                                   D.shape[0], D.shape[1], D.nnz,
                                   D.row.astype('int32'),
                                   D.col.astype('int32'),
                                   D.data.astype('double'),
                                   self.maxsteps, self.converge,
                                   self.beta, self.u)
    self.steps.append(steps_taken)
    return self.beta
|
Solves the GFL for a fixed value of lambda.
|
entailment
|
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.

    Solves the GFL over a log-spaced grid of `lambda_bins` values running
    from max_lambda down to min_lambda, with warm starts between
    consecutive solves (solver state is carried in self.beta and self.u).
    Every fit is scored with AIC, AICc and BIC; the best model is selected
    by BIC.

    Parameters
    ----------
    min_lambda : float
        Smallest lambda on the grid (must be > 0: the grid is log-spaced).
    max_lambda : float
        Largest lambda on the grid (solved first).
    lambda_bins : int
        Number of lambda values on the grid.
    verbose : int, optional
        If nonzero, print per-lambda progress information.

    Returns
    -------
    dict
        Keys: 'aic', 'aicc', 'bic', 'dof', 'loglikelihood' (per-lambda
        traces), 'beta' (array of all solutions), 'lambda' (the grid),
        'best_idx' (index of the BIC-optimal lambda), 'best' (its beta)
        and 'plateaus' (its plateaus / nearly-unique value sets).

    Side effects: resets the dual variable self.u to zeros and, on the
    first call, builds the node adjacency map self.edges from self.D.
    '''
    self.u = np.zeros(self.Dk.shape[0], dtype='double')
    # Log-spaced grid ordered from largest to smallest lambda, so each
    # solution warm-starts the next (less regularized) problem.
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None
    # Lazily build the undirected adjacency map from self.D. Each row of D
    # is assumed to encode one edge's two endpoint node indices -- TODO
    # confirm against how self.D is constructed.
    if self.edges is None:
        self.edges = defaultdict(list)
        elist = csr_matrix(self.D).indices.reshape((self.D.shape[0], 2))
        for n1, n2 in elist:
            self.edges[n1].append(n2)
            self.edges[n2].append(n1)
    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))
        # Fit to the final values
        beta = self.solve(lam)
        if verbose:
            print('Calculating degrees of freedom')
        # Count the number of free parameters in the grid (dof) -- TODO: the graph trend filtering paper seems to imply we shouldn't multiply by (k+1)?
        dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
        # Even k: count connected plateaus over the graph; odd k: count
        # nearly-unique values. NOTE(review): the rel_tol constants
        # (0.01 / 0.03) look empirical -- confirm they suit the data scale.
        plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
        dof_trace[i] = max(1,len(plateaus)) #* (k+1)
        if verbose:
            print('Calculating Information Criteria')
        # Get the negative log-likelihood (squared-error loss, up to constants)
        log_likelihood_trace[i] = -0.5 * ((self.y - beta)**2).sum()
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}\n'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta': np.array(beta_trace),
            'lambda': lambda_grid,
            'best_idx': best_idx,
            'best': beta_trace[best_idx],
            'plateaus': best_plateaus}
|
Follows the solution path to find the best lambda value.
|
entailment
|
def solve(self, lam):
    '''Solves the GFL for a fixed value of lambda.'''
    # Flatten the sparse penalty matrix into the COO triplet form that the
    # C solver expects; argument order matters for the extension call.
    penalty = self.Dk
    rows = penalty.row.astype('int32')
    cols = penalty.col.astype('int32')
    vals = penalty.data.astype('double')
    steps_taken = weighted_graphtf_logit(self.nnodes, self.trials, self.successes, lam,
                                         penalty.shape[0], penalty.shape[1], penalty.nnz,
                                         rows, cols, vals,
                                         self.maxsteps, self.converge,
                                         self.beta, self.u)
    # Record the iteration count for diagnostics; beta/u were updated in place
    # by the solver, enabling warm starts on the next call.
    self.steps.append(steps_taken)
    return self.beta
|
Solves the GFL for a fixed value of lambda.
|
entailment
|
def solve(self, lam):
    '''Solves the GFL for a fixed value of lambda.'''
    # Unpack the sparse penalty matrix into COO triplets for the C solver.
    penalty = self.Dk
    rows = penalty.row.astype('int32')
    cols = penalty.col.astype('int32')
    vals = penalty.data.astype('double')
    steps_taken = weighted_graphtf_poisson(self.nnodes, self.obs, lam,
                                           penalty.shape[0], penalty.shape[1], penalty.nnz,
                                           rows, cols, vals,
                                           self.maxsteps, self.converge,
                                           self.beta, self.u)
    # beta/u are updated in place by the solver; keep the step count for diagnostics.
    self.steps.append(steps_taken)
    return self.beta
|
Solves the GFL for a fixed value of lambda.
|
entailment
|
def make_V_and_J_mask_mapping(self, genV, genJ):
    """Constructs the V and J mask mapping dictionaries.

    Each allele name maps to its own index; each gene name (the allele name
    up to the '*') and each short gene name (the gene name with everything up
    to and including the first segment letter stripped) map to the indices of
    all of their alleles.

    Parameters
    ----------
    genV : list
        List of genomic V information.
    genJ : list
        List of genomic J information.
    """
    def build_mapping(allele_names, segment_letter):
        # Pre-seed empty lists for every gene-level and short-name key so the
        # appends in the second pass never hit a missing key.
        mapping = {}
        for name in allele_names:
            gene = name.split('*')[0]
            short = segment_letter.join(gene.split(segment_letter)[1:])
            mapping[gene] = []
            mapping[short] = []
        for index, name in enumerate(allele_names):
            gene = name.split('*')[0]
            short = segment_letter.join(gene.split(segment_letter)[1:])
            # Full allele names are unique, so each maps to exactly one index.
            mapping[name] = [index]
            mapping[short].append(index)
            mapping[gene].append(index)
        return mapping

    V_allele_names = [V[0] for V in genV]
    J_allele_names = [J[0] for J in genJ]
    self.V_allele_names = V_allele_names
    self.V_mask_mapping = build_mapping(V_allele_names, 'V')
    self.J_allele_names = J_allele_names
    self.J_mask_mapping = build_mapping(J_allele_names, 'J')
|
Constructs the V and J mask mapping dictionaries.
Parameters
----------
genV : list
List of genomic V information.
genJ : list
List of genomic J information.
|
entailment
|
def preprocess_D_segs(self, generative_model, genomic_data):
    """Process P(delDl, delDr|D) into Pi arrays.
    Sets the attributes PD_nt_pos_vec, PD_2nd_nt_pos_per_aa_vec,
    min_delDl_given_DdelDr, max_delDl_given_DdelDr, and zeroD_given_D.
    Parameters
    ----------
    generative_model : GenerativeModelVDJ
        VDJ generative model class containing the model parameters.
    genomic_data : GenomicDataVDJ
        VDJ genomic data class containing the V, D, and J germline
        sequences and info.
    """
    cutD_genomic_CDR3_segs = genomic_data.cutD_genomic_CDR3_segs
    # Fixed nucleotide -> row-index encoding used by all the Pi arrays.
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    num_dell_pos, num_delr_pos, num_D_genes = generative_model.PdelDldelDr_given_D.shape
    #These arrays only include the nt identity information, not the PdelDldelDr_given_D info
    PD_nt_pos_vec = [[]]*num_D_genes
    PD_2nd_nt_pos_per_aa_vec = [[]]*num_D_genes
    for D_in in range(num_D_genes):
        # 0/1 indicator of which nucleotide occupies each position of this D segment.
        current_PD_nt_pos_vec = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))
        current_PD_2nd_nt_pos_per_aa_vec = {}
        for aa in self.codons_dict.keys():
            current_PD_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))
        for pos, nt in enumerate(cutD_genomic_CDR3_segs[D_in]):
            current_PD_nt_pos_vec[nt2num[nt], pos] = 1
            for ins_nt in 'ACGT':
                for aa in self.codons_dict.keys():
                    # Mark ins_nt as a valid preceding (inserted) nucleotide if it
                    # completes a codon for aa together with the next two D-segment nts.
                    if ins_nt + cutD_genomic_CDR3_segs[D_in][pos:pos+2] in self.codons_dict[aa]:
                        current_PD_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = 1
        PD_nt_pos_vec[D_in] = current_PD_nt_pos_vec
        PD_2nd_nt_pos_per_aa_vec[D_in] = current_PD_2nd_nt_pos_per_aa_vec
    # For each (D gene, delDr), precompute the smallest/largest delDl with nonzero
    # probability (-1 when the whole column is zero), plus the total probability
    # that the D segment is fully deleted.
    min_delDl_given_DdelDr = [[]]*num_D_genes
    max_delDl_given_DdelDr = [[]]*num_D_genes
    zeroD_given_D = [[]]*num_D_genes
    for D_in in range(num_D_genes):
        current_min_delDl_given_delDr = [0]*num_delr_pos
        current_max_delDl_given_delDr = [0]*num_delr_pos
        current_zeroD = 0
        for delr in range(num_delr_pos):
            # delDl = seg_len - delr deletes everything that delr left over.
            if num_dell_pos > len(cutD_genomic_CDR3_segs[D_in])-delr:
                current_zeroD += generative_model.PdelDldelDr_given_D[len(cutD_genomic_CDR3_segs[D_in])-delr, delr, D_in]
            # Scan up for the first nonzero delDl entry.
            dell = 0
            while generative_model.PdelDldelDr_given_D[dell, delr, D_in]==0 and dell<num_dell_pos-1:
                dell+=1
            if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:
                current_min_delDl_given_delDr[delr] = -1
            else:
                current_min_delDl_given_delDr[delr] = dell
            if current_min_delDl_given_delDr[delr] == -1:
                # No nonzero entries at all for this delr.
                current_max_delDl_given_delDr[delr] = -1
            else:
                # Scan down from the top for the last nonzero delDl entry.
                dell = num_dell_pos-1
                while generative_model.PdelDldelDr_given_D[dell, delr, D_in]==0 and dell>=0:
                    dell -= 1
                if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:
                    current_max_delDl_given_delDr[delr] = -1
                else:
                    current_max_delDl_given_delDr[delr] = dell
        min_delDl_given_DdelDr[D_in] = current_min_delDl_given_delDr
        max_delDl_given_DdelDr[D_in] = current_max_delDl_given_delDr
        zeroD_given_D[D_in] = current_zeroD
    self.PD_nt_pos_vec = PD_nt_pos_vec
    self.PD_2nd_nt_pos_per_aa_vec = PD_2nd_nt_pos_per_aa_vec
    self.min_delDl_given_DdelDr = min_delDl_given_DdelDr
    self.max_delDl_given_DdelDr = max_delDl_given_DdelDr
    self.zeroD_given_D = zeroD_given_D
|
Process P(delDl, delDr|D) into Pi arrays.
Sets the attributes PD_nt_pos_vec, PD_2nd_nt_pos_per_aa_vec,
min_delDl_given_DdelDr, max_delDl_given_DdelDr, and zeroD_given_D.
Parameters
----------
generative_model : GenerativeModelVDJ
VDJ generative model class containing the model parameters.
genomic_data : GenomicDataVDJ
VDJ genomic data class containing the V, D, and J germline
sequences and info.
|
entailment
|
def generate_PJdelJ_nt_pos_vecs(self, generative_model, genomic_data):
    """Process P(J)*P(delJ|J) into Pi arrays.
    Sets the attributes PJdelJ_nt_pos_vec and PJdelJ_2nd_nt_pos_per_aa_vec.
    Parameters
    ----------
    generative_model : GenerativeModelVDJ
        VDJ generative model class containing the model parameters.
    genomic_data : GenomicDataVDJ
        VDJ genomic data class containing the V, D, and J germline
        sequences and info.
    """
    cutJ_genomic_CDR3_segs = genomic_data.cutJ_genomic_CDR3_segs
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    num_del_pos = generative_model.PdelJ_given_J.shape[0]
    num_D_genes, num_J_genes = generative_model.PDJ.shape
    # Marginal P(J), obtained by summing the joint P(D, J) over the D axis.
    PJ = np.sum(generative_model.PDJ, axis = 0)
    PJdelJ_nt_pos_vec = [[]]*num_J_genes
    PJdelJ_2nd_nt_pos_per_aa_vec = [[]]*num_J_genes
    for J_in, pj in enumerate(PJ):
        #We include the marginal PJ here
        current_PJdelJ_nt_pos_vec = np.zeros((4, len(cutJ_genomic_CDR3_segs[J_in])))
        current_PJdelJ_2nd_nt_pos_per_aa_vec = {}
        for aa in self.codons_dict.keys():
            current_PJdelJ_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutJ_genomic_CDR3_segs[J_in])))
        for pos, nt in enumerate(cutJ_genomic_CDR3_segs[J_in]):
            # delJ = pos deletions leave this position as the segment's 5' end;
            # positions beyond the modeled deletion range contribute nothing.
            if pos >= num_del_pos:
                continue
            # The codon frame is anchored at the 3' end of the J segment
            # (distance-from-end modulo 3 determines the codon position).
            if (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 1: #Start of a codon
                current_PJdelJ_nt_pos_vec[nt2num[nt], pos] = pj*generative_model.PdelJ_given_J[pos, J_in]
            elif (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 2: #Mid codon position
                for ins_nt in 'ACGT':
                    #We need to find what possible codons are allowed for any aa (or motif)
                    for aa in self.codons_dict.keys():
                        if ins_nt + cutJ_genomic_CDR3_segs[J_in][pos:pos+2] in self.codons_dict[aa]:
                            current_PJdelJ_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = pj*generative_model.PdelJ_given_J[pos, J_in]
            elif (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 0: #End of codon
                # Probability is stored in row 0 by convention for end-of-codon positions.
                current_PJdelJ_nt_pos_vec[0, pos] = pj*generative_model.PdelJ_given_J[pos, J_in]
        PJdelJ_nt_pos_vec[J_in] = current_PJdelJ_nt_pos_vec
        PJdelJ_2nd_nt_pos_per_aa_vec[J_in] = current_PJdelJ_2nd_nt_pos_per_aa_vec
    self.PJdelJ_nt_pos_vec = PJdelJ_nt_pos_vec
    self.PJdelJ_2nd_nt_pos_per_aa_vec = PJdelJ_2nd_nt_pos_per_aa_vec
|
Process P(J)*P(delJ|J) into Pi arrays.
Sets the attributes PJdelJ_nt_pos_vec and PJdelJ_2nd_nt_pos_per_aa_vec.
Parameters
----------
generative_model : GenerativeModelVDJ
VDJ generative model class containing the model parameters.
genomic_data : GenomicDataVDJ
VDJ genomic data class containing the V, D, and J germline
sequences and info.
|
entailment
|
def generate_VD_junction_transfer_matrices(self):
    """Compute the transfer matrices for the VD junction.

    Sets the attributes Tvd, Svd, Dvd, lTvd, and lDvd, each a dict mapping an
    amino acid (or motif) symbol to a 4x4 matrix over nucleotide indices
    (A=0, C=1, G=2, T=3).
    """
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    dinuc = self.Rvd
    first_bias = self.first_nt_bias_insVD
    Tvd = {}
    Svd = {}
    Dvd = {}
    lTvd = {}
    lDvd = {}
    for aa, codons in self.codons_dict.items():
        full_T = np.zeros((4, 4))
        partial_D = np.zeros((4, 4))
        left_T = np.zeros((4, 4))
        left_D = np.zeros((4, 4))
        start_S = np.zeros((4, 4))
        for codon in codons:
            c0 = nt2num[codon[0]]
            c1 = nt2num[codon[1]]
            c2 = nt2num[codon[2]]
            # Chain the dinucleotide weights across the codon, conditioned on
            # each possible preceding nucleotide (the matrix column).
            for prev in range(4):
                full_T[c2, prev] += dinuc[c2, c1] * dinuc[c1, c0] * dinuc[c0, prev]
                partial_D[c2, prev] += dinuc[c1, c0] * dinuc[c0, prev]
            # Leftmost-insertion variants use the first-nt bias for the
            # middle position instead of a dinucleotide factor.
            left_T[c2, c0] += dinuc[c2, c1] * first_bias[c1]
            left_D[c2, c0] += first_bias[c1]
        # S keeps only the rows whose nucleotide can begin some codon of this aa.
        for first_nt in 'ACGT':
            if any(codon.startswith(first_nt) for codon in codons):
                start_S[nt2num[first_nt], :] = dinuc[nt2num[first_nt], :]
        Tvd[aa] = full_T
        Svd[aa] = start_S
        Dvd[aa] = partial_D
        lTvd[aa] = left_T
        lDvd[aa] = left_D
    self.Tvd = Tvd
    self.Svd = Svd
    self.Dvd = Dvd
    self.lTvd = lTvd
    self.lDvd = lDvd
|
Compute the transfer matrices for the VD junction.
Sets the attributes Tvd, Svd, Dvd, lTvd, and lDvd.
|
entailment
|
def generate_DJ_junction_transfer_matrices(self):
    """Compute the transfer matrices for the DJ junction.

    (Docstring previously said "VD junction" -- a copy-paste error; this
    method handles the DJ side, chaining the dinucleotide model from the J
    side back toward the D side, so codon[0] indexes the matrix row.)

    Sets the attributes Tdj, Sdj, Ddj, rTdj, and rDdj, each a dict mapping an
    amino acid (or motif) symbol to a 4x4 matrix over nucleotide indices
    (A=0, C=1, G=2, T=3).
    """
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    #Compute Tdj: full-codon transfer, conditioned on the nucleotide that
    #follows the codon (the matrix column).
    Tdj = {}
    for aa in self.codons_dict.keys():
        current_Tdj = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Tdj[nt2num[codon[0]], nt2num[init_nt]] += self.Rdj[nt2num[codon[0]],nt2num[codon[1]]]*self.Rdj[nt2num[codon[1]],nt2num[codon[2]]] * self.Rdj[nt2num[codon[2]],nt2num[init_nt]]
        Tdj[aa] = current_Tdj
    #Compute Sdj: keep only the rows whose nucleotide can end some codon of this aa.
    Sdj = {}
    for aa in self.codons_dict.keys():
        current_Sdj = np.zeros((4, 4))
        for ins_nt in 'ACGT':
            if any(codon.endswith(ins_nt) for codon in self.codons_dict[aa]):
                current_Sdj[nt2num[ins_nt], :] = self.Rdj[nt2num[ins_nt], :]
        Sdj[aa] = current_Sdj
    #Compute Ddj: partial (two-position) transfer, dropping the codon[0]<-codon[1] factor.
    Ddj = {}
    for aa in self.codons_dict.keys():
        current_Ddj = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Ddj[nt2num[codon[0]], nt2num[init_nt]] += self.Rdj[nt2num[codon[1]],nt2num[codon[2]]] * self.Rdj[nt2num[codon[2]],nt2num[init_nt]]
        Ddj[aa] = current_Ddj
    #Compute rTdj: rightmost-insertion variant, using the first-nt bias for
    #the middle codon position. (Local renamed from current_lTdj -- it was a
    #copy-paste of the VD-side name.)
    rTdj = {}
    for aa in self.codons_dict.keys():
        current_rTdj = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_rTdj[nt2num[codon[0]], nt2num[codon[2]]] += self.Rdj[nt2num[codon[0]],nt2num[codon[1]]]*self.first_nt_bias_insDJ[nt2num[codon[1]]]
        rTdj[aa] = current_rTdj
    #Compute rDdj: bias-only variant of rTdj.
    rDdj = {}
    for aa in self.codons_dict.keys():
        current_rDdj = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_rDdj[nt2num[codon[0]], nt2num[codon[2]]] += self.first_nt_bias_insDJ[nt2num[codon[1]]]
        rDdj[aa] = current_rDdj
    #Set the attributes
    self.Tdj = Tdj
    self.Sdj = Sdj
    self.Ddj = Ddj
    self.rTdj = rTdj
    self.rDdj = rDdj
|
Compute the transfer matrices for the VD junction.
Sets the attributes Tdj, Sdj, Ddj, rTdj, and rDdj.
|
entailment
|
def generate_PVdelV_nt_pos_vecs(self, generative_model, genomic_data):
    """Process P(delV|V) into Pi arrays.
    Set the attributes PVdelV_nt_pos_vec and PVdelV_2nd_nt_pos_per_aa_vec.
    Parameters
    ----------
    generative_model : GenerativeModelVJ
        VJ generative model class containing the model parameters.
    genomic_data : GenomicDataVJ
        VJ genomic data class containing the V and J germline
        sequences and info.
    """
    cutV_genomic_CDR3_segs = genomic_data.cutV_genomic_CDR3_segs
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    num_del_pos = generative_model.PdelV_given_V.shape[0]
    num_V_genes = generative_model.PdelV_given_V.shape[1]
    PVdelV_nt_pos_vec = [[]]*num_V_genes
    PVdelV_2nd_nt_pos_per_aa_vec = [[]]*num_V_genes
    for V_in in range(num_V_genes):
        current_PVdelV_nt_pos_vec = np.zeros((4, len(cutV_genomic_CDR3_segs[V_in])))
        current_PVdelV_2nd_nt_pos_per_aa_vec = {}
        for aa in self.codons_dict.keys():
            current_PVdelV_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutV_genomic_CDR3_segs[V_in])))
        for pos, nt in enumerate(cutV_genomic_CDR3_segs[V_in]):
            # Deletions come from the 3' end, so only positions within
            # num_del_pos of the end have a defined deletion probability.
            if len(cutV_genomic_CDR3_segs[V_in]) - pos > num_del_pos:
                continue
            # The V-side codon frame is anchored at the 5' start (pos % 3).
            # The deletion index is the distance from the 3' end: len - pos - 1.
            if pos%3 == 0: #Start of a codon
                current_PVdelV_nt_pos_vec[nt2num[nt], pos] = generative_model.PdelV_given_V[len(cutV_genomic_CDR3_segs[V_in])-pos-1, V_in]
            elif pos%3 == 1: #Mid codon position
                for ins_nt in 'ACGT':
                    #We need to find what possible codons are allowed for any aa (or motif)
                    for aa in self.codons_dict.keys():
                        if cutV_genomic_CDR3_segs[V_in][pos-1:pos+1]+ ins_nt in self.codons_dict[aa]:
                            current_PVdelV_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = generative_model.PdelV_given_V[len(cutV_genomic_CDR3_segs[V_in])-pos-1, V_in]
            elif pos%3 == 2: #End of codon
                # Probability is stored in row 0 by convention for end-of-codon positions.
                current_PVdelV_nt_pos_vec[0, pos] = generative_model.PdelV_given_V[len(cutV_genomic_CDR3_segs[V_in])-pos-1, V_in]
        PVdelV_nt_pos_vec[V_in] = current_PVdelV_nt_pos_vec
        PVdelV_2nd_nt_pos_per_aa_vec[V_in] = current_PVdelV_2nd_nt_pos_per_aa_vec
    self.PVdelV_nt_pos_vec = PVdelV_nt_pos_vec
    self.PVdelV_2nd_nt_pos_per_aa_vec = PVdelV_2nd_nt_pos_per_aa_vec
|
Process P(delV|V) into Pi arrays.
Set the attributes PVdelV_nt_pos_vec and PVdelV_2nd_nt_pos_per_aa_vec.
Parameters
----------
generative_model : GenerativeModelVJ
VJ generative model class containing the model parameters.
genomic_data : GenomicDataVJ
VJ genomic data class containing the V and J germline
sequences and info.
|
entailment
|
def generate_PJdelJ_nt_pos_vecs(self, generative_model, genomic_data):
    """Process P(delJ|J) into Pi arrays.

    Set the attributes PJdelJ_nt_pos_vec and PJdelJ_2nd_nt_pos_per_aa_vec.

    Parameters
    ----------
    generative_model : GenerativeModelVJ
        VJ generative model class containing the model parameters.
    genomic_data : GenomicDataVJ
        VJ genomic data class containing the V and J germline
        sequences and info.
    """
    cutJ_genomic_CDR3_segs = genomic_data.cutJ_genomic_CDR3_segs
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    num_del_pos = generative_model.PdelJ_given_J.shape[0]
    # PVJ is the joint V/J usage distribution; only its J dimension is used
    # here. (The first axis was previously mislabeled num_D_genes, copied
    # from the VDJ version of this method -- a VJ model has no D genes.)
    num_V_genes, num_J_genes = generative_model.PVJ.shape
    PJdelJ_nt_pos_vec = [[]]*num_J_genes
    PJdelJ_2nd_nt_pos_per_aa_vec = [[]]*num_J_genes
    for J_in in range(num_J_genes):
        current_PJdelJ_nt_pos_vec = np.zeros((4, len(cutJ_genomic_CDR3_segs[J_in])))
        current_PJdelJ_2nd_nt_pos_per_aa_vec = {}
        for aa in self.codons_dict.keys():
            current_PJdelJ_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutJ_genomic_CDR3_segs[J_in])))
        for pos, nt in enumerate(cutJ_genomic_CDR3_segs[J_in]):
            # Positions beyond the modeled deletion range contribute nothing.
            if pos >= num_del_pos:
                continue
            # The codon frame is anchored at the 3' end of the J segment
            # (distance-from-end modulo 3 determines the codon position).
            if (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 1: #Start of a codon
                current_PJdelJ_nt_pos_vec[nt2num[nt], pos] = generative_model.PdelJ_given_J[pos, J_in]
            elif (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 2: #Mid codon position
                for ins_nt in 'ACGT':
                    #We need to find what possible codons are allowed for any aa (or motif)
                    for aa in self.codons_dict.keys():
                        if ins_nt + cutJ_genomic_CDR3_segs[J_in][pos:pos+2] in self.codons_dict[aa]:
                            current_PJdelJ_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = generative_model.PdelJ_given_J[pos, J_in]
            elif (len(cutJ_genomic_CDR3_segs[J_in]) - pos)%3 == 0: #End of codon
                # Probability is stored in row 0 by convention for end-of-codon positions.
                current_PJdelJ_nt_pos_vec[0, pos] = generative_model.PdelJ_given_J[pos, J_in]
        PJdelJ_nt_pos_vec[J_in] = current_PJdelJ_nt_pos_vec
        PJdelJ_2nd_nt_pos_per_aa_vec[J_in] = current_PJdelJ_2nd_nt_pos_per_aa_vec
    self.PJdelJ_nt_pos_vec = PJdelJ_nt_pos_vec
    self.PJdelJ_2nd_nt_pos_per_aa_vec = PJdelJ_2nd_nt_pos_per_aa_vec
|
Process P(delJ|J) into Pi arrays.
Set the attributes PJdelJ_nt_pos_vec and PJdelJ_2nd_nt_pos_per_aa_vec.
Parameters
----------
generative_model : GenerativeModelVJ
VJ generative model class containing the model parameters.
genomic_data : GenomicDataVJ
VJ genomic data class containing the V and J germline
sequences and info.
|
entailment
|
def generate_VJ_junction_transfer_matrices(self):
    """Compute the transfer matrices for the VJ junction.

    Sets the attributes Tvj, Svj, Dvj, lTvj, and lDvj, each a dict mapping an
    amino acid (or motif) symbol to a 4x4 matrix over nucleotide indices
    (A=0, C=1, G=2, T=3).
    """
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    dinuc = self.Rvj
    first_bias = self.first_nt_bias_insVJ
    Tvj = {}
    Svj = {}
    Dvj = {}
    lTvj = {}
    lDvj = {}
    for aa, codons in self.codons_dict.items():
        full_T = np.zeros((4, 4))
        partial_D = np.zeros((4, 4))
        left_T = np.zeros((4, 4))
        left_D = np.zeros((4, 4))
        start_S = np.zeros((4, 4))
        for codon in codons:
            c0 = nt2num[codon[0]]
            c1 = nt2num[codon[1]]
            c2 = nt2num[codon[2]]
            # Chain the dinucleotide weights across the codon, conditioned on
            # each possible preceding nucleotide (the matrix column).
            for prev in range(4):
                full_T[c2, prev] += dinuc[c2, c1] * dinuc[c1, c0] * dinuc[c0, prev]
                partial_D[c2, prev] += dinuc[c1, c0] * dinuc[c0, prev]
            # Leftmost-insertion variants use the first-nt bias for the
            # middle position instead of a dinucleotide factor.
            left_T[c2, c0] += dinuc[c2, c1] * first_bias[c1]
            left_D[c2, c0] += first_bias[c1]
        # S keeps only the rows whose nucleotide can begin some codon of this aa.
        for first_nt in 'ACGT':
            if any(codon.startswith(first_nt) for codon in codons):
                start_S[nt2num[first_nt], :] = dinuc[nt2num[first_nt], :]
        Tvj[aa] = full_T
        Svj[aa] = start_S
        Dvj[aa] = partial_D
        lTvj[aa] = left_T
        lDvj[aa] = left_D
    self.Tvj = Tvj
    self.Svj = Svj
    self.Dvj = Dvj
    self.lTvj = lTvj
    self.lDvj = lDvj
|
Compute the transfer matrices for the VJ junction.
Sets the attributes Tvj, Svj, Dvj, lTvj, and lDvj.
|
entailment
|
def showDosHeaderData(peInstance):
    """ Prints IMAGE_DOS_HEADER fields. """
    # NOTE: this module uses Python 2 print statements throughout.
    dosFields = peInstance.dosHeader.getFields()
    print "[+] IMAGE_DOS_HEADER values:\n"
    for field in dosFields:
        # Array-valued fields are dumped element by element with a 0-based index.
        if isinstance(dosFields[field], datatypes.Array):
            print "--> %s - Array of length %d" % (field, len(dosFields[field]))
            counter = 0
            for element in dosFields[field]:
                print "[%d] 0x%08x" % (counter, element.value)
                counter += 1
        else:
            print "--> %s = 0x%08x" % (field, dosFields[field].value)
|
Prints IMAGE_DOS_HEADER fields.
|
entailment
|
def showFileHeaderData(peInstance):
    """ Prints IMAGE_FILE_HEADER fields. """
    # All file-header fields expose a numeric .value, so a flat hex dump suffices.
    fileHeaderFields = peInstance.ntHeaders.fileHeader.getFields()
    print "[+] IMAGE_FILE_HEADER values:\n"
    for field in fileHeaderFields:
        print "--> %s = 0x%08x" % (field, fileHeaderFields[field].value)
|
Prints IMAGE_FILE_HEADER fields.
|
entailment
|
def showOptionalHeaderData(peInstance):
    """ Prints IMAGE_OPTIONAL_HEADER fields. """
    print "[+] IMAGE_OPTIONAL_HEADER:\n"
    ohFields = peInstance.ntHeaders.optionalHeader.getFields()
    for field in ohFields:
        # Data-directory entries are skipped here; showDataDirectoriesData prints them.
        if not isinstance(ohFields[field], datadirs.DataDirectory):
            print "--> %s = 0x%08x" % (field, ohFields[field].value)
|
Prints IMAGE_OPTIONAL_HEADER fields.
|
entailment
|
def showDataDirectoriesData(peInstance):
    """ Prints the DATA_DIRECTORY fields. """
    print "[+] Data directories:\n"
    dirs = peInstance.ntHeaders.optionalHeader.dataDirectory
    # 1-based counter purely for display.
    counter = 1
    for dir in dirs:
        print "[%d] --> Name: %s -- RVA: 0x%08x -- SIZE: 0x%08x" % (counter, dir.name.value, dir.rva.value, dir.size.value)
        counter += 1
|
Prints the DATA_DIRECTORY fields.
|
entailment
|
def showSectionsHeaders(peInstance):
    """ Prints IMAGE_SECTION_HEADER for every section present in the file. """
    print "[+] Sections information:\n"
    print "--> NumberOfSections: %d\n" % peInstance.ntHeaders.fileHeader.numberOfSections.value
    for section in peInstance.sectionHeaders:
        fields = section.getFields()
        for field in fields:
            # String fields (e.g. the section name) can't be rendered as hex,
            # so pick the format per field type.
            if isinstance(fields[field], datatypes.String):
                fmt = "%s = %s"
            else:
                fmt = "%s = 0x%08x"
            print fmt % (field, fields[field].value)
        # Blank line between sections for readability.
        print "\n"
|
Prints IMAGE_SECTION_HEADER for every section present in the file.
|
entailment
|
def showImports(peInstance):
    """ Shows imports information. """
    # The import directory's info holds one descriptor entry per imported module.
    iidEntries = peInstance.ntHeaders.optionalHeader.dataDirectory[consts.IMPORT_DIRECTORY].info
    if iidEntries:
        for iidEntry in iidEntries:
            fields = iidEntry.getFields()
            print "module: %s" % iidEntry.metaData.moduleName.value
            for field in fields:
                print "%s -> %x" % (field, fields[field].value)
            # Dump every IAT entry belonging to this module.
            for iatEntry in iidEntry.iat:
                fields = iatEntry.getFields()
                for field in fields:
                    print "%s - %r" % (field, fields[field].value)
            print "\n"
    else:
        print "The file does not have imported functions."
|
Shows imports information.
|
entailment
|
def showExports(peInstance):
    """ Show exports information """
    # The export directory's info is a single structure (or None/empty when absent).
    exports = peInstance.ntHeaders.optionalHeader.dataDirectory[consts.EXPORT_DIRECTORY].info
    if exports:
        exp_fields = exports.getFields()
        for field in exp_fields:
            print "%s -> %x" % (field, exp_fields[field].value)
        # Dump each exported-symbol entry.
        for entry in exports.exportTable:
            entry_fields = entry.getFields()
            for field in entry_fields:
                print "%s -> %r" % (field, entry_fields[field].value)
    else:
        print "The file does not have exported functions."
|
Show exports information
|
entailment
|
def getFields(self):
    """
    Returns all the class attributes.
    @rtype: dict
    @return: A dictionary containing all the class attributes.
    """
    # Build the name -> value mapping from the declared attribute list.
    return dict((name, getattr(self, name)) for name in self._attrsList)
|
Returns all the class attributues.
@rtype: dict
@return: A dictionary containing all the class attributes.
|
entailment
|
def parse(readDataInstance, arrayType, arrayLength):
    """
    Returns a new L{Array} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: The L{ReadData} object containing the array data.

    @type arrayType: int
    @param arrayType: The type of L{Array} to be built.

    @type arrayLength: int
    @param arrayLength: The length of the array passed as an argument.

    @rtype: L{Array}
    @return: New L{Array} object.

    @raise DataLengthException: If there are not enough bytes left to read.
    @raise ArrayTypeException: If C{arrayType} is not a recognized type.
    """
    newArray = Array(arrayType)
    dataLength = len(readDataInstance)
    # Compare with == rather than identity: arrayType is a plain int constant.
    if arrayType == TYPE_DWORD:
        toRead = arrayLength * 4
        if dataLength >= toRead:
            for i in range(arrayLength):
                newArray.append(DWORD(readDataInstance.readDword()))
        else:
            raise excep.DataLengthException("Not enough bytes to read.")
    elif arrayType == TYPE_WORD:
        toRead = arrayLength * 2
        if dataLength >= toRead:
            for i in range(arrayLength):
                # BUGFIX: 16-bit values were previously wrapped in DWORD; a
                # WORD array must hold WORD elements.
                newArray.append(WORD(readDataInstance.readWord()))
        else:
            raise excep.DataLengthException("Not enough bytes to read.")
    elif arrayType == TYPE_QWORD:
        toRead = arrayLength * 8
        if dataLength >= toRead:
            for i in range(arrayLength):
                newArray.append(QWORD(readDataInstance.readQword()))
        else:
            raise excep.DataLengthException("Not enough bytes to read.")
    elif arrayType == TYPE_BYTE:
        # Single-byte reads cannot overrun in the same way; keep original behavior.
        for i in range(arrayLength):
            newArray.append(BYTE(readDataInstance.readByte()))
    else:
        raise excep.ArrayTypeException("Couldn't create an array of type %d" % arrayType)
    return newArray
|
Returns a new L{Array} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: The L{ReadData} object containing the array data.
@type arrayType: int
@param arrayType: The type of L{Array} to be built.
@type arrayLength: int
@param arrayLength: The length of the array passed as an argument.
@rtype: L{Array}
@return: New L{Array} object.
|
entailment
|
def calc_euler_tour(g, start, end):
    '''Calculates an Euler tour over the graph g from vertex start to vertex end.
    Assumes start and end are odd-degree vertices and that there are no other odd-degree
    vertices.'''
    # Work on a copy so the caller's graph is untouched.
    even_g = nx.subgraph(g, g.nodes()).copy()
    if end in even_g.neighbors(start):
        # If start and end are neighbors, remove the edge
        even_g.remove_edge(start, end)
        comps = list(nx.connected_components(even_g))
        # If the graph did not split, just find the euler circuit
        if len(comps) == 1:
            trail = list(nx.eulerian_circuit(even_g, start))
            trail.append((start, end))
        elif len(comps) == 2:
            # The removed edge was a bridge: tour each half separately and
            # stitch them together with the (start, end) edge.
            subg1 = nx.subgraph(even_g, comps[0])
            subg2 = nx.subgraph(even_g, comps[1])
            start_subg, end_subg = (subg1, subg2) if start in subg1.nodes() else (subg2, subg1)
            trail = list(nx.eulerian_circuit(start_subg, start)) + [(start, end)] + list(nx.eulerian_circuit(end_subg, end))
        else:
            raise Exception('Unknown edge case with connected components of size {0}:\n{1}'.format(len(comps), comps))
    else:
        # If they are not neighbors, we add an imaginary edge and calculate the euler circuit
        even_g.add_edge(start, end)
        circ = list(nx.eulerian_circuit(even_g, start))
        try:
            trail_start = circ.index((start, end))
        except ValueError:
            # BUGFIX: was a bare except. list.index raises ValueError when the
            # circuit traverses the imaginary edge in the opposite direction.
            trail_start = circ.index((end, start))
        # Rotate the circuit so the imaginary edge is dropped from the trail.
        trail = circ[trail_start+1:] + circ[:trail_start]
    return trail
|
Calculates an Euler tour over the graph g from vertex start to vertex end.
Assumes start and end are odd-degree vertices and that there are no other odd-degree
vertices.
|
entailment
|
def greedy_trails(subg, odds, verbose):
    '''Greedily select trails by making the longest you can until the end'''
    if verbose:
        print('\tCreating edge map')
    # Mutable adjacency lists; each consumed edge is removed from both endpoints.
    adjacency = defaultdict(list)
    for u, v in subg.edges():
        adjacency[u].append(v)
        adjacency[v].append(u)
    if verbose:
        print('\tSelecting trails')
    trails = []
    for node in subg.nodes():
        if verbose > 2:
            print('\t\tNode {0}'.format(node))
        cursor = node
        # After a trail dead-ends, the next trail resumes from the trail's
        # second-to-last vertex (cursor), not necessarily from `node`.
        while adjacency[cursor]:
            nxt = adjacency[cursor][0]
            trail = [(cursor, nxt)]
            adjacency[cursor].remove(nxt)
            adjacency[nxt].remove(cursor)
            while adjacency[nxt]:
                cursor = nxt
                nxt = adjacency[cursor][0]
                trail.append((cursor, nxt))
                adjacency[cursor].remove(nxt)
                adjacency[nxt].remove(cursor)
            trails.append(trail)
    return trails
|
Greedily select trails by making the longest you can until the end
|
entailment
|
def decompose_graph(g, heuristic='tour', max_odds=20, verbose=0):
    '''Decompose a graph into a set of non-overlapping trails.

    heuristic selects the trail-selection strategy for subgraphs with more
    than two odd-degree vertices; returns a list of trails (edge lists).'''
    # Get the connected subgraphs
    subgraphs = [nx.subgraph(g, x).copy() for x in nx.connected_components(g)]
    chains = []
    num_subgraphs = len(subgraphs)
    step = 0
    while num_subgraphs > 0:
        if verbose:
            print('Step #{0} ({1} subgraphs)'.format(step, num_subgraphs))
        # Iterate in reverse so deleting exhausted subgraphs by index is safe.
        for i in range(num_subgraphs-1, -1, -1):
            subg = subgraphs[i]
            # Get all odd-degree nodes
            odds = [x for x,y in dict(nx.degree(subg)).items() if y % 2 == 1]
            if verbose > 1:
                if len(odds) == 0:
                    print('\t\tNo odds')
                elif len(odds) == 2:
                    print('\t\tExactly 2 odds')
                else:
                    print('\t\t{0} odds'.format(len(odds)))
            # If there are no odd-degree edges, we can find an euler circuit
            if len(odds) == 0:
                trails = [list(nx.eulerian_circuit(subg))]
            elif len(odds) == 2:
                # If there are only two odd-degree edges, we can find an euler tour
                trails = [calc_euler_tour(subg, odds[0], odds[1])]
            elif heuristic in ['min', 'max', 'median', 'any']:
                trails = select_odd_degree_trail(subg, odds, max_odds, heuristic, verbose)
            elif heuristic == 'random':
                trails = select_random_trail(subg, verbose)
            elif heuristic == 'mindegree':
                trails = select_min_degree_trail(subg, max_odds, verbose)
            elif heuristic == 'ones':
                trails = select_single_edge_trails(subg, verbose)
            elif heuristic == 'tour':
                trails = pseudo_tour_trails(subg, odds, verbose)
            elif heuristic == 'greedy':
                trails = greedy_trails(subg, odds, verbose)
            if verbose > 2:
                print('\t\tTrails: {0}'.format(len(trails)))
            # Remove the trail
            for trail in trails:
                subg.remove_edges_from(trail)
            # Add it to the list of chains
            chains.extend(trails)
            # If the subgraph is empty, remove it from the list
            if subg.number_of_edges() == 0:
                del subgraphs[i]
            else:
                comps = list(nx.connected_components(subg))
                # If the last edge split the graph, add the new subgraphs to the list of subgraphs
                if len(comps) > 1:
                    for x in comps:
                        compg = nx.subgraph(subg, x)
                        if compg.number_of_edges() > 0:
                            subgraphs.append(compg)
                    del subgraphs[i]
        # Update the count of connected subgraphs
        num_subgraphs = len(subgraphs)
        step += 1
    return chains
|
Decompose a graph into a set of non-overlapping trails.
|
entailment
|
def plus(self, a):
    """ Add. Component-wise vector addition; returns a new Vector. """
    return Vector(a.x + self.x, a.y + self.y, a.z + self.z)
|
Add.
|
entailment
|
def minus(self, a):
    """ Subtract. Component-wise vector difference; returns a new Vector. """
    dx = self.x - a.x
    dy = self.y - a.y
    dz = self.z - a.z
    return Vector(dx, dy, dz)
|
Subtract.
|
entailment
|
def times(self, a):
    """ Multiply. Scales every component by the scalar a; returns a new Vector. """
    return Vector(a * self.x, a * self.y, a * self.z)
|
Multiply.
|
entailment
|
def dividedBy(self, a):
    """ Divide. Divides every component by the scalar a; returns a new Vector. """
    qx = self.x / a
    qy = self.y / a
    qz = self.z / a
    return Vector(qx, qy, qz)
|
Divide.
|
entailment
|
def lerp(self, a, t):
    """ Lerp. Linear interpolation from self to a: self + (a - self) * t """
    delta = a.minus(self)
    return self.plus(delta.times(t))
|
Lerp. Linear interpolation from self to a
|
entailment
|
def interpolate(self, other, t):
    """
    Create a new vertex between this vertex and `other` by linearly
    interpolating all properties using a parameter of `t`. Subclasses should
    override this to interpolate additional properties.
    """
    pos = self.pos.lerp(other.pos, t)
    normal = self.normal.lerp(other.normal, t)
    return Vertex(pos, normal)
|
Create a new vertex between this vertex and `other` by linearly
interpolating all properties using a parameter of `t`. Subclasses should
override this to interpolate additional properties.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.