text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(db_folder, out_folder):
    """Parse a cronos database.

    Convert the database located in ``db_folder`` into CSV files in the
    directory ``out_folder``.

    :param db_folder: directory holding the Cronos database files.
    :param out_folder: directory that will receive one CSV per table.
    :raises CronosException: if any required database file is missing.
    """
    # The database structure, containing table and column definitions as
    # well as other data.
    stru_dat = get_file(db_folder, 'CroStru.dat')
    # Index file for the database, which contains offsets for each record.
    data_tad = get_file(db_folder, 'CroBank.tad')
    # Actual data records, can only be decoded using CroBank.tad.
    data_dat = get_file(db_folder, 'CroBank.dat')
    if None in [stru_dat, data_tad, data_dat]:
        raise CronosException("Not all database files are present.")
    meta, tables = parse_structure(stru_dat)
    for table in tables:
        # TODO: do we want to export the "FL" table?
        if table['abbr'] == 'FL' and table['name'] == 'Files':
            continue
        columns = table.get('columns')
        # Use a context manager so the CSV file is closed even if
        # parse_data raises part-way through a table (the original
        # leaked the handle on any exception).
        with open(make_csv_file_name(meta, table, out_folder), 'w') as fh:
            writer = csv.writer(fh)
            writer.writerow([encode_cell(c['name']) for c in columns])
            for row in parse_data(data_tad, data_dat, table.get('id'), columns):
                writer.writerow([encode_cell(c) for c in row])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode1(self):
    """Return an HTML ``<img>`` tag embedding the figure file as base64.

    Reads the file at ``self.path`` as raw bytes and base64-encodes it
    into a PNG data URI.

    :returns: HTML image tag string with an inline data URI.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(self.path, 'rb') as fh:
        data_uri = b64encode(fh.read()).decode('utf-8').replace('\n', '')
    return '<img src="data:image/png;base64,{0}">'.format(data_uri)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode2(self):
    """Render ``self.fig`` to an in-memory PNG and return it embedded in
    an HTML ``<img>`` tag as a percent-quoted base64 data URI."""
    png_buffer = BytesIO()
    self.fig.savefig(png_buffer, format='png', bbox_inches='tight', dpi=100)
    png_buffer.seek(0)
    encoded = b64encode(png_buffer.read())
    return '<img src="data:image/png;base64,{0}">'.format(urlquote(encoded))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadFromDisk(self, calculation):
    """Read the spectra from the files generated by Quanty and store
    them as a list of spectrum objects in ``self.raw``.

    :param calculation: object describing the finished Quanty run; its
        ``baseName``, experiment type, axis limits/points, and broadening
        attributes are read here.
    :raises OSError/IOError: re-raised if a ``.spec`` file cannot be read.
    """
    # Map from the human-readable spectrum name to the file-name suffix
    # Quanty uses when writing the .spec files.
    suffixes = {
        'Isotropic': 'iso',
        'Circular Dichroism (R-L)': 'cd',
        'Right Polarized (R)': 'r',
        'Left Polarized (L)': 'l',
        'Linear Dichroism (V-H)': 'ld',
        'Vertical Polarized (V)': 'v',
        'Horizontal Polarized (H)': 'h',
    }
    self.raw = list()
    for spectrumName in self.toPlot:
        suffix = suffixes[spectrumName]
        path = '{}_{}.spec'.format(calculation.baseName, suffix)
        try:
            # The first 5 rows of a .spec file are header lines.
            data = np.loadtxt(path, skiprows=5)
        except (OSError, IOError) as e:
            raise e
        rows, columns = data.shape
        if calculation.experiment in ['XAS', 'XPS', 'XES']:
            # 1D experiments: a single energy axis.
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            if calculation.experiment == 'XES':
                x = np.linspace(xMin, xMax, xNPoints + 1)
                # XES is plotted against emission energy, so the axis is
                # reversed and the intensity normalized to its maximum.
                x = x[::-1]
                y = data[:, 2]
                y = y / np.abs(y.max())
            else:
                x = np.linspace(xMin, xMax, xNPoints + 1)
                # Intensities sit in every other column starting at 2.
                y = data[:, 2::2].flatten()
            spectrum = Spectrum1D(x, y)
            spectrum.name = spectrumName
            # Short suffixes are polarization labels (upper-cased);
            # longer ones (iso, cd, ld) are title-cased.
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            if calculation.experiment in ['XAS', ]:
                spectrum.xLabel = 'Absorption Energy (eV)'
            elif calculation.experiment in ['XPS', ]:
                spectrum.xLabel = 'Binding Energy (eV)'
            elif calculation.experiment in ['XES', ]:
                spectrum.xLabel = 'Emission Energy (eV)'
            spectrum.yLabel = 'Intensity (a.u.)'
            self.broadenings = {'gaussian': (calculation.xGaussian, ), }
        else:
            # 2D experiments (e.g. RIXS): incident energy vs energy
            # transfer map.
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            yMin = calculation.yMin
            yMax = calculation.yMax
            yNPoints = calculation.yNPoints
            x = np.linspace(xMin, xMax, xNPoints + 1)
            y = np.linspace(yMin, yMax, yNPoints + 1)
            z = data[:, 2::2]
            spectrum = Spectrum2D(x, y, z)
            spectrum.name = spectrumName
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            spectrum.xLabel = 'Incident Energy (eV)'
            spectrum.yLabel = 'Energy Transfer (eV)'
            self.broadenings = {'gaussian': (calculation.xGaussian,
                                             calculation.yGaussian), }
        self.raw.append(spectrum)
    # Process the spectra once they were read from disk.
    self.process()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateResultsView(self, index):
""" Update the selection to contain only the result specified by the index. This should be the last index of the model. Finally updade the context menu. The selectionChanged signal is used to trigger the update of the Quanty dock widget and result details dialog. :param index: Index of the last item of the model. :type index: QModelIndex """ |
flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows |
QItemSelectionModel.Select)
self.resultsView.selectionModel().select(index, flags)
self.resultsView.resizeColumnsToContents()
self.resultsView.setFocus() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updatePlotWidget(self):
"""Updating the plotting widget should not require any information about the current state of the widget.""" |
pw = self.getPlotWidget()
pw.reset()
results = self.resultsModel.getCheckedItems()
for result in results:
if isinstance(result, ExperimentalData):
spectrum = result.spectra['Expt']
spectrum.legend = '{}-{}'.format(result.index, 'Expt')
spectrum.xLabel = 'X'
spectrum.yLabel = 'Y'
spectrum.plot(plotWidget=pw)
else:
if len(results) > 1 and result.experiment in ['RIXS', ]:
continue
for spectrum in result.spectra.processed:
spectrum.legend = '{}-{}'.format(
result.index, spectrum.shortName)
if spectrum.name in result.spectra.toPlotChecked:
spectrum.plot(plotWidget=pw) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def row(self):
"""Return the row of the child.""" |
if self.parent is not None:
children = self.parent.getChildren()
# The index method of the list object.
return children.index(self)
else:
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parent(self, index):
"""Return the index of the parent for a given index of the child. Unfortunately, the name of the method has to be parent, even though a more verbose name like parentIndex, would avoid confusion about what parent actually is - an index or an item. """ |
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setData(self, index, value, role):
"""Set the role data for the item at index to value.""" |
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flags(self, index):
"""Return the active flags for the given index. Add editable flag to items other than the first column. """ |
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model.""" |
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _contextMenu(self, pos):
"""Handle plot area customContextMenuRequested signal. :param QPoint pos: Mouse position relative to plot area """ |
# Create the context menu.
menu = QMenu(self)
menu.addAction(self._zoomBackAction)
# Displaying the context menu at the mouse position requires
# a global position.
# The position received as argument is relative to PlotWidget's
# plot area, and thus needs to be converted.
plotArea = self.getWidgetHandle()
globalPosition = plotArea.mapToGlobal(pos)
menu.exec_(globalPosition) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convolve_fft(array, kernel):
""" Convolve an array with a kernel using FFT. Implemntation based on the convolve_fft function from astropy. https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py """ |
array = np.asarray(array, dtype=np.complex)
kernel = np.asarray(kernel, dtype=np.complex)
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
array_shape = array.shape
kernel_shape = kernel.shape
new_shape = np.array(array_shape) + np.array(kernel_shape)
array_slices = []
kernel_slices = []
for (new_dimsize, array_dimsize, kernel_dimsize) in zip(
new_shape, array_shape, kernel_shape):
center = new_dimsize - (new_dimsize + 1) // 2
array_slices += [slice(center - array_dimsize // 2,
center + (array_dimsize + 1) // 2)]
kernel_slices += [slice(center - kernel_dimsize // 2,
center + (kernel_dimsize + 1) // 2)]
array_slices = tuple(array_slices)
kernel_slices = tuple(kernel_slices)
if not np.all(new_shape == array_shape):
big_array = np.zeros(new_shape, dtype=np.complex)
big_array[array_slices] = array
else:
big_array = array
if not np.all(new_shape == kernel_shape):
big_kernel = np.zeros(new_shape, dtype=np.complex)
big_kernel[kernel_slices] = kernel
else:
big_kernel = kernel
array_fft = np.fft.fftn(big_array)
kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))
rifft = np.fft.ifftn(array_fft * kernel_fft)
return rifft[array_slices].real |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diagonalize(self):
    '''Diagonalize the tensor.

    Eigenvectors are taken from the symmetrized tensor; the stored
    eigenvalues are the diagonal of the full tensor rotated into that
    eigenbasis.
    '''
    symmetric_part = (self.tensor + self.tensor.transpose()) / 2.0
    self.eigvals, self.eigvecs = np.linalg.eig(symmetric_part)
    rotated = self.eigvecs.transpose().dot(self.tensor).dot(self.eigvecs)
    self.eigvals = np.diag(rotated)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _skip_lines(self, n):
'''Skip a number of lines from the output.'''
for i in range(n):
self.line = next(self.output)
return self.line |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _parse_tensor(self, indices=False):
'''Parse a tensor.'''
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __validate(self, target, value, oldvalue, initiator):
""" Method executed when the event 'set' is triggered. :param target: Object triggered :param value: New value :param oldvalue: Previous value :param initiator: Column modified :return: :raise ValidateError: """ |
if value == oldvalue:
return value
if self.allow_null and value is None:
return value
if self.check_value(value):
return value
else:
if self.throw_exception:
if self.message:
self.message = self.message.format(
field=self.field, new_value=value, old_value=oldvalue, key=initiator.key)
raise ValidateError(self.message)
else:
raise ValidateError('Value %s from column %s is not valid' % (value, initiator.key))
return oldvalue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __create_event(self):
""" Create an SQLAlchemy event listening the 'set' in a particular column. :rtype : object """ |
if not event.contains(self.field, 'set', self.__validate):
event.listen(self.field, 'set', self.__validate, retval=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
""" Remove the listener to stop the validation """ |
if event.contains(self.field, 'set', self.__validate):
event.remove(self.field, 'set', self.__validate) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
""" Restart the listener """ |
if not event.contains(self.field, 'set', self.__validate):
self.__create_event() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nhapDaiHan(self, cucSo, gioiTinh):
"""Nhap dai han Args: cucSo (TYPE):
Description gioiTinh (TYPE):
Description Returns: TYPE: Description """ |
for cung in self.thapNhiCung:
khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)
cung.daiHan(cucSo + khoangCach * 10)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nt2aa(ntseq):
"""Translate a nucleotide sequence into an amino acid sequence. Parameters ntseq : str Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase) Returns ------- aaseq : str Amino acid sequence Example -------- 'CAWSVAPDRGGYTF' """ |
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
aa_dict ='KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF'
return ''.join([aa_dict[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nt2codon_rep(ntseq):
"""Represent nucleotide sequence by sequence of codon symbols. 'Translates' the nucleotide sequence into a symbolic representation of 'amino acids' where each codon gets its own unique character symbol. These characters should be reserved only for representing the 64 individual codons --- note that this means it is important that this function matches the corresponding function in the preprocess script and that any custom alphabet does not use these symbols. Defining symbols for each individual codon allows for Pgen computation of inframe nucleotide sequences. Parameters ntseq : str A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be 'translated' into the codon - symbol representation. Can be either uppercase or lowercase, but only composed of A, C, G, or T. Returns ------- codon_rep : str The codon - symbolic representation of ntseq. Note that if len(ntseq) == 3L --> len(codon_rep) == L Example -------- '\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f' """ |
#
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
#Use single characters not in use to represent each individual codon --- this function is called in constructing the codon dictionary
codon_rep ='\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
return ''.join([codon_rep[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cutR_seq(seq, cutR, max_palindrome):
"""Cut genomic sequence from the right. Parameters seq : str Nucleotide sequence to be cut from the right cutR : int cutR - max_palindrome = how many nucleotides to cut from the right. Negative cutR implies complementary palindromic insertions. max_palindrome : int Length of the maximum palindromic insertion. Returns ------- seq : str Nucleotide sequence after being cut from the right Examples -------- 'TGCGCCAGCAGTGAGTCGACT' 'TGCGCCAGCAGTG' """ |
complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} #can include lower case if wanted
if cutR < max_palindrome:
seq = seq + ''.join([complement_dict[nt] for nt in seq[cutR - max_palindrome:]][::-1]) #reverse complement palindrome insertions
else:
seq = seq[:len(seq) - cutR + max_palindrome] #deletions
return seq |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cutL_seq(seq, cutL, max_palindrome):
"""Cut genomic sequence from the left. Parameters seq : str Nucleotide sequence to be cut from the right cutL : int cutL - max_palindrome = how many nucleotides to cut from the left. Negative cutL implies complementary palindromic insertions. max_palindrome : int Length of the maximum palindromic insertion. Returns ------- seq : str Nucleotide sequence after being cut from the left Examples -------- 'CACTGAAGCTTTCTTT' 'TTCATGAACACTGAAGCTTTCTTT' """ |
complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} #can include lower case if wanted
if cutL < max_palindrome:
seq = ''.join([complement_dict[nt] for nt in seq[:max_palindrome - cutL]][::-1]) + seq #reverse complement palindrome insertions
else:
seq = seq[cutL-max_palindrome:] #deletions
return seq |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_sub_codons_left(codons_dict):
"""Generate the sub_codons_left dictionary of codon prefixes. Parameters codons_dict : dict Dictionary, keyed by the allowed 'amino acid' symbols with the values being lists of codons corresponding to the symbol. Returns ------- sub_codons_left : dict Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for each codon in an 'amino acid' grouping """ |
sub_codons_left = {}
for aa in codons_dict.keys():
sub_codons_left[aa] = list(set([x[0] for x in codons_dict[aa]] + [x[:2] for x in codons_dict[aa]]))
return sub_codons_left |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_steady_state_dist(R):
"""Calculate the steady state dist of a 4 state markov transition matrix. Parameters R : ndarray Markov transition matrix Returns ------- p_ss : ndarray Steady state probability distribution """ |
#Calc steady state distribution for a dinucleotide bias matrix
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rnd_ins_seq(ins_len, C_R, CP_first_nt):
"""Generate a random insertion nucleotide sequence of length ins_len. Draws the sequence identity (for a set length) from the distribution defined by the dinucleotide markov model of transition matrix R. Parameters ins_len : int Length of nucleotide sequence to be inserted. C_R : ndarray (4, 4) array of the cumulative transition probabilities defined by the Markov transition matrix R CP_first_nt : ndarray (4,) array of the cumulative probabilities for the first inserted nucleotide Returns ------- seq : str Randomly generated insertion sequence of length ins_len. Examples -------- 'GATGGAC' 'ACCCCCG' 'GCC' """ |
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
num2nt = 'ACGT'
if ins_len == 0:
return ''
seq = num2nt[CP_first_nt.searchsorted(np.random.random())]
ins_len += -1
while ins_len > 0:
seq += num2nt[C_R[nt2num[seq[-1]], :].searchsorted(np.random.random())]
ins_len += -1
return seq |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_rates(self):
""" Creates or updates rates for a source """ |
source, created = RateSource.objects.get_or_create(name=self.get_source_name())
source.base_currency = self.get_base_currency()
source.save()
for currency, value in six.iteritems(self.get_rates()):
try:
rate = Rate.objects.get(source=source, currency=currency)
except Rate.DoesNotExist:
rate = Rate(source=source, currency=currency)
rate.value = value
rate.save() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showDosHeaderData(peInstance):
""" Prints IMAGE_DOS_HEADER fields. """ |
dosFields = peInstance.dosHeader.getFields()
print "[+] IMAGE_DOS_HEADER values:\n"
for field in dosFields:
if isinstance(dosFields[field], datatypes.Array):
print "--> %s - Array of length %d" % (field, len(dosFields[field]))
counter = 0
for element in dosFields[field]:
print "[%d] 0x%08x" % (counter, element.value)
counter += 1
else:
print "--> %s = 0x%08x" % (field, dosFields[field].value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showFileHeaderData(peInstance):
""" Prints IMAGE_FILE_HEADER fields. """ |
fileHeaderFields = peInstance.ntHeaders.fileHeader.getFields()
print "[+] IMAGE_FILE_HEADER values:\n"
for field in fileHeaderFields:
print "--> %s = 0x%08x" % (field, fileHeaderFields[field].value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showOptionalHeaderData(peInstance):
""" Prints IMAGE_OPTIONAL_HEADER fields. """ |
print "[+] IMAGE_OPTIONAL_HEADER:\n"
ohFields = peInstance.ntHeaders.optionalHeader.getFields()
for field in ohFields:
if not isinstance(ohFields[field], datadirs.DataDirectory):
print "--> %s = 0x%08x" % (field, ohFields[field].value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showDataDirectoriesData(peInstance):
""" Prints the DATA_DIRECTORY fields. """ |
print "[+] Data directories:\n"
dirs = peInstance.ntHeaders.optionalHeader.dataDirectory
counter = 1
for dir in dirs:
print "[%d] --> Name: %s -- RVA: 0x%08x -- SIZE: 0x%08x" % (counter, dir.name.value, dir.rva.value, dir.size.value)
counter += 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showSectionsHeaders(peInstance):
""" Prints IMAGE_SECTION_HEADER for every section present in the file. """ |
print "[+] Sections information:\n"
print "--> NumberOfSections: %d\n" % peInstance.ntHeaders.fileHeader.numberOfSections.value
for section in peInstance.sectionHeaders:
fields = section.getFields()
for field in fields:
if isinstance(fields[field], datatypes.String):
fmt = "%s = %s"
else:
fmt = "%s = 0x%08x"
print fmt % (field, fields[field].value)
print "\n" |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showImports(peInstance):
""" Shows imports information. """ |
iidEntries = peInstance.ntHeaders.optionalHeader.dataDirectory[consts.IMPORT_DIRECTORY].info
if iidEntries:
for iidEntry in iidEntries:
fields = iidEntry.getFields()
print "module: %s" % iidEntry.metaData.moduleName.value
for field in fields:
print "%s -> %x" % (field, fields[field].value)
for iatEntry in iidEntry.iat:
fields = iatEntry.getFields()
for field in fields:
print "%s - %r" % (field, fields[field].value)
print "\n"
else:
print "The file does not have imported functions." |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def showExports(peInstance):
""" Show exports information """ |
exports = peInstance.ntHeaders.optionalHeader.dataDirectory[consts.EXPORT_DIRECTORY].info
if exports:
exp_fields = exports.getFields()
for field in exp_fields:
print "%s -> %x" % (field, exp_fields[field].value)
for entry in exports.exportTable:
entry_fields = entry.getFields()
for field in entry_fields:
print "%s -> %r" % (field, entry_fields[field].value)
else:
print "The file does not have exported functions." |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getFields(self):
""" Returns all the class attributues. @rtype: dict @return: A dictionary containing all the class attributes. """ |
d = {}
for i in self._attrsList:
key = i
value = getattr(self, i)
d[key] = value
return d |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_euler_tour(g, start, end):
    '''Calculates an Euler tour over the graph g from vertex start to vertex end.
    Assumes start and end are odd-degree vertices and that there are no other odd-degree
    vertices.

    Returns the tour as a list of (u, v) edge tuples.
    '''
    # Work on a copy so the caller's graph is untouched.
    even_g = nx.subgraph(g, g.nodes()).copy()
    if end in even_g.neighbors(start):
        # If start and end are neighbors, remove the edge
        even_g.remove_edge(start, end)
        comps = list(nx.connected_components(even_g))
        # If the graph did not split, just find the euler circuit
        if len(comps) == 1:
            trail = list(nx.eulerian_circuit(even_g, start))
            trail.append((start, end))
        elif len(comps) == 2:
            # Removing the edge split the graph: tour each component from
            # its endpoint and join them with the removed (start, end) edge.
            subg1 = nx.subgraph(even_g, comps[0])
            subg2 = nx.subgraph(even_g, comps[1])
            start_subg, end_subg = (subg1, subg2) if start in subg1.nodes() else (subg2, subg1)
            trail = list(nx.eulerian_circuit(start_subg, start)) + [(start, end)] + list(nx.eulerian_circuit(end_subg, end))
        else:
            raise Exception('Unknown edge case with connected components of size {0}:\n{1}'.format(len(comps), comps))
    else:
        # If they are not neighbors, we add an imaginary edge and calculate the euler circuit
        even_g.add_edge(start, end)
        circ = list(nx.eulerian_circuit(even_g, start))
        # Cut the circuit at the imaginary edge (in either orientation)
        # and splice the remainder into an open trail from start to end.
        try:
            trail_start = circ.index((start, end))
        except:
            trail_start = circ.index((end, start))
        trail = circ[trail_start+1:] + circ[:trail_start]
    return trail
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def greedy_trails(subg, odds, verbose):
    '''Greedily select trails by making the longest you can until the end.

    Builds a mutable adjacency map of subg, then repeatedly walks from
    each node, consuming edges (removed from both endpoints) until the
    walk gets stuck; each walk becomes one trail. Every edge ends up in
    exactly one trail. ``odds`` is unused here.

    Returns a list of trails, each a list of (u, v) edge tuples.
    '''
    if verbose:
        print('\tCreating edge map')
    edges = defaultdict(list)
    for x,y in subg.edges():
        edges[x].append(y)
        edges[y].append(x)
    if verbose:
        print('\tSelecting trails')
    trails = []
    for x in subg.nodes():
        if verbose > 2:
            print('\t\tNode {0}'.format(x))
        # Keep starting new trails from x while it still has unused edges.
        while len(edges[x]) > 0:
            y = edges[x][0]
            trail = [(x,y)]
            # Consume the edge in both directions of the adjacency map.
            edges[x].remove(y)
            edges[y].remove(x)
            # Extend the trail as far as possible from the current end.
            while len(edges[y]) > 0:
                x = y
                y = edges[y][0]
                trail.append((x,y))
                edges[x].remove(y)
                edges[y].remove(x)
            trails.append(trail)
    return trails
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decompose_graph(g, heuristic='tour', max_odds=20, verbose=0):
    '''Decompose a graph into a set of non-overlapping trails.

    Repeatedly strips trails from each connected component — an Euler
    circuit/tour when 0 or 2 odd-degree nodes exist, otherwise a trail
    chosen by ``heuristic`` — until no edges remain. Components that get
    disconnected along the way are re-queued as new subgraphs.

    Returns the list of trails ("chains"), each a list of edge tuples.
    '''
    # Get the connected subgraphs
    subgraphs = [nx.subgraph(g, x).copy() for x in nx.connected_components(g)]
    chains = []
    num_subgraphs = len(subgraphs)
    step = 0
    while num_subgraphs > 0:
        if verbose:
            print('Step #{0} ({1} subgraphs)'.format(step, num_subgraphs))
        # Iterate backwards so subgraphs can be deleted by index safely.
        for i in range(num_subgraphs-1, -1, -1):
            subg = subgraphs[i]
            # Get all odd-degree nodes
            odds = [x for x,y in dict(nx.degree(subg)).items() if y % 2 == 1]
            if verbose > 1:
                if len(odds) == 0:
                    print('\t\tNo odds')
                elif len(odds) == 2:
                    print('\t\tExactly 2 odds')
                else:
                    print('\t\t{0} odds'.format(len(odds)))
            # If there are no odd-degree edges, we can find an euler circuit
            if len(odds) == 0:
                trails = [list(nx.eulerian_circuit(subg))]
            elif len(odds) == 2:
                # If there are only two odd-degree edges, we can find an euler tour
                trails = [calc_euler_tour(subg, odds[0], odds[1])]
            elif heuristic in ['min', 'max', 'median', 'any']:
                trails = select_odd_degree_trail(subg, odds, max_odds, heuristic, verbose)
            elif heuristic == 'random':
                trails = select_random_trail(subg, verbose)
            elif heuristic == 'mindegree':
                trails = select_min_degree_trail(subg, max_odds, verbose)
            elif heuristic == 'ones':
                trails = select_single_edge_trails(subg, verbose)
            elif heuristic == 'tour':
                trails = pseudo_tour_trails(subg, odds, verbose)
            elif heuristic == 'greedy':
                trails = greedy_trails(subg, odds, verbose)
            if verbose > 2:
                print('\t\tTrails: {0}'.format(len(trails)))
            # Remove the trail
            for trail in trails:
                subg.remove_edges_from(trail)
            # Add it to the list of chains
            chains.extend(trails)
            # If the subgraph is empty, remove it from the list
            if subg.number_of_edges() == 0:
                del subgraphs[i]
            else:
                comps = list(nx.connected_components(subg))
                # If the last edge split the graph, add the new subgraphs to the list of subgraphs
                if len(comps) > 1:
                    for x in comps:
                        compg = nx.subgraph(subg, x)
                        # Single-node components carry no edges; drop them.
                        if compg.number_of_edges() > 0:
                            subgraphs.append(compg)
                    del subgraphs[i]
        # Update the count of connected subgraphs
        num_subgraphs = len(subgraphs)
        step += 1
    return chains
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lerp(self, a, t):
""" Lerp. Linear interpolation from self to a""" |
return self.plus(a.minus(self).times(t)); |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interpolate(self, other, t):
    """ Create a new vertex between this vertex and `other` by linearly interpolating all properties using a parameter of `t`. Subclasses should override this to interpolate additional properties. """
    # Both position and normal are lerped with the same parameter t.
    return Vertex(self.pos.lerp(other.pos, t),
                  self.normal.lerp(other.normal, t))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def splitPolygon(self, polygon, coplanarFront, coplanarBack, front, back):
    """ Split `polygon` by this plane if needed, then put the polygon or polygon fragments in the appropriate lists. Coplanar polygons go into either `coplanarFront` or `coplanarBack` depending on their orientation with respect to this plane. Polygons in front or in back of this plane go into either `front` or `back` """
    COPLANAR = 0 # all the vertices are within EPSILON distance from plane
    FRONT = 1 # all the vertices are in front of the plane
    BACK = 2 # all the vertices are at the back of the plane
    SPANNING = 3 # some vertices are in front, some in the back
    # Classify each point as well as the entire polygon into one of the above
    # four classes.
    polygonType = 0
    vertexLocs = []
    numVertices = len(polygon.vertices)
    for i in range(numVertices):
        # Signed distance of the vertex from this plane.
        t = self.normal.dot(polygon.vertices[i].pos) - self.w
        loc = -1
        if t < -Plane.EPSILON:
            loc = BACK
        elif t > Plane.EPSILON:
            loc = FRONT
        else:
            loc = COPLANAR
        # OR-ing per-vertex classes yields COPLANAR, FRONT, BACK or SPANNING.
        polygonType |= loc
        vertexLocs.append(loc)
    # Put the polygon in the correct list, splitting it when necessary.
    if polygonType == COPLANAR:
        # Orientation relative to this plane decides front vs. back list.
        normalDotPlaneNormal = self.normal.dot(polygon.plane.normal)
        if normalDotPlaneNormal > 0:
            coplanarFront.append(polygon)
        else:
            coplanarBack.append(polygon)
    elif polygonType == FRONT:
        front.append(polygon)
    elif polygonType == BACK:
        back.append(polygon)
    elif polygonType == SPANNING:
        f = []
        b = []
        # Walk each edge (vi, vj); emit vertices to the front/back fragments
        # and insert an interpolated vertex where an edge crosses the plane.
        for i in range(numVertices):
            j = (i+1) % numVertices
            ti = vertexLocs[i]
            tj = vertexLocs[j]
            vi = polygon.vertices[i]
            vj = polygon.vertices[j]
            if ti != BACK:
                f.append(vi)
            if ti != FRONT:
                # Clone when the vertex also went to the front fragment so
                # the two fragments don't share mutable vertex objects.
                if ti != BACK:
                    b.append(vi.clone())
                else:
                    b.append(vi)
            if (ti | tj) == SPANNING:
                # interpolation weight at the intersection point
                t = (self.w - self.normal.dot(vi.pos)) / self.normal.dot(vj.pos.minus(vi.pos))
                # intersection point on the plane
                v = vi.interpolate(vj, t)
                f.append(v)
                b.append(v.clone())
        # Degenerate fragments (fewer than 3 vertices) are dropped.
        if len(f) >= 3:
            front.append(Polygon(f, polygon.shared))
        if len(b) >= 3:
            back.append(Polygon(b, polygon.shared))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invert(self):
    """ Convert solid space to empty space and empty space to solid space. """
    # Flip every polygon and this node's splitting plane, recurse into the
    # children, then exchange the two subtrees.
    for polygon in self.polygons:
        polygon.flip()
    self.plane.flip()
    if self.front:
        self.front.invert()
    if self.back:
        self.back.invert()
    self.front, self.back = self.back, self.front
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clipPolygons(self, polygons):
    """ Recursively remove all polygons in `polygons` that are inside this BSP tree. """
    # A node without a splitting plane cannot reject anything.
    if not self.plane:
        return list(polygons)
    frontList = []
    backList = []
    for polygon in polygons:
        # Coplanar fragments are routed into the same lists as the
        # front/back fragments.
        self.plane.splitPolygon(polygon, frontList, backList, frontList, backList)
    if self.front:
        frontList = self.front.clipPolygons(frontList)
    # Polygons reaching a back leaf are inside the solid: discard them.
    backList = self.back.clipPolygons(backList) if self.back else []
    return frontList + backList
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clipTo(self, bsp):
    """ Remove all polygons in this BSP tree that are inside the other BSP tree `bsp`. """
    # Clip this node's own polygon set against the whole other tree,
    # then recurse into both children.
    self.polygons = bsp.clipPolygons(self.polygons)
    if self.front:
        self.front.clipTo(bsp)
    if self.back:
        self.back.clipTo(bsp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def allPolygons(self):
    """ Return a list of all polygons in this BSP tree. """
    # Pre-order collection: this node's polygons first, then each subtree.
    result = list(self.polygons)
    for child in (self.front, self.back):
        if child:
            result.extend(child.allPolygons())
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_rate(currency):
    """Returns the rate from the default currency to `currency`.

    Raises CurrencyConversionException when no Rate row exists for the
    currency under the default rate source.
    """
    source = get_rate_source()
    try:
        return Rate.objects.get(source=source, currency=currency).value
    except Rate.DoesNotExist:
        # Grammar fixed in the user-facing message
        # ("do not exists" -> "does not exist").
        raise CurrencyConversionException(
            "Rate for %s in %s does not exist. "
            "Please run python manage.py update_rates" % (
                currency, source.name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_rate_source():
    """Get the default Rate Source and return it.

    Raises CurrencyConversionException when no RateSource row exists for
    the configured backend.
    """
    backend = money_rates_settings.DEFAULT_BACKEND()
    try:
        return RateSource.objects.get(name=backend.get_source_name())
    except RateSource.DoesNotExist:
        # Grammar fixed in the user-facing message
        # ("do not exists" -> "does not exist").
        raise CurrencyConversionException(
            "Rate for %s source does not exist. "
            "Please run python manage.py update_rates" % backend.get_source_name())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def base_convert_money(amount, currency_from, currency_to):
    """ Convert 'amount' from 'currency_from' to 'currency_to' """
    source = get_rate_source()
    # The base currency's rate is 1 by definition; anything else is looked
    # up in the stored rates.
    if source.base_currency == currency_from:
        rate_from = Decimal(1)
    else:
        rate_from = get_rate(currency_from)
    rate_to = get_rate(currency_to)
    # Quantize floats up front to avoid binary-float noise in Decimal math.
    if isinstance(amount, float):
        amount = Decimal(amount).quantize(Decimal('.000001'))
    converted = (amount / rate_from) * rate_to
    # Final amounts are reported with two decimal places.
    return converted.quantize(Decimal("1.00"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_money(amount, currency_from, currency_to):
    """ Convert 'amount' from 'currency_from' to 'currency_to' and return a Money instance of the converted amount. """
    # Delegate the arithmetic, then wrap the result in a Money value.
    return moneyed.Money(
        base_convert_money(amount, currency_from, currency_to),
        currency_to)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_date(format_string=None, datetime_obj=None):
    """ Format a datetime object with Java SimpleDateFormat's-like string.

    If datetime_obj is not given - use current datetime.
    If format_string is not given - return number of milliseconds since epoch.

    :param format_string: SimpleDateFormat-style pattern, or None
    :param datetime_obj: datetime to format; defaults to now()
    :return: formatted string (or epoch milliseconds as a string)
    :rtype: string
    """
    datetime_obj = datetime_obj or datetime.now()
    if format_string is None:
        # datetime.timestamp() is portable; the original strftime("%s") is a
        # platform-specific (glibc) extension and fails on Windows.
        seconds = int(datetime_obj.timestamp())
        milliseconds = datetime_obj.microsecond // 1000
        return str(seconds * 1000 + milliseconds)
    else:
        formatter = SimpleDateFormat(format_string)
        return formatter.format_datetime(datetime_obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def allZero(buffer):
    """ Tries to determine if a buffer is empty. @type buffer: str @param buffer: Buffer to test if it is empty. @rtype: bool @return: C{True} if the given buffer is empty, i.e. full of zeros, C{False} if it doesn't. """
    # Empty means every byte equals NUL; all() short-circuits on the first
    # non-zero byte, mirroring the original early 'break'.
    return all(byte == "\x00" for byte in buffer)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readAlignedString(self, align = 4):
    """ Reads an ASCII string aligned to the next align-bytes boundary. @type align: int @param align: (Optional) The value we want the ASCII string to be aligned. @rtype: str @return: A 4-bytes aligned (default) ASCII string. """
    s = self.readString()
    # Bytes still needed to reach the next alignment boundary.
    # NOTE(review): when len(s) is already a multiple of `align`, r == align
    # (not 0), so a full extra alignment block is consumed — confirm intended.
    r = align - len(s) % align
    while r:
        s += self.data[self.offset]
        self.offset += 1
        r -= 1
    # Trailing NUL padding is stripped from the returned string.
    return s.rstrip("\x00")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readAt(self, offset, size):
    """ Reads as many bytes indicated in the size parameter at the specific offset.

    @type offset: int
    @param offset: Offset of the value to be read.
    @type size: int
    @param size: This parameter indicates how many bytes are going to be read from a given offset.
    @rtype: str
    @return: A packed string containing the read data.
    """
    if offset > self.length:
        if self.log:
            # Fixed NameError: the original referenced the undefined name
            # 'nroBytes'; the requested byte count is 'size'. Parenthesized
            # print works on both Python 2 and 3.
            print("Warning: Trying to read: %d bytes - only %d bytes left" % (size, self.length - self.offset))
        # NOTE(review): clamping the requested offset to length - current
        # cursor mixes two different quantities — confirm this is intended.
        offset = self.length - self.offset
    # Save the cursor, perform the positioned read, then restore it.
    tmpOff = self.tell()
    self.setOffset(offset)
    r = self.read(size)
    self.setOffset(tmpOff)
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send(self, message, channel_name=None, fail_silently=False, options=None):
    # type: (Text, Optional[str], bool, Optional[SendOptions]) -> None
    """Send a notification to channels.

    :param message: A message
    :param channel_name: restrict delivery to a single configured channel
    :param fail_silently: forwarded to each backend's send()
    :param options: backend-specific send options
    """
    if channel_name is None:
        channels = self.settings["CHANNELS"]
    else:
        try:
            channels = {
                "__selected__": self.settings["CHANNELS"][channel_name]
            }
        except KeyError:
            # Fixed: the original passed channel_name as a second Exception
            # argument instead of %-formatting it into the message.
            raise Exception("channels does not exist %s" % channel_name)
    for config in channels.values():
        if "_backend" not in config:
            raise ImproperlyConfigured(
                "Specify the backend class in the channel configuration")
        backend = self._load_backend(config["_backend"])  # type: Any
        # Deep-copy so deleting '_backend' below doesn't mutate the
        # stored settings.
        config = deepcopy(config)
        del config["_backend"]
        channel = backend(**config)
        channel.send(message, fail_silently=fail_silently, options=options)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, method, path, params=None, headers=None, cookies=None, data=None, json=None, allow_redirects=None, timeout=None):
    """ Prepares and sends an HTTP request. Returns the HTTPResponse object.

    :param method: str
    :param path: str
    :return: response
    :rtype: HTTPResponse
    """
    headers = headers or {}
    # Per-call settings fall back to the instance defaults.
    timeout = timeout if timeout is not None else self._timeout
    allow_redirects = allow_redirects if allow_redirects is not None else self._allow_redirects
    if self._keep_alive and self.__session is None:
        self.__session = requests.Session()
    if self.__session is not None and not self._use_cookies:
        self.__session.cookies.clear()
    address = self._bake_address(path)
    # Merge instance-level headers with the caller's (caller wins).
    req_headers = copy.deepcopy(self._additional_headers)
    req_headers.update(headers)
    # Fixed: the merged req_headers were built but never used — the original
    # passed the bare 'headers', silently dropping self._additional_headers.
    response = http.request(method, address, session=self.__session,
                            params=params, headers=req_headers, cookies=cookies, data=data, json=json,
                            allow_redirects=allow_redirects, timeout=timeout)
    if self._auto_assert_ok:
        response.assert_ok()
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_genomic_CDR3_anchor_pos_and_functionality(anchor_pos_file_name):
    """Read anchor position and functionality from file.

    Parameters
    ----------
    anchor_pos_file_name : str
        File name for the functionality and position of a conserved residue
        that defines the CDR3 region for each V or J germline sequence.

    Returns
    -------
    anchor_pos_and_functionality : dict
        Residue anchor position and functionality for each gene/allele.
    """
    anchor_pos_and_functionality = {}
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original never closed the file).
    with open(anchor_pos_file_name, 'r') as anchor_pos_file:
        first_line = True
        for line in anchor_pos_file:
            if first_line:
                # Skip the header row.
                first_line = False
                continue
            split_line = [x.strip() for x in line.split(',')]
            # Value is [anchor position, functionality without parentheses].
            anchor_pos_and_functionality[split_line[0]] = [int(split_line[1]), split_line[2].strip('()')]
    return anchor_pos_and_functionality
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_cutV_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline V sequences.

    The maximum number of palindromic insertions are appended to the
    germline V segments so that delV can index directly for number of
    nucleotides to delete from a segment.

    Sets the attribute cutV_genomic_CDR3_segs.
    """
    max_palindrome = self.max_delV_palindrome
    # A segment shorter than the maximum palindrome length can only carry
    # as many palindromic nucleotides as its own length.
    self.cutV_genomic_CDR3_segs = [
        cutR_seq(seg, 0, min(len(seg), max_palindrome))
        for seg in (gene[1] for gene in self.genV)
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_cutJ_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline J sequences.

    The maximum number of palindromic insertions are appended to the
    germline J segments so that delJ can index directly for number of
    nucleotides to delete from a segment.

    Sets the attribute cutJ_genomic_CDR3_segs.
    """
    max_palindrome = self.max_delJ_palindrome
    # A segment shorter than the maximum palindrome length can only carry
    # as many palindromic nucleotides as its own length.
    self.cutJ_genomic_CDR3_segs = [
        cutL_seq(seg, 0, min(len(seg), max_palindrome))
        for seg in (gene[1] for gene in self.genJ)
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_cutD_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline D sequences. The maximum number of palindromic insertions are appended to both ends of the germline D segments so that delDl and delDr can index directly for number of nucleotides to delete from a segment. Sets the attribute cutD_genomic_CDR3_segs. """
    max_palindrome_L = self.max_delDl_palindrome
    max_palindrome_R = self.max_delDr_palindrome
    self.cutD_genomic_CDR3_segs = []
    for CDR3_D_seg in [x[1] for x in self.genD]:
        # Very short D segments cannot support the full palindrome on both
        # sides, so both cuts are capped at the segment length.
        if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
        else:
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_and_fill_address_paths_from_bip32key(address_paths, master_key, network):
    '''
    Take address paths and verifies their accuracy client-side.

    Also fills in all the available metadata (WIF, public key, etc)
    '''
    assert network, network
    wallet_obj = Wallet.deserialize(master_key, network=network)
    address_paths_cleaned = []
    for address_path in address_paths:
        path = address_path['path']
        input_address = address_path['address']
        # Re-derive the child key locally and require it to match the
        # server-supplied address.
        child_wallet = wallet_obj.get_child_for_path(path)
        if child_wallet.to_address() != input_address:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                path,
                master_key,
                child_wallet.to_address(),
                input_address,
                )
            raise Exception(err_msg)
        pubkeyhex = child_wallet.get_public_key_hex(compressed=True)
        # The server's public key is optional; verify only when present.
        server_pubkeyhex = address_path.get('public')
        if server_pubkeyhex and server_pubkeyhex != pubkeyhex:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                path,
                master_key,
                pubkeyhex,
                server_pubkeyhex,
                )
            raise Exception(err_msg)
        address_path_cleaned = {
            'pub_address': input_address,
            'path': path,
            'pubkeyhex': pubkeyhex,
            }
        # Private material is only available when master_key holds a
        # private key (e.g. an xpriv).
        if child_wallet.private_key:
            privkeyhex = child_wallet.get_private_key_hex()
            address_path_cleaned['wif'] = child_wallet.export_to_wif()
            address_path_cleaned['privkeyhex'] = privkeyhex
        address_paths_cleaned.append(address_path_cleaned)
    return address_paths_cleaned
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, lam, initial_values=None):
    '''Run the graph-fused logit lasso with a fixed lambda penalty.'''
    # k == 0 with explicit trails uses the trail-based solver, which
    # carries an extra set of slack variables (zs).
    if initial_values is not None:
        if self.k == 0 and self.trails is not None:
            betas, zs, us = initial_values
        else:
            betas, us = initial_values
    else:
        # Cold start: zero-initialize one vector set per bin.
        if self.k == 0 and self.trails is not None:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
        else:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]
    for j, (left, mid, right, trials, successes) in enumerate(self.bins):
        # Optionally restrict the solve to a subset of bins.
        if self.bins_allowed is not None and j not in self.bins_allowed:
            continue
        if self.verbose > 2:
            print('\tBin #{0} [{1},{2},{3}]'.format(j, left, mid, right))
        # if self.verbose > 3:
        #     print 'Trials:\n{0}'.format(pretty_str(trials))
        #     print ''
        #     print 'Successes:\n{0}'.format(pretty_str(successes))
        beta = betas[j]
        u = us[j]
        if self.k == 0 and self.trails is not None:
            z = zs[j]
            # Run the graph-fused lasso algorithm (solves in place,
            # writing into beta, z, u).
            self.graphfl(len(beta), trials, successes,
                         self.ntrails, self.trails, self.breakpoints,
                         lam, self.alpha, self.inflate,
                         self.max_steps, self.converge,
                         beta, z, u)
        else:
            # Run the graph trend filtering algorithm (solves in place,
            # writing into beta, u).
            self.graphtf(len(beta), trials, successes, lam,
                         self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
                         self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
                         self.max_steps, self.converge,
                         beta, u)
        beta = np.clip(beta, 1e-12, 1-1e-12) # numerical stability
        betas[j] = -np.log(1./beta - 1.) # convert back to natural parameter form
    return (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_log_likelihood(self, successes, trials, beta):
    '''Calculates the log-likelihood of a Polya tree bin given the beta values.'''
    # Map the natural parameter beta to a success probability via the
    # logistic function, then sum the binomial log-pmf terms.
    probs = 1.0 / (1.0 + np.exp(-beta))
    return binom.logpmf(successes, trials, probs).sum()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spawn_worker(params):
    """ This method has to be module level function

    (presumably so the worker spawner can pickle it — confirm)

    :type params: Params
    """
    setup_logging(params)
    log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency,
             params.report)
    worker = Worker(params)
    worker.start()
    # Block until the worker finishes.
    worker.join()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_plateaus(data, edges, plateau_size, plateau_vals, plateaus=None):
    '''Creates plateaus of constant value in the data.

    edges: adjacency dict mapping node -> iterable of neighbor nodes.
    When `plateaus` is not given, each plateau is grown by a random walk
    over the graph until it reaches plateau_size nodes (or candidates run
    out); the nodes of each plateau are then overwritten in `data` with
    the matching value from plateau_vals. Returns the plateaus (sets of
    node indices).
    '''
    nodes = set(edges.keys())
    if plateaus is None:
        plateaus = []
        for i in range(len(plateau_vals)):
            if len(nodes) == 0:
                break
            # Random seed node for this plateau.
            node = np.random.choice(list(nodes))
            nodes.remove(node)
            plateau = [node]
            # Frontier: unassigned neighbors of the plateau so far.
            available = set(edges[node]) & nodes
            while len(nodes) > 0 and len(available) > 0 and len(plateau) < plateau_size:
                node = np.random.choice(list(available))
                plateau.append(node)
                available |= nodes & set(edges[node])
                available.remove(node)
            # NOTE(review): a node already in `plateau` can re-enter
            # `available` via a later neighbor expansion and be appended
            # twice — confirm duplicates are acceptable here.
            nodes -= set(plateau)
            plateaus.append(set(plateau))
    for p,v in zip(plateaus, plateau_vals):
        data[np.array(list(p), dtype=int)] = v
    return plateaus
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretty_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print a matrix or vector.'''
    # Dispatch on dimensionality; anything other than 1-D or 2-D is an error.
    ndim = len(p.shape)
    if ndim == 1:
        return vector_str(p, decimal_places, print_zero)
    if ndim == 2:
        return matrix_str(p, decimal_places, print_zero, label_columns)
    raise Exception('Invalid array with shape {0}'.format(p.shape))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def matrix_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print the matrix.'''
    rows = []
    for idx, row in enumerate(p):
        # Optional numeric row label in front of each formatted row.
        prefix = str(idx) if label_columns else ''
        rows.append(prefix + vector_str(row, decimal_places, print_zero))
    # Rows after the first are indented one space to line up under '['.
    return '[{0}]'.format("\n ".join(rows))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vector_str(p, decimal_places=2, print_zero=True):
    '''Pretty-print the vector values.'''
    fmt = '{{0:.{0}f}}'.format(decimal_places)
    cells = []
    for value in p:
        # Zeros may be blanked out to make sparse vectors easier to scan.
        if not print_zero and value == 0:
            cells.append(' ')
        else:
            cells.append(fmt.format(value))
    return '[{0}]'.format(", ".join(cells))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nearly_unique(arr, rel_tol=1e-4, verbose=0):
    '''Heuristic method to return the uniques within some precision in a numpy array'''
    # Seed with the first element, then keep any value whose distance to
    # every accepted value exceeds rel_tol.
    results = np.array([arr[0]])
    for value in arr:
        if np.abs(results - value).min() > rel_tol:
            results = np.append(results, value)
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_delta(D, k):
    '''Calculate the k-th order trend filtering matrix given the oriented edge
    incidence matrix and the value of k.'''
    if k < 0:
        raise Exception('k must be at least 0th order.')
    result = D
    # Alternate products: even steps multiply by D^T, odd steps by D.
    for step in range(k):
        result = (D.T if step % 2 == 0 else D).dot(result)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decompose_delta(deltak):
    '''Decomposes the k-th order trend filtering matrix into a c-compatible set
    of arrays.'''
    # Ensure COO layout so the row/col/data arrays are directly available.
    if not isspmatrix_coo(deltak):
        deltak = coo_matrix(deltak)
    # Row breaks are the cumulative per-row nonzero counts.
    return (deltak.shape[0],
            np.cumsum(deltak.getnnz(1), dtype="int32"),
            deltak.col.astype('int32'),
            deltak.data.astype('double'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hasMZSignature(self, rd):
    """ Check for MZ signature. @type rd: L{ReadData} @param rd: A L{ReadData} object. @rtype: bool @return: True is the given L{ReadData} stream has the MZ signature. Otherwise, False. """
    # The DOS magic occupies the first two bytes of the stream.
    rd.setOffset(0)
    return rd.read(2) == "MZ"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hasPESignature(self, rd):
    """ Check for PE signature. @type rd: L{ReadData} @param rd: A L{ReadData} object. @rtype: bool @return: True is the given L{ReadData} stream has the PE signature. Otherwise, False. """
    rd.setOffset(0)
    # e_lfanew (dword at 0x3c in the DOS header) points at the PE header.
    e_lfanew = unpack("<L", rd.readAt(0x3c, 4))[0]
    return rd.readAt(e_lfanew, 2) == "PE"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
    """ Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format.

    @raise PEException: If an invalid value is found into the PE instance.
    """
    # Ange Albertini (@angie4771) can kill me for this! :)
    if self.dosHeader.e_magic.value != consts.MZ_SIGNATURE:
        # Fixed: the original formatted this message with the non-existent
        # field 'self.dosHeader.magic', raising AttributeError instead of
        # the intended PEException.
        raise excep.PEException("Invalid MZ signature. Found %d instead of %d." % (self.dosHeader.e_magic.value, consts.MZ_SIGNATURE))
    if self.dosHeader.e_lfanew.value > len(self):
        raise excep.PEException("Invalid e_lfanew value. Probably not a PE file.")
    if self.ntHeaders.signature.value != consts.PE_SIGNATURE:
        # Fixed: was 'self.ntHeaders.optionaHeader.signature.value' (typo
        # plus wrong attribute path), which raised AttributeError here.
        raise excep.PEException("Invalid PE signature. Found %d instead of %d." % (self.ntHeaders.signature.value, consts.PE_SIGNATURE))
    if self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value > 0x10:
        # Fixed the 'optionaHeader' typo; parenthesized print is valid on
        # both Python 2 and 3.
        print(excep.PEWarning("Suspicious value for NumberOfRvaAndSizes: %d." % self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readFile(self, pathToFile):
    """ Returns data from a file.

    @type pathToFile: str
    @param pathToFile: Path to the file.
    @rtype: str
    @return: The data from file.
    """
    # Context manager closes the descriptor even if read() raises
    # (the original leaked the handle on error).
    with open(pathToFile, "rb") as fd:
        return fd.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getPaddingDataToSectionOffset(self):
    """ Returns the data between the last section header and the beginning of data from the first section. @rtype: str @return: Data between last section header and the beginning of the first section. """
    # start = end of all headers; the slice runs from there up to the
    # first section's raw-data offset.
    start = self._getPaddingToSectionOffset()
    end = self.sectionHeaders[0].pointerToRawData.value - start
    return self._data[start:start+end]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getSignature(self, readDataInstance, dataDirectoryInstance):
    """ Returns the digital signature within a digital signed PE file. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} instance containing a PE file data. @type dataDirectoryInstance: L{DataDirectory} @param dataDirectoryInstance: A L{DataDirectory} object containing the information about directories. @rtype: str @return: A string with the digital signature; empty when the file is not signed. @raise InstanceErrorException: If the C{readDataInstance} or the C{dataDirectoryInstance} were not specified. """
    signature = ""
    if readDataInstance is not None and dataDirectoryInstance is not None:
        securityDirectory = dataDirectoryInstance[consts.SECURITY_DIRECTORY]
        # Both RVA and size must be non-zero for a signature to be present.
        if(securityDirectory.rva.value and securityDirectory.size.value):
            # NOTE(review): the security directory's "RVA" is normally a raw
            # file offset already — confirm the RVA->offset translation here.
            readDataInstance.setOffset(self.getOffsetFromRva(securityDirectory.rva.value))
            signature = readDataInstance.read(securityDirectory.size.value)
    else:
        raise excep.InstanceErrorException("ReadData instance or DataDirectory instance not specified.")
    return signature
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getOverlay(self, readDataInstance, sectionHdrsInstance):
    """ Returns the overlay data from the PE file.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} instance containing the PE file data.
    @type sectionHdrsInstance: L{SectionHeaders}
    @param sectionHdrsInstance: A L{SectionHeaders} instance containing the information about the sections present in the PE file.
    @rtype: str
    @return: A string with the overlay data from the PE file.
    @raise InstanceErrorException: If the C{readDataInstance} or the C{sectionHdrsInstance} were not specified.
    """
    if readDataInstance is not None and sectionHdrsInstance is not None:
        # adjust the offset in readDataInstance to the RawOffset + RawSize of the last section
        try:
            offset = sectionHdrsInstance[-1].pointerToRawData.value + sectionHdrsInstance[-1].sizeOfRawData.value
            readDataInstance.setOffset(offset)
        except excep.WrongOffsetValueException:
            # Best-effort: an out-of-range offset means no overlay; fall
            # through and slice from the current cursor position.
            if self._verbose:
                # Fixed: parenthesized print is valid on both Python 2
                # and 3 (the bare print statement was Python-2 only).
                print("It seems that the file has no overlay data.")
    else:
        raise excep.InstanceErrorException("ReadData instance or SectionHeaders instance not specified.")
    return readDataInstance.data[readDataInstance.offset:]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getOffsetFromRva(self, rva):
    """ Converts an RVA into a raw file offset. @type rva: int @param rva: The RVA to be converted. @rtype: int @return: An integer value representing an offset in the PE file. """
    section = self.getSectionByRva(rva)
    if section == -1:
        # RVAs outside every section (e.g. inside the headers) map 1:1.
        return rva
    header = self.sectionHeaders[section]
    return (rva - header.virtualAddress.value) + header.pointerToRawData.value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getRvaFromOffset(self, offset):
    """ Converts a raw file offset to an RVA.

    @type offset: int
    @param offset: The offset value to be converted to RVA.
    @rtype: int
    @return: The RVA obtained from the given offset; -1 when the offset
    does not fall inside any section.
    """
    rva = -1
    s = self.getSectionByOffset(offset)
    # Fixed: compare against the -1 sentinel explicitly. Section index 0
    # is a valid result but is falsy, so the original 'if s:' wrongly
    # returned -1 for any offset inside the first section.
    if s != -1:
        rva = (offset - self.sectionHeaders[s].pointerToRawData.value) + self.sectionHeaders[s].virtualAddress.value
    return rva
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSectionByOffset(self, offset):
    """ Given an offset in the file, tries to determine the section this offset belong to. @type offset: int @param offset: Offset value. @rtype: int @return: An index, starting at 0, that represents the section the given offset belongs to; -1 when no section matches. """
    # Return the first section whose raw-data end lies beyond the offset;
    # -1 when the offset is past the last section's raw data.
    for idx, header in enumerate(self.sectionHeaders):
        if offset < header.pointerToRawData.value + header.sizeOfRawData.value:
            return idx
    return -1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSectionIndexByName(self, name):
    """ Given a string representing a section name, tries to find the section index. @type name: str @param name: A section name. @rtype: int @return: The index, starting at 0, of the section; -1 when not found. """
    # Substring match against each header's name; first hit wins. An
    # empty/None name never matches.
    if name:
        for idx, header in enumerate(self.sectionHeaders):
            if name in header.name.value:
                return idx
    return -1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSectionByRva(self, rva):
    """ Given a RVA in the file, tries to determine the section this RVA belongs to. @type rva: int @param rva: RVA value. @rtype: int @return: An index, starting at 0, that represents the section the given RVA belongs to; -1 when the RVA precedes the first section or matches none. """
    index = -1
    # RVAs below the first section live in the headers region.
    if rva < self.sectionHeaders[0].virtualAddress.value:
        return index
    for i in range(len(self.sectionHeaders)):
        fa = self.ntHeaders.optionalHeader.fileAlignment.value
        prd = self.sectionHeaders[i].pointerToRawData.value
        srd = self.sectionHeaders[i].sizeOfRawData.value
        # If the section's raw data would run past the end of the file,
        # fall back to the virtual size (misc); otherwise use the larger
        # of raw size and virtual size as the section extent.
        if len(str(self)) - self._adjustFileAlignment(prd, fa) < srd:
            size = self.sectionHeaders[i].misc.value
        else:
            size = max(srd, self.sectionHeaders[i].misc.value)
        if (self.sectionHeaders[i].virtualAddress.value <= rva) and rva < (self.sectionHeaders[i].virtualAddress.value + size):
            index = i
            break
    return index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getPaddingToSectionOffset(self):
    """ Returns the offset at which the section headers end, i.e. the combined serialized size of the DOS header, DOS stub, NT headers and section headers. @rtype: int @return: The offset where the end of the last section header resides in the PE file. """
    return len(str(self.dosHeader) + str(self.dosStub) + str(self.ntHeaders) + str(self.sectionHeaders))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fullLoad(self):
    """Parse every directory entry declared in the optional header's data directory."""
    dataDirectory = self.ntHeaders.optionalHeader.dataDirectory
    self._parseDirectories(dataDirectory, self.PE_TYPE)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fixPe(self):
""" Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage. """ |
sizeOfImage = 0
for sh in self.sectionHeaders:
sizeOfImage += sh.misc
self.ntHeaders.optionaHeader.sizeoOfImage.value = self._sectionAlignment(sizeOfImage + 0x1000) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDataAtRva(self, rva, size):
    """
    Gets binary data at a given RVA.

    @type rva: int
    @param rva: The RVA to get the data from.

    @type size: int
    @param size: The size of the data to be obtained.

    @rtype: str
    @return: The data obtained at the given RVA.
    """
    # Translate the RVA into a raw file offset, then read from there.
    fileOffset = self.getOffsetFromRva(rva)
    return self.getDataAtOffset(fileOffset, size)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDataAtOffset(self, offset, size):
    """
    Gets binary data at a given offset.

    @type offset: int
    @param offset: The offset to get the data from.

    @type size: int
    @param size: The size of the data to be obtained.

    @rtype: str
    @return: The data obtained at the given offset.
    """
    # str(self) renders the whole PE image; slice the requested window.
    return str(self)[offset:offset + size]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parseDelayImportDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the delay imports directory.

    @type rva: int
    @param rva: The RVA where the delay imports directory starts.

    @type size: int
    @param size: The size of the delay imports directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: str
    @return: The raw delay imports directory data.
    """
    # No structured parsing is performed yet -- the directory is returned as
    # raw bytes. The magic parameter keeps signature parity with the other
    # _parseXXX helpers but is currently unused.
    delayImportsData = self.getDataAtRva(rva, size)
    return delayImportsData
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the bound import directory.

    @type rva: int
    @param rva: The RVA where the bound import directory starts.

    @type size: int
    @param size: The size of the bound import directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageBoundImportDescriptor}
    @return: A new L{ImageBoundImportDescriptor} object.
    """
    reader = utils.ReadData(self.getDataAtRva(rva, size))
    boundImports = directories.ImageBoundImportDescriptor.parse(reader)
    # Resolve the module-name string for every descriptor except the
    # terminating (last) entry; name offsets are relative to the directory RVA.
    for descriptorIndex in range(len(boundImports) - 1):
        descriptor = boundImports[descriptorIndex]
        # getattr with a falsy default covers both "attribute missing" and
        # "attribute present but empty", like the original hasattr + truthiness pair.
        if getattr(descriptor, "forwarderRefsList", None):
            for forwarderRef in descriptor.forwarderRefsList:
                forwarderRef.moduleName = self.readStringAtRva(forwarderRef.offsetModuleName.value + rva)
        descriptor.moduleName = self.readStringAtRva(descriptor.offsetModuleName.value + rva)
    return boundImports
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parseLoadConfigDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses IMAGE_LOAD_CONFIG_DIRECTORY.

    @type rva: int
    @param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts.

    @type size: int
    @param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY (unused; see note).

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageLoadConfigDirectory}
    @return: A new L{ImageLoadConfigDirectory} (or L{ImageLoadConfigDirectory64} for PE64).

    @raise InvalidParameterException: If C{magic} is neither PE32 nor PE64.
    """
    # Some DLLs disagree with MSDN on sizeof(IMAGE_LOAD_CONFIG_DIRECTORY):
    # Visual Studio writes 0x40 in the directory table even when SAFESEH:ON
    # would imply 0x48, so read the struct's own sizeof() instead of the
    # directory-table size. More information:
    # http://www.accuvant.com/blog/old-meets-new-microsoft-windows-safeseh-incompatibility
    structSize = directories.ImageLoadConfigDirectory().sizeof()
    reader = utils.ReadData(self.getDataAtRva(rva, structSize))
    if magic == consts.PE64:
        return directories.ImageLoadConfigDirectory64.parse(reader)
    if magic == consts.PE32:
        return directories.ImageLoadConfigDirectory.parse(reader)
    raise excep.InvalidParameterException("Wrong magic")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parseTlsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the TLS directory.

    @type rva: int
    @param rva: The RVA where the TLS directory starts.

    @type size: int
    @param size: The size of the TLS directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{TLSDirectory}
    @return: A new L{TLSDirectory} (or L{TLSDirectory64} for PE64).

    @raise InvalidParameterException: If C{magic} is neither PE32 nor PE64.
    """
    reader = utils.ReadData(self.getDataAtRva(rva, size))
    if magic == consts.PE64:
        return directories.TLSDirectory64.parse(reader)
    if magic == consts.PE32:
        return directories.TLSDirectory.parse(reader)
    raise excep.InvalidParameterException("Wrong magic")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parseRelocsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the relocation directory.

    @type rva: int
    @param rva: The RVA where the relocation directory starts.

    @type size: int
    @param size: The size of the relocation directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageBaseRelocation}
    @return: A new L{ImageBaseRelocation} object.
    """
    reader = utils.ReadData(self.getDataAtRva(rva, size))
    relocations = directories.ImageBaseRelocation()
    # Consume relocation blocks until the directory's declared size is exhausted.
    while reader.offset < size:
        relocations.append(directories.ImageBaseRelocationEntry.parse(reader))
    return relocations
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_addresses_on_both_chains(wallet_obj, used=None, zero_balance=None):
    '''
    Get addresses across both subchains (external=0 and internal=1) matching
    the ``used`` / ``zero_balance`` filter criteria.

    Returns a list of dicts of the following form:
        [
            {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
            ...,
        ]
    Dicts may also contain WIF and privkeyhex if wallet_obj has private key
    '''
    mpub = wallet_obj.serialize_b58(private=False)
    wallet_addresses = get_wallet_addresses(
        wallet_name=get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
        ),
        api_key=BLOCKCYPHER_API_KEY,
        is_hd_wallet=True,
        used=used,
        zero_balance=zero_balance,
        coin_symbol=coin_symbol_from_mkey(mpub),
    )
    verbose_print('wallet_addresses:')
    verbose_print(wallet_addresses)
    # Prefer the private master key when available so downstream verification
    # can also derive WIFs / private hex keys.
    master_key = wallet_obj.serialize_b58(private=True) if wallet_obj.private_key else mpub
    results = []
    for chain in wallet_addresses['chains']:
        # Skip subchains with no addresses to verify.
        if not chain['chain_addresses']:
            continue
        filled_paths = verify_and_fill_address_paths_from_bip32key(
            address_paths=chain['chain_addresses'],
            master_key=master_key,
            network=guess_network_from_mkey(mpub),
        )
        results.append({
            'index': chain['index'],
            'chain_addresses': filled_paths,
        })
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_all_keys_or_addrs(wallet_obj):
    '''
    Offline-enabled mechanism to dump addresses

    Traverses the BIP32 tree directly, so no network access is needed.
    Dumps private keys instead of addresses when wallet_obj was opened
    with its master private key.
    '''
    # Make the user explicitly acknowledge the risk of displaying key material.
    print_traversal_warning()
    puts('\nDo you understand this warning?')
    if not confirm(user_prompt=DEFAULT_PROMPT, default=False):
        puts(colored.red('Dump Cancelled!'))
        return
    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key:
        desc_str = 'private keys'
    else:
        desc_str = 'addresses'
        # Public-key-only session: show how to reopen with the private key.
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)
    puts('How many %s (on each chain) do you want to dump?' % desc_str)
    puts('Enter "b" to go back.\n')
    num_keys = get_int(
        user_prompt=DEFAULT_PROMPT,
        max_int=10**5,
        default_input='5',
        show_default=True,
        quit_ok=True,
    )
    # get_int returns False when the user backs out ("b") -- presumably; the
    # explicit `is False` check below distinguishes it from a legitimate 0.
    if num_keys is False:
        return
    if wallet_obj.private_key:
        print_childprivkey_warning()
    puts('-' * 70)
    # Walk both subchains: 0 = external (receive), 1 = internal (change).
    for chain_int in (0, 1):
        for current in range(0, num_keys):
            path = "m/%d/%d" % (chain_int, current)
            # Print the per-chain header before the first entry of each chain.
            if current == 0:
                if chain_int == 0:
                    print_external_chain()
                    print_key_path_header()
                elif chain_int == 1:
                    print_internal_chain()
                    print_key_path_header()
            child_wallet = wallet_obj.get_child_for_path(path)
            if wallet_obj.private_key:
                wif_to_use = child_wallet.export_to_wif()
            else:
                wif_to_use = None
            print_path_info(
                address=child_wallet.to_address(),
                path=path,
                wif=wif_to_use,
                coin_symbol=coin_symbol_from_mkey(mpub),
            )
    puts(colored.blue('\nYou can compare this output to bip32.org'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_selected_keys_or_addrs(wallet_obj, used=None, zero_balance=None):
    '''
    Dump a filtered subset of addresses (or private keys, when wallet_obj was
    opened with its master private key).

    Works for both public key only or private key access. The used /
    zero_balance filters require network access; when offline, the user is
    offered a full offline dump instead.
    '''
    if wallet_obj.private_key:
        content_str = 'private keys'
    else:
        content_str = 'addresses'
    if not USER_ONLINE:
        # BUGFIX: the format string has a single %s but was given a 2-tuple,
        # which raised "not all arguments converted during string formatting".
        puts(colored.red('\nInternet connection required, would you like to dump *all* %s instead?' % content_str))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
        # BUGFIX: always stop here when offline; previously a successful full
        # dump fell through into the online code path below.
        return
    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key is None:
        # Public-key-only session: show how to reopen with the private key.
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)
    chain_address_objs = get_addresses_on_both_chains(
        wallet_obj=wallet_obj,
        used=used,
        zero_balance=zero_balance,
    )
    if wallet_obj.private_key and chain_address_objs:
        print_childprivkey_warning()
    addr_cnt = 0
    for chain_address_obj in chain_address_objs:
        # Subchain 0 = external (receive), 1 = internal (change).
        if chain_address_obj['index'] == 0:
            print_external_chain()
        elif chain_address_obj['index'] == 1:
            print_internal_chain()
        print_key_path_header()
        for address_obj in chain_address_obj['chain_addresses']:
            print_path_info(
                address=address_obj['pub_address'],
                wif=address_obj.get('wif'),
                path=address_obj['path'],
                coin_symbol=coin_symbol_from_mkey(mpub),
            )
            addr_cnt += 1
    if addr_cnt:
        puts(colored.blue('\nYou can compare this output to bip32.org'))
    else:
        # Nothing matched the filters; offer the unfiltered dump.
        puts('No matching %s in this subset. Would you like to dump *all* %s instead?' % (
            content_str,
            content_str,
        ))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_private_keys_or_addrs_chooser(wallet_obj):
    '''
    Offline-enabled mechanism to dump everything

    Presents a menu of address subsets and dispatches to the matching dump
    helper. Options 1-3 need network access to resolve the filters; option 0
    works offline.
    '''
    if wallet_obj.private_key:
        puts('Which private keys and addresses do you want?')
    else:
        puts('Which addresses do you want?')
    with indent(2):
        puts(colored.cyan('1: Active - have funds to spend'))
        puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
        puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
        puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
        puts(colored.cyan('\nb: Go Back\n'))
    # NOTE(review): acceptable_responses holds ints while choice is compared to
    # strings below -- presumably choice_prompt normalizes input either way;
    # confirm against its implementation.
    choice = choice_prompt(
        user_prompt=DEFAULT_PROMPT,
        acceptable_responses=[0, 1, 2, 3],
        default_input='1',
        show_default=True,
        quit_ok=True,
    )
    # A False return presumably signals the user backed out ("b") -- TODO confirm.
    if choice is False:
        return
    if choice == '1':
        # Active: used addresses that still carry a balance.
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=False, used=True)
    elif choice == '2':
        # Spent: used addresses whose balance is now zero.
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=True, used=True)
    elif choice == '3':
        # Unused: addresses never seen on-chain (balance filter not applicable).
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=None, used=False)
    elif choice == '0':
        # All: offline traversal, no filtering.
        return dump_all_keys_or_addrs(wallet_obj=wallet_obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _send_merge_commands(self, config, file_config):
    """
    Netmiko is being used to push set commands.

    :param config: set-style commands; either a string or a list of command
        strings.
    :param file_config: truthy when ``config`` came from a file (string is
        split on newlines) rather than an inline value.
    :raises MergeConfigException: if the pre-change backup cannot be stored.
    """
    if self.loaded is False:
        # Snapshot the current config before the first change so a later
        # compare/rollback has a baseline.
        if self._save_backup() is False:
            raise MergeConfigException('Error while storing backup '
                                       'config.')
    if self.ssh_connection is False:
        self._open_ssh()
    if file_config:
        if isinstance(config, str):
            config = config.splitlines()
    else:
        if isinstance(config, str):
            # NOTE(review): this splits on *any* whitespace, unlike the file
            # branch's splitlines() -- a multi-word inline command would be
            # broken apart. Presumably intentional; confirm before relying on
            # inline configs containing spaces.
            config = str(config).split()
    self.ssh_device.send_config_set(config)
    # Mark that a candidate config change has been loaded via merge.
    self.loaded = True
    self.merge_config = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_config(self):
    """
    Return the candidate-vs-running configuration diff, stripped of
    surrounding whitespace.

    Netmiko is used here because pan-python does not support the
    "show config diff" command.
    """
    if self.ssh_connection is False:
        self._open_ssh()
    # Diffs must be requested from operational mode.
    self.ssh_device.exit_config_mode()
    return self.ssh_device.send_command("show config diff").strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.