Dataset schema (one row per field: name, type, observed min/max):

    repository_name            string   7 - 55 chars
    func_path_in_repository    string   4 - 223 chars
    func_name                  string   1 - 134 chars
    whole_func_string          string   75 - 104k chars
    language                   string   1 distinct value
    func_code_string           string   75 - 104k chars
    func_code_tokens           list     19 - 28.4k items
    func_documentation_string  string   1 - 46.9k chars
    func_documentation_tokens  list     1 - 1.97k items
    split_name                 string   1 distinct value
    func_code_url              string   87 - 315 chars
crackinglandia/pype32
pype32/pype32.py
PE.write
def write(self, filename=""):
    """
    Writes data from the L{PE} object to a file. If no filename is given,
    the raw stream data is returned instead.

    @rtype: str
    @return: The L{PE} stream data (only when no C{filename} was specified).

    @raise IOError: If the file could not be opened for write operations.
    """
    file_data = str(self)
    if filename:
        try:
            self.__write(filename, file_data)
        except IOError:
            raise IOError("File could not be opened for write operations.")
    else:
        return file_data
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L201-L217
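For context, a minimal usage sketch of the two write paths above. It assumes the L{PE} constructor accepts a path to an existing file, as in the project's README; the file names are hypothetical.

import pype32

pe = pype32.PE("calc.exe")  # hypothetical input file

# path given: the serialized image is written to disk
pe.write("calc_patched.exe")

# no path given: the raw bytes are returned instead
raw = pe.write()
print(len(raw))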
crackinglandia/pype32
pype32/pype32.py
PE.__write
def __write(self, thePath, theData):
    """
    Write data to a file.

    @type thePath: str
    @param thePath: The file path.

    @type theData: str
    @param theData: The data to write.
    """
    fd = open(thePath, "wb")
    fd.write(theData)
    fd.close()
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L219-L231
crackinglandia/pype32
pype32/pype32.py
PE._updateDirectoriesData
def _updateDirectoriesData(self, peStr):
    """
    Updates the data in every L{Directory} object.

    @type peStr: str
    @param peStr: C{str} representation of the L{PE} object.

    @rtype: str
    @return: A C{str} representation of the L{PE} object with the directory data updated.
    """
    dataDirs = self.ntHeaders.optionalHeader.dataDirectory
    # NOTE: the original code referenced an undefined name "data" here;
    # peStr is the only str argument in scope.
    wr = utils.WriteData(peStr)
    for dir in dataDirs:
        dataToWrite = str(dir.info)
        if len(dataToWrite) != dir.size.value and self._verbose:
            print excep.DataLengthException("Warning: directory size (%d) does not match dataToWrite length (%d)." % (dir.size.value, len(dataToWrite)))
        wr.setOffset(self.getOffsetFromRva(dir.rva.value))
        wr.write(dataToWrite)
    return str(wr)
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L247-L266
crackinglandia/pype32
pype32/pype32.py
PE._getPaddingDataToSectionOffset
def _getPaddingDataToSectionOffset(self):
    """
    Returns the data between the last section header and the beginning of data from the first section.

    @rtype: str
    @return: Data between the last section header and the beginning of the first section.
    """
    start = self._getPaddingToSectionOffset()
    end = self.sectionHeaders[0].pointerToRawData.value - start
    return self._data[start:start + end]
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L268-L277
crackinglandia/pype32
pype32/pype32.py
PE._getSignature
def _getSignature(self, readDataInstance, dataDirectoryInstance):
    """
    Returns the digital signature within a digitally signed PE file.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} instance containing the PE file data.

    @type dataDirectoryInstance: L{DataDirectory}
    @param dataDirectoryInstance: A L{DataDirectory} object containing the information about the directories.

    @rtype: str
    @return: A string with the digital signature.

    @raise InstanceErrorException: If the C{readDataInstance} or the C{dataDirectoryInstance} were not specified.
    """
    signature = ""
    if readDataInstance is not None and dataDirectoryInstance is not None:
        securityDirectory = dataDirectoryInstance[consts.SECURITY_DIRECTORY]
        if securityDirectory.rva.value and securityDirectory.size.value:
            readDataInstance.setOffset(self.getOffsetFromRva(securityDirectory.rva.value))
            signature = readDataInstance.read(securityDirectory.size.value)
    else:
        raise excep.InstanceErrorException("ReadData instance or DataDirectory instance not specified.")
    return signature
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L279-L306
crackinglandia/pype32
pype32/pype32.py
PE._getOverlay
def _getOverlay(self, readDataInstance, sectionHdrsInstance):
    """
    Returns the overlay data from the PE file.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} instance containing the PE file data.

    @type sectionHdrsInstance: L{SectionHeaders}
    @param sectionHdrsInstance: A L{SectionHeaders} instance containing the information about the sections present in the PE file.

    @rtype: str
    @return: A string with the overlay data from the PE file.

    @raise InstanceErrorException: If the C{readDataInstance} or the C{sectionHdrsInstance} were not specified.
    """
    if readDataInstance is not None and sectionHdrsInstance is not None:
        # adjust the offset in readDataInstance to the RawOffset + RawSize of the last section
        try:
            offset = sectionHdrsInstance[-1].pointerToRawData.value + sectionHdrsInstance[-1].sizeOfRawData.value
            readDataInstance.setOffset(offset)
        except excep.WrongOffsetValueException:
            if self._verbose:
                print "It seems that the file has no overlay data."
    else:
        raise excep.InstanceErrorException("ReadData instance or SectionHeaders instance not specified.")
    return readDataInstance.data[readDataInstance.offset:]
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L308-L334
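A standalone sketch of the overlay computation used above, under the assumption the code itself makes: everything past PointerToRawData + SizeOfRawData of the last section is overlay.

def overlay_of(file_data, last_section_raw_offset, last_section_raw_size):
    # return the bytes appended past the end of the last section
    end_of_sections = last_section_raw_offset + last_section_raw_size
    return file_data[end_of_sections:]

# e.g. a file whose single section ends at offset 0x400:
data = "A" * 0x400 + "OVERLAY"
assert overlay_of(data, 0x200, 0x200) == "OVERLAY"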
crackinglandia/pype32
pype32/pype32.py
PE.getOffsetFromRva
def getOffsetFromRva(self, rva):
    """
    Converts an RVA to a file offset.

    @type rva: int
    @param rva: The RVA to be converted.

    @rtype: int
    @return: An integer value representing an offset in the PE file.
    """
    s = self.getSectionByRva(rva)
    if s != -1:
        offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
    else:
        # the RVA lies in the headers, before the first section
        offset = rva
    return offset
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L336-L354
crackinglandia/pype32
pype32/pype32.py
PE.getRvaFromOffset
def getRvaFromOffset(self, offset):
    """
    Converts a file offset to an RVA.

    @type offset: int
    @param offset: The offset value to be converted to an RVA.

    @rtype: int
    @return: The RVA obtained from the given offset, or -1 if the offset does not belong to any section.
    """
    rva = -1
    s = self.getSectionByOffset(offset)
    # NOTE: the original check was "if s:", which wrongly discarded
    # offsets belonging to the first section (index 0).
    if s != -1:
        rva = (offset - self.sectionHeaders[s].pointerToRawData.value) + self.sectionHeaders[s].virtualAddress.value
    return rva
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L356-L372
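The two conversions above are inverses of each other within a section; a minimal sketch with hypothetical header values:

def rva_to_offset(rva, virtual_address, pointer_to_raw_data):
    # offset = (rva - VirtualAddress) + PointerToRawData
    return (rva - virtual_address) + pointer_to_raw_data

def offset_to_rva(offset, virtual_address, pointer_to_raw_data):
    # rva = (offset - PointerToRawData) + VirtualAddress
    return (offset - pointer_to_raw_data) + virtual_address

# hypothetical .text section: VirtualAddress = 0x1000, PointerToRawData = 0x400
assert rva_to_offset(0x1234, 0x1000, 0x400) == 0x634
assert offset_to_rva(0x634, 0x1000, 0x400) == 0x1234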
crackinglandia/pype32
pype32/pype32.py
PE.getSectionByOffset
def getSectionByOffset(self, offset):
    """
    Given an offset in the file, tries to determine the section this offset belongs to.

    @type offset: int
    @param offset: Offset value.

    @rtype: int
    @return: An index, starting at 0, that represents the section the given offset belongs to.
    """
    index = -1
    # assumes section headers are sorted by raw offset: the first section
    # whose raw end lies beyond the offset is taken as the match
    for i in range(len(self.sectionHeaders)):
        if offset < self.sectionHeaders[i].pointerToRawData.value + self.sectionHeaders[i].sizeOfRawData.value:
            index = i
            break
    return index
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L374-L389
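Note that the loop above only checks the upper bound of each section's raw range, so an offset inside the headers would still match section 0. A stricter containment check, sketched independently of the class (the tuple layout is an assumption for the example):

def section_index_for_offset(offset, sections):
    # sections: list of (pointerToRawData, sizeOfRawData) tuples
    for i, (raw_offset, raw_size) in enumerate(sections):
        if raw_offset <= offset < raw_offset + raw_size:
            return i
    return -1

assert section_index_for_offset(0x450, [(0x400, 0x200), (0x600, 0x200)]) == 0
assert section_index_for_offset(0x100, [(0x400, 0x200), (0x600, 0x200)]) == -1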
crackinglandia/pype32
pype32/pype32.py
PE.getSectionIndexByName
def getSectionIndexByName(self, name):
    """
    Given a string representing a section name, tries to find the section index.

    @type name: str
    @param name: A section name.

    @rtype: int
    @return: The index, starting at 0, of the section.
    """
    index = -1
    if name:
        for i in range(len(self.sectionHeaders)):
            # substring match against the stored section name
            if self.sectionHeaders[i].name.value.find(name) >= 0:
                index = i
                break
    return index
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L391-L408
crackinglandia/pype32
pype32/pype32.py
PE.getSectionByRva
def getSectionByRva(self, rva):
    """
    Given an RVA in the file, tries to determine the section this RVA belongs to.

    @type rva: int
    @param rva: RVA value.

    @rtype: int
    @return: An index, starting at 0, that represents the section the given RVA belongs to.
    """
    index = -1
    if rva < self.sectionHeaders[0].virtualAddress.value:
        return index

    for i in range(len(self.sectionHeaders)):
        fa = self.ntHeaders.optionalHeader.fileAlignment.value
        prd = self.sectionHeaders[i].pointerToRawData.value
        srd = self.sectionHeaders[i].sizeOfRawData.value

        # if the section data is truncated on disk, fall back to the
        # virtual size; otherwise use the larger of raw and virtual size
        if len(str(self)) - self._adjustFileAlignment(prd, fa) < srd:
            size = self.sectionHeaders[i].misc.value
        else:
            size = max(srd, self.sectionHeaders[i].misc.value)

        if self.sectionHeaders[i].virtualAddress.value <= rva < self.sectionHeaders[i].virtualAddress.value + size:
            index = i
            break
    return index
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L410-L437
crackinglandia/pype32
pype32/pype32.py
PE._getPaddingToSectionOffset
def _getPaddingToSectionOffset(self):
    """
    Returns the offset right past the last section header present in the PE file.

    @rtype: int
    @return: The offset where the end of the last section header resides in the PE file.
    """
    return len(str(self.dosHeader) + str(self.dosStub) + str(self.ntHeaders) + str(self.sectionHeaders))
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L449-L456
crackinglandia/pype32
pype32/pype32.py
PE.fullLoad
def fullLoad(self):
    """Parse all the directories in the PE file."""
    self._parseDirectories(self.ntHeaders.optionalHeader.dataDirectory, self.PE_TYPE)
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L458-L460
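fullLoad pairs with the fastLoad flag used in _internalParse below: a fast-loaded instance skips directory parsing until explicitly asked. A hedged sketch, assuming the constructor exposes fastLoad as the parser code suggests; the file name is hypothetical.

import pype32

# skip directory parsing on load ...
pe = pype32.PE("calc.exe", fastLoad=True)

# ... and parse all data directories later, on demand
pe.fullLoad()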
crackinglandia/pype32
pype32/pype32.py
PE._internalParse
def _internalParse(self, readDataInstance):
    """
    Populates the attributes of the L{PE} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} instance with the data of a PE file.
    """
    self.dosHeader = DosHeader.parse(readDataInstance)
    self.dosStub = readDataInstance.read(self.dosHeader.e_lfanew.value - readDataInstance.offset)
    self.ntHeaders = NtHeaders.parse(readDataInstance)

    if self.ntHeaders.optionalHeader.magic.value == consts.PE32:
        self.PE_TYPE = consts.PE32
    elif self.ntHeaders.optionalHeader.magic.value == consts.PE64:
        self.PE_TYPE = consts.PE64
        # re-parse the optional header as its 64-bit variant
        readDataInstance.setOffset(readDataInstance.tell() - OptionalHeader().sizeof())
        self.ntHeaders.optionalHeader = OptionalHeader64.parse(readDataInstance)

    self.sectionHeaders = SectionHeaders.parse(readDataInstance, self.ntHeaders.fileHeader.numberOfSections.value)

    # as padding is possible between the last section header and the beginning of the first section
    # we must adjust the offset in readDataInstance to point to the first byte of the first section.
    readDataInstance.setOffset(self.sectionHeaders[0].pointerToRawData.value)

    self.sections = Sections.parse(readDataInstance, self.sectionHeaders)
    self.overlay = self._getOverlay(readDataInstance, self.sectionHeaders)
    self.signature = self._getSignature(readDataInstance, self.ntHeaders.optionalHeader.dataDirectory)

    if not self._fastLoad:
        self._parseDirectories(self.ntHeaders.optionalHeader.dataDirectory, self.PE_TYPE)
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L462-L493
crackinglandia/pype32
pype32/pype32.py
PE.addSection
def addSection(self, data, name=".pype32\x00", flags=0x60000000):
    """
    Adds a new section to the existing L{PE} instance.

    @type data: str
    @param data: The data to be added in the new section.

    @type name: str
    @param name: (Optional) The name for the new section.

    @type flags: int
    @param flags: (Optional) The attributes for the new section.
    """
    fa = self.ntHeaders.optionalHeader.fileAlignment.value
    sa = self.ntHeaders.optionalHeader.sectionAlignment.value

    # pad the data up to the next multiple of FileAlignment (the original
    # code computed fa - len(data), which yields no padding at all when
    # the data is longer than one alignment unit)
    padding = "\xcc" * ((fa - len(data) % fa) % fa)

    sh = SectionHeader()

    if len(self.sectionHeaders):
        # get the va, vz, ra and rz of the last section in the array of section headers
        vaLastSection = self.sectionHeaders[-1].virtualAddress.value
        sizeLastSection = self.sectionHeaders[-1].misc.value
        pointerToRawDataLastSection = self.sectionHeaders[-1].pointerToRawData.value
        sizeOfRawDataLastSection = self.sectionHeaders[-1].sizeOfRawData.value

        sh.virtualAddress.value = self._adjustSectionAlignment(vaLastSection + sizeLastSection, fa, sa)
        sh.pointerToRawData.value = self._adjustFileAlignment(pointerToRawDataLastSection + sizeOfRawDataLastSection, fa)

    sh.misc.value = self._adjustSectionAlignment(len(data), fa, sa) or consts.DEFAULT_PAGE_SIZE
    sh.sizeOfRawData.value = self._adjustFileAlignment(len(data), fa) or consts.DEFAULT_FILE_ALIGNMENT
    sh.characteristics.value = flags
    sh.name.value = name

    self.sectionHeaders.append(sh)
    self.sections.append(data + padding)

    self.ntHeaders.fileHeader.numberOfSections.value += 1
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L495-L532
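A short usage sketch for addSection; the flags mirror the default above (0x60000000 = IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ), and the file names and section name are hypothetical:

import pype32

pe = pype32.PE("calc.exe")  # hypothetical input file

# append a new readable, executable section holding 0x100 NOP bytes
pe.addSection("\x90" * 0x100, name=".newsec\x00", flags=0x60000000)

pe.write("calc_with_section.exe")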
crackinglandia/pype32
pype32/pype32.py
PE.extendSection
def extendSection(self, sectionIndex, data):
    """
    Extends an existing section in the L{PE} instance.

    @type sectionIndex: int
    @param sectionIndex: The index, starting at 1, of the section to be extended.

    @type data: str
    @param data: The data to include in the section.

    @raise IndexError: If an invalid C{sectionIndex} was specified.
    @raise SectionHeadersException: If there is no section to extend.
    """
    fa = self.ntHeaders.optionalHeader.fileAlignment.value
    sa = self.ntHeaders.optionalHeader.sectionAlignment.value

    if len(self.sectionHeaders):
        if len(self.sectionHeaders) == sectionIndex:
            try:
                # we are in the last section or self.sectionHeaders has only 1 sectionHeader instance
                vzLastSection = self.sectionHeaders[-1].misc.value
                rzLastSection = self.sectionHeaders[-1].sizeOfRawData.value

                self.sectionHeaders[-1].misc.value = self._adjustSectionAlignment(vzLastSection + len(data), fa, sa)
                self.sectionHeaders[-1].sizeOfRawData.value = self._adjustFileAlignment(rzLastSection + len(data), fa)

                vz = self.sectionHeaders[-1].misc.value
                rz = self.sectionHeaders[-1].sizeOfRawData.value
            except IndexError:
                raise IndexError("list index out of range.")

            if vz < rz:
                print "WARNING: VirtualSize (%x) is less than SizeOfRawData (%x)" % (vz, rz)

            if len(data) % fa == 0:
                self.sections[-1] += data
            else:
                self.sections[-1] += data + "\xcc" * (fa - len(data) % fa)
        else:
            # if it is not the last section ...
            try:
                # adjust data of the section the user wants to extend
                counter = sectionIndex - 1
                vzCurrentSection = self.sectionHeaders[counter].misc.value
                rzCurrentSection = self.sectionHeaders[counter].sizeOfRawData.value

                self.sectionHeaders[counter].misc.value = self._adjustSectionAlignment(vzCurrentSection + len(data), fa, sa)
                self.sectionHeaders[counter].sizeOfRawData.value = self._adjustFileAlignment(rzCurrentSection + len(data), fa)

                if len(data) % fa == 0:
                    self.sections[counter] += data
                else:
                    self.sections[counter] += data + "\xcc" * (fa - len(data) % fa)

                counter += 1
                while counter != len(self.sectionHeaders):
                    vzPreviousSection = self.sectionHeaders[counter - 1].misc.value
                    vaPreviousSection = self.sectionHeaders[counter - 1].virtualAddress.value
                    rzPreviousSection = self.sectionHeaders[counter - 1].sizeOfRawData.value
                    roPreviousSection = self.sectionHeaders[counter - 1].pointerToRawData.value

                    # adjust VA and RO of the next section
                    self.sectionHeaders[counter].virtualAddress.value = self._adjustSectionAlignment(vzPreviousSection + vaPreviousSection, fa, sa)
                    self.sectionHeaders[counter].pointerToRawData.value = self._adjustFileAlignment(rzPreviousSection + roPreviousSection, fa)

                    va = self.sectionHeaders[counter].virtualAddress.value
                    ro = self.sectionHeaders[counter].pointerToRawData.value

                    # NOTE: the original warning reused the VirtualSize/SizeOfRawData
                    # wording here, although the values compared are the adjusted
                    # VirtualAddress and PointerToRawData.
                    if va < ro:
                        print "WARNING: VirtualAddress (%x) is less than PointerToRawData (%x)" % (va, ro)

                    counter += 1
            except IndexError:
                raise IndexError("list index out of range.")
    else:
        raise excep.SectionHeadersException("There is no section to extend.")
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L534-L616
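A usage sketch for extendSection; note the 1-based index implied by the code above (file names hypothetical):

import pype32

pe = pype32.PE("calc.exe")  # hypothetical input file

# grow the first section (index 1 in this API) by 0x200 bytes of NOPs
pe.extendSection(1, "\x90" * 0x200)

pe.write("calc_extended.exe")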
crackinglandia/pype32
pype32/pype32.py
PE._fixPe
def _fixPe(self):
    """
    Fixes the necessary fields in the PE file instance in order to create
    a valid PE32, i.e. SizeOfImage.
    """
    sizeOfImage = 0
    fa = self.ntHeaders.optionalHeader.fileAlignment.value
    sa = self.ntHeaders.optionalHeader.sectionAlignment.value
    for sh in self.sectionHeaders:
        sizeOfImage += sh.misc.value
    # NOTE: the original code had several typos here (optionaHeader,
    # sizeoOfImage, sh.misc without .value, and a call to a non-existent
    # _sectionAlignment); _adjustSectionAlignment is the alignment helper
    # actually defined in this class.
    self.ntHeaders.optionalHeader.sizeOfImage.value = self._adjustSectionAlignment(sizeOfImage + 0x1000, fa, sa)
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L618-L625
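SizeOfImage is conventionally the section-aligned span of headers plus sections; the sum-plus-0x1000 above approximates that by treating the headers as one extra page and by summing virtual sizes rather than aligning each section's VA. A sketch of that approximation:

def size_of_image(virtual_sizes, section_alignment, headers_size=0x1000):
    # headers counted as one page, then all virtual sizes summed
    total = headers_size
    for vz in virtual_sizes:
        total += vz
    # round up to SectionAlignment
    if total % section_alignment:
        total = ((total // section_alignment) + 1) * section_alignment
    return total

assert size_of_image([0x1200, 0x800], 0x1000) == 0x3000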
crackinglandia/pype32
pype32/pype32.py
PE._adjustFileAlignment
def _adjustFileAlignment(self, value, fileAlignment):
    """
    Align a value to C{FileAlignment}.

    @type value: int
    @param value: The value to align.

    @type fileAlignment: int
    @param fileAlignment: The value to be used to align the C{value} parameter.

    @rtype: int
    @return: The aligned value.
    """
    if fileAlignment > consts.DEFAULT_FILE_ALIGNMENT:
        if not utils.powerOfTwo(fileAlignment):
            print "Warning: FileAlignment is greater than DEFAULT_FILE_ALIGNMENT (0x200) and is not a power of two."

    if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
        return value

    if fileAlignment and value % fileAlignment:
        # round up to the next multiple of fileAlignment
        return ((value / fileAlignment) + 1) * fileAlignment

    return value
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L627-L650
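The rounding step above is the usual round-up-to-multiple formula; a standalone sketch (using floor division, which matches the Python 2 division in the code):

def align_up(value, alignment):
    # round value up to the next multiple of alignment
    if alignment and value % alignment:
        return ((value // alignment) + 1) * alignment
    return value

assert align_up(0x201, 0x200) == 0x400
assert align_up(0x400, 0x200) == 0x400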
crackinglandia/pype32
pype32/pype32.py
PE._adjustSectionAlignment
def _adjustSectionAlignment(self, value, fileAlignment, sectionAlignment):
    """
    Align a value to C{SectionAlignment}.

    @type value: int
    @param value: The value to be aligned.

    @type fileAlignment: int
    @param fileAlignment: The value to be used as C{FileAlignment}.

    @type sectionAlignment: int
    @param sectionAlignment: The value to be used as C{SectionAlignment}.

    @rtype: int
    @return: The aligned value.
    """
    if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
        # NOTE: the original code misspelled fileAlignment here ("fileAligment"),
        # which would raise a NameError whenever this branch was taken.
        if fileAlignment != sectionAlignment:
            print "FileAlignment does not match SectionAlignment."

    if sectionAlignment < consts.DEFAULT_PAGE_SIZE:
        sectionAlignment = fileAlignment

    if sectionAlignment and value % sectionAlignment:
        # round up to the next multiple of sectionAlignment
        return sectionAlignment * ((value / sectionAlignment) + 1)

    return value
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L652-L677
crackinglandia/pype32
pype32/pype32.py
PE.getDwordAtRva
def getDwordAtRva(self, rva):
    """
    Returns a C{DWORD} from a given RVA.

    @type rva: int
    @param rva: The RVA to get the C{DWORD} from.

    @rtype: L{DWORD}
    @return: The L{DWORD} obtained at the given RVA.
    """
    return datatypes.DWORD.parse(utils.ReadData(self.getDataAtRva(rva, 4)))
python
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L679-L689
crackinglandia/pype32
pype32/pype32.py
PE.getWordAtRva
def getWordAtRva(self, rva): """ Returns a C{WORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given RVA. """ return datatypes.WORD.parse(utils.ReadData(self.getDataAtRva(rva, 2)))
python
def getWordAtRva(self, rva): """ Returns a C{WORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given RVA. """ return datatypes.WORD.parse(utils.ReadData(self.getDataAtRva(rva, 2)))
[ "def", "getWordAtRva", "(", "self", ",", "rva", ")", ":", "return", "datatypes", ".", "WORD", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtRva", "(", "rva", ",", "2", ")", ")", ")" ]
Returns a C{WORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given RVA.
[ "Returns", "a", "C", "{", "WORD", "}", "from", "a", "given", "RVA", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L691-L701
crackinglandia/pype32
pype32/pype32.py
PE.getDwordAtOffset
def getDwordAtOffset(self, offset): """ Returns a C{DWORD} from a given offset. @type offset: int @param offset: The offset to get the C{DWORD} from. @rtype: L{DWORD} @return: The L{DWORD} obtained at the given offset. """ return datatypes.DWORD.parse(utils.ReadData(self.getDataAtOffset(offset, 4)))
python
def getDwordAtOffset(self, offset): """ Returns a C{DWORD} from a given offset. @type offset: int @param offset: The offset to get the C{DWORD} from. @rtype: L{DWORD} @return: The L{DWORD} obtained at the given offset. """ return datatypes.DWORD.parse(utils.ReadData(self.getDataAtOffset(offset, 4)))
[ "def", "getDwordAtOffset", "(", "self", ",", "offset", ")", ":", "return", "datatypes", ".", "DWORD", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtOffset", "(", "offset", ",", "4", ")", ")", ")" ]
Returns a C{DWORD} from a given offset. @type offset: int @param offset: The offset to get the C{DWORD} from. @rtype: L{DWORD} @return: The L{DWORD} obtained at the given offset.
[ "Returns", "a", "C", "{", "DWORD", "}", "from", "a", "given", "offset", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L703-L713
crackinglandia/pype32
pype32/pype32.py
PE.getWordAtOffset
def getWordAtOffset(self, offset): """ Returns a C{WORD} from a given offset. @type offset: int @param offset: The offset to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given offset. """ return datatypes.WORD.parse(utils.ReadData(self.getDataAtOffset(offset, 2)))
python
def getWordAtOffset(self, offset): """ Returns a C{WORD} from a given offset. @type offset: int @param offset: The offset to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given offset. """ return datatypes.WORD.parse(utils.ReadData(self.getDataAtOffset(offset, 2)))
[ "def", "getWordAtOffset", "(", "self", ",", "offset", ")", ":", "return", "datatypes", ".", "WORD", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtOffset", "(", "offset", ",", "2", ")", ")", ")" ]
Returns a C{WORD} from a given offset. @type offset: int @param offset: The offset to get the C{WORD} from. @rtype: L{WORD} @return: The L{WORD} obtained at the given offset.
[ "Returns", "a", "C", "{", "WORD", "}", "from", "a", "given", "offset", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L715-L725
crackinglandia/pype32
pype32/pype32.py
PE.getQwordAtRva
def getQwordAtRva(self, rva): """ Returns a C{QWORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given RVA. """ return datatypes.QWORD.parse(utils.ReadData(self.getDataAtRva(rva, 8)))
python
def getQwordAtRva(self, rva): """ Returns a C{QWORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given RVA. """ return datatypes.QWORD.parse(utils.ReadData(self.getDataAtRva(rva, 8)))
[ "def", "getQwordAtRva", "(", "self", ",", "rva", ")", ":", "return", "datatypes", ".", "QWORD", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtRva", "(", "rva", ",", "8", ")", ")", ")" ]
Returns a C{QWORD} from a given RVA. @type rva: int @param rva: The RVA to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given RVA.
[ "Returns", "a", "C", "{", "QWORD", "}", "from", "a", "given", "RVA", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L727-L737
crackinglandia/pype32
pype32/pype32.py
PE.getQwordAtOffset
def getQwordAtOffset(self, offset): """ Returns a C{QWORD} from a given offset. @type offset: int @param offset: The offset to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given offset. """ return datatypes.QWORD.parse(utils.ReadData(self.getDataAtOffset(offset, 8)))
python
def getQwordAtOffset(self, offset): """ Returns a C{QWORD} from a given offset. @type offset: int @param offset: The offset to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given offset. """ return datatypes.QWORD.parse(utils.ReadData(self.getDataAtOffset(offset, 8)))
[ "def", "getQwordAtOffset", "(", "self", ",", "offset", ")", ":", "return", "datatypes", ".", "QWORD", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtOffset", "(", "offset", ",", "8", ")", ")", ")" ]
Returns a C{QWORD} from a given offset. @type offset: int @param offset: The offset to get the C{QWORD} from. @rtype: L{QWORD} @return: The L{QWORD} obtained at the given offset.
[ "Returns", "a", "C", "{", "QWORD", "}", "from", "a", "given", "offset", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L739-L749
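A hedged usage sketch for the WORD/DWORD/QWORD getters above; it assumes pype32.PE can be constructed from a file path (as in the project README) and that the optional header exposes an addressOfEntryPoint field, and "sample.exe" is a hypothetical input:

import pype32

pe = pype32.PE("sample.exe")  # hypothetical sample file
print "MZ magic: %04x" % pe.getWordAtOffset(0).value  # 0x5a4d for a valid DOS header
ep = pe.ntHeaders.optionalHeader.addressOfEntryPoint.value  # assumed field name
print "first DWORD at entry point: %08x" % pe.getDwordAtRva(ep).value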
crackinglandia/pype32
pype32/pype32.py
PE.getDataAtRva
def getDataAtRva(self, rva, size): """ Gets binary data at a given RVA. @type rva: int @param rva: The RVA to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given RVA. """ return self.getDataAtOffset(self.getOffsetFromRva(rva), size)
python
def getDataAtRva(self, rva, size): """ Gets binary data at a given RVA. @type rva: int @param rva: The RVA to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given RVA. """ return self.getDataAtOffset(self.getOffsetFromRva(rva), size)
[ "def", "getDataAtRva", "(", "self", ",", "rva", ",", "size", ")", ":", "return", "self", ".", "getDataAtOffset", "(", "self", ".", "getOffsetFromRva", "(", "rva", ")", ",", "size", ")" ]
Gets binary data at a given RVA. @type rva: int @param rva: The RVA to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given RVA.
[ "Gets", "binary", "data", "at", "a", "given", "RVA", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L751-L764
crackinglandia/pype32
pype32/pype32.py
PE.getDataAtOffset
def getDataAtOffset(self, offset, size): """ Gets binary data at a given offset. @type offset: int @param offset: The offset to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given offset. """ data = str(self) return data[offset:offset+size]
python
def getDataAtOffset(self, offset, size): """ Gets binary data at a given offset. @type offset: int @param offset: The offset to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given offset. """ data = str(self) return data[offset:offset+size]
[ "def", "getDataAtOffset", "(", "self", ",", "offset", ",", "size", ")", ":", "data", "=", "str", "(", "self", ")", "return", "data", "[", "offset", ":", "offset", "+", "size", "]" ]
Gets binary data at a given offset. @type offset: int @param offset: The offset to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given offset.
[ "Gets", "binary", "data", "at", "a", "given", "offset", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L766-L780
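getDataAtOffset is a plain slice over the serialized image, and getDataAtRva first maps the RVA to a file offset. A hedged sketch with a hypothetical input file:

import pype32

pe = pype32.PE("sample.exe")  # hypothetical sample file
print repr(pe.getDataAtOffset(0, 2))  # expected "MZ" for a valid DOS header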
crackinglandia/pype32
pype32/pype32.py
PE.readStringAtRva
def readStringAtRva(self, rva): """ Returns a L{String} object from a given RVA. @type rva: int @param rva: The RVA to get the string from. @rtype: L{String} @return: A new L{String} object from the given RVA. """ d = self.getDataAtRva(rva, 1) resultStr = datatypes.String("") while d != "\x00": resultStr.value += d rva += 1 d = self.getDataAtRva(rva, 1) return resultStr
python
def readStringAtRva(self, rva): """ Returns a L{String} object from a given RVA. @type rva: int @param rva: The RVA to get the string from. @rtype: L{String} @return: A new L{String} object from the given RVA. """ d = self.getDataAtRva(rva, 1) resultStr = datatypes.String("") while d != "\x00": resultStr.value += d rva += 1 d = self.getDataAtRva(rva, 1) return resultStr
[ "def", "readStringAtRva", "(", "self", ",", "rva", ")", ":", "d", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "1", ")", "resultStr", "=", "datatypes", ".", "String", "(", "\"\"", ")", "while", "d", "!=", "\"\\x00\"", ":", "resultStr", ".", "value", "+=", "d", "rva", "+=", "1", "d", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "1", ")", "return", "resultStr" ]
Returns a L{String} object from a given RVA. @type rva: int @param rva: The RVA to get the string from. @rtype: L{String} @return: A new L{String} object from the given RVA.
[ "Returns", "a", "L", "{", "String", "}", "object", "from", "a", "given", "RVA", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L782-L798
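readStringAtRva walks the image one byte at a time until a NUL terminator. A standalone sketch of the same loop over a plain buffer:

def read_c_string(buf, offset):
    # accumulate bytes until the NUL terminator or end of buffer
    result = ""
    while offset < len(buf) and buf[offset] != "\x00":
        result += buf[offset]
        offset += 1
    return result

print read_c_string("KERNEL32.dll\x00junk", 0)  # prints KERNEL32.dll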
crackinglandia/pype32
pype32/pype32.py
PE.isExe
def isExe(self): """ Determines if the current L{PE} instance is an Executable file. @rtype: bool @return: C{True} if the current L{PE} instance is an Executable file. Otherwise, returns C{False}. """ if not self.isDll() and not self.isDriver() and ( consts.IMAGE_FILE_EXECUTABLE_IMAGE & self.ntHeaders.fileHeader.characteristics.value) == consts.IMAGE_FILE_EXECUTABLE_IMAGE: return True return False
python
def isExe(self): """ Determines if the current L{PE} instance is an Executable file. @rtype: bool @return: C{True} if the current L{PE} instance is an Executable file. Otherwise, returns C{False}. """ if not self.isDll() and not self.isDriver() and ( consts.IMAGE_FILE_EXECUTABLE_IMAGE & self.ntHeaders.fileHeader.characteristics.value) == consts.IMAGE_FILE_EXECUTABLE_IMAGE: return True return False
[ "def", "isExe", "(", "self", ")", ":", "if", "not", "self", ".", "isDll", "(", ")", "and", "not", "self", ".", "isDriver", "(", ")", "and", "(", "consts", ".", "IMAGE_FILE_EXECUTABLE_IMAGE", "&", "self", ".", "ntHeaders", ".", "fileHeader", ".", "characteristics", ".", "value", ")", "==", "consts", ".", "IMAGE_FILE_EXECUTABLE_IMAGE", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is an Executable file. @rtype: bool @return: C{True} if the current L{PE} instance is an Executable file. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "an", "Executable", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L800-L809
crackinglandia/pype32
pype32/pype32.py
PE.isDll
def isDll(self): """ Determines if the current L{PE} instance is a Dynamic Link Library file. @rtype: bool @return: C{True} if the current L{PE} instance is a DLL. Otherwise, returns C{False}. """ if (consts.IMAGE_FILE_DLL & self.ntHeaders.fileHeader.characteristics.value) == consts.IMAGE_FILE_DLL: return True return False
python
def isDll(self): """ Determines if the current L{PE} instance is a Dynamic Link Library file. @rtype: bool @return: C{True} if the current L{PE} instance is a DLL. Otherwise, returns C{False}. """ if (consts.IMAGE_FILE_DLL & self.ntHeaders.fileHeader.characteristics.value) == consts.IMAGE_FILE_DLL: return True return False
[ "def", "isDll", "(", "self", ")", ":", "if", "(", "consts", ".", "IMAGE_FILE_DLL", "&", "self", ".", "ntHeaders", ".", "fileHeader", ".", "characteristics", ".", "value", ")", "==", "consts", ".", "IMAGE_FILE_DLL", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is a Dynamic Link Library file. @rtype: bool @return: C{True} if the current L{PE} instance is a DLL. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "a", "Dynamic", "Link", "Library", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L811-L820
crackinglandia/pype32
pype32/pype32.py
PE.isDriver
def isDriver(self): """ Determines if the current L{PE} instance is a driver (.sys) file. @rtype: bool @return: C{True} if the current L{PE} instance is a driver. Otherwise, returns C{False}. """ modules = [] imports = self.ntHeaders.optionalHeader.dataDirectory[consts.IMPORT_DIRECTORY].info for module in imports: modules.append(module.metaData.moduleName.value.lower()) if set(["ntoskrnl.exe", "hal.dll", "ndis.sys", "bootvid.dll", "kdcom.dll"]).intersection(modules): return True return False
python
def isDriver(self): """ Determines if the current L{PE} instance is a driver (.sys) file. @rtype: bool @return: C{True} if the current L{PE} instance is a driver. Otherwise, returns C{False}. """ modules = [] imports = self.ntHeaders.optionalHeader.dataDirectory[consts.IMPORT_DIRECTORY].info for module in imports: modules.append(module.metaData.moduleName.value.lower()) if set(["ntoskrnl.exe", "hal.dll", "ndis.sys", "bootvid.dll", "kdcom.dll"]).intersection(modules): return True return False
[ "def", "isDriver", "(", "self", ")", ":", "modules", "=", "[", "]", "imports", "=", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dataDirectory", "[", "consts", ".", "IMPORT_DIRECTORY", "]", ".", "info", "for", "module", "in", "imports", ":", "modules", ".", "append", "(", "module", ".", "metaData", ".", "moduleName", ".", "value", ".", "lower", "(", ")", ")", "if", "set", "(", "[", "\"ntoskrnl.exe\"", ",", "\"hal.dll\"", ",", "\"ndis.sys\"", ",", "\"bootvid.dll\"", ",", "\"kdcom.dll\"", "]", ")", ".", "intersection", "(", "modules", ")", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is a driver (.sys) file. @rtype: bool @return: C{True} if the current L{PE} instance is a driver. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "a", "driver", "(", ".", "sys", ")", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L822-L836
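A hedged classification sketch built on the three predicates above; the input path is hypothetical:

import pype32

pe = pype32.PE("sample.exe")  # hypothetical sample file
if pe.isDriver():
    print "kernel driver"
elif pe.isDll():
    print "dynamic link library"
elif pe.isExe():
    print "executable"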
crackinglandia/pype32
pype32/pype32.py
PE.isPe32
def isPe32(self): """ Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE32: return True return False
python
def isPe32(self): """ Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE32: return True return False
[ "def", "isPe32", "(", "self", ")", ":", "if", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "magic", ".", "value", "==", "consts", ".", "PE32", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "a", "PE32", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L838-L847
crackinglandia/pype32
pype32/pype32.py
PE.isPe64
def isPe64(self): """ Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE64: return True return False
python
def isPe64(self): """ Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE64: return True return False
[ "def", "isPe64", "(", "self", ")", ":", "if", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "magic", ".", "value", "==", "consts", ".", "PE64", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "a", "PE64", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L849-L858
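The PE32/PE64 distinction is a single magic comparison; the same magic value is threaded through the _parse* helpers later in this file. A hedged sketch with a hypothetical input:

import pype32

pe = pype32.PE("sample.exe")  # hypothetical sample file
print "image is PE%d" % (64 if pe.isPe64() else 32)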
crackinglandia/pype32
pype32/pype32.py
PE.isPeBounded
def isPeBounded(self): """ Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}. @rtype: bool @return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}. """ boundImportsDir = self.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY] if boundImportsDir.rva.value and boundImportsDir.size.value: return True return False
python
def isPeBounded(self): """ Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}. @rtype: bool @return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}. """ boundImportsDir = self.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY] if boundImportsDir.rva.value and boundImportsDir.size.value: return True return False
[ "def", "isPeBounded", "(", "self", ")", ":", "boundImportsDir", "=", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dataDirectory", "[", "consts", ".", "BOUND_IMPORT_DIRECTORY", "]", "if", "boundImportsDir", ".", "rva", ".", "value", "and", "boundImportsDir", ".", "size", ".", "value", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}. @rtype: bool @return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "bounded", "i", ".", "e", ".", "has", "a", "C", "{", "BOUND_IMPORT_DIRECTORY", "}", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L860-L870
crackinglandia/pype32
pype32/pype32.py
PE.isNXEnabled
def isNXEnabled(self): """ Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT == consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
python
def isNXEnabled(self): """ Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT == consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
[ "def", "isNXEnabled", "(", "self", ")", ":", "return", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dllCharacteristics", ".", "value", "&", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_NX_COMPAT", "==", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_NX_COMPAT" ]
Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "has", "the", "NXCOMPAT", "(", "Compatible", "with", "Data", "Execution", "Prevention", ")", "flag", "enabled", ".", "@see", ":", "U", "{", "http", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "ms235442", ".", "aspx", "}" ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L872-L880
crackinglandia/pype32
pype32/pype32.py
PE.isCFGEnabled
def isCFGEnabled(self): """ Determines if the current L{PE} instance has the CFG (Control Flow Guard) flag enabled. @see: U{http://blogs.msdn.com/b/vcblog/archive/2014/12/08/visual-studio-2015-preview-work-in-progress-security-feature.aspx} @see: U{https://msdn.microsoft.com/en-us/library/dn919635%%28v=vs.140%%29.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the CFG flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF == consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF
python
def isCFGEnabled(self): """ Determines if the current L{PE} instance has the CFG (Control Flow Guard) flag enabled. @see: U{http://blogs.msdn.com/b/vcblog/archive/2014/12/08/visual-studio-2015-preview-work-in-progress-security-feature.aspx} @see: U{https://msdn.microsoft.com/en-us/library/dn919635%%28v=vs.140%%29.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the CFG flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF == consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF
[ "def", "isCFGEnabled", "(", "self", ")", ":", "return", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dllCharacteristics", ".", "value", "&", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_GUARD_CF", "==", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_GUARD_CF" ]
Determines if the current L{PE} instance has the CFG (Control Flow Guard) flag enabled. @see: U{http://blogs.msdn.com/b/vcblog/archive/2014/12/08/visual-studio-2015-preview-work-in-progress-security-feature.aspx} @see: U{https://msdn.microsoft.com/en-us/library/dn919635%%28v=vs.140%%29.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the CFG flag enabled. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "has", "CFG", "(", "Control", "Flow", "Guard", ")", "flag", "enabled", ".", "@see", ":", "U", "{", "http", ":", "//", "blogs", ".", "msdn", ".", "com", "/", "b", "/", "vcblog", "/", "archive", "/", "2014", "/", "12", "/", "08", "/", "visual", "-", "studio", "-", "2015", "-", "preview", "-", "work", "-", "in", "-", "progress", "-", "security", "-", "feature", ".", "aspx", "}", "@see", ":", "U", "{", "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "dn919635%%28v", "=", "vs", ".", "140%%29", ".", "aspx", "}" ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L886-L895
crackinglandia/pype32
pype32/pype32.py
PE.isASLREnabled
def isASLREnabled(self): """ Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE == consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
python
def isASLREnabled(self): """ Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}. """ return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE == consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
[ "def", "isASLREnabled", "(", "self", ")", ":", "return", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dllCharacteristics", ".", "value", "&", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE", "==", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE" ]
Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "has", "the", "DYNAMICBASE", "(", "Use", "address", "space", "layout", "randomization", ")", "flag", "enabled", ".", "@see", ":", "U", "{", "http", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "bb384887", ".", "aspx", "}" ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L897-L905
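NX, CFG and ASLR are all single-bit tests against dllCharacteristics. A standalone sketch with the flag values from the PE/COFF specification:

IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040  # per the PE/COFF spec
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100     # per the PE/COFF spec

dllCharacteristics = 0x0140  # hypothetical header value
print bool(dllCharacteristics & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT)     # True
print bool(dllCharacteristics & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE)  # True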
crackinglandia/pype32
pype32/pype32.py
PE.isSAFESEHEnabled
def isSAFESEHEnabled(self): """ Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx} @rtype: int @return: Returns C{1} if the current L{PE} instance has SAFESEH enabled, C{0} if SAFESEH is off, or C{-1} if the image has the NO_SEH flag set. """ NOSEH = -1 SAFESEH_OFF = 0 SAFESEH_ON = 1 if self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NO_SEH: return NOSEH loadConfigDir = self.ntHeaders.optionalHeader.dataDirectory[consts.CONFIGURATION_DIRECTORY] if loadConfigDir.info: if loadConfigDir.info.SEHandlerTable.value: return SAFESEH_ON return SAFESEH_OFF
python
def isSAFESEHEnabled(self): """ Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx} @rtype: int @return: Returns C{1} if the current L{PE} instance has SAFESEH enabled, C{0} if SAFESEH is off, or C{-1} if the image has the NO_SEH flag set. """ NOSEH = -1 SAFESEH_OFF = 0 SAFESEH_ON = 1 if self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NO_SEH: return NOSEH loadConfigDir = self.ntHeaders.optionalHeader.dataDirectory[consts.CONFIGURATION_DIRECTORY] if loadConfigDir.info: if loadConfigDir.info.SEHandlerTable.value: return SAFESEH_ON return SAFESEH_OFF
[ "def", "isSAFESEHEnabled", "(", "self", ")", ":", "NOSEH", "=", "-", "1", "SAFESEH_OFF", "=", "0", "SAFESEH_ON", "=", "1", "if", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dllCharacteristics", ".", "value", "&", "consts", ".", "IMAGE_DLL_CHARACTERISTICS_NO_SEH", ":", "return", "NOSEH", "loadConfigDir", "=", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "dataDirectory", "[", "consts", ".", "CONFIGURATION_DIRECTORY", "]", "if", "loadConfigDir", ".", "info", ":", "if", "loadConfigDir", ".", "info", ".", "SEHandlerTable", ".", "value", ":", "return", "SAFESEH_ON", "return", "SAFESEH_OFF" ]
Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx} @rtype: int @return: Returns C{1} if the current L{PE} instance has SAFESEH enabled, C{0} if SAFESEH is off, or C{-1} if the image has the NO_SEH flag set.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "has", "the", "SAFESEH", "(", "Image", "has", "Safe", "Exception", "Handlers", ")", "flag", "enabled", ".", "@see", ":", "U", "{", "http", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "9a89h429", ".", "aspx", "}" ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L907-L926
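Unlike the other mitigation checks, isSAFESEHEnabled returns a tri-state integer. A hedged consumption sketch with a hypothetical input:

import pype32

pe = pype32.PE("sample.exe")  # hypothetical sample file
state = pe.isSAFESEHEnabled()
print {1: "SAFESEH on", 0: "SAFESEH off", -1: "image sets NO_SEH"}[state]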
crackinglandia/pype32
pype32/pype32.py
PE._parseDirectories
def _parseDirectories(self, dataDirectoryInstance, magic = consts.PE32): """ Parses all the directories in the L{PE} instance. @type dataDirectoryInstance: L{DataDirectory} @param dataDirectoryInstance: A L{DataDirectory} object with the directories data. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. """ directories = [(consts.EXPORT_DIRECTORY, self._parseExportDirectory),\ (consts.IMPORT_DIRECTORY, self._parseImportDirectory),\ (consts.RESOURCE_DIRECTORY, self._parseResourceDirectory),\ (consts.EXCEPTION_DIRECTORY, self._parseExceptionDirectory),\ (consts.RELOCATION_DIRECTORY, self._parseRelocsDirectory),\ (consts.TLS_DIRECTORY, self._parseTlsDirectory),\ (consts.DEBUG_DIRECTORY, self._parseDebugDirectory),\ (consts.BOUND_IMPORT_DIRECTORY, self._parseBoundImportDirectory),\ (consts.DELAY_IMPORT_DIRECTORY, self._parseDelayImportDirectory),\ (consts.CONFIGURATION_DIRECTORY, self._parseLoadConfigDirectory),\ (consts.NET_METADATA_DIRECTORY, self._parseNetDirectory)] for directory in directories: dir = dataDirectoryInstance[directory[0]] if dir.rva.value and dir.size.value: try: dataDirectoryInstance[directory[0]].info = directory[1](dir.rva.value, dir.size.value, magic) except Exception as e: print excep.PEWarning("Error parsing PE directory: %s." % directory[1].__name__.replace("_parse", ""))
python
def _parseDirectories(self, dataDirectoryInstance, magic = consts.PE32): """ Parses all the directories in the L{PE} instance. @type dataDirectoryInstance: L{DataDirectory} @param dataDirectoryInstance: A L{DataDirectory} object with the directories data. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. """ directories = [(consts.EXPORT_DIRECTORY, self._parseExportDirectory),\ (consts.IMPORT_DIRECTORY, self._parseImportDirectory),\ (consts.RESOURCE_DIRECTORY, self._parseResourceDirectory),\ (consts.EXCEPTION_DIRECTORY, self._parseExceptionDirectory),\ (consts.RELOCATION_DIRECTORY, self._parseRelocsDirectory),\ (consts.TLS_DIRECTORY, self._parseTlsDirectory),\ (consts.DEBUG_DIRECTORY, self._parseDebugDirectory),\ (consts.BOUND_IMPORT_DIRECTORY, self._parseBoundImportDirectory),\ (consts.DELAY_IMPORT_DIRECTORY, self._parseDelayImportDirectory),\ (consts.CONFIGURATION_DIRECTORY, self._parseLoadConfigDirectory),\ (consts.NET_METADATA_DIRECTORY, self._parseNetDirectory)] for directory in directories: dir = dataDirectoryInstance[directory[0]] if dir.rva.value and dir.size.value: try: dataDirectoryInstance[directory[0]].info = directory[1](dir.rva.value, dir.size.value, magic) except Exception as e: print excep.PEWarning("Error parsing PE directory: %s." % directory[1].__name__.replace("_parse", ""))
[ "def", "_parseDirectories", "(", "self", ",", "dataDirectoryInstance", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "directories", "=", "[", "(", "consts", ".", "EXPORT_DIRECTORY", ",", "self", ".", "_parseExportDirectory", ")", ",", "(", "consts", ".", "IMPORT_DIRECTORY", ",", "self", ".", "_parseImportDirectory", ")", ",", "(", "consts", ".", "RESOURCE_DIRECTORY", ",", "self", ".", "_parseResourceDirectory", ")", ",", "(", "consts", ".", "EXCEPTION_DIRECTORY", ",", "self", ".", "_parseExceptionDirectory", ")", ",", "(", "consts", ".", "RELOCATION_DIRECTORY", ",", "self", ".", "_parseRelocsDirectory", ")", ",", "(", "consts", ".", "TLS_DIRECTORY", ",", "self", ".", "_parseTlsDirectory", ")", ",", "(", "consts", ".", "DEBUG_DIRECTORY", ",", "self", ".", "_parseDebugDirectory", ")", ",", "(", "consts", ".", "BOUND_IMPORT_DIRECTORY", ",", "self", ".", "_parseBoundImportDirectory", ")", ",", "(", "consts", ".", "DELAY_IMPORT_DIRECTORY", ",", "self", ".", "_parseDelayImportDirectory", ")", ",", "(", "consts", ".", "CONFIGURATION_DIRECTORY", ",", "self", ".", "_parseLoadConfigDirectory", ")", ",", "(", "consts", ".", "NET_METADATA_DIRECTORY", ",", "self", ".", "_parseNetDirectory", ")", "]", "for", "directory", "in", "directories", ":", "dir", "=", "dataDirectoryInstance", "[", "directory", "[", "0", "]", "]", "if", "dir", ".", "rva", ".", "value", "and", "dir", ".", "size", ".", "value", ":", "try", ":", "dataDirectoryInstance", "[", "directory", "[", "0", "]", "]", ".", "info", "=", "directory", "[", "1", "]", "(", "dir", ".", "rva", ".", "value", ",", "dir", ".", "size", ".", "value", ",", "magic", ")", "except", "Exception", "as", "e", ":", "print", "excep", ".", "PEWarning", "(", "\"Error parsing PE directory: %s.\"", "%", "directory", "[", "1", "]", ".", "__name__", ".", "replace", "(", "\"_parse\"", ",", "\"\"", ")", ")" ]
Parses all the directories in the L{PE} instance. @type dataDirectoryInstance: L{DataDirectory} @param dataDirectoryInstance: A L{DataDirectory} object with the directories data. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
[ "Parses", "all", "the", "directories", "in", "the", "L", "{", "PE", "}", "instance", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L928-L956
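_parseDirectories is a (directory index, parser callback) dispatch table that only fires when both rva and size are non-zero. A standalone sketch of the same pattern with toy handlers:

def parse_exports(rva, size):
    return "exports@%x+%x" % (rva, size)

def parse_imports(rva, size):
    return "imports@%x+%x" % (rva, size)

handlers = [(0, parse_exports), (1, parse_imports)]
table = {0: (0x2000, 0x80), 1: (0x0, 0x0)}  # toy data directory: (rva, size) pairs
for index, handler in handlers:
    rva, size = table[index]
    if rva and size:  # skip absent directories, as _parseDirectories does
        print handler(rva, size)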
crackinglandia/pype32
pype32/pype32.py
PE._parseResourceDirectory
def _parseResourceDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_RESOURCE_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_RESOURCE_DIRECTORY} data. """ return self.getDataAtRva(rva, size)
python
def _parseResourceDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_RESOURCE_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_RESOURCE_DIRECTORY} data. """ return self.getDataAtRva(rva, size)
[ "def", "_parseResourceDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "return", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")" ]
Parses the C{IMAGE_RESOURCE_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_RESOURCE_DIRECTORY} data.
[ "Parses", "the", "C", "{", "IMAGE_RESOURCE_DIRECTORY", "}", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L958-L974
crackinglandia/pype32
pype32/pype32.py
PE._parseExceptionDirectory
def _parseExceptionDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXCEPTION_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_EXCEPTION_DIRECTORY} data. """ return self.getDataAtRva(rva, size)
python
def _parseExceptionDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXCEPTION_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_EXCEPTION_DIRECTORY} data. """ return self.getDataAtRva(rva, size)
[ "def", "_parseExceptionDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "return", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")" ]
Parses the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXCEPTION_DIRECTORY} starts. @type size: int @param size: The size of the C{IMAGE_EXCEPTION_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The C{IMAGE_EXCEPTION_DIRECTORY} data.
[ "Parses", "the", "C", "{", "IMAGE_EXCEPTION_DIRECTORY", "}", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L976-L992
crackinglandia/pype32
pype32/pype32.py
PE._parseDelayImportDirectory
def _parseDelayImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the delay imports directory. @type rva: int @param rva: The RVA where the delay imports directory starts. @type size: int @param size: The size of the delay imports directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The delay imports directory data. """ return self.getDataAtRva(rva, size)
python
def _parseDelayImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the delay imports directory. @type rva: int @param rva: The RVA where the delay imports directory starts. @type size: int @param size: The size of the delay imports directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The delay imports directory data. """ return self.getDataAtRva(rva, size)
[ "def", "_parseDelayImportDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "return", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")" ]
Parses the delay imports directory. @type rva: int @param rva: The RVA where the delay imports directory starts. @type size: int @param size: The size of the delay imports directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The delay imports directory data.
[ "Parses", "the", "delay", "imports", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L994-L1010
crackinglandia/pype32
pype32/pype32.py
PE._parseBoundImportDirectory
def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the bound import directory. @type rva: int @param rva: The RVA where the bound import directory starts. @type size: int @param size: The size of the bound import directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) boundImportDirectory = directories.ImageBoundImportDescriptor.parse(rd) # parse the name of every bounded import. for i in range(len(boundImportDirectory) - 1): if hasattr(boundImportDirectory[i], "forwarderRefsList"): if boundImportDirectory[i].forwarderRefsList: for forwarderRefEntry in boundImportDirectory[i].forwarderRefsList: offset = forwarderRefEntry.offsetModuleName.value forwarderRefEntry.moduleName = self.readStringAtRva(offset + rva) offset = boundImportDirectory[i].offsetModuleName.value boundImportDirectory[i].moduleName = self.readStringAtRva(offset + rva) return boundImportDirectory
python
def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the bound import directory. @type rva: int @param rva: The RVA where the bound import directory starts. @type size: int @param size: The size of the bound import directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) boundImportDirectory = directories.ImageBoundImportDescriptor.parse(rd) # parse the name of every bounded import. for i in range(len(boundImportDirectory) - 1): if hasattr(boundImportDirectory[i], "forwarderRefsList"): if boundImportDirectory[i].forwarderRefsList: for forwarderRefEntry in boundImportDirectory[i].forwarderRefsList: offset = forwarderRefEntry.offsetModuleName.value forwarderRefEntry.moduleName = self.readStringAtRva(offset + rva) offset = boundImportDirectory[i].offsetModuleName.value boundImportDirectory[i].moduleName = self.readStringAtRva(offset + rva) return boundImportDirectory
[ "def", "_parseBoundImportDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "data", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "rd", "=", "utils", ".", "ReadData", "(", "data", ")", "boundImportDirectory", "=", "directories", ".", "ImageBoundImportDescriptor", ".", "parse", "(", "rd", ")", "# parse the name of every bounded import.", "for", "i", "in", "range", "(", "len", "(", "boundImportDirectory", ")", "-", "1", ")", ":", "if", "hasattr", "(", "boundImportDirectory", "[", "i", "]", ",", "\"forwarderRefsList\"", ")", ":", "if", "boundImportDirectory", "[", "i", "]", ".", "forwarderRefsList", ":", "for", "forwarderRefEntry", "in", "boundImportDirectory", "[", "i", "]", ".", "forwarderRefsList", ":", "offset", "=", "forwarderRefEntry", ".", "offsetModuleName", ".", "value", "forwarderRefEntry", ".", "moduleName", "=", "self", ".", "readStringAtRva", "(", "offset", "+", "rva", ")", "offset", "=", "boundImportDirectory", "[", "i", "]", ".", "offsetModuleName", ".", "value", "boundImportDirectory", "[", "i", "]", ".", "moduleName", "=", "self", ".", "readStringAtRva", "(", "offset", "+", "rva", ")", "return", "boundImportDirectory" ]
Parses the bound import directory. @type rva: int @param rva: The RVA where the bound import directory starts. @type size: int @param size: The size of the bound import directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object.
[ "Parses", "the", "bound", "import", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1012-L1042
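Once parsed, the bound import descriptor exposes the moduleName values resolved via readStringAtRva. A hedged sketch; the attribute path mirrors the parser above, the consts import is assumed, and the input path is hypothetical:

import pype32
from pype32 import consts  # assumed module layout

pe = pype32.PE("sample.exe")  # hypothetical sample file
if pe.isPeBounded():
    bound = pe.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY].info
    for i in range(len(bound) - 1):  # last entry is the empty terminator, as in the parser
        print bound[i].moduleName.value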
crackinglandia/pype32
pype32/pype32.py
PE._parseLoadConfigDirectory
def _parseLoadConfigDirectory(self, rva, size, magic = consts.PE32): """ Parses IMAGE_LOAD_CONFIG_DIRECTORY. @type rva: int @param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts. @type size: int @param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageLoadConfigDirectory} @return: A new L{ImageLoadConfigDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{ImageLoadConfigDirectory64} is returned. """ # print "RVA: %x - SIZE: %x" % (rva, size) # I've found some issues when parsing the IMAGE_LOAD_CONFIG_DIRECTORY in some DLLs. # There is an inconsistency with the size of the struct between MSDN docs and VS. # sizeof(IMAGE_LOAD_CONFIG_DIRECTORY) should be 0x40, in fact, that's the size Visual Studio put # in the directory table, even if the DLL was compiled with SAFESEH:ON. But If that is the case, the sizeof the # struct should be 0x48. # more information here: http://www.accuvant.com/blog/old-meets-new-microsoft-windows-safeseh-incompatibility data = self.getDataAtRva(rva, directories.ImageLoadConfigDirectory().sizeof()) rd = utils.ReadData(data) if magic == consts.PE32: return directories.ImageLoadConfigDirectory.parse(rd) elif magic == consts.PE64: return directories.ImageLoadConfigDirectory64.parse(rd) else: raise excep.InvalidParameterException("Wrong magic")
python
def _parseLoadConfigDirectory(self, rva, size, magic = consts.PE32): """ Parses IMAGE_LOAD_CONFIG_DIRECTORY. @type rva: int @param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts. @type size: int @param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageLoadConfigDirectory} @return: A new L{ImageLoadConfigDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{ImageLoadConfigDirectory64} is returned. """ # print "RVA: %x - SIZE: %x" % (rva, size) # I've found some issues when parsing the IMAGE_LOAD_CONFIG_DIRECTORY in some DLLs. # There is an inconsistency with the size of the struct between MSDN docs and VS. # sizeof(IMAGE_LOAD_CONFIG_DIRECTORY) should be 0x40, in fact, that's the size Visual Studio put # in the directory table, even if the DLL was compiled with SAFESEH:ON. But If that is the case, the sizeof the # struct should be 0x48. # more information here: http://www.accuvant.com/blog/old-meets-new-microsoft-windows-safeseh-incompatibility data = self.getDataAtRva(rva, directories.ImageLoadConfigDirectory().sizeof()) rd = utils.ReadData(data) if magic == consts.PE32: return directories.ImageLoadConfigDirectory.parse(rd) elif magic == consts.PE64: return directories.ImageLoadConfigDirectory64.parse(rd) else: raise excep.InvalidParameterException("Wrong magic")
[ "def", "_parseLoadConfigDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "# print \"RVA: %x - SIZE: %x\" % (rva, size)", "# I've found some issues when parsing the IMAGE_LOAD_CONFIG_DIRECTORY in some DLLs. ", "# There is an inconsistency with the size of the struct between MSDN docs and VS.", "# sizeof(IMAGE_LOAD_CONFIG_DIRECTORY) should be 0x40, in fact, that's the size Visual Studio put", "# in the directory table, even if the DLL was compiled with SAFESEH:ON. But If that is the case, the sizeof the", "# struct should be 0x48.", "# more information here: http://www.accuvant.com/blog/old-meets-new-microsoft-windows-safeseh-incompatibility", "data", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "directories", ".", "ImageLoadConfigDirectory", "(", ")", ".", "sizeof", "(", ")", ")", "rd", "=", "utils", ".", "ReadData", "(", "data", ")", "if", "magic", "==", "consts", ".", "PE32", ":", "return", "directories", ".", "ImageLoadConfigDirectory", ".", "parse", "(", "rd", ")", "elif", "magic", "==", "consts", ".", "PE64", ":", "return", "directories", ".", "ImageLoadConfigDirectory64", ".", "parse", "(", "rd", ")", "else", ":", "raise", "excep", ".", "InvalidParameterException", "(", "\"Wrong magic\"", ")" ]
Parses IMAGE_LOAD_CONFIG_DIRECTORY. @type rva: int @param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts. @type size: int @param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageLoadConfigDirectory} @return: A new L{ImageLoadConfigDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{ImageLoadConfigDirectory64} is returned.
[ "Parses", "IMAGE_LOAD_CONFIG_DIRECTORY", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1044-L1077
crackinglandia/pype32
pype32/pype32.py
PE._parseTlsDirectory
def _parseTlsDirectory(self, rva, size, magic = consts.PE32): """ Parses the TLS directory. @type rva: int @param rva: The RVA where the TLS directory starts. @type size: int @param size: The size of the TLS directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{TLSDirectory} @return: A new L{TLSDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{TLSDirectory64} is returned. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) if magic == consts.PE32: return directories.TLSDirectory.parse(rd) elif magic == consts.PE64: return directories.TLSDirectory64.parse(rd) else: raise excep.InvalidParameterException("Wrong magic")
python
def _parseTlsDirectory(self, rva, size, magic = consts.PE32): """ Parses the TLS directory. @type rva: int @param rva: The RVA where the TLS directory starts. @type size: int @param size: The size of the TLS directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{TLSDirectory} @return: A new L{TLSDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{TLSDirectory64} is returned. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) if magic == consts.PE32: return directories.TLSDirectory.parse(rd) elif magic == consts.PE64: return directories.TLSDirectory64.parse(rd) else: raise excep.InvalidParameterException("Wrong magic")
[ "def", "_parseTlsDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "data", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "rd", "=", "utils", ".", "ReadData", "(", "data", ")", "if", "magic", "==", "consts", ".", "PE32", ":", "return", "directories", ".", "TLSDirectory", ".", "parse", "(", "rd", ")", "elif", "magic", "==", "consts", ".", "PE64", ":", "return", "directories", ".", "TLSDirectory64", ".", "parse", "(", "rd", ")", "else", ":", "raise", "excep", ".", "InvalidParameterException", "(", "\"Wrong magic\"", ")" ]
Parses the TLS directory. @type rva: int @param rva: The RVA where the TLS directory starts. @type size: int @param size: The size of the TLS directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{TLSDirectory} @return: A new L{TLSDirectory}. @note: if the L{PE} instance is a PE64 file then a new L{TLSDirectory64} is returned.
[ "Parses", "the", "TLS", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1079-L1104
crackinglandia/pype32
pype32/pype32.py
PE._parseRelocsDirectory
def _parseRelocsDirectory(self, rva, size, magic = consts.PE32): """ Parses the relocation directory. @type rva: int @param rva: The RVA where the relocation directory starts. @type size: int @param size: The size of the relocation directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBaseRelocation} @return: A new L{ImageBaseRelocation} object. """ data = self.getDataAtRva(rva, size) #print "Length Relocation data: %x" % len(data) rd = utils.ReadData(data) relocsArray = directories.ImageBaseRelocation() while rd.offset < size: relocEntry = directories.ImageBaseRelocationEntry.parse(rd) relocsArray.append(relocEntry) return relocsArray
python
def _parseRelocsDirectory(self, rva, size, magic = consts.PE32): """ Parses the relocation directory. @type rva: int @param rva: The RVA where the relocation directory starts. @type size: int @param size: The size of the relocation directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBaseRelocation} @return: A new L{ImageBaseRelocation} object. """ data = self.getDataAtRva(rva, size) #print "Length Relocation data: %x" % len(data) rd = utils.ReadData(data) relocsArray = directories.ImageBaseRelocation() while rd.offset < size: relocEntry = directories.ImageBaseRelocationEntry.parse(rd) relocsArray.append(relocEntry) return relocsArray
[ "def", "_parseRelocsDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "data", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "#print \"Length Relocation data: %x\" % len(data)", "rd", "=", "utils", ".", "ReadData", "(", "data", ")", "relocsArray", "=", "directories", ".", "ImageBaseRelocation", "(", ")", "while", "rd", ".", "offset", "<", "size", ":", "relocEntry", "=", "directories", ".", "ImageBaseRelocationEntry", ".", "parse", "(", "rd", ")", "relocsArray", ".", "append", "(", "relocEntry", ")", "return", "relocsArray" ]
Parses the relocation directory. @type rva: int @param rva: The RVA where the relocation directory starts. @type size: int @param size: The size of the relocation directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBaseRelocation} @return: A new L{ImageBaseRelocation} object.
[ "Parses", "the", "relocation", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1106-L1130
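_parseRelocsDirectory consumes fixed-header blocks until the reader reaches the directory size. A standalone sketch of the same read-until-size loop over raw bytes:

import struct

def walk_reloc_blocks(data):
    # each IMAGE_BASE_RELOCATION block starts with a page RVA and a block size
    offset = 0
    while offset + 8 <= len(data):
        pageRva, blockSize = struct.unpack_from("<II", data, offset)
        if blockSize < 8:
            break  # malformed block; avoid an infinite loop
        yield pageRva, blockSize
        offset += blockSize

for pageRva, blockSize in walk_reloc_blocks("\x00\x10\x00\x00\x0c\x00\x00\x00\x41\x30\x42\x30"):
    print "page %08x, block size %d" % (pageRva, blockSize)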
crackinglandia/pype32
pype32/pype32.py
PE._parseExportDirectory
def _parseExportDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_EXPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) iet = directories.ImageExportTable.parse(rd) auxFunctionRvaArray = list() numberOfNames = iet.numberOfNames.value addressOfNames = iet.addressOfNames.value addressOfNameOrdinals = iet.addressOfNameOrdinals.value addressOfFunctions = iet.addressOfFunctions.value # populate the auxFunctionRvaArray for i in xrange(iet.numberOfFunctions.value): auxFunctionRvaArray.append(self.getDwordAtRva(addressOfFunctions).value) addressOfFunctions += datatypes.DWORD().sizeof() for i in xrange(numberOfNames): nameRva = self.getDwordAtRva(addressOfNames).value nameOrdinal = self.getWordAtRva(addressOfNameOrdinals).value exportName = self.readStringAtRva(nameRva).value entry = directories.ExportTableEntry() ordinal = nameOrdinal + iet.base.value entry.ordinal.value = ordinal entry.nameOrdinal.value = nameOrdinal entry.nameRva.value = nameRva entry.name.value = exportName entry.functionRva.value = auxFunctionRvaArray[nameOrdinal] iet.exportTable.append(entry) addressOfNames += datatypes.DWORD().sizeof() addressOfNameOrdinals += datatypes.WORD().sizeof() for i in xrange(iet.numberOfFunctions.value): if auxFunctionRvaArray[i] != iet.exportTable[i].functionRva.value: entry = directories.ExportTableEntry() entry.functionRva.value = auxFunctionRvaArray[i] entry.ordinal.value = iet.base.value + i iet.exportTable.append(entry) # sort the export table in-place by ordinal iet.exportTable.sort(key=lambda entry: entry.ordinal.value) return iet
python
def _parseExportDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_EXPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) iet = directories.ImageExportTable.parse(rd) auxFunctionRvaArray = list() numberOfNames = iet.numberOfNames.value addressOfNames = iet.addressOfNames.value addressOfNameOrdinals = iet.addressOfNameOrdinals.value addressOfFunctions = iet.addressOfFunctions.value # populate the auxFunctionRvaArray for i in xrange(iet.numberOfFunctions.value): auxFunctionRvaArray.append(self.getDwordAtRva(addressOfFunctions).value) addressOfFunctions += datatypes.DWORD().sizeof() for i in xrange(numberOfNames): nameRva = self.getDwordAtRva(addressOfNames).value nameOrdinal = self.getWordAtRva(addressOfNameOrdinals).value exportName = self.readStringAtRva(nameRva).value entry = directories.ExportTableEntry() ordinal = nameOrdinal + iet.base.value entry.ordinal.value = ordinal entry.nameOrdinal.value = nameOrdinal entry.nameRva.value = nameRva entry.name.value = exportName entry.functionRva.value = auxFunctionRvaArray[nameOrdinal] iet.exportTable.append(entry) addressOfNames += datatypes.DWORD().sizeof() addressOfNameOrdinals += datatypes.WORD().sizeof() for i in xrange(iet.numberOfFunctions.value): if auxFunctionRvaArray[i] != iet.exportTable[i].functionRva.value: entry = directories.ExportTableEntry() entry.functionRva.value = auxFunctionRvaArray[i] entry.ordinal.value = iet.base.value + i iet.exportTable.append(entry) # sort the export table in-place by ordinal iet.exportTable.sort(key=lambda entry: entry.ordinal.value) return iet
[ "def", "_parseExportDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "data", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "rd", "=", "utils", ".", "ReadData", "(", "data", ")", "iet", "=", "directories", ".", "ImageExportTable", ".", "parse", "(", "rd", ")", "auxFunctionRvaArray", "=", "list", "(", ")", "numberOfNames", "=", "iet", ".", "numberOfNames", ".", "value", "addressOfNames", "=", "iet", ".", "addressOfNames", ".", "value", "addressOfNameOrdinals", "=", "iet", ".", "addressOfNameOrdinals", ".", "value", "addressOfFunctions", "=", "iet", ".", "addressOfFunctions", ".", "value", "# populate the auxFunctionRvaArray", "for", "i", "in", "xrange", "(", "iet", ".", "numberOfFunctions", ".", "value", ")", ":", "auxFunctionRvaArray", ".", "append", "(", "self", ".", "getDwordAtRva", "(", "addressOfFunctions", ")", ".", "value", ")", "addressOfFunctions", "+=", "datatypes", ".", "DWORD", "(", ")", ".", "sizeof", "(", ")", "for", "i", "in", "xrange", "(", "numberOfNames", ")", ":", "nameRva", "=", "self", ".", "getDwordAtRva", "(", "addressOfNames", ")", ".", "value", "nameOrdinal", "=", "self", ".", "getWordAtRva", "(", "addressOfNameOrdinals", ")", ".", "value", "exportName", "=", "self", ".", "readStringAtRva", "(", "nameRva", ")", ".", "value", "entry", "=", "directories", ".", "ExportTableEntry", "(", ")", "ordinal", "=", "nameOrdinal", "+", "iet", ".", "base", ".", "value", "#print \"Ordinal value: %d\" % ordinal", "entry", ".", "ordinal", ".", "value", "=", "ordinal", "entry", ".", "nameOrdinal", ".", "vaue", "=", "nameOrdinal", "entry", ".", "nameRva", ".", "value", "=", "nameRva", "entry", ".", "name", ".", "value", "=", "exportName", "entry", ".", "functionRva", ".", "value", "=", "auxFunctionRvaArray", "[", "nameOrdinal", "]", "iet", ".", "exportTable", ".", "append", "(", "entry", ")", "addressOfNames", "+=", "datatypes", ".", "DWORD", "(", ")", ".", "sizeof", "(", ")", "addressOfNameOrdinals", "+=", "datatypes", ".", "WORD", "(", ")", ".", "sizeof", "(", ")", "#print \"export table length: %d\" % len(iet.exportTable)", "#print \"auxFunctionRvaArray: %r\" % auxFunctionRvaArray", "for", "i", "in", "xrange", "(", "iet", ".", "numberOfFunctions", ".", "value", ")", ":", "#print \"auxFunctionRvaArray[%d]: %x\" % (i, auxFunctionRvaArray[i])", "if", "auxFunctionRvaArray", "[", "i", "]", "!=", "iet", ".", "exportTable", "[", "i", "]", ".", "functionRva", ".", "value", ":", "entry", "=", "directories", ".", "ExportTableEntry", "(", ")", "entry", ".", "functionRva", ".", "value", "=", "auxFunctionRvaArray", "[", "i", "]", "entry", ".", "ordinal", ".", "value", "=", "iet", ".", "base", ".", "value", "+", "i", "iet", ".", "exportTable", ".", "append", "(", "entry", ")", "#print \"export table length: %d\" % len(iet.exportTable)", "sorted", "(", "iet", ".", "exportTable", ",", "key", "=", "lambda", "entry", ":", "entry", ".", "ordinal", ")", "return", "iet" ]
Parses the C{IMAGE_EXPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object.
[ "Parses", "the", "C", "{", "IMAGE_EXPORT_DIRECTORY", "}", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1132-L1202
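The export parser above builds one flat exportTable holding both named and ordinal-only entries. A minimal usage sketch, assuming the parsed L{ImageExportTable} is reachable through dataDirectory[0].info (index 0 is the export entry in the PE convention, and .info is the attribute this module fills for each directory); the DLL path is only an example.

import pype32

pe = pype32.PE("C:\\Windows\\System32\\kernel32.dll")
exports = pe.ntHeaders.optionalHeader.dataDirectory[0].info
for entry in exports.exportTable:
    name = entry.name.value or "<ordinal-only export>"
    print "%s ordinal=%d functionRva=0x%08x" % (name, entry.ordinal.value, entry.functionRva.value)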
crackinglandia/pype32
pype32/pype32.py
PE._parseDebugDirectory
def _parseDebugDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_DEBUG_DIRECTORY} directory. @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx} @type rva: int @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object. """ debugDirData = self.getDataAtRva(rva, size) numberOfEntries = size / consts.SIZEOF_IMAGE_DEBUG_ENTRY32 rd = utils.ReadData(debugDirData) return directories.ImageDebugDirectories.parse(rd, numberOfEntries)
python
def _parseDebugDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_DEBUG_DIRECTORY} directory. @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx} @type rva: int @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object. """ debugDirData = self.getDataAtRva(rva, size) numberOfEntries = size / consts.SIZEOF_IMAGE_DEBUG_ENTRY32 rd = utils.ReadData(debugDirData) return directories.ImageDebugDirectories.parse(rd, numberOfEntries)
[ "def", "_parseDebugDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "debugDirData", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "numberOfEntries", "=", "size", "/", "consts", ".", "SIZEOF_IMAGE_DEBUG_ENTRY32", "rd", "=", "utils", ".", "ReadData", "(", "debugDirData", ")", "return", "directories", ".", "ImageDebugDirectories", ".", "parse", "(", "rd", ",", "numberOfEntries", ")" ]
Parses the C{IMAGE_DEBUG_DIRECTORY} directory. @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx} @type rva: int @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object.
[ "Parses", "the", "C", "{", "IMAGE_DEBUG_DIRECTORY", "}", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1204-L1224
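numberOfEntries above comes from dividing the directory size by the fixed 28-byte entry size. A standalone sketch of that layout, unpacking raw IMAGE_DEBUG_DIRECTORY entries with the struct module; the field order follows the MSDN page referenced above and is independent of pype32's own datatypes.

import struct

IMAGE_DEBUG_ENTRY_FMT = "<IIHHIIII"  # Characteristics .. PointerToRawData
ENTRY_SIZE = struct.calcsize(IMAGE_DEBUG_ENTRY_FMT)  # 28 bytes per entry

def unpack_debug_entries(data):
    entries = []
    for off in xrange(0, len(data) - ENTRY_SIZE + 1, ENTRY_SIZE):
        fields = struct.unpack_from(IMAGE_DEBUG_ENTRY_FMT, data, off)
        entries.append({
            "characteristics": fields[0],
            "timeDateStamp": fields[1],
            "majorVersion": fields[2],
            "minorVersion": fields[3],
            "type": fields[4],  # e.g. 2 == IMAGE_DEBUG_TYPE_CODEVIEW
            "sizeOfData": fields[5],
            "addressOfRawData": fields[6],
            "pointerToRawData": fields[7],
        })
    return entries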
crackinglandia/pype32
pype32/pype32.py
PE._parseImportDirectory
def _parseImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_IMPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_IMPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_IMPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise InvalidParameterException: If wrong magic was specified. """ #print "RVA: %x - Size: %x" % (rva, size) importsDirData = self.getDataAtRva(rva, size) #print "Length importsDirData: %d" % len(importsDirData) numberOfEntries = size / consts.SIZEOF_IMAGE_IMPORT_ENTRY32 rd = utils.ReadData(importsDirData) # In .NET binaries, the size of the data directory corresponding to the import table # is greater than the number of bytes in the file. That's why we check for the all-zero 20-byte entry # that indicates the end of the IMAGE_IMPORT_DESCRIPTOR array. rdAux = utils.ReadData(importsDirData) count = 0 entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32) while rdAux.offset < len(rdAux.data) and not utils.allZero(entry): try: entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32) count += 1 except excep.DataLengthException: if self._verbose: print "[!] Warning: DataLengthException detected!" if numberOfEntries - 1 > count: numberOfEntries = count + 1 iid = directories.ImageImportDescriptor.parse(rd, numberOfEntries) iidLength = len(iid) peIsBounded = self.isPeBounded() if magic == consts.PE64: ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG64 ADDRESS_MASK = consts.ADDRESS_MASK64 elif magic == consts.PE32: ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG ADDRESS_MASK = consts.ADDRESS_MASK32 else: raise excep.InvalidParameterException("magic value %d is not PE64 nor PE32." % magic) for i in range(iidLength - 1): if iid[i].originalFirstThunk.value != 0: iltRva = iid[i].originalFirstThunk.value iatRva = iid[i].firstThunk.value if magic == consts.PE64: entry = self.getQwordAtRva(iltRva).value elif magic == consts.PE32: entry = self.getDwordAtRva(iltRva).value while entry != 0: if magic == consts.PE64: iatEntry = directories.ImportAddressTableEntry64() elif magic == consts.PE32: iatEntry = directories.ImportAddressTableEntry() iatEntry.originalFirstThunk.value = entry if iatEntry.originalFirstThunk.value & ORDINAL_FLAG: iatEntry.hint.value = None iatEntry.name.value = iatEntry.originalFirstThunk.value & ADDRESS_MASK else: iatEntry.hint.value = self.getWordAtRva(iatEntry.originalFirstThunk.value).value iatEntry.name.value = self.readStringAtRva(iatEntry.originalFirstThunk.value + 2).value if magic == consts.PE64: iatEntry.firstThunk.value = self.getQwordAtRva(iatRva).value iltRva += 8 iatRva += 8 entry = self.getQwordAtRva(iltRva).value elif magic == consts.PE32: iatEntry.firstThunk.value = self.getDwordAtRva(iatRva).value iltRva += 4 iatRva += 4 entry = self.getDwordAtRva(iltRva).value iid[i].iat.append(iatEntry) else: iatRva = iid[i].firstThunk.value if magic == consts.PE64: entry = self.getQwordAtRva(iatRva).value elif magic == consts.PE32: entry = self.getDwordAtRva(iatRva).value while entry != 0: if magic == consts.PE64: iatEntry = directories.ImportAddressTableEntry64() elif magic == consts.PE32: iatEntry = directories.ImportAddressTableEntry() iatEntry.firstThunk.value = entry iatEntry.originalFirstThunk.value = 0 if not peIsBounded: ft = iatEntry.firstThunk.value if ft & ORDINAL_FLAG: iatEntry.hint.value = None iatEntry.name.value = ft & ADDRESS_MASK else: iatEntry.hint.value = self.getWordAtRva(ft).value iatEntry.name.value = self.readStringAtRva(ft + 2).value else: iatEntry.hint.value = None iatEntry.name.value = None if magic == consts.PE64: iatRva += 8 entry = self.getQwordAtRva(iatRva).value elif magic == consts.PE32: iatRva += 4 entry = self.getDwordAtRva(iatRva).value iid[i].iat.append(iatEntry) iid[i].metaData.moduleName.value = self.readStringAtRva(iid[i].name.value).value iid[i].metaData.numberOfImports.value = len(iid[i].iat) return iid
python
def _parseImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_IMPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_IMPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_IMPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise InvalidParameterException: If wrong magic was specified. """ #print "RVA: %x - Size: %x" % (rva, size) importsDirData = self.getDataAtRva(rva, size) #print "Length importsDirData: %d" % len(importsDirData) numberOfEntries = size / consts.SIZEOF_IMAGE_IMPORT_ENTRY32 rd = utils.ReadData(importsDirData) # In .NET binaries, the size of the data directory corresponding to the import table # is greater than the number of bytes in the file. That's why we check for the all-zero 20-byte entry # that indicates the end of the IMAGE_IMPORT_DESCRIPTOR array. rdAux = utils.ReadData(importsDirData) count = 0 entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32) while rdAux.offset < len(rdAux.data) and not utils.allZero(entry): try: entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32) count += 1 except excep.DataLengthException: if self._verbose: print "[!] Warning: DataLengthException detected!" if numberOfEntries - 1 > count: numberOfEntries = count + 1 iid = directories.ImageImportDescriptor.parse(rd, numberOfEntries) iidLength = len(iid) peIsBounded = self.isPeBounded() if magic == consts.PE64: ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG64 ADDRESS_MASK = consts.ADDRESS_MASK64 elif magic == consts.PE32: ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG ADDRESS_MASK = consts.ADDRESS_MASK32 else: raise excep.InvalidParameterException("magic value %d is not PE64 nor PE32." % magic) for i in range(iidLength - 1): if iid[i].originalFirstThunk.value != 0: iltRva = iid[i].originalFirstThunk.value iatRva = iid[i].firstThunk.value if magic == consts.PE64: entry = self.getQwordAtRva(iltRva).value elif magic == consts.PE32: entry = self.getDwordAtRva(iltRva).value while entry != 0: if magic == consts.PE64: iatEntry = directories.ImportAddressTableEntry64() elif magic == consts.PE32: iatEntry = directories.ImportAddressTableEntry() iatEntry.originalFirstThunk.value = entry if iatEntry.originalFirstThunk.value & ORDINAL_FLAG: iatEntry.hint.value = None iatEntry.name.value = iatEntry.originalFirstThunk.value & ADDRESS_MASK else: iatEntry.hint.value = self.getWordAtRva(iatEntry.originalFirstThunk.value).value iatEntry.name.value = self.readStringAtRva(iatEntry.originalFirstThunk.value + 2).value if magic == consts.PE64: iatEntry.firstThunk.value = self.getQwordAtRva(iatRva).value iltRva += 8 iatRva += 8 entry = self.getQwordAtRva(iltRva).value elif magic == consts.PE32: iatEntry.firstThunk.value = self.getDwordAtRva(iatRva).value iltRva += 4 iatRva += 4 entry = self.getDwordAtRva(iltRva).value iid[i].iat.append(iatEntry) else: iatRva = iid[i].firstThunk.value if magic == consts.PE64: entry = self.getQwordAtRva(iatRva).value elif magic == consts.PE32: entry = self.getDwordAtRva(iatRva).value while entry != 0: if magic == consts.PE64: iatEntry = directories.ImportAddressTableEntry64() elif magic == consts.PE32: iatEntry = directories.ImportAddressTableEntry() iatEntry.firstThunk.value = entry iatEntry.originalFirstThunk.value = 0 if not peIsBounded: ft = iatEntry.firstThunk.value if ft & ORDINAL_FLAG: iatEntry.hint.value = None iatEntry.name.value = ft & ADDRESS_MASK else: iatEntry.hint.value = self.getWordAtRva(ft).value iatEntry.name.value = self.readStringAtRva(ft + 2).value else: iatEntry.hint.value = None iatEntry.name.value = None if magic == consts.PE64: iatRva += 8 entry = self.getQwordAtRva(iatRva).value elif magic == consts.PE32: iatRva += 4 entry = self.getDwordAtRva(iatRva).value iid[i].iat.append(iatEntry) iid[i].metaData.moduleName.value = self.readStringAtRva(iid[i].name.value).value iid[i].metaData.numberOfImports.value = len(iid[i].iat) return iid
[ "def", "_parseImportDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "#print \"RVA: %x - Size: %x\" % (rva, size) ", "importsDirData", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "#print \"Length importsDirData: %d\" % len(importsDirData)", "numberOfEntries", "=", "size", "/", "consts", ".", "SIZEOF_IMAGE_IMPORT_ENTRY32", "rd", "=", "utils", ".", "ReadData", "(", "importsDirData", ")", "# In .NET binaries, the size of the data directory corresponding to the import table", "# is greater than the number of bytes in the file. Thats why we check for the last group of 5 null bytes", "# that indicates the end of the IMAGE_IMPORT_DESCRIPTOR array.", "rdAux", "=", "utils", ".", "ReadData", "(", "importsDirData", ")", "count", "=", "0", "entry", "=", "rdAux", ".", "read", "(", "consts", ".", "SIZEOF_IMAGE_IMPORT_ENTRY32", ")", "while", "rdAux", ".", "offset", "<", "len", "(", "rdAux", ".", "data", ")", "and", "not", "utils", ".", "allZero", "(", "entry", ")", ":", "try", ":", "entry", "=", "rdAux", ".", "read", "(", "consts", ".", "SIZEOF_IMAGE_IMPORT_ENTRY32", ")", "count", "+=", "1", "except", "excep", ".", "DataLengthException", ":", "if", "self", ".", "_verbose", ":", "print", "\"[!] Warning: DataLengthException detected!.\"", "if", "numberOfEntries", "-", "1", ">", "count", ":", "numberOfEntries", "=", "count", "+", "1", "iid", "=", "directories", ".", "ImageImportDescriptor", ".", "parse", "(", "rd", ",", "numberOfEntries", ")", "iidLength", "=", "len", "(", "iid", ")", "peIsBounded", "=", "self", ".", "isPeBounded", "(", ")", "if", "magic", "==", "consts", ".", "PE64", ":", "ORDINAL_FLAG", "=", "consts", ".", "IMAGE_ORDINAL_FLAG64", "ADDRESS_MASK", "=", "consts", ".", "ADDRESS_MASK64", "elif", "magic", "==", "consts", ".", "PE32", ":", "ORDINAL_FLAG", "=", "consts", ".", "IMAGE_ORDINAL_FLAG", "ADDRESS_MASK", "=", "consts", ".", "ADDRESS_MASK32", "else", ":", "raise", "InvalidParameterException", "(", "\"magic value %d is not PE64 nor PE32.\"", "%", "magic", ")", "for", "i", "in", "range", "(", "iidLength", "-", "1", ")", ":", "if", "iid", "[", "i", "]", ".", "originalFirstThunk", ".", "value", "!=", "0", ":", "iltRva", "=", "iid", "[", "i", "]", ".", "originalFirstThunk", ".", "value", "iatRva", "=", "iid", "[", "i", "]", ".", "firstThunk", ".", "value", "if", "magic", "==", "consts", ".", "PE64", ":", "entry", "=", "self", ".", "getQwordAtRva", "(", "iltRva", ")", ".", "value", "elif", "magic", "==", "consts", ".", "PE32", ":", "entry", "=", "self", ".", "getDwordAtRva", "(", "iltRva", ")", ".", "value", "while", "entry", "!=", "0", ":", "if", "magic", "==", "consts", ".", "PE64", ":", "iatEntry", "=", "directories", ".", "ImportAddressTableEntry64", "(", ")", "elif", "magic", "==", "consts", ".", "PE32", ":", "iatEntry", "=", "directories", ".", "ImportAddressTableEntry", "(", ")", "iatEntry", ".", "originalFirstThunk", ".", "value", "=", "entry", "if", "iatEntry", ".", "originalFirstThunk", ".", "value", "&", "ORDINAL_FLAG", ":", "iatEntry", ".", "hint", ".", "value", "=", "None", "iatEntry", ".", "name", ".", "value", "=", "iatEntry", ".", "originalFirstThunk", ".", "value", "&", "ADDRESS_MASK", "else", ":", "iatEntry", ".", "hint", ".", "value", "=", "self", ".", "getWordAtRva", "(", "iatEntry", ".", "originalFirstThunk", ".", "value", ")", ".", "value", "iatEntry", ".", "name", ".", "value", "=", "self", ".", "readStringAtRva", "(", "iatEntry", ".", "originalFirstThunk", ".", "value", "+", "2", ")", ".", 
"value", "if", "magic", "==", "consts", ".", "PE64", ":", "iatEntry", ".", "firstThunk", ".", "value", "=", "self", ".", "getQwordAtRva", "(", "iatRva", ")", ".", "value", "iltRva", "+=", "8", "iatRva", "+=", "8", "entry", "=", "self", ".", "getQwordAtRva", "(", "iltRva", ")", ".", "value", "elif", "magic", "==", "consts", ".", "PE32", ":", "iatEntry", ".", "firstThunk", ".", "value", "=", "self", ".", "getDwordAtRva", "(", "iatRva", ")", ".", "value", "iltRva", "+=", "4", "iatRva", "+=", "4", "entry", "=", "self", ".", "getDwordAtRva", "(", "iltRva", ")", ".", "value", "iid", "[", "i", "]", ".", "iat", ".", "append", "(", "iatEntry", ")", "else", ":", "iatRva", "=", "iid", "[", "i", "]", ".", "firstThunk", ".", "value", "if", "magic", "==", "consts", ".", "PE64", ":", "entry", "=", "self", ".", "getQwordAtRva", "(", "iatRva", ")", ".", "value", "elif", "magic", "==", "consts", ".", "PE32", ":", "entry", "=", "self", ".", "getDwordAtRva", "(", "iatRva", ")", ".", "value", "while", "entry", "!=", "0", ":", "if", "magic", "==", "consts", ".", "PE64", ":", "iatEntry", "=", "directories", ".", "ImportAddressTableEntry64", "(", ")", "elif", "magic", "==", "consts", ".", "PE32", ":", "iatEntry", "=", "directories", ".", "ImportAddressTableEntry", "(", ")", "iatEntry", ".", "firstThunk", ".", "value", "=", "entry", "iatEntry", ".", "originalFirstThunk", ".", "value", "=", "0", "if", "not", "peIsBounded", ":", "ft", "=", "iatEntry", ".", "firstThunk", ".", "value", "if", "ft", "&", "ORDINAL_FLAG", ":", "iatEntry", ".", "hint", ".", "value", "=", "None", "iatEntry", ".", "name", ".", "value", "=", "ft", "&", "ADDRESS_MASK", "else", ":", "iatEntry", ".", "hint", ".", "value", "=", "self", ".", "getWordAtRva", "(", "ft", ")", ".", "value", "iatEntry", ".", "name", ".", "value", "=", "self", ".", "readStringAtRva", "(", "ft", "+", "2", ")", ".", "value", "else", ":", "iatEntry", ".", "hint", ".", "value", "=", "None", "iatEntry", ".", "name", ".", "value", "=", "None", "if", "magic", "==", "consts", ".", "PE64", ":", "iatRva", "+=", "8", "entry", "=", "self", ".", "getQwordAtRva", "(", "iatRva", ")", ".", "value", "elif", "magic", "==", "consts", ".", "PE32", ":", "iatRva", "+=", "4", "entry", "=", "self", ".", "getDwordAtRva", "(", "iatRva", ")", ".", "value", "iid", "[", "i", "]", ".", "iat", ".", "append", "(", "iatEntry", ")", "iid", "[", "i", "]", ".", "metaData", ".", "moduleName", ".", "value", "=", "self", ".", "readStringAtRva", "(", "iid", "[", "i", "]", ".", "name", ".", "value", ")", ".", "value", "iid", "[", "i", "]", ".", "metaData", ".", "numberOfImports", ".", "value", "=", "len", "(", "iid", "[", "i", "]", ".", "iat", ")", "return", "iid" ]
Parses the C{IMAGE_IMPORT_DIRECTORY} directory. @type rva: int @param rva: The RVA where the C{IMAGE_IMPORT_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_IMPORT_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise InvalidParameterException: If wrong magic was specified.
[ "Parses", "the", "C", "{", "IMAGE_IMPORT_DIRECTORY", "}", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1226-L1362
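The heart of the loop above is the ordinal-vs-name decision taken for every ILT/IAT thunk. A minimal sketch of that test against plain integers, so it runs standalone; the flag and mask values are the 32-bit ones from the PE/COFF spec, which keeps the ordinal in the low 16 bits of the thunk.

IMAGE_ORDINAL_FLAG32 = 0x80000000
HINT_NAME_RVA_MASK = 0x7fffffff

def classify_thunk32(thunk):
    """Return ('ordinal', n) or ('name', rva) for a 32-bit thunk value."""
    if thunk & IMAGE_ORDINAL_FLAG32:
        return ("ordinal", thunk & 0xffff)  # import by ordinal
    return ("name", thunk & HINT_NAME_RVA_MASK)  # RVA of the hint/name pair

print classify_thunk32(0x80000064)  # -> ('ordinal', 100)
print classify_thunk32(0x00004a2c)  # -> ('name', 0x4a2c)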
crackinglandia/pype32
pype32/pype32.py
PE._parseNetDirectory
def _parseNetDirectory(self, rva, size, magic = consts.PE32): """ Parses the NET directory. @see: U{http://www.ntcore.com/files/dotnetformat.htm} @type rva: int @param rva: The RVA where the NET directory starts. @type size: int @param size: The size of the NET directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{NETDirectory} @return: A new L{NETDirectory} object. """ if not rva or not size: return None # create a NETDirectory class to hold the data netDirectoryClass = directories.NETDirectory() # parse the .NET Directory netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size))) netDirectoryClass.directory = netDir # get the MetaData RVA and Size mdhRva = netDir.metaData.rva.value mdhSize = netDir.metaData.size.value # read all the MetaData rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize)) # parse the MetaData headers netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd) # parse the NET metadata streams numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams) for i in range(numberOfStreams): stream = netDirectoryClass.netMetaDataStreams[i] name = stream.name.value rd.setOffset(stream.offset.value) rd2 = utils.ReadData(rd.read(stream.size.value)) stream.info = [] if name == "#~" or i == 0: stream.info = rd2 elif name == "#Strings" or i == 1: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetString() }) elif name == "#US" or i == 2: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetUnicodeString() }) elif name == "#GUID" or i == 3: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetGuid() }) elif name == "#Blob" or i == 4: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetBlob() }) for i in range(numberOfStreams): stream = netDirectoryClass.netMetaDataStreams[i] name = stream.name.value if name == "#~" or i == 0: stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams) # parse .NET resources # get the Resources RVA and Size resRva = netDir.resources.rva.value resSize = netDir.resources.size.value # read all the MetaData rd = utils.ReadData(self.getDataAtRva(resRva, resSize)) resources = [] for i in netDirectoryClass.netMetaDataStreams[0].info.tables["ManifestResource"]: offset = i["offset"] rd.setOffset(offset) size = rd.readDword() data = rd.read(size) if data[:4] == "\xce\xca\xef\xbe": data = directories.NetResources.parse(utils.ReadData(data)) resources.append({ "name": i["name"], "offset": offset + 4, "size": size, "data": data }) netDirectoryClass.directory.resources.info = resources return netDirectoryClass
python
def _parseNetDirectory(self, rva, size, magic = consts.PE32): """ Parses the NET directory. @see: U{http://www.ntcore.com/files/dotnetformat.htm} @type rva: int @param rva: The RVA where the NET directory starts. @type size: int @param size: The size of the NET directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{NETDirectory} @return: A new L{NETDirectory} object. """ if not rva or not size: return None # create a NETDirectory class to hold the data netDirectoryClass = directories.NETDirectory() # parse the .NET Directory netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size))) netDirectoryClass.directory = netDir # get the MetaData RVA and Size mdhRva = netDir.metaData.rva.value mdhSize = netDir.metaData.size.value # read all the MetaData rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize)) # parse the MetaData headers netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd) # parse the NET metadata streams numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams) for i in range(numberOfStreams): stream = netDirectoryClass.netMetaDataStreams[i] name = stream.name.value rd.setOffset(stream.offset.value) rd2 = utils.ReadData(rd.read(stream.size.value)) stream.info = [] if name == "#~" or i == 0: stream.info = rd2 elif name == "#Strings" or i == 1: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetString() }) elif name == "#US" or i == 2: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetUnicodeString() }) elif name == "#GUID" or i == 3: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetGuid() }) elif name == "#Blob" or i == 4: while len(rd2) > 0: offset = rd2.tell() stream.info.append({ offset: rd2.readDotNetBlob() }) for i in range(numberOfStreams): stream = netDirectoryClass.netMetaDataStreams[i] name = stream.name.value if name == "#~" or i == 0: stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams) # parse .NET resources # get the Resources RVA and Size resRva = netDir.resources.rva.value resSize = netDir.resources.size.value # read all the MetaData rd = utils.ReadData(self.getDataAtRva(resRva, resSize)) resources = [] for i in netDirectoryClass.netMetaDataStreams[0].info.tables["ManifestResource"]: offset = i["offset"] rd.setOffset(offset) size = rd.readDword() data = rd.read(size) if data[:4] == "\xce\xca\xef\xbe": data = directories.NetResources.parse(utils.ReadData(data)) resources.append({ "name": i["name"], "offset": offset + 4, "size": size, "data": data }) netDirectoryClass.directory.resources.info = resources return netDirectoryClass
[ "def", "_parseNetDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "if", "not", "rva", "or", "not", "size", ":", "return", "None", "# create a NETDirectory class to hold the data", "netDirectoryClass", "=", "directories", ".", "NETDirectory", "(", ")", "# parse the .NET Directory", "netDir", "=", "directories", ".", "NetDirectory", ".", "parse", "(", "utils", ".", "ReadData", "(", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", ")", ")", "netDirectoryClass", ".", "directory", "=", "netDir", "# get the MetaData RVA and Size", "mdhRva", "=", "netDir", ".", "metaData", ".", "rva", ".", "value", "mdhSize", "=", "netDir", ".", "metaData", ".", "size", ".", "value", "# read all the MetaData", "rd", "=", "utils", ".", "ReadData", "(", "self", ".", "getDataAtRva", "(", "mdhRva", ",", "mdhSize", ")", ")", "# parse the MetaData headers", "netDirectoryClass", ".", "netMetaDataHeader", "=", "directories", ".", "NetMetaDataHeader", ".", "parse", "(", "rd", ")", "# parse the NET metadata streams", "numberOfStreams", "=", "netDirectoryClass", ".", "netMetaDataHeader", ".", "numberOfStreams", ".", "value", "netDirectoryClass", ".", "netMetaDataStreams", "=", "directories", ".", "NetMetaDataStreams", ".", "parse", "(", "rd", ",", "numberOfStreams", ")", "for", "i", "in", "range", "(", "numberOfStreams", ")", ":", "stream", "=", "netDirectoryClass", ".", "netMetaDataStreams", "[", "i", "]", "name", "=", "stream", ".", "name", ".", "value", "rd", ".", "setOffset", "(", "stream", ".", "offset", ".", "value", ")", "rd2", "=", "utils", ".", "ReadData", "(", "rd", ".", "read", "(", "stream", ".", "size", ".", "value", ")", ")", "stream", ".", "info", "=", "[", "]", "if", "name", "==", "\"#~\"", "or", "i", "==", "0", ":", "stream", ".", "info", "=", "rd2", "elif", "name", "==", "\"#Strings\"", "or", "i", "==", "1", ":", "while", "len", "(", "rd2", ")", ">", "0", ":", "offset", "=", "rd2", ".", "tell", "(", ")", "stream", ".", "info", ".", "append", "(", "{", "offset", ":", "rd2", ".", "readDotNetString", "(", ")", "}", ")", "elif", "name", "==", "\"#US\"", "or", "i", "==", "2", ":", "while", "len", "(", "rd2", ")", ">", "0", ":", "offset", "=", "rd2", ".", "tell", "(", ")", "stream", ".", "info", ".", "append", "(", "{", "offset", ":", "rd2", ".", "readDotNetUnicodeString", "(", ")", "}", ")", "elif", "name", "==", "\"#GUID\"", "or", "i", "==", "3", ":", "while", "len", "(", "rd2", ")", ">", "0", ":", "offset", "=", "rd2", ".", "tell", "(", ")", "stream", ".", "info", ".", "append", "(", "{", "offset", ":", "rd2", ".", "readDotNetGuid", "(", ")", "}", ")", "elif", "name", "==", "\"#Blob\"", "or", "i", "==", "4", ":", "while", "len", "(", "rd2", ")", ">", "0", ":", "offset", "=", "rd2", ".", "tell", "(", ")", "stream", ".", "info", ".", "append", "(", "{", "offset", ":", "rd2", ".", "readDotNetBlob", "(", ")", "}", ")", "for", "i", "in", "range", "(", "numberOfStreams", ")", ":", "stream", "=", "netDirectoryClass", ".", "netMetaDataStreams", "[", "i", "]", "name", "=", "stream", ".", "name", ".", "value", "if", "name", "==", "\"#~\"", "or", "i", "==", "0", ":", "stream", ".", "info", "=", "directories", ".", "NetMetaDataTables", ".", "parse", "(", "stream", ".", "info", ",", "netDirectoryClass", ".", "netMetaDataStreams", ")", "# parse .NET resources", "# get the Resources RVA and Size", "resRva", "=", "netDir", ".", "resources", ".", "rva", ".", "value", "resSize", "=", "netDir", ".", "resources", ".", "size", ".", "value", "# read all the 
MetaData", "rd", "=", "utils", ".", "ReadData", "(", "self", ".", "getDataAtRva", "(", "resRva", ",", "resSize", ")", ")", "resources", "=", "[", "]", "for", "i", "in", "netDirectoryClass", ".", "netMetaDataStreams", "[", "0", "]", ".", "info", ".", "tables", "[", "\"ManifestResource\"", "]", ":", "offset", "=", "i", "[", "\"offset\"", "]", "rd", ".", "setOffset", "(", "offset", ")", "size", "=", "rd", ".", "readDword", "(", ")", "data", "=", "rd", ".", "read", "(", "size", ")", "if", "data", "[", ":", "4", "]", "==", "\"\\xce\\xca\\xef\\xbe\"", ":", "data", "=", "directories", ".", "NetResources", ".", "parse", "(", "utils", ".", "ReadData", "(", "data", ")", ")", "resources", ".", "append", "(", "{", "\"name\"", ":", "i", "[", "\"name\"", "]", ",", "\"offset\"", ":", "offset", "+", "4", ",", "\"size\"", ":", "size", ",", "\"data\"", ":", "data", "}", ")", "netDirectoryClass", ".", "directory", ".", "resources", ".", "info", "=", "resources", "return", "netDirectoryClass" ]
Parses the NET directory. @see: U{http://www.ntcore.com/files/dotnetformat.htm} @type rva: int @param rva: The RVA where the NET directory starts. @type size: int @param size: The size of the NET directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{NETDirectory} @return: A new L{NETDirectory} object.
[ "Parses", "the", "NET", "directory", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1364-L1458
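Readers like readDotNetBlob above have to decode ECMA-335 compressed lengths before they can slice the #Blob heap. A standalone, illustrative sketch of that encoding (ECMA-335 II.23.2), not pype32's own implementation:

def read_compressed_uint(data, offset):
    """Decode an ECMA-335 compressed unsigned int; return (value, new_offset)."""
    b0 = ord(data[offset])
    if b0 & 0x80 == 0:  # 1-byte form: 0xxxxxxx
        return b0, offset + 1
    if b0 & 0xc0 == 0x80:  # 2-byte form: 10xxxxxx plus one byte
        return ((b0 & 0x3f) << 8) | ord(data[offset + 1]), offset + 2
    # 4-byte form: 110xxxxx plus three bytes
    b1, b2, b3 = [ord(c) for c in data[offset + 1:offset + 4]]
    return ((b0 & 0x1f) << 24) | (b1 << 16) | (b2 << 8) | b3, offset + 4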
crackinglandia/pype32
pype32/pype32.py
DosHeader.parse
def parse(readDataInstance): """ Returns a new L{DosHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object. @rtype: L{DosHeader} @return: A new L{DosHeader} object. """ dosHdr = DosHeader() dosHdr.e_magic.value = readDataInstance.readWord() dosHdr.e_cblp.value = readDataInstance.readWord() dosHdr.e_cp.value = readDataInstance.readWord() dosHdr.e_crlc.value = readDataInstance.readWord() dosHdr.e_cparhdr.value = readDataInstance.readWord() dosHdr.e_minalloc.value = readDataInstance.readWord() dosHdr.e_maxalloc.value = readDataInstance.readWord() dosHdr.e_ss.value = readDataInstance.readWord() dosHdr.e_sp.value = readDataInstance.readWord() dosHdr.e_csum.value = readDataInstance.readWord() dosHdr.e_ip.value = readDataInstance.readWord() dosHdr.e_cs.value = readDataInstance.readWord() dosHdr.e_lfarlc.value = readDataInstance.readWord() dosHdr.e_ovno.value = readDataInstance.readWord() dosHdr.e_res = datatypes.Array(datatypes.TYPE_WORD) for i in range(4): dosHdr.e_res.append(datatypes.WORD(readDataInstance.readWord())) dosHdr.e_oemid.value = readDataInstance.readWord() dosHdr.e_oeminfo.value = readDataInstance.readWord() dosHdr.e_res2 = datatypes.Array(datatypes.TYPE_WORD) for i in range (10): dosHdr.e_res2.append(datatypes.WORD(readDataInstance.readWord())) dosHdr.e_lfanew.value = readDataInstance.readDword() return dosHdr
python
def parse(readDataInstance): """ Returns a new L{DosHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object. @rtype: L{DosHeader} @return: A new L{DosHeader} object. """ dosHdr = DosHeader() dosHdr.e_magic.value = readDataInstance.readWord() dosHdr.e_cblp.value = readDataInstance.readWord() dosHdr.e_cp.value = readDataInstance.readWord() dosHdr.e_crlc.value = readDataInstance.readWord() dosHdr.e_cparhdr.value = readDataInstance.readWord() dosHdr.e_minalloc.value = readDataInstance.readWord() dosHdr.e_maxalloc.value = readDataInstance.readWord() dosHdr.e_ss.value = readDataInstance.readWord() dosHdr.e_sp.value = readDataInstance.readWord() dosHdr.e_csum.value = readDataInstance.readWord() dosHdr.e_ip.value = readDataInstance.readWord() dosHdr.e_cs.value = readDataInstance.readWord() dosHdr.e_lfarlc.value = readDataInstance.readWord() dosHdr.e_ovno.value = readDataInstance.readWord() dosHdr.e_res = datatypes.Array(datatypes.TYPE_WORD) for i in range(4): dosHdr.e_res.append(datatypes.WORD(readDataInstance.readWord())) dosHdr.e_oemid.value = readDataInstance.readWord() dosHdr.e_oeminfo.value = readDataInstance.readWord() dosHdr.e_res2 = datatypes.Array(datatypes.TYPE_WORD) for i in range (10): dosHdr.e_res2.append(datatypes.WORD(readDataInstance.readWord())) dosHdr.e_lfanew.value = readDataInstance.readDword() return dosHdr
[ "def", "parse", "(", "readDataInstance", ")", ":", "dosHdr", "=", "DosHeader", "(", ")", "dosHdr", ".", "e_magic", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_cblp", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_cp", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_crlc", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_cparhdr", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_minalloc", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_maxalloc", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_ss", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_sp", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_csum", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_ip", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_cs", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_lfarlc", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_ovno", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_res", "=", "datatypes", ".", "Array", "(", "datatypes", ".", "TYPE_WORD", ")", "for", "i", "in", "range", "(", "4", ")", ":", "dosHdr", ".", "e_res", ".", "append", "(", "datatypes", ".", "WORD", "(", "readDataInstance", ".", "readWord", "(", ")", ")", ")", "dosHdr", ".", "e_oemid", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_oeminfo", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dosHdr", ".", "e_res2", "=", "datatypes", ".", "Array", "(", "datatypes", ".", "TYPE_WORD", ")", "for", "i", "in", "range", "(", "10", ")", ":", "dosHdr", ".", "e_res2", ".", "append", "(", "datatypes", ".", "WORD", "(", "readDataInstance", ".", "readWord", "(", ")", ")", ")", "dosHdr", ".", "e_lfanew", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "dosHdr" ]
Returns a new L{DosHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object. @rtype: L{DosHeader} @return: A new L{DosHeader} object.
[ "Returns", "a", "new", "L", "{", "DosHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1617-L1656
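Only two DOS-header fields matter for locating the PE headers: the 'MZ' magic and e_lfanew. A tiny standalone check under that assumption (the input file name is hypothetical):

import struct

def pe_header_offset(data):
    if data[:2] != "MZ":  # e_magic must be 0x5a4d
        raise ValueError("not an MZ executable")
    (e_lfanew,) = struct.unpack_from("<I", data, 0x3c)  # e_lfanew lives at offset 0x3c
    return e_lfanew

raw = open("sample.exe", "rb").read()
print "NT headers at file offset 0x%x" % pe_header_offset(raw)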
crackinglandia/pype32
pype32/pype32.py
NtHeaders.parse
def parse(readDataInstance): """ Returns a new L{NtHeaders} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NtHeaders} object. @rtype: L{NtHeaders} @return: A new L{NtHeaders} object. """ nt = NtHeaders() nt.signature.value = readDataInstance.readDword() nt.fileHeader = FileHeader.parse(readDataInstance) nt.optionalHeader = OptionalHeader.parse(readDataInstance) return nt
python
def parse(readDataInstance): """ Returns a new L{NtHeaders} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NtHeaders} object. @rtype: L{NtHeaders} @return: A new L{NtHeaders} object. """ nt = NtHeaders() nt.signature.value = readDataInstance.readDword() nt.fileHeader = FileHeader.parse(readDataInstance) nt.optionalHeader = OptionalHeader.parse(readDataInstance) return nt
[ "def", "parse", "(", "readDataInstance", ")", ":", "nt", "=", "NtHeaders", "(", ")", "nt", ".", "signature", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nt", ".", "fileHeader", "=", "FileHeader", ".", "parse", "(", "readDataInstance", ")", "nt", ".", "optionalHeader", "=", "OptionalHeader", ".", "parse", "(", "readDataInstance", ")", "return", "nt" ]
Returns a new L{NtHeaders} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NtHeaders} object. @rtype: L{NtHeaders} @return: A new L{NtHeaders} object.
[ "Returns", "a", "new", "L", "{", "NtHeaders", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1682-L1696
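The first DWORD read above is the NT signature; a quick standalone sketch of validating it ('PE\0\0' is 0x00004550 as a little-endian DWORD) right after following e_lfanew:

import struct

def has_valid_nt_signature(data, e_lfanew):
    (signature,) = struct.unpack_from("<I", data, e_lfanew)
    return signature == 0x4550  # 'PE\0\0'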
crackinglandia/pype32
pype32/pype32.py
FileHeader.parse
def parse(readDataInstance): """ Returns a new L{FileHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{FileHeader} object. @rtype: L{FileHeader} @return: A new L{ReadData} object. """ fh = FileHeader() fh.machine.value = readDataInstance.readWord() fh.numberOfSections.value = readDataInstance.readWord() fh.timeDateStamp.value = readDataInstance.readDword() fh.pointerToSymbolTable.value = readDataInstance.readDword() fh.numberOfSymbols.value = readDataInstance.readDword() fh.sizeOfOptionalHeader.value = readDataInstance.readWord() fh.characteristics.value = readDataInstance.readWord() return fh
python
def parse(readDataInstance): """ Returns a new L{FileHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{FileHeader} object. @rtype: L{FileHeader} @return: A new L{ReadData} object. """ fh = FileHeader() fh.machine.value = readDataInstance.readWord() fh.numberOfSections.value = readDataInstance.readWord() fh.timeDateStamp.value = readDataInstance.readDword() fh.pointerToSymbolTable.value = readDataInstance.readDword() fh.numberOfSymbols.value = readDataInstance.readDword() fh.sizeOfOptionalHeader.value = readDataInstance.readWord() fh.characteristics.value = readDataInstance.readWord() return fh
[ "def", "parse", "(", "readDataInstance", ")", ":", "fh", "=", "FileHeader", "(", ")", "fh", ".", "machine", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "fh", ".", "numberOfSections", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "fh", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "fh", ".", "pointerToSymbolTable", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "fh", ".", "numberOfSymbols", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "fh", ".", "sizeOfOptionalHeader", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "fh", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "return", "fh" ]
Returns a new L{FileHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{FileHeader} object. @rtype: L{FileHeader} @return: A new L{FileHeader} object.
[ "Returns", "a", "new", "L", "{", "FileHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1726-L1744
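Two of the fields read above decode directly with the standard library: timeDateStamp is a POSIX timestamp, and machine is a WORD whose common values (0x014c for i386, 0x8664 for x86-64) come from the PE/COFF spec. An illustrative helper over a parsed FileHeader:

import datetime

def describe_file_header(fh):
    when = datetime.datetime.utcfromtimestamp(fh.timeDateStamp.value)
    arch = {0x014c: "x86", 0x8664: "x64"}.get(fh.machine.value, "other")
    return "%s binary linked %s, %d sections" % (arch, when.isoformat(), fh.numberOfSections.value)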
crackinglandia/pype32
pype32/pype32.py
OptionalHeader.parse
def parse(readDataInstance): """ Returns a new L{OptionalHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{OptionalHeader} object. @rtype: L{OptionalHeader} @return: A new L{OptionalHeader} object. """ oh = OptionalHeader() oh.magic.value = readDataInstance.readWord() oh.majorLinkerVersion.value = readDataInstance.readByte() oh.minorLinkerVersion.value = readDataInstance.readByte() oh.sizeOfCode.value = readDataInstance.readDword() oh.sizeOfInitializedData.value = readDataInstance.readDword() oh.sizeOfUninitializedData.value = readDataInstance.readDword() oh.addressOfEntryPoint.value = readDataInstance.readDword() oh.baseOfCode.value = readDataInstance.readDword() oh.baseOfData.value = readDataInstance.readDword() oh.imageBase.value = readDataInstance.readDword() oh.sectionAlignment.value = readDataInstance.readDword() oh.fileAlignment.value = readDataInstance.readDword() oh.majorOperatingSystemVersion.value = readDataInstance.readWord() oh.minorOperatingSystemVersion.value = readDataInstance.readWord() oh.majorImageVersion.value = readDataInstance.readWord() oh.minorImageVersion.value = readDataInstance.readWord() oh.majorSubsystemVersion.value = readDataInstance.readWord() oh.minorSubsystemVersion.value = readDataInstance.readWord() oh.win32VersionValue.value = readDataInstance.readDword() oh.sizeOfImage.value = readDataInstance.readDword() oh.sizeOfHeaders.value = readDataInstance.readDword() oh.checksum.value = readDataInstance.readDword() oh.subsystem.value = readDataInstance.readWord() oh.dllCharacteristics.value = readDataInstance.readWord() oh.sizeOfStackReserve.value = readDataInstance.readDword() oh.sizeOfStackCommit.value = readDataInstance.readDword() oh.sizeOfHeapReserve.value = readDataInstance.readDword() oh.sizeOfHeapCommit.value = readDataInstance.readDword() oh.loaderFlags.value = readDataInstance.readDword() oh.numberOfRvaAndSizes.value = readDataInstance.readDword() dirs = readDataInstance.read(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8) oh.dataDirectory = datadirs.DataDirectory.parse(utils.ReadData(dirs)) return oh
python
def parse(readDataInstance): """ Returns a new L{OptionalHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{OptionalHeader} object. @rtype: L{OptionalHeader} @return: A new L{OptionalHeader} object. """ oh = OptionalHeader() oh.magic.value = readDataInstance.readWord() oh.majorLinkerVersion.value = readDataInstance.readByte() oh.minorLinkerVersion.value = readDataInstance.readByte() oh.sizeOfCode.value = readDataInstance.readDword() oh.sizeOfInitializedData.value = readDataInstance.readDword() oh.sizeOfUninitializedData.value = readDataInstance.readDword() oh.addressOfEntryPoint.value = readDataInstance.readDword() oh.baseOfCode.value = readDataInstance.readDword() oh.baseOfData.value = readDataInstance.readDword() oh.imageBase.value = readDataInstance.readDword() oh.sectionAlignment.value = readDataInstance.readDword() oh.fileAlignment.value = readDataInstance.readDword() oh.majorOperatingSystemVersion.value = readDataInstance.readWord() oh.minorOperatingSystemVersion.value = readDataInstance.readWord() oh.majorImageVersion.value = readDataInstance.readWord() oh.minorImageVersion.value = readDataInstance.readWord() oh.majorSubsystemVersion.value = readDataInstance.readWord() oh.minorSubsystemVersion.value = readDataInstance.readWord() oh.win32VersionValue.value = readDataInstance.readDword() oh.sizeOfImage.value = readDataInstance.readDword() oh.sizeOfHeaders.value = readDataInstance.readDword() oh.checksum.value = readDataInstance.readDword() oh.subsystem.value = readDataInstance.readWord() oh.dllCharacteristics.value = readDataInstance.readWord() oh.sizeOfStackReserve.value = readDataInstance.readDword() oh.sizeOfStackCommit.value = readDataInstance.readDword() oh.sizeOfHeapReserve.value = readDataInstance.readDword() oh.sizeOfHeapCommit.value = readDataInstance.readDword() oh.loaderFlags.value = readDataInstance.readDword() oh.numberOfRvaAndSizes.value = readDataInstance.readDword() dirs = readDataInstance.read(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8) oh.dataDirectory = datadirs.DataDirectory.parse(utils.ReadData(dirs)) return oh
[ "def", "parse", "(", "readDataInstance", ")", ":", "oh", "=", "OptionalHeader", "(", ")", "oh", ".", "magic", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "majorLinkerVersion", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "oh", ".", "minorLinkerVersion", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "oh", ".", "sizeOfCode", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfInitializedData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfUninitializedData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "addressOfEntryPoint", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "baseOfCode", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "baseOfData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "imageBase", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sectionAlignment", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "fileAlignment", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "majorOperatingSystemVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "minorOperatingSystemVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "majorImageVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "minorImageVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "majorSubsystemVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "minorSubsystemVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "win32VersionValue", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfImage", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfHeaders", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "checksum", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "subsystem", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "dllCharacteristics", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "oh", ".", "sizeOfStackReserve", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfStackCommit", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfHeapReserve", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "sizeOfHeapCommit", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "loaderFlags", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "oh", ".", "numberOfRvaAndSizes", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dirs", "=", "readDataInstance", ".", "read", "(", "consts", ".", "IMAGE_NUMBEROF_DIRECTORY_ENTRIES", "*", "8", ")", "oh", ".", "dataDirectory", "=", "datadirs", ".", "DataDirectory", ".", "parse", "(", "utils", ".", "ReadData", "(", "dirs", ")", ")", "return", "oh" ]
Returns a new L{OptionalHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{OptionalHeader} object. @rtype: L{OptionalHeader} @return: A new L{OptionalHeader} object.
[ "Returns", "a", "new", "L", "{", "OptionalHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1802-L1849
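This parse reads the PE32 layout (a DWORD imageBase plus the baseOfData field that PE32+ drops), so a caller would dispatch on the magic word first. A sketch under the assumption that ReadData exposes the offset/setOffset pair used elsewhere in this module; the magic values come from the PE/COFF spec:

PE32_MAGIC = 0x10b
PE32_PLUS_MAGIC = 0x20b  # 64-bit optional header

def peek_optional_header_magic(readDataInstance):
    magic = readDataInstance.readWord()
    readDataInstance.setOffset(readDataInstance.offset - 2)  # rewind the WORD just read
    return magic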
crackinglandia/pype32
pype32/pype32.py
SectionHeader.parse
def parse(readDataInstance): """ Returns a new L{SectionHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeader} object. @rtype: L{SectionHeader} @return: A new L{SectionHeader} object. """ sh = SectionHeader() sh.name.value = readDataInstance.read(8) sh.misc.value = readDataInstance.readDword() sh.virtualAddress.value = readDataInstance.readDword() sh.sizeOfRawData.value = readDataInstance.readDword() sh.pointerToRawData.value = readDataInstance.readDword() sh.pointerToRelocations.value = readDataInstance.readDword() sh.pointerToLineNumbers.value = readDataInstance.readDword() sh.numberOfRelocations.value = readDataInstance.readWord() sh.numberOfLinesNumbers.value = readDataInstance.readWord() sh.characteristics.value = readDataInstance.readDword() return sh
python
def parse(readDataInstance): """ Returns a new L{SectionHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeader} object. @rtype: L{SectionHeader} @return: A new L{SectionHeader} object. """ sh = SectionHeader() sh.name.value = readDataInstance.read(8) sh.misc.value = readDataInstance.readDword() sh.virtualAddress.value = readDataInstance.readDword() sh.sizeOfRawData.value = readDataInstance.readDword() sh.pointerToRawData.value = readDataInstance.readDword() sh.pointerToRelocations.value = readDataInstance.readDword() sh.pointerToLineNumbers.value = readDataInstance.readDword() sh.numberOfRelocations.value = readDataInstance.readWord() sh.numberOfLinesNumbers.value = readDataInstance.readWord() sh.characteristics.value = readDataInstance.readDword() return sh
[ "def", "parse", "(", "readDataInstance", ")", ":", "sh", "=", "SectionHeader", "(", ")", "sh", ".", "name", ".", "value", "=", "readDataInstance", ".", "read", "(", "8", ")", "sh", ".", "misc", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "virtualAddress", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "sizeOfRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToRelocations", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToLineNumbers", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "numberOfRelocations", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "sh", ".", "numberOfLinesNumbers", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "sh", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "sh" ]
Returns a new L{SectionHeader} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeader} object. @rtype: L{SectionHeader} @return: A new L{SectionHeader} object.
[ "Returns", "a", "new", "L", "{", "SectionHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L2017-L2038
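The characteristics DWORD parsed above is a flag field; a small illustrative decoder over a parsed SectionHeader, using flag values from the PE/COFF spec rather than pype32's own constants:

IMAGE_SCN_CNT_CODE = 0x00000020
IMAGE_SCN_MEM_EXECUTE = 0x20000000
IMAGE_SCN_MEM_READ = 0x40000000
IMAGE_SCN_MEM_WRITE = 0x80000000

def describe_section(sh):
    flags = sh.characteristics.value
    perms = ""
    if flags & IMAGE_SCN_MEM_READ: perms += "r"
    if flags & IMAGE_SCN_MEM_WRITE: perms += "w"
    if flags & IMAGE_SCN_MEM_EXECUTE: perms += "x"
    kind = " code" if flags & IMAGE_SCN_CNT_CODE else ""
    return "%s [%s]%s" % (sh.name.value.rstrip("\x00"), perms or "-", kind)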
crackinglandia/pype32
pype32/pype32.py
SectionHeaders.parse
def parse(readDataInstance, numberOfSectionHeaders): """ Returns a new L{SectionHeaders} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeaders} object. @type numberOfSectionHeaders: int @param numberOfSectionHeaders: The number of L{SectionHeader} objects in the L{SectionHeaders} instance. """ sHdrs = SectionHeaders(numberOfSectionHeaders = 0) for i in range(numberOfSectionHeaders): sh = SectionHeader() sh.name.value = readDataInstance.read(8) sh.misc.value = readDataInstance.readDword() sh.virtualAddress.value = readDataInstance.readDword() sh.sizeOfRawData.value = readDataInstance.readDword() sh.pointerToRawData.value = readDataInstance.readDword() sh.pointerToRelocations.value = readDataInstance.readDword() sh.pointerToLineNumbers.value = readDataInstance.readDword() sh.numberOfRelocations.value = readDataInstance.readWord() sh.numberOfLinesNumbers.value = readDataInstance.readWord() sh.characteristics.value = readDataInstance.readDword() sHdrs.append(sh) return sHdrs
python
def parse(readDataInstance, numberOfSectionHeaders): """ Returns a new L{SectionHeaders} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeaders} object. @type numberOfSectionHeaders: int @param numberOfSectionHeaders: The number of L{SectionHeader} objects in the L{SectionHeaders} instance. """ sHdrs = SectionHeaders(numberOfSectionHeaders = 0) for i in range(numberOfSectionHeaders): sh = SectionHeader() sh.name.value = readDataInstance.read(8) sh.misc.value = readDataInstance.readDword() sh.virtualAddress.value = readDataInstance.readDword() sh.sizeOfRawData.value = readDataInstance.readDword() sh.pointerToRawData.value = readDataInstance.readDword() sh.pointerToRelocations.value = readDataInstance.readDword() sh.pointerToLineNumbers.value = readDataInstance.readDword() sh.numberOfRelocations.value = readDataInstance.readWord() sh.numberOfLinesNumbers.value = readDataInstance.readWord() sh.characteristics.value = readDataInstance.readDword() sHdrs.append(sh) return sHdrs
[ "def", "parse", "(", "readDataInstance", ",", "numberOfSectionHeaders", ")", ":", "sHdrs", "=", "SectionHeaders", "(", "numberOfSectionHeaders", "=", "0", ")", "for", "i", "in", "range", "(", "numberOfSectionHeaders", ")", ":", "sh", "=", "SectionHeader", "(", ")", "sh", ".", "name", ".", "value", "=", "readDataInstance", ".", "read", "(", "8", ")", "sh", ".", "misc", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "virtualAddress", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "sizeOfRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToRelocations", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "pointerToLineNumbers", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sh", ".", "numberOfRelocations", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "sh", ".", "numberOfLinesNumbers", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "sh", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "sHdrs", ".", "append", "(", "sh", ")", "return", "sHdrs" ]
Returns a new L{SectionHeaders} object.

@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeaders} object.

@type numberOfSectionHeaders: int
@param numberOfSectionHeaders: The number of L{SectionHeader} objects in the L{SectionHeaders} instance.
[ "Returns", "a", "new", "L", "{", "SectionHeaders", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L2069-L2097
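The parser above walks the standard 40-byte PECOFF IMAGE_SECTION_HEADER layout field by field (misc holds VirtualSize). Below is a minimal self-contained sketch of that same layout using only the standard-library struct module instead of pype32's ReadData; the sample bytes are made up for illustration.

import struct

# PECOFF section header: 8-byte name, six DWORDs, two WORDs, one DWORD = 40 bytes
SECTION_HEADER_FMT = '<8s6I2HI'
assert struct.calcsize(SECTION_HEADER_FMT) == 40

def parse_section_header(raw):
    fields = struct.unpack(SECTION_HEADER_FMT, raw[:40])
    keys = ('name', 'virtualSize', 'virtualAddress', 'sizeOfRawData',
            'pointerToRawData', 'pointerToRelocations', 'pointerToLineNumbers',
            'numberOfRelocations', 'numberOfLineNumbers', 'characteristics')
    return dict(zip(keys, fields))

# toy example: pack a fake '.text' header and parse it back
sample = struct.pack(SECTION_HEADER_FMT, b'.text\x00\x00\x00',
                     0x1000, 0x1000, 0x200, 0x400, 0, 0, 0, 0, 0x60000020)
print(parse_section_header(sample)['name'])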
crackinglandia/pype32
pype32/pype32.py
Sections.parse
def parse(readDataInstance, sectionHeadersInstance):
    """
    Returns a new L{Sections} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.

    @type sectionHeadersInstance: instance
    @param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.

    @rtype: L{Sections}
    @return: A new L{Sections} object.
    """
    sData = Sections()
    for sectionHdr in sectionHeadersInstance:
        if sectionHdr.sizeOfRawData.value > len(readDataInstance.data):
            print "Warning: SizeOfRawData is larger than file."
        if sectionHdr.pointerToRawData.value > len(readDataInstance.data):
            print "Warning: PointerToRawData points beyond the end of the file."
        if sectionHdr.misc.value > 0x10000000:
            print "Warning: VirtualSize is extremely large > 256MiB."
        if sectionHdr.virtualAddress.value > 0x10000000:
            print "Warning: VirtualAddress is beyond 0x10000000"
        # skip sections with pointerToRawData == 0. According to PECOFF, it contains uninitialized data
        if sectionHdr.pointerToRawData.value:
            sData.append(readDataInstance.read(sectionHdr.sizeOfRawData.value))
    return sData
python
def parse(readDataInstance, sectionHeadersInstance):
    """
    Returns a new L{Sections} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.

    @type sectionHeadersInstance: instance
    @param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.

    @rtype: L{Sections}
    @return: A new L{Sections} object.
    """
    sData = Sections()
    for sectionHdr in sectionHeadersInstance:
        if sectionHdr.sizeOfRawData.value > len(readDataInstance.data):
            print "Warning: SizeOfRawData is larger than file."
        if sectionHdr.pointerToRawData.value > len(readDataInstance.data):
            print "Warning: PointerToRawData points beyond the end of the file."
        if sectionHdr.misc.value > 0x10000000:
            print "Warning: VirtualSize is extremely large > 256MiB."
        if sectionHdr.virtualAddress.value > 0x10000000:
            print "Warning: VirtualAddress is beyond 0x10000000"
        # skip sections with pointerToRawData == 0. According to PECOFF, it contains uninitialized data
        if sectionHdr.pointerToRawData.value:
            sData.append(readDataInstance.read(sectionHdr.sizeOfRawData.value))
    return sData
[ "def", "parse", "(", "readDataInstance", ",", "sectionHeadersInstance", ")", ":", "sData", "=", "Sections", "(", ")", "for", "sectionHdr", "in", "sectionHeadersInstance", ":", "if", "sectionHdr", ".", "sizeOfRawData", ".", "value", ">", "len", "(", "readDataInstance", ".", "data", ")", ":", "print", "\"Warning: SizeOfRawData is larger than file.\"", "if", "sectionHdr", ".", "pointerToRawData", ".", "value", ">", "len", "(", "readDataInstance", ".", "data", ")", ":", "print", "\"Warning: PointerToRawData points beyond the end of the file.\"", "if", "sectionHdr", ".", "misc", ".", "value", ">", "0x10000000", ":", "print", "\"Warning: VirtualSize is extremely large > 256MiB.\"", "if", "sectionHdr", ".", "virtualAddress", ".", "value", ">", "0x10000000", ":", "print", "\"Warning: VirtualAddress is beyond 0x10000000\"", "# skip sections with pointerToRawData == 0. According to PECOFF, it contains uninitialized data", "if", "sectionHdr", ".", "pointerToRawData", ".", "value", ":", "sData", ".", "append", "(", "readDataInstance", ".", "read", "(", "sectionHdr", ".", "sizeOfRawData", ".", "value", ")", ")", "return", "sData" ]
Returns a new L{Sections} object.

@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.

@type sectionHeadersInstance: instance
@param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.

@rtype: L{Sections}
@return: A new L{Sections} object.
[ "Returns", "a", "new", "L", "{", "Sections", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L2118-L2151
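Sections.parse pulls sizeOfRawData bytes for each header that actually has file-backed data, skipping uninitialized sections. A hedged, simplified sketch of the same idea over a plain bytes buffer: it slices by pointerToRawData instead of reading sequentially from the stream, and the header dicts and sample bytes are placeholders, not pype32 objects.

def extract_sections(file_data, headers):
    """headers: iterable of dicts with pointerToRawData / sizeOfRawData keys."""
    sections = []
    for hdr in headers:
        ptr, size = hdr['pointerToRawData'], hdr['sizeOfRawData']
        if not ptr:  # uninitialized data (e.g. .bss) has no bytes in the file
            continue
        sections.append(file_data[ptr:ptr + size])
    return sections

hdrs = [{'pointerToRawData': 4, 'sizeOfRawData': 3},
        {'pointerToRawData': 0, 'sizeOfRawData': 8}]   # second header is skipped
print(extract_sections(b'MZ\x90\x00ABCDEF', hdrs))     # [b'ABC']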
blockcypher/bcwallet
bcwallet/bcwallet.py
get_addresses_on_both_chains
def get_addresses_on_both_chains(wallet_obj, used=None, zero_balance=None):
    '''
    Get addresses across both subchains based on the filter criteria passed in

    Returns a list of dicts of the following form:
        [
            {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
            ...,
        ]

    Dicts may also contain WIF and privkeyhex if wallet_obj has private key
    '''
    mpub = wallet_obj.serialize_b58(private=False)
    wallet_name = get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
            )
    wallet_addresses = get_wallet_addresses(
            wallet_name=wallet_name,
            api_key=BLOCKCYPHER_API_KEY,
            is_hd_wallet=True,
            used=used,
            zero_balance=zero_balance,
            coin_symbol=coin_symbol_from_mkey(mpub),
            )
    verbose_print('wallet_addresses:')
    verbose_print(wallet_addresses)

    if wallet_obj.private_key:
        master_key = wallet_obj.serialize_b58(private=True)
    else:
        master_key = mpub

    chains_address_paths_cleaned = []
    for chain in wallet_addresses['chains']:
        if chain['chain_addresses']:
            chain_address_paths = verify_and_fill_address_paths_from_bip32key(
                    address_paths=chain['chain_addresses'],
                    master_key=master_key,
                    network=guess_network_from_mkey(mpub),
                    )
            chain_address_paths_cleaned = {
                    'index': chain['index'],
                    'chain_addresses': chain_address_paths,
                    }
            chains_address_paths_cleaned.append(chain_address_paths_cleaned)
    return chains_address_paths_cleaned
python
def get_addresses_on_both_chains(wallet_obj, used=None, zero_balance=None):
    '''
    Get addresses across both subchains based on the filter criteria passed in

    Returns a list of dicts of the following form:
        [
            {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
            ...,
        ]

    Dicts may also contain WIF and privkeyhex if wallet_obj has private key
    '''
    mpub = wallet_obj.serialize_b58(private=False)
    wallet_name = get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
            )
    wallet_addresses = get_wallet_addresses(
            wallet_name=wallet_name,
            api_key=BLOCKCYPHER_API_KEY,
            is_hd_wallet=True,
            used=used,
            zero_balance=zero_balance,
            coin_symbol=coin_symbol_from_mkey(mpub),
            )
    verbose_print('wallet_addresses:')
    verbose_print(wallet_addresses)

    if wallet_obj.private_key:
        master_key = wallet_obj.serialize_b58(private=True)
    else:
        master_key = mpub

    chains_address_paths_cleaned = []
    for chain in wallet_addresses['chains']:
        if chain['chain_addresses']:
            chain_address_paths = verify_and_fill_address_paths_from_bip32key(
                    address_paths=chain['chain_addresses'],
                    master_key=master_key,
                    network=guess_network_from_mkey(mpub),
                    )
            chain_address_paths_cleaned = {
                    'index': chain['index'],
                    'chain_addresses': chain_address_paths,
                    }
            chains_address_paths_cleaned.append(chain_address_paths_cleaned)
    return chains_address_paths_cleaned
[ "def", "get_addresses_on_both_chains", "(", "wallet_obj", ",", "used", "=", "None", ",", "zero_balance", "=", "None", ")", ":", "mpub", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "False", ")", "wallet_name", "=", "get_blockcypher_walletname_from_mpub", "(", "mpub", "=", "mpub", ",", "subchain_indices", "=", "[", "0", ",", "1", "]", ",", ")", "wallet_addresses", "=", "get_wallet_addresses", "(", "wallet_name", "=", "wallet_name", ",", "api_key", "=", "BLOCKCYPHER_API_KEY", ",", "is_hd_wallet", "=", "True", ",", "used", "=", "used", ",", "zero_balance", "=", "zero_balance", ",", "coin_symbol", "=", "coin_symbol_from_mkey", "(", "mpub", ")", ",", ")", "verbose_print", "(", "'wallet_addresses:'", ")", "verbose_print", "(", "wallet_addresses", ")", "if", "wallet_obj", ".", "private_key", ":", "master_key", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "True", ")", "else", ":", "master_key", "=", "mpub", "chains_address_paths_cleaned", "=", "[", "]", "for", "chain", "in", "wallet_addresses", "[", "'chains'", "]", ":", "if", "chain", "[", "'chain_addresses'", "]", ":", "chain_address_paths", "=", "verify_and_fill_address_paths_from_bip32key", "(", "address_paths", "=", "chain", "[", "'chain_addresses'", "]", ",", "master_key", "=", "master_key", ",", "network", "=", "guess_network_from_mkey", "(", "mpub", ")", ",", ")", "chain_address_paths_cleaned", "=", "{", "'index'", ":", "chain", "[", "'index'", "]", ",", "'chain_addresses'", ":", "chain_address_paths", ",", "}", "chains_address_paths_cleaned", ".", "append", "(", "chain_address_paths_cleaned", ")", "return", "chains_address_paths_cleaned" ]
Get addresses across both subchains based on the filter criteria passed in

Returns a list of dicts of the following form:
    [
        {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
        ...,
    ]

Dicts may also contain WIF and privkeyhex if wallet_obj has private key
[ "Get", "addresses", "across", "both", "subchains", "based", "on", "the", "filter", "criteria", "passed", "in" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L153-L202
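The return shape documented above (a list of per-chain dicts, each wrapping a chain_addresses list) is easy to post-process. A small sketch that flattens it into one address list; the sample data is made up, with key names taken from the docstring rather than from any live API response.

def flatten_chain_addresses(chains):
    return [(chain['index'], addr['path'], addr['address'])
            for chain in chains
            for addr in chain['chain_addresses']]

sample = [{'index': 0,
           'chain_addresses': [{'address': '1abc123...', 'path': 'm/0/9',
                                'pubkeyhex': '0123456...'}]}]
print(flatten_chain_addresses(sample))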
blockcypher/bcwallet
bcwallet/bcwallet.py
register_unused_addresses
def register_unused_addresses(wallet_obj, subchain_index, num_addrs=1):
    '''
    Hit /derive to register new unused_addresses on a subchain_index and verify them client-side

    Returns a list of dicts of the following form:
        [
            {'address': '1abc123...', 'path': 'm/0/9', 'public': '0123456...'},
            ...,
        ]
    '''
    verbose_print('register_unused_addresses called on subchain %s for %s addrs' % (
        subchain_index,
        num_addrs,
        ))
    assert type(subchain_index) is int, subchain_index
    assert type(num_addrs) is int, num_addrs
    assert num_addrs > 0

    mpub = wallet_obj.serialize_b58(private=False)
    coin_symbol = coin_symbol_from_mkey(mpub)
    wallet_name = get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
            )
    network = guess_network_from_mkey(mpub)

    # register new address(es)
    derivation_response = derive_hd_address(
            api_key=BLOCKCYPHER_API_KEY,
            wallet_name=wallet_name,
            num_addresses=num_addrs,
            subchain_index=subchain_index,
            coin_symbol=coin_symbol,
            )
    verbose_print('derivation_response:')
    verbose_print(derivation_response)

    address_paths = derivation_response['chains'][0]['chain_addresses']

    # verify new addresses client-side
    full_address_paths = verify_and_fill_address_paths_from_bip32key(
            address_paths=address_paths,
            master_key=mpub,
            network=network,
            )

    return full_address_paths
python
def register_unused_addresses(wallet_obj, subchain_index, num_addrs=1):
    '''
    Hit /derive to register new unused_addresses on a subchain_index and verify them client-side

    Returns a list of dicts of the following form:
        [
            {'address': '1abc123...', 'path': 'm/0/9', 'public': '0123456...'},
            ...,
        ]
    '''
    verbose_print('register_unused_addresses called on subchain %s for %s addrs' % (
        subchain_index,
        num_addrs,
        ))
    assert type(subchain_index) is int, subchain_index
    assert type(num_addrs) is int, num_addrs
    assert num_addrs > 0

    mpub = wallet_obj.serialize_b58(private=False)
    coin_symbol = coin_symbol_from_mkey(mpub)
    wallet_name = get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
            )
    network = guess_network_from_mkey(mpub)

    # register new address(es)
    derivation_response = derive_hd_address(
            api_key=BLOCKCYPHER_API_KEY,
            wallet_name=wallet_name,
            num_addresses=num_addrs,
            subchain_index=subchain_index,
            coin_symbol=coin_symbol,
            )
    verbose_print('derivation_response:')
    verbose_print(derivation_response)

    address_paths = derivation_response['chains'][0]['chain_addresses']

    # verify new addresses client-side
    full_address_paths = verify_and_fill_address_paths_from_bip32key(
            address_paths=address_paths,
            master_key=mpub,
            network=network,
            )

    return full_address_paths
[ "def", "register_unused_addresses", "(", "wallet_obj", ",", "subchain_index", ",", "num_addrs", "=", "1", ")", ":", "verbose_print", "(", "'register_unused_addresses called on subchain %s for %s addrs'", "%", "(", "subchain_index", ",", "num_addrs", ",", ")", ")", "assert", "type", "(", "subchain_index", ")", "is", "int", ",", "subchain_index", "assert", "type", "(", "num_addrs", ")", "is", "int", ",", "num_addrs", "assert", "num_addrs", ">", "0", "mpub", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "False", ")", "coin_symbol", "=", "coin_symbol_from_mkey", "(", "mpub", ")", "wallet_name", "=", "get_blockcypher_walletname_from_mpub", "(", "mpub", "=", "mpub", ",", "subchain_indices", "=", "[", "0", ",", "1", "]", ",", ")", "network", "=", "guess_network_from_mkey", "(", "mpub", ")", "# register new address(es)", "derivation_response", "=", "derive_hd_address", "(", "api_key", "=", "BLOCKCYPHER_API_KEY", ",", "wallet_name", "=", "wallet_name", ",", "num_addresses", "=", "num_addrs", ",", "subchain_index", "=", "subchain_index", ",", "coin_symbol", "=", "coin_symbol", ",", ")", "verbose_print", "(", "'derivation_response:'", ")", "verbose_print", "(", "derivation_response", ")", "address_paths", "=", "derivation_response", "[", "'chains'", "]", "[", "0", "]", "[", "'chain_addresses'", "]", "# verify new addresses client-side", "full_address_paths", "=", "verify_and_fill_address_paths_from_bip32key", "(", "address_paths", "=", "address_paths", ",", "master_key", "=", "mpub", ",", "network", "=", "network", ",", ")", "return", "full_address_paths" ]
Hit /derive to register new unused_addresses on a subchain_index and verify them client-side

Returns a list of dicts of the following form:
    [
        {'address': '1abc123...', 'path': 'm/0/9', 'public': '0123456...'},
        ...,
    ]
[ "Hit", "/", "derive", "to", "register", "new", "unused_addresses", "on", "a", "subchain_index", "and", "verify", "them", "client", "-", "side" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L205-L254
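The derived entries land on BIP32 paths of the form m/<subchain>/<index>, as the docstring's 'm/0/9' example shows. A hypothetical helper (the function name and its start_index parameter are mine, not part of bcwallet) that mirrors the argument checks above and predicts which paths a derivation of num_addrs new addresses should occupy:

def expected_paths(subchain_index, start_index, num_addrs):
    """Paths the newly derived addresses should occupy on the subchain."""
    assert type(subchain_index) is int, subchain_index
    assert type(num_addrs) is int and num_addrs > 0, num_addrs
    return ['m/%d/%d' % (subchain_index, start_index + i) for i in range(num_addrs)]

print(expected_paths(0, 9, 3))  # ['m/0/9', 'm/0/10', 'm/0/11']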
blockcypher/bcwallet
bcwallet/bcwallet.py
dump_all_keys_or_addrs
def dump_all_keys_or_addrs(wallet_obj):
    '''
    Offline-enabled mechanism to dump addresses
    '''
    print_traversal_warning()

    puts('\nDo you understand this warning?')
    if not confirm(user_prompt=DEFAULT_PROMPT, default=False):
        puts(colored.red('Dump Cancelled!'))
        return

    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key:
        desc_str = 'private keys'
    else:
        desc_str = 'addresses'
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)

    puts('How many %s (on each chain) do you want to dump?' % desc_str)
    puts('Enter "b" to go back.\n')
    num_keys = get_int(
            user_prompt=DEFAULT_PROMPT,
            max_int=10**5,
            default_input='5',
            show_default=True,
            quit_ok=True,
            )
    if num_keys is False:
        return

    if wallet_obj.private_key:
        print_childprivkey_warning()

    puts('-' * 70)
    for chain_int in (0, 1):
        for current in range(0, num_keys):
            path = "m/%d/%d" % (chain_int, current)
            if current == 0:
                if chain_int == 0:
                    print_external_chain()
                    print_key_path_header()
                elif chain_int == 1:
                    print_internal_chain()
                    print_key_path_header()
            child_wallet = wallet_obj.get_child_for_path(path)
            if wallet_obj.private_key:
                wif_to_use = child_wallet.export_to_wif()
            else:
                wif_to_use = None
            print_path_info(
                    address=child_wallet.to_address(),
                    path=path,
                    wif=wif_to_use,
                    coin_symbol=coin_symbol_from_mkey(mpub),
                    )

    puts(colored.blue('\nYou can compare this output to bip32.org'))
python
def dump_all_keys_or_addrs(wallet_obj):
    '''
    Offline-enabled mechanism to dump addresses
    '''
    print_traversal_warning()

    puts('\nDo you understand this warning?')
    if not confirm(user_prompt=DEFAULT_PROMPT, default=False):
        puts(colored.red('Dump Cancelled!'))
        return

    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key:
        desc_str = 'private keys'
    else:
        desc_str = 'addresses'
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)

    puts('How many %s (on each chain) do you want to dump?' % desc_str)
    puts('Enter "b" to go back.\n')
    num_keys = get_int(
            user_prompt=DEFAULT_PROMPT,
            max_int=10**5,
            default_input='5',
            show_default=True,
            quit_ok=True,
            )
    if num_keys is False:
        return

    if wallet_obj.private_key:
        print_childprivkey_warning()

    puts('-' * 70)
    for chain_int in (0, 1):
        for current in range(0, num_keys):
            path = "m/%d/%d" % (chain_int, current)
            if current == 0:
                if chain_int == 0:
                    print_external_chain()
                    print_key_path_header()
                elif chain_int == 1:
                    print_internal_chain()
                    print_key_path_header()
            child_wallet = wallet_obj.get_child_for_path(path)
            if wallet_obj.private_key:
                wif_to_use = child_wallet.export_to_wif()
            else:
                wif_to_use = None
            print_path_info(
                    address=child_wallet.to_address(),
                    path=path,
                    wif=wif_to_use,
                    coin_symbol=coin_symbol_from_mkey(mpub),
                    )

    puts(colored.blue('\nYou can compare this output to bip32.org'))
[ "def", "dump_all_keys_or_addrs", "(", "wallet_obj", ")", ":", "print_traversal_warning", "(", ")", "puts", "(", "'\\nDo you understand this warning?'", ")", "if", "not", "confirm", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "default", "=", "False", ")", ":", "puts", "(", "colored", ".", "red", "(", "'Dump Cancelled!'", ")", ")", "return", "mpub", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "False", ")", "if", "wallet_obj", ".", "private_key", ":", "desc_str", "=", "'private keys'", "else", ":", "desc_str", "=", "'addresses'", "puts", "(", "'Displaying Public Addresses Only'", ")", "puts", "(", "'For Private Keys, please open bcwallet with your Master Private Key:\\n'", ")", "priv_to_display", "=", "'%s123...'", "%", "first4mprv_from_mpub", "(", "mpub", "=", "mpub", ")", "print_bcwallet_basic_priv_opening", "(", "priv_to_display", "=", "priv_to_display", ")", "puts", "(", "'How many %s (on each chain) do you want to dump?'", "%", "desc_str", ")", "puts", "(", "'Enter \"b\" to go back.\\n'", ")", "num_keys", "=", "get_int", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "max_int", "=", "10", "**", "5", ",", "default_input", "=", "'5'", ",", "show_default", "=", "True", ",", "quit_ok", "=", "True", ",", ")", "if", "num_keys", "is", "False", ":", "return", "if", "wallet_obj", ".", "private_key", ":", "print_childprivkey_warning", "(", ")", "puts", "(", "'-'", "*", "70", ")", "for", "chain_int", "in", "(", "0", ",", "1", ")", ":", "for", "current", "in", "range", "(", "0", ",", "num_keys", ")", ":", "path", "=", "\"m/%d/%d\"", "%", "(", "chain_int", ",", "current", ")", "if", "current", "==", "0", ":", "if", "chain_int", "==", "0", ":", "print_external_chain", "(", ")", "print_key_path_header", "(", ")", "elif", "chain_int", "==", "1", ":", "print_internal_chain", "(", ")", "print_key_path_header", "(", ")", "child_wallet", "=", "wallet_obj", ".", "get_child_for_path", "(", "path", ")", "if", "wallet_obj", ".", "private_key", ":", "wif_to_use", "=", "child_wallet", ".", "export_to_wif", "(", ")", "else", ":", "wif_to_use", "=", "None", "print_path_info", "(", "address", "=", "child_wallet", ".", "to_address", "(", ")", ",", "path", "=", "path", ",", "wif", "=", "wif_to_use", ",", "coin_symbol", "=", "coin_symbol_from_mkey", "(", "mpub", ")", ",", ")", "puts", "(", "colored", ".", "blue", "(", "'\\nYou can compare this output to bip32.org'", ")", ")" ]
Offline-enabled mechanism to dump addresses
[ "Offline", "-", "enabled", "mechanism", "to", "dump", "addresses" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L869-L932
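The traversal at the heart of the dump is just a nested loop over both subchains, building "m/<chain>/<index>" paths. The same enumeration isolated as a standalone generator (the helper name is mine, for illustration only):

def iter_dump_paths(num_keys):
    """Yield the BIP32 paths dump_all_keys_or_addrs walks: both subchains, num_keys each."""
    for chain_int in (0, 1):  # 0 = external chain, 1 = internal/change chain
        for current in range(0, num_keys):
            yield "m/%d/%d" % (chain_int, current)

print(list(iter_dump_paths(2)))  # ['m/0/0', 'm/0/1', 'm/1/0', 'm/1/1']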
blockcypher/bcwallet
bcwallet/bcwallet.py
dump_selected_keys_or_addrs
def dump_selected_keys_or_addrs(wallet_obj, used=None, zero_balance=None):
    '''
    Works for both public key only or private key access
    '''
    if wallet_obj.private_key:
        content_str = 'private keys'
    else:
        content_str = 'addresses'

    if not USER_ONLINE:
        puts(colored.red('\nInternet connection required, would you like to dump *all* %s instead?' % content_str))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
        else:
            return

    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key is None:
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)

    chain_address_objs = get_addresses_on_both_chains(
            wallet_obj=wallet_obj,
            used=used,
            zero_balance=zero_balance,
            )

    if wallet_obj.private_key and chain_address_objs:
        print_childprivkey_warning()

    addr_cnt = 0
    for chain_address_obj in chain_address_objs:
        if chain_address_obj['index'] == 0:
            print_external_chain()
        elif chain_address_obj['index'] == 1:
            print_internal_chain()
        print_key_path_header()
        for address_obj in chain_address_obj['chain_addresses']:
            print_path_info(
                    address=address_obj['pub_address'],
                    wif=address_obj.get('wif'),
                    path=address_obj['path'],
                    coin_symbol=coin_symbol_from_mkey(mpub),
                    )
            addr_cnt += 1

    if addr_cnt:
        puts(colored.blue('\nYou can compare this output to bip32.org'))
    else:
        puts('No matching %s in this subset. Would you like to dump *all* %s instead?' % (
            content_str,
            content_str,
            ))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
python
def dump_selected_keys_or_addrs(wallet_obj, used=None, zero_balance=None):
    '''
    Works for both public key only or private key access
    '''
    if wallet_obj.private_key:
        content_str = 'private keys'
    else:
        content_str = 'addresses'

    if not USER_ONLINE:
        puts(colored.red('\nInternet connection required, would you like to dump *all* %s instead?' % content_str))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
        else:
            return

    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key is None:
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)

    chain_address_objs = get_addresses_on_both_chains(
            wallet_obj=wallet_obj,
            used=used,
            zero_balance=zero_balance,
            )

    if wallet_obj.private_key and chain_address_objs:
        print_childprivkey_warning()

    addr_cnt = 0
    for chain_address_obj in chain_address_objs:
        if chain_address_obj['index'] == 0:
            print_external_chain()
        elif chain_address_obj['index'] == 1:
            print_internal_chain()
        print_key_path_header()
        for address_obj in chain_address_obj['chain_addresses']:
            print_path_info(
                    address=address_obj['pub_address'],
                    wif=address_obj.get('wif'),
                    path=address_obj['path'],
                    coin_symbol=coin_symbol_from_mkey(mpub),
                    )
            addr_cnt += 1

    if addr_cnt:
        puts(colored.blue('\nYou can compare this output to bip32.org'))
    else:
        puts('No matching %s in this subset. Would you like to dump *all* %s instead?' % (
            content_str,
            content_str,
            ))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
[ "def", "dump_selected_keys_or_addrs", "(", "wallet_obj", ",", "used", "=", "None", ",", "zero_balance", "=", "None", ")", ":", "if", "wallet_obj", ".", "private_key", ":", "content_str", "=", "'private keys'", "else", ":", "content_str", "=", "'addresses'", "if", "not", "USER_ONLINE", ":", "puts", "(", "colored", ".", "red", "(", "'\\nInternet connection required, would you like to dump *all* %s instead?'", "%", "(", "content_str", ",", "content_str", ",", ")", ")", ")", "if", "confirm", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "default", "=", "True", ")", ":", "dump_all_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ")", "else", ":", "return", "mpub", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "False", ")", "if", "wallet_obj", ".", "private_key", "is", "None", ":", "puts", "(", "'Displaying Public Addresses Only'", ")", "puts", "(", "'For Private Keys, please open bcwallet with your Master Private Key:\\n'", ")", "priv_to_display", "=", "'%s123...'", "%", "first4mprv_from_mpub", "(", "mpub", "=", "mpub", ")", "print_bcwallet_basic_priv_opening", "(", "priv_to_display", "=", "priv_to_display", ")", "chain_address_objs", "=", "get_addresses_on_both_chains", "(", "wallet_obj", "=", "wallet_obj", ",", "used", "=", "used", ",", "zero_balance", "=", "zero_balance", ",", ")", "if", "wallet_obj", ".", "private_key", "and", "chain_address_objs", ":", "print_childprivkey_warning", "(", ")", "addr_cnt", "=", "0", "for", "chain_address_obj", "in", "chain_address_objs", ":", "if", "chain_address_obj", "[", "'index'", "]", "==", "0", ":", "print_external_chain", "(", ")", "elif", "chain_address_obj", "[", "'index'", "]", "==", "1", ":", "print_internal_chain", "(", ")", "print_key_path_header", "(", ")", "for", "address_obj", "in", "chain_address_obj", "[", "'chain_addresses'", "]", ":", "print_path_info", "(", "address", "=", "address_obj", "[", "'pub_address'", "]", ",", "wif", "=", "address_obj", ".", "get", "(", "'wif'", ")", ",", "path", "=", "address_obj", "[", "'path'", "]", ",", "coin_symbol", "=", "coin_symbol_from_mkey", "(", "mpub", ")", ",", ")", "addr_cnt", "+=", "1", "if", "addr_cnt", ":", "puts", "(", "colored", ".", "blue", "(", "'\\nYou can compare this output to bip32.org'", ")", ")", "else", ":", "puts", "(", "'No matching %s in this subset. Would you like to dump *all* %s instead?'", "%", "(", "content_str", ",", "content_str", ",", ")", ")", "if", "confirm", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "default", "=", "True", ")", ":", "dump_all_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ")" ]
Works for both public key only or private key access
[ "Works", "for", "both", "public", "key", "only", "or", "private", "key", "access" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L935-L998
blockcypher/bcwallet
bcwallet/bcwallet.py
dump_private_keys_or_addrs_chooser
def dump_private_keys_or_addrs_chooser(wallet_obj):
    '''
    Offline-enabled mechanism to dump everything
    '''
    if wallet_obj.private_key:
        puts('Which private keys and addresses do you want?')
    else:
        puts('Which addresses do you want?')
    with indent(2):
        puts(colored.cyan('1: Active - have funds to spend'))
        puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
        puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
        puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
        puts(colored.cyan('\nb: Go Back\n'))

    choice = choice_prompt(
            user_prompt=DEFAULT_PROMPT,
            acceptable_responses=[0, 1, 2, 3],
            default_input='1',
            show_default=True,
            quit_ok=True,
            )
    if choice is False:
        return

    if choice == '1':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=False, used=True)
    elif choice == '2':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=True, used=True)
    elif choice == '3':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=None, used=False)
    elif choice == '0':
        return dump_all_keys_or_addrs(wallet_obj=wallet_obj)
python
def dump_private_keys_or_addrs_chooser(wallet_obj):
    '''
    Offline-enabled mechanism to dump everything
    '''
    if wallet_obj.private_key:
        puts('Which private keys and addresses do you want?')
    else:
        puts('Which addresses do you want?')
    with indent(2):
        puts(colored.cyan('1: Active - have funds to spend'))
        puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
        puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
        puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
        puts(colored.cyan('\nb: Go Back\n'))

    choice = choice_prompt(
            user_prompt=DEFAULT_PROMPT,
            acceptable_responses=[0, 1, 2, 3],
            default_input='1',
            show_default=True,
            quit_ok=True,
            )
    if choice is False:
        return

    if choice == '1':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=False, used=True)
    elif choice == '2':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=True, used=True)
    elif choice == '3':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=None, used=False)
    elif choice == '0':
        return dump_all_keys_or_addrs(wallet_obj=wallet_obj)
[ "def", "dump_private_keys_or_addrs_chooser", "(", "wallet_obj", ")", ":", "if", "wallet_obj", ".", "private_key", ":", "puts", "(", "'Which private keys and addresses do you want?'", ")", "else", ":", "puts", "(", "'Which addresses do you want?'", ")", "with", "indent", "(", "2", ")", ":", "puts", "(", "colored", ".", "cyan", "(", "'1: Active - have funds to spend'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'2: Spent - no funds to spend (because they have been spent)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'3: Unused - no funds to spend (because the address has never been used)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'\\nb: Go Back\\n'", ")", ")", "choice", "=", "choice_prompt", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "acceptable_responses", "=", "[", "0", ",", "1", ",", "2", ",", "3", "]", ",", "default_input", "=", "'1'", ",", "show_default", "=", "True", ",", "quit_ok", "=", "True", ",", ")", "if", "choice", "is", "False", ":", "return", "if", "choice", "==", "'1'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "False", ",", "used", "=", "True", ")", "elif", "choice", "==", "'2'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "True", ",", "used", "=", "True", ")", "elif", "choice", "==", "'3'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "None", ",", "used", "=", "False", ")", "elif", "choice", "==", "'0'", ":", "return", "dump_all_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ")" ]
Offline-enabled mechanism to dump everything
[ "Offline", "-", "enabled", "mechanism", "to", "dump", "everything" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L1001-L1034
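The three online menu presets above reduce to (zero_balance, used) pairs forwarded to dump_selected_keys_or_addrs. That mapping written out as a small table in code (the dict name is mine; the values come straight from the branches above):

# menu choice -> (zero_balance, used) filter passed to dump_selected_keys_or_addrs
FILTER_PRESETS = {
    '1': (False, True),   # Active: used addresses that still hold funds
    '2': (True, True),    # Spent: used addresses with nothing left
    '3': (None, False),   # Unused: never-used addresses, balance filter off
}
print(FILTER_PRESETS['1'])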
blockcypher/bcwallet
bcwallet/bcwallet.py
wallet_home
def wallet_home(wallet_obj):
    '''
    Loaded on bootup (and stays in while loop until quitting)
    '''
    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key is None:
        print_pubwallet_notice(mpub=mpub)
    else:
        print_bcwallet_basic_pub_opening(mpub=mpub)

    coin_symbol = coin_symbol_from_mkey(mpub)

    if USER_ONLINE:
        wallet_name = get_blockcypher_walletname_from_mpub(
                mpub=mpub,
                subchain_indices=[0, 1],
                )

        # Instruct blockcypher to track the wallet by pubkey
        create_hd_wallet(
                wallet_name=wallet_name,
                xpubkey=mpub,
                api_key=BLOCKCYPHER_API_KEY,
                coin_symbol=coin_symbol,
                subchain_indices=[0, 1],  # for internal and change addresses
                )

        # Display balance info
        display_balance_info(wallet_obj=wallet_obj)

    # Go to home screen
    while True:
        puts('-' * 70 + '\n')

        if coin_symbol in ('bcy', 'btc-testnet'):
            display_shortname = COIN_SYMBOL_MAPPINGS[coin_symbol]['display_shortname']
            if coin_symbol == 'bcy':
                faucet_url = 'https://accounts.blockcypher.com/blockcypher-faucet'
            elif coin_symbol == 'btc-testnet':
                faucet_url = 'https://accounts.blockcypher.com/testnet-faucet'
            puts('Get free %s faucet coins:' % display_shortname)
            puts(colored.blue(faucet_url))
            puts()
            if coin_symbol == 'btc-testnet':
                puts('Please consider returning unused testnet coins to mwmabpJVisvti3WEP5vhFRtn3yqHRD9KNP so we can distribute them to others.\n')

        puts('What do you want to do?:')
        if not USER_ONLINE:
            puts("(since you are NOT connected to BlockCypher, many choices are disabled)")
        with indent(2):
            puts(colored.cyan('1: Show balance and transactions'))
            puts(colored.cyan('2: Show new receiving addresses'))
            puts(colored.cyan('3: Send funds (more options here)'))
        with indent(2):
            if wallet_obj.private_key:
                puts(colored.cyan('0: Dump private keys and addresses (advanced users only)'))
            else:
                puts(colored.cyan('0: Dump addresses (advanced users only)'))
        puts(colored.cyan('\nq: Quit bcwallet\n'))

        choice = choice_prompt(
                user_prompt=DEFAULT_PROMPT,
                acceptable_responses=range(0, 3+1),
                quit_ok=True,
                default_input='1',
                )
        verbose_print('Choice: %s' % choice)

        if choice is False:
            puts(colored.green('Thanks for using bcwallet!'))
            print_keys_not_saved()
            break
        elif choice == '1':
            display_recent_txs(wallet_obj=wallet_obj)
        elif choice == '2':
            display_new_receiving_addresses(wallet_obj=wallet_obj)
        elif choice == '3':
            send_chooser(wallet_obj=wallet_obj)
        elif choice == '0':
            dump_private_keys_or_addrs_chooser(wallet_obj=wallet_obj)
python
def wallet_home(wallet_obj):
    '''
    Loaded on bootup (and stays in while loop until quitting)
    '''
    mpub = wallet_obj.serialize_b58(private=False)

    if wallet_obj.private_key is None:
        print_pubwallet_notice(mpub=mpub)
    else:
        print_bcwallet_basic_pub_opening(mpub=mpub)

    coin_symbol = coin_symbol_from_mkey(mpub)

    if USER_ONLINE:
        wallet_name = get_blockcypher_walletname_from_mpub(
                mpub=mpub,
                subchain_indices=[0, 1],
                )

        # Instruct blockcypher to track the wallet by pubkey
        create_hd_wallet(
                wallet_name=wallet_name,
                xpubkey=mpub,
                api_key=BLOCKCYPHER_API_KEY,
                coin_symbol=coin_symbol,
                subchain_indices=[0, 1],  # for internal and change addresses
                )

        # Display balance info
        display_balance_info(wallet_obj=wallet_obj)

    # Go to home screen
    while True:
        puts('-' * 70 + '\n')

        if coin_symbol in ('bcy', 'btc-testnet'):
            display_shortname = COIN_SYMBOL_MAPPINGS[coin_symbol]['display_shortname']
            if coin_symbol == 'bcy':
                faucet_url = 'https://accounts.blockcypher.com/blockcypher-faucet'
            elif coin_symbol == 'btc-testnet':
                faucet_url = 'https://accounts.blockcypher.com/testnet-faucet'
            puts('Get free %s faucet coins:' % display_shortname)
            puts(colored.blue(faucet_url))
            puts()
            if coin_symbol == 'btc-testnet':
                puts('Please consider returning unused testnet coins to mwmabpJVisvti3WEP5vhFRtn3yqHRD9KNP so we can distribute them to others.\n')

        puts('What do you want to do?:')
        if not USER_ONLINE:
            puts("(since you are NOT connected to BlockCypher, many choices are disabled)")
        with indent(2):
            puts(colored.cyan('1: Show balance and transactions'))
            puts(colored.cyan('2: Show new receiving addresses'))
            puts(colored.cyan('3: Send funds (more options here)'))
        with indent(2):
            if wallet_obj.private_key:
                puts(colored.cyan('0: Dump private keys and addresses (advanced users only)'))
            else:
                puts(colored.cyan('0: Dump addresses (advanced users only)'))
        puts(colored.cyan('\nq: Quit bcwallet\n'))

        choice = choice_prompt(
                user_prompt=DEFAULT_PROMPT,
                acceptable_responses=range(0, 3+1),
                quit_ok=True,
                default_input='1',
                )
        verbose_print('Choice: %s' % choice)

        if choice is False:
            puts(colored.green('Thanks for using bcwallet!'))
            print_keys_not_saved()
            break
        elif choice == '1':
            display_recent_txs(wallet_obj=wallet_obj)
        elif choice == '2':
            display_new_receiving_addresses(wallet_obj=wallet_obj)
        elif choice == '3':
            send_chooser(wallet_obj=wallet_obj)
        elif choice == '0':
            dump_private_keys_or_addrs_chooser(wallet_obj=wallet_obj)
[ "def", "wallet_home", "(", "wallet_obj", ")", ":", "mpub", "=", "wallet_obj", ".", "serialize_b58", "(", "private", "=", "False", ")", "if", "wallet_obj", ".", "private_key", "is", "None", ":", "print_pubwallet_notice", "(", "mpub", "=", "mpub", ")", "else", ":", "print_bcwallet_basic_pub_opening", "(", "mpub", "=", "mpub", ")", "coin_symbol", "=", "coin_symbol_from_mkey", "(", "mpub", ")", "if", "USER_ONLINE", ":", "wallet_name", "=", "get_blockcypher_walletname_from_mpub", "(", "mpub", "=", "mpub", ",", "subchain_indices", "=", "[", "0", ",", "1", "]", ",", ")", "# Instruct blockcypher to track the wallet by pubkey", "create_hd_wallet", "(", "wallet_name", "=", "wallet_name", ",", "xpubkey", "=", "mpub", ",", "api_key", "=", "BLOCKCYPHER_API_KEY", ",", "coin_symbol", "=", "coin_symbol", ",", "subchain_indices", "=", "[", "0", ",", "1", "]", ",", "# for internal and change addresses", ")", "# Display balance info", "display_balance_info", "(", "wallet_obj", "=", "wallet_obj", ")", "# Go to home screen", "while", "True", ":", "puts", "(", "'-'", "*", "70", "+", "'\\n'", ")", "if", "coin_symbol", "in", "(", "'bcy'", ",", "'btc-testnet'", ")", ":", "display_shortname", "=", "COIN_SYMBOL_MAPPINGS", "[", "coin_symbol", "]", "[", "'display_shortname'", "]", "if", "coin_symbol", "==", "'bcy'", ":", "faucet_url", "=", "'https://accounts.blockcypher.com/blockcypher-faucet'", "elif", "coin_symbol", "==", "'btc-testnet'", ":", "faucet_url", "=", "'https://accounts.blockcypher.com/testnet-faucet'", "puts", "(", "'Get free %s faucet coins:'", "%", "display_shortname", ")", "puts", "(", "colored", ".", "blue", "(", "faucet_url", ")", ")", "puts", "(", ")", "if", "coin_symbol", "==", "'btc-testnet'", ":", "puts", "(", "'Please consider returning unused testnet coins to mwmabpJVisvti3WEP5vhFRtn3yqHRD9KNP so we can distribute them to others.\\n'", ")", "puts", "(", "'What do you want to do?:'", ")", "if", "not", "USER_ONLINE", ":", "puts", "(", "\"(since you are NOT connected to BlockCypher, many choices are disabled)\"", ")", "with", "indent", "(", "2", ")", ":", "puts", "(", "colored", ".", "cyan", "(", "'1: Show balance and transactions'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'2: Show new receiving addresses'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'3: Send funds (more options here)'", ")", ")", "with", "indent", "(", "2", ")", ":", "if", "wallet_obj", ".", "private_key", ":", "puts", "(", "colored", ".", "cyan", "(", "'0: Dump private keys and addresses (advanced users only)'", ")", ")", "else", ":", "puts", "(", "colored", ".", "cyan", "(", "'0: Dump addresses (advanced users only)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'\\nq: Quit bcwallet\\n'", ")", ")", "choice", "=", "choice_prompt", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "acceptable_responses", "=", "range", "(", "0", ",", "3", "+", "1", ")", ",", "quit_ok", "=", "True", ",", "default_input", "=", "'1'", ",", ")", "verbose_print", "(", "'Choice: %s'", "%", "choice", ")", "if", "choice", "is", "False", ":", "puts", "(", "colored", ".", "green", "(", "'Thanks for using bcwallet!'", ")", ")", "print_keys_not_saved", "(", ")", "break", "elif", "choice", "==", "'1'", ":", "display_recent_txs", "(", "wallet_obj", "=", "wallet_obj", ")", "elif", "choice", "==", "'2'", ":", "display_new_receiving_addresses", "(", "wallet_obj", "=", "wallet_obj", ")", "elif", "choice", "==", "'3'", ":", "send_chooser", "(", "wallet_obj", "=", "wallet_obj", ")", "elif", "choice", "==", "'0'", ":", 
"dump_private_keys_or_addrs_chooser", "(", "wallet_obj", "=", "wallet_obj", ")" ]
Loaded on bootup (and stays in while loop until quitting)
[ "Loaded", "on", "bootup", "(", "and", "stays", "in", "while", "loop", "until", "quitting", ")" ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bcwallet.py#L1091-L1173
tansey/gfl
pygfl/solver.py
TrailSolver.solve
def solve(self, lam):
    '''Solves the GFL for a fixed value of lambda.'''
    if self.penalty == 'dp':
        return self.solve_dp(lam)
    if self.penalty == 'gfl':
        return self.solve_gfl(lam)
    if self.penalty == 'gamlasso':
        return self.solve_gamlasso(lam)
    raise Exception('Unknown penalty type: {0}'.format(self.penalty))
python
def solve(self, lam):
    '''Solves the GFL for a fixed value of lambda.'''
    if self.penalty == 'dp':
        return self.solve_dp(lam)
    if self.penalty == 'gfl':
        return self.solve_gfl(lam)
    if self.penalty == 'gamlasso':
        return self.solve_gamlasso(lam)
    raise Exception('Unknown penalty type: {0}'.format(self.penalty))
[ "def", "solve", "(", "self", ",", "lam", ")", ":", "if", "self", ".", "penalty", "==", "'dp'", ":", "return", "self", ".", "solve_dp", "(", "lam", ")", "if", "self", ".", "penalty", "==", "'gfl'", ":", "return", "self", ".", "solve_gfl", "(", "lam", ")", "if", "self", ".", "penalty", "==", "'gamlasso'", ":", "return", "self", ".", "solve_gfl", "(", "lam", ")", "raise", "Exception", "(", "'Unknown penalty type: {0}'", ".", "format", "(", "self", ".", "penalty", ")", ")" ]
Solves the GFL for a fixed value of lambda.
[ "Solves", "the", "GFL", "for", "a", "fixed", "value", "of", "lambda", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/solver.py#L80-L88
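solve is just a dispatcher over the penalty name. A runnable toy showing the same pattern with stub solvers; the class below is a stand-in for illustration, not pygfl's TrailSolver.

class ToySolver(object):
    def __init__(self, penalty):
        self.penalty = penalty

    def solve_dp(self, lam): return 'dp(%s)' % lam
    def solve_gfl(self, lam): return 'gfl(%s)' % lam
    def solve_gamlasso(self, lam): return 'gamlasso(%s)' % lam

    def solve(self, lam):
        # look up solve_<penalty>, mirroring the if-chain in TrailSolver.solve
        try:
            return getattr(self, 'solve_' + self.penalty)(lam)
        except AttributeError:
            raise Exception('Unknown penalty type: {0}'.format(self.penalty))

print(ToySolver('gfl').solve(0.3))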
tansey/gfl
pygfl/solver.py
TrailSolver.solve_dp
def solve_dp(self, lam):
    '''Solves the Graph-fused double Pareto (non-convex, local optima only)'''
    cur_converge = self.converge+1
    step = 0
    # Get an initial estimate using the GFL
    self.solve_gfl(lam)
    beta2 = np.copy(self.beta)
    while cur_converge > self.converge and step < self.max_dp_steps:
        # Weight each edge differently
        u = lam / (1 + np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
        # Swap the beta buffers
        temp = self.beta
        self.beta = beta2
        beta2 = temp
        # Solve the edge-weighted GFL problem, which updates beta
        self.solve_gfl(u)
        # Check for convergence
        cur_converge = np.sqrt(((self.beta - beta2)**2).sum())
        step += 1
    self.steps.append(step)
    return self.beta
python
def solve_dp(self, lam):
    '''Solves the Graph-fused double Pareto (non-convex, local optima only)'''
    cur_converge = self.converge+1
    step = 0
    # Get an initial estimate using the GFL
    self.solve_gfl(lam)
    beta2 = np.copy(self.beta)
    while cur_converge > self.converge and step < self.max_dp_steps:
        # Weight each edge differently
        u = lam / (1 + np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
        # Swap the beta buffers
        temp = self.beta
        self.beta = beta2
        beta2 = temp
        # Solve the edge-weighted GFL problem, which updates beta
        self.solve_gfl(u)
        # Check for convergence
        cur_converge = np.sqrt(((self.beta - beta2)**2).sum())
        step += 1
    self.steps.append(step)
    return self.beta
[ "def", "solve_dp", "(", "self", ",", "lam", ")", ":", "cur_converge", "=", "self", ".", "converge", "+", "1", "step", "=", "0", "# Get an initial estimate using the GFL", "self", ".", "solve_gfl", "(", "lam", ")", "beta2", "=", "np", ".", "copy", "(", "self", ".", "beta", ")", "while", "cur_converge", ">", "self", ".", "converge", "and", "step", "<", "self", ".", "max_dp_steps", ":", "# Weight each edge differently", "u", "=", "lam", "/", "(", "1", "+", "np", ".", "abs", "(", "self", ".", "beta", "[", "self", ".", "trails", "[", ":", ":", "2", "]", "]", "-", "self", ".", "beta", "[", "self", ".", "trails", "[", "1", ":", ":", "2", "]", "]", ")", ")", "# Swap the beta buffers", "temp", "=", "self", ".", "beta", "self", ".", "beta", "=", "beta2", "beta2", "=", "temp", "# Solve the edge-weighted GFL problem, which updates beta", "self", ".", "solve_gfl", "(", "u", ")", "# Check for convergence", "cur_converge", "=", "np", ".", "sqrt", "(", "(", "(", "self", ".", "beta", "-", "beta2", ")", "**", "2", ")", ".", "sum", "(", ")", ")", "step", "+=", "1", "self", ".", "steps", ".", "append", "(", "step", ")", "return", "self", ".", "beta" ]
Solves the Graph-fused double Pareto (non-convex, local optima only)
[ "Solves", "the", "Graph", "-", "fused", "double", "Pareto", "(", "non", "-", "convex", "local", "optima", "only", ")" ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/solver.py#L120-L140
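The reweighting step is the core of the double-Pareto loop: each edge weight shrinks as the estimated difference across that edge grows, so large jumps are penalized less on the next pass. A standalone numpy illustration of exactly that expression, with toy beta/trails values and an arbitrary lam:

import numpy as np

beta = np.array([0.0, 0.1, 2.0, 2.05])   # toy node estimates
trails = np.array([0, 1, 1, 2, 2, 3])    # edges (0,1), (1,2), (2,3) as flat endpoint pairs
lam = 1.0

# same expression as in solve_dp: big |beta_i - beta_j| -> small penalty weight
u = lam / (1 + np.abs(beta[trails[::2]] - beta[trails[1::2]]))
print(u)  # edge (1,2) straddles the jump, so it gets the smallest weight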
tansey/gfl
pygfl/solver.py
TrailSolver.solve_gamlasso
def solve_gamlasso(self, lam):
    '''Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)'''
    weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
    s = self.solve_gfl(weights)
    self.steps.append(s)
    return self.beta
python
def solve_gamlasso(self, lam):
    '''Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)'''
    weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
    s = self.solve_gfl(weights)
    self.steps.append(s)
    return self.beta
[ "def", "solve_gamlasso", "(", "self", ",", "lam", ")", ":", "weights", "=", "lam", "/", "(", "1", "+", "self", ".", "gamma", "*", "np", ".", "abs", "(", "self", ".", "beta", "[", "self", ".", "trails", "[", ":", ":", "2", "]", "]", "-", "self", ".", "beta", "[", "self", ".", "trails", "[", "1", ":", ":", "2", "]", "]", ")", ")", "s", "=", "self", ".", "solve_gfl", "(", "u", ")", "self", ".", "steps", ".", "append", "(", "s", ")", "return", "self", ".", "beta" ]
Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)
[ "Solves", "the", "Graph", "-", "fused", "gamma", "lasso", "via", "POSE", "(", "Taddy", "2013", ")" ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/solver.py#L142-L147
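Same reweighting idea as solve_dp, but gamma controls how fast the weight decays with the edge difference; gamma = 0 keeps every edge at the full lambda, recovering plain uniform-weight GFL. A toy check of that limit (all values made up):

import numpy as np

beta = np.array([0.0, 1.0, 1.2])
trails = np.array([0, 1, 1, 2])
lam = 0.5
diffs = np.abs(beta[trails[::2]] - beta[trails[1::2]])

for gamma in (0.0, 1.0, 10.0):
    print('gamma=%s -> %s' % (gamma, lam / (1 + gamma * diffs)))
# gamma=0.0 gives [0.5, 0.5]: every edge keeps the full lambda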
tansey/gfl
pygfl/solver.py
TrailSolver.solution_path
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.'''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None

    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))

        # Fit to the final values
        beta = self.solve(lam)

        if verbose:
            print('Calculating degrees of freedom')

        # Count the number of free parameters in the grid (dof)
        plateaus = calc_plateaus(beta, self.edges)
        dof_trace[i] = len(plateaus)

        if verbose:
            print('Calculating AIC')

        # Get the negative log-likelihood
        log_likelihood_trace[i] = self.log_likelihood(beta)

        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]

        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)

        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))

        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus

        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))

        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))

    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))

    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta': np.array(beta_trace),
            'lambda': lambda_grid,
            'best_idx': best_idx,
            'best': beta_trace[best_idx],
            'plateaus': best_plateaus}
python
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.'''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None

    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))

        # Fit to the final values
        beta = self.solve(lam)

        if verbose:
            print('Calculating degrees of freedom')

        # Count the number of free parameters in the grid (dof)
        plateaus = calc_plateaus(beta, self.edges)
        dof_trace[i] = len(plateaus)

        if verbose:
            print('Calculating AIC')

        # Get the negative log-likelihood
        log_likelihood_trace[i] = self.log_likelihood(beta)

        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]

        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)

        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))

        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus

        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))

        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))

    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))

    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta': np.array(beta_trace),
            'lambda': lambda_grid,
            'best_idx': best_idx,
            'best': beta_trace[best_idx],
            'plateaus': best_plateaus}
[ "def", "solution_path", "(", "self", ",", "min_lambda", ",", "max_lambda", ",", "lambda_bins", ",", "verbose", "=", "0", ")", ":", "lambda_grid", "=", "np", ".", "exp", "(", "np", ".", "linspace", "(", "np", ".", "log", "(", "max_lambda", ")", ",", "np", ".", "log", "(", "min_lambda", ")", ",", "lambda_bins", ")", ")", "aic_trace", "=", "np", ".", "zeros", "(", "lambda_grid", ".", "shape", ")", "# The AIC score for each lambda value", "aicc_trace", "=", "np", ".", "zeros", "(", "lambda_grid", ".", "shape", ")", "# The AICc score for each lambda value (correcting for finite sample size)", "bic_trace", "=", "np", ".", "zeros", "(", "lambda_grid", ".", "shape", ")", "# The BIC score for each lambda value", "dof_trace", "=", "np", ".", "zeros", "(", "lambda_grid", ".", "shape", ")", "# The degrees of freedom of each final solution", "log_likelihood_trace", "=", "np", ".", "zeros", "(", "lambda_grid", ".", "shape", ")", "beta_trace", "=", "[", "]", "best_idx", "=", "None", "best_plateaus", "=", "None", "# Solve the series of lambda values with warm starts at each point", "for", "i", ",", "lam", "in", "enumerate", "(", "lambda_grid", ")", ":", "if", "verbose", ":", "print", "(", "'#{0} Lambda = {1}'", ".", "format", "(", "i", ",", "lam", ")", ")", "# Fit to the final values", "beta", "=", "self", ".", "solve", "(", "lam", ")", "if", "verbose", ":", "print", "(", "'Calculating degrees of freedom'", ")", "# Count the number of free parameters in the grid (dof)", "plateaus", "=", "calc_plateaus", "(", "beta", ",", "self", ".", "edges", ")", "dof_trace", "[", "i", "]", "=", "len", "(", "plateaus", ")", "if", "verbose", ":", "print", "(", "'Calculating AIC'", ")", "# Get the negative log-likelihood", "log_likelihood_trace", "[", "i", "]", "=", "self", ".", "log_likelihood", "(", "beta", ")", "# Calculate AIC = 2k - 2ln(L)", "aic_trace", "[", "i", "]", "=", "2.", "*", "dof_trace", "[", "i", "]", "-", "2.", "*", "log_likelihood_trace", "[", "i", "]", "# Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)", "aicc_trace", "[", "i", "]", "=", "aic_trace", "[", "i", "]", "+", "2", "*", "dof_trace", "[", "i", "]", "*", "(", "dof_trace", "[", "i", "]", "+", "1", ")", "/", "(", "len", "(", "beta", ")", "-", "dof_trace", "[", "i", "]", "-", "1.", ")", "# Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))", "bic_trace", "[", "i", "]", "=", "-", "2", "*", "log_likelihood_trace", "[", "i", "]", "+", "dof_trace", "[", "i", "]", "*", "(", "np", ".", "log", "(", "len", "(", "beta", ")", ")", "-", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", ")", "# Track the best model thus far", "if", "best_idx", "is", "None", "or", "bic_trace", "[", "i", "]", "<", "bic_trace", "[", "best_idx", "]", ":", "best_idx", "=", "i", "best_plateaus", "=", "plateaus", "# Save the trace of all the resulting parameters", "beta_trace", ".", "append", "(", "np", ".", "array", "(", "beta", ")", ")", "if", "verbose", ":", "print", "(", "'DoF: {0} AIC: {1} AICc: {2} BIC: {3}'", ".", "format", "(", "dof_trace", "[", "i", "]", ",", "aic_trace", "[", "i", "]", ",", "aicc_trace", "[", "i", "]", ",", "bic_trace", "[", "i", "]", ")", ")", "if", "verbose", ":", "print", "(", "'Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'", ".", "format", "(", "lambda_grid", "[", "best_idx", "]", ",", "dof_trace", "[", "best_idx", "]", ",", "aic_trace", "[", "best_idx", "]", ",", "aicc_trace", "[", "best_idx", "]", ",", "bic_trace", "[", "best_idx", "]", ")", ")", "return", "{", "'aic'", ":", "aic_trace", ",", "'aicc'", ":", 
"aicc_trace", ",", "'bic'", ":", "bic_trace", ",", "'dof'", ":", "dof_trace", ",", "'loglikelihood'", ":", "log_likelihood_trace", ",", "'beta'", ":", "np", ".", "array", "(", "beta_trace", ")", ",", "'lambda'", ":", "lambda_grid", ",", "'best_idx'", ":", "best_idx", ",", "'best'", ":", "beta_trace", "[", "best_idx", "]", ",", "'plateaus'", ":", "best_plateaus", "}" ]
Follows the solution path to find the best lambda value.
[ "Follows", "the", "solution", "path", "to", "find", "the", "best", "lambda", "value", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/solver.py#L152-L217
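The model-selection scores in solution_path follow the standard definitions with k = degrees of freedom (plateau count) and n = number of nodes. The same three formulas isolated into a runnable helper (function name and toy numbers are mine; the expressions match the code above):

import numpy as np

def information_criteria(log_likelihood, dof, n):
    aic = 2. * dof - 2. * log_likelihood                        # AIC = 2k - 2ln(L)
    aicc = aic + 2 * dof * (dof + 1) / (n - dof - 1.)           # finite-sample correction
    bic = -2 * log_likelihood + dof * (np.log(n) - np.log(2 * np.pi))
    return aic, aicc, bic

print(information_criteria(log_likelihood=-120.0, dof=12, n=400))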
zsethna/OLGA
olga/generate_sequences.py
main
def main():
    """ Generate sequences."""
    parser = OptionParser(conflict_handler="resolve")

    parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)')
    parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)')
    parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)')
    parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)')
    parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model')
    parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model')
    parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE')
    parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.')
    parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.')
    parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5')
    parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.")
    parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.')
    parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'")
    parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.')
    parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.")

    (options, args) = parser.parse_args()

    main_folder = os.path.dirname(__file__)

    default_models = {}
    default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
    default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
    default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
    default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']

    num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])

    if num_models_specified == 1: #exactly one model specified
        try:
            d_model = [x for x in default_models.keys() if getattr(options, x)][0]
            model_folder = default_models[d_model][0]
            recomb_type = default_models[d_model][1]
        except IndexError:
            if options.vdj_model_folder: #custom VDJ model specified
                model_folder = options.vdj_model_folder
                recomb_type = 'VDJ'
            elif options.vj_model_folder: #custom VJ model specified
                model_folder = options.vj_model_folder
                recomb_type = 'VJ'
    elif num_models_specified == 0:
        print 'Need to indicate generative model.'
        print 'Exiting...'
        return -1
    elif num_models_specified > 1:
        print 'Only specify one model'
        print 'Exiting...'
        return -1

    #Check that all model and genomic files exist in the indicated model folder
    if not os.path.isdir(model_folder):
        print 'Check pathing... cannot find the model folder: ' + model_folder
        print 'Exiting...'
        return -1

    params_file_name = os.path.join(model_folder, 'model_params.txt')
    marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
    V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
    J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

    for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
        if not os.path.isfile(x):
            print 'Cannot find: ' + x
            print 'Please check the files (and naming conventions) in the model folder ' + model_folder
            print 'Exiting...'
            return -1

    if options.outfile_name is not None:
        outfile_name = options.outfile_name
        if os.path.isfile(outfile_name):
            if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
                print 'Exiting...'
                return -1

    #Parse arguments
    num_seqs_to_generate = int(options.num_seqs_to_generate)

    if num_seqs_to_generate <= 0:
        print 'Need to specify num_seqs (number of sequences to generate).'
        print 'Exiting...'
        return -1

    #Parse default delimiter
    delimiter = options.delimiter
    if delimiter is None:
        delimiter = '\t'
        if options.outfile_name is not None:
            if outfile_name.endswith('.tsv'):
                delimiter = '\t'
            elif outfile_name.endswith('.csv'):
                delimiter = ','
    else:
        try:
            delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
        except KeyError:
            pass #Other raw string.

    #Optional flags
    seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type]
    record_genes = options.record_genes
    seqs_per_time_update = int(options.seqs_per_time_update)
    time_updates = options.time_updates
    conserved_J_residues = options.conserved_J_residues

    if options.seed is not None:
        np.random.seed(options.seed)

    #VDJ recomb case --- used for TCRB and IGH
    if recomb_type == 'VDJ':
        genomic_data = load_model.GenomicDataVDJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVDJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data)
    #VJ recomb case --- used for TCRA and light chain
    elif recomb_type == 'VJ':
        genomic_data = load_model.GenomicDataVJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data)

    V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV]
    J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ]

    if options.outfile_name is not None:
        outfile = open(outfile_name, 'w')

        print 'Starting sequence generation... '
        start_time = time.time()

        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            outfile.write(current_line_out + '\n')

            if (i+1)%seqs_per_time_update == 0 and time_updates:
                c_time = time.time() - start_time
                eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time
                if c_time > 86400: #more than a day
                    c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 3600: #more than an hr
                    c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 60: #more than a min
                    c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
                else:
                    c_time_str = '%.2f seconds.'%(c_time)
                if eta > 86400: #more than a day
                    eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 3600: #more than an hr
                    eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 60: #more than a min
                    eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
                else:
                    eta_str = '%.2f seconds.'%(eta)
                print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)

        c_time = time.time() - start_time
        if c_time > 86400: #more than a day
            c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 3600: #more than an hr
            c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 60: #more than a min
            c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
        else:
            c_time_str = '%.2f seconds.'%(c_time)
        print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str)

        outfile.close()

    else: #print to stdout
        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            print current_line_out
python
def main():
    """ Generate sequences."""
    parser = OptionParser(conflict_handler="resolve")

    parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)')
    parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)')
    parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)')
    parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)')
    parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model')
    parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model')
    parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE')
    parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.')
    parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.')
    parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5')
    parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.")
    parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.')
    parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'")
    parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.')
    parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.")

    (options, args) = parser.parse_args()

    main_folder = os.path.dirname(__file__)

    default_models = {}
    default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
    default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
    default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
    default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']

    num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])

    if num_models_specified == 1: #exactly one model specified
        try:
            d_model = [x for x in default_models.keys() if getattr(options, x)][0]
            model_folder = default_models[d_model][0]
            recomb_type = default_models[d_model][1]
        except IndexError:
            if options.vdj_model_folder: #custom VDJ model specified
                model_folder = options.vdj_model_folder
                recomb_type = 'VDJ'
            elif options.vj_model_folder: #custom VJ model specified
                model_folder = options.vj_model_folder
                recomb_type = 'VJ'
    elif num_models_specified == 0:
        print 'Need to indicate generative model.'
        print 'Exiting...'
        return -1
    elif num_models_specified > 1:
        print 'Only specify one model'
        print 'Exiting...'
        return -1

    #Check that all model and genomic files exist in the indicated model folder
    if not os.path.isdir(model_folder):
        print 'Check pathing... cannot find the model folder: ' + model_folder
        print 'Exiting...'
        return -1

    params_file_name = os.path.join(model_folder, 'model_params.txt')
    marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
    V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
    J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

    for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
        if not os.path.isfile(x):
            print 'Cannot find: ' + x
            print 'Please check the files (and naming conventions) in the model folder ' + model_folder
            print 'Exiting...'
            return -1

    if options.outfile_name is not None:
        outfile_name = options.outfile_name
        if os.path.isfile(outfile_name):
            if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
                print 'Exiting...'
                return -1

    #Parse arguments
    num_seqs_to_generate = int(options.num_seqs_to_generate)

    if num_seqs_to_generate <= 0:
        print 'Need to specify num_seqs (number of sequences to generate).'
        print 'Exiting...'
        return -1

    #Parse default delimiter
    delimiter = options.delimiter
    if delimiter is None:
        delimiter = '\t'
        if options.outfile_name is not None:
            if outfile_name.endswith('.tsv'):
                delimiter = '\t'
            elif outfile_name.endswith('.csv'):
                delimiter = ','
    else:
        try:
            delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
        except KeyError:
            pass #Other raw string.

    #Optional flags
    seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type]
    record_genes = options.record_genes
    seqs_per_time_update = int(options.seqs_per_time_update)
    time_updates = options.time_updates
    conserved_J_residues = options.conserved_J_residues

    if options.seed is not None:
        np.random.seed(options.seed)

    #VDJ recomb case --- used for TCRB and IGH
    if recomb_type == 'VDJ':
        genomic_data = load_model.GenomicDataVDJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVDJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data)
    #VJ recomb case --- used for TCRA and light chain
    elif recomb_type == 'VJ':
        genomic_data = load_model.GenomicDataVJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data)

    V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV]
    J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ]

    if options.outfile_name is not None:
        outfile = open(outfile_name, 'w')

        print 'Starting sequence generation... '
        start_time = time.time()

        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            outfile.write(current_line_out + '\n')

            if (i+1)%seqs_per_time_update == 0 and time_updates:
                c_time = time.time() - start_time
                eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time
                if c_time > 86400: #more than a day
                    c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 3600: #more than an hr
                    c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 60: #more than a min
                    c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
                else:
                    c_time_str = '%.2f seconds.'%(c_time)
                if eta > 86400: #more than a day
                    eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 3600: #more than an hr
                    eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 60: #more than a min
                    eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
                else:
                    eta_str = '%.2f seconds.'%(eta)
                print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)

        c_time = time.time() - start_time
        if c_time > 86400: #more than a day
            c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 3600: #more than an hr
            c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 60: #more than a min
            c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
        else:
            c_time_str = '%.2f seconds.'%(c_time)
        print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str)

        outfile.close()

    else: #print to stdout
        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            print current_line_out
[ "def", "main", "(", ")", ":", "parser", "=", "OptionParser", "(", "conflict_handler", "=", "\"resolve\"", ")", "parser", ".", "add_option", "(", "'--humanTRA'", ",", "'--human_T_alpha'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanTRA'", ",", "default", "=", "False", ",", "help", "=", "'use default human TRA model (T cell alpha chain)'", ")", "parser", ".", "add_option", "(", "'--humanTRB'", ",", "'--human_T_beta'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanTRB'", ",", "default", "=", "False", ",", "help", "=", "'use default human TRB model (T cell beta chain)'", ")", "parser", ".", "add_option", "(", "'--mouseTRB'", ",", "'--mouse_T_beta'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'mouseTRB'", ",", "default", "=", "False", ",", "help", "=", "'use default mouse TRB model (T cell beta chain)'", ")", "parser", ".", "add_option", "(", "'--humanIGH'", ",", "'--human_B_heavy'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanIGH'", ",", "default", "=", "False", ",", "help", "=", "'use default human IGH model (B cell heavy chain)'", ")", "parser", ".", "add_option", "(", "'--VDJ_model_folder'", ",", "dest", "=", "'vdj_model_folder'", ",", "metavar", "=", "'PATH/TO/FOLDER/'", ",", "help", "=", "'specify PATH/TO/FOLDER/ for a custom VDJ generative model'", ")", "parser", ".", "add_option", "(", "'--VJ_model_folder'", ",", "dest", "=", "'vj_model_folder'", ",", "metavar", "=", "'PATH/TO/FOLDER/'", ",", "help", "=", "'specify PATH/TO/FOLDER/ for a custom VJ generative model'", ")", "parser", ".", "add_option", "(", "'-o'", ",", "'--outfile'", ",", "dest", "=", "'outfile_name'", ",", "metavar", "=", "'PATH/TO/FILE'", ",", "help", "=", "'write CDR3 sequences to PATH/TO/FILE'", ")", "parser", ".", "add_option", "(", "'-n'", ",", "'--num_seqs'", ",", "type", "=", "'float'", ",", "metavar", "=", "'N'", ",", "default", "=", "0", ",", "dest", "=", "'num_seqs_to_generate'", ",", "help", "=", "'specify the number of sequences to generate.'", ")", "parser", ".", "add_option", "(", "'--seed'", ",", "type", "=", "'int'", ",", "dest", "=", "'seed'", ",", "help", "=", "'set seed for pseudorandom number generator. Default is to not set a seed.'", ")", "parser", ".", "add_option", "(", "'--seqs_per_time_update'", ",", "type", "=", "'float'", ",", "default", "=", "100000", ",", "dest", "=", "'seqs_per_time_update'", ",", "help", "=", "'specify the number of sequences between time updates. Default is 1e5'", ")", "parser", ".", "add_option", "(", "'--conserved_J_residues'", ",", "type", "=", "'string'", ",", "default", "=", "'FVW'", ",", "dest", "=", "'conserved_J_residues'", ",", "help", "=", "\"specify conserved J residues. Default is 'FVW'.\"", ")", "parser", ".", "add_option", "(", "'--time_updates_off'", ",", "action", "=", "'store_false'", ",", "dest", "=", "'time_updates'", ",", "default", "=", "True", ",", "help", "=", "'turn time updates off.'", ")", "parser", ".", "add_option", "(", "'--seq_type'", ",", "type", "=", "'choice'", ",", "default", "=", "'all'", ",", "dest", "=", "'seq_type'", ",", "choices", "=", "[", "'all'", ",", "'ntseq'", ",", "'nucleotide'", ",", "'aaseq'", ",", "'amino_acid'", "]", ",", "help", "=", "\"declare sequence type for output sequences. 
Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'\"", ")", "parser", ".", "add_option", "(", "'--record_genes_off'", ",", "action", "=", "'store_false'", ",", "dest", "=", "\"record_genes\"", ",", "default", "=", "True", ",", "help", "=", "'turn off recording V and J gene info.'", ")", "parser", ".", "add_option", "(", "'-d'", ",", "'--delimiter'", ",", "type", "=", "'choice'", ",", "dest", "=", "'delimiter'", ",", "choices", "=", "[", "'tab'", ",", "'space'", ",", "','", ",", "';'", ",", "':'", "]", ",", "help", "=", "\"declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'\"", ")", "parser", ".", "add_option", "(", "'--raw_delimiter'", ",", "type", "=", "'str'", ",", "dest", "=", "'delimiter'", ",", "help", "=", "\"declare delimiter choice as a raw string.\"", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "main_folder", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "default_models", "=", "{", "}", "default_models", "[", "'humanTRA'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_T_alpha'", ")", ",", "'VJ'", "]", "default_models", "[", "'humanTRB'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_T_beta'", ")", ",", "'VDJ'", "]", "default_models", "[", "'mouseTRB'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'mouse_T_beta'", ")", ",", "'VDJ'", "]", "default_models", "[", "'humanIGH'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_B_heavy'", ")", ",", "'VDJ'", "]", "num_models_specified", "=", "sum", "(", "[", "1", "for", "x", "in", "default_models", ".", "keys", "(", ")", "+", "[", "'vj_model_folder'", ",", "'vdj_model_folder'", "]", "if", "getattr", "(", "options", ",", "x", ")", "]", ")", "if", "num_models_specified", "==", "1", ":", "#exactly one model specified", "try", ":", "d_model", "=", "[", "x", "for", "x", "in", "default_models", ".", "keys", "(", ")", "if", "getattr", "(", "options", ",", "x", ")", "]", "[", "0", "]", "model_folder", "=", "default_models", "[", "d_model", "]", "[", "0", "]", "recomb_type", "=", "default_models", "[", "d_model", "]", "[", "1", "]", "except", "IndexError", ":", "if", "options", ".", "vdj_model_folder", ":", "#custom VDJ model specified", "model_folder", "=", "options", ".", "vdj_model_folder", "recomb_type", "=", "'VDJ'", "elif", "options", ".", "vj_model_folder", ":", "#custom VJ model specified", "model_folder", "=", "options", ".", "vj_model_folder", "recomb_type", "=", "'VJ'", "elif", "num_models_specified", "==", "0", ":", "print", "'Need to indicate generative model.'", "print", "'Exiting...'", "return", "-", "1", "elif", "num_models_specified", ">", "1", ":", "print", "'Only specify one model'", "print", "'Exiting...'", "return", "-", "1", "#Check that all model and genomic files exist in the indicated model folder", "if", "not", "os", ".", "path", ".", "isdir", "(", "model_folder", ")", ":", "print", "'Check pathing... 
cannot find the model folder: '", "+", "model_folder", "print", "'Exiting...'", "return", "-", "1", "params_file_name", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'model_params.txt'", ")", "marginals_file_name", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'model_marginals.txt'", ")", "V_anchor_pos_file", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'V_gene_CDR3_anchors.csv'", ")", "J_anchor_pos_file", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'J_gene_CDR3_anchors.csv'", ")", "for", "x", "in", "[", "params_file_name", ",", "marginals_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", "]", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "x", ")", ":", "print", "'Cannot find: '", "+", "x", "print", "'Please check the files (and naming conventions) in the model folder '", "+", "model_folder", "print", "'Exiting...'", "return", "-", "1", "if", "options", ".", "outfile_name", "is", "not", "None", ":", "outfile_name", "=", "options", ".", "outfile_name", "if", "os", ".", "path", ".", "isfile", "(", "outfile_name", ")", ":", "if", "not", "raw_input", "(", "outfile_name", "+", "' already exists. Overwrite (y/n)? '", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "in", "[", "'y'", ",", "'yes'", "]", ":", "print", "'Exiting...'", "return", "-", "1", "#Parse arguments", "num_seqs_to_generate", "=", "int", "(", "options", ".", "num_seqs_to_generate", ")", "if", "num_seqs_to_generate", "<=", "0", ":", "print", "'Need to specify num_seqs (number of sequences to generate).'", "print", "'Exiting...'", "return", "-", "1", "#Parse default delimiter", "delimiter", "=", "options", ".", "delimiter", "if", "delimiter", "is", "None", ":", "delimiter", "=", "'\\t'", "if", "options", ".", "outfile_name", "is", "not", "None", ":", "if", "outfile_name", ".", "endswith", "(", "'.tsv'", ")", ":", "delimiter", "=", "'\\t'", "elif", "outfile_name", ".", "endswith", "(", "'.csv'", ")", ":", "delimiter", "=", "','", "else", ":", "try", ":", "delimiter", "=", "{", "'tab'", ":", "'\\t'", ",", "'space'", ":", "' '", ",", "','", ":", "','", ",", "';'", ":", "';'", ",", "':'", ":", "':'", "}", "[", "delimiter", "]", "except", "KeyError", ":", "pass", "#Other raw string.", "#Optional flags", "seq_type", "=", "{", "'all'", ":", "'all'", ",", "'ntseq'", ":", "'ntseq'", ",", "'nucleotide'", ":", "'ntseq'", ",", "'aaseq'", ":", "'aaseq'", ",", "'amino_acid'", ":", "'aaseq'", "}", "[", "options", ".", "seq_type", "]", "record_genes", "=", "options", ".", "record_genes", "seqs_per_time_update", "=", "int", "(", "options", ".", "seqs_per_time_update", ")", "time_updates", "=", "options", ".", "time_updates", "conserved_J_residues", "=", "options", ".", "conserved_J_residues", "if", "options", ".", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "options", ".", "seed", ")", "#VDJ recomb case --- used for TCRB and IGH", "if", "recomb_type", "==", "'VDJ'", ":", "genomic_data", "=", "load_model", ".", "GenomicDataVDJ", "(", ")", "genomic_data", ".", "load_igor_genomic_data", "(", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "generative_model", "=", "load_model", ".", "GenerativeModelVDJ", "(", ")", "generative_model", ".", "load_and_process_igor_model", "(", "marginals_file_name", ")", "seq_gen", "=", "sequence_generation", ".", "SequenceGenerationVDJ", "(", "generative_model", ",", "genomic_data", ")", "#VJ recomb case --- used for TCRA and light chain", "elif", 
"recomb_type", "==", "'VJ'", ":", "genomic_data", "=", "load_model", ".", "GenomicDataVJ", "(", ")", "genomic_data", ".", "load_igor_genomic_data", "(", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "generative_model", "=", "load_model", ".", "GenerativeModelVJ", "(", ")", "generative_model", ".", "load_and_process_igor_model", "(", "marginals_file_name", ")", "seq_gen", "=", "sequence_generation", ".", "SequenceGenerationVJ", "(", "generative_model", ",", "genomic_data", ")", "V_gene_names", "=", "[", "V", "[", "0", "]", ".", "split", "(", "'*'", ")", "[", "0", "]", "for", "V", "in", "genomic_data", ".", "genV", "]", "J_gene_names", "=", "[", "J", "[", "0", "]", ".", "split", "(", "'*'", ")", "[", "0", "]", "for", "J", "in", "genomic_data", ".", "genJ", "]", "if", "options", ".", "outfile_name", "is", "not", "None", ":", "outfile", "=", "open", "(", "outfile_name", ",", "'w'", ")", "print", "'Starting sequence generation... '", "start_time", "=", "time", ".", "time", "(", ")", "for", "i", "in", "range", "(", "num_seqs_to_generate", ")", ":", "ntseq", ",", "aaseq", ",", "V_in", ",", "J_in", "=", "seq_gen", ".", "gen_rnd_prod_CDR3", "(", "conserved_J_residues", ")", "if", "seq_type", "==", "'all'", ":", "#default, include both ntseq and aaseq", "current_line_out", "=", "ntseq", "+", "delimiter", "+", "aaseq", "elif", "seq_type", "==", "'ntseq'", ":", "#only record ntseq", "current_line_out", "=", "ntseq", "elif", "seq_type", "==", "'aaseq'", ":", "#only record aaseq", "current_line_out", "=", "aaseq", "if", "record_genes", ":", "current_line_out", "+=", "delimiter", "+", "V_gene_names", "[", "V_in", "]", "+", "delimiter", "+", "J_gene_names", "[", "J_in", "]", "outfile", ".", "write", "(", "current_line_out", "+", "'\\n'", ")", "if", "(", "i", "+", "1", ")", "%", "seqs_per_time_update", "==", "0", "and", "time_updates", ":", "c_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "eta", "=", "(", "(", "num_seqs_to_generate", "-", "(", "i", "+", "1", ")", ")", "/", "float", "(", "i", "+", "1", ")", ")", "*", "c_time", "if", "c_time", ">", "86400", ":", "#more than a day", "c_time_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "c_time", ")", "/", "86400", ",", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "3600", ":", "#more than an hr", "c_time_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "60", ":", "#more than a min", "c_time_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "else", ":", "c_time_str", "=", "'%.2f seconds.'", "%", "(", "c_time", ")", "if", "eta", ">", "86400", ":", "#more than a day", "eta_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "eta", ")", "/", "86400", ",", "(", "int", "(", "eta", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ",", "eta", "%", "60", ")", "elif", "eta", ">", "3600", ":", "#more than an hr", "eta_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "eta", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "eta", ")", "/", "60", ")", 
"%", "60", ",", "eta", "%", "60", ")", "elif", "eta", ">", "60", ":", "#more than a min", "eta_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ",", "eta", "%", "60", ")", "else", ":", "eta_str", "=", "'%.2f seconds.'", "%", "(", "eta", ")", "print", "'%d sequences generated in %s Estimated time remaining: %s'", "%", "(", "i", "+", "1", ",", "c_time_str", ",", "eta_str", ")", "c_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "if", "c_time", ">", "86400", ":", "#more than a day", "c_time_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "c_time", ")", "/", "86400", ",", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "3600", ":", "#more than an hr", "c_time_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "60", ":", "#more than a min", "c_time_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "else", ":", "c_time_str", "=", "'%.2f seconds.'", "%", "(", "c_time", ")", "print", "'Completed generating all %d sequences in %s'", "%", "(", "num_seqs_to_generate", ",", "c_time_str", ")", "outfile", ".", "close", "(", ")", "else", ":", "#print to stdout", "for", "i", "in", "range", "(", "num_seqs_to_generate", ")", ":", "ntseq", ",", "aaseq", ",", "V_in", ",", "J_in", "=", "seq_gen", ".", "gen_rnd_prod_CDR3", "(", "conserved_J_residues", ")", "if", "seq_type", "==", "'all'", ":", "#default, include both ntseq and aaseq", "current_line_out", "=", "ntseq", "+", "delimiter", "+", "aaseq", "elif", "seq_type", "==", "'ntseq'", ":", "#only record ntseq", "current_line_out", "=", "ntseq", "elif", "seq_type", "==", "'aaseq'", ":", "#only record aaseq", "current_line_out", "=", "aaseq", "if", "record_genes", ":", "current_line_out", "+=", "delimiter", "+", "V_gene_names", "[", "V_in", "]", "+", "delimiter", "+", "J_gene_names", "[", "J_in", "]", "print", "current_line_out" ]
Generate sequences.
[ "Generate", "sequences", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/generate_sequences.py#L144-L345
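Editor's note: a minimal usage sketch of the pipeline that main() above wires together. It assumes the modules are importable as olga.load_model and olga.sequence_generation and that the default human TRB model directory exists at the path built below (both are inferences from the record, not verified): import os; import numpy as np; import olga.load_model as load_model; import olga.sequence_generation as sequence_generation; model_folder = os.path.join('olga', 'default_models', 'human_T_beta')  # assumed layout; params_file_name = os.path.join(model_folder, 'model_params.txt'); marginals_file_name = os.path.join(model_folder, 'model_marginals.txt'); V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv'); J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv'); genomic_data = load_model.GenomicDataVDJ(); genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file); generative_model = load_model.GenerativeModelVDJ(); generative_model.load_and_process_igor_model(marginals_file_name); seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data); np.random.seed(0)  # mirrors the --seed option; ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3('FVW')  # 'FVW' = default conserved J residues; print(ntseq + '\t' + aaseq). This reproduces one iteration of the generation loop in main() for the VDJ case.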
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver._send_merge_commands
def _send_merge_commands(self, config, file_config):
    """
    Netmiko is being used to push set commands.
    """
    if self.loaded is False:
        if self._save_backup() is False:
            raise MergeConfigException('Error while storing backup '
                                       'config.')
    if self.ssh_connection is False:
        self._open_ssh()

    if file_config:
        if isinstance(config, str):
            config = config.splitlines()
    else:
        if isinstance(config, str):
            config = str(config).split()

    self.ssh_device.send_config_set(config)
    self.loaded = True
    self.merge_config = True
python
def _send_merge_commands(self, config, file_config):
    """
    Netmiko is being used to push set commands.
    """
    if self.loaded is False:
        if self._save_backup() is False:
            raise MergeConfigException('Error while storing backup '
                                       'config.')
    if self.ssh_connection is False:
        self._open_ssh()

    if file_config:
        if isinstance(config, str):
            config = config.splitlines()
    else:
        if isinstance(config, str):
            config = str(config).split()

    self.ssh_device.send_config_set(config)
    self.loaded = True
    self.merge_config = True
[ "def", "_send_merge_commands", "(", "self", ",", "config", ",", "file_config", ")", ":", "if", "self", ".", "loaded", "is", "False", ":", "if", "self", ".", "_save_backup", "(", ")", "is", "False", ":", "raise", "MergeConfigException", "(", "'Error while storing backup '", "'config.'", ")", "if", "self", ".", "ssh_connection", "is", "False", ":", "self", ".", "_open_ssh", "(", ")", "if", "file_config", ":", "if", "isinstance", "(", "config", ",", "str", ")", ":", "config", "=", "config", ".", "splitlines", "(", ")", "else", ":", "if", "isinstance", "(", "config", ",", "str", ")", ":", "config", "=", "str", "(", "config", ")", ".", "split", "(", ")", "self", ".", "ssh_device", ".", "send_config_set", "(", "config", ")", "self", ".", "loaded", "=", "True", "self", ".", "merge_config", "=", "True" ]
Netmiko is being used to push set commands.
[ "Netmiko", "is", "being", "used", "to", "push", "set", "commands", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L212-L232
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.compare_config
def compare_config(self):
    """
    Netmiko is being used to obtain config diffs because pan-python
    doesn't support the needed command.
    """
    if self.ssh_connection is False:
        self._open_ssh()

    self.ssh_device.exit_config_mode()
    diff = self.ssh_device.send_command("show config diff")

    return diff.strip()
python
def compare_config(self):
    """
    Netmiko is being used to obtain config diffs because pan-python
    doesn't support the needed command.
    """
    if self.ssh_connection is False:
        self._open_ssh()

    self.ssh_device.exit_config_mode()
    diff = self.ssh_device.send_command("show config diff")

    return diff.strip()
[ "def", "compare_config", "(", "self", ")", ":", "if", "self", ".", "ssh_connection", "is", "False", ":", "self", ".", "_open_ssh", "(", ")", "self", ".", "ssh_device", ".", "exit_config_mode", "(", ")", "diff", "=", "self", ".", "ssh_device", ".", "send_command", "(", "\"show config diff\"", ")", "return", "diff", ".", "strip", "(", ")" ]
Netmiko is being used to obtain config diffs because pan-python doesn't support the needed command.
[ "Netmiko", "is", "being", "used", "to", "obtain", "config", "diffs", "because", "pan", "-", "python", "doesn", "t", "support", "the", "needed", "command", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L280-L290
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.commit_config
def commit_config(self):
    """
    Netmiko is being used to commit the configuration because it takes
    a better care of results compared to pan-python.
    """
    if self.loaded:
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            time.sleep(3)
            self.loaded = False
            self.changed = True
        except:   # noqa
            if self.merge_config:
                raise MergeConfigException('Error while commiting config')
            else:
                raise ReplaceConfigException('Error while commiting config')
    else:
        raise ReplaceConfigException('No config loaded.')
python
def commit_config(self):
    """
    Netmiko is being used to commit the configuration because it takes
    a better care of results compared to pan-python.
    """
    if self.loaded:
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            time.sleep(3)
            self.loaded = False
            self.changed = True
        except:   # noqa
            if self.merge_config:
                raise MergeConfigException('Error while commiting config')
            else:
                raise ReplaceConfigException('Error while commiting config')
    else:
        raise ReplaceConfigException('No config loaded.')
[ "def", "commit_config", "(", "self", ")", ":", "if", "self", ".", "loaded", ":", "if", "self", ".", "ssh_connection", "is", "False", ":", "self", ".", "_open_ssh", "(", ")", "try", ":", "self", ".", "ssh_device", ".", "commit", "(", ")", "time", ".", "sleep", "(", "3", ")", "self", ".", "loaded", "=", "False", "self", ".", "changed", "=", "True", "except", ":", "# noqa", "if", "self", ".", "merge_config", ":", "raise", "MergeConfigException", "(", "'Error while commiting config'", ")", "else", ":", "raise", "ReplaceConfigException", "(", "'Error while commiting config'", ")", "else", ":", "raise", "ReplaceConfigException", "(", "'No config loaded.'", ")" ]
Netmiko is being used to commit the configuration because it takes a better care of results compared to pan-python.
[ "Netmiko", "is", "being", "used", "to", "commit", "the", "configuration", "because", "it", "takes", "a", "better", "care", "of", "results", "compared", "to", "pan", "-", "python", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L302-L321
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.rollback
def rollback(self):
    """
    Netmiko is being used to commit the rollback configuration because
    it takes a better care of results compared to pan-python.
    """
    if self.changed:
        rollback_cmd = '<load><config><from>{0}</from></config></load>'.format(self.backup_file)
        self.device.op(cmd=rollback_cmd)
        time.sleep(5)

        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            self.loaded = False
            self.changed = False
            self.merge_config = False
        except:   # noqa
            ReplaceConfigException("Error while loading backup config")
python
def rollback(self):
    """
    Netmiko is being used to commit the rollback configuration because
    it takes a better care of results compared to pan-python.
    """
    if self.changed:
        rollback_cmd = '<load><config><from>{0}</from></config></load>'.format(self.backup_file)
        self.device.op(cmd=rollback_cmd)
        time.sleep(5)

        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            self.loaded = False
            self.changed = False
            self.merge_config = False
        except:   # noqa
            ReplaceConfigException("Error while loading backup config")
[ "def", "rollback", "(", "self", ")", ":", "if", "self", ".", "changed", ":", "rollback_cmd", "=", "'<load><config><from>{0}</from></config></load>'", ".", "format", "(", "self", ".", "backup_file", ")", "self", ".", "device", ".", "op", "(", "cmd", "=", "rollback_cmd", ")", "time", ".", "sleep", "(", "5", ")", "if", "self", ".", "ssh_connection", "is", "False", ":", "self", ".", "_open_ssh", "(", ")", "try", ":", "self", ".", "ssh_device", ".", "commit", "(", ")", "self", ".", "loaded", "=", "False", "self", ".", "changed", "=", "False", "self", ".", "merge_config", "=", "False", "except", ":", "# noqa", "ReplaceConfigException", "(", "\"Error while loading backup config\"", ")" ]
Netmiko is being used to commit the rollback configuration because it takes a better care of results compared to pan-python.
[ "Netmiko", "is", "being", "used", "to", "commit", "the", "rollback", "configuration", "because", "it", "takes", "a", "better", "care", "of", "results", "compared", "to", "pan", "-", "python", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L334-L352
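Editor's note: the four methods above (_send_merge_commands, compare_config, commit_config, rollback) together implement NAPALM's merge/compare/commit/rollback contract. A hedged end-to-end sketch, assuming this driver is registered under the name 'panos' and that load_merge_candidate() is the public entry point that ends up in _send_merge_commands(); hostname and credentials are placeholders, and on older NAPALM installs the import may be from napalm_base instead: from napalm import get_network_driver; driver_class = get_network_driver('panos')  # assumed registration name; device = driver_class('fw.example.com', 'admin', 'secret')  # placeholder host/credentials; device.open(); device.load_merge_candidate(filename='merge_config.txt')  # assumed to reach _send_merge_commands(); print(device.compare_config())  # Netmiko runs 'show config diff' on the box; device.commit_config()  # commits via Netmiko; raises Merge/ReplaceConfigException on failure; # device.rollback()  # would reload and commit the saved backup config; device.close().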
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.get_lldp_neighbors
def get_lldp_neighbors(self):
    """Return LLDP neighbors details."""
    neighbors = {}

    cmd = '<show><lldp><neighbors>all</neighbors></lldp></show>'
    try:
        self.device.op(cmd=cmd)
        lldp_table_xml = xmltodict.parse(self.device.xml_root())
        lldp_table_json = json.dumps(lldp_table_xml['response']['result']['entry'])
        lldp_table = json.loads(lldp_table_json)
    except AttributeError:
        lldp_table = []

    for lldp_item in lldp_table:
        local_int = lldp_item['@name']

        if local_int not in neighbors.keys():
            neighbors[local_int] = []
        try:
            lldp_neighs = lldp_item.get('neighbors').get('entry')
        except AttributeError:
            lldp_neighs = ''
        if isinstance(lldp_neighs, dict):
            lldp_neighs = [lldp_neighs]

        for neighbor in lldp_neighs:
            n = {}
            n['hostname'] = neighbor['system-name']
            n['port'] = neighbor['port-id']
            neighbors[local_int].append(n)
    return neighbors
python
def get_lldp_neighbors(self):
    """Return LLDP neighbors details."""
    neighbors = {}

    cmd = '<show><lldp><neighbors>all</neighbors></lldp></show>'
    try:
        self.device.op(cmd=cmd)
        lldp_table_xml = xmltodict.parse(self.device.xml_root())
        lldp_table_json = json.dumps(lldp_table_xml['response']['result']['entry'])
        lldp_table = json.loads(lldp_table_json)
    except AttributeError:
        lldp_table = []

    for lldp_item in lldp_table:
        local_int = lldp_item['@name']

        if local_int not in neighbors.keys():
            neighbors[local_int] = []
        try:
            lldp_neighs = lldp_item.get('neighbors').get('entry')
        except AttributeError:
            lldp_neighs = ''
        if isinstance(lldp_neighs, dict):
            lldp_neighs = [lldp_neighs]

        for neighbor in lldp_neighs:
            n = {}
            n['hostname'] = neighbor['system-name']
            n['port'] = neighbor['port-id']
            neighbors[local_int].append(n)
    return neighbors
[ "def", "get_lldp_neighbors", "(", "self", ")", ":", "neighbors", "=", "{", "}", "cmd", "=", "'<show><lldp><neighbors>all</neighbors></lldp></show>'", "try", ":", "self", ".", "device", ".", "op", "(", "cmd", "=", "cmd", ")", "lldp_table_xml", "=", "xmltodict", ".", "parse", "(", "self", ".", "device", ".", "xml_root", "(", ")", ")", "lldp_table_json", "=", "json", ".", "dumps", "(", "lldp_table_xml", "[", "'response'", "]", "[", "'result'", "]", "[", "'entry'", "]", ")", "lldp_table", "=", "json", ".", "loads", "(", "lldp_table_json", ")", "except", "AttributeError", ":", "lldp_table", "=", "[", "]", "for", "lldp_item", "in", "lldp_table", ":", "local_int", "=", "lldp_item", "[", "'@name'", "]", "if", "local_int", "not", "in", "neighbors", ".", "keys", "(", ")", ":", "neighbors", "[", "local_int", "]", "=", "[", "]", "try", ":", "lldp_neighs", "=", "lldp_item", ".", "get", "(", "'neighbors'", ")", ".", "get", "(", "'entry'", ")", "except", "AttributeError", ":", "lldp_neighs", "=", "''", "if", "isinstance", "(", "lldp_neighs", ",", "dict", ")", ":", "lldp_neighs", "=", "[", "lldp_neighs", "]", "for", "neighbor", "in", "lldp_neighs", ":", "n", "=", "{", "}", "n", "[", "'hostname'", "]", "=", "neighbor", "[", "'system-name'", "]", "n", "[", "'port'", "]", "=", "neighbor", "[", "'port-id'", "]", "neighbors", "[", "local_int", "]", ".", "append", "(", "n", ")", "return", "neighbors" ]
Return LLDP neighbors details.
[ "Return", "LLDP", "neighbors", "details", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L398-L430
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.get_route_to
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol."""

    # Note, it should be possible to query the FIB:
    # "<show><routing><fib></fib></routing></show>"
    # To add informations to this getter
    routes = {}

    if destination:
        destination = "<destination>{0}</destination>".format(destination)
    if protocol:
        protocol = "<type>{0}</type>".format(protocol)

    cmd = "<show><routing><route>{0}{1}</route></routing></show>".format(protocol, destination)
    try:
        self.device.op(cmd=cmd)
        routes_table_xml = xmltodict.parse(self.device.xml_root())
        routes_table_json = json.dumps(routes_table_xml['response']['result']['entry'])
        routes_table = json.loads(routes_table_json)
    except (AttributeError, KeyError):
        routes_table = []

    if isinstance(routes_table, dict):
        routes_table = [routes_table]

    for route in routes_table:
        d = {
            'current_active': False,
            'last_active': False,
            'age': -1,
            'next_hop': u'',
            'protocol': u'',
            'outgoing_interface': u'',
            'preference': -1,
            'inactive_reason': u'',
            'routing_table': u'default',
            'selected_next_hop': False,
            'protocol_attributes': {}
        }
        destination = route['destination']
        flags = route['flags']

        if 'A' in flags:
            d['current_active'] = True
        else:
            d['current_active'] = False
        if 'C' in flags:
            d['protocol'] = "connect"
        if 'S' in flags:
            d['protocol'] = "static"
        if 'R' in flags:
            d['protocol'] = "rip"
        if 'R' in flags:
            d['protocol'] = "rip"
        if 'O' in flags:
            d['protocol'] = "ospf"
        if 'B' in flags:
            d['protocol'] = "bgp"
        if 'H' in flags:
            d['protocol'] = "host"
        if route['age'] is not None:
            d['age'] = int(route['age'])
        if route['nexthop'] is not None:
            d['next_hop'] = route['nexthop']
        if route['interface'] is not None:
            d['outgoing_interface'] = route['interface']
        if route['metric'] is not None:
            d['preference'] = int(route['metric'])
        if route['virtual-router'] is not None:
            d['routing_table'] = route['virtual-router']

        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
python
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol."""

    # Note, it should be possible to query the FIB:
    # "<show><routing><fib></fib></routing></show>"
    # To add informations to this getter
    routes = {}

    if destination:
        destination = "<destination>{0}</destination>".format(destination)
    if protocol:
        protocol = "<type>{0}</type>".format(protocol)

    cmd = "<show><routing><route>{0}{1}</route></routing></show>".format(protocol, destination)
    try:
        self.device.op(cmd=cmd)
        routes_table_xml = xmltodict.parse(self.device.xml_root())
        routes_table_json = json.dumps(routes_table_xml['response']['result']['entry'])
        routes_table = json.loads(routes_table_json)
    except (AttributeError, KeyError):
        routes_table = []

    if isinstance(routes_table, dict):
        routes_table = [routes_table]

    for route in routes_table:
        d = {
            'current_active': False,
            'last_active': False,
            'age': -1,
            'next_hop': u'',
            'protocol': u'',
            'outgoing_interface': u'',
            'preference': -1,
            'inactive_reason': u'',
            'routing_table': u'default',
            'selected_next_hop': False,
            'protocol_attributes': {}
        }
        destination = route['destination']
        flags = route['flags']

        if 'A' in flags:
            d['current_active'] = True
        else:
            d['current_active'] = False
        if 'C' in flags:
            d['protocol'] = "connect"
        if 'S' in flags:
            d['protocol'] = "static"
        if 'R' in flags:
            d['protocol'] = "rip"
        if 'R' in flags:
            d['protocol'] = "rip"
        if 'O' in flags:
            d['protocol'] = "ospf"
        if 'B' in flags:
            d['protocol'] = "bgp"
        if 'H' in flags:
            d['protocol'] = "host"
        if route['age'] is not None:
            d['age'] = int(route['age'])
        if route['nexthop'] is not None:
            d['next_hop'] = route['nexthop']
        if route['interface'] is not None:
            d['outgoing_interface'] = route['interface']
        if route['metric'] is not None:
            d['preference'] = int(route['metric'])
        if route['virtual-router'] is not None:
            d['routing_table'] = route['virtual-router']

        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
[ "def", "get_route_to", "(", "self", ",", "destination", "=", "''", ",", "protocol", "=", "''", ")", ":", "# Note, it should be possible to query the FIB:", "# \"<show><routing><fib></fib></routing></show>\"", "# To add informations to this getter", "routes", "=", "{", "}", "if", "destination", ":", "destination", "=", "\"<destination>{0}</destination>\"", ".", "format", "(", "destination", ")", "if", "protocol", ":", "protocol", "=", "\"<type>{0}</type>\"", ".", "format", "(", "protocol", ")", "cmd", "=", "\"<show><routing><route>{0}{1}</route></routing></show>\"", ".", "format", "(", "protocol", ",", "destination", ")", "try", ":", "self", ".", "device", ".", "op", "(", "cmd", "=", "cmd", ")", "routes_table_xml", "=", "xmltodict", ".", "parse", "(", "self", ".", "device", ".", "xml_root", "(", ")", ")", "routes_table_json", "=", "json", ".", "dumps", "(", "routes_table_xml", "[", "'response'", "]", "[", "'result'", "]", "[", "'entry'", "]", ")", "routes_table", "=", "json", ".", "loads", "(", "routes_table_json", ")", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "routes_table", "=", "[", "]", "if", "isinstance", "(", "routes_table", ",", "dict", ")", ":", "routes_table", "=", "[", "routes_table", "]", "for", "route", "in", "routes_table", ":", "d", "=", "{", "'current_active'", ":", "False", ",", "'last_active'", ":", "False", ",", "'age'", ":", "-", "1", ",", "'next_hop'", ":", "u''", ",", "'protocol'", ":", "u''", ",", "'outgoing_interface'", ":", "u''", ",", "'preference'", ":", "-", "1", ",", "'inactive_reason'", ":", "u''", ",", "'routing_table'", ":", "u'default'", ",", "'selected_next_hop'", ":", "False", ",", "'protocol_attributes'", ":", "{", "}", "}", "destination", "=", "route", "[", "'destination'", "]", "flags", "=", "route", "[", "'flags'", "]", "if", "'A'", "in", "flags", ":", "d", "[", "'current_active'", "]", "=", "True", "else", ":", "d", "[", "'current_active'", "]", "=", "False", "if", "'C'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"connect\"", "if", "'S'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"static\"", "if", "'R'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"rip\"", "if", "'R'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"rip\"", "if", "'O'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"ospf\"", "if", "'B'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"bgp\"", "if", "'H'", "in", "flags", ":", "d", "[", "'protocol'", "]", "=", "\"host\"", "if", "route", "[", "'age'", "]", "is", "not", "None", ":", "d", "[", "'age'", "]", "=", "int", "(", "route", "[", "'age'", "]", ")", "if", "route", "[", "'nexthop'", "]", "is", "not", "None", ":", "d", "[", "'next_hop'", "]", "=", "route", "[", "'nexthop'", "]", "if", "route", "[", "'interface'", "]", "is", "not", "None", ":", "d", "[", "'outgoing_interface'", "]", "=", "route", "[", "'interface'", "]", "if", "route", "[", "'metric'", "]", "is", "not", "None", ":", "d", "[", "'preference'", "]", "=", "int", "(", "route", "[", "'metric'", "]", ")", "if", "route", "[", "'virtual-router'", "]", "is", "not", "None", ":", "d", "[", "'routing_table'", "]", "=", "route", "[", "'virtual-router'", "]", "if", "destination", "not", "in", "routes", ".", "keys", "(", ")", ":", "routes", "[", "destination", "]", "=", "[", "]", "routes", "[", "destination", "]", ".", "append", "(", "d", ")", "return", "routes" ]
Return route details to a specific destination, learned from a certain protocol.
[ "Return", "route", "details", "to", "a", "specific", "destination", "learned", "from", "a", "certain", "protocol", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L432-L507
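Editor's note: a short call sketch for the getter above, reusing the hypothetical device object from the earlier workflow sketch; the dict keys follow the d dict populated in get_route_to(): routes = device.get_route_to(destination='8.8.8.8', protocol='bgp')  # hypothetical query; for destination, paths in routes.items():; for path in paths:; print(destination, path['protocol'], path['next_hop'], path['outgoing_interface']).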
napalm-automation-community/napalm-panos
napalm_panos/panos.py
PANOSDriver.get_interfaces_ip
def get_interfaces_ip(self):
    '''Return IP interface data.'''
    def extract_ip_info(parsed_intf_dict):
        '''
        IPv4:
          - Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
          - Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
            the xmltodict.parse() method.

        IPv6:
          - All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
            either by xmltodict.parse().

        Example of XML response for an intf with multiple IPv4 and IPv6 addresses:

        <response status="success">
            <result>
                <ifnet>
                    <entry>
                        <name>ethernet1/5</name>
                        <zone/>
                        <fwd>N/A</fwd>
                        <vsys>1</vsys>
                        <dyn-addr/>
                        <addr6>
                            <member>fe80::d61d:71ff:fed8:fe14/64</member>
                            <member>2001::1234/120</member>
                        </addr6>
                        <tag>0</tag>
                        <ip>169.254.0.1/30</ip>
                        <id>20</id>
                        <addr>
                            <member>1.1.1.1/28</member>
                        </addr>
                    </entry>
                    {...}
                </ifnet>
                <hw>
                    {...}
                </hw>
            </result>
        </response>
        '''
        intf = parsed_intf_dict['name']
        _ip_info = {intf: {}}

        v4_ip = parsed_intf_dict.get('ip')
        secondary_v4_ip = parsed_intf_dict.get('addr')
        v6_ip = parsed_intf_dict.get('addr6')

        if v4_ip != 'N/A':
            address, pref = v4_ip.split('/')
            _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}

        if secondary_v4_ip is not None:
            members = secondary_v4_ip['member']
            if not isinstance(members, list):
                # If only 1 secondary IP is present, xmltodict converts field to a string, else
                # it converts it to a list of strings.
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}

        if v6_ip is not None:
            members = v6_ip['member']
            if not isinstance(members, list):
                # Same "1 vs many -> string vs list of strings" comment.
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)}

        # Reset dictionary if no addresses were found.
        if _ip_info == {intf: {}}:
            _ip_info = {}

        return _ip_info

    ip_interfaces = {}
    cmd = "<show><interface>all</interface></show>"

    self.device.op(cmd=cmd)
    interface_info_xml = xmltodict.parse(self.device.xml_root())
    interface_info_json = json.dumps(
        interface_info_xml['response']['result']['ifnet']['entry']
    )
    interface_info = json.loads(interface_info_json)

    if isinstance(interface_info, dict):
        # Same "1 vs many -> dict vs list of dicts" comment.
        interface_info = [interface_info]

    for interface_dict in interface_info:
        ip_info = extract_ip_info(interface_dict)
        if ip_info:
            ip_interfaces.update(ip_info)

    return ip_interfaces
python
def get_interfaces_ip(self):
    '''Return IP interface data.'''
    def extract_ip_info(parsed_intf_dict):
        '''
        IPv4:
          - Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
          - Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
            the xmltodict.parse() method.

        IPv6:
          - All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
            either by xmltodict.parse().

        Example of XML response for an intf with multiple IPv4 and IPv6 addresses:

        <response status="success">
            <result>
                <ifnet>
                    <entry>
                        <name>ethernet1/5</name>
                        <zone/>
                        <fwd>N/A</fwd>
                        <vsys>1</vsys>
                        <dyn-addr/>
                        <addr6>
                            <member>fe80::d61d:71ff:fed8:fe14/64</member>
                            <member>2001::1234/120</member>
                        </addr6>
                        <tag>0</tag>
                        <ip>169.254.0.1/30</ip>
                        <id>20</id>
                        <addr>
                            <member>1.1.1.1/28</member>
                        </addr>
                    </entry>
                    {...}
                </ifnet>
                <hw>
                    {...}
                </hw>
            </result>
        </response>
        '''
        intf = parsed_intf_dict['name']
        _ip_info = {intf: {}}

        v4_ip = parsed_intf_dict.get('ip')
        secondary_v4_ip = parsed_intf_dict.get('addr')
        v6_ip = parsed_intf_dict.get('addr6')

        if v4_ip != 'N/A':
            address, pref = v4_ip.split('/')
            _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}

        if secondary_v4_ip is not None:
            members = secondary_v4_ip['member']
            if not isinstance(members, list):
                # If only 1 secondary IP is present, xmltodict converts field to a string, else
                # it converts it to a list of strings.
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}

        if v6_ip is not None:
            members = v6_ip['member']
            if not isinstance(members, list):
                # Same "1 vs many -> string vs list of strings" comment.
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)}

        # Reset dictionary if no addresses were found.
        if _ip_info == {intf: {}}:
            _ip_info = {}

        return _ip_info

    ip_interfaces = {}
    cmd = "<show><interface>all</interface></show>"

    self.device.op(cmd=cmd)
    interface_info_xml = xmltodict.parse(self.device.xml_root())
    interface_info_json = json.dumps(
        interface_info_xml['response']['result']['ifnet']['entry']
    )
    interface_info = json.loads(interface_info_json)

    if isinstance(interface_info, dict):
        # Same "1 vs many -> dict vs list of dicts" comment.
        interface_info = [interface_info]

    for interface_dict in interface_info:
        ip_info = extract_ip_info(interface_dict)
        if ip_info:
            ip_interfaces.update(ip_info)

    return ip_interfaces
[ "def", "get_interfaces_ip", "(", "self", ")", ":", "def", "extract_ip_info", "(", "parsed_intf_dict", ")", ":", "'''\n IPv4:\n - Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.\n - Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by\n the xmltodict.parse() method.\n\n IPv6:\n - All addresses are returned in '<addr6>'. If no v6 configured, this is not returned\n either by xmltodict.parse().\n\n Example of XML response for an intf with multiple IPv4 and IPv6 addresses:\n\n <response status=\"success\">\n <result>\n <ifnet>\n <entry>\n <name>ethernet1/5</name>\n <zone/>\n <fwd>N/A</fwd>\n <vsys>1</vsys>\n <dyn-addr/>\n <addr6>\n <member>fe80::d61d:71ff:fed8:fe14/64</member>\n <member>2001::1234/120</member>\n </addr6>\n <tag>0</tag>\n <ip>169.254.0.1/30</ip>\n <id>20</id>\n <addr>\n <member>1.1.1.1/28</member>\n </addr>\n </entry>\n {...}\n </ifnet>\n <hw>\n {...}\n </hw>\n </result>\n </response>\n '''", "intf", "=", "parsed_intf_dict", "[", "'name'", "]", "_ip_info", "=", "{", "intf", ":", "{", "}", "}", "v4_ip", "=", "parsed_intf_dict", ".", "get", "(", "'ip'", ")", "secondary_v4_ip", "=", "parsed_intf_dict", ".", "get", "(", "'addr'", ")", "v6_ip", "=", "parsed_intf_dict", ".", "get", "(", "'addr6'", ")", "if", "v4_ip", "!=", "'N/A'", ":", "address", ",", "pref", "=", "v4_ip", ".", "split", "(", "'/'", ")", "_ip_info", "[", "intf", "]", ".", "setdefault", "(", "'ipv4'", ",", "{", "}", ")", "[", "address", "]", "=", "{", "'prefix_length'", ":", "int", "(", "pref", ")", "}", "if", "secondary_v4_ip", "is", "not", "None", ":", "members", "=", "secondary_v4_ip", "[", "'member'", "]", "if", "not", "isinstance", "(", "members", ",", "list", ")", ":", "# If only 1 secondary IP is present, xmltodict converts field to a string, else", "# it converts it to a list of strings.", "members", "=", "[", "members", "]", "for", "address", "in", "members", ":", "address", ",", "pref", "=", "address", ".", "split", "(", "'/'", ")", "_ip_info", "[", "intf", "]", ".", "setdefault", "(", "'ipv4'", ",", "{", "}", ")", "[", "address", "]", "=", "{", "'prefix_length'", ":", "int", "(", "pref", ")", "}", "if", "v6_ip", "is", "not", "None", ":", "members", "=", "v6_ip", "[", "'member'", "]", "if", "not", "isinstance", "(", "members", ",", "list", ")", ":", "# Same \"1 vs many -> string vs list of strings\" comment.", "members", "=", "[", "members", "]", "for", "address", "in", "members", ":", "address", ",", "pref", "=", "address", ".", "split", "(", "'/'", ")", "_ip_info", "[", "intf", "]", ".", "setdefault", "(", "'ipv6'", ",", "{", "}", ")", "[", "address", "]", "=", "{", "'prefix_length'", ":", "int", "(", "pref", ")", "}", "# Reset dictionary if no addresses were found.", "if", "_ip_info", "==", "{", "intf", ":", "{", "}", "}", ":", "_ip_info", "=", "{", "}", "return", "_ip_info", "ip_interfaces", "=", "{", "}", "cmd", "=", "\"<show><interface>all</interface></show>\"", "self", ".", "device", ".", "op", "(", "cmd", "=", "cmd", ")", "interface_info_xml", "=", "xmltodict", ".", "parse", "(", "self", ".", "device", ".", "xml_root", "(", ")", ")", "interface_info_json", "=", "json", ".", "dumps", "(", "interface_info_xml", "[", "'response'", "]", "[", "'result'", "]", "[", "'ifnet'", "]", "[", "'entry'", "]", ")", "interface_info", "=", "json", ".", "loads", "(", "interface_info_json", ")", "if", "isinstance", "(", "interface_info", ",", "dict", ")", ":", "# Same \"1 vs many -> dict vs list of dicts\" comment.", "interface_info", "=", "[", 
"interface_info", "]", "for", "interface_dict", "in", "interface_info", ":", "ip_info", "=", "extract_ip_info", "(", "interface_dict", ")", "if", "ip_info", ":", "ip_interfaces", ".", "update", "(", "ip_info", ")", "return", "ip_interfaces" ]
Return IP interface data.
[ "Return", "IP", "interface", "data", "." ]
train
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L561-L660
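The "1 vs many" quirk handled repeatedly above is a general xmltodict property: a repeated XML element parses to a list, while a single occurrence parses to a bare string or dict. A minimal sketch of the normalization pattern; `as_list` is a hypothetical helper name for illustration, not part of napalm-panos:

def as_list(value):
    """Wrap xmltodict scalars so callers can always iterate."""
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

# as_list('1.1.1.1/28')                  -> ['1.1.1.1/28']
# as_list(['1.1.1.1/28', '2.2.2.2/28'])  -> unchanged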
timknip/pycsg
csg/core.py
CSG.refine
def refine(self):
    """
    Return a refined CSG. To each polygon, a middle point is added to each
    edge and to the center of the polygon
    """
    newCSG = CSG()
    for poly in self.polygons:

        verts = poly.vertices
        numVerts = len(verts)

        if numVerts == 0:
            continue

        midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
        midNormal = None
        if verts[0].normal is not None:
            midNormal = poly.plane.normal
        midVert = Vertex(midPos, midNormal)

        newVerts = verts + \
                   [verts[i].interpolate(verts[(i + 1)%numVerts], 0.5) for i in range(numVerts)] + \
                   [midVert]

        i = 0
        vs = [newVerts[i], newVerts[i+numVerts], newVerts[2*numVerts], newVerts[2*numVerts-1]]
        newPoly = Polygon(vs, poly.shared)
        newPoly.shared = poly.shared
        newPoly.plane = poly.plane
        newCSG.polygons.append(newPoly)

        for i in range(1, numVerts):
            vs = [newVerts[i], newVerts[numVerts+i], newVerts[2*numVerts], newVerts[numVerts+i-1]]
            newPoly = Polygon(vs, poly.shared)
            newCSG.polygons.append(newPoly)

    return newCSG
python
def refine(self):
    """
    Return a refined CSG. To each polygon, a middle point is added to each
    edge and to the center of the polygon
    """
    newCSG = CSG()
    for poly in self.polygons:

        verts = poly.vertices
        numVerts = len(verts)

        if numVerts == 0:
            continue

        midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
        midNormal = None
        if verts[0].normal is not None:
            midNormal = poly.plane.normal
        midVert = Vertex(midPos, midNormal)

        newVerts = verts + \
                   [verts[i].interpolate(verts[(i + 1)%numVerts], 0.5) for i in range(numVerts)] + \
                   [midVert]

        i = 0
        vs = [newVerts[i], newVerts[i+numVerts], newVerts[2*numVerts], newVerts[2*numVerts-1]]
        newPoly = Polygon(vs, poly.shared)
        newPoly.shared = poly.shared
        newPoly.plane = poly.plane
        newCSG.polygons.append(newPoly)

        for i in range(1, numVerts):
            vs = [newVerts[i], newVerts[numVerts+i], newVerts[2*numVerts], newVerts[numVerts+i-1]]
            newPoly = Polygon(vs, poly.shared)
            newCSG.polygons.append(newPoly)

    return newCSG
[ "def", "refine", "(", "self", ")", ":", "newCSG", "=", "CSG", "(", ")", "for", "poly", "in", "self", ".", "polygons", ":", "verts", "=", "poly", ".", "vertices", "numVerts", "=", "len", "(", "verts", ")", "if", "numVerts", "==", "0", ":", "continue", "midPos", "=", "reduce", "(", "operator", ".", "add", ",", "[", "v", ".", "pos", "for", "v", "in", "verts", "]", ")", "/", "float", "(", "numVerts", ")", "midNormal", "=", "None", "if", "verts", "[", "0", "]", ".", "normal", "is", "not", "None", ":", "midNormal", "=", "poly", ".", "plane", ".", "normal", "midVert", "=", "Vertex", "(", "midPos", ",", "midNormal", ")", "newVerts", "=", "verts", "+", "[", "verts", "[", "i", "]", ".", "interpolate", "(", "verts", "[", "(", "i", "+", "1", ")", "%", "numVerts", "]", ",", "0.5", ")", "for", "i", "in", "range", "(", "numVerts", ")", "]", "+", "[", "midVert", "]", "i", "=", "0", "vs", "=", "[", "newVerts", "[", "i", "]", ",", "newVerts", "[", "i", "+", "numVerts", "]", ",", "newVerts", "[", "2", "*", "numVerts", "]", ",", "newVerts", "[", "2", "*", "numVerts", "-", "1", "]", "]", "newPoly", "=", "Polygon", "(", "vs", ",", "poly", ".", "shared", ")", "newPoly", ".", "shared", "=", "poly", ".", "shared", "newPoly", ".", "plane", "=", "poly", ".", "plane", "newCSG", ".", "polygons", ".", "append", "(", "newPoly", ")", "for", "i", "in", "range", "(", "1", ",", "numVerts", ")", ":", "vs", "=", "[", "newVerts", "[", "i", "]", ",", "newVerts", "[", "numVerts", "+", "i", "]", ",", "newVerts", "[", "2", "*", "numVerts", "]", ",", "newVerts", "[", "numVerts", "+", "i", "-", "1", "]", "]", "newPoly", "=", "Polygon", "(", "vs", ",", "poly", ".", "shared", ")", "newCSG", ".", "polygons", ".", "append", "(", "newPoly", ")", "return", "newCSG" ]
Return a refined CSG. To each polygon, a middle point is added to each edge and to the center of the polygon
[ "Return", "a", "refined", "CSG", ".", "To", "each", "polygon", "a", "middle", "point", "is", "added", "to", "each", "edge", "and", "to", "the", "center", "of", "the", "polygon" ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L75-L111
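An easy way to see what refine does is to count polygons: each n-gon is replaced by n quads built around its edge midpoints and center vertex. A minimal usage sketch, assuming the pycsg package layout; note the body relies on reduce and operator, which on Python 3 must be imported at module level (reduce is no longer a builtin there):

from csg.core import CSG

cube = CSG.cube()                 # 6 quads
refined = cube.refine()           # each quad -> 4 quads
print(len(cube.polygons), len(refined.polygons))  # 6 24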
timknip/pycsg
csg/core.py
CSG.translate
def translate(self, disp):
    """
    Translate Geometry.
       disp: displacement (array of floats)
    """
    d = Vector(disp[0], disp[1], disp[2])
    for poly in self.polygons:
        for v in poly.vertices:
            v.pos = v.pos.plus(d)
python
def translate(self, disp):
    """
    Translate Geometry.
       disp: displacement (array of floats)
    """
    d = Vector(disp[0], disp[1], disp[2])
    for poly in self.polygons:
        for v in poly.vertices:
            v.pos = v.pos.plus(d)
[ "def", "translate", "(", "self", ",", "disp", ")", ":", "d", "=", "Vector", "(", "disp", "[", "0", "]", ",", "disp", "[", "1", "]", ",", "disp", "[", "2", "]", ")", "for", "poly", "in", "self", ".", "polygons", ":", "for", "v", "in", "poly", ".", "vertices", ":", "v", ".", "pos", "=", "v", ".", "pos", ".", "plus", "(", "d", ")" ]
Translate Geometry. disp: displacement (array of floats)
[ "Translate", "Geometry", ".", "disp", ":", "displacement", "(", "array", "of", "floats", ")" ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L113-L121
timknip/pycsg
csg/core.py
CSG.rotate
def rotate(self, axis, angleDeg):
    """
    Rotate geometry.
       axis: axis of rotation (array of floats)
       angleDeg: rotation angle in degrees
    """
    ax = Vector(axis[0], axis[1], axis[2]).unit()
    cosAngle = math.cos(math.pi * angleDeg / 180.)
    sinAngle = math.sin(math.pi * angleDeg / 180.)

    def newVector(v):
        vA = v.dot(ax)
        vPerp = v.minus(ax.times(vA))
        vPerpLen = vPerp.length()
        if vPerpLen == 0:
            # vector is parallel to axis, no need to rotate
            return v
        u1 = vPerp.unit()
        u2 = u1.cross(ax)
        vCosA = vPerpLen*cosAngle
        vSinA = vPerpLen*sinAngle
        return ax.times(vA).plus(u1.times(vCosA).plus(u2.times(vSinA)))

    for poly in self.polygons:
        for vert in poly.vertices:
            vert.pos = newVector(vert.pos)
            normal = vert.normal
            if normal.length() > 0:
                vert.normal = newVector(vert.normal)
python
def rotate(self, axis, angleDeg):
    """
    Rotate geometry.
       axis: axis of rotation (array of floats)
       angleDeg: rotation angle in degrees
    """
    ax = Vector(axis[0], axis[1], axis[2]).unit()
    cosAngle = math.cos(math.pi * angleDeg / 180.)
    sinAngle = math.sin(math.pi * angleDeg / 180.)

    def newVector(v):
        vA = v.dot(ax)
        vPerp = v.minus(ax.times(vA))
        vPerpLen = vPerp.length()
        if vPerpLen == 0:
            # vector is parallel to axis, no need to rotate
            return v
        u1 = vPerp.unit()
        u2 = u1.cross(ax)
        vCosA = vPerpLen*cosAngle
        vSinA = vPerpLen*sinAngle
        return ax.times(vA).plus(u1.times(vCosA).plus(u2.times(vSinA)))

    for poly in self.polygons:
        for vert in poly.vertices:
            vert.pos = newVector(vert.pos)
            normal = vert.normal
            if normal.length() > 0:
                vert.normal = newVector(vert.normal)
[ "def", "rotate", "(", "self", ",", "axis", ",", "angleDeg", ")", ":", "ax", "=", "Vector", "(", "axis", "[", "0", "]", ",", "axis", "[", "1", "]", ",", "axis", "[", "2", "]", ")", ".", "unit", "(", ")", "cosAngle", "=", "math", ".", "cos", "(", "math", ".", "pi", "*", "angleDeg", "/", "180.", ")", "sinAngle", "=", "math", ".", "sin", "(", "math", ".", "pi", "*", "angleDeg", "/", "180.", ")", "def", "newVector", "(", "v", ")", ":", "vA", "=", "v", ".", "dot", "(", "ax", ")", "vPerp", "=", "v", ".", "minus", "(", "ax", ".", "times", "(", "vA", ")", ")", "vPerpLen", "=", "vPerp", ".", "length", "(", ")", "if", "vPerpLen", "==", "0", ":", "# vector is parallel to axis, no need to rotate", "return", "v", "u1", "=", "vPerp", ".", "unit", "(", ")", "u2", "=", "u1", ".", "cross", "(", "ax", ")", "vCosA", "=", "vPerpLen", "*", "cosAngle", "vSinA", "=", "vPerpLen", "*", "sinAngle", "return", "ax", ".", "times", "(", "vA", ")", ".", "plus", "(", "u1", ".", "times", "(", "vCosA", ")", ".", "plus", "(", "u2", ".", "times", "(", "vSinA", ")", ")", ")", "for", "poly", "in", "self", ".", "polygons", ":", "for", "vert", "in", "poly", ".", "vertices", ":", "vert", ".", "pos", "=", "newVector", "(", "vert", ".", "pos", ")", "normal", "=", "vert", ".", "normal", "if", "normal", ".", "length", "(", ")", ">", "0", ":", "vert", ".", "normal", "=", "newVector", "(", "vert", ".", "normal", ")" ]
Rotate geometry. axis: axis of rotation (array of floats) angleDeg: rotation angle in degrees
[ "Rotate", "geometry", ".", "axis", ":", "axis", "of", "rotation", "(", "array", "of", "floats", ")", "angleDeg", ":", "rotation", "angle", "in", "degrees" ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L124-L152
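The decomposition in newVector is Rodrigues' rotation formula written out by hand: v is split into an axial part ax*(v.ax) and a perpendicular part, and only the perpendicular part is turned, so v' = ax*(v.ax) + |v_perp|*(cos(theta)*u1 + sin(theta)*u2). Vectors parallel to the axis hit the vPerpLen == 0 early return untouched. One caveat worth checking on your own data: u2 is built as u1.cross(ax) rather than ax.cross(u1), so the turn's sign convention appears opposite to the usual right-hand rule about the axis. A minimal usage sketch, assuming the pycsg package layout:

from csg.core import CSG

c = CSG.cube()
c.rotate([0., 0., 1.], 90.)   # angle in degrees; the axis need not be unit length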
timknip/pycsg
csg/core.py
CSG.toVerticesAndPolygons
def toVerticesAndPolygons(self):
    """
    Return list of vertices, polygons (cells), and the total
    number of vertex indices in the polygon connectivity list
    (count).
    """
    offset = 1.234567890
    verts = []
    polys = []
    vertexIndexMap = {}
    count = 0
    for poly in self.polygons:
        verts = poly.vertices
        cell = []
        for v in poly.vertices:
            p = v.pos
            # use string key to remove degeneracy associated
            # very close points. The format %.10e ensures that
            # points differing in the 11 digits and higher are
            # treated as the same. For instance 1.2e-10 and
            # 1.3e-10 are essentially the same.
            vKey = '%.10e,%.10e,%.10e' % (p[0] + offset, p[1] + offset, p[2] + offset)
            if not vKey in vertexIndexMap:
                vertexIndexMap[vKey] = len(vertexIndexMap)
            index = vertexIndexMap[vKey]
            cell.append(index)
            count += 1
        polys.append(cell)
    # sort by index
    sortedVertexIndex = sorted(vertexIndexMap.items(), key=operator.itemgetter(1))
    verts = []
    for v, i in sortedVertexIndex:
        p = []
        for c in v.split(','):
            p.append(float(c) - offset)
        verts.append(tuple(p))
    return verts, polys, count
python
def toVerticesAndPolygons(self):
    """
    Return list of vertices, polygons (cells), and the total
    number of vertex indices in the polygon connectivity list
    (count).
    """
    offset = 1.234567890
    verts = []
    polys = []
    vertexIndexMap = {}
    count = 0
    for poly in self.polygons:
        verts = poly.vertices
        cell = []
        for v in poly.vertices:
            p = v.pos
            # use string key to remove degeneracy associated
            # very close points. The format %.10e ensures that
            # points differing in the 11 digits and higher are
            # treated as the same. For instance 1.2e-10 and
            # 1.3e-10 are essentially the same.
            vKey = '%.10e,%.10e,%.10e' % (p[0] + offset, p[1] + offset, p[2] + offset)
            if not vKey in vertexIndexMap:
                vertexIndexMap[vKey] = len(vertexIndexMap)
            index = vertexIndexMap[vKey]
            cell.append(index)
            count += 1
        polys.append(cell)
    # sort by index
    sortedVertexIndex = sorted(vertexIndexMap.items(), key=operator.itemgetter(1))
    verts = []
    for v, i in sortedVertexIndex:
        p = []
        for c in v.split(','):
            p.append(float(c) - offset)
        verts.append(tuple(p))
    return verts, polys, count
[ "def", "toVerticesAndPolygons", "(", "self", ")", ":", "offset", "=", "1.234567890", "verts", "=", "[", "]", "polys", "=", "[", "]", "vertexIndexMap", "=", "{", "}", "count", "=", "0", "for", "poly", "in", "self", ".", "polygons", ":", "verts", "=", "poly", ".", "vertices", "cell", "=", "[", "]", "for", "v", "in", "poly", ".", "vertices", ":", "p", "=", "v", ".", "pos", "# use string key to remove degeneracy associated", "# very close points. The format %.10e ensures that", "# points differing in the 11 digits and higher are ", "# treated as the same. For instance 1.2e-10 and ", "# 1.3e-10 are essentially the same.", "vKey", "=", "'%.10e,%.10e,%.10e'", "%", "(", "p", "[", "0", "]", "+", "offset", ",", "p", "[", "1", "]", "+", "offset", ",", "p", "[", "2", "]", "+", "offset", ")", "if", "not", "vKey", "in", "vertexIndexMap", ":", "vertexIndexMap", "[", "vKey", "]", "=", "len", "(", "vertexIndexMap", ")", "index", "=", "vertexIndexMap", "[", "vKey", "]", "cell", ".", "append", "(", "index", ")", "count", "+=", "1", "polys", ".", "append", "(", "cell", ")", "# sort by index", "sortedVertexIndex", "=", "sorted", "(", "vertexIndexMap", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "verts", "=", "[", "]", "for", "v", ",", "i", "in", "sortedVertexIndex", ":", "p", "=", "[", "]", "for", "c", "in", "v", ".", "split", "(", "','", ")", ":", "p", ".", "append", "(", "float", "(", "c", ")", "-", "offset", ")", "verts", ".", "append", "(", "tuple", "(", "p", ")", ")", "return", "verts", ",", "polys", ",", "count" ]
Return list of vertices, polygons (cells), and the total number of vertex indices in the polygon connectivity list (count).
[ "Return", "list", "of", "vertices", "polygons", "(", "cells", ")", "and", "the", "total", "number", "of", "vertex", "indices", "in", "the", "polygon", "connectivity", "list", "(", "count", ")", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L154-L193
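The string-key trick above is a vertex-welding scheme: formatting shifted coordinates with %.10e keeps only about 11 significant digits, so nearly identical points collapse to the same dictionary key and hence the same index. The fixed offset matters, because without it the relative precision of %.10e would keep tiny distinct values like 1.2e-10 and 1.3e-10 apart. A small self-contained demonstration of the key function:

offset = 1.234567890
key = lambda p: '%.10e,%.10e,%.10e' % (p[0] + offset, p[1] + offset, p[2] + offset)

print(key((1.2e-10, 0.0, 0.0)) == key((1.3e-10, 0.0, 0.0)))  # True: welded together
print('%.10e' % 1.2e-10 == '%.10e' % 1.3e-10)                # False without the offset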
timknip/pycsg
csg/core.py
CSG.saveVTK
def saveVTK(self, filename):
    """
    Save polygons in VTK file.
    """
    with open(filename, 'w') as f:
        f.write('# vtk DataFile Version 3.0\n')
        f.write('pycsg output\n')
        f.write('ASCII\n')
        f.write('DATASET POLYDATA\n')

        verts, cells, count = self.toVerticesAndPolygons()

        f.write('POINTS {0} float\n'.format(len(verts)))
        for v in verts:
            f.write('{0} {1} {2}\n'.format(v[0], v[1], v[2]))
        numCells = len(cells)
        f.write('POLYGONS {0} {1}\n'.format(numCells, count + numCells))
        for cell in cells:
            f.write('{0} '.format(len(cell)))
            for index in cell:
                f.write('{0} '.format(index))
            f.write('\n')
python
def saveVTK(self, filename):
    """
    Save polygons in VTK file.
    """
    with open(filename, 'w') as f:
        f.write('# vtk DataFile Version 3.0\n')
        f.write('pycsg output\n')
        f.write('ASCII\n')
        f.write('DATASET POLYDATA\n')

        verts, cells, count = self.toVerticesAndPolygons()

        f.write('POINTS {0} float\n'.format(len(verts)))
        for v in verts:
            f.write('{0} {1} {2}\n'.format(v[0], v[1], v[2]))
        numCells = len(cells)
        f.write('POLYGONS {0} {1}\n'.format(numCells, count + numCells))
        for cell in cells:
            f.write('{0} '.format(len(cell)))
            for index in cell:
                f.write('{0} '.format(index))
            f.write('\n')
[ "def", "saveVTK", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'# vtk DataFile Version 3.0\\n'", ")", "f", ".", "write", "(", "'pycsg output\\n'", ")", "f", ".", "write", "(", "'ASCII\\n'", ")", "f", ".", "write", "(", "'DATASET POLYDATA\\n'", ")", "verts", ",", "cells", ",", "count", "=", "self", ".", "toVerticesAndPolygons", "(", ")", "f", ".", "write", "(", "'POINTS {0} float\\n'", ".", "format", "(", "len", "(", "verts", ")", ")", ")", "for", "v", "in", "verts", ":", "f", ".", "write", "(", "'{0} {1} {2}\\n'", ".", "format", "(", "v", "[", "0", "]", ",", "v", "[", "1", "]", ",", "v", "[", "2", "]", ")", ")", "numCells", "=", "len", "(", "cells", ")", "f", ".", "write", "(", "'POLYGONS {0} {1}\\n'", ".", "format", "(", "numCells", ",", "count", "+", "numCells", ")", ")", "for", "cell", "in", "cells", ":", "f", ".", "write", "(", "'{0} '", ".", "format", "(", "len", "(", "cell", ")", ")", ")", "for", "index", "in", "cell", ":", "f", ".", "write", "(", "'{0} '", ".", "format", "(", "index", ")", ")", "f", ".", "write", "(", "'\\n'", ")" ]
Save polygons in VTK file.
[ "Save", "polygons", "in", "VTK", "file", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L195-L216
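In the legacy ASCII VTK format the POLYGONS header takes the number of cells and the total length of the connectivity list, which includes one cell-size integer per cell in addition to the vertex indices. That is why the code writes count + numCells. A sketch of the output for a single triangle, plus a usage line:

# POINTS 3 float
# 0.0 0.0 0.0
# 1.0 0.0 0.0
# 0.0 1.0 0.0
# POLYGONS 1 4
# 3 0 1 2
#
# "4" = 1 size integer + 3 indices, i.e. count + numCells.
from csg.core import CSG
CSG.sphere(slices=8, stacks=4).saveVTK('sphere.vtk')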
timknip/pycsg
csg/core.py
CSG.union
def union(self, csg):
    """
    Return a new CSG solid representing space in either this solid or in the
    solid `csg`. Neither this solid nor the solid `csg` are modified.::

        A.union(B)

        +-------+            +-------+
        |       |            |       |
        |   A   |            |       |
        |    +--+----+   =   |       +----+
        +----+--+    |       +----+       |
             |   B   |            |       |
             |       |            |       |
             +-------+            +-------+
    """
    a = BSPNode(self.clone().polygons)
    b = BSPNode(csg.clone().polygons)
    a.clipTo(b)
    b.clipTo(a)
    b.invert()
    b.clipTo(a)
    b.invert()
    a.build(b.allPolygons());
    return CSG.fromPolygons(a.allPolygons())
python
def union(self, csg):
    """
    Return a new CSG solid representing space in either this solid or in the
    solid `csg`. Neither this solid nor the solid `csg` are modified.::

        A.union(B)

        +-------+            +-------+
        |       |            |       |
        |   A   |            |       |
        |    +--+----+   =   |       +----+
        +----+--+    |       +----+       |
             |   B   |            |       |
             |       |            |       |
             +-------+            +-------+
    """
    a = BSPNode(self.clone().polygons)
    b = BSPNode(csg.clone().polygons)
    a.clipTo(b)
    b.clipTo(a)
    b.invert()
    b.clipTo(a)
    b.invert()
    a.build(b.allPolygons());
    return CSG.fromPolygons(a.allPolygons())
[ "def", "union", "(", "self", ",", "csg", ")", ":", "a", "=", "BSPNode", "(", "self", ".", "clone", "(", ")", ".", "polygons", ")", "b", "=", "BSPNode", "(", "csg", ".", "clone", "(", ")", ".", "polygons", ")", "a", ".", "clipTo", "(", "b", ")", "b", ".", "clipTo", "(", "a", ")", "b", ".", "invert", "(", ")", "b", ".", "clipTo", "(", "a", ")", "b", ".", "invert", "(", ")", "a", ".", "build", "(", "b", ".", "allPolygons", "(", ")", ")", "return", "CSG", ".", "fromPolygons", "(", "a", ".", "allPolygons", "(", ")", ")" ]
Return a new CSG solid representing space in either this solid or in the
solid `csg`. Neither this solid nor the solid `csg` are modified.::

    A.union(B)

    +-------+            +-------+
    |       |            |       |
    |   A   |            |       |
    |    +--+----+   =   |       +----+
    +----+--+    |       +----+       |
         |   B   |            |       |
         |       |            |       |
         +-------+            +-------+
[ "Return", "a", "new", "CSG", "solid", "representing", "space", "in", "either", "this", "solid", "or", "in", "the", "solid", "csg", ".", "Neither", "this", "solid", "nor", "the", "solid", "csg", "are", "modified", ".", "::", "A", ".", "union", "(", "B", ")", "+", "-------", "+", "+", "-------", "+", "|", "|", "|", "|", "|", "A", "|", "|", "|", "|", "+", "--", "+", "----", "+", "=", "|", "+", "----", "+", "+", "----", "+", "--", "+", "|", "+", "----", "+", "|", "|", "B", "|", "|", "|", "|", "|", "|", "|", "+", "-------", "+", "+", "-------", "+" ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L218-L242
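The clip sequence above is the standard csg.js idiom: each BSP tree is clipped against the other, and the extra invert/clipTo/invert pass on b discards the portion of b inside a (including coplanar faces) before b's remaining polygons are merged back into a's tree. A minimal usage sketch, assuming the pycsg package layout:

from csg.core import CSG

a = CSG.cube()
b = CSG.sphere(radius=1.3)
a.union(b).saveVTK('union.vtk')   # a and b themselves are left untouched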
timknip/pycsg
csg/core.py
CSG.inverse
def inverse(self):
    """
    Return a new CSG solid with solid and empty space switched. This solid is
    not modified.
    """
    csg = self.clone()
    map(lambda p: p.flip(), csg.polygons)
    return csg
python
def inverse(self):
    """
    Return a new CSG solid with solid and empty space switched. This solid is
    not modified.
    """
    csg = self.clone()
    map(lambda p: p.flip(), csg.polygons)
    return csg
[ "def", "inverse", "(", "self", ")", ":", "csg", "=", "self", ".", "clone", "(", ")", "map", "(", "lambda", "p", ":", "p", ".", "flip", "(", ")", ",", "csg", ".", "polygons", ")", "return", "csg" ]
Return a new CSG solid with solid and empty space switched. This solid is not modified.
[ "Return", "a", "new", "CSG", "solid", "with", "solid", "and", "empty", "space", "switched", ".", "This", "solid", "is", "not", "modified", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L308-L315
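One portability note on the body above: under Python 2 map runs eagerly, but under Python 3 it returns a lazy iterator that is never consumed here, so the p.flip() calls never execute and inverse returns an unflipped clone. A hedged sketch of a version that works on both (this is a suggested fix, not what the recorded source does):

def inverse(self):
    """
    Return a new CSG solid with solid and empty space switched. This solid is
    not modified.
    """
    csg = self.clone()
    for p in csg.polygons:   # explicit loop instead of a lazy map()
        p.flip()
    return csg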
timknip/pycsg
csg/core.py
CSG.cube
def cube(cls, center=[0,0,0], radius=[1,1,1]):
    """
    Construct an axis-aligned solid cuboid. Optional parameters are `center` and
    `radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
    specified using a single number or a list of three numbers, one for each axis.

    Example code::

        cube = CSG.cube(
          center=[0, 0, 0],
          radius=1
        )
    """
    c = Vector(0, 0, 0)
    r = [1, 1, 1]
    if isinstance(center, list): c = Vector(center)
    if isinstance(radius, list): r = radius
    else: r = [radius, radius, radius]

    polygons = list(map(
        lambda v: Polygon(
            list(map(lambda i:
                Vertex(
                    Vector(
                        c.x + r[0] * (2 * bool(i & 1) - 1),
                        c.y + r[1] * (2 * bool(i & 2) - 1),
                        c.z + r[2] * (2 * bool(i & 4) - 1)
                    ),
                    None
                ), v[0]))),
            [
                [[0, 4, 6, 2], [-1, 0, 0]],
                [[1, 3, 7, 5], [+1, 0, 0]],
                [[0, 1, 5, 4], [0, -1, 0]],
                [[2, 6, 7, 3], [0, +1, 0]],
                [[0, 2, 3, 1], [0, 0, -1]],
                [[4, 5, 7, 6], [0, 0, +1]]
            ]))
    return CSG.fromPolygons(polygons)
python
def cube(cls, center=[0,0,0], radius=[1,1,1]):
    """
    Construct an axis-aligned solid cuboid. Optional parameters are `center` and
    `radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
    specified using a single number or a list of three numbers, one for each axis.

    Example code::

        cube = CSG.cube(
          center=[0, 0, 0],
          radius=1
        )
    """
    c = Vector(0, 0, 0)
    r = [1, 1, 1]
    if isinstance(center, list): c = Vector(center)
    if isinstance(radius, list): r = radius
    else: r = [radius, radius, radius]

    polygons = list(map(
        lambda v: Polygon(
            list(map(lambda i:
                Vertex(
                    Vector(
                        c.x + r[0] * (2 * bool(i & 1) - 1),
                        c.y + r[1] * (2 * bool(i & 2) - 1),
                        c.z + r[2] * (2 * bool(i & 4) - 1)
                    ),
                    None
                ), v[0]))),
            [
                [[0, 4, 6, 2], [-1, 0, 0]],
                [[1, 3, 7, 5], [+1, 0, 0]],
                [[0, 1, 5, 4], [0, -1, 0]],
                [[2, 6, 7, 3], [0, +1, 0]],
                [[0, 2, 3, 1], [0, 0, -1]],
                [[4, 5, 7, 6], [0, 0, +1]]
            ]))
    return CSG.fromPolygons(polygons)
[ "def", "cube", "(", "cls", ",", "center", "=", "[", "0", ",", "0", ",", "0", "]", ",", "radius", "=", "[", "1", ",", "1", ",", "1", "]", ")", ":", "c", "=", "Vector", "(", "0", ",", "0", ",", "0", ")", "r", "=", "[", "1", ",", "1", ",", "1", "]", "if", "isinstance", "(", "center", ",", "list", ")", ":", "c", "=", "Vector", "(", "center", ")", "if", "isinstance", "(", "radius", ",", "list", ")", ":", "r", "=", "radius", "else", ":", "r", "=", "[", "radius", ",", "radius", ",", "radius", "]", "polygons", "=", "list", "(", "map", "(", "lambda", "v", ":", "Polygon", "(", "list", "(", "map", "(", "lambda", "i", ":", "Vertex", "(", "Vector", "(", "c", ".", "x", "+", "r", "[", "0", "]", "*", "(", "2", "*", "bool", "(", "i", "&", "1", ")", "-", "1", ")", ",", "c", ".", "y", "+", "r", "[", "1", "]", "*", "(", "2", "*", "bool", "(", "i", "&", "2", ")", "-", "1", ")", ",", "c", ".", "z", "+", "r", "[", "2", "]", "*", "(", "2", "*", "bool", "(", "i", "&", "4", ")", "-", "1", ")", ")", ",", "None", ")", ",", "v", "[", "0", "]", ")", ")", ")", ",", "[", "[", "[", "0", ",", "4", ",", "6", ",", "2", "]", ",", "[", "-", "1", ",", "0", ",", "0", "]", "]", ",", "[", "[", "1", ",", "3", ",", "7", ",", "5", "]", ",", "[", "+", "1", ",", "0", ",", "0", "]", "]", ",", "[", "[", "0", ",", "1", ",", "5", ",", "4", "]", ",", "[", "0", ",", "-", "1", ",", "0", "]", "]", ",", "[", "[", "2", ",", "6", ",", "7", ",", "3", "]", ",", "[", "0", ",", "+", "1", ",", "0", "]", "]", ",", "[", "[", "0", ",", "2", ",", "3", ",", "1", "]", ",", "[", "0", ",", "0", ",", "-", "1", "]", "]", ",", "[", "[", "4", ",", "5", ",", "7", ",", "6", "]", ",", "[", "0", ",", "0", ",", "+", "1", "]", "]", "]", ")", ")", "return", "CSG", ".", "fromPolygons", "(", "polygons", ")" ]
Construct an axis-aligned solid cuboid. Optional parameters are `center` and
`radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
specified using a single number or a list of three numbers, one for each axis.

Example code::

    cube = CSG.cube(
      center=[0, 0, 0],
      radius=1
    )
[ "Construct", "an", "axis", "-", "aligned", "solid", "cuboid", ".", "Optional", "parameters", "are", "center", "and", "radius", "which", "default", "to", "[", "0", "0", "0", "]", "and", "[", "1", "1", "1", "]", ".", "The", "radius", "can", "be", "specified", "using", "a", "single", "number", "or", "a", "list", "of", "three", "numbers", "one", "for", "each", "axis", ".", "Example", "code", "::", "cube", "=", "CSG", ".", "cube", "(", "center", "=", "[", "0", "0", "0", "]", "radius", "=", "1", ")" ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L318-L356
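Each corner index i in 0..7 is treated as a 3-bit code: bit 0 picks the x sign, bit 1 the y sign, bit 2 the z sign, and 2 * bool(i & mask) - 1 maps a clear bit to -1 and a set bit to +1. The second element of each face entry (e.g. [-1, 0, 0]) looks like a face normal, but it is never attached to the vertices here, since Vertex receives None. A small self-contained decode of the scheme:

for i in range(8):
    sx = 2 * bool(i & 1) - 1   # bit 0 -> x sign
    sy = 2 * bool(i & 2) - 1   # bit 1 -> y sign
    sz = 2 * bool(i & 4) - 1   # bit 2 -> z sign
    print(i, (sx, sy, sz))
# 0 (-1, -1, -1), 1 (1, -1, -1), 2 (-1, 1, -1), ..., 7 (1, 1, 1)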
timknip/pycsg
csg/core.py
CSG.sphere
def sphere(cls, **kwargs):
    """
    Returns a sphere.

    Kwargs:
        center (list): Center of sphere, default [0, 0, 0].
        radius (float): Radius of sphere, default 1.0.
        slices (int): Number of slices, default 16.
        stacks (int): Number of stacks, default 8.
    """
    center = kwargs.get('center', [0.0, 0.0, 0.0])
    if isinstance(center, float):
        center = [center, center, center]
    c = Vector(center)
    r = kwargs.get('radius', 1.0)
    if isinstance(r, list) and len(r) > 2:
        r = r[0]
    slices = kwargs.get('slices', 16)
    stacks = kwargs.get('stacks', 8)
    polygons = []

    def appendVertex(vertices, theta, phi):
        d = Vector(
            math.cos(theta) * math.sin(phi),
            math.cos(phi),
            math.sin(theta) * math.sin(phi))
        vertices.append(Vertex(c.plus(d.times(r)), d))

    dTheta = math.pi * 2.0 / float(slices)
    dPhi = math.pi / float(stacks)

    j0 = 0
    j1 = j0 + 1
    for i0 in range(0, slices):
        i1 = i0 + 1
        #  +--+
        #  | /
        #  |/
        #  +
        vertices = []
        appendVertex(vertices, i0 * dTheta, j0 * dPhi)
        appendVertex(vertices, i1 * dTheta, j1 * dPhi)
        appendVertex(vertices, i0 * dTheta, j1 * dPhi)
        polygons.append(Polygon(vertices))

    j0 = stacks - 1
    j1 = j0 + 1
    for i0 in range(0, slices):
        i1 = i0 + 1
        #  +
        #  |\
        #  | \
        #  +--+
        vertices = []
        appendVertex(vertices, i0 * dTheta, j0 * dPhi)
        appendVertex(vertices, i1 * dTheta, j0 * dPhi)
        appendVertex(vertices, i0 * dTheta, j1 * dPhi)
        polygons.append(Polygon(vertices))

    for j0 in range(1, stacks - 1):
        j1 = j0 + 0.5
        j2 = j0 + 1
        for i0 in range(0, slices):
            i1 = i0 + 0.5
            i2 = i0 + 1
            #  +---+
            #  |\ /|
            #  | x |
            #  |/ \|
            #  +---+
            verticesN = []
            appendVertex(verticesN, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesN, i2 * dTheta, j2 * dPhi)
            appendVertex(verticesN, i0 * dTheta, j2 * dPhi)
            polygons.append(Polygon(verticesN))
            verticesS = []
            appendVertex(verticesS, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesS, i0 * dTheta, j0 * dPhi)
            appendVertex(verticesS, i2 * dTheta, j0 * dPhi)
            polygons.append(Polygon(verticesS))
            verticesW = []
            appendVertex(verticesW, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesW, i0 * dTheta, j2 * dPhi)
            appendVertex(verticesW, i0 * dTheta, j0 * dPhi)
            polygons.append(Polygon(verticesW))
            verticesE = []
            appendVertex(verticesE, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesE, i2 * dTheta, j0 * dPhi)
            appendVertex(verticesE, i2 * dTheta, j2 * dPhi)
            polygons.append(Polygon(verticesE))

    return CSG.fromPolygons(polygons)
python
def sphere(cls, **kwargs):
    """
    Returns a sphere.

    Kwargs:
        center (list): Center of sphere, default [0, 0, 0].
        radius (float): Radius of sphere, default 1.0.
        slices (int): Number of slices, default 16.
        stacks (int): Number of stacks, default 8.
    """
    center = kwargs.get('center', [0.0, 0.0, 0.0])
    if isinstance(center, float):
        center = [center, center, center]
    c = Vector(center)
    r = kwargs.get('radius', 1.0)
    if isinstance(r, list) and len(r) > 2:
        r = r[0]
    slices = kwargs.get('slices', 16)
    stacks = kwargs.get('stacks', 8)
    polygons = []

    def appendVertex(vertices, theta, phi):
        d = Vector(
            math.cos(theta) * math.sin(phi),
            math.cos(phi),
            math.sin(theta) * math.sin(phi))
        vertices.append(Vertex(c.plus(d.times(r)), d))

    dTheta = math.pi * 2.0 / float(slices)
    dPhi = math.pi / float(stacks)

    j0 = 0
    j1 = j0 + 1
    for i0 in range(0, slices):
        i1 = i0 + 1
        #  +--+
        #  | /
        #  |/
        #  +
        vertices = []
        appendVertex(vertices, i0 * dTheta, j0 * dPhi)
        appendVertex(vertices, i1 * dTheta, j1 * dPhi)
        appendVertex(vertices, i0 * dTheta, j1 * dPhi)
        polygons.append(Polygon(vertices))

    j0 = stacks - 1
    j1 = j0 + 1
    for i0 in range(0, slices):
        i1 = i0 + 1
        #  +
        #  |\
        #  | \
        #  +--+
        vertices = []
        appendVertex(vertices, i0 * dTheta, j0 * dPhi)
        appendVertex(vertices, i1 * dTheta, j0 * dPhi)
        appendVertex(vertices, i0 * dTheta, j1 * dPhi)
        polygons.append(Polygon(vertices))

    for j0 in range(1, stacks - 1):
        j1 = j0 + 0.5
        j2 = j0 + 1
        for i0 in range(0, slices):
            i1 = i0 + 0.5
            i2 = i0 + 1
            #  +---+
            #  |\ /|
            #  | x |
            #  |/ \|
            #  +---+
            verticesN = []
            appendVertex(verticesN, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesN, i2 * dTheta, j2 * dPhi)
            appendVertex(verticesN, i0 * dTheta, j2 * dPhi)
            polygons.append(Polygon(verticesN))
            verticesS = []
            appendVertex(verticesS, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesS, i0 * dTheta, j0 * dPhi)
            appendVertex(verticesS, i2 * dTheta, j0 * dPhi)
            polygons.append(Polygon(verticesS))
            verticesW = []
            appendVertex(verticesW, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesW, i0 * dTheta, j2 * dPhi)
            appendVertex(verticesW, i0 * dTheta, j0 * dPhi)
            polygons.append(Polygon(verticesW))
            verticesE = []
            appendVertex(verticesE, i1 * dTheta, j1 * dPhi)
            appendVertex(verticesE, i2 * dTheta, j0 * dPhi)
            appendVertex(verticesE, i2 * dTheta, j2 * dPhi)
            polygons.append(Polygon(verticesE))

    return CSG.fromPolygons(polygons)
[ "def", "sphere", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "center", "=", "kwargs", ".", "get", "(", "'center'", ",", "[", "0.0", ",", "0.0", ",", "0.0", "]", ")", "if", "isinstance", "(", "center", ",", "float", ")", ":", "center", "=", "[", "center", ",", "center", ",", "center", "]", "c", "=", "Vector", "(", "center", ")", "r", "=", "kwargs", ".", "get", "(", "'radius'", ",", "1.0", ")", "if", "isinstance", "(", "r", ",", "list", ")", "and", "len", "(", "r", ")", ">", "2", ":", "r", "=", "r", "[", "0", "]", "slices", "=", "kwargs", ".", "get", "(", "'slices'", ",", "16", ")", "stacks", "=", "kwargs", ".", "get", "(", "'stacks'", ",", "8", ")", "polygons", "=", "[", "]", "def", "appendVertex", "(", "vertices", ",", "theta", ",", "phi", ")", ":", "d", "=", "Vector", "(", "math", ".", "cos", "(", "theta", ")", "*", "math", ".", "sin", "(", "phi", ")", ",", "math", ".", "cos", "(", "phi", ")", ",", "math", ".", "sin", "(", "theta", ")", "*", "math", ".", "sin", "(", "phi", ")", ")", "vertices", ".", "append", "(", "Vertex", "(", "c", ".", "plus", "(", "d", ".", "times", "(", "r", ")", ")", ",", "d", ")", ")", "dTheta", "=", "math", ".", "pi", "*", "2.0", "/", "float", "(", "slices", ")", "dPhi", "=", "math", ".", "pi", "/", "float", "(", "stacks", ")", "j0", "=", "0", "j1", "=", "j0", "+", "1", "for", "i0", "in", "range", "(", "0", ",", "slices", ")", ":", "i1", "=", "i0", "+", "1", "# +--+", "# | /", "# |/", "# +", "vertices", "=", "[", "]", "appendVertex", "(", "vertices", ",", "i0", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "appendVertex", "(", "vertices", ",", "i1", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "appendVertex", "(", "vertices", ",", "i0", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "vertices", ")", ")", "j0", "=", "stacks", "-", "1", "j1", "=", "j0", "+", "1", "for", "i0", "in", "range", "(", "0", ",", "slices", ")", ":", "i1", "=", "i0", "+", "1", "# +", "# |\\", "# | \\", "# +--+", "vertices", "=", "[", "]", "appendVertex", "(", "vertices", ",", "i0", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "appendVertex", "(", "vertices", ",", "i1", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "appendVertex", "(", "vertices", ",", "i0", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "vertices", ")", ")", "for", "j0", "in", "range", "(", "1", ",", "stacks", "-", "1", ")", ":", "j1", "=", "j0", "+", "0.5", "j2", "=", "j0", "+", "1", "for", "i0", "in", "range", "(", "0", ",", "slices", ")", ":", "i1", "=", "i0", "+", "0.5", "i2", "=", "i0", "+", "1", "# +---+", "# |\\ /|", "# | x |", "# |/ \\|", "# +---+", "verticesN", "=", "[", "]", "appendVertex", "(", "verticesN", ",", "i1", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "appendVertex", "(", "verticesN", ",", "i2", "*", "dTheta", ",", "j2", "*", "dPhi", ")", "appendVertex", "(", "verticesN", ",", "i0", "*", "dTheta", ",", "j2", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "verticesN", ")", ")", "verticesS", "=", "[", "]", "appendVertex", "(", "verticesS", ",", "i1", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "appendVertex", "(", "verticesS", ",", "i0", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "appendVertex", "(", "verticesS", ",", "i2", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "verticesS", ")", ")", "verticesW", "=", "[", "]", "appendVertex", "(", "verticesW", ",", "i1", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "appendVertex", "(", "verticesW", ",", "i0", 
"*", "dTheta", ",", "j2", "*", "dPhi", ")", "appendVertex", "(", "verticesW", ",", "i0", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "verticesW", ")", ")", "verticesE", "=", "[", "]", "appendVertex", "(", "verticesE", ",", "i1", "*", "dTheta", ",", "j1", "*", "dPhi", ")", "appendVertex", "(", "verticesE", ",", "i2", "*", "dTheta", ",", "j0", "*", "dPhi", ")", "appendVertex", "(", "verticesE", ",", "i2", "*", "dTheta", ",", "j2", "*", "dPhi", ")", "polygons", ".", "append", "(", "Polygon", "(", "verticesE", ")", ")", "return", "CSG", ".", "fromPolygons", "(", "polygons", ")" ]
Returns a sphere.

Kwargs:
    center (list): Center of sphere, default [0, 0, 0].
    radius (float): Radius of sphere, default 1.0.
    slices (int): Number of slices, default 16.
    stacks (int): Number of stacks, default 8.
[ "Returns", "a", "sphere", ".", "Kwargs", ":", "center", "(", "list", ")", ":", "Center", "of", "sphere", "default", "[", "0", "0", "0", "]", ".", "radius", "(", "float", ")", ":", "Radius", "of", "sphere", "default", "1", ".", "0", ".", "slices", "(", "int", ")", ":", "Number", "of", "slices", "default", "16", ".", "stacks", "(", "int", ")", ":", "Number", "of", "stacks", "default", "8", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L359-L451
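The tessellation uses single triangle fans at the two poles and, for each interior quad, four triangles meeting at the quad's midpoint (the N/S/W/E lists, matching the diamond in the ASCII comment). That gives 2*slices + 4*slices*(stacks - 2) polygons in total, so resolution grows quickly. A quick check, assuming the pycsg package layout:

from csg.core import CSG

coarse = CSG.sphere(slices=8, stacks=4)
fine = CSG.sphere(slices=32, stacks=16)
print(len(coarse.polygons), len(fine.polygons))  # 80 1856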
timknip/pycsg
csg/core.py
CSG.cylinder
def cylinder(cls, **kwargs):
    """
    Returns a cylinder.

    Kwargs:
        start (list): Start of cylinder, default [0, -1, 0].
        end (list): End of cylinder, default [0, 1, 0].
        radius (float): Radius of cylinder, default 1.0.
        slices (int): Number of slices, default 16.
    """
    s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
    e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
    if isinstance(s, list):
        s = Vector(*s)
    if isinstance(e, list):
        e = Vector(*e)
    r = kwargs.get('radius', 1.0)
    slices = kwargs.get('slices', 16)
    ray = e.minus(s)

    axisZ = ray.unit()
    isY = (math.fabs(axisZ.y) > 0.5)
    axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
    axisY = axisX.cross(axisZ).unit()
    start = Vertex(s, axisZ.negated())
    end = Vertex(e, axisZ.unit())
    polygons = []

    def point(stack, angle, normalBlend):
        out = axisX.times(math.cos(angle)).plus(
            axisY.times(math.sin(angle)))
        pos = s.plus(ray.times(stack)).plus(out.times(r))
        normal = out.times(1.0 - math.fabs(normalBlend)).plus(
            axisZ.times(normalBlend))
        return Vertex(pos, normal)

    dt = math.pi * 2.0 / float(slices)
    for i in range(0, slices):
        t0 = i * dt
        i1 = (i + 1) % slices
        t1 = i1 * dt
        polygons.append(Polygon([start.clone(),
                                 point(0., t0, -1.),
                                 point(0., t1, -1.)]))
        polygons.append(Polygon([point(0., t1, 0.),
                                 point(0., t0, 0.),
                                 point(1., t0, 0.),
                                 point(1., t1, 0.)]))
        polygons.append(Polygon([end.clone(),
                                 point(1., t1, 1.),
                                 point(1., t0, 1.)]))
    return CSG.fromPolygons(polygons)
python
def cylinder(cls, **kwargs):
    """
    Returns a cylinder.

    Kwargs:
        start (list): Start of cylinder, default [0, -1, 0].
        end (list): End of cylinder, default [0, 1, 0].
        radius (float): Radius of cylinder, default 1.0.
        slices (int): Number of slices, default 16.
    """
    s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
    e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
    if isinstance(s, list):
        s = Vector(*s)
    if isinstance(e, list):
        e = Vector(*e)
    r = kwargs.get('radius', 1.0)
    slices = kwargs.get('slices', 16)
    ray = e.minus(s)

    axisZ = ray.unit()
    isY = (math.fabs(axisZ.y) > 0.5)
    axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
    axisY = axisX.cross(axisZ).unit()
    start = Vertex(s, axisZ.negated())
    end = Vertex(e, axisZ.unit())
    polygons = []

    def point(stack, angle, normalBlend):
        out = axisX.times(math.cos(angle)).plus(
            axisY.times(math.sin(angle)))
        pos = s.plus(ray.times(stack)).plus(out.times(r))
        normal = out.times(1.0 - math.fabs(normalBlend)).plus(
            axisZ.times(normalBlend))
        return Vertex(pos, normal)

    dt = math.pi * 2.0 / float(slices)
    for i in range(0, slices):
        t0 = i * dt
        i1 = (i + 1) % slices
        t1 = i1 * dt
        polygons.append(Polygon([start.clone(),
                                 point(0., t0, -1.),
                                 point(0., t1, -1.)]))
        polygons.append(Polygon([point(0., t1, 0.),
                                 point(0., t0, 0.),
                                 point(1., t0, 0.),
                                 point(1., t1, 0.)]))
        polygons.append(Polygon([end.clone(),
                                 point(1., t1, 1.),
                                 point(1., t0, 1.)]))
    return CSG.fromPolygons(polygons)
[ "def", "cylinder", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "s", "=", "kwargs", ".", "get", "(", "'start'", ",", "Vector", "(", "0.0", ",", "-", "1.0", ",", "0.0", ")", ")", "e", "=", "kwargs", ".", "get", "(", "'end'", ",", "Vector", "(", "0.0", ",", "1.0", ",", "0.0", ")", ")", "if", "isinstance", "(", "s", ",", "list", ")", ":", "s", "=", "Vector", "(", "*", "s", ")", "if", "isinstance", "(", "e", ",", "list", ")", ":", "e", "=", "Vector", "(", "*", "e", ")", "r", "=", "kwargs", ".", "get", "(", "'radius'", ",", "1.0", ")", "slices", "=", "kwargs", ".", "get", "(", "'slices'", ",", "16", ")", "ray", "=", "e", ".", "minus", "(", "s", ")", "axisZ", "=", "ray", ".", "unit", "(", ")", "isY", "=", "(", "math", ".", "fabs", "(", "axisZ", ".", "y", ")", ">", "0.5", ")", "axisX", "=", "Vector", "(", "float", "(", "isY", ")", ",", "float", "(", "not", "isY", ")", ",", "0", ")", ".", "cross", "(", "axisZ", ")", ".", "unit", "(", ")", "axisY", "=", "axisX", ".", "cross", "(", "axisZ", ")", ".", "unit", "(", ")", "start", "=", "Vertex", "(", "s", ",", "axisZ", ".", "negated", "(", ")", ")", "end", "=", "Vertex", "(", "e", ",", "axisZ", ".", "unit", "(", ")", ")", "polygons", "=", "[", "]", "def", "point", "(", "stack", ",", "angle", ",", "normalBlend", ")", ":", "out", "=", "axisX", ".", "times", "(", "math", ".", "cos", "(", "angle", ")", ")", ".", "plus", "(", "axisY", ".", "times", "(", "math", ".", "sin", "(", "angle", ")", ")", ")", "pos", "=", "s", ".", "plus", "(", "ray", ".", "times", "(", "stack", ")", ")", ".", "plus", "(", "out", ".", "times", "(", "r", ")", ")", "normal", "=", "out", ".", "times", "(", "1.0", "-", "math", ".", "fabs", "(", "normalBlend", ")", ")", ".", "plus", "(", "axisZ", ".", "times", "(", "normalBlend", ")", ")", "return", "Vertex", "(", "pos", ",", "normal", ")", "dt", "=", "math", ".", "pi", "*", "2.0", "/", "float", "(", "slices", ")", "for", "i", "in", "range", "(", "0", ",", "slices", ")", ":", "t0", "=", "i", "*", "dt", "i1", "=", "(", "i", "+", "1", ")", "%", "slices", "t1", "=", "i1", "*", "dt", "polygons", ".", "append", "(", "Polygon", "(", "[", "start", ".", "clone", "(", ")", ",", "point", "(", "0.", ",", "t0", ",", "-", "1.", ")", ",", "point", "(", "0.", ",", "t1", ",", "-", "1.", ")", "]", ")", ")", "polygons", ".", "append", "(", "Polygon", "(", "[", "point", "(", "0.", ",", "t1", ",", "0.", ")", ",", "point", "(", "0.", ",", "t0", ",", "0.", ")", ",", "point", "(", "1.", ",", "t0", ",", "0.", ")", ",", "point", "(", "1.", ",", "t1", ",", "0.", ")", "]", ")", ")", "polygons", ".", "append", "(", "Polygon", "(", "[", "end", ".", "clone", "(", ")", ",", "point", "(", "1.", ",", "t1", ",", "1.", ")", ",", "point", "(", "1.", ",", "t0", ",", "1.", ")", "]", ")", ")", "return", "CSG", ".", "fromPolygons", "(", "polygons", ")" ]
Returns a cylinder. Kwargs: start (list): Start of cylinder, default [0, -1, 0]. end (list): End of cylinder, default [0, 1, 0]. radius (float): Radius of cylinder, default 1.0. slices (int): Number of slices, default 16.
[ "Returns", "a", "cylinder", ".", "Kwargs", ":", "start", "(", "list", ")", ":", "Start", "of", "cylinder", "default", "[", "0", "-", "1", "0", "]", ".", "end", "(", "list", ")", ":", "End", "of", "cylinder", "default", "[", "0", "1", "0", "]", ".", "radius", "(", "float", ")", ":", "Radius", "of", "cylinder", "default", "1", ".", "0", ".", "slices", "(", "int", ")", ":", "Number", "of", "slices", "default", "16", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L454-L508
timknip/pycsg
csg/core.py
CSG.cone
def cone(cls, **kwargs):
    """
    Returns a cone.

    Kwargs:
        start (list): Start of cone, default [0, -1, 0].
        end (list): End of cone, default [0, 1, 0].
        radius (float): Maximum radius of cone at start, default 1.0.
        slices (int): Number of slices, default 16.
    """
    s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
    e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
    if isinstance(s, list):
        s = Vector(*s)
    if isinstance(e, list):
        e = Vector(*e)
    r = kwargs.get('radius', 1.0)
    slices = kwargs.get('slices', 16)
    ray = e.minus(s)

    axisZ = ray.unit()
    isY = (math.fabs(axisZ.y) > 0.5)
    axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
    axisY = axisX.cross(axisZ).unit()
    startNormal = axisZ.negated()
    start = Vertex(s, startNormal)
    polygons = []

    taperAngle = math.atan2(r, ray.length())
    sinTaperAngle = math.sin(taperAngle)
    cosTaperAngle = math.cos(taperAngle)

    def point(angle):
        # radial direction pointing out
        out = axisX.times(math.cos(angle)).plus(
            axisY.times(math.sin(angle)))
        pos = s.plus(out.times(r))
        # normal taking into account the tapering of the cone
        normal = out.times(cosTaperAngle).plus(axisZ.times(sinTaperAngle))
        return pos, normal

    dt = math.pi * 2.0 / float(slices)
    for i in range(0, slices):
        t0 = i * dt
        i1 = (i + 1) % slices
        t1 = i1 * dt
        # coordinates and associated normal pointing outwards of the cone's
        # side
        p0, n0 = point(t0)
        p1, n1 = point(t1)
        # average normal for the tip
        nAvg = n0.plus(n1).times(0.5)
        # polygon on the low side (disk sector)
        polyStart = Polygon([start.clone(),
                             Vertex(p0, startNormal),
                             Vertex(p1, startNormal)])
        polygons.append(polyStart)
        # polygon extending from the low side to the tip
        polySide = Polygon([Vertex(p0, n0),
                            Vertex(e, nAvg),
                            Vertex(p1, n1)])
        polygons.append(polySide)

    return CSG.fromPolygons(polygons)
python
def cone(cls, **kwargs):
    """
    Returns a cone.

    Kwargs:
        start (list): Start of cone, default [0, -1, 0].
        end (list): End of cone, default [0, 1, 0].
        radius (float): Maximum radius of cone at start, default 1.0.
        slices (int): Number of slices, default 16.
    """
    s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
    e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
    if isinstance(s, list):
        s = Vector(*s)
    if isinstance(e, list):
        e = Vector(*e)
    r = kwargs.get('radius', 1.0)
    slices = kwargs.get('slices', 16)
    ray = e.minus(s)

    axisZ = ray.unit()
    isY = (math.fabs(axisZ.y) > 0.5)
    axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
    axisY = axisX.cross(axisZ).unit()
    startNormal = axisZ.negated()
    start = Vertex(s, startNormal)
    polygons = []

    taperAngle = math.atan2(r, ray.length())
    sinTaperAngle = math.sin(taperAngle)
    cosTaperAngle = math.cos(taperAngle)

    def point(angle):
        # radial direction pointing out
        out = axisX.times(math.cos(angle)).plus(
            axisY.times(math.sin(angle)))
        pos = s.plus(out.times(r))
        # normal taking into account the tapering of the cone
        normal = out.times(cosTaperAngle).plus(axisZ.times(sinTaperAngle))
        return pos, normal

    dt = math.pi * 2.0 / float(slices)
    for i in range(0, slices):
        t0 = i * dt
        i1 = (i + 1) % slices
        t1 = i1 * dt
        # coordinates and associated normal pointing outwards of the cone's
        # side
        p0, n0 = point(t0)
        p1, n1 = point(t1)
        # average normal for the tip
        nAvg = n0.plus(n1).times(0.5)
        # polygon on the low side (disk sector)
        polyStart = Polygon([start.clone(),
                             Vertex(p0, startNormal),
                             Vertex(p1, startNormal)])
        polygons.append(polyStart)
        # polygon extending from the low side to the tip
        polySide = Polygon([Vertex(p0, n0),
                            Vertex(e, nAvg),
                            Vertex(p1, n1)])
        polygons.append(polySide)

    return CSG.fromPolygons(polygons)
[ "def", "cone", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "s", "=", "kwargs", ".", "get", "(", "'start'", ",", "Vector", "(", "0.0", ",", "-", "1.0", ",", "0.0", ")", ")", "e", "=", "kwargs", ".", "get", "(", "'end'", ",", "Vector", "(", "0.0", ",", "1.0", ",", "0.0", ")", ")", "if", "isinstance", "(", "s", ",", "list", ")", ":", "s", "=", "Vector", "(", "*", "s", ")", "if", "isinstance", "(", "e", ",", "list", ")", ":", "e", "=", "Vector", "(", "*", "e", ")", "r", "=", "kwargs", ".", "get", "(", "'radius'", ",", "1.0", ")", "slices", "=", "kwargs", ".", "get", "(", "'slices'", ",", "16", ")", "ray", "=", "e", ".", "minus", "(", "s", ")", "axisZ", "=", "ray", ".", "unit", "(", ")", "isY", "=", "(", "math", ".", "fabs", "(", "axisZ", ".", "y", ")", ">", "0.5", ")", "axisX", "=", "Vector", "(", "float", "(", "isY", ")", ",", "float", "(", "not", "isY", ")", ",", "0", ")", ".", "cross", "(", "axisZ", ")", ".", "unit", "(", ")", "axisY", "=", "axisX", ".", "cross", "(", "axisZ", ")", ".", "unit", "(", ")", "startNormal", "=", "axisZ", ".", "negated", "(", ")", "start", "=", "Vertex", "(", "s", ",", "startNormal", ")", "polygons", "=", "[", "]", "taperAngle", "=", "math", ".", "atan2", "(", "r", ",", "ray", ".", "length", "(", ")", ")", "sinTaperAngle", "=", "math", ".", "sin", "(", "taperAngle", ")", "cosTaperAngle", "=", "math", ".", "cos", "(", "taperAngle", ")", "def", "point", "(", "angle", ")", ":", "# radial direction pointing out", "out", "=", "axisX", ".", "times", "(", "math", ".", "cos", "(", "angle", ")", ")", ".", "plus", "(", "axisY", ".", "times", "(", "math", ".", "sin", "(", "angle", ")", ")", ")", "pos", "=", "s", ".", "plus", "(", "out", ".", "times", "(", "r", ")", ")", "# normal taking into account the tapering of the cone", "normal", "=", "out", ".", "times", "(", "cosTaperAngle", ")", ".", "plus", "(", "axisZ", ".", "times", "(", "sinTaperAngle", ")", ")", "return", "pos", ",", "normal", "dt", "=", "math", ".", "pi", "*", "2.0", "/", "float", "(", "slices", ")", "for", "i", "in", "range", "(", "0", ",", "slices", ")", ":", "t0", "=", "i", "*", "dt", "i1", "=", "(", "i", "+", "1", ")", "%", "slices", "t1", "=", "i1", "*", "dt", "# coordinates and associated normal pointing outwards of the cone's", "# side", "p0", ",", "n0", "=", "point", "(", "t0", ")", "p1", ",", "n1", "=", "point", "(", "t1", ")", "# average normal for the tip", "nAvg", "=", "n0", ".", "plus", "(", "n1", ")", ".", "times", "(", "0.5", ")", "# polygon on the low side (disk sector)", "polyStart", "=", "Polygon", "(", "[", "start", ".", "clone", "(", ")", ",", "Vertex", "(", "p0", ",", "startNormal", ")", ",", "Vertex", "(", "p1", ",", "startNormal", ")", "]", ")", "polygons", ".", "append", "(", "polyStart", ")", "# polygon extending from the low side to the tip", "polySide", "=", "Polygon", "(", "[", "Vertex", "(", "p0", ",", "n0", ")", ",", "Vertex", "(", "e", ",", "nAvg", ")", ",", "Vertex", "(", "p1", ",", "n1", ")", "]", ")", "polygons", ".", "append", "(", "polySide", ")", "return", "CSG", ".", "fromPolygons", "(", "polygons", ")" ]
Returns a cone.

Kwargs:
    start (list): Start of cone, default [0, -1, 0].
    end (list): End of cone, default [0, 1, 0].
    radius (float): Maximum radius of cone at start, default 1.0.
    slices (int): Number of slices, default 16.
[ "Returns", "a", "cone", ".", "Kwargs", ":", "start", "(", "list", ")", ":", "Start", "of", "cone", "default", "[", "0", "-", "1", "0", "]", ".", "end", "(", "list", ")", ":", "End", "of", "cone", "default", "[", "0", "1", "0", "]", ".", "radius", "(", "float", ")", ":", "Maximum", "radius", "of", "cone", "at", "start", "default", "1", ".", "0", ".", "slices", "(", "int", ")", ":", "Number", "of", "slices", "default", "16", "." ]
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/core.py#L511-L573
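The taper math above works because atan2(r, ray.length()) is the cone's half-angle: tilting the radial direction toward the axis by that angle yields a normal perpendicular to the slanted side. A small self-contained check in the 2D (radial, axial) plane:

import math

r, h = 1.0, 2.0                        # base radius, axial length
taper = math.atan2(r, h)               # cone half-angle
slant = (-r, h)                        # base-to-tip direction of the side
normal = (math.cos(taper), math.sin(taper))
print(slant[0] * normal[0] + slant[1] * normal[1])  # 0.0 -> perpendicular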
vberlier/nbtlib
nbtlib/nbt.py
load
def load(filename, *, gzipped=None, byteorder='big'):
    """Load the nbt file at the specified location.

    By default, the function will figure out by itself if the file is
    gzipped before loading it. You can pass a boolean to the `gzipped`
    keyword only argument to specify explicitly whether the file is
    compressed or not. You can also use the `byteorder` keyword only
    argument to specify whether the file is little-endian or big-endian.
    """
    if gzipped is not None:
        return File.load(filename, gzipped, byteorder)

    # if we don't know we read the magic number
    with open(filename, 'rb') as buff:
        magic_number = buff.read(2)
        buff.seek(0)

        if magic_number == b'\x1f\x8b':
            buff = gzip.GzipFile(fileobj=buff)

        return File.from_buffer(buff, byteorder)
python
def load(filename, *, gzipped=None, byteorder='big'):
    """Load the nbt file at the specified location.

    By default, the function will figure out by itself if the file is
    gzipped before loading it. You can pass a boolean to the `gzipped`
    keyword only argument to specify explicitly whether the file is
    compressed or not. You can also use the `byteorder` keyword only
    argument to specify whether the file is little-endian or big-endian.
    """
    if gzipped is not None:
        return File.load(filename, gzipped, byteorder)

    # if we don't know we read the magic number
    with open(filename, 'rb') as buff:
        magic_number = buff.read(2)
        buff.seek(0)

        if magic_number == b'\x1f\x8b':
            buff = gzip.GzipFile(fileobj=buff)

        return File.from_buffer(buff, byteorder)
[ "def", "load", "(", "filename", ",", "*", ",", "gzipped", "=", "None", ",", "byteorder", "=", "'big'", ")", ":", "if", "gzipped", "is", "not", "None", ":", "return", "File", ".", "load", "(", "filename", ",", "gzipped", ",", "byteorder", ")", "# if we don't know we read the magic number\r", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "buff", ":", "magic_number", "=", "buff", ".", "read", "(", "2", ")", "buff", ".", "seek", "(", "0", ")", "if", "magic_number", "==", "b'\\x1f\\x8b'", ":", "buff", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "buff", ")", "return", "File", ".", "from_buffer", "(", "buff", ",", "byteorder", ")" ]
Load the nbt file at the specified location.

By default, the function will figure out by itself if the file is
gzipped before loading it. You can pass a boolean to the `gzipped`
keyword only argument to specify explicitly whether the file is
compressed or not. You can also use the `byteorder` keyword only
argument to specify whether the file is little-endian or big-endian.
[ "Load", "the", "nbt", "file", "at", "the", "specified", "location", ".", "By", "default", "the", "function", "will", "figure", "out", "by", "itself", "if", "the", "file", "is", "gzipped", "before", "loading", "it", ".", "You", "can", "pass", "a", "boolean", "to", "the", "gzipped", "keyword", "only", "argument", "to", "specify", "explicitly", "whether", "the", "file", "is", "compressed", "or", "not", ".", "You", "can", "also", "use", "the", "byteorder", "keyword", "only", "argument", "to", "specify", "whether", "the", "file", "is", "little", "-", "endian", "or", "big", "-", "endian", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/nbt.py#L17-L37
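The detection relies on the fact that every gzip stream begins with the two magic bytes 1f 8b; the function sniffs them and then seeks back to 0 so the parser sees the whole stream. A generic sniffing helper in the same spirit (illustrative, not part of nbtlib):

import gzip

def is_gzipped(path):
    """Return True if the file starts with the gzip magic number."""
    with open(path, 'rb') as f:
        return f.read(2) == b'\x1f\x8b'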
vberlier/nbtlib
nbtlib/nbt.py
File.from_buffer
def from_buffer(cls, buff, byteorder='big'):
    """Load nbt file from a file-like object.

    The `buff` argument can be either a standard `io.BufferedReader`
    for uncompressed nbt or a `gzip.GzipFile` for gzipped nbt data.
    """
    self = cls.parse(buff, byteorder)
    self.filename = getattr(buff, 'name', self.filename)
    self.gzipped = isinstance(buff, gzip.GzipFile)
    self.byteorder = byteorder
    return self
python
def from_buffer(cls, buff, byteorder='big'):
    """Load nbt file from a file-like object.

    The `buff` argument can be either a standard `io.BufferedReader`
    for uncompressed nbt or a `gzip.GzipFile` for gzipped nbt data.
    """
    self = cls.parse(buff, byteorder)
    self.filename = getattr(buff, 'name', self.filename)
    self.gzipped = isinstance(buff, gzip.GzipFile)
    self.byteorder = byteorder
    return self
[ "def", "from_buffer", "(", "cls", ",", "buff", ",", "byteorder", "=", "'big'", ")", ":", "self", "=", "cls", ".", "parse", "(", "buff", ",", "byteorder", ")", "self", ".", "filename", "=", "getattr", "(", "buff", ",", "'name'", ",", "self", ".", "filename", ")", "self", ".", "gzipped", "=", "isinstance", "(", "buff", ",", "gzip", ".", "GzipFile", ")", "self", ".", "byteorder", "=", "byteorder", "return", "self" ]
Load nbt file from a file-like object. The `buff` argument can be either a standard `io.BufferedReader` for uncompressed nbt or a `gzip.GzipFile` for gzipped nbt data.
[ "Load", "nbt", "file", "from", "a", "file", "-", "like", "object", ".", "The", "buff", "argument", "can", "be", "either", "a", "standard", "io", ".", "BufferedReader", "for", "uncompressed", "nbt", "or", "a", "gzip", ".", "GzipFile", "for", "gzipped", "nbt", "data", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/nbt.py#L90-L100
vberlier/nbtlib
nbtlib/nbt.py
File.load
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    The `gzipped` argument is used to indicate if the specified file is
    gzipped. The `byteorder` argument lets you specify whether the file
    is big-endian or little-endian.
    """
    open_file = gzip.open if gzipped else open
    with open_file(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
python
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    The `gzipped` argument is used to indicate if the specified file is
    gzipped. The `byteorder` argument lets you specify whether the file
    is big-endian or little-endian.
    """
    open_file = gzip.open if gzipped else open
    with open_file(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
[ "def", "load", "(", "cls", ",", "filename", ",", "gzipped", ",", "byteorder", "=", "'big'", ")", ":", "open_file", "=", "gzip", ".", "open", "if", "gzipped", "else", "open", "with", "open_file", "(", "filename", ",", "'rb'", ")", "as", "buff", ":", "return", "cls", ".", "from_buffer", "(", "buff", ",", "byteorder", ")" ]
Read, parse and return the file at the specified location. The `gzipped` argument is used to indicate if the specified file is gzipped. The `byteorder` argument lets you specify whether the file is big-endian or little-endian.
[ "Read", "parse", "and", "return", "the", "file", "at", "the", "specified", "location", ".", "The", "gzipped", "argument", "is", "used", "to", "indicate", "if", "the", "specified", "file", "is", "gzipped", ".", "The", "byteorder", "argument", "lets", "you", "specify", "whether", "the", "file", "is", "big", "-", "endian", "or", "little", "-", "endian", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/nbt.py#L103-L112
vberlier/nbtlib
nbtlib/nbt.py
File.save
def save(self, filename=None, *, gzipped=None, byteorder=None):
    """Write the file at the specified location.

    The `gzipped` keyword only argument indicates if the file should
    be gzipped. The `byteorder` keyword only argument lets you specify
    whether the file should be big-endian or little-endian.

    If the method is called without any argument, it will default to the
    instance attributes and use the file's `filename`, `gzipped` and
    `byteorder` attributes. Calling the method without a `filename` will
    raise a `ValueError` if the `filename` of the file is `None`.
    """
    if gzipped is None:
        gzipped = self.gzipped
    if filename is None:
        filename = self.filename
    if filename is None:
        raise ValueError('No filename specified')

    open_file = gzip.open if gzipped else open
    with open_file(filename, 'wb') as buff:
        self.write(buff, byteorder or self.byteorder)
python
def save(self, filename=None, *, gzipped=None, byteorder=None):
    """Write the file at the specified location.

    The `gzipped` keyword only argument indicates if the file should
    be gzipped. The `byteorder` keyword only argument lets you specify
    whether the file should be big-endian or little-endian.

    If the method is called without any argument, it will default to the
    instance attributes and use the file's `filename`, `gzipped` and
    `byteorder` attributes. Calling the method without a `filename` will
    raise a `ValueError` if the `filename` of the file is `None`.
    """
    if gzipped is None:
        gzipped = self.gzipped
    if filename is None:
        filename = self.filename
    if filename is None:
        raise ValueError('No filename specified')

    open_file = gzip.open if gzipped else open
    with open_file(filename, 'wb') as buff:
        self.write(buff, byteorder or self.byteorder)
[ "def", "save", "(", "self", ",", "filename", "=", "None", ",", "*", ",", "gzipped", "=", "None", ",", "byteorder", "=", "None", ")", ":", "if", "gzipped", "is", "None", ":", "gzipped", "=", "self", ".", "gzipped", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "filename", "if", "filename", "is", "None", ":", "raise", "ValueError", "(", "'No filename specified'", ")", "open_file", "=", "gzip", ".", "open", "if", "gzipped", "else", "open", "with", "open_file", "(", "filename", ",", "'wb'", ")", "as", "buff", ":", "self", ".", "write", "(", "buff", ",", "byteorder", "or", "self", ".", "byteorder", ")" ]
Write the file at the specified location.

The `gzipped` keyword only argument indicates if the file should
be gzipped. The `byteorder` keyword only argument lets you specify
whether the file should be big-endian or little-endian.

If the method is called without any argument, it will default to the
instance attributes and use the file's `filename`, `gzipped` and
`byteorder` attributes. Calling the method without a `filename` will
raise a `ValueError` if the `filename` of the file is `None`.
[ "Write", "the", "file", "at", "the", "specified", "location", ".", "The", "gzipped", "keyword", "only", "argument", "indicates", "if", "the", "file", "should", "be", "gzipped", ".", "The", "byteorder", "keyword", "only", "argument", "lets", "you", "specify", "whether", "the", "file", "should", "be", "big", "-", "endian", "or", "little", "-", "endian", ".", "If", "the", "method", "is", "called", "without", "any", "argument", "it", "will", "default", "to", "the", "instance", "attributes", "and", "use", "the", "file", "s", "filename", "gzipped", "and", "byteorder", "attributes", ".", "Calling", "the", "method", "without", "a", "filename", "will", "raise", "a", "ValueError", "if", "the", "filename", "of", "the", "file", "is", "None", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/nbt.py#L114-L137
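A usage sketch for the save() method above — hedged in that it assumes nbtlib's documented top-level load() helper, which returns a File with `filename`, `gzipped` and `byteorder` already populated; the file paths are placeholders:

import nbtlib

# 'level.dat' / 'copy.dat' are placeholder paths, not files from this record.
nbt_file = nbtlib.load('level.dat')        # populates filename/gzipped/byteorder
nbt_file.save()                            # falls back to the instance attributes
nbt_file.save('copy.dat', gzipped=False)   # explicit target, written uncompressed

Calling save() on a File whose filename attribute is still None, without passing a filename, is what triggers the ValueError in the code above.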
aheadley/python-crunchyroll
crunchyroll/util.py
return_collection
def return_collection(collection_type):
    """Change method return value from raw API output to collection of models
    """
    def outer_func(func):
        @functools.wraps(func)
        def inner_func(self, *pargs, **kwargs):
            result = func(self, *pargs, **kwargs)
            return list(map(collection_type, result))
        return inner_func
    return outer_func
python
def return_collection(collection_type):
    """Change method return value from raw API output to collection of models
    """
    def outer_func(func):
        @functools.wraps(func)
        def inner_func(self, *pargs, **kwargs):
            result = func(self, *pargs, **kwargs)
            return list(map(collection_type, result))
        return inner_func
    return outer_func
[ "def", "return_collection", "(", "collection_type", ")", ":", "def", "outer_func", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "inner_func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "result", "=", "func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", "return", "list", "(", "map", "(", "collection_type", ",", "result", ")", ")", "return", "inner_func", "return", "outer_func" ]
Change method return value from raw API output to collection of models
[ "Change", "method", "return", "value", "from", "raw", "API", "output", "to", "collection", "of", "models" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/util.py#L47-L56
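To make the decorator's intent concrete, here is a small self-contained sketch; the Series and ApiClient names are invented for illustration, and it assumes return_collection (with its functools import) from the record above is in scope:

class Series(object):
    """Stand-in model that wraps one raw API dict."""
    def __init__(self, data):
        self.data = data

class ApiClient(object):
    @return_collection(Series)
    def list_series(self):
        # Pretend this is the raw API output.
        return [{'id': 1}, {'id': 2}]

# ApiClient().list_series() now yields [Series, Series] instead of raw dicts.
# The list() call in the decorator matters on Python 3, where map() returns
# a lazy iterator rather than a list.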
tulsawebdevs/django-multi-gtfs
multigtfs/models/stop.py
post_save_stop
def post_save_stop(sender, instance, **kwargs):
    '''Update related objects when the Stop is updated'''
    from multigtfs.models.trip import Trip
    trip_ids = instance.stoptime_set.filter(
        trip__shape=None).values_list('trip_id', flat=True).distinct()
    for trip in Trip.objects.filter(id__in=trip_ids):
        trip.update_geometry()
python
def post_save_stop(sender, instance, **kwargs):
    '''Update related objects when the Stop is updated'''
    from multigtfs.models.trip import Trip
    trip_ids = instance.stoptime_set.filter(
        trip__shape=None).values_list('trip_id', flat=True).distinct()
    for trip in Trip.objects.filter(id__in=trip_ids):
        trip.update_geometry()
[ "def", "post_save_stop", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "from", "multigtfs", ".", "models", ".", "trip", "import", "Trip", "trip_ids", "=", "instance", ".", "stoptime_set", ".", "filter", "(", "trip__shape", "=", "None", ")", ".", "values_list", "(", "'trip_id'", ",", "flat", "=", "True", ")", ".", "distinct", "(", ")", "for", "trip", "in", "Trip", ".", "objects", ".", "filter", "(", "id__in", "=", "trip_ids", ")", ":", "trip", ".", "update_geometry", "(", ")" ]
Update related objects when the Stop is updated
[ "Update", "related", "objects", "when", "the", "Stop", "is", "updated" ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/stop.py#L166-L172
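The handler above only fires once it is connected to Django's post_save signal. A sketch of the wiring; the import path follows this record's func_path_in_repository, and the dispatch_uid is an assumption rather than something shown in the record:

from django.db.models.signals import post_save
from multigtfs.models.stop import Stop, post_save_stop

# Refresh dependent trip geometries whenever a Stop instance is saved.
post_save.connect(post_save_stop, sender=Stop, dispatch_uid='post_save_stop')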
aheadley/python-crunchyroll
crunchyroll/apis/android_manga.py
AndroidMangaApi._do_post_request_tasks
def _do_post_request_tasks(self, response_data):
        """Handle actions that need to be done with every response

        I'm not sure what these session_ops are actually used for yet, seems
        to be a way to tell the client to do *something* if needed.
        """
        try:
            sess_ops = response_data.get('ops', [])
        except AttributeError:
            pass
        else:
            self._session_ops.extend(sess_ops)
python
def _do_post_request_tasks(self, response_data):
        """Handle actions that need to be done with every response

        I'm not sure what these session_ops are actually used for yet, seems
        to be a way to tell the client to do *something* if needed.
        """
        try:
            sess_ops = response_data.get('ops', [])
        except AttributeError:
            pass
        else:
            self._session_ops.extend(sess_ops)
[ "def", "_do_post_request_tasks", "(", "self", ",", "response_data", ")", ":", "try", ":", "sess_ops", "=", "response_data", ".", "get", "(", "'ops'", ",", "[", "]", ")", "except", "AttributeError", ":", "pass", "else", ":", "self", ".", "_session_ops", ".", "extend", "(", "sess_ops", ")" ]
Handle actions that need to be done with every response

        I'm not sure what these session_ops are actually used for yet, seems
        to be a way to tell the client to do *something* if needed.
[ "Handle", "actions", "that", "need", "to", "be", "done", "with", "every", "response" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android_manga.py#L94-L105
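The try/except shape above is worth spelling out: dict responses expose .get(), while list responses raise AttributeError and are silently skipped. A free-function restatement of that logic, runnable outside the class:

def handle_ops(response_data, session_ops):
    """Stand-alone copy of the method's logic, for demonstration only."""
    try:
        sess_ops = response_data.get('ops', [])
    except AttributeError:
        pass                      # lists have no .get(); nothing to record
    else:
        session_ops.extend(sess_ops)

ops = []
handle_ops({'ops': ['refresh_auth']}, ops)   # dict response: op recorded
handle_ops([{'id': 1}], ops)                 # list response: silently skipped
assert ops == ['refresh_auth']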
aheadley/python-crunchyroll
crunchyroll/apis/android_manga.py
AndroidMangaApi._build_request
def _build_request(self, method, url, params=None):
        """Build a function to do an API request

        "We have to go deeper" or "It's functions all the way down!"
        """
        full_params = self._get_base_params()
        if params is not None:
            full_params.update(params)
        try:
            request_func = lambda u, d: \
                getattr(self._connector, method.lower())(u, params=d,
                    headers=self._request_headers)
        except AttributeError:
            raise ApiException('Invalid request method')
        # TODO: need to catch a network here and raise as ApiNetworkException
        def do_request():
            logger.debug('Sending %s request "%s" with params: %r',
                method, url, full_params)
            try:
                resp = request_func(url, full_params)
                logger.debug('Received response code: %d', resp.status_code)
            except requests.RequestException as err:
                raise ApiNetworkException(err)
            try:
                resp_json = resp.json()
            except TypeError:
                resp_json = resp.json
            method_returns_list = False
            try:
                resp_json['error']
            except TypeError:
                logger.warn('Api method did not return map: %s', method)
                method_returns_list = True
            except KeyError:
                logger.warn('Api method did not return map with error key: %s',
                    method)
            if method_returns_list is None:
                raise ApiBadResponseException(resp.content)
            elif method_returns_list:
                data = resp_json
            else:
                try:
                    if resp_json['error']:
                        raise ApiError('%s: %s' % (resp_json['code'],
                            resp_json['message']))
                except KeyError:
                    data = resp_json
                else:
                    data = resp_json['data']
            self._do_post_request_tasks(data)
            self._last_response = resp
            return data
        return do_request
python
def _build_request(self, method, url, params=None):
        """Build a function to do an API request

        "We have to go deeper" or "It's functions all the way down!"
        """
        full_params = self._get_base_params()
        if params is not None:
            full_params.update(params)
        try:
            request_func = lambda u, d: \
                getattr(self._connector, method.lower())(u, params=d,
                    headers=self._request_headers)
        except AttributeError:
            raise ApiException('Invalid request method')
        # TODO: need to catch a network here and raise as ApiNetworkException
        def do_request():
            logger.debug('Sending %s request "%s" with params: %r',
                method, url, full_params)
            try:
                resp = request_func(url, full_params)
                logger.debug('Received response code: %d', resp.status_code)
            except requests.RequestException as err:
                raise ApiNetworkException(err)
            try:
                resp_json = resp.json()
            except TypeError:
                resp_json = resp.json
            method_returns_list = False
            try:
                resp_json['error']
            except TypeError:
                logger.warn('Api method did not return map: %s', method)
                method_returns_list = True
            except KeyError:
                logger.warn('Api method did not return map with error key: %s',
                    method)
            if method_returns_list is None:
                raise ApiBadResponseException(resp.content)
            elif method_returns_list:
                data = resp_json
            else:
                try:
                    if resp_json['error']:
                        raise ApiError('%s: %s' % (resp_json['code'],
                            resp_json['message']))
                except KeyError:
                    data = resp_json
                else:
                    data = resp_json['data']
            self._do_post_request_tasks(data)
            self._last_response = resp
            return data
        return do_request
[ "def", "_build_request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ")", ":", "full_params", "=", "self", ".", "_get_base_params", "(", ")", "if", "params", "is", "not", "None", ":", "full_params", ".", "update", "(", "params", ")", "try", ":", "request_func", "=", "lambda", "u", ",", "d", ":", "getattr", "(", "self", ".", "_connector", ",", "method", ".", "lower", "(", ")", ")", "(", "u", ",", "params", "=", "d", ",", "headers", "=", "self", ".", "_request_headers", ")", "except", "AttributeError", ":", "raise", "ApiException", "(", "'Invalid request method'", ")", "# TODO: need to catch a network here and raise as ApiNetworkException", "def", "do_request", "(", ")", ":", "logger", ".", "debug", "(", "'Sending %s request \"%s\" with params: %r'", ",", "method", ",", "url", ",", "full_params", ")", "try", ":", "resp", "=", "request_func", "(", "url", ",", "full_params", ")", "logger", ".", "debug", "(", "'Received response code: %d'", ",", "resp", ".", "status_code", ")", "except", "requests", ".", "RequestException", "as", "err", ":", "raise", "ApiNetworkException", "(", "err", ")", "try", ":", "resp_json", "=", "resp", ".", "json", "(", ")", "except", "TypeError", ":", "resp_json", "=", "resp", ".", "json", "method_returns_list", "=", "False", "try", ":", "resp_json", "[", "'error'", "]", "except", "TypeError", ":", "logger", ".", "warn", "(", "'Api method did not return map: %s'", ",", "method", ")", "method_returns_list", "=", "True", "except", "KeyError", ":", "logger", ".", "warn", "(", "'Api method did not return map with error key: %s'", ",", "method", ")", "if", "method_returns_list", "is", "None", ":", "raise", "ApiBadResponseException", "(", "resp", ".", "content", ")", "elif", "method_returns_list", ":", "data", "=", "resp_json", "else", ":", "try", ":", "if", "resp_json", "[", "'error'", "]", ":", "raise", "ApiError", "(", "'%s: %s'", "%", "(", "resp_json", "[", "'code'", "]", ",", "resp_json", "[", "'message'", "]", ")", ")", "except", "KeyError", ":", "data", "=", "resp_json", "else", ":", "data", "=", "resp_json", "[", "'data'", "]", "self", ".", "_do_post_request_tasks", "(", "data", ")", "self", ".", "_last_response", "=", "resp", "return", "data", "return", "do_request" ]
Build a function to do an API request

        "We have to go deeper" or "It's functions all the way down!"
[ "Build", "a", "function", "to", "do", "an", "API", "request" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android_manga.py#L107-L161
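_build_request returns a zero-argument closure rather than issuing the request immediately, which lets a caller build the request once and invoke (or retry) it later. A stripped-down sketch of the same pattern with the error handling elided; the names and URL below are placeholders, not library API:

import logging

import requests

logger = logging.getLogger(__name__)

def build_request(session, method, url, params):
    """Return a thunk that performs the HTTP call when invoked."""
    def do_request():
        logger.debug('Sending %s request "%s" with params: %r',
                     method, url, params)
        resp = getattr(session, method.lower())(url, params=params)
        return resp.json()
    return do_request

# fetch = build_request(requests.Session(), 'GET',
#                       'https://example.com/api', {'page': 1})
# data = fetch()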
aheadley/python-crunchyroll
crunchyroll/apis/android_manga.py
AndroidMangaApi._build_request_url
def _build_request_url(self, secure, api_method):
        """Build a URL for a API method request
        """
        if secure:
            proto = ANDROID_MANGA.PROTOCOL_SECURE
        else:
            proto = ANDROID_MANGA.PROTOCOL_INSECURE
        req_url = ANDROID_MANGA.API_URL.format(
            protocol=proto,
            api_method=api_method
        )
        return req_url
python
def _build_request_url(self, secure, api_method):
        """Build a URL for a API method request
        """
        if secure:
            proto = ANDROID_MANGA.PROTOCOL_SECURE
        else:
            proto = ANDROID_MANGA.PROTOCOL_INSECURE
        req_url = ANDROID_MANGA.API_URL.format(
            protocol=proto,
            api_method=api_method
        )
        return req_url
[ "def", "_build_request_url", "(", "self", ",", "secure", ",", "api_method", ")", ":", "if", "secure", ":", "proto", "=", "ANDROID_MANGA", ".", "PROTOCOL_SECURE", "else", ":", "proto", "=", "ANDROID_MANGA", ".", "PROTOCOL_INSECURE", "req_url", "=", "ANDROID_MANGA", ".", "API_URL", ".", "format", "(", "protocol", "=", "proto", ",", "api_method", "=", "api_method", ")", "return", "req_url" ]
Build a URL for a API method request
[ "Build", "a", "URL", "for", "a", "API", "method", "request" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android_manga.py#L163-L174
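The PROTOCOL_* and API_URL constants come from crunchyroll's constants module and are not shown in this record; a self-contained equivalent with placeholder values demonstrates the str.format() template the method relies on:

API_URL = '{protocol}://api.example.com/{api_method}'   # placeholder template

def build_request_url(secure, api_method):
    proto = 'https' if secure else 'http'
    return API_URL.format(protocol=proto, api_method=api_method)

assert build_request_url(True, 'list_series') == 'https://api.example.com/list_series'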
aheadley/python-crunchyroll
crunchyroll/apis/android_manga.py
AndroidMangaApi.cr_login
def cr_login(self, response):
        """
        Login using email/username and password, used to get the auth token

        @param str account
        @param str password
        @param str hash_id (optional)
        """
        self._state_params['auth'] = response['auth']
        self._user_data = response['user']
        if not self.logged_in:
            raise ApiLoginFailure(response)
python
def cr_login(self, response):
        """
        Login using email/username and password, used to get the auth token

        @param str account
        @param str password
        @param str hash_id (optional)
        """
        self._state_params['auth'] = response['auth']
        self._user_data = response['user']
        if not self.logged_in:
            raise ApiLoginFailure(response)
[ "def", "cr_login", "(", "self", ",", "response", ")", ":", "self", ".", "_state_params", "[", "'auth'", "]", "=", "response", "[", "'auth'", "]", "self", ".", "_user_data", "=", "response", "[", "'user'", "]", "if", "not", "self", ".", "logged_in", ":", "raise", "ApiLoginFailure", "(", "response", ")" ]
Login using email/username and password, used to get the auth token

        @param str account
        @param str password
        @param str hash_id (optional)
[ "Login", "using", "email", "/", "username", "and", "password", "used", "to", "get", "the", "auth", "token" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android_manga.py#L251-L262
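Despite the account/password docstring, the method body only consumes an already-fetched response dict. A sketch of the minimal shape it expects — the keys come from the code above, the values are fabricated placeholders:

response = {
    'auth': 'opaque-session-token',      # stored into _state_params['auth']
    'user': {'username': 'example'},     # stored into _user_data
}
# api.cr_login(response) then raises ApiLoginFailure only if the instance's
# logged_in property still evaluates False after the two assignments.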
tulsawebdevs/django-multi-gtfs
multigtfs/models/fields/seconds.py
SecondsField.from_db_value
def from_db_value(self, value, expression, connection, context):
        '''Handle data loaded from database.'''
        if value is None:
            return value
        return self.parse_seconds(value)
python
def from_db_value(self, value, expression, connection, context):
        '''Handle data loaded from database.'''
        if value is None:
            return value
        return self.parse_seconds(value)
[ "def", "from_db_value", "(", "self", ",", "value", ",", "expression", ",", "connection", ",", "context", ")", ":", "if", "value", "is", "None", ":", "return", "value", "return", "self", ".", "parse_seconds", "(", "value", ")" ]
Handle data loaded from database.
[ "Handle", "data", "loaded", "from", "database", "." ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/fields/seconds.py#L72-L76
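Django calls from_db_value() while materializing query results; invoking it by hand shows the conversion. This assumes SecondsField and its Seconds helper are importable from multigtfs.models.fields.seconds, per this record's path:

from multigtfs.models.fields.seconds import SecondsField

field = SecondsField()
assert field.from_db_value(None, None, None, None) is None
arrival = field.from_db_value(5400, None, None, None)  # 5400 s, i.e. 01:30:00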
tulsawebdevs/django-multi-gtfs
multigtfs/models/fields/seconds.py
SecondsField.to_python
def to_python(self, value):
        '''Handle data from serialization and form clean() methods.'''
        if isinstance(value, Seconds):
            return value
        if value in self.empty_values:
            return None
        return self.parse_seconds(value)
python
def to_python(self, value):
        '''Handle data from serialization and form clean() methods.'''
        if isinstance(value, Seconds):
            return value
        if value in self.empty_values:
            return None
        return self.parse_seconds(value)
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Seconds", ")", ":", "return", "value", "if", "value", "in", "self", ".", "empty_values", ":", "return", "None", "return", "self", ".", "parse_seconds", "(", "value", ")" ]
Handle data from serialization and form clean() methods.
[ "Handle", "data", "from", "serialization", "and", "form", "clean", "()", "methods", "." ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/fields/seconds.py#L78-L84
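to_python() is the forms/serialization entry point; a few representative calls under the same assumptions as the previous sketch:

field = SecondsField()
assert field.to_python('') is None            # '' is in Django's empty_values
value = field.to_python('01:30:00')           # string -> Seconds instance
assert field.to_python(value) is value        # Seconds passes straight through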
tulsawebdevs/django-multi-gtfs
multigtfs/models/fields/seconds.py
SecondsField.parse_seconds
def parse_seconds(value):
        '''
        Parse string into Seconds instances.

        Handled formats:
        HH:MM:SS
        HH:MM
        SS
        '''
        svalue = str(value)
        colons = svalue.count(':')
        if colons == 2:
            hours, minutes, seconds = [int(v) for v in svalue.split(':')]
        elif colons == 1:
            hours, minutes = [int(v) for v in svalue.split(':')]
            seconds = 0
        elif colons == 0:
            hours = 0
            minutes = 0
            seconds = int(svalue)
        else:
            raise ValueError('Must be in seconds or HH:MM:SS format')
        return Seconds.from_hms(hours, minutes, seconds)
python
def parse_seconds(value):
        '''
        Parse string into Seconds instances.

        Handled formats:
        HH:MM:SS
        HH:MM
        SS
        '''
        svalue = str(value)
        colons = svalue.count(':')
        if colons == 2:
            hours, minutes, seconds = [int(v) for v in svalue.split(':')]
        elif colons == 1:
            hours, minutes = [int(v) for v in svalue.split(':')]
            seconds = 0
        elif colons == 0:
            hours = 0
            minutes = 0
            seconds = int(svalue)
        else:
            raise ValueError('Must be in seconds or HH:MM:SS format')
        return Seconds.from_hms(hours, minutes, seconds)
[ "def", "parse_seconds", "(", "value", ")", ":", "svalue", "=", "str", "(", "value", ")", "colons", "=", "svalue", ".", "count", "(", "':'", ")", "if", "colons", "==", "2", ":", "hours", ",", "minutes", ",", "seconds", "=", "[", "int", "(", "v", ")", "for", "v", "in", "svalue", ".", "split", "(", "':'", ")", "]", "elif", "colons", "==", "1", ":", "hours", ",", "minutes", "=", "[", "int", "(", "v", ")", "for", "v", "in", "svalue", ".", "split", "(", "':'", ")", "]", "seconds", "=", "0", "elif", "colons", "==", "0", ":", "hours", "=", "0", "minutes", "=", "0", "seconds", "=", "int", "(", "svalue", ")", "else", ":", "raise", "ValueError", "(", "'Must be in seconds or HH:MM:SS format'", ")", "return", "Seconds", ".", "from_hms", "(", "hours", ",", "minutes", ",", "seconds", ")" ]
Parse string into Seconds instances.

        Handled formats:
        HH:MM:SS
        HH:MM
        SS
[ "Parse", "string", "into", "Seconds", "instances", "." ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/fields/seconds.py#L87-L109
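The three accepted layouts, exercised directly. Two assumptions here: that parse_seconds is a @staticmethod (its missing self suggests so), and that Seconds.from_hms computes hours*3600 + minutes*60 + seconds, which get_prep_value's use of .seconds implies:

assert SecondsField.parse_seconds('01:30:00').seconds == 5400   # HH:MM:SS
assert SecondsField.parse_seconds('01:30').seconds == 5400      # HH:MM
assert SecondsField.parse_seconds(90).seconds == 90             # bare seconds
# SecondsField.parse_seconds('1:2:3:4') would raise ValueError (3 colons)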
tulsawebdevs/django-multi-gtfs
multigtfs/models/fields/seconds.py
SecondsField.get_prep_value
def get_prep_value(self, value):
        '''Prepare value for database storage.'''
        if isinstance(value, Seconds):
            return value.seconds
        elif value:
            return self.parse_seconds(value).seconds
        else:
            return None
python
def get_prep_value(self, value):
        '''Prepare value for database storage.'''
        if isinstance(value, Seconds):
            return value.seconds
        elif value:
            return self.parse_seconds(value).seconds
        else:
            return None
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Seconds", ")", ":", "return", "value", ".", "seconds", "elif", "value", ":", "return", "self", ".", "parse_seconds", "(", "value", ")", ".", "seconds", "else", ":", "return", "None" ]
Prepare value for database storage.
[ "Prepare", "value", "for", "database", "storage", "." ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/fields/seconds.py#L111-L118
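And the storage direction round-trips, under the same assumptions as the sketches above. Note the `elif value:` branch, which means falsy inputs such as 0 or '' also come back as None:

field = SecondsField()
assert field.get_prep_value(None) is None
assert field.get_prep_value('01:00:00') == 3600                 # parsed, then .seconds
assert field.get_prep_value(field.to_python('01:00:00')) == 3600
assert field.get_prep_value(0) is None                          # quirk of `elif value`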