Dataset columns:
  _id: stringlengths 2-7
  title: stringlengths 1-88
  partition: stringclasses (3 values)
  text: stringlengths 75-19.8k
  language: stringclasses (1 value)
  meta_information: dict
q26000
SQLiteStorageFile._CountStoredAttributeContainers
train
def _CountStoredAttributeContainers(self, container_type):
  """Counts the number of attribute containers of the given type.

  Args:
    container_type (str): attribute container type.

  Returns:
    int: number of attribute containers of the given type.

  Raises:
    ValueError: if an unsupported container_type is provided.
  """
  if container_type not in self._CONTAINER_TYPES:
    raise ValueError('Attribute container type {0:s} is not supported'.format(
        container_type))

  if not self._HasTable(container_type):
    return 0

  # Note that this is SQLite specific, and will give inaccurate results if
  # there are DELETE commands run on the table. The Plaso SQLite storage
  # implementation does not run any DELETE commands.
  query = 'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'.format(container_type)
  self._cursor.execute(query)

  row = self._cursor.fetchone()
  if not row:
    return 0

  return row[0] or 0
python
{ "resource": "" }
q26001
SQLiteStorageFile._GetAttributeContainerByIndex
train
def _GetAttributeContainerByIndex(self, container_type, index): """Retrieves a specific attribute container. Args: container_type (str): attribute container type. index (int): attribute container index. Returns: AttributeContainer: attribute container or None if not available. Raises: IOError: when there is an error querying the storage file. OSError: when there is an error querying the storage file. """ sequence_number = index + 1 query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format( container_type, sequence_number) try: self._cursor.execute(query) except sqlite3.OperationalError as exception: raise IOError('Unable to query storage file with error: {0!s}'.format( exception)) row = self._cursor.fetchone() if row: identifier = identifiers.SQLTableIdentifier( container_type, sequence_number) if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB: serialized_data = zlib.decompress(row[0]) else: serialized_data = row[0] if self._storage_profiler: self._storage_profiler.Sample( 'read', container_type, len(serialized_data), len(row[0])) attribute_container = self._DeserializeAttributeContainer( container_type, serialized_data) attribute_container.SetIdentifier(identifier) return attribute_container count = self._CountStoredAttributeContainers(container_type) index -= count serialized_data = self._GetSerializedAttributeContainerByIndex( container_type, index) attribute_container = self._DeserializeAttributeContainer( container_type, serialized_data) if attribute_container: identifier = identifiers.SQLTableIdentifier( container_type, sequence_number) attribute_container.SetIdentifier(identifier) return attribute_container
python
{ "resource": "" }
q26002
SQLiteStorageFile._GetAttributeContainers
train
def _GetAttributeContainers( self, container_type, filter_expression=None, order_by=None): """Retrieves a specific type of stored attribute containers. Args: container_type (str): attribute container type. filter_expression (Optional[str]): expression to filter results by. order_by (Optional[str]): name of a column to order the results by. Yields: AttributeContainer: attribute container. Raises: IOError: when there is an error querying the storage file. OSError: when there is an error querying the storage file. """ query = 'SELECT _identifier, _data FROM {0:s}'.format(container_type) if filter_expression: query = '{0:s} WHERE {1:s}'.format(query, filter_expression) if order_by: query = '{0:s} ORDER BY {1:s}'.format(query, order_by) # Use a local cursor to prevent another query interrupting the generator. cursor = self._connection.cursor() try: cursor.execute(query) except sqlite3.OperationalError as exception: raise IOError('Unable to query storage file with error: {0!s}'.format( exception)) row = cursor.fetchone() while row: identifier = identifiers.SQLTableIdentifier(container_type, row[0]) if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB: serialized_data = zlib.decompress(row[1]) else: serialized_data = row[1] if self._storage_profiler: self._storage_profiler.Sample( 'read', container_type, len(serialized_data), len(row[1])) attribute_container = self._DeserializeAttributeContainer( container_type, serialized_data) attribute_container.SetIdentifier(identifier) yield attribute_container row = cursor.fetchone()
python
{ "resource": "" }
q26003
SQLiteStorageFile._ReadAndCheckStorageMetadata
train
def _ReadAndCheckStorageMetadata(self, check_readable_only=False):
  """Reads storage metadata and checks that the values are valid.

  Args:
    check_readable_only (Optional[bool]): whether the store should only be
        checked to see if it can be read. If False, the store will be checked
        to see if it can be read and written to.
  """
  query = 'SELECT key, value FROM metadata'
  self._cursor.execute(query)

  metadata_values = {row[0]: row[1] for row in self._cursor.fetchall()}

  self._CheckStorageMetadata(
      metadata_values, check_readable_only=check_readable_only)

  self.format_version = metadata_values['format_version']
  self.compression_format = metadata_values['compression_format']
  self.serialization_format = metadata_values['serialization_format']
  self.storage_type = metadata_values['storage_type']
python
{ "resource": "" }
q26004
SQLiteStorageFile._WriteAttributeContainer
train
def _WriteAttributeContainer(self, attribute_container): """Writes an attribute container. The table for the container type must exist. Args: attribute_container (AttributeContainer): attribute container. """ if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT: timestamp, serialized_data = self._serialized_event_heap.PopEvent() else: serialized_data = self._SerializeAttributeContainer(attribute_container) if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB: compressed_data = zlib.compress(serialized_data) serialized_data = sqlite3.Binary(compressed_data) else: compressed_data = '' if self._storage_profiler: self._storage_profiler.Sample( 'write', attribute_container.CONTAINER_TYPE, len(serialized_data), len(compressed_data)) if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT: query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)' self._cursor.execute(query, (timestamp, serialized_data)) else: query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format( attribute_container.CONTAINER_TYPE) self._cursor.execute(query, (serialized_data, )) identifier = identifiers.SQLTableIdentifier( attribute_container.CONTAINER_TYPE, self._cursor.lastrowid) attribute_container.SetIdentifier(identifier)
python
{ "resource": "" }
q26005
SQLiteStorageFile._WriteSerializedAttributeContainerList
train
def _WriteSerializedAttributeContainerList(self, container_type): """Writes a serialized attribute container list. Args: container_type (str): attribute container type. """ if container_type == self._CONTAINER_TYPE_EVENT: if not self._serialized_event_heap.data_size: return number_of_attribute_containers = ( self._serialized_event_heap.number_of_events) else: container_list = self._GetSerializedAttributeContainerList(container_type) if not container_list.data_size: return number_of_attribute_containers = ( container_list.number_of_attribute_containers) if self._serializers_profiler: self._serializers_profiler.StartTiming('write') if container_type == self._CONTAINER_TYPE_EVENT: query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)' else: query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type) # TODO: directly use container_list instead of values_tuple_list. values_tuple_list = [] for _ in range(number_of_attribute_containers): if container_type == self._CONTAINER_TYPE_EVENT: timestamp, serialized_data = self._serialized_event_heap.PopEvent() else: serialized_data = container_list.PopAttributeContainer() if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB: compressed_data = zlib.compress(serialized_data) serialized_data = sqlite3.Binary(compressed_data) else: compressed_data = '' if self._storage_profiler: self._storage_profiler.Sample( 'write', container_type, len(serialized_data), len(compressed_data)) if container_type == self._CONTAINER_TYPE_EVENT: values_tuple_list.append((timestamp, serialized_data)) else: values_tuple_list.append((serialized_data, )) self._cursor.executemany(query, values_tuple_list) if self._serializers_profiler: self._serializers_profiler.StopTiming('write') if container_type == self._CONTAINER_TYPE_EVENT: self._serialized_event_heap.Empty() else: container_list.Empty()
python
{ "resource": "" }
q26006
SQLiteStorageFile._WriteStorageMetadata
train
def _WriteStorageMetadata(self):
  """Writes the storage metadata."""
  self._cursor.execute(self._CREATE_METADATA_TABLE_QUERY)

  query = 'INSERT INTO metadata (key, value) VALUES (?, ?)'

  key = 'format_version'
  value = '{0:d}'.format(self._FORMAT_VERSION)
  self._cursor.execute(query, (key, value))

  key = 'compression_format'
  value = self.compression_format
  self._cursor.execute(query, (key, value))

  key = 'serialization_format'
  value = self.serialization_format
  self._cursor.execute(query, (key, value))

  key = 'storage_type'
  value = self.storage_type
  self._cursor.execute(query, (key, value))
python
{ "resource": "" }
q26007
SQLiteStorageFile.AddEventTags
train
def AddEventTags(self, event_tags):
  """Adds event tags.

  Args:
    event_tags (list[EventTag]): event tags.

  Raises:
    IOError: when the storage file is closed or read-only or
        if the event tags cannot be serialized.
    OSError: when the storage file is closed or read-only or
        if the event tags cannot be serialized.
  """
  self._RaiseIfNotWritable()

  for event_tag in event_tags:
    self.AddEventTag(event_tag)
python
{ "resource": "" }
q26008
SQLiteStorageFile.CheckSupportedFormat
train
def CheckSupportedFormat(cls, path, check_readable_only=False): """Checks if the storage file format is supported. Args: path (str): path to the storage file. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Returns: bool: True if the format is supported. """ try: connection = sqlite3.connect( path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) cursor = connection.cursor() query = 'SELECT * FROM metadata' cursor.execute(query) metadata_values = {row[0]: row[1] for row in cursor.fetchall()} cls._CheckStorageMetadata( metadata_values, check_readable_only=check_readable_only) connection.close() result = True except (IOError, sqlite3.DatabaseError): result = False return result
python
{ "resource": "" }
q26009
SQLiteStorageFile.Close
train
def Close(self): """Closes the storage. Raises: IOError: if the storage file is already closed. OSError: if the storage file is already closed. """ if not self._is_open: raise IOError('Storage file already closed.') if not self._read_only: self._WriteSerializedAttributeContainerList( self._CONTAINER_TYPE_EVENT_SOURCE) self._WriteSerializedAttributeContainerList( self._CONTAINER_TYPE_EVENT_DATA) self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT) self._WriteSerializedAttributeContainerList( self._CONTAINER_TYPE_EVENT_TAG) self._WriteSerializedAttributeContainerList( self._CONTAINER_TYPE_EXTRACTION_WARNING) if self._connection: # We need to run commit or not all data is stored in the database. self._connection.commit() self._connection.close() self._connection = None self._cursor = None self._is_open = False
python
{ "resource": "" }
q26010
SQLiteStorageFile.GetWarnings
train
def GetWarnings(self):
  """Retrieves the warnings.

  Returns:
    generator(ExtractionWarning): warning generator.
  """
  # For backwards compatibility with pre-20190309 stores.
  # Note that stores cannot contain both ExtractionErrors and
  # ExtractionWarnings.
  if self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_ERROR):
    return self._GetExtractionErrorsAsWarnings()

  return self._GetAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)
python
{ "resource": "" }
q26011
SQLiteStorageFile._GetExtractionErrorsAsWarnings
train
def _GetExtractionErrorsAsWarnings(self):
  """Retrieves errors from the store and converts them to warnings.

  This method is for backwards compatibility with pre-20190309 storage format
  stores which used ExtractionError attribute containers.

  Yields:
    ExtractionWarning: extraction warnings.
  """
  for extraction_error in self._GetAttributeContainers(
      self._CONTAINER_TYPE_EXTRACTION_ERROR):
    error_attributes = extraction_error.CopyToDict()
    warning = warnings.ExtractionWarning()
    warning.CopyFromDict(error_attributes)
    yield warning
python
{ "resource": "" }
q26012
SQLiteStorageFile.GetEvents
train
def GetEvents(self):
  """Retrieves the events.

  Yields:
    EventObject: event.
  """
  for event in self._GetAttributeContainers('event'):
    if hasattr(event, 'event_data_row_identifier'):
      event_data_identifier = identifiers.SQLTableIdentifier(
          'event_data', event.event_data_row_identifier)
      event.SetEventDataIdentifier(event_data_identifier)

      del event.event_data_row_identifier

    yield event
python
{ "resource": "" }
q26013
SQLiteStorageFile.GetEventTagByIdentifier
train
def GetEventTagByIdentifier(self, identifier):
  """Retrieves a specific event tag.

  Args:
    identifier (SQLTableIdentifier): event tag identifier.

  Returns:
    EventTag: event tag or None if not available.
  """
  event_tag = self._GetAttributeContainerByIndex(
      self._CONTAINER_TYPE_EVENT_TAG, identifier.row_identifier - 1)
  if event_tag:
    event_identifier = identifiers.SQLTableIdentifier(
        self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
    event_tag.SetEventIdentifier(event_identifier)

    del event_tag.event_row_identifier

  return event_tag
python
{ "resource": "" }
q26014
SQLiteStorageFile.GetEventTags
train
def GetEventTags(self):
  """Retrieves the event tags.

  Yields:
    EventTag: event tag.
  """
  for event_tag in self._GetAttributeContainers(
      self._CONTAINER_TYPE_EVENT_TAG):
    event_identifier = identifiers.SQLTableIdentifier(
        self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
    event_tag.SetEventIdentifier(event_identifier)

    del event_tag.event_row_identifier

    yield event_tag
python
{ "resource": "" }
q26015
SQLiteStorageFile.GetNumberOfEventSources
train
def GetNumberOfEventSources(self):
  """Retrieves the number of event sources.

  Returns:
    int: number of event sources.
  """
  number_of_event_sources = self._CountStoredAttributeContainers(
      self._CONTAINER_TYPE_EVENT_SOURCE)
  number_of_event_sources += self._GetNumberOfSerializedAttributeContainers(
      self._CONTAINER_TYPE_EVENT_SOURCE)
  return number_of_event_sources
python
{ "resource": "" }
q26016
SQLiteStorageFile.GetSessions
train
def GetSessions(self): """Retrieves the sessions. Yields: Session: session attribute container. Raises: IOError: if there is a mismatch in session identifiers between the session start and completion attribute containers. OSError: if there is a mismatch in session identifiers between the session start and completion attribute containers. """ session_start_generator = self._GetAttributeContainers( self._CONTAINER_TYPE_SESSION_START) session_completion_generator = self._GetAttributeContainers( self._CONTAINER_TYPE_SESSION_COMPLETION) for session_index in range(0, self._last_session): session_start = next(session_start_generator) # pylint: disable=stop-iteration-return session_completion = next(session_completion_generator) # pylint: disable=stop-iteration-return session = sessions.Session() session.CopyAttributesFromSessionStart(session_start) if session_completion: try: session.CopyAttributesFromSessionCompletion(session_completion) except ValueError: raise IOError( 'Session identifier mismatch for session: {0:d}'.format( session_index)) yield session
python
{ "resource": "" }
q26017
SQLiteStorageFile.HasWarnings
train
def HasWarnings(self):
  """Determines if a store contains extraction warnings.

  Returns:
    bool: True if the store contains extraction warnings.
  """
  # To support older storage versions, check for the now deprecated
  # extraction errors.
  has_errors = self._HasAttributeContainers(
      self._CONTAINER_TYPE_EXTRACTION_ERROR)
  if has_errors:
    return True

  return self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)
python
{ "resource": "" }
q26018
SQLiteStorageFile.Open
train
def Open(self, path=None, read_only=True, **unused_kwargs): """Opens the storage. Args: path (Optional[str]): path to the storage file. read_only (Optional[bool]): True if the file should be opened in read-only mode. Raises: IOError: if the storage file is already opened or if the database cannot be connected. OSError: if the storage file is already opened or if the database cannot be connected. ValueError: if path is missing. """ if self._is_open: raise IOError('Storage file already opened.') if not path: raise ValueError('Missing path.') path = os.path.abspath(path) connection = sqlite3.connect( path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) cursor = connection.cursor() if not cursor: return self._connection = connection self._cursor = cursor self._is_open = True self._read_only = read_only if read_only: self._ReadAndCheckStorageMetadata(check_readable_only=True) else: # self._cursor.execute('PRAGMA journal_mode=MEMORY') # Turn off insert transaction integrity since we want to do bulk insert. self._cursor.execute('PRAGMA synchronous=OFF') if not self._HasTable('metadata'): self._WriteStorageMetadata() else: self._ReadAndCheckStorageMetadata() if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB: data_column_type = 'BLOB' else: data_column_type = 'TEXT' for container_type in self._CONTAINER_TYPES: if not self._HasTable(container_type): if container_type == self._CONTAINER_TYPE_EVENT: query = self._CREATE_EVENT_TABLE_QUERY.format( container_type, data_column_type) else: query = self._CREATE_TABLE_QUERY.format( container_type, data_column_type) self._cursor.execute(query) self._connection.commit() last_session_start = self._CountStoredAttributeContainers( self._CONTAINER_TYPE_SESSION_START) last_session_completion = self._CountStoredAttributeContainers( self._CONTAINER_TYPE_SESSION_COMPLETION) # Initialize next_sequence_number based on the file contents so that # SQLTableIdentifier points to the correct attribute container. for container_type in self._REFERENCED_CONTAINER_TYPES: container_list = self._GetSerializedAttributeContainerList(container_type) container_list.next_sequence_number = ( self._CountStoredAttributeContainers(container_type)) # TODO: handle open sessions. if last_session_start != last_session_completion: logger.warning('Detected unclosed session.') self._last_session = last_session_completion
python
{ "resource": "" }
q26019
FirefoxHistoryPlugin.ParseBookmarkAnnotationRow
train
def ParseBookmarkAnnotationRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a bookmark annotation row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = FirefoxPlacesBookmarkAnnotationEventData() event_data.content = self._GetRowValue(query_hash, row, 'content') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.url = self._GetRowValue(query_hash, row, 'url') timestamp = self._GetRowValue(query_hash, row, 'dateAdded') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastModified') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26020
FirefoxHistoryPlugin.ParseBookmarkFolderRow
train
def ParseBookmarkFolderRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a bookmark folder row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) title = self._GetRowValue(query_hash, row, 'title') event_data = FirefoxPlacesBookmarkFolderEventData() event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = title or 'N/A' timestamp = self._GetRowValue(query_hash, row, 'dateAdded') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastModified') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26021
FirefoxHistoryPlugin.ParseBookmarkRow
train
def ParseBookmarkRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a bookmark row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) rev_host = self._GetRowValue(query_hash, row, 'rev_host') bookmark_type = self._GetRowValue(query_hash, row, 'type') event_data = FirefoxPlacesBookmarkEventData() event_data.host = rev_host or 'N/A' event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.places_title = self._GetRowValue(query_hash, row, 'places_title') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title') event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A') event_data.url = self._GetRowValue(query_hash, row, 'url') event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count') timestamp = self._GetRowValue(query_hash, row, 'dateAdded') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastModified') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26022
FirefoxHistoryPlugin.ParsePageVisitedRow
train
def ParsePageVisitedRow( self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): """Parses a page visited row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database. """ query_hash = hash(query) from_visit = self._GetRowValue(query_hash, row, 'from_visit') hidden = self._GetRowValue(query_hash, row, 'hidden') rev_host = self._GetRowValue(query_hash, row, 'rev_host') typed = self._GetRowValue(query_hash, row, 'typed') # TODO: make extra conditional formatting. extras = [] if from_visit: extras.append('visited from: {0:s}'.format( self._GetUrl(from_visit, cache, database))) if hidden == '1': extras.append('(url hidden)') if typed == '1': extras.append('(directly typed)') else: extras.append('(URL not typed directly)') event_data = FirefoxPlacesPageVisitedEventData() event_data.host = self._ReverseHostname(rev_host) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.url = self._GetRowValue(query_hash, row, 'url') event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count') event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type') if extras: event_data.extra = extras timestamp = self._GetRowValue(query_hash, row, 'visit_date') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26023
FirefoxHistoryPlugin._ReverseHostname
train
def _ReverseHostname(self, hostname):
  """Reverses the hostname and strips the leading dot.

  The hostname entry is reversed:
    moc.elgoog.www.
  Should be:
    www.google.com

  Args:
    hostname (str): reversed hostname.

  Returns:
    str: hostname without a leading dot.
  """
  if not hostname:
    return ''

  if len(hostname) <= 1:
    return hostname

  if hostname[-1] == '.':
    return hostname[::-1][1:]

  return hostname[::-1][0:]
python
{ "resource": "" }
q26024
FirefoxDownloadsPlugin.ParseDownloadsRow
train
def ParseDownloadsRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a downloads row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = FirefoxDownloadEventData() event_data.full_path = self._GetRowValue(query_hash, row, 'target') event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType') event_data.name = self._GetRowValue(query_hash, row, 'name') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes') event_data.referrer = self._GetRowValue(query_hash, row, 'referrer') event_data.temporary_location = self._GetRowValue( query_hash, row, 'tempPath') event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes') event_data.url = self._GetRowValue(query_hash, row, 'source') timestamp = self._GetRowValue(query_hash, row, 'startTime') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_START) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'endTime') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_END) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26025
MultiProcessBaseProcess._SigSegvHandler
train
def _SigSegvHandler(self, signal_number, stack_frame):
  """Signal handler for the SIGSEGV signal.

  Args:
    signal_number (int): numeric representation of the signal.
    stack_frame (frame): current stack frame or None.
  """
  self._OnCriticalError()

  # Note that the original SIGSEGV handler can be 0.
  if self._original_sigsegv_handler is not None:
    # Let the original SIGSEGV handler take over.
    signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)
    os.kill(self._pid, signal.SIGSEGV)
python
{ "resource": "" }
q26026
MultiProcessBaseProcess._WaitForStatusNotRunning
train
def _WaitForStatusNotRunning(self):
  """Waits for the status-is-running flag to change to False."""
  # We wait slightly longer than the status check sleep time.
  time.sleep(2.0)
  time_slept = 2.0
  while self._status_is_running:
    time.sleep(0.5)
    time_slept += 0.5
    if time_slept >= self._PROCESS_JOIN_TIMEOUT:
      break
python
{ "resource": "" }
q26027
MultiProcessBaseProcess.run
train
def run(self): """Runs the process.""" # Prevent the KeyboardInterrupt being raised inside the process. # This will prevent a process from generating a traceback when interrupted. signal.signal(signal.SIGINT, signal.SIG_IGN) # A SIGTERM signal handler is necessary to make sure IPC is cleaned up # correctly on terminate. signal.signal(signal.SIGTERM, self._SigTermHandler) # A SIGSEGV signal handler is necessary to try to indicate where # worker failed. # WARNING the SIGSEGV handler will deadlock the process on a real segfault. if self._enable_sigsegv_handler: self._original_sigsegv_handler = signal.signal( signal.SIGSEGV, self._SigSegvHandler) self._pid = os.getpid() self._process_information = process_info.ProcessInfo(self._pid) # We need to set the is running status explicitly to True in case # the process completes before the engine is able to determine # the status of the process, e.g. in the unit tests. self._status_is_running = True # Logging needs to be configured before the first output otherwise we # mess up the logging of the parent process. loggers.ConfigureLogging( debug_output=self._debug_output, filename=self._log_filename, quiet_mode=self._quiet_mode) logger.debug( 'Process: {0!s} (PID: {1:d}) started'.format(self._name, self._pid)) self._StartProcessStatusRPCServer() self._Main() self._StopProcessStatusRPCServer() logger.debug( 'Process: {0!s} (PID: {1:d}) stopped'.format(self._name, self._pid)) # Make sure log files are cleanly closed. logging.shutdown() self._status_is_running = False
python
{ "resource": "" }
q26028
SystemdJournalParser._ParseDataObject
train
def _ParseDataObject(self, file_object, file_offset): """Parses a data object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the data object relative to the start of the file-like object. Returns: bytes: data. Raises: ParseError: if the data object cannot be parsed. """ data_object_map = self._GetDataTypeMap('systemd_journal_data_object') try: data_object, _ = self._ReadStructureFromFileObject( file_object, file_offset, data_object_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse data object at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) if data_object.object_type != self._OBJECT_TYPE_DATA: raise errors.ParseError('Unsupported object type: {0:d}.'.format( data_object.object_type)) if data_object.object_flags not in ( 0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4): raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format( data_object.object_flags)) # The data is read separately for performance reasons. data_size = data_object.data_size - 64 data = file_object.read(data_size) if data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ: data = lzma.decompress(data) elif data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4: uncompressed_size_map = self._GetDataTypeMap('uint32le') try: uncompressed_size = self._ReadStructureFromByteStream( data, file_offset + 64, uncompressed_size_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with ' 'error: {1!s}').format(file_offset + 64, exception)) data = lz4.block.decompress( data[8:], uncompressed_size=uncompressed_size) return data
python
{ "resource": "" }
q26029
SystemdJournalParser._ParseEntryArrayObject
train
def _ParseEntryArrayObject(self, file_object, file_offset): """Parses an entry array object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry array object relative to the start of the file-like object. Returns: systemd_journal_entry_array_object: entry array object. Raises: ParseError: if the entry array object cannot be parsed. """ entry_array_object_map = self._GetDataTypeMap( 'systemd_journal_entry_array_object') try: entry_array_object, _ = self._ReadStructureFromFileObject( file_object, file_offset, entry_array_object_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse entry array object at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY: raise errors.ParseError('Unsupported object type: {0:d}.'.format( entry_array_object.object_type)) if entry_array_object.object_flags != 0: raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format( entry_array_object.object_flags)) return entry_array_object
python
{ "resource": "" }
q26030
SystemdJournalParser._ParseEntryObject
train
def _ParseEntryObject(self, file_object, file_offset): """Parses an entry object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry object relative to the start of the file-like object. Returns: systemd_journal_entry_object: entry object. Raises: ParseError: if the entry object cannot be parsed. """ entry_object_map = self._GetDataTypeMap('systemd_journal_entry_object') try: entry_object, _ = self._ReadStructureFromFileObject( file_object, file_offset, entry_object_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse entry object at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) if entry_object.object_type != self._OBJECT_TYPE_ENTRY: raise errors.ParseError('Unsupported object type: {0:d}.'.format( entry_object.object_type)) if entry_object.object_flags != 0: raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format( entry_object.object_flags)) return entry_object
python
{ "resource": "" }
q26031
SystemdJournalParser._ParseEntryObjectOffsets
train
def _ParseEntryObjectOffsets(self, file_object, file_offset): """Parses entry array objects for the offset of the entry objects. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the first entry array object relative to the start of the file-like object. Returns: list[int]: offsets of the entry objects. """ entry_array_object = self._ParseEntryArrayObject(file_object, file_offset) entry_object_offsets = list(entry_array_object.entry_object_offsets) while entry_array_object.next_entry_array_offset != 0: entry_array_object = self._ParseEntryArrayObject( file_object, entry_array_object.next_entry_array_offset) entry_object_offsets.extend(entry_array_object.entry_object_offsets) return entry_object_offsets
python
{ "resource": "" }
q26032
SystemdJournalParser._ParseJournalEntry
train
def _ParseJournalEntry(self, file_object, file_offset): """Parses a journal entry. This method will generate an event per ENTRY object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry object relative to the start of the file-like object. Returns: dict[str, objects]: entry items per key. Raises: ParseError: when an object offset is out of bounds. """ entry_object = self._ParseEntryObject(file_object, file_offset) # The data is read separately for performance reasons. entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item') file_offset += 64 data_end_offset = file_offset + entry_object.data_size - 64 fields = {'real_time': entry_object.real_time} while file_offset < data_end_offset: try: entry_item, entry_item_data_size = self._ReadStructureFromFileObject( file_object, file_offset, entry_item_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse entry item at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) file_offset += entry_item_data_size if entry_item.object_offset < self._maximum_journal_file_offset: raise errors.ParseError( 'object offset should be after hash tables ({0:d} < {1:d})'.format( entry_item.object_offset, self._maximum_journal_file_offset)) event_data = self._ParseDataObject(file_object, entry_item.object_offset) event_string = event_data.decode('utf-8') key, value = event_string.split('=', 1) fields[key] = value return fields
python
{ "resource": "" }
q26033
SystemdJournalParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object): """Parses a Systemd journal file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the header cannot be parsed. """ file_header_map = self._GetDataTypeMap('systemd_journal_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse file header with error: {0!s}'.format( exception)) if file_header.signature != self._FILE_SIGNATURE: raise errors.UnableToParseFile('Invalid file signature.') if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES: raise errors.UnableToParseFile( 'Unsupported file header size: {0:d}.'.format( file_header.header_size)) data_hash_table_end_offset = ( file_header.data_hash_table_offset + file_header.data_hash_table_size) field_hash_table_end_offset = ( file_header.field_hash_table_offset + file_header.field_hash_table_size) self._maximum_journal_file_offset = max( data_hash_table_end_offset, field_hash_table_end_offset) entry_object_offsets = self._ParseEntryObjectOffsets( file_object, file_header.entry_array_offset) for entry_object_offset in entry_object_offsets: if entry_object_offset == 0: continue try: fields = self._ParseJournalEntry(file_object, entry_object_offset) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse journal entry at offset: 0x{0:08x} with ' 'error: {1!s}').format(entry_object_offset, exception)) return event_data = SystemdJournalEventData() event_data.body = fields.get('MESSAGE', None) event_data.hostname = fields.get('_HOSTNAME', None) event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None) if event_data.reporter and event_data.reporter != 'kernel': event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None)) date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=fields['real_time']) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26034
WinEVTFormatter.GetEventTypeString
train
def GetEventTypeString(self, event_type):
  """Retrieves a string representation of the event type.

  Args:
    event_type (int): event type.

  Returns:
    str: description of the event type.
  """
  if 0 <= event_type < len(self._EVENT_TYPES):
    return self._EVENT_TYPES[event_type]
  return 'Unknown {0:d}'.format(event_type)
python
{ "resource": "" }
q26035
WinEVTFormatter.GetSeverityString
train
def GetSeverityString(self, severity):
  """Retrieves a string representation of the severity.

  Args:
    severity (int): severity.

  Returns:
    str: description of the event severity.
  """
  if 0 <= severity < len(self._SEVERITY):
    return self._SEVERITY[severity]
  return 'Unknown {0:d}'.format(severity)
python
{ "resource": "" }
q26036
SQLiteCache.CacheQueryResults
train
def CacheQueryResults( self, sql_results, attribute_name, key_name, column_names): """Build a dictionary object based on a SQL command. This function will take a SQL command, execute it and for each resulting row it will store a key in a dictionary. An example:: sql_results = A SQL result object after executing the SQL command: 'SELECT foo, bla, bar FROM my_table' attribute_name = 'all_the_things' key_name = 'foo' column_names = ['bla', 'bar'] Results from running this against the database: 'first', 'stuff', 'things' 'second', 'another stuff', 'another thing' This will result in a dictionary object being created in the cache, called 'all_the_things' and it will contain the following value:: all_the_things = { 'first': ['stuff', 'things'], 'second': ['another_stuff', 'another_thing'], 'third': ['single_thing']} Args: sql_results (sqlite3.Cursor): result after executing a SQL command on a database. attribute_name (str): attribute name in the cache to store results to. This will be the name of the dictionary attribute. key_name (str): name of the result field that should be used as a key in the resulting dictionary that is created. column_names (list[str]): of column names that are stored as values to the dictionary. If this list has only one value in it the value will be stored directly, otherwise the value will be a list containing the extracted results based on the names provided in this list. """ row = sql_results.fetchone() if not row: return # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". keys_name_to_index_map = { name: index for index, name in enumerate(row.keys())} attribute_value = {} while row: value_index = keys_name_to_index_map.get(key_name) key_value = row[value_index] attribute_value[key_value] = [] for column_name in column_names: value_index = keys_name_to_index_map.get(column_name) column_value = row[value_index] attribute_value[key_value].append(column_value) row = sql_results.fetchone() setattr(self, attribute_name, attribute_value)
python
{ "resource": "" }
q26037
SQLiteCache.GetRowCache
train
def GetRowCache(self, query):
  """Retrieves the row cache for a specific query.

  The row cache is a set that contains hashes of values in a row. The row
  cache is used to find duplicate rows when both a database and the same
  database with its WAL file are parsed.

  Args:
    query (str): query.

  Returns:
    set: hashes of the rows that have been parsed.
  """
  query_hash = hash(query)
  if query_hash not in self._row_caches:
    self._row_caches[query_hash] = set()
  return self._row_caches[query_hash]
python
{ "resource": "" }
q26038
SQLiteDatabase._CopyFileObjectToTemporaryFile
train
def _CopyFileObjectToTemporaryFile(self, file_object, temporary_file):
  """Copies the contents of the file-like object to a temporary file.

  Args:
    file_object (dfvfs.FileIO): file-like object.
    temporary_file (file): temporary file.
  """
  file_object.seek(0, os.SEEK_SET)
  data = file_object.read(self._READ_BUFFER_SIZE)
  while data:
    temporary_file.write(data)
    data = file_object.read(self._READ_BUFFER_SIZE)
python
{ "resource": "" }
q26039
SQLiteDatabase.Close
train
def Close(self): """Closes the database connection and cleans up the temporary file.""" self.schema = {} if self._is_open: self._database.close() self._database = None if os.path.exists(self._temp_db_file_path): try: os.remove(self._temp_db_file_path) except (OSError, IOError) as exception: logger.warning(( 'Unable to remove temporary copy: {0:s} of SQLite database: ' '{1:s} with error: {2!s}').format( self._temp_db_file_path, self._filename, exception)) self._temp_db_file_path = '' if os.path.exists(self._temp_wal_file_path): try: os.remove(self._temp_wal_file_path) except (OSError, IOError) as exception: logger.warning(( 'Unable to remove temporary copy: {0:s} of SQLite database: ' '{1:s} with error: {2!s}').format( self._temp_wal_file_path, self._filename, exception)) self._temp_wal_file_path = '' self._is_open = False
python
{ "resource": "" }
q26040
SQLiteDatabase.Open
train
def Open(self, file_object, wal_file_object=None): """Opens a SQLite database file. Since pysqlite cannot read directly from a file-like object a temporary copy of the file is made. After creating a copy the database file this function sets up a connection with the database and determines the names of the tables. Args: file_object (dfvfs.FileIO): file-like object. wal_file_object (Optional[dfvfs.FileIO]): file-like object for the Write-Ahead Log (WAL) file. Raises: IOError: if the file-like object cannot be read. OSError: if the file-like object cannot be read. sqlite3.DatabaseError: if the database cannot be parsed. ValueError: if the file-like object is missing. """ if not file_object: raise ValueError('Missing file object.') # TODO: Current design copies the entire file into a buffer # that is parsed by each SQLite parser. This is not very efficient, # especially when many SQLite parsers are ran against a relatively # large SQLite database. This temporary file that is created should # be usable by all SQLite parsers so the file should only be read # once in memory and then deleted when all SQLite parsers have completed. # TODO: Change this into a proper implementation using APSW # and virtual filesystems when that will be available. # Info: http://apidoc.apsw.googlecode.com/hg/vfs.html#vfs and # http://apidoc.apsw.googlecode.com/hg/example.html#example-vfs # Until then, just copy the file into a tempfile and parse it. temporary_file = tempfile.NamedTemporaryFile( delete=False, dir=self._temporary_directory) try: self._CopyFileObjectToTemporaryFile(file_object, temporary_file) self._temp_db_file_path = temporary_file.name except IOError: os.remove(temporary_file.name) raise finally: temporary_file.close() if wal_file_object: # Create WAL file using same filename so it is available for # sqlite3.connect() temporary_filename = '{0:s}-wal'.format(self._temp_db_file_path) temporary_file = open(temporary_filename, 'wb') try: self._CopyFileObjectToTemporaryFile(wal_file_object, temporary_file) self._temp_wal_file_path = temporary_filename except IOError: os.remove(temporary_filename) raise finally: temporary_file.close() self._database = sqlite3.connect(self._temp_db_file_path) try: self._database.row_factory = sqlite3.Row cursor = self._database.cursor() sql_results = cursor.execute(self.SCHEMA_QUERY) self.schema = { table_name: ' '.join(query.split()) for table_name, query in sql_results} except sqlite3.DatabaseError as exception: self._database.close() self._database = None os.remove(self._temp_db_file_path) self._temp_db_file_path = '' if self._temp_wal_file_path: os.remove(self._temp_wal_file_path) self._temp_wal_file_path = '' logger.debug( 'Unable to parse SQLite database: {0:s} with error: {1!s}'.format( self._filename, exception)) raise self._is_open = True
python
{ "resource": "" }
q26041
SQLiteDatabase.Query
train
def Query(self, query):
  """Queries the database.

  Args:
    query (str): SQL query.

  Returns:
    sqlite3.Cursor: results.

  Raises:
    sqlite3.DatabaseError: if querying the database fails.
  """
  cursor = self._database.cursor()
  cursor.execute(query)
  return cursor
python
{ "resource": "" }
q26042
SQLiteParser.ParseFileEntry
train
def ParseFileEntry(self, parser_mediator, file_entry): """Parses a SQLite database file entry. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry to be parsed. Raises: UnableToParseFile: when the file cannot be parsed. """ filename = parser_mediator.GetFilename() database = SQLiteDatabase( filename, temporary_directory=parser_mediator.temporary_directory) file_object = file_entry.GetFileObject() try: database.Open(file_object) except (IOError, ValueError, sqlite3.DatabaseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to open SQLite database with error: {0!s}'.format(exception)) file_object.close() return database_wal, wal_file_entry = self._OpenDatabaseWithWAL( parser_mediator, file_entry, file_object, filename) file_object.close() # Create a cache in which the resulting tables are cached. cache = SQLiteCache() try: table_names = frozenset(database.tables) for plugin in self._plugins: if not plugin.REQUIRED_TABLES.issubset(table_names): continue schema_match = plugin.CheckSchema(database) if plugin.REQUIRES_SCHEMA_MATCH and not schema_match: parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} found required tables but not a matching ' 'schema').format(plugin.NAME)) continue parser_mediator.SetFileEntry(file_entry) parser_mediator.AddEventAttribute('schema_match', schema_match) try: plugin.UpdateChainAndProcess( parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} unable to parse SQLite database with error: ' '{1!s}').format(plugin.NAME, exception)) finally: parser_mediator.RemoveEventAttribute('schema_match') if not database_wal: continue schema_match = plugin.CheckSchema(database) parser_mediator.SetFileEntry(wal_file_entry) parser_mediator.AddEventAttribute('schema_match', schema_match) try: plugin.UpdateChainAndProcess( parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} unable to parse SQLite database and WAL with ' 'error: {1!s}').format(plugin.NAME, exception)) finally: parser_mediator.RemoveEventAttribute('schema_match') finally: database.Close()
python
{ "resource": "" }
q26043
BinaryCookieParser._ParseCString
train
def _ParseCString(self, page_data, string_offset): """Parses a C string from the page data. Args: page_data (bytes): page data. string_offset (int): offset of the string relative to the start of the page. Returns: str: string. Raises: ParseError: when the string cannot be parsed. """ cstring_map = self._GetDataTypeMap('cstring') try: value_string = self._ReadStructureFromByteStream( page_data[string_offset:], string_offset, cstring_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map string data at offset: 0x{0:08x} with error: ' '{1!s}').format(string_offset, exception)) return value_string.rstrip('\x00')
python
{ "resource": "" }
q26044
BinaryCookieParser._ParsePage
train
def _ParsePage(self, parser_mediator, file_offset, page_data): """Parses a page. Args: parser_mediator (ParserMediator): parser mediator. file_offset (int): offset of the data relative from the start of the file-like object. page_data (bytes): page data. Raises: ParseError: when the page cannot be parsed. """ page_header_map = self._GetDataTypeMap('binarycookies_page_header') try: page_header = self._ReadStructureFromByteStream( page_data, file_offset, page_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map page header data at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) for record_offset in page_header.offsets: if parser_mediator.abort: break self._ParseRecord(parser_mediator, page_data, record_offset)
python
{ "resource": "" }
q26045
BinaryCookieParser._ParseRecord
train
def _ParseRecord(self, parser_mediator, page_data, record_offset): """Parses a record from the page data. Args: parser_mediator (ParserMediator): parser mediator. page_data (bytes): page data. record_offset (int): offset of the record relative to the start of the page. Raises: ParseError: when the record cannot be parsed. """ record_header_map = self._GetDataTypeMap('binarycookies_record_header') try: record_header = self._ReadStructureFromByteStream( page_data[record_offset:], record_offset, record_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map record header data at offset: 0x{0:08x} with error: ' '{1!s}').format(record_offset, exception)) event_data = SafariBinaryCookieEventData() event_data.flags = record_header.flags if record_header.url_offset: data_offset = record_offset + record_header.url_offset event_data.url = self._ParseCString(page_data, data_offset) if record_header.name_offset: data_offset = record_offset + record_header.name_offset event_data.cookie_name = self._ParseCString(page_data, data_offset) if record_header.path_offset: data_offset = record_offset + record_header.path_offset event_data.path = self._ParseCString(page_data, data_offset) if record_header.value_offset: data_offset = record_offset + record_header.value_offset event_data.cookie_value = self._ParseCString(page_data, data_offset) if record_header.creation_time: date_time = dfdatetime_cocoa_time.CocoaTime( timestamp=record_header.creation_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) if record_header.expiration_time: date_time = dfdatetime_cocoa_time.CocoaTime( timestamp=record_header.expiration_time) else: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) for plugin in self._cookie_plugins: if parser_mediator.abort: break if event_data.cookie_name != plugin.COOKIE_NAME: continue try: plugin.UpdateChainAndProcess( parser_mediator, cookie_name=event_data.cookie_name, cookie_data=event_data.cookie_value, url=event_data.url) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning( 'plugin: {0:s} unable to parse cookie with error: {1!s}'.format( plugin.NAME, exception))
python
{ "resource": "" }
q26046
BinaryCookieParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object): """Parses a Safari binary cookie file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): file-like object to be parsed. Raises: UnableToParseFile: when the file cannot be parsed, this will signal the event extractor to apply other parsers. """ file_header_map = self._GetDataTypeMap('binarycookies_file_header') try: file_header, file_header_data_size = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to read file header with error: {0!s}.'.format(exception)) if file_header.signature != self._SIGNATURE: raise errors.UnableToParseFile('Unsupported file signature.') file_offset = file_header_data_size # TODO: move page sizes array into file header, this will require dtFabric # to compare signature as part of data map. page_sizes_data_size = file_header.number_of_pages * 4 page_sizes_data = file_object.read(page_sizes_data_size) context = dtfabric_data_maps.DataTypeMapContext(values={ 'binarycookies_file_header': file_header}) page_sizes_map = self._GetDataTypeMap('binarycookies_page_sizes') try: page_sizes_array = self._ReadStructureFromByteStream( page_sizes_data, file_offset, page_sizes_map, context=context) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map page sizes data at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) file_offset += page_sizes_data_size for page_number, page_size in enumerate(page_sizes_array): if parser_mediator.abort: break page_data = file_object.read(page_size) if len(page_data) != page_size: parser_mediator.ProduceExtractionWarning( 'unable to read page: {0:d}'.format(page_number)) break self._ParsePage(parser_mediator, file_offset, page_data) file_offset += page_size
python
{ "resource": "" }
q26047
DynamicFieldsHelper._FormatDate
train
def _FormatDate(self, event): """Formats the date. Args: event (EventObject): event. Returns: str: date field. """ # TODO: preserve dfdatetime as an object. # TODO: add support for self._output_mediator.timezone date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=event.timestamp) year, month, day_of_month = date_time.GetDate() try: return '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day_of_month) except (TypeError, ValueError): self._ReportEventError(event, ( 'unable to copy timestamp: {0!s} to a human readable date. ' 'Defaulting to: "0000-00-00"').format(event.timestamp)) return '0000-00-00'
python
{ "resource": "" }
q26048
DynamicFieldsHelper._FormatDateTime
train
def _FormatDateTime(self, event): """Formats the date and time in ISO 8601 format. Args: event (EventObject): event. Returns: str: date and time field. """ try: return timelib.Timestamp.CopyToIsoFormat( event.timestamp, timezone=self._output_mediator.timezone, raise_error=True) except (OverflowError, ValueError) as exception: self._ReportEventError(event, ( 'unable to copy timestamp: {0!s} to a human readable date and time ' 'with error: {1!s}. Defaulting to: "0000-00-00T00:00:00"').format( event.timestamp, exception)) return '0000-00-00T00:00:00'
python
{ "resource": "" }
q26049
DynamicFieldsHelper._FormatInode
train
def _FormatInode(self, event):
  """Formats the inode.

  Args:
    event (EventObject): event.

  Returns:
    str: inode field.
  """
  inode = event.inode
  if inode is None:
    if hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode'):
      inode = event.pathspec.image_inode

  if inode is None:
    inode = '-'

  return inode
python
{ "resource": "" }
q26050
DynamicFieldsHelper._FormatMessage
train
def _FormatMessage(self, event): """Formats the message. Args: event (EventObject): event. Returns: str: message field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event. """ message, _ = self._output_mediator.GetFormattedMessages(event) if message is None: data_type = getattr(event, 'data_type', 'UNKNOWN') raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) return message
python
{ "resource": "" }
q26051
DynamicFieldsHelper._FormatMessageShort
train
def _FormatMessageShort(self, event):
    """Formats the short message.

    Args:
      event (EventObject): event.

    Returns:
      str: short message field.

    Raises:
      NoFormatterFound: if no event formatter can be found to match the data
          type in the event.
    """
    _, message_short = self._output_mediator.GetFormattedMessages(event)
    if message_short is None:
      data_type = getattr(event, 'data_type', 'UNKNOWN')
      raise errors.NoFormatterFound(
          'Unable to find event formatter for: {0:s}.'.format(data_type))

    return message_short
python
{ "resource": "" }
q26052
DynamicFieldsHelper._FormatSourceShort
train
def _FormatSourceShort(self, event):
    """Formats the short source.

    Args:
      event (EventObject): event.

    Returns:
      str: short source field.

    Raises:
      NoFormatterFound: If no event formatter can be found to match the data
          type in the event.
    """
    source_short, _ = self._output_mediator.GetFormattedSources(event)
    if source_short is None:
      data_type = getattr(event, 'data_type', 'UNKNOWN')
      raise errors.NoFormatterFound(
          'Unable to find event formatter for: {0:s}.'.format(data_type))

    return source_short
python
{ "resource": "" }
q26053
DynamicFieldsHelper._FormatTag
train
def _FormatTag(self, event):
    """Formats the event tag.

    Args:
      event (EventObject): event.

    Returns:
      str: event tag field.
    """
    tag = getattr(event, 'tag', None)
    if not tag:
      return '-'

    return ' '.join(tag.labels)
python
{ "resource": "" }
q26054
DynamicFieldsHelper.GetFormattedField
train
def GetFormattedField(self, event, field_name):
    """Formats the specified field.

    Args:
      event (EventObject): event.
      field_name (str): name of the field.

    Returns:
      str: value of the field.
    """
    callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)
    callback_function = None
    if callback_name:
      callback_function = getattr(self, callback_name, None)

    if callback_function:
      output_value = callback_function(event)
    else:
      output_value = getattr(event, field_name, '-')

    if output_value is None:
      output_value = '-'
    elif not isinstance(output_value, py2to3.STRING_TYPES):
      output_value = '{0!s}'.format(output_value)

    return output_value
python
{ "resource": "" }
q26055
SerializedAttributeContainerList.GetAttributeContainerByIndex
train
def GetAttributeContainerByIndex(self, index):
    """Retrieves a specific serialized attribute container from the list.

    Args:
      index (int): attribute container index.

    Returns:
      bytes: serialized attribute container data or None if not available.

    Raises:
      IndexError: if the index is less than zero.
    """
    if index < 0:
      raise IndexError(
          'Unsupported negative index value: {0:d}.'.format(index))

    if index < len(self._list):
      return self._list[index]

    return None
python
{ "resource": "" }
q26056
SerializedAttributeContainerList.PopAttributeContainer
train
def PopAttributeContainer(self):
    """Pops a serialized attribute container from the list.

    Returns:
      bytes: serialized attribute container data.
    """
    try:
      serialized_data = self._list.pop(0)
      self.data_size -= len(serialized_data)
      return serialized_data
    except IndexError:
      return None
python
{ "resource": "" }
q26057
SerializedAttributeContainerList.PushAttributeContainer
train
def PushAttributeContainer(self, serialized_data):
    """Pushes a serialized attribute container onto the list.

    Args:
      serialized_data (bytes): serialized attribute container data.
    """
    self._list.append(serialized_data)
    self.data_size += len(serialized_data)
    self.next_sequence_number += 1
python
{ "resource": "" }
q26058
BaseStorageFile._DeserializeAttributeContainer
train
def _DeserializeAttributeContainer(self, container_type, serialized_data):
    """Deserializes an attribute container.

    Args:
      container_type (str): attribute container type.
      serialized_data (bytes): serialized attribute container data.

    Returns:
      AttributeContainer: attribute container or None.

    Raises:
      IOError: if the serialized data cannot be decoded.
      OSError: if the serialized data cannot be decoded.
    """
    if not serialized_data:
      return None

    if self._serializers_profiler:
      self._serializers_profiler.StartTiming(container_type)

    try:
      serialized_string = serialized_data.decode('utf-8')
    except UnicodeDecodeError as exception:
      raise IOError('Unable to decode serialized data: {0!s}'.format(
          exception))

    attribute_container = self._serializer.ReadSerialized(serialized_string)

    if self._serializers_profiler:
      self._serializers_profiler.StopTiming(container_type)

    return attribute_container
python
{ "resource": "" }
q26059
BaseStorageFile._GetSerializedAttributeContainerByIndex
train
def _GetSerializedAttributeContainerByIndex(self, container_type, index):
    """Retrieves a specific serialized attribute container.

    Args:
      container_type (str): attribute container type.
      index (int): attribute container index.

    Returns:
      bytes: serialized attribute container data or None if not available.
    """
    container_list = self._GetSerializedAttributeContainerList(container_type)
    return container_list.GetAttributeContainerByIndex(index)
python
{ "resource": "" }
q26060
BaseStorageFile._GetSerializedAttributeContainerList
train
def _GetSerializedAttributeContainerList(self, container_type):
    """Retrieves a serialized attribute container list.

    Args:
      container_type (str): attribute container type.

    Returns:
      SerializedAttributeContainerList: serialized attribute container list.
    """
    container_list = self._serialized_attribute_containers.get(
        container_type, None)
    if not container_list:
      container_list = SerializedAttributeContainerList()
      self._serialized_attribute_containers[container_type] = container_list

    return container_list
python
{ "resource": "" }
q26061
BaseStorageFile._SerializeAttributeContainer
train
def _SerializeAttributeContainer(self, attribute_container):
    """Serializes an attribute container.

    Args:
      attribute_container (AttributeContainer): attribute container.

    Returns:
      bytes: serialized attribute container.

    Raises:
      IOError: if the attribute container cannot be serialized.
      OSError: if the attribute container cannot be serialized.
    """
    if self._serializers_profiler:
      self._serializers_profiler.StartTiming(
          attribute_container.CONTAINER_TYPE)

    try:
      attribute_container_data = self._serializer.WriteSerialized(
          attribute_container)
      if not attribute_container_data:
        raise IOError(
            'Unable to serialize attribute container: {0:s}.'.format(
                attribute_container.CONTAINER_TYPE))

      attribute_container_data = attribute_container_data.encode('utf-8')
    finally:
      if self._serializers_profiler:
        self._serializers_profiler.StopTiming(
            attribute_container.CONTAINER_TYPE)

    return attribute_container_data
python
{ "resource": "" }
q26062
StorageFileWriter._GetMergeTaskStorageFilePath
train
def _GetMergeTaskStorageFilePath(self, task):
    """Retrieves the path of a task storage file in the merge directory.

    Args:
      task (Task): task.

    Returns:
      str: path of a task storage file in the merge directory.
    """
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._merge_task_storage_path, filename)
python
{ "resource": "" }
q26063
StorageFileWriter._GetProcessedStorageFilePath
train
def _GetProcessedStorageFilePath(self, task):
    """Retrieves the path of a task storage file in the processed directory.

    Args:
      task (Task): task.

    Returns:
      str: path of a task storage file in the processed directory.
    """
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._processed_task_storage_path, filename)
python
{ "resource": "" }
q26064
StorageFileWriter._GetTaskStorageFilePath
train
def _GetTaskStorageFilePath(self, task):
    """Retrieves the path of a task storage file in the temporary directory.

    Args:
      task (Task): task.

    Returns:
      str: path of a task storage file in the temporary directory.
    """
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._task_storage_path, filename)
python
{ "resource": "" }
q26065
StorageFileWriter._UpdateCounters
train
def _UpdateCounters(self, event):
    """Updates the counters.

    Args:
      event (EventObject): event.
    """
    self._session.parsers_counter['total'] += 1

    # Here we want the name of the parser or plugin not the parser chain.
    parser_name = getattr(event, 'parser', '')
    _, _, parser_name = parser_name.rpartition('/')
    if not parser_name:
      parser_name = 'N/A'

    self._session.parsers_counter[parser_name] += 1
python
{ "resource": "" }
q26066
StorageFileWriter.CheckTaskReadyForMerge
train
def CheckTaskReadyForMerge(self, task):
    """Checks if a task is ready for merging with this session storage.

    If the task is ready to be merged, this method also sets the task's
    storage file size.

    Args:
      task (Task): task.

    Returns:
      bool: True if the task is ready to be merged.

    Raises:
      IOError: if the storage type is not supported or if the temporary path
          for the task storage does not exist.
      OSError: if the storage type is not supported or if the temporary path
          for the task storage does not exist.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    if not self._processed_task_storage_path:
      raise IOError('Missing processed task storage path.')

    processed_storage_file_path = self._GetProcessedStorageFilePath(task)

    try:
      stat_info = os.stat(processed_storage_file_path)
    except (IOError, OSError):
      return False

    task.storage_file_size = stat_info.st_size
    return True
python
{ "resource": "" }
q26067
StorageFileWriter.GetProcessedTaskIdentifiers
train
def GetProcessedTaskIdentifiers(self):
    """Identifiers for tasks which have been processed.

    Returns:
      list[str]: task identifiers that are processed.

    Raises:
      IOError: if the storage type is not supported or if the temporary path
          for the task storage does not exist.
      OSError: if the storage type is not supported or if the temporary path
          for the task storage does not exist.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    if not self._processed_task_storage_path:
      raise IOError('Missing processed task storage path.')

    return [
        path.replace('.plaso', '')
        for path in os.listdir(self._processed_task_storage_path)]
python
{ "resource": "" }
q26068
StorageFileWriter.SetSerializersProfiler
train
def SetSerializersProfiler(self, serializers_profiler):
    """Sets the serializers profiler.

    Args:
      serializers_profiler (SerializersProfiler): serializers profiler.
    """
    self._serializers_profiler = serializers_profiler
    if self._storage_file:
      self._storage_file.SetSerializersProfiler(serializers_profiler)
python
{ "resource": "" }
q26069
StorageFileWriter.SetStorageProfiler
train
def SetStorageProfiler(self, storage_profiler):
    """Sets the storage profiler.

    Args:
      storage_profiler (StorageProfiler): storage profiler.
    """
    self._storage_profiler = storage_profiler
    if self._storage_file:
      self._storage_file.SetStorageProfiler(storage_profiler)
python
{ "resource": "" }
q26070
StorageFileWriter.StartMergeTaskStorage
train
def StartMergeTaskStorage(self, task):
    """Starts a merge of a task storage with the session storage.

    Args:
      task (Task): task.

    Returns:
      StorageMergeReader: storage merge reader of the task storage.

    Raises:
      IOError: if the storage file cannot be opened or if the storage type is
          not supported or if the temporary path for the task storage does
          not exist or does not refer to a file.
      OSError: if the storage file cannot be opened or if the storage type is
          not supported or if the temporary path for the task storage does
          not exist or does not refer to a file.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    if not self._merge_task_storage_path:
      raise IOError('Missing merge task storage path.')

    merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)

    if not os.path.isfile(merge_storage_file_path):
      raise IOError('Merge task storage path is not a file.')

    return self._CreateTaskStorageMergeReader(merge_storage_file_path)
python
{ "resource": "" }
q26071
StorageFileWriter.StartTaskStorage
train
def StartTaskStorage(self):
    """Creates a temporary path for the task storage.

    Raises:
      IOError: if the storage type is not supported or if the temporary path
          for the task storage already exists.
      OSError: if the storage type is not supported or if the temporary path
          for the task storage already exists.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    if self._task_storage_path:
      raise IOError('Task storage path already exists.')

    output_directory = os.path.dirname(self._output_file)
    self._task_storage_path = tempfile.mkdtemp(dir=output_directory)

    self._merge_task_storage_path = os.path.join(
        self._task_storage_path, 'merge')
    os.mkdir(self._merge_task_storage_path)

    self._processed_task_storage_path = os.path.join(
        self._task_storage_path, 'processed')
    os.mkdir(self._processed_task_storage_path)
python
{ "resource": "" }
q26072
StorageFileWriter.StopTaskStorage
train
def StopTaskStorage(self, abort=False):
    """Removes the temporary path for the task storage.

    The results of tasks will be lost on abort.

    Args:
      abort (bool): True to indicate the stop is issued on abort.

    Raises:
      IOError: if the storage type is not supported.
      OSError: if the storage type is not supported.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    if os.path.isdir(self._merge_task_storage_path):
      if abort:
        shutil.rmtree(self._merge_task_storage_path)
      else:
        os.rmdir(self._merge_task_storage_path)

    if os.path.isdir(self._processed_task_storage_path):
      if abort:
        shutil.rmtree(self._processed_task_storage_path)
      else:
        os.rmdir(self._processed_task_storage_path)

    if os.path.isdir(self._task_storage_path):
      if abort:
        shutil.rmtree(self._task_storage_path)
      else:
        os.rmdir(self._task_storage_path)

    self._merge_task_storage_path = None
    self._processed_task_storage_path = None
    self._task_storage_path = None
python
{ "resource": "" }
q26073
StorageFileWriter.WriteSessionCompletion
train
def WriteSessionCompletion(self, aborted=False):
    """Writes session completion information.

    Args:
      aborted (Optional[bool]): True if the session was aborted.

    Raises:
      IOError: if the storage type is not supported or when the storage
          writer is closed.
      OSError: if the storage type is not supported or when the storage
          writer is closed.
    """
    self._RaiseIfNotWritable()

    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    self._session.aborted = aborted
    session_completion = self._session.CreateSessionCompletion()
    self._storage_file.WriteSessionCompletion(session_completion)
python
{ "resource": "" }
q26074
StorageFileWriter.WriteSessionStart
train
def WriteSessionStart(self):
    """Writes session start information.

    Raises:
      IOError: if the storage type is not supported or when the storage
          writer is closed.
      OSError: if the storage type is not supported or when the storage
          writer is closed.
    """
    self._RaiseIfNotWritable()

    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      raise IOError('Unsupported storage type.')

    session_start = self._session.CreateSessionStart()
    self._storage_file.WriteSessionStart(session_start)
python
{ "resource": "" }
q26075
StorageFileWriter.WriteTaskCompletion
train
def WriteTaskCompletion(self, aborted=False):
    """Writes task completion information.

    Args:
      aborted (Optional[bool]): True if the session was aborted.

    Raises:
      IOError: if the storage type is not supported or when the storage
          writer is closed.
      OSError: if the storage type is not supported or when the storage
          writer is closed.
    """
    self._RaiseIfNotWritable()

    if self._storage_type != definitions.STORAGE_TYPE_TASK:
      raise IOError('Unsupported storage type.')

    self._task.aborted = aborted
    task_completion = self._task.CreateTaskCompletion()
    self._storage_file.WriteTaskCompletion(task_completion)
python
{ "resource": "" }
q26076
StorageFileWriter.WriteTaskStart
train
def WriteTaskStart(self):
    """Writes task start information.

    Raises:
      IOError: if the storage type is not supported or when the storage
          writer is closed.
      OSError: if the storage type is not supported or when the storage
          writer is closed.
    """
    self._RaiseIfNotWritable()

    if self._storage_type != definitions.STORAGE_TYPE_TASK:
      raise IOError('Unsupported storage type.')

    task_start = self._task.CreateTaskStart()
    self._storage_file.WriteTaskStart(task_start)
python
{ "resource": "" }
q26077
McafeeAccessProtectionParser._ConvertToTimestamp
train
def _ConvertToTimestamp(self, date, time, timezone):
    """Converts date and time values into a timestamp.

    The date and time are made up of two strings, the date and the time,
    separated by a tab. The time is in local time. The month and day can
    be either 1 or 2 characters long, e.g.: 7/30/2013\\t10:22:48 AM

    Args:
      date (str): date.
      time (str): time.
      timezone (pytz.timezone): timezone of the date and time.

    Returns:
      int: a timestamp integer containing the number of micro seconds since
          January 1, 1970, 00:00:00 UTC.

    Raises:
      TimestampError: if the timestamp is badly formed or unable to transfer
          the supplied date and time into a timestamp.
    """
    # TODO: check if this is correct, likely not date or not time
    # is more accurate.
    if not date and not time:
      raise errors.TimestampError(
          'Unable to extract timestamp from McAfee AV logline.')

    # TODO: Figure out how McAfee sets Day First and use that here.
    # The in-file time format is '07/30/2013\t10:22:48 AM'.
    try:
      time_string = '{0:s} {1:s}'.format(date, time)
    except UnicodeDecodeError:
      raise errors.TimestampError('Unable to form a timestamp string.')

    return timelib.Timestamp.FromTimeString(time_string, timezone=timezone)
python
{ "resource": "" }
q26078
SessionizeAnalysisPlugin.ExamineEvent
train
def ExamineEvent(self, mediator, event):
    """Analyzes an EventObject and tags it as part of a session.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
      event (EventObject): event to examine.
    """
    if self._session_end_timestamp is None:
      self._session_end_timestamp = (
          event.timestamp + self._maximum_pause_microseconds)
      self._events_per_session.append(0)

    if event.timestamp > self._session_end_timestamp:
      self._session_counter += 1
      self._events_per_session.append(0)
      self._session_end_timestamp = (
          event.timestamp + self._maximum_pause_microseconds)

    # The counter for the current session is always the last item in the
    # list.
    self._events_per_session[-1] += 1

    label = 'session_{0:d}'.format(self._session_counter)

    event_tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, [label])
    mediator.ProduceEventTag(event_tag)

    self._number_of_event_tags += 1
python
{ "resource": "" }
q26079
MultiProcessingQueue.PushItem
train
def PushItem(self, item, block=True):
    """Pushes an item onto the queue.

    Args:
      item (object): item to add.
      block (Optional[bool]): True to block the process when the queue is
          full.

    Raises:
      QueueFull: if the item could not be pushed onto the queue because it's
          full.
    """
    try:
      self._queue.put(item, block=block)
    except Queue.Full as exception:
      raise errors.QueueFull(exception)
python
{ "resource": "" }
q26080
FakeStorageWriter._PrepareAttributeContainer
train
def _PrepareAttributeContainer(self, attribute_container):
    """Prepares an attribute container for storage.

    Args:
      attribute_container (AttributeContainer): attribute container.

    Returns:
      AttributeContainer: copy of the attribute container to store in
          the fake storage.
    """
    attribute_values_hash = hash(
        attribute_container.GetAttributeValuesString())
    identifier = identifiers.FakeIdentifier(attribute_values_hash)
    attribute_container.SetIdentifier(identifier)

    # Make sure the fake storage preserves the state of the attribute
    # container.
    return copy.deepcopy(attribute_container)
python
{ "resource": "" }
q26081
FakeStorageWriter._ReadEventDataIntoEvent
train
def _ReadEventDataIntoEvent(self, event):
    """Reads the data into the event.

    This function is intended to offer backwards compatible event behavior.

    Args:
      event (EventObject): event.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
      return

    event_data_identifier = event.GetEventDataIdentifier()
    if event_data_identifier:
      lookup_key = event_data_identifier.CopyToString()
      event_data = self._event_data[lookup_key]

      for attribute_name, attribute_value in event_data.GetAttributes():
        setattr(event, attribute_name, attribute_value)
python
{ "resource": "" }
q26082
FakeStorageWriter.AddWarning
train
def AddWarning(self, warning):
    """Adds a warning.

    Args:
      warning (ExtractionWarning): warning.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    self._RaiseIfNotWritable()

    warning = self._PrepareAttributeContainer(warning)

    self._warnings.append(warning)
    self.number_of_warnings += 1
python
{ "resource": "" }
q26083
WinFirewallParser._GetStructureValue
train
def _GetStructureValue(self, structure, key):
    """Retrieves a value from a parsed log line, removing empty results.

    Args:
      structure (pyparsing.ParseResults): parsed log line.
      key (str): results key to retrieve from the parsed log line.

    Returns:
      type or None: the value of the named key in the parsed log line, or
          None if the value is a ParseResults object.
    """
    value = structure.get(key)
    return value if not isinstance(value, pyparsing.ParseResults) else None
python
{ "resource": "" }
q26084
WinFirewallParser._ParseCommentRecord
train
def _ParseCommentRecord(self, structure):
    """Parse a comment and store appropriate attributes.

    Args:
      structure (pyparsing.ParseResults): parsed log line.
    """
    comment = structure[1]
    if comment.startswith('Version'):
      _, _, self._version = comment.partition(':')
    elif comment.startswith('Software'):
      _, _, self._software = comment.partition(':')
    elif comment.startswith('Time'):
      _, _, time_format = comment.partition(':')
      if 'local' in time_format.lower():
        self._use_local_timezone = True
python
{ "resource": "" }
q26085
WinFirewallParser._ParseLogLine
train
def _ParseLogLine(self, parser_mediator, structure):
    """Parse a single log line and produce an event object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    """
    try:
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=structure.date_time)
      date_time.is_local_time = True
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid date time value: {0!s}'.format(structure.date_time))
      return

    event_data = WinFirewallEventData()
    event_data.action = self._GetStructureValue(structure, 'action')
    event_data.dest_ip = self._GetStructureValue(structure, 'dest_ip')
    event_data.dest_port = self._GetStructureValue(structure, 'dest_port')
    event_data.flags = self._GetStructureValue(structure, 'flags')
    event_data.icmp_code = self._GetStructureValue(structure, 'icmp_code')
    event_data.icmp_type = self._GetStructureValue(structure, 'icmp_type')
    event_data.info = self._GetStructureValue(structure, 'info')
    event_data.path = self._GetStructureValue(structure, 'path')
    event_data.protocol = self._GetStructureValue(structure, 'protocol')
    event_data.size = self._GetStructureValue(structure, 'size')
    event_data.source_ip = self._GetStructureValue(structure, 'source_ip')
    event_data.source_port = self._GetStructureValue(structure, 'source_port')
    event_data.tcp_ack = self._GetStructureValue(structure, 'tcp_ack')
    event_data.tcp_seq = self._GetStructureValue(structure, 'tcp_seq')
    event_data.tcp_win = self._GetStructureValue(structure, 'tcp_win')

    if self._use_local_timezone:
      time_zone = parser_mediator.timezone
    else:
      time_zone = pytz.UTC

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN, time_zone=time_zone)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26086
WindowsService.FromEvent
train
def FromEvent(cls, service_event):
    """Creates a service object from an event.

    Args:
      service_event (EventObject): event to create a new service object from.

    Returns:
      WindowsService: service.
    """
    _, _, name = service_event.key_path.rpartition(
        WindowsService._REGISTRY_KEY_PATH_SEPARATOR)

    service_type = service_event.regvalue.get('Type', '')
    image_path = service_event.regvalue.get('ImagePath', '')
    start_type = service_event.regvalue.get('Start', '')
    service_dll = service_event.regvalue.get('ServiceDll', '')
    object_name = service_event.regvalue.get('ObjectName', '')

    if service_event.pathspec:
      source = (service_event.pathspec.location, service_event.key_path)
    else:
      source = ('Unknown', 'Unknown')

    return cls(
        name=name, service_type=service_type, image_path=image_path,
        start_type=start_type, object_name=object_name, source=source,
        service_dll=service_dll)
python
{ "resource": "" }
q26087
WindowsService.HumanReadableType
train
def HumanReadableType(self):
    """Return a human readable string describing the type value.

    Returns:
      str: human readable description of the type value.
    """
    if isinstance(self.service_type, py2to3.STRING_TYPES):
      return self.service_type
    return human_readable_service_enums.SERVICE_ENUMS['Type'].get(
        self.service_type, '{0:d}'.format(self.service_type))
python
{ "resource": "" }
q26088
WindowsService.HumanReadableStartType
train
def HumanReadableStartType(self):
    """Return a human readable string describing the start type value.

    Returns:
      str: human readable description of the start type value.
    """
    if isinstance(self.start_type, py2to3.STRING_TYPES):
      return self.start_type
    return human_readable_service_enums.SERVICE_ENUMS['Start'].get(
        self.start_type, '{0:d}'.format(self.start_type))
python
{ "resource": "" }
q26089
WindowsServiceCollection.AddService
train
def AddService(self, new_service):
    """Add a new service to the list of ones we know about.

    Args:
      new_service (WindowsService): the service to add.
    """
    for service in self._services:
      if new_service == service:
        # If this service is the same as one we already know about, we
        # just want to add where it came from.
        service.sources.append(new_service.sources[0])
        return

    # We only add a new object to our list if we don't have
    # an identical one already.
    self._services.append(new_service)
python
{ "resource": "" }
q26090
WindowsServicesAnalysisPlugin._FormatServiceText
train
def _FormatServiceText(self, service):
    """Produces a human readable multi-line string representing the service.

    Args:
      service (WindowsService): service to format.

    Returns:
      str: human readable representation of a Windows Service.
    """
    string_segments = [
        service.name,
        '\tImage Path = {0:s}'.format(service.image_path),
        '\tService Type = {0:s}'.format(service.HumanReadableType()),
        '\tStart Type = {0:s}'.format(service.HumanReadableStartType()),
        '\tService Dll = {0:s}'.format(service.service_dll),
        '\tObject Name = {0:s}'.format(service.object_name),
        '\tSources:']

    for source in service.sources:
      string_segments.append('\t\t{0:s}:{1:s}'.format(source[0], source[1]))
    return '\n'.join(string_segments)
python
{ "resource": "" }
q26091
WindowsServicesAnalysisPlugin.ExamineEvent
train
def ExamineEvent(self, mediator, event):
    """Analyzes an event and creates Windows Services as required.

    At present, this method only handles events extracted from the Registry.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
      event (EventObject): event to examine.
    """
    # TODO: Handle event log entries here also (ie, event id 4697).
    event_data_type = getattr(event, 'data_type', '')
    if event_data_type == 'windows:registry:service':
      # Create and store the service.
      service = WindowsService.FromEvent(event)
      self._service_collection.AddService(service)
python
{ "resource": "" }
q26092
InstallHistoryPlugin.GetEntries
train
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts relevant install history entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      top_level (dict[str, object]): plist top-level key.
    """
    for entry in top_level:
      datetime_value = entry.get('date', None)
      package_identifiers = entry.get('packageIdentifiers', [])

      if not datetime_value or not package_identifiers:
        continue

      display_name = entry.get('displayName', '<UNKNOWN>')
      display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
      process_name = entry.get('processName', '<PROCESS_NAME>')
      package_identifiers = ', '.join(package_identifiers)

      event_data = plist_event.PlistTimeEventData()
      event_data.desc = (
          'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
          '{3:s}.').format(
              display_name, display_version, process_name,
              package_identifiers)
      event_data.key = ''
      event_data.root = '/item'

      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26093
TaggingAnalysisPlugin._AttemptAutoDetectTagFile
train
def _AttemptAutoDetectTagFile(self, analysis_mediator):
    """Detects which tag file is most appropriate.

    Args:
      analysis_mediator (AnalysisMediator): analysis mediator.

    Returns:
      bool: True if a tag file is autodetected.
    """
    self._autodetect_tag_file_attempt = True
    if not analysis_mediator.data_location:
      return False

    operating_system = analysis_mediator.operating_system.lower()
    filename = self._OS_TAG_FILES.get(operating_system, None)
    if not filename:
      return False

    logger.info('Using auto detected tag file: {0:s}'.format(filename))
    tag_file_path = os.path.join(analysis_mediator.data_location, filename)
    self.SetAndLoadTagFile(tag_file_path)
    return True
python
{ "resource": "" }
q26094
TaggingAnalysisPlugin.ExamineEvent
train
def ExamineEvent(self, mediator, event):
    """Analyzes an EventObject and tags it according to rules in the tag file.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
      event (EventObject): event to examine.
    """
    if self._tagging_rules is None:
      if self._autodetect_tag_file_attempt:
        # There's nothing to tag with, and we've already tried to find a good
        # tag file, so there's nothing we can do with this event (or any
        # other).
        return
      if not self._AttemptAutoDetectTagFile(mediator):
        logger.info(
            'No tag definition file specified, and plaso was not able to '
            'autoselect a tagging file. As no definitions were specified, '
            'no events will be tagged.')
        return

    matched_label_names = []
    for label_name, filter_objects in iter(self._tagging_rules.items()):
      for filter_object in filter_objects:
        if filter_object.Match(event):
          matched_label_names.append(label_name)
          break

    if matched_label_names:
      event_tag = self._CreateEventTag(
          event, self._EVENT_TAG_COMMENT, matched_label_names)

      mediator.ProduceEventTag(event_tag)
      self._number_of_event_tags += 1
python
{ "resource": "" }
q26095
TaggingAnalysisPlugin.SetAndLoadTagFile
train
def SetAndLoadTagFile(self, tagging_file_path):
    """Sets the tag file to be used by the plugin.

    Args:
      tagging_file_path (str): path of the tagging file.
    """
    tag_file = tagging_file.TaggingFile(tagging_file_path)
    self._tagging_rules = tag_file.GetEventTaggingRules()
python
{ "resource": "" }
q26096
CronSyslogPlugin.ParseMessage
train
def ParseMessage(self, parser_mediator, key, date_time, tokens):
    """Parses a syslog body that matched one of defined grammars.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): name of the matching grammar.
      date_time (dfdatetime.DateTimeValues): date and time values.
      tokens (dict[str, str]): tokens derived from a syslog message based on
          the defined grammar.

    Raises:
      ValueError: If an unknown key is provided.
    """
    if key != 'task_run':
      raise ValueError('Unknown grammar key: {0:s}'.format(key))

    event_data = CronTaskRunEventData()
    event_data.body = tokens.get('body', None)
    event_data.command = tokens.get('command', None)
    event_data.hostname = tokens.get('hostname', None)
    # TODO: pass line number to offset or remove.
    event_data.offset = 0
    event_data.pid = tokens.get('pid', None)
    event_data.reporter = tokens.get('reporter', None)
    event_data.severity = tokens.get('severity', None)
    event_data.username = tokens.get('username', None)

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q26097
YaraAnalyzer.Analyze
train
def Analyze(self, data):
    """Analyzes a block of data, attempting to match Yara rules to it.

    Args:
      data(bytes): a block of data.
    """
    if not self._rules:
      return

    try:
      self._matches = self._rules.match(data=data, timeout=self._MATCH_TIMEOUT)
    except yara.YaraTimeoutError:
      logger.error('Could not process file within timeout: {0:d}'.format(
          self._MATCH_TIMEOUT))
    except yara.YaraError as exception:
      logger.error('Error processing file with Yara: {0!s}.'.format(
          exception))
python
{ "resource": "" }
q26098
YaraAnalyzer.GetResults
train
def GetResults(self):
    """Retrieves results of the most recent analysis.

    Returns:
      list[AnalyzerResult]: results.
    """
    result = analyzer_result.AnalyzerResult()
    result.analyzer_name = self.NAME
    result.attribute_name = self._ATTRIBUTE_NAME

    rule_names = [match.rule for match in self._matches]
    result.attribute_value = ','.join(rule_names)
    return [result]
python
{ "resource": "" }
q26099
ESEDBParser._GetTableNames
train
def _GetTableNames(self, database):
    """Retrieves the table names in a database.

    Args:
      database (pyesedb.file): ESE database.

    Returns:
      list[str]: table names.
    """
    table_names = []
    for esedb_table in database.tables:
      table_names.append(esedb_table.name)

    return table_names
python
{ "resource": "" }