Dataset columns:
  _id               string (2 to 7 characters)
  title             string (1 to 88 characters)
  partition         string (3 classes)
  text              string (75 to 19.8k characters)
  language          string (1 class)
  meta_information  dict
q25400
Expression.AddArg
train
def AddArg(self, argument):
  """Adds a new argument to this expression.

  Args:
    argument (str): argument to add.

  Returns:
    True if the argument is the last argument, False otherwise.

  Raises:
    ParseError: If there are too many arguments.
  """
  self.args.append(argument)
  if len(self.args) > self.number_of_args:
    raise errors.ParseError('Too many arguments for this expression.')

  elif len(self.args) == self.number_of_args:
    return True

  return False
python
{ "resource": "" }
q25401
BinaryExpression.AddOperands
train
def AddOperands(self, lhs, rhs):
  """Adds the left and right hand side operands."""
  if isinstance(lhs, Expression) and isinstance(rhs, Expression):
    self.args = [lhs, rhs]
  else:
    raise errors.ParseError(
        'Expected expression, got {0:s} {1:s} {2:s}'.format(
            lhs, self.operator, rhs))
python
{ "resource": "" }
q25402
BinaryExpression.PrintTree
train
def PrintTree(self, depth=''):
  """Print the tree."""
  result = '{0:s}{1:s}\n'.format(depth, self.operator)
  for part in self.args:
    result += '{0:s}-{1:s}\n'.format(depth, part.PrintTree(depth + ' '))

  return result
python
{ "resource": "" }
q25403
SearchParser.BinaryOperator
train
def BinaryOperator(self, string=None, **unused_kwargs):
  """Set the binary operator."""
  self.stack.append(self.binary_expression_cls(string))
python
{ "resource": "" }
q25404
SearchParser.StoreAttribute
train
def StoreAttribute(self, string='', **unused_kwargs):
  """Store the attribute."""
  logging.debug('Storing attribute {0:s}'.format(repr(string)))

  # TODO: Update the expected number_of_args
  try:
    self.current_expression.SetAttribute(string)
  except AttributeError:
    raise errors.ParseError('Invalid attribute \'{0:s}\''.format(string))

  return 'OPERATOR'
python
{ "resource": "" }
q25405
SearchParser.StoreOperator
train
def StoreOperator(self, string='', **unused_kwargs):
  """Store the operator."""
  logging.debug('Storing operator {0:s}'.format(repr(string)))
  self.current_expression.SetOperator(string)
python
{ "resource": "" }
q25406
SearchParser.InsertArg
train
def InsertArg(self, string='', **unused_kwargs):
  """Insert an argument to the current expression."""
  logging.debug('Storing Argument {0:s}'.format(string))

  # This expression is complete
  if self.current_expression.AddArg(string):
    self.stack.append(self.current_expression)
    self.current_expression = self.expression_cls()
    return self.PopState()

  return None
python
{ "resource": "" }
q25407
FileNameFileEntryFilter.Match
train
def Match(self, file_entry):
  """Determines if a file entry matches the filter.

  Args:
    file_entry (dfvfs.FileEntry): a file entry.

  Returns:
    bool: True if the file entry matches the filter.
  """
  if not file_entry:
    return False

  filename = file_entry.name.lower()
  return filename == self._filename
python
{ "resource": "" }
q25408
BaseParser.GetPluginObjectByName
train
def GetPluginObjectByName(cls, plugin_name):
  """Retrieves a specific plugin object by its name.

  Args:
    plugin_name (str): name of the plugin.

  Returns:
    BasePlugin: a plugin object or None if not available.
  """
  plugin_class = cls._plugin_classes.get(plugin_name, None)
  if plugin_class:
    return plugin_class()

  return None
python
{ "resource": "" }
q25409
BaseParser.GetPlugins
train
def GetPlugins(cls):
  """Retrieves the registered plugins.

  Yields:
    tuple[str, type]: name and class of the plugin.
  """
  for plugin_name, plugin_class in iter(cls._plugin_classes.items()):
    yield plugin_name, plugin_class
python
{ "resource": "" }
q25410
BaseParser.RegisterPlugin
train
def RegisterPlugin(cls, plugin_class):
  """Registers a plugin class.

  The plugin classes are identified based on their lower case name.

  Args:
    plugin_class (type): class of the plugin.

  Raises:
    KeyError: if plugin class is already set for the corresponding name.
  """
  plugin_name = plugin_class.NAME.lower()
  if plugin_name in cls._plugin_classes:
    raise KeyError((
        'Plugin class already set for name: {0:s}.').format(
            plugin_class.NAME))

  cls._plugin_classes[plugin_name] = plugin_class
python
{ "resource": "" }
q25411
FileEntryParser.Parse
train
def Parse(self, parser_mediator):
  """Parses the file entry and extracts event objects.

  Args:
    parser_mediator (ParserMediator): a parser mediator.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  file_entry = parser_mediator.GetFileEntry()
  if not file_entry:
    raise errors.UnableToParseFile('Invalid file entry')

  parser_mediator.AppendToParserChain(self)
  try:
    self.ParseFileEntry(parser_mediator, file_entry)
  finally:
    parser_mediator.PopFromParserChain()
python
{ "resource": "" }
q25412
FileObjectParser.Parse
train
def Parse(self, parser_mediator, file_object):
  """Parses a single file-like object.

  Args:
    parser_mediator (ParserMediator): a parser mediator.
    file_object (dfvfs.FileIO): a file-like object to parse.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  if not file_object:
    raise errors.UnableToParseFile('Invalid file object')

  if self._INITIAL_FILE_OFFSET is not None:
    file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)

  parser_mediator.AppendToParserChain(self)
  try:
    self.ParseFileObject(parser_mediator, file_object)
  finally:
    parser_mediator.PopFromParserChain()
python
{ "resource": "" }
q25413
WinPrefetchParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object):
  """Parses a Windows Prefetch file-like object.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): file-like object.
  """
  scca_file = pyscca.file()

  try:
    scca_file.open_file_object(file_object)
  except IOError as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to open file with error: {0!s}'.format(exception))
    return

  format_version = scca_file.format_version
  executable_filename = scca_file.executable_filename
  prefetch_hash = scca_file.prefetch_hash
  run_count = scca_file.run_count
  number_of_volumes = scca_file.number_of_volumes

  volume_serial_numbers = []
  volume_device_paths = []
  path = ''

  for volume_information in iter(scca_file.volumes):
    volume_serial_number = volume_information.serial_number
    volume_device_path = volume_information.device_path

    volume_serial_numbers.append(volume_serial_number)
    volume_device_paths.append(volume_device_path)

    timestamp = volume_information.get_creation_time_as_integer()
    if timestamp:
      event_data = windows_events.WindowsVolumeEventData()
      event_data.device_path = volume_device_path
      event_data.origin = parser_mediator.GetFilename()
      event_data.serial_number = volume_serial_number

      date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    for filename in iter(scca_file.filenames):
      if not filename:
        continue

      if (filename.startswith(volume_device_path) and
          filename.endswith(executable_filename)):
        _, _, path = filename.partition(volume_device_path)

  mapped_files = []
  for entry_index, file_metrics in enumerate(
      scca_file.file_metrics_entries):
    mapped_file_string = file_metrics.filename
    if not mapped_file_string:
      parser_mediator.ProduceExtractionWarning(
          'missing filename for file metrics entry: {0:d}'.format(
              entry_index))
      continue

    file_reference = file_metrics.file_reference
    if file_reference:
      mapped_file_string = (
          '{0:s} [MFT entry: {1:d}, sequence: {2:d}]').format(
              mapped_file_string, file_reference & 0xffffffffffff,
              file_reference >> 48)

    mapped_files.append(mapped_file_string)

  event_data = WinPrefetchExecutionEventData()
  event_data.executable = executable_filename
  event_data.mapped_files = mapped_files
  event_data.number_of_volumes = number_of_volumes
  event_data.path = path
  event_data.prefetch_hash = prefetch_hash
  event_data.run_count = run_count
  event_data.version = format_version
  event_data.volume_device_paths = volume_device_paths
  event_data.volume_serial_numbers = volume_serial_numbers

  timestamp = scca_file.get_last_run_time_as_integer(0)
  if not timestamp:
    parser_mediator.ProduceExtractionWarning('missing last run time')
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
  else:
    date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
  parser_mediator.ProduceEventWithEventData(event, event_data)

  # Check for the 7 older last run time values available since
  # format version 26.
  if format_version >= 26:
    for last_run_time_index in range(1, 8):
      timestamp = scca_file.get_last_run_time_as_integer(
          last_run_time_index)
      if not timestamp:
        continue

      date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
      date_time_description = 'Previous {0:s}'.format(
          definitions.TIME_DESCRIPTION_LAST_RUN)
      event = time_events.DateTimeValuesEvent(
          date_time, date_time_description)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  scca_file.close()
python
{ "resource": "" }
q25414
CCleanerPlugin._ParseUpdateKeyValue
train
def _ParseUpdateKeyValue(self, parser_mediator, registry_value, key_path):
  """Parses the UpdateKey value.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
    key_path (str): Windows Registry key path.
  """
  if not registry_value.DataIsString():
    parser_mediator.ProduceExtractionWarning(
        'unsupported UpdateKey value data type: {0:s}'.format(
            registry_value.data_type_string))
    return

  date_time_string = registry_value.GetDataAsObject()
  if not date_time_string:
    parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
    return

  re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
  if not re_match:
    parser_mediator.ProduceExtractionWarning(
        'unsupported UpdateKey value data: {0!s}'.format(date_time_string))
    return

  month, day_of_month, year, hours, minutes, seconds, part_of_day = (
      re_match.groups())

  try:
    year = int(year, 10)
    month = int(month, 10)
    day_of_month = int(day_of_month, 10)
    hours = int(hours, 10)
    minutes = int(minutes, 10)
    seconds = int(seconds, 10)
  except (TypeError, ValueError):
    parser_mediator.ProduceExtractionWarning(
        'invalid UpdateKey date time value: {0!s}'.format(date_time_string))
    return

  if part_of_day == 'PM':
    hours += 12

  time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)

  try:
    date_time = dfdatetime_time_elements.TimeElements(
        time_elements_tuple=time_elements_tuple)
    date_time.is_local_time = True
  except ValueError:
    parser_mediator.ProduceExtractionWarning(
        'invalid UpdateKey date time value: {0!s}'.format(
            time_elements_tuple))
    return

  event_data = CCleanerUpdateEventData()
  event_data.key_path = key_path

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_UPDATE,
      time_zone=parser_mediator.timezone)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25415
HashingAnalyzer.GetResults
train
def GetResults(self):
  """Retrieves the hashing results.

  Returns:
    list[AnalyzerResult]: results.
  """
  results = []
  for hasher in self._hashers:
    logger.debug('Processing results for hasher {0:s}'.format(hasher.NAME))
    result = analyzer_result.AnalyzerResult()
    result.analyzer_name = self.NAME
    result.attribute_name = '{0:s}_hash'.format(hasher.NAME)
    result.attribute_value = hasher.GetStringDigest()
    results.append(result)

  return results
python
{ "resource": "" }
q25416
HashingAnalyzer.Reset
train
def Reset(self):
  """Resets the internal state of the analyzer."""
  hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
      self._hasher_names_string)
  self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
python
{ "resource": "" }
q25417
HashingAnalyzer.SetHasherNames
train
def SetHasherNames(self, hasher_names_string):
  """Sets the hashers that should be enabled.

  Args:
    hasher_names_string (str): comma separated names of hashers to enable.
  """
  hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
      hasher_names_string)

  debug_hasher_names = ', '.join(hasher_names)
  logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))

  self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
  self._hasher_names_string = hasher_names_string
python
{ "resource": "" }
q25418
SQLiteStorageFileWriter._CreateTaskStorageWriter
train
def _CreateTaskStorageWriter(self, path, task):
  """Creates a task storage writer.

  Args:
    path (str): path to the storage file.
    task (Task): task.

  Returns:
    SQLiteStorageFileWriter: storage writer.
  """
  return SQLiteStorageFileWriter(
      self._session, path,
      storage_type=definitions.STORAGE_TYPE_TASK, task=task)
python
{ "resource": "" }
q25419
L2TCSVOutputModule._FormatField
train
def _FormatField(self, field):
  """Formats a field.

  Args:
    field (str): field value.

  Returns:
    str: formatted field value.
  """
  if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES):
    return field.replace(self._FIELD_DELIMITER, ' ')
  return field
python
{ "resource": "" }
q25420
L2TCSVOutputModule._WriteOutputValues
train
def _WriteOutputValues(self, output_values):
  """Writes values to the output.

  Args:
    output_values (list[str]): output values.
  """
  for index, value in enumerate(output_values):
    if not isinstance(value, py2to3.STRING_TYPES):
      value = ''
    output_values[index] = value.replace(',', ' ')

  output_line = ','.join(output_values)
  output_line = '{0:s}\n'.format(output_line)
  self._output_writer.Write(output_line)
python
{ "resource": "" }
q25421
L2TCSVOutputModule.WriteEventMACBGroup
train
def WriteEventMACBGroup(self, event_macb_group):
  """Writes an event MACB group to the output.

  Args:
    event_macb_group (list[EventObject]): event MACB group.
  """
  output_values = self._GetOutputValues(event_macb_group[0])

  timestamp_descriptions = [
      event.timestamp_desc for event in event_macb_group]
  output_values[3] = (
      self._output_mediator.GetMACBRepresentationFromDescriptions(
          timestamp_descriptions))
  # TODO: fix timestamp description in source.
  output_values[6] = '; '.join(timestamp_descriptions)

  self._WriteOutputValues(output_values)
python
{ "resource": "" }
q25422
ChromePreferencesParser._ExtractExtensionInstallEvents
train
def _ExtractExtensionInstallEvents(self, settings_dict, parser_mediator):
  """Extract extension installation events.

  Args:
    settings_dict (dict[str: object]): settings data from a Preferences
        file.
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
  """
  for extension_id, extension in sorted(settings_dict.items()):
    install_time = extension.get('install_time', None)
    if not install_time:
      parser_mediator.ProduceExtractionWarning(
          'installation time missing for extension ID {0:s}'.format(
              extension_id))
      continue

    try:
      install_time = int(install_time, 10)
    except ValueError:
      parser_mediator.ProduceExtractionWarning((
          'unable to convert installation time for extension ID '
          '{0:s}').format(extension_id))
      continue

    manifest = extension.get('manifest', None)
    if not manifest:
      parser_mediator.ProduceExtractionWarning(
          'manifest missing for extension ID {0:s}'.format(extension_id))
      continue

    event_data = ChromeExtensionInstallationEventData()
    event_data.extension_id = extension_id
    event_data.extension_name = manifest.get('name', None)
    event_data.path = extension.get('path', None)

    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=install_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25423
ChromePreferencesParser._ExtractContentSettingsExceptions
train
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
  """Extracts site specific events.

  Args:
    exceptions_dict (dict): Permission exceptions data from Preferences
        file.
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
  """
  for permission in exceptions_dict:
    if permission not in self._EXCEPTIONS_KEYS:
      continue

    exception_dict = exceptions_dict.get(permission, {})
    for urls, url_dict in exception_dict.items():
      last_used = url_dict.get('last_used', None)
      if not last_used:
        continue

      # If secondary_url is '*', the permission applies to primary_url.
      # If secondary_url is a valid URL, the permission applies to
      # elements loaded from secondary_url being embedded in primary_url.
      primary_url, secondary_url = urls.split(',')

      event_data = ChromeContentSettingsExceptionsEventData()
      event_data.permission = permission
      event_data.primary_url = primary_url
      event_data.secondary_url = secondary_url

      timestamp = int(last_used * 1000000)
      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
          timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
      parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25424
FileSystemArtifactPreprocessorPlugin.Collect
train
def Collect(
    self, knowledge_base, artifact_definition, searcher, file_system):
  """Collects values using a file artifact definition.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    artifact_definition (artifacts.ArtifactDefinition): artifact definition.
    searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
        the file system.
    file_system (dfvfs.FileSystem): file system to be preprocessed.

  Raises:
    PreProcessFail: if the preprocessing fails.
  """
  for source in artifact_definition.sources:
    if source.type_indicator not in (
        artifact_definitions.TYPE_INDICATOR_FILE,
        artifact_definitions.TYPE_INDICATOR_PATH):
      continue

    for path in source.paths:
      # Make sure the path separators used in the artifact definition
      # correspond to those used by the file system.
      path_segments = path.split(source.separator)

      find_spec = file_system_searcher.FindSpec(
          location_glob=path_segments[1:], case_sensitive=False)

      for path_specification in searcher.Find(find_specs=[find_spec]):
        self._ParsePathSpecification(
            knowledge_base, searcher, file_system, path_specification,
            source.separator)
python
{ "resource": "" }
q25425
FileEntryArtifactPreprocessorPlugin._ParsePathSpecification
train
def _ParsePathSpecification(
    self, knowledge_base, searcher, file_system, path_specification,
    path_separator):
  """Parses a file system for a preprocessing attribute.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
        the file system.
    file_system (dfvfs.FileSystem): file system to be preprocessed.
    path_specification (dfvfs.PathSpec): path specification that contains
        the artifact value data.
    path_separator (str): path segment separator.

  Raises:
    PreProcessFail: if the preprocessing fails.
  """
  try:
    file_entry = searcher.GetFileEntryByPathSpec(path_specification)
  except IOError as exception:
    relative_path = searcher.GetRelativePath(path_specification)
    if path_separator != file_system.PATH_SEPARATOR:
      relative_path_segments = file_system.SplitPath(relative_path)
      relative_path = '{0:s}{1:s}'.format(
          path_separator, path_separator.join(relative_path_segments))

    raise errors.PreProcessFail((
        'Unable to retrieve file entry: {0:s} with error: '
        '{1!s}').format(relative_path, exception))

  if file_entry:
    self._ParseFileEntry(knowledge_base, file_entry)
python
{ "resource": "" }
q25426
FileArtifactPreprocessorPlugin._ParseFileEntry
train
def _ParseFileEntry(self, knowledge_base, file_entry):
  """Parses a file entry for a preprocessing attribute.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    file_entry (dfvfs.FileEntry): file entry that contains the artifact
        value data.

  Raises:
    PreProcessFail: if the preprocessing fails.
  """
  file_object = file_entry.GetFileObject()
  try:
    self._ParseFileData(knowledge_base, file_object)
  finally:
    file_object.close()
python
{ "resource": "" }
q25427
WindowsRegistryKeyArtifactPreprocessorPlugin.Collect
train
def Collect(self, knowledge_base, artifact_definition, searcher):
  """Collects values using a Windows Registry value artifact definition.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    artifact_definition (artifacts.ArtifactDefinition): artifact definition.
    searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to
        preprocess the Windows Registry.

  Raises:
    PreProcessFail: if the Windows Registry key or value cannot be read.
  """
  for source in artifact_definition.sources:
    if source.type_indicator not in (
        artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY,
        artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
      continue

    if source.type_indicator == (
        artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
      key_value_pairs = [{'key': key} for key in source.keys]
    else:
      key_value_pairs = source.key_value_pairs

    for key_value_pair in key_value_pairs:
      key_path = key_value_pair['key']

      # The artifact definitions currently incorrectly define
      # CurrentControlSet so we correct it here for now.
      # Also see: https://github.com/ForensicArtifacts/artifacts/issues/120
      key_path_upper = key_path.upper()
      if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):
        key_path = '{0:s}{1:s}'.format(
            'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet', key_path[23:])

      find_spec = registry_searcher.FindSpec(key_path_glob=key_path)

      for key_path in searcher.Find(find_specs=[find_spec]):
        try:
          registry_key = searcher.GetKeyByPath(key_path)
        except IOError as exception:
          raise errors.PreProcessFail((
              'Unable to retrieve Windows Registry key: {0:s} with error: '
              '{1!s}').format(key_path, exception))

        if registry_key:
          value_name = key_value_pair.get('value', None)
          self._ParseKey(knowledge_base, registry_key, value_name)
python
{ "resource": "" }
q25428
SAMUsersWindowsRegistryPlugin._ParseFValue
train
def _ParseFValue(self, registry_key):
  """Parses an F value.

  Args:
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

  Returns:
    f_value: F value stored in the Windows Registry key.

  Raises:
    ParseError: if the Windows Registry key does not contain an F value or
        F value cannot be parsed.
  """
  registry_value = registry_key.GetValueByName('F')
  if not registry_value:
    raise errors.ParseError(
        'missing value: "F" in Windows Registry key: {0:s}.'.format(
            registry_key.name))

  f_value_map = self._GetDataTypeMap('f_value')

  try:
    return self._ReadStructureFromByteStream(
        registry_value.data, 0, f_value_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(exception)
python
{ "resource": "" }
q25429
SAMUsersWindowsRegistryPlugin._ParseVValueString
train
def _ParseVValueString(
    self, parser_mediator, data, user_information_descriptor):
  """Parses a V value string.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    data (bytes): Windows Registry V value data.
    user_information_descriptor (user_information_descriptor): V value
        user information descriptor.

  Returns:
    str: string value stored in the Windows Registry V value data.
  """
  data_start_offset = (
      user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)
  data_end_offset = data_start_offset + user_information_descriptor.size
  descriptor_data = data[data_start_offset:data_end_offset]

  try:
    username = descriptor_data.decode('utf-16-le')
  except (UnicodeDecodeError, UnicodeEncodeError) as exception:
    username = descriptor_data.decode('utf-16-le', errors='replace')
    parser_mediator.ProduceExtractionWarning((
        'unable to decode V value string with error: {0!s}. Characters '
        'that cannot be decoded will be replaced with "?" or '
        '"\\ufffd".').format(exception))

  return username
python
{ "resource": "" }
q25430
SafariHistoryPluginSqlite.ParsePageVisitRow
train
def ParsePageVisitRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a visited row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)

  was_http_non_get = self._GetRowValue(query_hash, row, 'http_non_get')

  event_data = SafariHistoryPageVisitedEventData()
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.title = self._GetRowValue(query_hash, row, 'title')
  event_data.url = self._GetRowValue(query_hash, row, 'url')
  event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
  event_data.was_http_non_get = bool(was_http_non_get)

  timestamp = self._GetRowValue(query_hash, row, 'visit_time')
  date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25431
GoogleDrivePlugin.GetLocalPath
train
def GetLocalPath(self, inode, cache, database):
  """Return local path for a given inode.

  Args:
    inode (int): inode number for the file.
    cache (SQLiteCache): cache.
    database (SQLiteDatabase): database.

  Returns:
    str: full path, including the filename of the given inode value.
  """
  local_path = cache.GetResults('local_path')
  if not local_path:
    results = database.Query(self.LOCAL_PATH_CACHE_QUERY)
    cache.CacheQueryResults(
        results, 'local_path', 'child_inode_number',
        ('parent_inode_number', 'filename'))
    local_path = cache.GetResults('local_path')

  parent, path = local_path.get(inode, [None, None])

  # TODO: Read the local_sync_root from the sync_config.db and use that
  # for a root value.
  root_value = '%local_sync_root%/'

  if not path:
    return root_value

  paths = []
  while path:
    paths.append(path)
    parent, path = local_path.get(parent, [None, None])

  if not paths:
    return root_value

  # Paths are built top level to root so we need to reverse the list to
  # represent them in the traditional order.
  paths.reverse()
  return root_value + '/'.join(paths)
python
{ "resource": "" }
q25432
GoogleDrivePlugin.GetCloudPath
train
def GetCloudPath(self, resource_id, cache, database):
  """Return cloud path given a resource id.

  Args:
    resource_id (str): resource identifier for the file.
    cache (SQLiteCache): cache.
    database (SQLiteDatabase): database.

  Returns:
    str: full path to the resource value.
  """
  cloud_path = cache.GetResults('cloud_path')
  if not cloud_path:
    results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
    cache.CacheQueryResults(
        results, 'cloud_path', 'resource_id', ('filename', 'parent'))
    cloud_path = cache.GetResults('cloud_path')

  if resource_id == 'folder:root':
    return '/'

  paths = []
  parent_path, parent_id = cloud_path.get(resource_id, ['', ''])

  while parent_path:
    if parent_path == 'folder:root':
      break
    paths.append(parent_path)
    parent_path, parent_id = cloud_path.get(parent_id, ['', ''])

  if not paths:
    return '/'

  # Paths are built top level to root so we need to reverse the list to
  # represent them in the traditional order.
  paths.reverse()
  return '/{0:s}/'.format('/'.join(paths))
python
{ "resource": "" }
q25433
GoogleDrivePlugin.ParseCloudEntryRow
train
def ParseCloudEntryRow(
    self, parser_mediator, query, row, cache=None, database=None,
    **unused_kwargs):
  """Parses a cloud entry row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
    cache (SQLiteCache): cache.
    database (SQLiteDatabase): database.
  """
  query_hash = hash(query)

  parent_resource_id = self._GetRowValue(
      query_hash, row, 'parent_resource_id')
  filename = self._GetRowValue(query_hash, row, 'filename')

  cloud_path = self.GetCloudPath(parent_resource_id, cache, database)
  cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)

  event_data = GoogleDriveSnapshotCloudEntryEventData()
  event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')
  event_data.path = cloud_filename
  event_data.query = query
  event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))
  event_data.size = self._GetRowValue(query_hash, row, 'size')
  event_data.url = self._GetRowValue(query_hash, row, 'url')

  timestamp = self._GetRowValue(query_hash, row, 'modified')
  date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'created')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25434
GoogleDrivePlugin.ParseLocalEntryRow
train
def ParseLocalEntryRow(
    self, parser_mediator, query, row, cache=None, database=None,
    **unused_kwargs):
  """Parses a local entry row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
    cache (Optional[SQLiteCache]): cache.
    database (Optional[SQLiteDatabase]): database.
  """
  query_hash = hash(query)

  inode_number = self._GetRowValue(query_hash, row, 'inode_number')
  local_path = self.GetLocalPath(inode_number, cache, database)

  event_data = GoogleDriveSnapshotLocalEntryEventData()
  event_data.path = local_path
  event_data.query = query
  event_data.size = self._GetRowValue(query_hash, row, 'size')

  timestamp = self._GetRowValue(query_hash, row, 'modified')
  date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25435
_EventSourceHeap.PopEventSource
train
def PopEventSource(self):
  """Pops an event source from the heap.

  Returns:
    EventSource: an event source or None if no event source is available.
  """
  try:
    _, _, event_source = heapq.heappop(self._heap)
  except IndexError:
    return None

  return event_source
python
{ "resource": "" }
q25436
_EventSourceHeap.PushEventSource
train
def PushEventSource(self, event_source):
  """Pushes an event source onto the heap.

  Args:
    event_source (EventSource): event source.
  """
  if event_source.file_entry_type == (
      dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):
    weight = 1
  else:
    weight = 100

  heap_values = (weight, time.time(), event_source)
  heapq.heappush(self._heap, heap_values)
python
{ "resource": "" }
q25437
TaskMultiProcessEngine._FillEventSourceHeap
train
def _FillEventSourceHeap(
    self, storage_writer, event_source_heap, start_with_first=False):
  """Fills the event source heap with the available written event sources.

  Args:
    storage_writer (StorageWriter): storage writer for a session storage.
    event_source_heap (_EventSourceHeap): event source heap.
    start_with_first (Optional[bool]): True if the function should start
        with the first written event source.
  """
  if self._processing_profiler:
    self._processing_profiler.StartTiming('fill_event_source_heap')

  if self._processing_profiler:
    self._processing_profiler.StartTiming('get_event_source')

  if start_with_first:
    event_source = storage_writer.GetFirstWrittenEventSource()
  else:
    event_source = storage_writer.GetNextWrittenEventSource()

  if self._processing_profiler:
    self._processing_profiler.StopTiming('get_event_source')

  while event_source:
    event_source_heap.PushEventSource(event_source)
    if event_source_heap.IsFull():
      break

    if self._processing_profiler:
      self._processing_profiler.StartTiming('get_event_source')

    event_source = storage_writer.GetNextWrittenEventSource()

    if self._processing_profiler:
      self._processing_profiler.StopTiming('get_event_source')

  if self._processing_profiler:
    self._processing_profiler.StopTiming('fill_event_source_heap')
python
{ "resource": "" }
q25438
TaskMultiProcessEngine._MergeTaskStorage
train
def _MergeTaskStorage(self, storage_writer):
  """Merges a task storage with the session storage.

  This function checks all task stores that are ready to merge and updates
  the scheduled tasks. Note that to prevent this function holding up the
  task scheduling loop only the first available task storage is merged.

  Args:
    storage_writer (StorageWriter): storage writer for a session storage
        used to merge task storage.
  """
  if self._processing_profiler:
    self._processing_profiler.StartTiming('merge_check')

  for task_identifier in storage_writer.GetProcessedTaskIdentifiers():
    try:
      task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)

      self._task_manager.SampleTaskStatus(task, 'processed')

      to_merge = self._task_manager.CheckTaskToMerge(task)
      if not to_merge:
        storage_writer.RemoveProcessedTaskStorage(task)

        self._task_manager.RemoveTask(task)
        self._task_manager.SampleTaskStatus(task, 'removed_processed')

      else:
        storage_writer.PrepareMergeTaskStorage(task)
        self._task_manager.UpdateTaskAsPendingMerge(task)

    except KeyError:
      logger.error(
          'Unable to retrieve task: {0:s} to prepare it to be merged.'.format(
              task_identifier))
      continue

  if self._processing_profiler:
    self._processing_profiler.StopTiming('merge_check')

  task = None
  if not self._storage_merge_reader_on_hold:
    task = self._task_manager.GetTaskPendingMerge(self._merge_task)

  # Limit the number of attribute containers from a single task-based
  # storage file that are merged per loop to keep tasks flowing.
  if task or self._storage_merge_reader:
    self._status = definitions.STATUS_INDICATOR_MERGING

    if self._processing_profiler:
      self._processing_profiler.StartTiming('merge')

    if task:
      if self._storage_merge_reader:
        self._merge_task_on_hold = self._merge_task
        self._storage_merge_reader_on_hold = self._storage_merge_reader

        self._task_manager.SampleTaskStatus(
            self._merge_task_on_hold, 'merge_on_hold')

      self._merge_task = task
      try:
        self._storage_merge_reader = storage_writer.StartMergeTaskStorage(
            task)

        self._task_manager.SampleTaskStatus(task, 'merge_started')

      except IOError as exception:
        logger.error((
            'Unable to merge results of task: {0:s} '
            'with error: {1!s}').format(task.identifier, exception))
        self._storage_merge_reader = None

    if self._storage_merge_reader:
      fully_merged = self._storage_merge_reader.MergeAttributeContainers(
          maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)
    else:
      # TODO: Do something more sensible when this happens, perhaps
      # retrying the task once that is implemented. For now, we mark the
      # task as fully merged because we can't continue with it.
      fully_merged = True

    if self._processing_profiler:
      self._processing_profiler.StopTiming('merge')

    if fully_merged:
      try:
        self._task_manager.CompleteTask(self._merge_task)

      except KeyError as exception:
        logger.error(
            'Unable to complete task: {0:s} with error: {1!s}'.format(
                self._merge_task.identifier, exception))

      if not self._storage_merge_reader_on_hold:
        self._merge_task = None
        self._storage_merge_reader = None
      else:
        self._merge_task = self._merge_task_on_hold
        self._storage_merge_reader = self._storage_merge_reader_on_hold

        self._merge_task_on_hold = None
        self._storage_merge_reader_on_hold = None

        self._task_manager.SampleTaskStatus(
            self._merge_task, 'merge_resumed')

    self._status = definitions.STATUS_INDICATOR_RUNNING
    self._number_of_produced_events = storage_writer.number_of_events
    self._number_of_produced_sources = storage_writer.number_of_event_sources
    self._number_of_produced_warnings = storage_writer.number_of_warnings
python
{ "resource": "" }
q25439
TaskMultiProcessEngine._ScheduleTask
train
def _ScheduleTask(self, task):
  """Schedules a task.

  Args:
    task (Task): task.

  Returns:
    bool: True if the task was scheduled.
  """
  if self._processing_profiler:
    self._processing_profiler.StartTiming('schedule_task')

  try:
    self._task_queue.PushItem(task, block=False)
    is_scheduled = True
  except errors.QueueFull:
    is_scheduled = False

  if self._processing_profiler:
    self._processing_profiler.StopTiming('schedule_task')

  return is_scheduled
python
{ "resource": "" }
q25440
TaskMultiProcessEngine._ScheduleTasks
train
def _ScheduleTasks(self, storage_writer):
  """Schedules tasks.

  Args:
    storage_writer (StorageWriter): storage writer for a session storage.
  """
  logger.debug('Task scheduler started')

  self._status = definitions.STATUS_INDICATOR_RUNNING

  # TODO: make tasks persistent.

  # TODO: protect task scheduler loop by catch all and
  # handle abort path.

  event_source_heap = _EventSourceHeap()

  self._FillEventSourceHeap(
      storage_writer, event_source_heap, start_with_first=True)

  event_source = event_source_heap.PopEventSource()

  task = None
  while event_source or self._task_manager.HasPendingTasks():
    if self._abort:
      break

    try:
      if not task:
        task = self._task_manager.CreateRetryTask()

      if not task and event_source:
        task = self._task_manager.CreateTask(self._session_identifier)
        task.file_entry_type = event_source.file_entry_type
        task.path_spec = event_source.path_spec
        event_source = None

        self._number_of_consumed_sources += 1

        if self._guppy_memory_profiler:
          self._guppy_memory_profiler.Sample()

      if task:
        if self._ScheduleTask(task):
          logger.debug(
              'Scheduled task {0:s} for path specification {1:s}'.format(
                  task.identifier, task.path_spec.comparable))

          self._task_manager.SampleTaskStatus(task, 'scheduled')
          task = None
        else:
          self._task_manager.SampleTaskStatus(task, 'schedule_attempted')

      self._MergeTaskStorage(storage_writer)

      if not event_source_heap.IsFull():
        self._FillEventSourceHeap(storage_writer, event_source_heap)

      if not task and not event_source:
        event_source = event_source_heap.PopEventSource()

    except KeyboardInterrupt:
      self._abort = True

      self._processing_status.aborted = True
      if self._status_update_callback:
        self._status_update_callback(self._processing_status)

  for task in self._task_manager.GetFailedTasks():
    warning = warnings.ExtractionWarning(
        message='Worker failed to process path specification',
        path_spec=task.path_spec)
    self._storage_writer.AddWarning(warning)
    self._processing_status.error_path_specs.append(task.path_spec)

  self._status = definitions.STATUS_INDICATOR_IDLE

  if self._abort:
    logger.debug('Task scheduler aborted')
  else:
    logger.debug('Task scheduler stopped')
python
{ "resource": "" }
q25441
TaskMultiProcessEngine._StopExtractionProcesses
train
def _StopExtractionProcesses(self, abort=False):
  """Stops the extraction processes.

  Args:
    abort (bool): True to indicate the stop is issued on abort.
  """
  logger.debug('Stopping extraction processes.')

  self._StopMonitoringProcesses()

  # Note that multiprocessing.Queue is very sensitive regarding
  # blocking on either a get or a put. So we try to prevent using
  # any blocking behavior.

  if abort:
    # Signal all the processes to abort.
    self._AbortTerminate()

  logger.debug('Emptying task queue.')
  self._task_queue.Empty()

  # Wake the processes to make sure that they are not blocking
  # waiting on the queue for new items.
  for _ in self._processes_per_pid:
    try:
      self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False)
    except errors.QueueFull:
      logger.warning('Task queue full, unable to push abort message.')

  # Try waiting for the processes to exit normally.
  self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
  self._task_queue.Close(abort=abort)

  if not abort:
    # Check if the processes are still alive and terminate them if
    # necessary.
    self._AbortTerminate()
    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
    self._task_queue.Close(abort=True)

  # Kill any lingering processes.
  self._AbortKill()
python
{ "resource": "" }
q25442
ZshExtendedHistoryParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, lines):
  """Verifies whether content corresponds to a Zsh extended_history file.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    lines (str): one or more lines from the text file.

  Returns:
    bool: True if the content corresponds to a Zsh extended_history file.
  """
  if self._VERIFICATION_REGEX.match(lines):
    return True
  return False
python
{ "resource": "" }
q25443
PlistFile.GetValueByPath
train
def GetValueByPath(self, path_segments):
  """Retrieves a plist value by path.

  Args:
    path_segments (list[str]): path segment strings relative to the root
        of the plist.

  Returns:
    object: The value of the key specified by the path or None.
  """
  key = self.root_key
  for path_segment in path_segments:
    if isinstance(key, dict):
      try:
        key = key[path_segment]
      except KeyError:
        return None

    elif isinstance(key, list):
      try:
        list_index = int(path_segment, 10)
      except ValueError:
        return None

      key = key[list_index]

    else:
      return None

    if not key:
      return None

  return key
python
{ "resource": "" }
q25444
PlistFile.Read
train
def Read(self, file_object):
  """Reads a plist from a file-like object.

  Args:
    file_object (dfvfs.FileIO): a file-like object containing plist data.

  Raises:
    IOError: if the plist file-like object cannot be read.
    OSError: if the plist file-like object cannot be read.
  """
  try:
    self.root_key = biplist.readPlist(file_object)
  except (
      biplist.NotBinaryPlistException,
      biplist.InvalidPlistException) as exception:
    raise IOError(exception)
python
{ "resource": "" }
q25445
EventFormatter._FormatMessage
train
def _FormatMessage(self, format_string, event_values):
  """Determines the formatted message string.

  Args:
    format_string (str): message format string.
    event_values (dict[str, object]): event values.

  Returns:
    str: formatted message string.
  """
  if not isinstance(format_string, py2to3.UNICODE_TYPE):
    logger.warning('Format string: {0:s} is non-Unicode.'.format(
        format_string))

    # Plaso code files should be in UTF-8 and thus binary strings are
    # assumed UTF-8. If this is not the case this should be fixed.
    format_string = format_string.decode('utf-8', errors='ignore')

  try:
    message_string = format_string.format(**event_values)

  except KeyError as exception:
    data_type = event_values.get('data_type', 'N/A')
    display_name = event_values.get('display_name', 'N/A')
    event_identifier = event_values.get('uuid', 'N/A')
    parser_chain = event_values.get('parser', 'N/A')

    error_message = (
        'unable to format string: "{0:s}" event object is missing required '
        'attributes: {1!s}').format(format_string, exception)
    error_message = (
        'Event: {0:s} data type: {1:s} display name: {2:s} '
        'parser chain: {3:s} with error: {4:s}').format(
            event_identifier, data_type, display_name, parser_chain,
            error_message)
    logger.error(error_message)

    attribute_values = []
    for attribute, value in iter(event_values.items()):
      attribute_values.append('{0:s}: {1!s}'.format(attribute, value))

    message_string = ' '.join(attribute_values)

  except UnicodeDecodeError as exception:
    data_type = event_values.get('data_type', 'N/A')
    display_name = event_values.get('display_name', 'N/A')
    event_identifier = event_values.get('uuid', 'N/A')
    parser_chain = event_values.get('parser', 'N/A')

    error_message = 'Unicode decode error: {0!s}'.format(exception)
    error_message = (
        'Event: {0:s} data type: {1:s} display name: {2:s} '
        'parser chain: {3:s} with error: {4:s}').format(
            event_identifier, data_type, display_name, parser_chain,
            error_message)
    logger.error(error_message)

    message_string = ''

  # Strip carriage return and linefeed from the message strings.
  # Using replace function here because it is faster than re.sub() or
  # string.strip().
  return message_string.replace('\r', '').replace('\n', '')
python
{ "resource": "" }
q25446
EventFormatter._FormatMessages
train
def _FormatMessages(self, format_string, short_format_string, event_values):
  """Determines the formatted message strings.

  Args:
    format_string (str): message format string.
    short_format_string (str): short message format string.
    event_values (dict[str, object]): event values.

  Returns:
    tuple(str, str): formatted message string and short message string.
  """
  message_string = self._FormatMessage(format_string, event_values)

  if short_format_string:
    short_message_string = self._FormatMessage(
        short_format_string, event_values)
  else:
    short_message_string = message_string

  # Truncate the short message string if necessary.
  if len(short_message_string) > 80:
    short_message_string = '{0:s}...'.format(short_message_string[:77])

  return message_string, short_message_string
python
{ "resource": "" }
q25447
ConditionalEventFormatter._ConditionalFormatMessages
train
def _ConditionalFormatMessages(self, event_values):
  """Determines the conditional formatted message strings.

  Args:
    event_values (dict[str, object]): event values.

  Returns:
    tuple(str, str): formatted message string and short message string.
  """
  # Using getattr here to make sure the attribute is not set to None.
  # if A.b = None, hasattr(A, b) is True but getattr(A, b, None) is False.
  string_pieces = []
  for map_index, attribute_name in enumerate(self._format_string_pieces_map):
    if not attribute_name or attribute_name in event_values:
      if attribute_name:
        attribute = event_values.get(attribute_name, None)
        # If an attribute is an int, yet has zero value we want to include
        # that in the format string, since that is still potentially valid
        # information. Otherwise we would like to skip it.
        # pylint: disable=unidiomatic-typecheck
        if (not isinstance(attribute, (bool, float)) and
            not isinstance(attribute, py2to3.INTEGER_TYPES) and
            not attribute):
          continue

      string_pieces.append(self.FORMAT_STRING_PIECES[map_index])

  format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)

  string_pieces = []
  for map_index, attribute_name in enumerate(
      self._format_string_short_pieces_map):
    if not attribute_name or event_values.get(attribute_name, None):
      string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index])

  short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)

  return self._FormatMessages(
      format_string, short_format_string, event_values)
python
{ "resource": "" }
q25448
DateTimeFileEntryFilter.AddDateTimeRange
train
def AddDateTimeRange(
    self, time_value, start_time_string=None, end_time_string=None):
  """Adds a date time filter range.

  The time strings are formatted as:
  YYYY-MM-DD hh:mm:ss.######[+-]##:##
  Where # are numeric digits ranging from 0 to 9 and the seconds fraction
  can be either 3 or 6 digits. The time of day, seconds fraction and
  timezone offset are optional. The default timezone is UTC.

  Args:
    time_value (str): time value, such as, atime, ctime, crtime, dtime,
        bkup and mtime.
    start_time_string (str): start date and time value string.
    end_time_string (str): end date and time value string.

  Raises:
    ValueError: If the filter is badly formed.
  """
  if not isinstance(time_value, py2to3.STRING_TYPES):
    raise ValueError('Filter type must be a string.')

  if start_time_string is None and end_time_string is None:
    raise ValueError(
        'Filter must have either a start or an end date time value.')

  time_value_lower = time_value.lower()
  if time_value_lower not in self._SUPPORTED_TIME_VALUES:
    raise ValueError('Unsupported time value: {0:s}.'.format(time_value))

  start_date_time = None
  if start_time_string:
    start_date_time = time_elements.TimeElementsInMicroseconds()
    start_date_time.CopyFromDateTimeString(start_time_string)

  end_date_time = None
  if end_time_string:
    end_date_time = time_elements.TimeElementsInMicroseconds()
    end_date_time.CopyFromDateTimeString(end_time_string)

  # Make sure that the end timestamp occurs after the beginning.
  # If not then we need to reverse the time range.
  if (None not in (start_date_time, end_date_time) and
      start_date_time > end_date_time):
    raise ValueError(
        'Invalid date time value start must be earlier than end.')

  self._date_time_ranges.append(self._DATE_TIME_RANGE_TUPLE(
      time_value_lower, start_date_time, end_date_time))
python
{ "resource": "" }
q25449
SignaturesFileEntryFilter._GetScanner
train
def _GetScanner(self, specification_store, signature_identifiers):
  """Initializes the scanner from the specification store.

  Args:
    specification_store (FormatSpecificationStore): a specification store.
    signature_identifiers (list[str]): signature identifiers.

  Returns:
    pysigscan.scanner: signature scanner or None.
  """
  if not specification_store:
    return None

  scanner_object = pysigscan.scanner()

  for format_specification in specification_store.specifications:
    if format_specification.identifier not in signature_identifiers:
      continue

    for signature in format_specification.signatures:
      pattern_offset = signature.offset

      if pattern_offset is None:
        signature_flags = pysigscan.signature_flags.NO_OFFSET
      elif pattern_offset < 0:
        pattern_offset *= -1
        signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
      else:
        signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START

      scanner_object.add_signature(
          signature.identifier, pattern_offset, signature.pattern,
          signature_flags)

    self._signature_identifiers.append(format_specification.identifier)

  return scanner_object
python
{ "resource": "" }
q25450
FileEntryFilterCollection.Matches
train
def Matches(self, file_entry):
  """Compares the file entry against the filter collection.

  Args:
    file_entry (dfvfs.FileEntry): file entry to compare.

  Returns:
    bool: True if the file entry matches one of the filters. If no filters
        are provided or applicable the result will be True.
  """
  if not self._filters:
    return True

  results = []
  for file_entry_filter in self._filters:
    result = file_entry_filter.Matches(file_entry)
    results.append(result)

  return True in results or False not in results
python
{ "resource": "" }
q25451
ZeitgeistActivityDatabasePlugin.ParseZeitgeistEventRow
train
def ParseZeitgeistEventRow(
    self, parser_mediator, query, row, **unused_kwargs):
  """Parses a zeitgeist event row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)

  event_data = ZeitgeistActivityEventData()
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')

  timestamp = self._GetRowValue(query_hash, row, 'timestamp')
  date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25452
WinlogonPlugin._ParseRegisteredDLLs
train
def _ParseRegisteredDLLs(self, parser_mediator, registry_key):
  """Parses the registered DLLs that receive event notifications.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
  """
  notify_key = registry_key.GetSubkeyByName('Notify')
  if not notify_key:
    return

  for subkey in notify_key.GetSubkeys():
    for trigger in self._TRIGGERS:
      handler_value = subkey.GetValueByName(trigger)
      if not handler_value:
        continue

      values_dict = {
          'Application': subkey.name,
          'Handler': handler_value.GetDataAsObject(),
          'Trigger': trigger}

      command_value = subkey.GetValueByName('DllName')
      if command_value:
        values_dict['Command'] = command_value.GetDataAsObject()

      event_data = windows_events.WindowsRegistryEventData()
      event_data.key_path = subkey.path
      event_data.offset = subkey.offset
      event_data.regvalue = values_dict
      event_data.source_append = ': Winlogon'

      event = time_events.DateTimeValuesEvent(
          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25453
WinlogonPlugin._ParseLogonApplications
train
def _ParseLogonApplications(self, parser_mediator, registry_key):
  """Parses the registered logon applications.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
  """
  for application in self._LOGON_APPLICATIONS:
    command_value = registry_key.GetValueByName(application)
    if not command_value:
      continue

    values_dict = {
        'Application': application,
        'Command': command_value.GetDataAsObject(),
        'Trigger': 'Logon'}

    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = ': Winlogon'

    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25454
AppCompatCacheWindowsRegistryPlugin._CheckSignature
train
def _CheckSignature(self, value_data):
  """Parses and validates the signature.

  Args:
    value_data (bytes): value data.

  Returns:
    int: format type or None if format could not be determined.

  Raises:
    ParseError: if the value data could not be parsed.
  """
  signature_map = self._GetDataTypeMap('uint32le')

  try:
    signature = self._ReadStructureFromByteStream(
        value_data, 0, signature_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(
        'Unable to parse signature value with error: {0!s}'.format(
            exception))

  format_type = self._HEADER_SIGNATURES.get(signature, None)

  if format_type == self._FORMAT_TYPE_2003:
    # TODO: determine which format version is used (2003 or Vista).
    return self._FORMAT_TYPE_2003

  if format_type == self._FORMAT_TYPE_8:
    cached_entry_signature = value_data[signature:signature + 4]
    if cached_entry_signature in (
        self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):
      return self._FORMAT_TYPE_8

  elif format_type == self._FORMAT_TYPE_10:
    # Windows 10 uses the same cache entry signature as Windows 8.1
    cached_entry_signature = value_data[signature:signature + 4]
    if cached_entry_signature == self._CACHED_ENTRY_SIGNATURE_8_1:
      return self._FORMAT_TYPE_10

  return format_type
python
{ "resource": "" }
q25455
AppCompatCacheWindowsRegistryPlugin._GetCachedEntryDataTypeMap
train
def _GetCachedEntryDataTypeMap(
    self, format_type, value_data, cached_entry_offset):
  """Determines the cached entry data type map.

  Args:
    format_type (int): format type.
    value_data (bytes): value data.
    cached_entry_offset (int): offset of the first cached entry data
        relative to the start of the value data.

  Returns:
    dtfabric.DataTypeMap: data type map which contains a data type
        definition, such as a structure, that can be mapped onto binary
        data or None if the data type map is not defined.

  Raises:
    ParseError: if the cached entry data type map cannot be determined.
  """
  if format_type not in self._SUPPORTED_FORMAT_TYPES:
    raise errors.ParseError('Unsupported format type: {0:d}'.format(
        format_type))

  data_type_map_name = ''

  if format_type == self._FORMAT_TYPE_XP:
    data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'

  elif format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10):
    data_type_map_name = 'appcompatcache_cached_entry_header_8'

  else:
    cached_entry = self._ParseCommon2003CachedEntry(
        value_data, cached_entry_offset)

    # Assume the entry is 64-bit if the 32-bit path offset is 0 and
    # the 64-bit path offset is set.
    if (cached_entry.path_offset_32bit == 0 and
        cached_entry.path_offset_64bit != 0):
      number_of_bits = '64'
    else:
      number_of_bits = '32'

    if format_type == self._FORMAT_TYPE_2003:
      data_type_map_name = (
          'appcompatcache_cached_entry_2003_{0:s}bit'.format(
              number_of_bits))
    elif format_type == self._FORMAT_TYPE_VISTA:
      data_type_map_name = (
          'appcompatcache_cached_entry_vista_{0:s}bit'.format(
              number_of_bits))
    elif format_type == self._FORMAT_TYPE_7:
      data_type_map_name = (
          'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits))

  return self._GetDataTypeMap(data_type_map_name)
python
{ "resource": "" }
q25456
AppCompatCacheWindowsRegistryPlugin._ParseCommon2003CachedEntry
train
def _ParseCommon2003CachedEntry(self, value_data, cached_entry_offset):
  """Parses the cached entry structure common for Windows 2003, Vista and 7.

  Args:
    value_data (bytes): value data.
    cached_entry_offset (int): offset of the first cached entry data
        relative to the start of the value data.

  Returns:
    appcompatcache_cached_entry_2003_common: cached entry structure common
        for Windows 2003, Windows Vista and Windows 7.

  Raises:
    ParseError: if the value data could not be parsed.
  """
  data_type_map = self._GetDataTypeMap(
      'appcompatcache_cached_entry_2003_common')

  try:
    cached_entry = self._ReadStructureFromByteStream(
        value_data[cached_entry_offset:], cached_entry_offset,
        data_type_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(
        'Unable to parse cached entry value with error: {0!s}'.format(
            exception))

  if cached_entry.path_size > cached_entry.maximum_path_size:
    raise errors.ParseError('Path size value out of bounds.')

  path_end_of_string_size = (
      cached_entry.maximum_path_size - cached_entry.path_size)
  if cached_entry.path_size == 0 or path_end_of_string_size != 2:
    raise errors.ParseError('Unsupported path size values.')

  return cached_entry
python
{ "resource": "" }
q25457
AppCompatCacheWindowsRegistryPlugin._ParseCachedEntryXP
train
def _ParseCachedEntryXP(self, value_data, cached_entry_offset):
  """Parses a Windows XP cached entry.

  Args:
    value_data (bytes): value data.
    cached_entry_offset (int): offset of the first cached entry data
        relative to the start of the value data.

  Returns:
    AppCompatCacheCachedEntry: cached entry.

  Raises:
    ParseError: if the value data could not be parsed.
  """
  try:
    cached_entry = self._ReadStructureFromByteStream(
        value_data[cached_entry_offset:], cached_entry_offset,
        self._cached_entry_data_type_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(
        'Unable to parse cached entry value with error: {0!s}'.format(
            exception))

  # TODO: have dtFabric handle string conversion.
  string_size = 0
  for string_index in range(0, 528, 2):
    if (cached_entry.path[string_index] == 0 and
        cached_entry.path[string_index + 1] == 0):
      break
    string_size += 2

  try:
    path = bytearray(cached_entry.path[0:string_size]).decode('utf-16-le')
  except UnicodeDecodeError:
    raise errors.ParseError('Unable to decode cached entry path to string')

  cached_entry_object = AppCompatCacheCachedEntry()
  cached_entry_object.cached_entry_size = (
      self._cached_entry_data_type_map.GetByteSize())
  cached_entry_object.file_size = cached_entry.file_size
  cached_entry_object.last_modification_time = (
      cached_entry.last_modification_time)
  cached_entry_object.last_update_time = cached_entry.last_update_time
  cached_entry_object.path = path

  return cached_entry_object
python
{ "resource": "" }
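The terminator scan above walks a fixed 528-byte buffer two bytes at a time, since UTF-16-LE encodes the NUL terminator as a zero byte pair. A standalone Python 3 sketch of the same extraction, with a hypothetical sample buffer:

# Hypothetical fixed-size path buffer, padded with NUL bytes.
path_buffer = 'C:\\test.exe'.encode('utf-16-le') + b'\x00' * 16

string_size = 0
for index in range(0, len(path_buffer) - 1, 2):
    if path_buffer[index] == 0 and path_buffer[index + 1] == 0:
      break
    string_size += 2

# prints: C:\test.exe
print(bytearray(path_buffer[:string_size]).decode('utf-16-le'))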
q25458
AppCompatCacheWindowsRegistryPlugin._ParseCachedEntry2003
train
def _ParseCachedEntry2003(self, value_data, cached_entry_offset):
    """Parses a Windows 2003 cached entry.

    Args:
      value_data (bytes): value data.
      cached_entry_offset (int): offset of the first cached entry data
          relative to the start of the value data.

    Returns:
      AppCompatCacheCachedEntry: cached entry.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    try:
      cached_entry = self._ReadStructureFromByteStream(
          value_data[cached_entry_offset:], cached_entry_offset,
          self._cached_entry_data_type_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse cached entry value with error: {0!s}'.format(
              exception))

    path_size = cached_entry.path_size
    maximum_path_size = cached_entry.maximum_path_size
    path_offset = cached_entry.path_offset

    if path_offset > 0 and path_size > 0:
      path_size += path_offset
      maximum_path_size += path_offset

    try:
      path = value_data[path_offset:path_size].decode('utf-16-le')
    except UnicodeDecodeError:
      raise errors.ParseError('Unable to decode cached entry path to string')

    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = (
        self._cached_entry_data_type_map.GetByteSize())
    cached_entry_object.file_size = getattr(cached_entry, 'file_size', None)
    cached_entry_object.last_modification_time = (
        cached_entry.last_modification_time)
    cached_entry_object.path = path

    return cached_entry_object
python
{ "resource": "" }
q25459
AppCompatCacheWindowsRegistryPlugin._ParseCachedEntryVista
train
def _ParseCachedEntryVista(self, value_data, cached_entry_offset):
    """Parses a Windows Vista cached entry.

    Args:
      value_data (bytes): value data.
      cached_entry_offset (int): offset of the first cached entry data
          relative to the start of the value data.

    Returns:
      AppCompatCacheCachedEntry: cached entry.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    try:
      cached_entry = self._ReadStructureFromByteStream(
          value_data[cached_entry_offset:], cached_entry_offset,
          self._cached_entry_data_type_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse cached entry value with error: {0!s}'.format(
              exception))

    path_size = cached_entry.path_size
    maximum_path_size = cached_entry.maximum_path_size
    path_offset = cached_entry.path_offset

    if path_offset > 0 and path_size > 0:
      path_size += path_offset
      maximum_path_size += path_offset

    try:
      path = value_data[path_offset:path_size].decode('utf-16-le')
    except UnicodeDecodeError:
      raise errors.ParseError('Unable to decode cached entry path to string')

    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = (
        self._cached_entry_data_type_map.GetByteSize())
    cached_entry_object.insertion_flags = cached_entry.insertion_flags
    cached_entry_object.last_modification_time = (
        cached_entry.last_modification_time)
    cached_entry_object.path = path
    cached_entry_object.shim_flags = cached_entry.shim_flags

    return cached_entry_object
python
{ "resource": "" }
q25460
AppCompatCacheWindowsRegistryPlugin._ParseCachedEntry8
train
def _ParseCachedEntry8(self, value_data, cached_entry_offset):
    """Parses a Windows 8.0 or 8.1 cached entry.

    Args:
      value_data (bytes): value data.
      cached_entry_offset (int): offset of the first cached entry data
          relative to the start of the value data.

    Returns:
      AppCompatCacheCachedEntry: cached entry.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    try:
      cached_entry = self._ReadStructureFromByteStream(
          value_data[cached_entry_offset:], cached_entry_offset,
          self._cached_entry_data_type_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse cached entry value with error: {0!s}'.format(
              exception))

    if cached_entry.signature not in (
        self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):
      raise errors.ParseError('Unsupported cache entry signature')

    cached_entry_data = value_data[cached_entry_offset:]

    if cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0:
      data_type_map_name = 'appcompatcache_cached_entry_body_8_0'
    elif cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1:
      data_type_map_name = 'appcompatcache_cached_entry_body_8_1'

    data_type_map = self._GetDataTypeMap(data_type_map_name)
    context = dtfabric_data_maps.DataTypeMapContext()

    try:
      cached_entry_body = self._ReadStructureFromByteStream(
          cached_entry_data[12:], cached_entry_offset + 12, data_type_map,
          context=context)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse cached entry body with error: {0!s}'.format(
              exception))

    data_offset = context.byte_size
    data_size = cached_entry_body.data_size

    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = (
        12 + cached_entry.cached_entry_data_size)
    cached_entry_object.insertion_flags = cached_entry_body.insertion_flags
    cached_entry_object.last_modification_time = (
        cached_entry_body.last_modification_time)
    cached_entry_object.path = cached_entry_body.path
    cached_entry_object.shim_flags = cached_entry_body.shim_flags

    if data_size > 0:
      cached_entry_object.data = cached_entry_data[
          data_offset:data_offset + data_size]

    return cached_entry_object
python
{ "resource": "" }
q25461
AppCompatCacheWindowsRegistryPlugin._ParseHeader
train
def _ParseHeader(self, format_type, value_data):
    """Parses the header.

    Args:
      format_type (int): format type.
      value_data (bytes): value data.

    Returns:
      AppCompatCacheHeader: header.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    data_type_map_name = self._HEADER_DATA_TYPE_MAP_NAMES.get(
        format_type, None)
    if not data_type_map_name:
      raise errors.ParseError(
          'Unsupported format type: {0:d}'.format(format_type))

    data_type_map = self._GetDataTypeMap(data_type_map_name)

    try:
      header = self._ReadStructureFromByteStream(
          value_data, 0, data_type_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse header value with error: {0!s}'.format(
              exception))

    header_data_size = data_type_map.GetByteSize()
    if format_type == self._FORMAT_TYPE_10:
      # In the Windows 10 format the first 4 bytes, read here as the
      # signature field, contain the size of the header.
      header_data_size = header.signature

    cache_header = AppCompatCacheHeader()
    cache_header.header_size = header_data_size
    cache_header.number_of_cached_entries = getattr(
        header, 'number_of_cached_entries', 0)

    return cache_header
python
{ "resource": "" }
q25462
PlistParser.GetTopLevel
train
def GetTopLevel(self, file_object):
    """Returns the deserialized content of a plist as a dictionary object.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Returns:
      dict[str, object]: contents of the plist.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    try:
      top_level_object = biplist.readPlist(file_object)
    except (biplist.InvalidPlistException,
            biplist.NotBinaryPlistException) as exception:
      raise errors.UnableToParseFile(
          'Unable to parse plist with error: {0!s}'.format(exception))

    return top_level_object
python
{ "resource": "" }
q25463
PlistParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a plist file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    filename = parser_mediator.GetFilename()
    file_size = file_object.get_size()

    if file_size <= 0:
      raise errors.UnableToParseFile(
          'File size: {0:d} bytes is less than or equal to 0.'.format(
              file_size))

    # 50MB is 10x larger than any plist seen to date.
    if file_size > 50000000:
      raise errors.UnableToParseFile(
          'File size: {0:d} bytes is larger than 50 MB.'.format(file_size))

    top_level_object = self.GetTopLevel(file_object)
    if not top_level_object:
      raise errors.UnableToParseFile(
          'Unable to parse: {0:s}, skipping.'.format(filename))

    # TODO: add a parser filter.
    matching_plugin = None
    for plugin in self._plugins:
      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, plist_name=filename, top_level=top_level_object)
        matching_plugin = plugin
      except errors.WrongPlistPlugin as exception:
        logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(
            exception.args[0], exception.args[1]))

    if not matching_plugin and self._default_plugin:
      self._default_plugin.UpdateChainAndProcess(
          parser_mediator, plist_name=filename, top_level=top_level_object)
python
{ "resource": "" }
q25464
ProcessStatus.UpdateNumberOfEventReports
train
def UpdateNumberOfEventReports(
      self, number_of_consumed_reports, number_of_produced_reports):
    """Updates the number of event reports.

    Args:
      number_of_consumed_reports (int): total number of event reports consumed
          by the process.
      number_of_produced_reports (int): total number of event reports produced
          by the process.

    Returns:
      bool: True if either number of event reports has increased.

    Raises:
      ValueError: if the consumed or produced number of event reports is
          smaller than the value of the previous update.
    """
    consumed_reports_delta = 0
    if number_of_consumed_reports is not None:
      if number_of_consumed_reports < self.number_of_consumed_reports:
        raise ValueError(
            'Number of consumed reports smaller than previous update.')

      consumed_reports_delta = (
          number_of_consumed_reports - self.number_of_consumed_reports)

      self.number_of_consumed_reports = number_of_consumed_reports
      self.number_of_consumed_reports_delta = consumed_reports_delta

    produced_reports_delta = 0
    if number_of_produced_reports is not None:
      if number_of_produced_reports < self.number_of_produced_reports:
        raise ValueError(
            'Number of produced reports smaller than previous update.')

      produced_reports_delta = (
          number_of_produced_reports - self.number_of_produced_reports)

      self.number_of_produced_reports = number_of_produced_reports
      self.number_of_produced_reports_delta = produced_reports_delta

    return consumed_reports_delta > 0 or produced_reports_delta > 0
python
{ "resource": "" }
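The same monotonic counter-and-delta pattern repeats in the UpdateNumberOf* methods that follow: reject decreases, record the delta, and report whether anything increased. A condensed standalone sketch of the shared pattern, with hypothetical names:

class _Counter(object):
    """Hypothetical condensed version of the UpdateNumberOf* pattern."""

    def __init__(self):
      self.total = 0
      self.delta = 0

    def Update(self, new_total):
      """Returns True if the total increased; raises ValueError on decrease."""
      if new_total is None:
        return False
      if new_total < self.total:
        raise ValueError('Total smaller than previous update.')

      self.delta = new_total - self.total
      self.total = new_total
      return self.delta > 0

counter = _Counter()
print(counter.Update(10))  # prints: True (delta is 10)
print(counter.Update(10))  # prints: False (delta is 0)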
q25465
ProcessStatus.UpdateNumberOfEvents
train
def UpdateNumberOfEvents(
      self, number_of_consumed_events, number_of_produced_events):
    """Updates the number of events.

    Args:
      number_of_consumed_events (int): total number of events consumed by
          the process.
      number_of_produced_events (int): total number of events produced by
          the process.

    Returns:
      bool: True if either number of events has increased.

    Raises:
      ValueError: if the consumed or produced number of events is smaller
          than the value of the previous update.
    """
    consumed_events_delta = 0
    if number_of_consumed_events is not None:
      if number_of_consumed_events < self.number_of_consumed_events:
        raise ValueError(
            'Number of consumed events smaller than previous update.')

      consumed_events_delta = (
          number_of_consumed_events - self.number_of_consumed_events)

      self.number_of_consumed_events = number_of_consumed_events
      self.number_of_consumed_events_delta = consumed_events_delta

    produced_events_delta = 0
    if number_of_produced_events is not None:
      if number_of_produced_events < self.number_of_produced_events:
        raise ValueError(
            'Number of produced events smaller than previous update.')

      produced_events_delta = (
          number_of_produced_events - self.number_of_produced_events)

      self.number_of_produced_events = number_of_produced_events
      self.number_of_produced_events_delta = produced_events_delta

    return consumed_events_delta > 0 or produced_events_delta > 0
python
{ "resource": "" }
q25466
ProcessStatus.UpdateNumberOfEventSources
train
def UpdateNumberOfEventSources(
      self, number_of_consumed_sources, number_of_produced_sources):
    """Updates the number of event sources.

    Args:
      number_of_consumed_sources (int): total number of event sources consumed
          by the process.
      number_of_produced_sources (int): total number of event sources produced
          by the process.

    Returns:
      bool: True if either number of event sources has increased.

    Raises:
      ValueError: if the consumed or produced number of event sources is
          smaller than the value of the previous update.
    """
    consumed_sources_delta = 0
    if number_of_consumed_sources is not None:
      if number_of_consumed_sources < self.number_of_consumed_sources:
        raise ValueError(
            'Number of consumed sources smaller than previous update.')

      consumed_sources_delta = (
          number_of_consumed_sources - self.number_of_consumed_sources)

      self.number_of_consumed_sources = number_of_consumed_sources
      self.number_of_consumed_sources_delta = consumed_sources_delta

    produced_sources_delta = 0
    if number_of_produced_sources is not None:
      if number_of_produced_sources < self.number_of_produced_sources:
        raise ValueError(
            'Number of produced sources smaller than previous update.')

      produced_sources_delta = (
          number_of_produced_sources - self.number_of_produced_sources)

      self.number_of_produced_sources = number_of_produced_sources
      self.number_of_produced_sources_delta = produced_sources_delta

    return consumed_sources_delta > 0 or produced_sources_delta > 0
python
{ "resource": "" }
q25467
ProcessStatus.UpdateNumberOfEventTags
train
def UpdateNumberOfEventTags(
      self, number_of_consumed_event_tags, number_of_produced_event_tags):
    """Updates the number of event tags.

    Args:
      number_of_consumed_event_tags (int): total number of event tags consumed
          by the process.
      number_of_produced_event_tags (int): total number of event tags produced
          by the process.

    Returns:
      bool: True if either number of event tags has increased.

    Raises:
      ValueError: if the consumed or produced number of event tags is smaller
          than the value of the previous update.
    """
    consumed_event_tags_delta = 0
    if number_of_consumed_event_tags is not None:
      if number_of_consumed_event_tags < self.number_of_consumed_event_tags:
        raise ValueError(
            'Number of consumed event tags smaller than previous update.')

      consumed_event_tags_delta = (
          number_of_consumed_event_tags - self.number_of_consumed_event_tags)

      self.number_of_consumed_event_tags = number_of_consumed_event_tags
      self.number_of_consumed_event_tags_delta = consumed_event_tags_delta

    produced_event_tags_delta = 0
    if number_of_produced_event_tags is not None:
      if number_of_produced_event_tags < self.number_of_produced_event_tags:
        raise ValueError(
            'Number of produced event tags smaller than previous update.')

      produced_event_tags_delta = (
          number_of_produced_event_tags - self.number_of_produced_event_tags)

      self.number_of_produced_event_tags = number_of_produced_event_tags
      self.number_of_produced_event_tags_delta = produced_event_tags_delta

    return consumed_event_tags_delta > 0 or produced_event_tags_delta > 0
python
{ "resource": "" }
q25468
ProcessStatus.UpdateNumberOfWarnings
train
def UpdateNumberOfWarnings(
      self, number_of_consumed_warnings, number_of_produced_warnings):
    """Updates the number of warnings.

    Args:
      number_of_consumed_warnings (int): total number of warnings consumed
          by the process.
      number_of_produced_warnings (int): total number of warnings produced
          by the process.

    Returns:
      bool: True if either number of warnings has increased.

    Raises:
      ValueError: if the consumed or produced number of warnings is smaller
          than the value of the previous update.
    """
    consumed_warnings_delta = 0
    if number_of_consumed_warnings is not None:
      if number_of_consumed_warnings < self.number_of_consumed_warnings:
        raise ValueError(
            'Number of consumed warnings smaller than previous update.')

      consumed_warnings_delta = (
          number_of_consumed_warnings - self.number_of_consumed_warnings)

      self.number_of_consumed_warnings = number_of_consumed_warnings
      self.number_of_consumed_warnings_delta = consumed_warnings_delta

    produced_warnings_delta = 0
    if number_of_produced_warnings is not None:
      if number_of_produced_warnings < self.number_of_produced_warnings:
        raise ValueError(
            'Number of produced warnings smaller than previous update.')

      produced_warnings_delta = (
          number_of_produced_warnings - self.number_of_produced_warnings)

      self.number_of_produced_warnings = number_of_produced_warnings
      self.number_of_produced_warnings_delta = produced_warnings_delta

    return consumed_warnings_delta > 0 or produced_warnings_delta > 0
python
{ "resource": "" }
q25469
ProcessingStatus.workers_status
train
def workers_status(self):
    """The worker status objects sorted by identifier."""
    return [
        self._workers_status[identifier]
        for identifier in sorted(self._workers_status.keys())]
python
{ "resource": "" }
q25470
ProcessingStatus._UpdateProcessStatus
train
def _UpdateProcessStatus(
      self, process_status, identifier, status, pid, used_memory,
      display_name, number_of_consumed_sources, number_of_produced_sources,
      number_of_consumed_events, number_of_produced_events,
      number_of_consumed_event_tags, number_of_produced_event_tags,
      number_of_consumed_reports, number_of_produced_reports,
      number_of_consumed_warnings, number_of_produced_warnings):
    """Updates a process status.

    Args:
      process_status (ProcessStatus): process status.
      identifier (str): process identifier.
      status (str): human readable status of the process e.g. 'Idle'.
      pid (int): process identifier (PID).
      used_memory (int): size of used memory in bytes.
      display_name (str): human readable name of the file entry currently
          being processed by the process.
      number_of_consumed_sources (int): total number of event sources consumed
          by the process.
      number_of_produced_sources (int): total number of event sources produced
          by the process.
      number_of_consumed_events (int): total number of events consumed by
          the process.
      number_of_produced_events (int): total number of events produced by
          the process.
      number_of_consumed_event_tags (int): total number of event tags consumed
          by the process.
      number_of_produced_event_tags (int): total number of event tags produced
          by the process.
      number_of_consumed_reports (int): total number of event reports consumed
          by the process.
      number_of_produced_reports (int): total number of event reports produced
          by the process.
      number_of_consumed_warnings (int): total number of warnings consumed
          by the process.
      number_of_produced_warnings (int): total number of warnings produced
          by the process.
    """
    new_sources = process_status.UpdateNumberOfEventSources(
        number_of_consumed_sources, number_of_produced_sources)

    new_events = process_status.UpdateNumberOfEvents(
        number_of_consumed_events, number_of_produced_events)

    new_event_tags = process_status.UpdateNumberOfEventTags(
        number_of_consumed_event_tags, number_of_produced_event_tags)

    new_warnings = process_status.UpdateNumberOfWarnings(
        number_of_consumed_warnings, number_of_produced_warnings)

    new_reports = process_status.UpdateNumberOfEventReports(
        number_of_consumed_reports, number_of_produced_reports)

    process_status.display_name = display_name
    process_status.identifier = identifier
    process_status.pid = pid
    process_status.status = status
    process_status.used_memory = used_memory

    if (new_sources or new_events or new_event_tags or new_warnings or
        new_reports):
      process_status.last_running_time = time.time()
python
{ "resource": "" }
q25471
ProcessingStatus.UpdateForemanStatus
train
def UpdateForemanStatus(
      self, identifier, status, pid, used_memory, display_name,
      number_of_consumed_sources, number_of_produced_sources,
      number_of_consumed_events, number_of_produced_events,
      number_of_consumed_event_tags, number_of_produced_event_tags,
      number_of_consumed_reports, number_of_produced_reports,
      number_of_consumed_warnings, number_of_produced_warnings):
    """Updates the status of the foreman.

    Args:
      identifier (str): foreman identifier.
      status (str): human readable status of the foreman e.g. 'Idle'.
      pid (int): process identifier (PID).
      used_memory (int): size of used memory in bytes.
      display_name (str): human readable name of the file entry currently
          being processed by the foreman.
      number_of_consumed_sources (int): total number of event sources consumed
          by the foreman.
      number_of_produced_sources (int): total number of event sources produced
          by the foreman.
      number_of_consumed_events (int): total number of events consumed by
          the foreman.
      number_of_produced_events (int): total number of events produced by
          the foreman.
      number_of_consumed_event_tags (int): total number of event tags consumed
          by the foreman.
      number_of_produced_event_tags (int): total number of event tags produced
          by the foreman.
      number_of_consumed_reports (int): total number of event reports consumed
          by the foreman.
      number_of_produced_reports (int): total number of event reports produced
          by the foreman.
      number_of_consumed_warnings (int): total number of warnings consumed
          by the foreman.
      number_of_produced_warnings (int): total number of warnings produced
          by the foreman.
    """
    if not self.foreman_status:
      self.foreman_status = ProcessStatus()

    self._UpdateProcessStatus(
        self.foreman_status, identifier, status, pid, used_memory,
        display_name, number_of_consumed_sources, number_of_produced_sources,
        number_of_consumed_events, number_of_produced_events,
        number_of_consumed_event_tags, number_of_produced_event_tags,
        number_of_consumed_reports, number_of_produced_reports,
        number_of_consumed_warnings, number_of_produced_warnings)
python
{ "resource": "" }
q25472
ProcessingStatus.UpdateWorkerStatus
train
def UpdateWorkerStatus(
      self, identifier, status, pid, used_memory, display_name,
      number_of_consumed_sources, number_of_produced_sources,
      number_of_consumed_events, number_of_produced_events,
      number_of_consumed_event_tags, number_of_produced_event_tags,
      number_of_consumed_reports, number_of_produced_reports,
      number_of_consumed_warnings, number_of_produced_warnings):
    """Updates the status of a worker.

    Args:
      identifier (str): worker identifier.
      status (str): human readable status of the worker e.g. 'Idle'.
      pid (int): process identifier (PID).
      used_memory (int): size of used memory in bytes.
      display_name (str): human readable name of the file entry currently
          being processed by the worker.
      number_of_consumed_sources (int): total number of event sources consumed
          by the worker.
      number_of_produced_sources (int): total number of event sources produced
          by the worker.
      number_of_consumed_events (int): total number of events consumed by
          the worker.
      number_of_produced_events (int): total number of events produced by
          the worker.
      number_of_consumed_event_tags (int): total number of event tags consumed
          by the worker.
      number_of_produced_event_tags (int): total number of event tags produced
          by the worker.
      number_of_consumed_reports (int): total number of event reports consumed
          by the worker.
      number_of_produced_reports (int): total number of event reports produced
          by the worker.
      number_of_consumed_warnings (int): total number of warnings consumed
          by the worker.
      number_of_produced_warnings (int): total number of warnings produced
          by the worker.
    """
    if identifier not in self._workers_status:
      self._workers_status[identifier] = ProcessStatus()

    process_status = self._workers_status[identifier]
    self._UpdateProcessStatus(
        process_status, identifier, status, pid, used_memory, display_name,
        number_of_consumed_sources, number_of_produced_sources,
        number_of_consumed_events, number_of_produced_events,
        number_of_consumed_event_tags, number_of_produced_event_tags,
        number_of_consumed_reports, number_of_produced_reports,
        number_of_consumed_warnings, number_of_produced_warnings)
python
{ "resource": "" }
q25473
WindowsRegistryKeyPathFilter.key_paths
train
def key_paths(self):
    """List of key paths defined by the filter."""
    if self._wow64_key_path:
      return [self._key_path, self._wow64_key_path]
    return [self._key_path]
python
{ "resource": "" }
q25474
WindowsRegistryPlugin.Process
train
def Process(self, parser_mediator, registry_key, **kwargs):
    """Processes a Windows Registry key or value.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Raises:
      ValueError: If the Windows Registry key is not set.
    """
    if registry_key is None:
      raise ValueError('Windows Registry key is not set.')

    # This will raise if unhandled keyword arguments are passed.
    super(WindowsRegistryPlugin, self).Process(parser_mediator, **kwargs)

    self.ExtractEvents(parser_mediator, registry_key, **kwargs)
python
{ "resource": "" }
q25475
WindowsRegistryPlugin.UpdateChainAndProcess
train
def UpdateChainAndProcess(self, parser_mediator, registry_key, **kwargs):
    """Updates the parser chain and processes a Windows Registry key or value.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Raises:
      ValueError: If the Windows Registry key is not set.
    """
    parser_mediator.AppendToParserChain(self)
    try:
      self.Process(parser_mediator, registry_key, **kwargs)
    finally:
      parser_mediator.PopFromParserChain()
python
{ "resource": "" }
q25476
NTFSMFTParser._GetDateTime
train
def _GetDateTime(self, filetime):
    """Retrieves the date and time from a FILETIME timestamp.

    Args:
      filetime (int): FILETIME timestamp.

    Returns:
      dfdatetime.DateTimeValues: date and time.
    """
    if filetime == 0:
      return dfdatetime_semantic_time.SemanticTime('Not set')

    return dfdatetime_filetime.Filetime(timestamp=filetime)
python
{ "resource": "" }
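FILETIME timestamps count 100-nanosecond intervals since January 1, 1601, and a value of 0 conventionally means "not set". A short sketch mirroring the zero-check above; it assumes the dfdatetime package is installed, and the sample timestamp is a hypothetical value corresponding to a date in 2017:

from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time

def _GetDateTime(filetime):
    """Maps a FILETIME value of 0 to a semantic 'Not set' placeholder."""
    if filetime == 0:
      return dfdatetime_semantic_time.SemanticTime('Not set')
    return dfdatetime_filetime.Filetime(timestamp=filetime)

print(_GetDateTime(131281867170000000).CopyToDateTimeString())
print(_GetDateTime(0).string)  # prints: Not set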
q25477
NTFSUsnJrnlParser._ParseUSNChangeJournal
train
def _ParseUSNChangeJournal(self, parser_mediator, usn_change_journal):
    """Parses a USN change journal.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      usn_change_journal (pyfsntfs.usn_change_journal): USN change journal.

    Raises:
      ParseError: if a USN change journal record cannot be parsed.
    """
    if not usn_change_journal:
      return

    usn_record_map = self._GetDataTypeMap('usn_record_v2')

    usn_record_data = usn_change_journal.read_usn_record()
    while usn_record_data:
      current_offset = usn_change_journal.get_offset()

      try:
        usn_record = self._ReadStructureFromByteStream(
            usn_record_data, current_offset, usn_record_map)
      except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to parse USN record at offset: 0x{0:08x} with error: '
            '{1!s}').format(current_offset, exception))

      # Per MSDN we need to use name offset for forward compatibility.
      name_offset = usn_record.name_offset - 60
      utf16_stream = usn_record.name[name_offset:usn_record.name_size]

      try:
        name_string = utf16_stream.decode('utf-16-le')
      except (UnicodeDecodeError, UnicodeEncodeError) as exception:
        name_string = utf16_stream.decode('utf-16-le', errors='replace')
        parser_mediator.ProduceExtractionWarning((
            'unable to decode USN record name string with error: '
            '{0:s}. Characters that cannot be decoded will be replaced '
            'with "?" or "\\ufffd".').format(exception))

      event_data = NTFSUSNChangeEventData()
      event_data.file_attribute_flags = usn_record.file_attribute_flags
      event_data.file_reference = usn_record.file_reference
      event_data.filename = name_string
      event_data.offset = current_offset
      event_data.parent_file_reference = usn_record.parent_file_reference
      event_data.update_reason_flags = usn_record.update_reason_flags
      event_data.update_sequence_number = usn_record.update_sequence_number
      event_data.update_source_flags = usn_record.update_source_flags

      if not usn_record.update_date_time:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
      else:
        date_time = dfdatetime_filetime.Filetime(
            timestamp=usn_record.update_date_time)

      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

      usn_record_data = usn_change_journal.read_usn_record()
python
{ "resource": "" }
q25478
ShellItemsParser._ParseShellItem
train
def _ParseShellItem(self, parser_mediator, shell_item):
    """Parses a shell item.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      shell_item (pyfwsi.item): shell item.
    """
    path_segment = self._ParseShellItemPathSegment(shell_item)
    self._path_segments.append(path_segment)

    event_data = shell_item_events.ShellItemFileEntryEventData()
    event_data.origin = self._origin
    event_data.shell_item_path = self.CopyToPath()

    if isinstance(shell_item, pyfwsi.file_entry):
      event_data.name = shell_item.name

      for extension_block in shell_item.extension_blocks:
        if isinstance(extension_block, pyfwsi.file_entry_extension):
          long_name = extension_block.long_name
          localized_name = extension_block.localized_name
          file_reference = extension_block.file_reference
          if file_reference:
            file_reference = '{0:d}-{1:d}'.format(
                file_reference & 0xffffffffffff, file_reference >> 48)

          event_data.file_reference = file_reference
          event_data.localized_name = localized_name
          event_data.long_name = long_name

          fat_date_time = extension_block.get_creation_time_as_integer()
          if fat_date_time != 0:
            date_time = dfdatetime_fat_date_time.FATDateTime(
                fat_date_time=fat_date_time)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_CREATION)
            parser_mediator.ProduceEventWithEventData(event, event_data)

          fat_date_time = extension_block.get_access_time_as_integer()
          if fat_date_time != 0:
            date_time = dfdatetime_fat_date_time.FATDateTime(
                fat_date_time=fat_date_time)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
            parser_mediator.ProduceEventWithEventData(event, event_data)

      fat_date_time = shell_item.get_modification_time_as_integer()
      if fat_date_time != 0:
        date_time = dfdatetime_fat_date_time.FATDateTime(
            fat_date_time=fat_date_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
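The file_reference formatting above unpacks an NTFS file reference: the low 48 bits hold the MFT entry number and the high 16 bits the sequence number. A standalone sketch with a hypothetical reference value:

# Hypothetical reference: MFT entry 38, sequence number 2.
file_reference = (2 << 48) | 38

formatted = '{0:d}-{1:d}'.format(
    file_reference & 0xffffffffffff, file_reference >> 48)
print(formatted)  # prints: 38-2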
q25479
ShellItemsParser._ParseShellItemPathSegment
train
def _ParseShellItemPathSegment(self, shell_item):
    """Parses a shell item path segment.

    Args:
      shell_item (pyfwsi.item): shell item.

    Returns:
      str: shell item path segment.
    """
    path_segment = None

    if isinstance(shell_item, pyfwsi.root_folder):
      description = shell_folder_ids.DESCRIPTIONS.get(
          shell_item.shell_folder_identifier, None)

      if description:
        path_segment = description
      else:
        path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)
      path_segment = '<{0:s}>'.format(path_segment)

    elif isinstance(shell_item, pyfwsi.volume):
      if shell_item.name:
        path_segment = shell_item.name
      elif shell_item.identifier:
        path_segment = '{{{0:s}}}'.format(shell_item.identifier)

    elif isinstance(shell_item, pyfwsi.file_entry):
      long_name = ''
      for extension_block in shell_item.extension_blocks:
        if isinstance(extension_block, pyfwsi.file_entry_extension):
          long_name = extension_block.long_name

      if long_name:
        path_segment = long_name
      elif shell_item.name:
        path_segment = shell_item.name

    elif isinstance(shell_item, pyfwsi.network_location):
      if shell_item.location:
        path_segment = shell_item.location

    if path_segment is None and shell_item.class_type == 0x00:
      # TODO: check for signature 0x23febbee
      pass

    if path_segment is None:
      path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)

    return path_segment
python
{ "resource": "" }
q25480
ShellItemsParser.CopyToPath
train
def CopyToPath(self):
    """Copies the shell items to a path.

    Returns:
      str: converted shell item list path or None.
    """
    number_of_path_segments = len(self._path_segments)
    if number_of_path_segments == 0:
      return None

    strings = [self._path_segments[0]]
    number_of_path_segments -= 1
    for path_segment in self._path_segments[1:]:
      # Remove a trailing \ except for the last path segment.
      if path_segment.endswith('\\') and number_of_path_segments > 1:
        path_segment = path_segment[:-1]

      if ((path_segment.startswith('<') and path_segment.endswith('>')) or
          len(strings) == 1):
        strings.append(' {0:s}'.format(path_segment))
      elif path_segment.startswith('\\'):
        strings.append('{0:s}'.format(path_segment))
      else:
        strings.append('\\{0:s}'.format(path_segment))
      number_of_path_segments -= 1

    return ''.join(strings)
python
{ "resource": "" }
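CopyToPath joins the collected segments with backslashes, but keeps bracketed root descriptions such as <My Computer> space-separated and strips redundant trailing backslashes. A standalone sketch of the same rules, with hypothetical segments:

def _CopyToPath(path_segments):
    """Hypothetical standalone version of the segment-joining rules."""
    if not path_segments:
      return None

    strings = [path_segments[0]]
    remaining_segments = len(path_segments) - 1
    for path_segment in path_segments[1:]:
      # Remove a trailing \ except for the last path segment.
      if path_segment.endswith('\\') and remaining_segments > 1:
        path_segment = path_segment[:-1]

      if ((path_segment.startswith('<') and path_segment.endswith('>')) or
          len(strings) == 1):
        strings.append(' {0:s}'.format(path_segment))
      elif path_segment.startswith('\\'):
        strings.append(path_segment)
      else:
        strings.append('\\{0:s}'.format(path_segment))
      remaining_segments -= 1

    return ''.join(strings)

# prints: <My Computer> C:\Users\test.txt
print(_CopyToPath(['<My Computer>', 'C:\\', 'Users', 'test.txt']))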
q25481
ShellItemsParser.ParseByteStream
train
def ParseByteStream(
      self, parser_mediator, byte_stream, parent_path_segments=None,
      codepage='cp1252'):
    """Parses the shell items from the byte stream.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      byte_stream (bytes): shell items data.
      parent_path_segments (Optional[list[str]]): parent shell item path
          segments.
      codepage (Optional[str]): byte stream codepage.
    """
    if parent_path_segments and isinstance(parent_path_segments, list):
      self._path_segments = list(parent_path_segments)
    else:
      self._path_segments = []

    shell_item_list = pyfwsi.item_list()

    parser_mediator.AppendToParserChain(self)
    try:
      shell_item_list.copy_from_byte_stream(
          byte_stream, ascii_codepage=codepage)

      for shell_item in iter(shell_item_list.items):
        self._ParseShellItem(parser_mediator, shell_item)
    finally:
      parser_mediator.PopFromParserChain()
python
{ "resource": "" }
q25482
SyslogParser._UpdateYear
train
def _UpdateYear(self, mediator, month):
    """Updates the year to use for events, based on last observed month.

    Args:
      mediator (ParserMediator): mediates the interactions between parsers
          and other components, such as storage and abort signals.
      month (int): month observed by the parser, where January is 1.
    """
    if not self._year_use:
      self._year_use = mediator.GetEstimatedYear()
    if not self._maximum_year:
      self._maximum_year = mediator.GetLatestYear()

    if not self._last_month:
      self._last_month = month
      return

    # Some syslog daemons allow out-of-order sequences, so allow some leeway
    # so that an Apr->May->Apr sequence does not cause the year to increment.
    # See http://bugzilla.adiscon.com/show_bug.cgi?id=527
    if self._last_month > (month + 1):
      if self._year_use != self._maximum_year:
        self._year_use += 1
    self._last_month = month
python
{ "resource": "" }
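The rollover check above only bumps the year when the month sequence jumps back by more than one, so slightly out-of-order syslog lines do not trigger a spurious increment. A hypothetical standalone sketch of just that comparison:

def _ShouldIncrementYear(last_month, month):
    """Returns True when the month sequence wrapped into a new year."""
    return last_month is not None and last_month > (month + 1)

print(_ShouldIncrementYear(12, 1))  # prints: True (December to January)
print(_ShouldIncrementYear(5, 4))   # prints: False (allows slight reordering)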
q25483
SyslogParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, lines):
    """Verifies that this is a syslog-formatted file.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      lines (str): one or more lines from the text file.

    Returns:
      bool: True if this is the correct parser, False otherwise.
    """
    return (re.match(self._VERIFICATION_REGEX, lines) or
            re.match(self._CHROMEOS_VERIFICATION_REGEX, lines)) is not None
python
{ "resource": "" }
q25484
SerializedEventHeap.PushEvent
train
def PushEvent(self, timestamp, event_data):
    """Pushes a serialized event onto the heap.

    Args:
      timestamp (int): event timestamp, which contains the number of
          microseconds since January 1, 1970, 00:00:00 UTC.
      event_data (bytes): serialized event.
    """
    heap_values = (timestamp, event_data)
    heapq.heappush(self._heap, heap_values)
    self.data_size += len(event_data)
python
{ "resource": "" }
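Because heapq orders tuples lexicographically, pushing (timestamp, event_data) pairs yields events back in timestamp order regardless of insertion order. A standalone sketch with hypothetical payloads:

import heapq

heap = []
data_size = 0
for timestamp, event_data in [(30, b'later event'), (10, b'earlier event')]:
    heapq.heappush(heap, (timestamp, event_data))
    data_size += len(event_data)

print(heapq.heappop(heap))  # prints: (10, b'earlier event')
print(data_size)            # prints: 24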
q25485
KnowledgeBase.AddUserAccount
train
def AddUserAccount(self, user_account, session_identifier=CURRENT_SESSION):
    """Adds a user account.

    Args:
      user_account (UserAccountArtifact): user account artifact.
      session_identifier (Optional[str]): session identifier, where
          CURRENT_SESSION represents the active session.

    Raises:
      KeyError: if the user account already exists.
    """
    if session_identifier not in self._user_accounts:
      self._user_accounts[session_identifier] = {}

    user_accounts = self._user_accounts[session_identifier]
    if user_account.identifier in user_accounts:
      raise KeyError('User account: {0:s} already exists.'.format(
          user_account.identifier))

    user_accounts[user_account.identifier] = user_account
python
{ "resource": "" }
q25486
KnowledgeBase.AddEnvironmentVariable
train
def AddEnvironmentVariable(self, environment_variable):
    """Adds an environment variable.

    Args:
      environment_variable (EnvironmentVariableArtifact): environment variable
          artifact.

    Raises:
      KeyError: if the environment variable already exists.
    """
    name = environment_variable.name.upper()
    if name in self._environment_variables:
      raise KeyError('Environment variable: {0:s} already exists.'.format(
          environment_variable.name))

    self._environment_variables[name] = environment_variable
python
{ "resource": "" }
q25487
KnowledgeBase.GetEnvironmentVariable
train
def GetEnvironmentVariable(self, name):
    """Retrieves an environment variable.

    Args:
      name (str): name of the environment variable.

    Returns:
      EnvironmentVariableArtifact: environment variable artifact or None if
          there was no value set for the given name.
    """
    name = name.upper()
    return self._environment_variables.get(name, None)
python
{ "resource": "" }
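Environment variable names are upper-cased on both store and lookup, which makes the mapping case-insensitive, matching Windows semantics. A hypothetical standalone sketch of the store:

environment_variables = {}

def _SetVariable(name, value):
    environment_variables[name.upper()] = value

def _GetVariable(name):
    return environment_variables.get(name.upper(), None)

_SetVariable('SystemRoot', 'C:\\Windows')
print(_GetVariable('systemroot'))  # prints: C:\Windows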
q25488
KnowledgeBase.GetStoredHostname
train
def GetStoredHostname(self):
    """Retrieves the stored hostname.

    The hostname is determined based on the preprocessing information that is
    stored inside the storage file.

    Returns:
      str: hostname.
    """
    store_number = len(self._hostnames)
    return self._hostnames.get(store_number, None)
python
{ "resource": "" }
q25489
KnowledgeBase.GetSystemConfigurationArtifact
train
def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):
    """Retrieves the knowledge base as a system configuration artifact.

    Args:
      session_identifier (Optional[str]): session identifier, where
          CURRENT_SESSION represents the active session.

    Returns:
      SystemConfigurationArtifact: system configuration artifact.
    """
    system_configuration = artifacts.SystemConfigurationArtifact()

    system_configuration.code_page = self.GetValue(
        'codepage', default_value=self._codepage)

    system_configuration.hostname = self._hostnames.get(
        session_identifier, None)

    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
    system_configuration.operating_system = self.GetValue('operating_system')
    system_configuration.operating_system_product = self.GetValue(
        'operating_system_product')
    system_configuration.operating_system_version = self.GetValue(
        'operating_system_version')

    date_time = datetime.datetime(2017, 1, 1)
    time_zone = self._time_zone.tzname(date_time)

    if time_zone and isinstance(time_zone, py2to3.BYTES_TYPE):
      time_zone = time_zone.decode('ascii')

    system_configuration.time_zone = time_zone

    user_accounts = self._user_accounts.get(session_identifier, {})
    # In Python 3 dict.values() returns a type dict_values, which will cause
    # the JSON serializer to raise a TypeError.
    system_configuration.user_accounts = list(user_accounts.values())

    return system_configuration
python
{ "resource": "" }
q25490
KnowledgeBase.GetUsernameByIdentifier
train
def GetUsernameByIdentifier(
      self, user_identifier, session_identifier=CURRENT_SESSION):
    """Retrieves the username based on a user identifier.

    Args:
      user_identifier (str): user identifier, either a UID or SID.
      session_identifier (Optional[str]): session identifier, where
          CURRENT_SESSION represents the active session.

    Returns:
      str: username.
    """
    user_accounts = self._user_accounts.get(session_identifier, {})
    user_account = user_accounts.get(user_identifier, None)
    if not user_account:
      return ''

    return user_account.username or ''
python
{ "resource": "" }
q25491
KnowledgeBase.GetUsernameForPath
train
def GetUsernameForPath(self, path):
    """Retrieves a username for a specific path.

    This determines whether a specific path is within a user's directory and
    if so returns the username of that user.

    Args:
      path (str): path.

    Returns:
      str: username or None if the path does not appear to be within a user's
          directory.
    """
    path = path.lower()

    user_accounts = self._user_accounts.get(self.CURRENT_SESSION, {})
    for user_account in iter(user_accounts.values()):
      if not user_account.user_directory:
        continue

      user_directory = user_account.user_directory.lower()
      if path.startswith(user_directory):
        return user_account.username

    return None
python
{ "resource": "" }
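The lookup above is a simple case-insensitive prefix match against each account's user directory. A standalone sketch with hypothetical accounts:

user_directories = {'C:\\Users\\Alice': 'alice', 'C:\\Users\\Bob': 'bob'}

def _GetUsernameForPath(path):
    path = path.lower()
    for user_directory, username in user_directories.items():
      if path.startswith(user_directory.lower()):
        return username
    return None

# prints: alice
print(_GetUsernameForPath('C:\\Users\\ALICE\\Documents\\report.docx'))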
q25492
KnowledgeBase.GetValue
train
def GetValue(self, identifier, default_value=None):
    """Retrieves a value by identifier.

    Args:
      identifier (str): case insensitive unique identifier for the value.
      default_value (object): default value.

    Returns:
      object: value or default value if not available.

    Raises:
      TypeError: if the identifier is not a string type.
    """
    if not isinstance(identifier, py2to3.STRING_TYPES):
      raise TypeError('Identifier not a string type.')

    identifier = identifier.lower()
    return self._values.get(identifier, default_value)
python
{ "resource": "" }
q25493
KnowledgeBase.ReadSystemConfigurationArtifact
train
def ReadSystemConfigurationArtifact(
      self, system_configuration, session_identifier=CURRENT_SESSION):
    """Reads the knowledge base values from a system configuration artifact.

    Note that this overwrites existing values in the knowledge base.

    Args:
      system_configuration (SystemConfigurationArtifact): system configuration
          artifact.
      session_identifier (Optional[str]): session identifier, where
          CURRENT_SESSION represents the active session.
    """
    if system_configuration.code_page:
      try:
        self.SetCodepage(system_configuration.code_page)
      except ValueError:
        logger.warning(
            'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(
                system_configuration.code_page, self._codepage))

    self._hostnames[session_identifier] = system_configuration.hostname

    self.SetValue('keyboard_layout', system_configuration.keyboard_layout)

    self.SetValue('operating_system', system_configuration.operating_system)
    self.SetValue(
        'operating_system_product',
        system_configuration.operating_system_product)
    self.SetValue(
        'operating_system_version',
        system_configuration.operating_system_version)

    if system_configuration.time_zone:
      try:
        self.SetTimeZone(system_configuration.time_zone)
      except ValueError:
        logger.warning(
            'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
                system_configuration.time_zone, self.timezone.zone))

    self._user_accounts[session_identifier] = {
        user_account.username: user_account
        for user_account in system_configuration.user_accounts}
python
{ "resource": "" }
q25494
KnowledgeBase.SetCodepage
train
def SetCodepage(self, codepage):
    """Sets the codepage.

    Args:
      codepage (str): codepage.

    Raises:
      ValueError: if the codepage is not supported.
    """
    try:
      codecs.getencoder(codepage)
      self._codepage = codepage
    except LookupError:
      raise ValueError('Unsupported codepage: {0:s}'.format(codepage))
python
{ "resource": "" }
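Codepage validation piggybacks on the codecs registry: if Python can hand back an encoder for the name, the codepage is considered supported. A standalone sketch:

import codecs

def _IsValidCodepage(codepage):
    """Returns True if the codecs registry knows an encoder for the name."""
    try:
      codecs.getencoder(codepage)
      return True
    except LookupError:
      return False

print(_IsValidCodepage('cp1252'))  # prints: True
print(_IsValidCodepage('cp0'))     # prints: False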
q25495
KnowledgeBase.SetEnvironmentVariable
train
def SetEnvironmentVariable(self, environment_variable):
    """Sets an environment variable.

    Args:
      environment_variable (EnvironmentVariableArtifact): environment variable
          artifact.
    """
    name = environment_variable.name.upper()
    self._environment_variables[name] = environment_variable
python
{ "resource": "" }
q25496
KnowledgeBase.SetTimeZone
train
def SetTimeZone(self, time_zone):
    """Sets the time zone.

    Args:
      time_zone (str): time zone.

    Raises:
      ValueError: if the time zone is not supported.
    """
    try:
      self._time_zone = pytz.timezone(time_zone)
    except (AttributeError, pytz.UnknownTimeZoneError):
      raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))
python
{ "resource": "" }
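Time zone validation works the same way through pytz: construction either succeeds or raises UnknownTimeZoneError. A standalone sketch; it assumes pytz is installed:

import pytz

def _IsValidTimeZone(time_zone):
    """Returns True if pytz can construct the named time zone."""
    try:
      pytz.timezone(time_zone)
      return True
    except (AttributeError, pytz.UnknownTimeZoneError):
      return False

print(_IsValidTimeZone('Europe/Amsterdam'))   # prints: True
print(_IsValidTimeZone('Mars/Olympus_Mons'))  # prints: False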
q25497
KnowledgeBase.SetValue
train
def SetValue(self, identifier, value):
    """Sets a value by identifier.

    Args:
      identifier (str): case insensitive unique identifier for the value.
      value (object): value.

    Raises:
      TypeError: if the identifier is not a string type.
    """
    if not isinstance(identifier, py2to3.STRING_TYPES):
      raise TypeError('Identifier not a string type.')

    identifier = identifier.lower()
    self._values[identifier] = value
python
{ "resource": "" }
q25498
AnalysisPluginOptions._CreateAnalysisPlugins
train
def _CreateAnalysisPlugins(self, options):
    """Creates the analysis plugins.

    Args:
      options (argparse.Namespace): command line arguments.

    Returns:
      dict[str, AnalysisPlugin]: analysis plugins and their names.
    """
    if not self._analysis_plugins:
      return {}

    analysis_plugins = (
        analysis_manager.AnalysisPluginManager.GetPluginObjects(
            self._analysis_plugins))

    for analysis_plugin in analysis_plugins.values():
      helpers_manager.ArgumentHelperManager.ParseOptions(
          options, analysis_plugin)

    return analysis_plugins
python
{ "resource": "" }
q25499
AnalysisPluginOptions.ListAnalysisPlugins
train
def ListAnalysisPlugins(self):
    """Lists the analysis plugins."""
    analysis_plugin_info = (
        analysis_manager.AnalysisPluginManager.GetAllPluginInformation())

    column_width = 10
    for name, _, _ in analysis_plugin_info:
      if len(name) > column_width:
        column_width = len(name)

    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Name', 'Description'],
        title='Analysis Plugins')
    # TODO: add support for a 3 column table.
    for name, description, type_string in analysis_plugin_info:
      description = '{0:s} [{1:s}]'.format(description, type_string)
      table_view.AddRow([name, description])
    table_view.Write(self._output_writer)
python
{ "resource": "" }