| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q25700
|
ParsersManager.GetParserObjects
|
train
|
def GetParserObjects(cls, parser_filter_expression=None):
"""Retrieves the parser objects.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
Returns:
dict[str, BaseParser]: parsers per name.
"""
includes, excludes = cls._GetParserFilters(parser_filter_expression)
parser_objects = {}
for parser_name, parser_class in iter(cls._parser_classes.items()):
# If there are no includes all parsers are included by default.
if not includes and parser_name in excludes:
continue
if includes and parser_name not in includes:
continue
parser_object = parser_class()
if parser_class.SupportsPlugins():
plugin_includes = None
if parser_name in includes:
plugin_includes = includes[parser_name]
parser_object.EnablePlugins(plugin_includes)
parser_objects[parser_name] = parser_object
return parser_objects
|
python
|
{
"resource": ""
}
|
q25701
|
ParsersManager.GetParsers
|
train
|
def GetParsers(cls, parser_filter_expression=None):
"""Retrieves the registered parsers and plugins.
Retrieves the registered parsers and associated plugins that match a
parser filter expression. The filter expression can contain the names of
parsers, presets or plugins. An entry prefixed with an exclamation point
negates the selection, e.g. "foo,!foo/bar" includes parser foo but
excludes its plugin bar. The lists of specifically included and
excluded plugins are also passed to each parser's class.
The three types of entries in the filter string:
* name of a parser: this would be the exact name of a single parser to
include (or exclude), e.g. foo;
* name of a preset, e.g. win7: the presets are defined in
plaso/parsers/presets.py;
* name of a plugin: if a plugin name is included the parent parser will be
included in the list of registered parsers;
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
Yields:
tuple: containing:
* str: name of the parser;
* type: parser class (subclass of BaseParser).
"""
includes, excludes = cls._GetParserFilters(parser_filter_expression)
for parser_name, parser_class in iter(cls._parser_classes.items()):
# If there are no includes all parsers are included by default.
if not includes and parser_name in excludes:
continue
if includes and parser_name not in includes:
continue
yield parser_name, parser_class
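# Illustrative sketch (not part of plaso): how a filter expression such as
# "foo,!foo/bar" might split into include and exclude mappings of parser
# name to plugin names, mirroring the semantics described in the docstring
# above. The real _GetParserFilters() may differ; presets are not expanded.
def _SplitFilterExpression(expression):
  includes, excludes = {}, {}
  for entry in (expression or '').split(','):
    entry = entry.strip().lower()
    if not entry:
      continue
    target = excludes if entry.startswith('!') else includes
    parser, _, plugin = entry.lstrip('!').partition('/')
    target.setdefault(parser, [])
    if plugin:
      target[parser].append(plugin)
  return includes, excludes

# Example: parser "foo" is included, but its plugin "bar" is excluded.
print(_SplitFilterExpression('foo,!foo/bar'))
# ({'foo': []}, {'foo': ['bar']})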
|
python
|
{
"resource": ""
}
|
q25702
|
ParsersManager.GetParsersInformation
|
train
|
def GetParsersInformation(cls):
"""Retrieves the parsers information.
Returns:
list[tuple[str, str]]: parser names and descriptions.
"""
parsers_information = []
for _, parser_class in cls.GetParsers():
description = getattr(parser_class, 'DESCRIPTION', '')
parsers_information.append((parser_class.NAME, description))
return parsers_information
|
python
|
{
"resource": ""
}
|
q25703
|
ParsersManager.GetPresetsInformation
|
train
|
def GetPresetsInformation(cls):
"""Retrieves the presets information.
Returns:
list[tuple]: containing:
str: preset name
str: comma separated parser names that are defined by the preset
"""
parser_presets_information = []
for preset_definition in ParsersManager.GetPresets():
preset_information_tuple = (
preset_definition.name, ', '.join(preset_definition.parsers))
# TODO: refactor to pass PresetDefinition.
parser_presets_information.append(preset_information_tuple)
return parser_presets_information
|
python
|
{
"resource": ""
}
|
q25704
|
ParsersManager.GetPresetsForOperatingSystem
|
train
|
def GetPresetsForOperatingSystem(
cls, operating_system, operating_system_product,
operating_system_version):
"""Determines the presets for a specific operating system.
Args:
operating_system (str): operating system for example "Windows". This
should be one of the values in definitions.OPERATING_SYSTEM_FAMILIES.
operating_system_product (str): operating system product for
example "Windows XP" as determined by preprocessing.
operating_system_version (str): operating system version for
example "5.1" as determined by preprocessing.
Returns:
list[PresetDefinition]: preset definitions, where an empty list
represents all parsers and parser plugins (no preset).
"""
operating_system = artifacts.OperatingSystemArtifact(
family=operating_system, product=operating_system_product,
version=operating_system_version)
return cls._presets.GetPresetsByOperatingSystem(operating_system)
|
python
|
{
"resource": ""
}
|
q25705
|
ParsersManager.RegisterParser
|
train
|
def RegisterParser(cls, parser_class):
"""Registers a parser class.
The parser classes are identified based on their lower case name.
Args:
parser_class (type): parser class (subclass of BaseParser).
Raises:
KeyError: if parser class is already set for the corresponding name.
"""
parser_name = parser_class.NAME.lower()
if parser_name in cls._parser_classes:
raise KeyError('Parser class already set for name: {0:s}.'.format(
parser_class.NAME))
cls._parser_classes[parser_name] = parser_class
|
python
|
{
"resource": ""
}
|
q25706
|
ImageExportTool._CalculateDigestHash
|
train
|
def _CalculateDigestHash(self, file_entry, data_stream_name):
"""Calculates a SHA-256 digest of the contents of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry whose content will be hashed.
data_stream_name (str): name of the data stream whose content is to be
hashed.
Returns:
str: hexadecimal representation of the SHA-256 hash or None if the digest
cannot be determined.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
return None
try:
file_object.seek(0, os.SEEK_SET)
hasher_object = hashers_manager.HashersManager.GetHasher('sha256')
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hasher_object.Update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
finally:
file_object.close()
return hasher_object.GetStringDigest()
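# Illustrative sketch (not plaso code): the same chunked-read hashing
# pattern using hashlib directly on a regular file. The 32 KiB buffer size
# is an assumption; plaso reads _READ_BUFFER_SIZE bytes per chunk.
import hashlib

def sha256_of_file(path, buffer_size=32 * 1024):
  hasher = hashlib.sha256()
  with open(path, 'rb') as file_object:
    data = file_object.read(buffer_size)
    while data:
      hasher.update(data)
      data = file_object.read(buffer_size)
  return hasher.hexdigest()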
|
python
|
{
"resource": ""
}
|
q25707
|
ImageExportTool._CreateSanitizedDestination
|
train
|
def _CreateSanitizedDestination(
self, source_file_entry, source_path_spec, source_data_stream_name,
destination_path):
"""Creates a sanitized path of both destination directory and filename.
This function replaces non-printable and other characters defined in
_DIRTY_CHARACTERS with an underscore "_".
Args:
source_file_entry (dfvfs.FileEntry): file entry of the source file.
source_path_spec (dfvfs.PathSpec): path specification of the source file.
source_data_stream_name (str): name of the data stream of the source file
entry.
destination_path (str): path of the destination directory.
Returns:
tuple[str, str]: sanitized paths of both destination directory and
filename.
"""
file_system = source_file_entry.GetFileSystem()
path = getattr(source_path_spec, 'location', None)
path_segments = file_system.SplitPath(path)
# Sanitize each path segment.
for index, path_segment in enumerate(path_segments):
path_segments[index] = ''.join([
character if character not in self._DIRTY_CHARACTERS else '_'
for character in path_segment])
target_filename = path_segments.pop()
parent_path_spec = getattr(source_file_entry.path_spec, 'parent', None)
while parent_path_spec:
if parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
path_segments.insert(0, parent_path_spec.location[1:])
break
elif parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_VSHADOW):
path_segments.insert(0, parent_path_spec.location[1:])
parent_path_spec = getattr(parent_path_spec, 'parent', None)
target_directory = os.path.join(destination_path, *path_segments)
if source_data_stream_name:
target_filename = '{0:s}_{1:s}'.format(
target_filename, source_data_stream_name)
return target_directory, target_filename
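# Illustrative sketch (not plaso code): replacing "dirty" characters in a
# path segment with underscores. The character set below is an assumption;
# plaso defines the real set in _DIRTY_CHARACTERS.
dirty_characters = frozenset('\x00\x01\n\r:*?"<>|')
segment = 'NTUSER.DAT:Zone.Identifier'
print(''.join(
    character if character not in dirty_characters else '_'
    for character in segment))
# NTUSER.DAT_Zone.Identifier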
|
python
|
{
"resource": ""
}
|
q25708
|
ImageExportTool._Extract
|
train
|
def _Extract(
self, source_path_specs, destination_path, output_writer,
skip_duplicates=True):
"""Extracts files.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
output_writer.Write('Extracting file entries.\n')
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, resolver_context=self._resolver_context)
for path_spec in path_spec_generator:
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
|
python
|
{
"resource": ""
}
|
q25709
|
ImageExportTool._ExtractDataStream
|
train
|
def _ExtractDataStream(
self, file_entry, data_stream_name, destination_path, output_writer,
skip_duplicates=True):
"""Extracts a data stream.
Args:
file_entry (dfvfs.FileEntry): file entry containing the data stream.
data_stream_name (str): name of the data stream.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
if not data_stream_name and not file_entry.IsFile():
return
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
file_entry.path_spec)
if skip_duplicates:
try:
digest = self._CalculateDigestHash(file_entry, data_stream_name)
except (IOError, dfvfs_errors.BackEndError) as exception:
output_writer.Write((
'[skipping] unable to read content of file entry: {0:s} '
'with error: {1!s}\n').format(display_name, exception))
return
if not digest:
output_writer.Write(
'[skipping] unable to read content of file entry: {0:s}\n'.format(
display_name))
return
duplicate_display_name = self._digests.get(digest, None)
if duplicate_display_name:
output_writer.Write((
'[skipping] file entry: {0:s} is a duplicate of: {1:s} with '
'digest: {2:s}\n').format(
display_name, duplicate_display_name, digest))
return
self._digests[digest] = display_name
target_directory, target_filename = self._CreateSanitizedDestination(
file_entry, file_entry.path_spec, data_stream_name, destination_path)
if not os.path.isdir(target_directory):
os.makedirs(target_directory)
target_path = os.path.join(target_directory, target_filename)
if os.path.exists(target_path):
output_writer.Write((
'[skipping] unable to export contents of file entry: {0:s} '
'because exported file: {1:s} already exists.\n').format(
display_name, target_path))
return
try:
self._WriteFileEntry(file_entry, data_stream_name, target_path)
except (IOError, dfvfs_errors.BackEndError) as exception:
output_writer.Write((
'[skipping] unable to export contents of file entry: {0:s} '
'with error: {1!s}\n').format(display_name, exception))
try:
os.remove(target_path)
except (IOError, OSError):
pass
|
python
|
{
"resource": ""
}
|
q25710
|
ImageExportTool._ExtractFileEntry
|
train
|
def _ExtractFileEntry(
self, path_spec, destination_path, output_writer, skip_duplicates=True):
"""Extracts a file entry.
Args:
path_spec (dfvfs.PathSpec): path specification of the source file.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if not file_entry:
logger.warning('Unable to open file entry for path spec: {0:s}'.format(
path_spec.comparable))
return
if not self._filter_collection.Matches(file_entry):
return
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
self._ExtractDataStream(
file_entry, data_stream.name, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_entry_processed = True
if not file_entry_processed:
self._ExtractDataStream(
file_entry, '', destination_path, output_writer,
skip_duplicates=skip_duplicates)
|
python
|
{
"resource": ""
}
|
q25711
|
ImageExportTool._ExtractWithFilter
|
train
|
def _ExtractWithFilter(
self, source_path_specs, destination_path, output_writer,
artifact_filters, filter_file, artifact_definitions_path,
custom_artifacts_path, skip_duplicates=True):
"""Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if requested.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
extraction_engine = engine.BaseEngine()
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
for source_path_spec in source_path_specs:
file_system, mount_point = self._GetSourceFileSystem(
source_path_spec, resolver_context=self._resolver_context)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
source_path_spec)
output_writer.Write(
'Extracting file entries from: {0:s}\n'.format(display_name))
filter_find_specs = extraction_engine.BuildFilterFindSpecs(
artifact_definitions_path, custom_artifacts_path,
extraction_engine.knowledge_base, artifact_filters, filter_file)
searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
for path_spec in searcher.Find(find_specs=filter_find_specs):
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_system.Close()
|
python
|
{
"resource": ""
}
|
q25712
|
ImageExportTool._GetSourceFileSystem
|
train
|
def _GetSourceFileSystem(self, source_path_spec, resolver_context=None):
"""Retrieves the file system of the source.
Args:
source_path_spec (dfvfs.PathSpec): source path specification of the file
system.
resolver_context (dfvfs.Context): resolver context.
Returns:
tuple: containing:
dfvfs.FileSystem: file system.
dfvfs.PathSpec: mount point path specification that refers
to the base location of the file system.
Raises:
RuntimeError: if source path specification is not set.
"""
if not source_path_spec:
raise RuntimeError('Missing source.')
file_system = path_spec_resolver.Resolver.OpenFileSystem(
source_path_spec, resolver_context=resolver_context)
type_indicator = source_path_spec.type_indicator
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):
mount_point = source_path_spec
else:
mount_point = source_path_spec.parent
return file_system, mount_point
|
python
|
{
"resource": ""
}
|
q25713
|
ImageExportTool._ParseExtensionsString
|
train
|
def _ParseExtensionsString(self, extensions_string):
"""Parses the extensions string.
Args:
extensions_string (str): comma separated extensions to filter.
"""
if not extensions_string:
return
extensions_string = extensions_string.lower()
extensions = [
extension.strip() for extension in extensions_string.split(',')]
file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)
self._filter_collection.AddFilter(file_entry_filter)
|
python
|
{
"resource": ""
}
|
q25714
|
ImageExportTool._ParseNamesString
|
train
|
def _ParseNamesString(self, names_string):
"""Parses the name string.
Args:
names_string (str): comma separated filenames to filter.
"""
if not names_string:
return
names_string = names_string.lower()
names = [name.strip() for name in names_string.split(',')]
file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
self._filter_collection.AddFilter(file_entry_filter)
|
python
|
{
"resource": ""
}
|
q25715
|
ImageExportTool._ParseSignatureIdentifiers
|
train
|
def _ParseSignatureIdentifiers(self, data_location, signature_identifiers):
"""Parses the signature identifiers.
Args:
data_location (str): location of the format specification file, for
example, "signatures.conf".
signature_identifiers (str): comma separated signature identifiers.
Raises:
IOError: if the format specification file could not be read from
the specified data location.
OSError: if the format specification file could not be read from
the specified data location.
ValueError: if no data location was specified.
"""
if not signature_identifiers:
return
if not data_location:
raise ValueError('Missing data location.')
path = os.path.join(data_location, 'signatures.conf')
if not os.path.exists(path):
raise IOError(
'No such format specification file: {0:s}'.format(path))
try:
specification_store = self._ReadSpecificationFile(path)
except IOError as exception:
raise IOError((
'Unable to read format specification file: {0:s} with error: '
'{1!s}').format(path, exception))
signature_identifiers = signature_identifiers.lower()
signature_identifiers = [
identifier.strip() for identifier in signature_identifiers.split(',')]
file_entry_filter = file_entry_filters.SignaturesFileEntryFilter(
specification_store, signature_identifiers)
self._filter_collection.AddFilter(file_entry_filter)
|
python
|
{
"resource": ""
}
|
q25716
|
ImageExportTool._ReadSpecificationFile
|
train
|
def _ReadSpecificationFile(self, path):
"""Reads the format specification file.
Args:
path (str): path of the format specification file.
Returns:
FormatSpecificationStore: format specification store.
"""
specification_store = specification.FormatSpecificationStore()
with io.open(
path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:
for line in file_object.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
identifier, offset, pattern = line.split()
except ValueError:
logger.error('[skipping] invalid line: {0:s}'.format(line))
continue
try:
offset = int(offset, 10)
except ValueError:
logger.error('[skipping] invalid offset in line: {0:s}'.format(line))
continue
try:
# TODO: find another way to do this that doesn't use an undocumented
# API.
pattern = codecs.escape_decode(pattern)[0]
# ValueError is raised e.g. when the patterns contains "\xg1".
except ValueError:
logger.error(
'[skipping] invalid pattern in line: {0:s}'.format(line))
continue
format_specification = specification.FormatSpecification(identifier)
format_specification.AddNewSignature(pattern, offset=offset)
specification_store.AddSpecification(format_specification)
return specification_store
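# Illustrative sketch (not plaso code): the expected line format of the
# specification file is three whitespace-separated fields: identifier,
# decimal offset and an escaped byte pattern. The example entries below
# are assumptions for illustration only.
example_lines = [
    '# comment lines and blank lines are skipped',
    'lnk 0 \\x4c\\x00\\x00\\x00\\x01\\x14\\x02\\x00',
    'esedb 4 \\xef\\xcd\\xab\\x89',
]
for line in example_lines:
  line = line.strip()
  if not line or line.startswith('#'):
    continue
  identifier, offset, pattern = line.split()
  print(identifier, int(offset, 10), pattern)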
|
python
|
{
"resource": ""
}
|
q25717
|
ImageExportTool._WriteFileEntry
|
train
|
def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):
"""Writes the contents of the source file entry to a destination file.
Note that this function will overwrite an existing file.
Args:
file_entry (dfvfs.FileEntry): file entry whose content is to be written.
data_stream_name (str): name of the data stream whose content is to be
written.
destination_file (str): path of the destination file.
"""
source_file_object = file_entry.GetFileObject(
data_stream_name=data_stream_name)
if not source_file_object:
return
try:
with open(destination_file, 'wb') as destination_file_object:
source_file_object.seek(0, os.SEEK_SET)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
while data:
destination_file_object.write(data)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
finally:
source_file_object.close()
|
python
|
{
"resource": ""
}
|
q25718
|
ImageExportTool.AddFilterOptions
|
train
|
def AddFilterOptions(self, argument_group):
"""Adds the filter options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
names = ['artifact_filters', 'date_filters', 'filter_file']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, names=names)
argument_group.add_argument(
'-x', '--extensions', dest='extensions_string', action='store',
type=str, metavar='EXTENSIONS', help=(
'Filter on file name extensions. This option accepts multiple '
'comma separated values e.g. "csv,docx,pst".'))
argument_group.add_argument(
'--names', dest='names_string', action='store',
type=str, metavar='NAMES', help=(
'Filter on file names. This option accepts a comma separated '
'string denoting all file names, e.g. --names '
'"NTUSER.DAT,UsrClass.dat".'))
argument_group.add_argument(
'--signatures', dest='signature_identifiers', action='store',
type=str, metavar='IDENTIFIERS', help=(
'Filter on file format signature identifiers. This option '
'accepts multiple comma separated values e.g. "esedb,lnk". '
'Use "list" to show an overview of the supported file format '
'signatures.'))
|
python
|
{
"resource": ""
}
|
q25719
|
ImageExportTool.ListSignatureIdentifiers
|
train
|
def ListSignatureIdentifiers(self):
"""Lists the signature identifier.
Raises:
BadConfigOption: if the data location is invalid.
"""
if not self._data_location:
raise errors.BadConfigOption('Missing data location.')
path = os.path.join(self._data_location, 'signatures.conf')
if not os.path.exists(path):
raise errors.BadConfigOption(
'No such format specification file: {0:s}'.format(path))
try:
specification_store = self._ReadSpecificationFile(path)
except IOError as exception:
raise errors.BadConfigOption((
'Unable to read format specification file: {0:s} with error: '
'{1!s}').format(path, exception))
identifiers = []
for format_specification in specification_store.specifications:
identifiers.append(format_specification.identifier)
self._output_writer.Write('Available signature identifiers:\n')
self._output_writer.Write(
'\n'.join(textwrap.wrap(', '.join(sorted(identifiers)), 79)))
self._output_writer.Write('\n\n')
|
python
|
{
"resource": ""
}
|
q25720
|
ImageExportTool.ParseOptions
|
train
|
def ParseOptions(self, options):
"""Parses the options and initializes the front-end.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
# The data location is required to list signatures.
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['data_location'])
# Check the list options first otherwise required options will raise.
signature_identifiers = self.ParseStringOption(
options, 'signature_identifiers')
if signature_identifiers == 'list':
self.list_signature_identifiers = True
if self.list_signature_identifiers:
return
self._ParseInformationalOptions(options)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._destination_path = self.ParseStringOption(
options, 'path', default_value='export')
if not self._data_location:
logger.warning('Unable to automatically determine data location.')
argument_helper_names = ['artifact_definitions', 'process_resources']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self._ParseFilterOptions(options)
if (getattr(options, 'no_vss', False) or
getattr(options, 'include_duplicates', False)):
self._skip_duplicates = False
self._EnforceProcessMemoryLimit(self._process_memory_limit)
|
python
|
{
"resource": ""
}
|
q25721
|
SystemResourceUsageMonitorESEDBPlugin._ConvertValueBinaryDataToFloatingPointValue
|
train
|
def _ConvertValueBinaryDataToFloatingPointValue(self, value):
"""Converts a binary data value into a floating-point value.
Args:
value (bytes): binary data value containing an ASCII string or None.
Returns:
float: floating-point representation of binary data value or None if
value is not set.
Raises:
ParseError: if the floating-point value data size is not supported or
if the value cannot be parsed.
"""
if not value:
return None
value_length = len(value)
if value_length not in (4, 8):
raise errors.ParseError('Unsupported value data size: {0:d}'.format(
value_length))
if value_length == 4:
floating_point_map = self._GetDataTypeMap('float32le')
elif value_length == 8:
floating_point_map = self._GetDataTypeMap('float64le')
try:
return self._ReadStructureFromByteStream(value, 0, floating_point_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse floating-point value with error: {0!s}'.format(
exception))
|
python
|
{
"resource": ""
}
|
q25722
|
SystemResourceUsageMonitorESEDBPlugin._GetIdentifierMappings
|
train
|
def _GetIdentifierMappings(self, parser_mediator, cache, database):
"""Retrieves the identifier mappings from SruDbIdMapTable table.
In the SRUM database individual tables contain numeric identifiers for
the application ("AppId") and user identifier ("UserId"). A more
descriptive string representation of these values can be found in the
SruDbIdMapTable; for example, the numeric value 42 maps to DiagTrack.
This method caches the mappings of a specific SRUM database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (ESEDBCache): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (pyesedb.file): ESE database.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
"""
identifier_mappings = cache.GetResults('SruDbIdMapTable', default_value={})
if not identifier_mappings:
esedb_table = database.get_table_by_name('SruDbIdMapTable')
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'unable to retrieve table: SruDbIdMapTable')
else:
identifier_mappings = self._ParseIdentifierMappingsTable(
parser_mediator, esedb_table)
cache.StoreDictInCache('SruDbIdMapTable', identifier_mappings)
return identifier_mappings
|
python
|
{
"resource": ""
}
|
q25723
|
SystemResourceUsageMonitorESEDBPlugin._ParseGUIDTable
|
train
|
def _ParseGUIDTable(
self, parser_mediator, cache, database, esedb_table, values_map,
event_data_class):
"""Parses a table with a GUID as name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (ESEDBCache): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (pyesedb.file): ESE database.
esedb_table (pyesedb.table): table.
values_map (dict[str, str]): mapping of table columns to event data
attribute names.
event_data_class (type): event data class.
Raises:
ValueError: if the cache, database or table value is missing.
"""
if cache is None:
raise ValueError('Missing cache value.')
if database is None:
raise ValueError('Missing database value.')
if esedb_table is None:
raise ValueError('Missing table value.')
identifier_mappings = self._GetIdentifierMappings(
parser_mediator, cache, database)
for esedb_record in esedb_table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, esedb_table.name, esedb_record,
value_mappings=self._GUID_TABLE_VALUE_MAPPINGS)
event_data = event_data_class()
for attribute_name, column_name in values_map.items():
record_value = record_values.get(column_name, None)
if attribute_name in ('application', 'user_identifier'):
# Human readable versions of AppId and UserId values are stored
# in the SruDbIdMapTable table; also referred to as identifier
# mapping. Here we look up the numeric identifier stored in the GUID
# table in SruDbIdMapTable.
record_value = identifier_mappings.get(record_value, record_value)
setattr(event_data, attribute_name, record_value)
timestamp = record_values.get('TimeStamp')
if timestamp:
date_time = dfdatetime_ole_automation_date.OLEAutomationDate(
timestamp=timestamp)
timestamp_description = definitions.TIME_DESCRIPTION_SAMPLE
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('ConnectStartTime')
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FIRST_CONNECTED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25724
|
SystemResourceUsageMonitorESEDBPlugin._ParseIdentifierMappingRecord
|
train
|
def _ParseIdentifierMappingRecord(
self, parser_mediator, table_name, esedb_record):
"""Extracts an identifier mapping from a SruDbIdMapTable record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table_name (str): name of the table the record is stored in.
esedb_record (pyesedb.record): record.
Returns:
tuple[int, str]: numeric identifier and its string representation or
None, None if no identifier mapping can be retrieved from the record.
"""
record_values = self._GetRecordValues(
parser_mediator, table_name, esedb_record)
identifier = record_values.get('IdIndex', None)
if identifier is None:
parser_mediator.ProduceExtractionWarning(
'IdIndex value missing from table: SruDbIdMapTable')
return None, None
identifier_type = record_values.get('IdType', None)
if identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES:
parser_mediator.ProduceExtractionWarning(
'unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format(
identifier_type))
return None, None
mapped_value = record_values.get('IdBlob', None)
if mapped_value is None:
parser_mediator.ProduceExtractionWarning(
'IdBlob value missing from table: SruDbIdMapTable')
return None, None
if identifier_type == 3:
try:
fwnt_identifier = pyfwnt.security_identifier()
fwnt_identifier.copy_from_byte_stream(mapped_value)
mapped_value = fwnt_identifier.get_string()
except IOError:
parser_mediator.ProduceExtractionWarning(
'unable to decode IdBlob value as Windows NT security identifier')
return None, None
else:
try:
mapped_value = mapped_value.decode('utf-16le').rstrip('\0')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode IdBlob value as UTF-16 little-endian string')
return None, None
return identifier, mapped_value
|
python
|
{
"resource": ""
}
|
q25725
|
SystemResourceUsageMonitorESEDBPlugin._ParseIdentifierMappingsTable
|
train
|
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
"""Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
"""
identifier_mappings = {}
for esedb_record in esedb_table.records:
if parser_mediator.abort:
break
identifier, mapped_value = self._ParseIdentifierMappingRecord(
parser_mediator, esedb_table.name, esedb_record)
if identifier is None or mapped_value is None:
continue
if identifier in identifier_mappings:
parser_mediator.ProduceExtractionWarning(
'identifier: {0:d} already exists in mappings.'.format(identifier))
continue
identifier_mappings[identifier] = mapped_value
return identifier_mappings
|
python
|
{
"resource": ""
}
|
q25726
|
SystemResourceUsageMonitorESEDBPlugin.ParseApplicationResourceUsage
|
train
|
def ParseApplicationResourceUsage(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
"""Parses the application resource usage table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
"""
self._ParseGUIDTable(
parser_mediator, cache, database, table,
self._APPLICATION_RESOURCE_USAGE_VALUES_MAP,
SRUMApplicationResourceUsageEventData)
|
python
|
{
"resource": ""
}
|
q25727
|
SystemResourceUsageMonitorESEDBPlugin.ParseNetworkDataUsage
|
train
|
def ParseNetworkDataUsage(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
"""Parses the network data usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
"""
self._ParseGUIDTable(
parser_mediator, cache, database, table,
self._NETWORK_DATA_USAGE_VALUES_MAP, SRUMNetworkDataUsageEventData)
|
python
|
{
"resource": ""
}
|
q25728
|
SystemResourceUsageMonitorESEDBPlugin.ParseNetworkConnectivityUsage
|
train
|
def ParseNetworkConnectivityUsage(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
"""Parses the network connectivity usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
"""
# TODO: consider making ConnectStartTime + ConnectedTime an event.
self._ParseGUIDTable(
parser_mediator, cache, database, table,
self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP,
SRUMNetworkConnectivityUsageEventData)
|
python
|
{
"resource": ""
}
|
q25729
|
DtFabricBaseParser._FormatPackedIPv6Address
|
train
|
def _FormatPackedIPv6Address(self, packed_ip_address):
"""Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address.
"""
# Note that socket.inet_ntop() is not supported on Windows.
octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]
# TODO: omit ":0000" from the string.
return ':'.join([
'{0:04x}'.format(octet_pair) for octet_pair in octet_pairs])
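# Illustrative usage (not plaso code): pairing bytes into 16-bit groups and
# formatting them as colon-separated hexadecimal, shown for the packed
# loopback address ::1. Zero compression ("::") is intentionally not
# applied, matching the TODO above.
packed = [0] * 15 + [1]
octet_pairs = zip(packed[0::2], packed[1::2])
print(':'.join('{0:04x}'.format((hi << 8) | lo) for hi, lo in octet_pairs))
# 0000:0000:0000:0000:0000:0000:0000:0001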
|
python
|
{
"resource": ""
}
|
q25730
|
DtFabricBaseParser._ReadStructureFromFileObject
|
train
|
def _ReadStructureFromFileObject(
self, file_object, file_offset, data_type_map):
"""Reads a structure from a file-like object.
If the data type map has a fixed size this method will read the predefined
number of bytes from the file-like object. If the data type map has a
variable size, depending on values in the byte stream, this method will
continue to read from the file-like object until the data type map can be
successfully mapped onto the byte stream or until an error occurs.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
file_offset (int): offset of the structure data relative to the start
of the file-like object.
data_type_map (dtfabric.DataTypeMap): data type map of the structure.
Returns:
tuple[object, int]: structure values object and data size of
the structure.
Raises:
ParseError: if the structure cannot be read.
ValueError: if file-like object or data type map is missing.
"""
context = None
data = b''
last_data_size = 0
data_size = data_type_map.GetByteSize()
if not data_size:
data_size = data_type_map.GetSizeHint()
while data_size != last_data_size:
read_offset = file_offset + last_data_size
read_size = data_size - last_data_size
data_segment = self._ReadData(file_object, read_offset, read_size)
data = b''.join([data, data_segment])
try:
context = dtfabric_data_maps.DataTypeMapContext()
structure_values_object = data_type_map.MapByteStream(
data, context=context)
return structure_values_object, data_size
except dtfabric_errors.ByteStreamTooSmallError:
pass
except dtfabric_errors.MappingError as exception:
raise errors.ParseError((
'Unable to map {0:s} data at offset: 0x{1:08x} with error: '
'{2!s}').format(data_type_map.name, file_offset, exception))
last_data_size = data_size
data_size = data_type_map.GetSizeHint(context=context)
raise errors.ParseError(
'Unable to read {0:s} at offset: 0x{1:08x}'.format(
data_type_map.name, file_offset))
|
python
|
{
"resource": ""
}
|
q25731
|
ElasticsearchOutputModule.WriteHeader
|
train
|
def WriteHeader(self):
"""Connects to the Elasticsearch server and creates the index."""
mappings = {}
if self._raw_fields:
if self._document_type not in mappings:
mappings[self._document_type] = {}
mappings[self._document_type]['dynamic_templates'] = [{
'strings': {
'match_mapping_type': 'string',
'mapping': {
'fields': {
'raw': {
'type': 'keyword',
'index': 'false',
'ignore_above': self._ELASTIC_ANALYZER_STRING_LIMIT
}
}
}
}
}]
self._Connect()
self._CreateIndexIfNotExists(self._index_name, mappings)
|
python
|
{
"resource": ""
}
|
q25732
|
PlistFileArtifactPreprocessorPlugin._FindKeys
|
train
|
def _FindKeys(self, key, names, matches):
"""Searches the plist key hierarchy for keys with matching names.
If a match is found a tuple of the key name and value is added to
the matches list.
Args:
key (dict[str, object]): plist key.
names (list[str]): names of the keys to match.
matches (list[str]): keys with matching names.
"""
for name, subkey in iter(key.items()):
if name in names:
matches.append((name, subkey))
if isinstance(subkey, dict):
self._FindKeys(subkey, names, matches)
|
python
|
{
"resource": ""
}
|
q25733
|
MacOSUserAccountsPlugin._GetKeysDefaultEmpty
|
train
|
def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):
"""Retrieves plist keys, defaulting to empty values.
Args:
top_level (plistlib._InternalDict): top level plist object.
keys (set[str]): names of keys that should be returned.
depth (int): depth within the plist, where 1 is top level.
Returns:
dict[str, str]: values of the requested keys.
"""
keys = set(keys)
match = {}
if depth == 1:
for key in keys:
value = top_level.get(key, None)
if value is not None:
match[key] = value
else:
for _, parsed_key, parsed_value in plist_interface.RecurseKey(
top_level, depth=depth):
if parsed_key in keys:
match[parsed_key] = parsed_value
if set(match.keys()) == keys:
return match
return match
|
python
|
{
"resource": ""
}
|
q25734
|
MacOSUserAccountsPlugin._GetPlistRootKey
|
train
|
def _GetPlistRootKey(self, file_entry):
"""Retrieves the root key of a plist file.
Args:
file_entry (dfvfs.FileEntry): file entry of the plist.
Returns:
dict[str, object]: plist root key.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
file_object = file_entry.GetFileObject()
try:
plist_file = plist.PlistFile()
plist_file.Read(file_object)
except IOError as exception:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail(
'Unable to read plist file: {0:s} with error: {1!s}'.format(
location, exception))
finally:
file_object.close()
return plist_file.root_key
|
python
|
{
"resource": ""
}
|
q25735
|
RecurseKey
|
train
|
def RecurseKey(recur_item, depth=15, key_path=''):
"""Flattens nested dictionaries and lists by yielding it's values.
The hierarchy of a plist file is a series of nested dictionaries and lists.
This is a helper function helps plugins navigate the structure without
having to reimplement their own recursive methods.
This method implements an overridable depth limit to prevent processing
extremely deeply nested plists. If the limit is reached a debug message is
logged indicating which key processing stopped on.
Example Input Plist:
recur_item = { DeviceRoot: { DeviceMAC1: [Value1, Value2, Value3],
DeviceMAC2: [Value1, Value2, Value3]}}
Example Output:
('', DeviceRoot, {DeviceMACs...})
(DeviceRoot, DeviceMAC1, [Value1, Value2, Value3])
(DeviceRoot, DeviceMAC2, [Value1, Value2, Value3])
Args:
recur_item: An object to be checked for additional nested items.
depth: Optional integer indicating the current recursion depth.
This value is used to ensure we stop at the maximum recursion depth.
key_path: Optional path of the current working key.
Yields:
A tuple of the key path, key, and value from a plist.
"""
if depth < 1:
logger.debug('Recursion limit hit for key: {0:s}'.format(key_path))
return
if isinstance(recur_item, (list, tuple)):
for recur in recur_item:
for key in RecurseKey(recur, depth=depth, key_path=key_path):
yield key
return
if not hasattr(recur_item, 'items'):
return
for subkey, value in iter(recur_item.items()):
yield key_path, subkey, value
if isinstance(value, dict):
value = [value]
if isinstance(value, list):
for item in value:
if not isinstance(item, dict):
continue
subkey_path = '{0:s}/{1:s}'.format(key_path, subkey)
for tuple_value in RecurseKey(
item, depth=depth - 1, key_path=subkey_path):
yield tuple_value
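# Illustrative usage (not plaso code): flattening a small plist-like
# dictionary with RecurseKey, mirroring the example in the docstring. The
# sample data is an assumption; sibling keys follow dict insertion order.
device_cache = {
    'DeviceRoot': {
        'DeviceMAC1': ['Value1', 'Value2'],
        'DeviceMAC2': ['Value1', 'Value2']}}
for key_path, key, value in RecurseKey(device_cache):
  print((key_path, key, value))
# Roughly:
# ('', 'DeviceRoot', {'DeviceMAC1': [...], 'DeviceMAC2': [...]})
# ('/DeviceRoot', 'DeviceMAC1', ['Value1', 'Value2'])
# ('/DeviceRoot', 'DeviceMAC2', ['Value1', 'Value2'])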
|
python
|
{
"resource": ""
}
|
q25736
|
PlistPlugin._GetKeys
|
train
|
def _GetKeys(self, top_level, keys, depth=1):
"""Helper function to return keys nested in a plist dict.
By default this function returns the values for the named keys requested
by a plugin as a match dictionary object. The default setting is to look
a single layer down from the root (the same level as the check for plugin
applicability), which is suitable for most cases.
For cases where there is variability in the name at the first level
(e.g. it is the MAC addresses of a device, or a UUID) it is possible to
override the depth limit and use GetKeys to fetch from a deeper level.
E.g.
Top_Level (root): # depth = 0
-- Key_Name_is_UUID_Generated_At_Install 1234-5678-8 # depth = 1
---- Interesting_SubKey_with_value_to_Process: [Values, ...] # depth = 2
Args:
top_level (dict[str, object]): plist top-level key.
keys: A list of keys that should be returned.
depth: Defines how many levels deep to check for a match.
Returns:
A dictionary with just the keys requested or an empty dict if the plist
is flat, e.g. top_level is a list instead of a dict object.
"""
match = {}
if not isinstance(top_level, dict):
# Return an empty dict here if top_level is a list object, which happens
# if the plist file is flat.
return match
keys = set(keys)
if depth == 1:
for key in keys:
match[key] = top_level.get(key, None)
else:
for _, parsed_key, parsed_value in RecurseKey(top_level, depth=depth):
if parsed_key in keys:
match[parsed_key] = parsed_value
if set(match.keys()) == keys:
return match
return match
|
python
|
{
"resource": ""
}
|
q25737
|
BluetoothPlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant BT entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
device_cache = match.get('DeviceCache', {})
for device, value in iter(device_cache.items()):
name = value.get('Name', '')
if name:
name = ''.join(('Name:', name))
event_data = plist_event.PlistTimeEventData()
event_data.root = '/DeviceCache'
datetime_value = value.get('LastInquiryUpdate', None)
if datetime_value:
event_data.desc = ' '.join(
filter(None, ('Bluetooth Discovery', name)))
event_data.key = '{0:s}/LastInquiryUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if device in match.get('PairedDevices', []):
event_data.desc = 'Paired:True {0:s}'.format(name)
event_data.key = device
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = value.get('LastNameUpdate', None)
if datetime_value:
event_data.desc = ' '.join(filter(None, ('Device Name Set', name)))
event_data.key = '{0:s}/LastNameUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = value.get('LastServicesUpdate', None)
if datetime_value:
event_data.desc = ' '.join(filter(None, ('Services Updated', name)))
event_data.key = '{0:s}/LastServicesUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25738
|
MacKeeperCachePlugin._DictToListOfStrings
|
train
|
def _DictToListOfStrings(self, data_dict):
"""Converts a dictionary into a list of strings.
Args:
data_dict (dict[str, object]): dictionary to convert.
Returns:
list[str]: list of strings.
"""
ret_list = []
for key, value in iter(data_dict.items()):
if key in ('body', 'datetime', 'type', 'room', 'rooms', 'id'):
continue
ret_list.append('{0:s} = {1!s}'.format(key, value))
return ret_list
|
python
|
{
"resource": ""
}
|
q25739
|
MacKeeperCachePlugin._ExtractJQuery
|
train
|
def _ExtractJQuery(self, jquery_raw):
"""Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
"""
data_part = ''
if not jquery_raw:
return {}
if '[' in jquery_raw:
_, _, first_part = jquery_raw.partition('[')
data_part, _, _ = first_part.partition(']')
elif jquery_raw.startswith('//'):
_, _, first_part = jquery_raw.partition('{')
data_part = '{{{0:s}'.format(first_part)
elif '({' in jquery_raw:
_, _, first_part = jquery_raw.partition('(')
data_part, _, _ = first_part.rpartition(')')
if not data_part:
return {}
try:
data_dict = json.loads(data_part)
except ValueError:
return {}
return data_dict
|
python
|
{
"resource": ""
}
|
q25740
|
MacKeeperCachePlugin._ParseChatData
|
train
|
def _ParseChatData(self, data):
"""Parses chat comment data.
Args:
data (dict[str, object]): chat comment data as returned by SQLite.
Returns:
dict[str, object]: parsed chat comment data.
"""
data_store = {}
if 'body' in data:
body = data.get('body', '').replace('\n', ' ')
if body.startswith('//') and '{' in body:
body_dict = self._ExtractJQuery(body)
title, _, _ = body.partition('{')
body = '{0:s} <{1!s}>'.format(
title[2:], self._DictToListOfStrings(body_dict))
else:
body = 'No text.'
data_store['text'] = body
room = data.get('rooms', None)
if not room:
room = data.get('room', None)
if room:
data_store['room'] = room
data_store['id'] = data.get('id', None)
user = data.get('user', None)
if user:
try:
user_sid = int(user)
data_store['sid'] = user_sid
except (ValueError, TypeError):
data_store['user'] = user
return data_store
|
python
|
{
"resource": ""
}
|
q25741
|
FormattersManager.DeregisterFormatter
|
train
|
def DeregisterFormatter(cls, formatter_class):
"""Deregisters a formatter class.
The formatter classes are identified based on their lower case data type.
Args:
formatter_class (type): class of the formatter.
Raises:
KeyError: if formatter class is not set for the corresponding data type.
"""
formatter_data_type = formatter_class.DATA_TYPE.lower()
if formatter_data_type not in cls._formatter_classes:
raise KeyError(
'Formatter class not set for data type: {0:s}.'.format(
formatter_class.DATA_TYPE))
del cls._formatter_classes[formatter_data_type]
|
python
|
{
"resource": ""
}
|
q25742
|
FormattersManager.GetFormatterObject
|
train
|
def GetFormatterObject(cls, data_type):
"""Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available.
"""
data_type = data_type.lower()
if data_type not in cls._formatter_objects:
formatter_object = None
if data_type in cls._formatter_classes:
formatter_class = cls._formatter_classes[data_type]
# TODO: remove the need to instantiate the Formatter classes
# and use class methods only.
formatter_object = formatter_class()
if not formatter_object:
logger.warning(
'Using default formatter for data type: {0:s}'.format(data_type))
formatter_object = default.DefaultFormatter()
cls._formatter_objects[data_type] = formatter_object
return cls._formatter_objects[data_type]
|
python
|
{
"resource": ""
}
|
q25743
|
FormattersManager.GetMessageStrings
|
train
|
def GetMessageStrings(cls, formatter_mediator, event):
"""Retrieves the formatted message strings for a specific event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions between
formatters and other components, such as storage and Windows EventLog
resources.
event (EventObject): event.
Returns:
list[str, str]: long and short version of the message string.
"""
formatter_object = cls.GetFormatterObject(event.data_type)
return formatter_object.GetMessages(formatter_mediator, event)
|
python
|
{
"resource": ""
}
|
q25744
|
FormattersManager.GetSourceStrings
|
train
|
def GetSourceStrings(cls, event):
"""Retrieves the formatted source strings for a specific event object.
Args:
event (EventObject): event.
Returns:
list[str, str]: short and long version of the source of the event.
"""
# TODO: change this to return the long variant first so it is consistent
# with GetMessageStrings.
formatter_object = cls.GetFormatterObject(event.data_type)
return formatter_object.GetSources(event)
|
python
|
{
"resource": ""
}
|
q25745
|
PEParser._GetSectionNames
|
train
|
def _GetSectionNames(self, pefile_object):
"""Retrieves all PE section names.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[str]: names of the sections.
"""
section_names = []
for section in pefile_object.sections:
section_name = getattr(section, 'Name', b'')
# Ensure the name is decoded correctly.
try:
section_name = '{0:s}'.format(section_name.decode('unicode_escape'))
except UnicodeDecodeError:
section_name = '{0:s}'.format(repr(section_name))
section_names.append(section_name)
return section_names
|
python
|
{
"resource": ""
}
|
q25746
|
PEParser._GetImportTimestamps
|
train
|
def _GetImportTimestamps(self, pefile_object):
"""Retrieves timestamps from the import directory, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[tuple[str, int]]: names of the imported DLLs and their timestamps.
"""
import_timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT'):
return import_timestamps
for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT:
dll_name = getattr(importdata, 'dll', '')
try:
dll_name = dll_name.decode('ascii')
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace')
if not dll_name:
dll_name = '<NO DLL NAME>'
timestamp = getattr(importdata.struct, 'TimeDateStamp', 0)
if timestamp:
import_timestamps.append([dll_name, timestamp])
return import_timestamps
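# Illustrative usage sketch (not plaso code): reading the same import
# directory timestamps directly with the pefile library. 'sample.exe' is a
# placeholder path; the attribute names mirror the code above.
import pefile

pe = pefile.PE('sample.exe')
for import_data in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):
  dll_name = import_data.dll.decode('ascii', errors='replace')
  timestamp = getattr(import_data.struct, 'TimeDateStamp', 0)
  if timestamp:
    print(dll_name, timestamp)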
|
python
|
{
"resource": ""
}
|
q25747
|
PEParser._GetResourceTimestamps
|
train
|
def _GetResourceTimestamps(self, pefile_object):
"""Retrieves timestamps from resource directory entries, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[int]: resource timestamps.
"""
timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_RESOURCE'):
return timestamps
for entrydata in pefile_object.DIRECTORY_ENTRY_RESOURCE.entries:
directory = entrydata.directory
timestamp = getattr(directory, 'TimeDateStamp', 0)
if timestamp:
timestamps.append(timestamp)
return timestamps
|
python
|
{
"resource": ""
}
|
q25748
|
PEParser._GetLoadConfigTimestamp
|
train
|
def _GetLoadConfigTimestamp(self, pefile_object):
"""Retrieves the timestamp from the Load Configuration directory.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
int: load configuration timestamp or None if not present.
"""
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
return None
timestamp = getattr(
pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)
return timestamp
|
python
|
{
"resource": ""
}
|
q25749
|
PEParser._GetDelayImportTimestamps
|
train
|
def _GetDelayImportTimestamps(self, pefile_object):
"""Retrieves timestamps from delay import entries, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[tuple[str, int]]: names of the DLLs being delay imported and the
timestamps of their entries.
"""
delay_import_timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
return delay_import_timestamps
for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:
dll_name = importdata.dll
try:
dll_name = dll_name.decode('ascii')
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace')
timestamp = getattr(importdata.struct, 'dwTimeStamp', 0)
delay_import_timestamps.append([dll_name, timestamp])
return delay_import_timestamps
|
python
|
{
"resource": ""
}
|
q25750
|
TrendMicroBaseParser._CreateDictReader
|
train
|
def _CreateDictReader(self, line_reader):
"""Iterates over the log lines and provide a reader for the values.
Args:
line_reader (iter): yields each line in the log file.
Yields:
dict[str, str]: column values keyed by column header.
"""
for line in line_reader:
if isinstance(line, py2to3.BYTES_TYPE):
try:
line = codecs.decode(line, self._encoding)
except UnicodeDecodeError as exception:
raise errors.UnableToParseFile(
'Unable to decode line with error: {0!s}'.format(exception))
stripped_line = line.strip()
values = stripped_line.split(self.DELIMITER)
number_of_values = len(values)
number_of_columns = len(self.COLUMNS)
if number_of_values < self.MIN_COLUMNS:
raise errors.UnableToParseFile(
'Expected at least {0:d} values, found {1:d}'.format(
self.MIN_COLUMNS, number_of_values))
if number_of_values > number_of_columns:
raise errors.UnableToParseFile(
'Expected at most {0:d} values, found {1:d}'.format(
number_of_columns, number_of_values))
yield dict(zip(self.COLUMNS, values))
|
python
|
{
"resource": ""
}
|
q25751
|
TrendMicroBaseParser._ParseTimestamp
|
train
|
def _ParseTimestamp(self, parser_mediator, row):
"""Provides a timestamp for the given row.
If the Trend Micro log comes from a version that provides a POSIX timestamp,
use that directly; it provides the advantages of UTC and of second
precision. Otherwise fall back onto the local-timezone date and time.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
dfdatetime.interface.DateTimeValue: date and time value.
"""
timestamp = row.get('timestamp', None)
if timestamp is not None:
try:
timestamp = int(timestamp, 10)
except (ValueError, TypeError):
parser_mediator.ProduceExtractionWarning(
'Unable to parse timestamp value: {0!s}'.format(timestamp))
return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
# The timestamp is not available; parse the local date and time instead.
try:
return self._ConvertToTimestamp(row['date'], row['time'])
except ValueError as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse time string: "{0:s} {1:s}" with error: '
'{2!s}').format(repr(row['date']), repr(row['time']), exception))
|
python
|
{
"resource": ""
}
|
q25752
|
TrendMicroBaseParser._ConvertToTimestamp
|
train
|
def _ConvertToTimestamp(self, date, time):
"""Converts date and time strings into a timestamp.
Recent versions of Office Scan write a log field with a Unix timestamp.
Older versions may not write this field; their logs only provide a date and
a time expressed in the local time zone. This functions handles the latter
case.
Args:
date (str): date as an 8-character string in the YYYYMMDD format.
time (str): time as a 3 or 4-character string in the [H]HMM format.
Returns:
dfdatetime_time_elements.TimestampElements: the parsed timestamp.
Raises:
ValueError: if the date and time values cannot be parsed.
"""
# Check that the strings have the correct length.
if len(date) != 8:
raise ValueError(
'Unsupported length of date string: {0!s}'.format(repr(date)))
if len(time) < 3 or len(time) > 4:
raise ValueError(
'Unsupported length of time string: {0!s}'.format(repr(time)))
# Extract the date.
try:
year = int(date[:4], 10)
month = int(date[4:6], 10)
day = int(date[6:8], 10)
except (TypeError, ValueError):
raise ValueError('Unable to parse date string: {0!s}'.format(repr(date)))
# Extract the time. Note that a single-digit hour value has no leading zero.
try:
hour = int(time[:-2], 10)
minutes = int(time[-2:], 10)
except (TypeError, ValueError):
raise ValueError('Unable to parse time string: {0!s}'.format(repr(time)))
time_elements_tuple = (year, month, day, hour, minutes, 0)
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
# TODO: add functionality to dfdatetime to control precision.
date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE # pylint: disable=protected-access
return date_time
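# Illustrative sketch (not plaso code): extracting the date and the
# 3 or 4-character [H]HMM time the same way as above, using only the
# standard library. The sample values are assumptions.
date, time = '20180307', '905'  # 2018-03-07 09:05 local time
year, month, day = int(date[:4], 10), int(date[4:6], 10), int(date[6:8], 10)
hour, minutes = int(time[:-2], 10), int(time[-2:], 10)
print((year, month, day, hour, minutes))  # (2018, 3, 7, 9, 5)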
|
python
|
{
"resource": ""
}
|
q25753
|
VirusTotalAnalyzer._QueryHashes
|
train
|
def _QueryHashes(self, digests):
"""Queries VirusTotal for a specfic hashes.
Args:
digests (list[str]): hashes to look up.
Returns:
dict[str, object]: JSON response or None on error.
"""
url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query VirusTotal with error: {0!s}.'.format(
exception))
return json_response
|
python
|
{
"resource": ""
}
|
q25754
|
VirusTotalAnalyzer.Analyze
|
train
|
def Analyze(self, hashes):
"""Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set.
"""
if not self._api_key:
raise RuntimeError('No API key specified for VirusTotal lookup.')
hash_analyses = []
json_response = self._QueryHashes(hashes) or []
# VirusTotal returns a dictionary when a single hash is queried
# and a list when multiple hashes are queried.
if isinstance(json_response, dict):
json_response = [json_response]
for result in json_response:
resource = result['resource']
hash_analysis = interface.HashAnalysis(resource, result)
hash_analyses.append(hash_analysis)
return hash_analyses
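# --- Illustrative sketch (not part of the analyzer above) ---
# The VirusTotal report endpoint returns a single JSON object for one
# resource and a JSON list for several, which is why Analyze() normalizes
# the response with isinstance(). The sample responses are hypothetical.
single_response = {'resource': 'd41d8cd98f00b204e9800998ecf8427e',
                   'response_code': 0}
batch_response = [
    {'resource': 'd41d8cd98f00b204e9800998ecf8427e', 'response_code': 0},
    {'resource': 'e3b0c44298fc1c149afbf4c8996fb924', 'response_code': 1}]
for json_response in (single_response, batch_response):
  if isinstance(json_response, dict):
    json_response = [json_response]
  print([result['resource'] for result in json_response])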
|
python
|
{
"resource": ""
}
|
q25755
|
VirusTotalAnalysisPlugin.EnableFreeAPIKeyRateLimit
|
train
|
def EnableFreeAPIKeyRateLimit(self):
"""Configures Rate limiting for queries to VirusTotal.
The default rate limit for free VirusTotal API keys is 4 requests per
minute.
"""
self._analyzer.hashes_per_batch = 4
self._analyzer.wait_after_analysis = 60
self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1
|
python
|
{
"resource": ""
}
|
q25756
|
WindowsTimelinePlugin.ParseGenericRow
|
train
|
def ParseGenericRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a generic windows timeline row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = WindowsTimelineGenericEventData()
# Payload is JSON serialized as binary data in a BLOB field, with the text
# encoded as UTF-8.
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
# AppId is JSON stored as unicode text.
appid_entries_string = self._GetRowValue(query_hash, row, 'AppId')
payload = json.loads(payload_json_string)
appid_entries = json.loads(appid_entries_string)
# Attempt to populate the package_identifier field by checking each of
# these fields in the AppId JSON.
package_id_locations = [
'packageId', 'x_exe_path', 'windows_win32', 'windows_universal',
'alternateId']
for location in package_id_locations:
for entry in appid_entries:
if entry['platform'] == location and entry['application'] != '':
event_data.package_identifier = entry['application']
break
      if event_data.package_identifier is not None:
# package_identifier has been populated and we're done.
break
if 'description' in payload:
event_data.description = payload['description']
else:
event_data.description = ''
if 'appDisplayName' in payload and payload['appDisplayName'] != '':
event_data.application_display_name = payload['appDisplayName']
elif 'displayText' in payload and payload['displayText'] != '':
# Fall back to displayText if appDisplayName isn't available
event_data.application_display_name = payload['displayText']
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
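# --- Illustrative sketch (not part of the plugin above) ---
# Shows how the UTF-8 encoded JSON payload BLOB and the AppId JSON are
# decoded and scanned for a package identifier, mirroring the loop above.
# The sample data is hypothetical.
import json
payload_json_bytes = b'{"appDisplayName": "Notepad", "description": ""}'
appid_json_string = (
    '[{"platform": "windows_win32", "application": "notepad.exe"}]')
payload = json.loads(payload_json_bytes.decode('utf-8'))
appid_entries = json.loads(appid_json_string)
package_identifier = None
for location in ('packageId', 'x_exe_path', 'windows_win32'):
  for entry in appid_entries:
    if entry['platform'] == location and entry['application'] != '':
      package_identifier = entry['application']
      break
  if package_identifier is not None:
    break
print(package_identifier, payload.get('appDisplayName'))  # notepad.exe Notepad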
|
python
|
{
"resource": ""
}
|
q25757
|
WindowsTimelinePlugin.ParseUserEngagedRow
|
train
|
def ParseUserEngagedRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a timeline row that describes a user interacting with an app.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = WindowsTimelineUserEngagedEventData()
event_data.package_identifier = self._GetRowValue(
query_hash, row, 'PackageName')
# Payload is JSON serialized as binary data in a BLOB field, with the text
# encoded as UTF-8.
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
payload = json.loads(payload_json_string)
if 'reportingApp' in payload:
event_data.reporting_app = payload['reportingApp']
if 'activeDurationSeconds' in payload:
event_data.active_duration_seconds = int(payload['activeDurationSeconds'])
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25758
|
AndroidCallPlugin.ParseCallsRow
|
train
|
def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a Call record row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
call_type = self._GetRowValue(query_hash, row, 'type')
call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')
duration = self._GetRowValue(query_hash, row, 'duration')
timestamp = self._GetRowValue(query_hash, row, 'date')
event_data = AndroidCallEventData()
event_data.call_type = call_type
event_data.duration = self._GetRowValue(query_hash, row, 'duration')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.number = self._GetRowValue(query_hash, row, 'number')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Started')
parser_mediator.ProduceEventWithEventData(event, event_data)
if duration:
if isinstance(duration, py2to3.STRING_TYPES):
try:
duration = int(duration, 10)
except ValueError:
duration = 0
# The duration is in seconds and the date value in milliseconds.
timestamp += duration * 1000
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')
parser_mediator.ProduceEventWithEventData(event, event_data)
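# --- Illustrative sketch (not part of the plugin above) ---
# The call date is a Java timestamp in milliseconds and the duration is in
# seconds, so the end of the call is start + duration * 1000. Sample values
# are hypothetical.
start_timestamp = 1483225200000  # milliseconds since 1970-01-01 00:00:00 UTC
duration = 125  # seconds
end_timestamp = start_timestamp + duration * 1000
print(end_timestamp)  # 1483225325000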
|
python
|
{
"resource": ""
}
|
q25759
|
MacUserPlugin.Process
|
train
|
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
"""Check if it is a valid MacOS system account plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
"""
super(MacUserPlugin, self).Process(
parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
|
python
|
{
"resource": ""
}
|
q25760
|
BaseMRUListWindowsRegistryPlugin._ParseMRUListValue
|
train
|
def _ParseMRUListValue(self, registry_key):
"""Parses the MRUList value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUList value.
Returns:
mrulist_entries: MRUList entries or None if not available.
"""
mrulist_value = registry_key.GetValueByName('MRUList')
# The key exists but does not contain a value named "MRUList".
if not mrulist_value:
return None
mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')
context = dtfabric_data_maps.DataTypeMapContext(values={
'data_size': len(mrulist_value.data)})
return self._ReadStructureFromByteStream(
mrulist_value.data, 0, mrulist_entries_map, context=context)
|
python
|
{
"resource": ""
}
|
q25761
|
BaseMRUListWindowsRegistryPlugin._ParseMRUListKey
|
train
|
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
"""Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
try:
mrulist = self._ParseMRUListValue(registry_key)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUList value with error: {0!s}'.format(exception))
return
if not mrulist:
return
values_dict = {}
found_terminator = False
for entry_index, entry_letter in enumerate(mrulist):
# The MRU list is terminated with '\0' (0x0000).
      if entry_letter == 0:
        found_terminator = True
        continue
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUList entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
entry_letter = chr(entry_letter)
value_string = self._ParseMRUListEntryValue(
parser_mediator, registry_key, entry_index, entry_letter,
codepage=codepage)
value_text = 'Index: {0:d} [MRU Value {1:s}]'.format(
entry_index + 1, entry_letter)
values_dict[value_text] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25762
|
DSVParser._ConvertRowToUnicode
|
train
|
def _ConvertRowToUnicode(self, parser_mediator, row):
"""Converts all strings in a DSV row dict to Unicode.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, bytes]): a row from a DSV file, where the dictionary
key contains the column name and the value a binary string.
Returns:
dict[str, str]: a row from the DSV file, where the dictionary key
contains the column name and the value a Unicode string.
"""
for key, value in iter(row.items()):
if isinstance(value, py2to3.UNICODE_TYPE):
continue
try:
row[key] = value.decode(self._encoding)
except UnicodeDecodeError:
replaced_value = value.decode(self._encoding, errors='replace')
parser_mediator.ProduceExtractionWarning(
'error decoding DSV value: {0:s} as {1:s}, characters have been '
'replaced in {2:s}'.format(key, self._encoding, replaced_value))
row[key] = replaced_value
return row
|
python
|
{
"resource": ""
}
|
q25763
|
DSVParser._CreateDictReader
|
train
|
def _CreateDictReader(self, line_reader):
"""Returns a reader that processes each row and yields dictionaries.
csv.DictReader does this job well for single-character delimiters; parsers
that need multi-character delimiters need to override this method.
Args:
line_reader (iter): yields lines from a file-like object.
Returns:
iter: a reader of dictionaries, as returned by csv.DictReader().
"""
delimiter = self.DELIMITER
quotechar = self.QUOTE_CHAR
magic_test_string = self._MAGIC_TEST_STRING
# Python 3 csv module requires arguments to constructor to be of type str.
if py2to3.PY_3:
delimiter = delimiter.decode(self._encoding)
quotechar = quotechar.decode(self._encoding)
magic_test_string = magic_test_string.decode(self._encoding)
return csv.DictReader(
line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,
quotechar=quotechar, restkey=magic_test_string,
restval=magic_test_string)
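# --- Illustrative sketch (not part of the parser above) ---
# Demonstrates the restkey/restval sentinel trick: a row with too few (or too
# many) values exposes the sentinel, which ParseFileObject() treats as a
# signature mismatch. Column names and sample data are hypothetical.
import csv
import io
sentinel = 'MAGIC_TEST_STRING'
columns = ['date', 'time', 'message']
lines = io.StringIO('20180307,0931,ok\n20180307,0932\n')
reader = csv.DictReader(
    lines, delimiter=',', fieldnames=columns, restkey=sentinel,
    restval=sentinel)
for row in reader:
  print(row['message'] == sentinel)  # False for the full row, True for the short one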
|
python
|
{
"resource": ""
}
|
q25764
|
DSVParser._CreateLineReader
|
train
|
def _CreateLineReader(self, file_object):
"""Creates an object that reads lines from a text file.
The line reader is advanced to the beginning of the DSV content, skipping
any header lines.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
TextFile|BinaryLineReader: an object that implements an iterator
over lines in a text file.
Raises:
UnicodeDecodeError: if the file cannot be read with the specified
encoding.
"""
    # The Python 2 csv module reads bytes and the Python 3 csv module reads
    # Unicode strings.
if py2to3.PY_3:
line_reader = text_file.TextFile(
file_object, encoding=self._encoding, end_of_line=self._end_of_line)
# pylint: disable=protected-access
maximum_read_buffer_size = line_reader._MAXIMUM_READ_BUFFER_SIZE
else:
line_reader = line_reader_file.BinaryLineReader(
file_object, end_of_line=self._end_of_line)
maximum_read_buffer_size = line_reader.MAXIMUM_READ_BUFFER_SIZE
    # The maximum line length is kept one character less than the maximum
    # read buffer size so that a line that fills the entire read buffer can
    # be detected as exceeding the maximum line length.
if self._maximum_line_length > maximum_read_buffer_size:
self._maximum_line_length = maximum_read_buffer_size - 1
# If we specifically define a number of lines we should skip, do that here.
for _ in range(0, self.NUMBER_OF_HEADER_LINES):
line_reader.readline(self._maximum_line_length)
return line_reader
|
python
|
{
"resource": ""
}
|
q25765
|
DSVParser._HasExpectedLineLength
|
train
|
def _HasExpectedLineLength(self, file_object):
"""Determines if a file begins with lines of the expected length.
    Since the maximum length of valid lines in the DSV file is known, the
    presence of longer lines indicates that the file is unlikely to be parsed
    successfully, and checking this first avoids reading excessive data from
    a large file.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
bool: True if the file has lines of the expected length.
"""
original_file_position = file_object.tell()
line_reader = self._CreateLineReader(file_object)
for _ in range(0, 20):
# Attempt to read a line that is longer than any line that should be in
# the file.
sample_line = line_reader.readline(self._maximum_line_length + 1)
if len(sample_line) > self._maximum_line_length:
file_object.seek(original_file_position)
return False
file_object.seek(original_file_position)
return True
|
python
|
{
"resource": ""
}
|
q25766
|
DSVParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a DSV text file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
# TODO: Replace this with detection of the file encoding via byte-order
# marks. Also see: https://github.com/log2timeline/plaso/issues/1971
if not self._encoding:
self._encoding = parser_mediator.codepage
try:
if not self._HasExpectedLineLength(file_object):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s} with error: '
'unexpected line length.').format(self.NAME, display_name))
except UnicodeDecodeError as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
try:
line_reader = self._CreateLineReader(file_object)
reader = self._CreateDictReader(line_reader)
row_offset = line_reader.tell()
row = next(reader)
except (StopIteration, csv.Error, UnicodeDecodeError) as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
number_of_columns = len(self.COLUMNS)
number_of_records = len(row)
if number_of_records != number_of_columns:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Wrong number of '
'records (expected: {2:d}, got: {3:d})').format(
self.NAME, display_name, number_of_columns,
number_of_records))
for key, value in row.items():
if self._MAGIC_TEST_STRING in (key, value):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Signature '
'mismatch.').format(self.NAME, display_name))
row = self._ConvertRowToUnicode(parser_mediator, row)
if not self.VerifyRow(parser_mediator, row):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Verification '
'failed.').format(self.NAME, display_name))
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
for row in reader:
if parser_mediator.abort:
break
row = self._ConvertRowToUnicode(parser_mediator, row)
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
|
python
|
{
"resource": ""
}
|
q25767
|
AmcacheParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Amcache.hve file for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
regf_file = pyregf.file() # pylint: disable=no-member
try:
regf_file.open_file_object(file_object)
except IOError:
# The error is currently ignored -> see TODO above related to the
# fixing of handling multiple parsers for the same file format.
return
root_key = regf_file.get_root_key()
if root_key is None:
regf_file.close()
return
root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)
if root_file_key is None:
regf_file.close()
return
for volume_key in root_file_key.sub_keys:
for am_entry in volume_key.sub_keys:
self._ProcessAMCacheFileKey(am_entry, parser_mediator)
root_program_key = root_key.get_sub_key_by_path(
self._AMCACHE_ROOT_PROGRAM_KEY)
if root_program_key is None:
regf_file.close()
return
for am_entry in root_program_key.sub_keys:
self._ProcessAMCacheProgramKey(am_entry, parser_mediator)
regf_file.close()
|
python
|
{
"resource": ""
}
|
q25768
|
NsrlsvrAnalyzer._GetSocket
|
train
|
def _GetSocket(self):
"""Establishes a connection to an nsrlsvr instance.
Returns:
socket._socketobject: socket connected to an nsrlsvr instance or None if
a connection cannot be established.
"""
try:
return socket.create_connection(
(self._host, self._port), self._SOCKET_TIMEOUT)
except socket.error as exception:
logger.error(
'Unable to connect to nsrlsvr with error: {0!s}.'.format(exception))
|
python
|
{
"resource": ""
}
|
q25769
|
NsrlsvrAnalyzer._QueryHash
|
train
|
def _QueryHash(self, nsrl_socket, digest):
"""Queries nsrlsvr for a specific hash.
Args:
nsrl_socket (socket._socketobject): socket of connection to nsrlsvr.
digest (str): hash to look up.
Returns:
      bool: True if the hash was found, False if it was not found or if an
          error occurred.
"""
try:
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
    except (UnicodeDecodeError, UnicodeEncodeError):
logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
return False
response = None
try:
nsrl_socket.sendall(query)
response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
except socket.error as exception:
logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
exception))
if not response:
return False
# Strip end-of-line characters since they can differ per platform on which
# nsrlsvr is running.
response = response.strip()
# nsrlsvr returns "OK 1" if the has was found or "OK 0" if not.
return response == b'OK 1'
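# --- Illustrative sketch of the nsrlsvr wire exchange (no real connection) ---
# A QUERY command is a single ASCII line; the server answers "OK 1" when the
# hash is in its reference set and "OK 0" when it is not. The digest and the
# simulated response below are hypothetical.
digest = 'd41d8cd98f00b204e9800998ecf8427e'
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
simulated_response = b'OK 1\r\n'
print(query)
print(simulated_response.strip() == b'OK 1')  # True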
|
python
|
{
"resource": ""
}
|
q25770
|
NsrlsvrAnalyzer.Analyze
|
train
|
def Analyze(self, hashes):
"""Looks up hashes in nsrlsvr.
Args:
hashes (list[str]): hash values to look up.
Returns:
list[HashAnalysis]: analysis results, or an empty list on error.
"""
logger.debug(
'Opening connection to {0:s}:{1:d}'.format(self._host, self._port))
nsrl_socket = self._GetSocket()
if not nsrl_socket:
self.SignalAbort()
return []
hash_analyses = []
for digest in hashes:
response = self._QueryHash(nsrl_socket, digest)
if response is None:
continue
hash_analysis = interface.HashAnalysis(digest, response)
hash_analyses.append(hash_analysis)
nsrl_socket.close()
logger.debug(
'Closed connection to {0:s}:{1:d}'.format(self._host, self._port))
return hash_analyses
|
python
|
{
"resource": ""
}
|
q25771
|
TimeMachinePlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant TimeMachine entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
backup_alias_map = self._GetDataTypeMap('timemachine_backup_alias')
destinations = match.get('Destinations', [])
for destination in destinations:
backup_alias_data = destination.get('BackupAlias', b'')
try:
backup_alias = self._ReadStructureFromByteStream(
backup_alias_data, 0, backup_alias_map)
alias = backup_alias.string
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse backup alias value with error: {0!s}'.format(
exception))
alias = 'Unknown alias'
destination_identifier = (
destination.get('DestinationID', None) or 'Unknown device')
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'TimeMachine Backup in {0:s} ({1:s})'.format(
alias, destination_identifier)
event_data.key = 'item/SnapshotDates'
event_data.root = '/Destinations'
snapshot_dates = destination.get('SnapshotDates', [])
for datetime_value in snapshot_dates:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25772
|
TaskCacheWindowsRegistryPlugin._GetIdValue
|
train
|
def _GetIdValue(self, registry_key):
"""Retrieves the Id value from Task Cache Tree key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Yields:
tuple: containing:
dfwinreg.WinRegistryKey: Windows Registry key.
dfwinreg.WinRegistryValue: Windows Registry value.
"""
id_value = registry_key.GetValueByName('Id')
if id_value:
yield registry_key, id_value
for sub_key in registry_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
yield value_key, id_value
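# --- Illustrative sketch (not part of the plugin above) ---
# The same recursive generator pattern applied to a plain nested mapping:
# yield a match from the current node, then recurse into its children. The
# sample tree is hypothetical.
def iter_id_values(name, node):
  if 'Id' in node:
    yield name, node['Id']
  for child_name, child in node.get('children', {}).items():
    for result in iter_id_values(child_name, child):
      yield result

tree = {'Id': 'root-id', 'children': {'Tasks': {'Id': 'task-id'}}}
print(list(iter_id_values('Tree', tree)))
# [('Tree', 'root-id'), ('Tasks', 'task-id')]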
|
python
|
{
"resource": ""
}
|
q25773
|
PlsRecallParser._VerifyRecord
|
train
|
def _VerifyRecord(self, pls_record):
"""Verifies a PLS Recall record.
Args:
pls_record (pls_recall_record): a PLS Recall record to verify.
Returns:
bool: True if this is a valid PLS Recall record, False otherwise.
"""
# Verify that the timestamp is no more than six years into the future.
# Six years is an arbitrary time length just to evaluate the timestamp
# against some value. There is no guarantee that this will catch everything.
    # TODO: Add a check for a similarly valid value back in time. Maybe if the
    # timestamp is before 1980 we are pretty sure it is invalid?
# TODO: This is a very flaky assumption. Find a better one.
future_timestamp = (
timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)
if pls_record.last_written_time > future_timestamp:
return False
# Take the first word from the query field and attempt to match that against
# known query keywords.
first_word, _, _ = pls_record.query.partition(' ')
if first_word.lower() not in self._PLS_KEYWORD:
return False
return True
|
python
|
{
"resource": ""
}
|
q25774
|
PlsRecallParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size
|
python
|
{
"resource": ""
}
|
q25775
|
UniqueDomainsVisitedPlugin.ExamineEvent
|
train
|
def ExamineEvent(self, mediator, event):
"""Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
if event.data_type not in self._DATATYPES:
return
url = getattr(event, 'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, 'netloc', None)
if domain in self._domains:
# We've already found an event containing this domain.
return
self._domains.append(domain)
|
python
|
{
"resource": ""
}
|
q25776
|
WinIISParser._ParseComment
|
train
|
def _ParseComment(self, structure):
"""Parses a comment.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file.
"""
if structure[1] == 'Date:':
self._year, self._month, self._day_of_month, _, _, _ = structure.date_time
elif structure[1] == 'Fields:':
self._ParseFieldsMetadata(structure)
|
python
|
{
"resource": ""
}
|
q25777
|
WinIISParser._ParseFieldsMetadata
|
train
|
def _ParseFieldsMetadata(self, structure):
"""Parses the fields metadata and updates the log line definition to match.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file.
"""
fields = structure.fields.split(' ')
log_line_structure = pyparsing.Empty()
if fields[0] == 'date' and fields[1] == 'time':
log_line_structure += self.DATE_TIME.setResultsName('date_time')
fields = fields[2:]
for member in fields:
log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI)
updated_structures = []
for line_structure in self._line_structures:
if line_structure[0] != 'logline':
updated_structures.append(line_structure)
updated_structures.append(('logline', log_line_structure))
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = updated_structures
|
python
|
{
"resource": ""
}
|
q25778
|
WinIISParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is an IIS log file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
      bool: True if this is an IIS log file, False otherwise.
"""
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = self.LINE_STRUCTURES
self._day_of_month = None
self._month = None
self._year = None
# TODO: Examine other versions of the file format and if this parser should
# support them. For now just checking if it contains the IIS header.
if self._SIGNATURE in line:
return True
return False
|
python
|
{
"resource": ""
}
|
q25779
|
DockerJSONParser._GetIdentifierFromPath
|
train
|
def _GetIdentifierFromPath(self, parser_mediator):
"""Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
"""
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2]
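# --- Illustrative sketch (not part of the parser above) ---
# The identifier is the second-to-last path segment of the JSON file, for
# example the container ID in .../containers/<container_id>/config.json.
# The path below is hypothetical and a plain str.split() stands in for the
# dfVFS SplitPath() call.
path = '/var/lib/docker/containers/1bd6e58f6b1c/config.json'
path_segments = path.split('/')
print(path_segments[-2])  # 1bd6e58f6b1c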
|
python
|
{
"resource": ""
}
|
q25780
|
DockerJSONParser._ParseLayerConfigJSON
|
train
|
def _ParseLayerConfigJSON(self, parser_mediator, file_object):
"""Extracts events from a Docker filesystem layer configuration file.
The path of each filesystem layer config file is:
DOCKER_DIR/graph/<layer_id>/json
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file is not a valid layer config file.
"""
file_content = file_object.read()
file_content = codecs.decode(file_content, self._ENCODING)
json_dict = json.loads(file_content)
if 'docker_version' not in json_dict:
raise errors.UnableToParseFile(
'not a valid Docker layer configuration file, missing '
'\'docker_version\' key.')
if 'created' in json_dict:
layer_creation_command_array = [
x.strip() for x in json_dict['container_config']['Cmd']]
layer_creation_command = ' '.join(layer_creation_command_array).replace(
'\t', '')
event_data = DockerJSONLayerEventData()
event_data.command = layer_creation_command
event_data.layer_id = self._GetIdentifierFromPath(parser_mediator)
timestamp = timelib.Timestamp.FromTimeString(json_dict['created'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25781
|
DockerJSONParser._ParseContainerConfigJSON
|
train
|
def _ParseContainerConfigJSON(self, parser_mediator, file_object):
"""Extracts events from a Docker container configuration file.
The path of each container config file is:
DOCKER_DIR/containers/<container_id>/config.json
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file is not a valid container config file.
"""
file_content = file_object.read()
file_content = codecs.decode(file_content, self._ENCODING)
json_dict = json.loads(file_content)
if 'Driver' not in json_dict:
      raise errors.UnableToParseFile(
          'not a valid Docker container configuration file, missing '
          '\'Driver\' key.')
container_id_from_path = self._GetIdentifierFromPath(parser_mediator)
container_id_from_json = json_dict.get('ID', None)
if not container_id_from_json:
      raise errors.UnableToParseFile(
          'not a valid Docker container configuration file, the \'ID\' key is '
          'missing from the JSON dict (should be {0:s})'.format(
              container_id_from_path))
if container_id_from_json != container_id_from_path:
      raise errors.UnableToParseFile(
          'not a valid Docker container configuration file. The \'ID\' key of '
          'the JSON dict ({0:s}) is different from the container ID taken '
          'from the path to the JSON file ({1:s}).'.format(
              container_id_from_json, container_id_from_path))
if 'Config' in json_dict and 'Hostname' in json_dict['Config']:
container_name = json_dict['Config']['Hostname']
else:
container_name = 'Unknown container name'
event_data = DockerJSONContainerEventData()
event_data.container_id = container_id_from_path
event_data.container_name = container_name
if 'State' in json_dict:
if 'StartedAt' in json_dict['State']:
event_data.action = 'Container Started'
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['StartedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
if 'FinishedAt' in json_dict['State']:
if json_dict['State']['FinishedAt'] != '0001-01-01T00:00:00Z':
event_data.action = 'Container Finished'
# If the timestamp is 0001-01-01T00:00:00Z, the container
# is still running, so we don't generate a Finished event
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['FinishedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_END)
parser_mediator.ProduceEventWithEventData(event, event_data)
created_time = json_dict.get('Created', None)
if created_time:
event_data.action = 'Container Created'
timestamp = timelib.Timestamp.FromTimeString(created_time)
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25782
|
DockerJSONParser._ParseContainerLogJSON
|
train
|
def _ParseContainerLogJSON(self, parser_mediator, file_object):
"""Extract events from a Docker container log files.
The format is one JSON formatted log message per line.
The path of each container log file (which logs the container stdout and
stderr) is:
DOCKER_DIR/containers/<container_id>/<container_id>-json.log
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
"""
container_id = self._GetIdentifierFromPath(parser_mediator)
text_file_object = text_file.TextFile(file_object)
for log_line in text_file_object:
json_log_line = json.loads(log_line)
time = json_log_line.get('time', None)
if not time:
continue
event_data = DockerJSONContainerLogEventData()
event_data.container_id = container_id
event_data.log_line = json_log_line.get('log', None)
event_data.log_source = json_log_line.get('stream', None)
# TODO: pass line number to offset or remove.
event_data.offset = 0
timestamp = timelib.Timestamp.FromTimeString(time)
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
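# --- Illustrative sketch (not part of the parser above) ---
# The json-file logging driver writes one JSON document per line with "log",
# "stream" and "time" keys. The sample line below is hypothetical.
import json
log_line = (
    '{"log": "hello world", "stream": "stdout", '
    '"time": "2018-03-07T09:31:00.000000000Z"}')
json_log_line = json.loads(log_line)
print(json_log_line['time'], json_log_line['stream'], json_log_line['log'])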
|
python
|
{
"resource": ""
}
|
q25783
|
DockerJSONParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses various Docker configuration and log files in JSON format.
This methods checks whether the file_object points to a docker JSON config
or log file, and calls the corresponding _Parse* function to generate
Events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
ValueError: if the JSON file cannot be decoded.
"""
# Trivial JSON format check: first character must be an open brace.
if file_object.read(1) != b'{':
raise errors.UnableToParseFile(
'is not a valid JSON file, missing opening brace.')
file_object.seek(0, os.SEEK_SET)
file_entry = parser_mediator.GetFileEntry()
file_system = file_entry.GetFileSystem()
json_file_path = parser_mediator.GetDisplayName()
split_path = file_system.SplitPath(json_file_path)
try:
if 'containers' in split_path:
if 'config.json' in split_path:
self._ParseContainerConfigJSON(parser_mediator, file_object)
if json_file_path.endswith('-json.log'):
self._ParseContainerLogJSON(parser_mediator, file_object)
elif 'graph' in split_path:
if 'json' in split_path:
self._ParseLayerConfigJSON(parser_mediator, file_object)
except ValueError as exception:
      if str(exception) == 'No JSON object could be decoded':
raise errors.UnableToParseFile(exception)
else:
raise
|
python
|
{
"resource": ""
}
|
q25784
|
ESEDBPlugin._ConvertValueBinaryDataToUBInt64
|
train
|
def _ConvertValueBinaryDataToUBInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
"""
if not value:
return None
integer_map = self._GetDataTypeMap('uint64be')
try:
return self._ReadStructureFromByteStream(value, 0, integer_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(
exception))
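# --- Illustrative sketch (not the dtfabric-based implementation above) ---
# The same conversion expressed with the standard library: an unsigned
# 64-bit big-endian integer unpacked from 8 bytes of value data. The sample
# bytes are hypothetical.
import struct
value = b'\x00\x00\x01\x62\x9f\x1c\x2a\x00'
integer = struct.unpack('>Q', value)[0]
print(integer)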
|
python
|
{
"resource": ""
}
|
q25785
|
ESEDBPlugin._GetRecordValue
|
train
|
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry)
|
python
|
{
"resource": ""
}
|
q25786
|
ESEDBPlugin._GetRecordValues
|
train
|
def _GetRecordValues(
self, parser_mediator, table_name, record, value_mappings=None):
"""Retrieves the values from the record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table_name (str): name of the table.
record (pyesedb.record): ESE record.
      value_mappings (Optional[dict[str, str]]): value mappings, which map
the column name to a callback method.
Returns:
dict[str,object]: values per column name.
"""
record_values = {}
for value_entry in range(0, record.number_of_values):
if parser_mediator.abort:
break
column_name = record.get_column_name(value_entry)
if column_name in record_values:
logger.warning(
'[{0:s}] duplicate column: {1:s} in table: {2:s}'.format(
self.NAME, column_name, table_name))
continue
value_callback = None
if value_mappings and column_name in value_mappings:
value_callback_method = value_mappings.get(column_name)
if value_callback_method:
value_callback = getattr(self, value_callback_method, None)
if value_callback is None:
logger.warning((
'[{0:s}] missing value callback method: {1:s} for column: '
'{2:s} in table: {3:s}').format(
self.NAME, value_callback_method, column_name, table_name))
if value_callback:
try:
value_data = record.get_value_data(value_entry)
value = value_callback(value_data)
except Exception as exception: # pylint: disable=broad-except
logger.error(exception)
value = None
parser_mediator.ProduceExtractionWarning((
'unable to parse value: {0:s} with callback: {1:s} with error: '
'{2!s}').format(column_name, value_callback_method, exception))
else:
try:
value = self._GetRecordValue(record, value_entry)
except ValueError as exception:
value = None
parser_mediator.ProduceExtractionWarning(
'unable to parse value: {0:s} with error: {1!s}'.format(
column_name, exception))
record_values[column_name] = value
return record_values
|
python
|
{
"resource": ""
}
|
q25787
|
ESEDBPlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):
"""Extracts event objects from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
"""
if database is None:
raise ValueError('Invalid database.')
for table_name, callback_method in iter(self._tables.items()):
if parser_mediator.abort:
break
if not callback_method:
# Table names without a callback method are allowed to improve
# the detection of a database based on its table names.
continue
callback = getattr(self, callback_method, None)
if callback is None:
logger.warning(
'[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(
self.NAME, callback_method, table_name))
continue
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
logger.warning('[{0:s}] missing table: {1:s}'.format(
self.NAME, table_name))
continue
# The database is passed in case the database contains table names
# that are assigned dynamically and cannot be defined by
# the table name-callback mechanism.
callback(
parser_mediator, cache=cache, database=database, table=esedb_table,
**kwargs)
|
python
|
{
"resource": ""
}
|
q25788
|
ESEDBPlugin.Process
|
train
|
def Process(self, parser_mediator, cache=None, database=None, **kwargs):
"""Determines if this is the appropriate plugin for the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
"""
if database is None:
raise ValueError('Invalid database.')
# This will raise if unhandled keyword arguments are passed.
super(ESEDBPlugin, self).Process(parser_mediator)
self.GetEntries(
parser_mediator, cache=cache, database=database, **kwargs)
|
python
|
{
"resource": ""
}
|
q25789
|
DefaultPlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
"""Simple method to exact date values from a Plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (dict[str, object]): plist top-level key.
"""
for root, key, datetime_value in interface.RecurseKey(top_level):
if not isinstance(datetime_value, datetime.datetime):
continue
event_data = plist_event.PlistTimeEventData()
event_data.key = key
event_data.root = root
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25790
|
DefaultPlugin.Process
|
train
|
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
"""Overwrite the default Process function so it always triggers.
Process() checks if the current plist being processed is a match for a
plugin by comparing the PATH and KEY requirements defined by a plugin. If
both match processing continues; else raise WrongPlistPlugin.
The purpose of the default plugin is to always trigger on any given plist
file, thus it needs to overwrite the default behavior of comparing PATH
and KEY.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
"""
logger.debug('Plist {0:s} plugin used for: {1:s}'.format(
self.NAME, plist_name))
self.GetEntries(parser_mediator, top_level=top_level, **kwargs)
|
python
|
{
"resource": ""
}
|
q25791
|
BashHistoryParser.ParseRecord
|
train
|
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a record and produces a Bash history event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
"""
if key != 'log_entry':
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
event_data = BashHistoryEventData()
event_data.command = structure.command
date_time = dfdatetime_posix_time.PosixTime(timestamp=structure.timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25792
|
BashHistoryParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, lines):
"""Verifies that this is a bash history file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
match_generator = self._VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)
return bool(list(match_generator))
|
python
|
{
"resource": ""
}
|
q25793
|
NetworksWindowsRegistryPlugin._GetNetworkInfo
|
train
|
def _GetNetworkInfo(self, signatures_key):
"""Retrieves the network info within the signatures subkey.
Args:
signatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.
Returns:
dict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per
profile identifier (GUID).
"""
network_info = {}
for category in signatures_key.GetSubkeys():
for signature in category.GetSubkeys():
profile_guid_value = signature.GetValueByName('ProfileGuid')
if profile_guid_value:
profile_guid = profile_guid_value.GetDataAsObject()
else:
continue
default_gateway_mac_value = signature.GetValueByName(
'DefaultGatewayMac')
if default_gateway_mac_value:
default_gateway_mac = ':'.join([
'{0:02x}'.format(octet)
for octet in bytearray(default_gateway_mac_value.data)])
else:
default_gateway_mac = None
dns_suffix_value = signature.GetValueByName('DnsSuffix')
if dns_suffix_value:
dns_suffix = dns_suffix_value.GetDataAsObject()
else:
dns_suffix = None
network_info[profile_guid] = (default_gateway_mac, dns_suffix)
return network_info
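# --- Illustrative sketch (not part of the plugin above) ---
# Shows how raw DefaultGatewayMac value data is rendered as a colon-separated
# MAC address string, as done above. The sample bytes are hypothetical.
value_data = b'\x00\x1a\x2b\x3c\x4d\x5e'
default_gateway_mac = ':'.join([
    '{0:02x}'.format(octet) for octet in bytearray(value_data)])
print(default_gateway_mac)  # 00:1a:2b:3c:4d:5e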
|
python
|
{
"resource": ""
}
|
q25794
|
NetworksWindowsRegistryPlugin._ParseSystemTime
|
train
|
def _ParseSystemTime(self, byte_stream):
"""Parses a SYSTEMTIME date and time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
dfdatetime.Systemtime: SYSTEMTIME date and time value or None if no
value is set.
Raises:
ParseError: if the SYSTEMTIME could not be parsed.
"""
systemtime_map = self._GetDataTypeMap('systemtime')
try:
systemtime = self._ReadStructureFromByteStream(
byte_stream, 0, systemtime_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse SYSTEMTIME value with error: {0!s}'.format(
exception))
system_time_tuple = (
systemtime.year, systemtime.month, systemtime.weekday,
systemtime.day_of_month, systemtime.hours, systemtime.minutes,
systemtime.seconds, systemtime.milliseconds)
if system_time_tuple == self._EMPTY_SYSTEM_TIME_TUPLE:
return None
try:
return dfdatetime_systemtime.Systemtime(
system_time_tuple=system_time_tuple)
except ValueError:
raise errors.ParseError(
'Invalid SYSTEMTIME value: {0!s}'.format(system_time_tuple))
|
python
|
{
"resource": ""
}
|
q25795
|
Task.CreateRetryTask
|
train
|
def CreateRetryTask(self):
"""Creates a new task to retry a previously abandoned task.
The retry task will have a new identifier but most of the attributes
will be a copy of the previously abandoned task.
Returns:
Task: a task to retry a previously abandoned task.
"""
retry_task = Task(session_identifier=self.session_identifier)
retry_task.file_entry_type = self.file_entry_type
retry_task.merge_priority = self.merge_priority
retry_task.path_spec = self.path_spec
retry_task.storage_file_size = self.storage_file_size
self.has_retry = True
return retry_task
|
python
|
{
"resource": ""
}
|
q25796
|
Task.CreateTaskCompletion
|
train
|
def CreateTaskCompletion(self):
"""Creates a task completion.
Returns:
TaskCompletion: task completion attribute container.
"""
self.completion_time = int(
time.time() * definitions.MICROSECONDS_PER_SECOND)
task_completion = TaskCompletion()
task_completion.aborted = self.aborted
task_completion.identifier = self.identifier
task_completion.session_identifier = self.session_identifier
task_completion.timestamp = self.completion_time
return task_completion
|
python
|
{
"resource": ""
}
|
q25797
|
Task.CreateTaskStart
|
train
|
def CreateTaskStart(self):
"""Creates a task start.
Returns:
TaskStart: task start attribute container.
"""
task_start = TaskStart()
task_start.identifier = self.identifier
task_start.session_identifier = self.session_identifier
task_start.timestamp = self.start_time
return task_start
|
python
|
{
"resource": ""
}
|
q25798
|
ServicesPlugin.GetServiceDll
|
train
|
def GetServiceDll(self, key):
"""Get the Service DLL for a service, if it exists.
    Checks for a ServiceDLL in the Parameters subkey of a service key in
the Registry.
Args:
key (dfwinreg.WinRegistryKey): a Windows Registry key.
Returns:
str: path of the service DLL or None.
"""
parameters_key = key.GetSubkeyByName('Parameters')
if not parameters_key:
return None
service_dll = parameters_key.GetValueByName('ServiceDll')
if not service_dll:
return None
return service_dll.GetDataAsObject()
|
python
|
{
"resource": ""
}
|
q25799
|
FormatSpecification.AddNewSignature
|
train
|
def AddNewSignature(self, pattern, offset=None):
"""Adds a signature.
Args:
pattern (bytes): pattern of the signature.
offset (int): offset of the signature. None is used to indicate
the signature has no offset. A positive offset is relative from
the start of the data a negative offset is relative from the end
of the data.
"""
self.signatures.append(Signature(pattern, offset=offset))
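# --- Illustrative sketch (not part of the class above) ---
# Shows how a positive offset anchors a pattern from the start of the data
# and a negative offset from the end, as the docstring describes. The
# patterns and data are hypothetical.
data = b'MZ..header..payload..TRAILER'
header_pattern, header_offset = b'MZ', 0
trailer_pattern, trailer_offset = b'TRAILER', -7
print(data[header_offset:header_offset + len(header_pattern)] == header_pattern)
print(data[trailer_offset:] == trailer_pattern)  # both print True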
|
python
|
{
"resource": ""
}
|