| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q25500
|
HashersOptions.ListHashers
|
train
|
def ListHashers(self):
"""Lists information about the available hashers."""
hashers_information = hashers_manager.HashersManager.GetHashersInformation()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title='Hashers')
for name, description in sorted(hashers_information):
table_view.AddRow([name, description])
table_view.Write(self._output_writer)
|
python
|
{
"resource": ""
}
|
q25501
|
OutputModuleOptions._CreateOutputModule
|
train
|
def _CreateOutputModule(self, options):
"""Creates the output module.
Args:
options (argparse.Namespace): command line arguments.
Returns:
OutputModule: output module.
Raises:
RuntimeError: if the output module cannot be created.
"""
formatter_mediator = formatters_mediator.FormatterMediator(
data_location=self._data_location)
try:
formatter_mediator.SetPreferredLanguageIdentifier(
self._preferred_language)
except (KeyError, TypeError) as exception:
raise RuntimeError(exception)
mediator = output_mediator.OutputMediator(
self._knowledge_base, formatter_mediator,
preferred_encoding=self.preferred_encoding)
mediator.SetTimezone(self._preferred_time_zone)
try:
output_module = output_manager.OutputManager.NewOutputModule(
self._output_format, mediator)
except (KeyError, ValueError) as exception:
raise RuntimeError(
'Unable to create output module with error: {0!s}'.format(
exception))
if output_manager.OutputManager.IsLinearOutputModule(self._output_format):
output_file_object = open(self._output_filename, 'wb')
output_writer = tools.FileObjectOutputWriter(output_file_object)
output_module.SetOutputWriter(output_writer)
helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
# Check if there are parameters that have not been defined and need to be
# in order for the output module to continue. Prompt the user to supply
# those that may be missing.
missing_parameters = output_module.GetMissingArguments()
while missing_parameters:
for parameter in missing_parameters:
value = self._PromptUserForInput(
'Missing parameter {0:s} for output module'.format(parameter))
if value is None:
logger.warning(
'Unable to set the missing parameter for: {0:s}'.format(
parameter))
continue
setattr(options, parameter, value)
helpers_manager.ArgumentHelperManager.ParseOptions(
options, output_module)
missing_parameters = output_module.GetMissingArguments()
return output_module
|
python
|
{
"resource": ""
}
|
q25502
|
OutputModuleOptions.ListLanguageIdentifiers
|
train
|
def ListLanguageIdentifiers(self):
"""Lists the language identifiers."""
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Identifier', 'Language'],
title='Language identifiers')
for language_id, value_list in sorted(
language_ids.LANGUAGE_IDENTIFIERS.items()):
table_view.AddRow([language_id, value_list[1]])
table_view.Write(self._output_writer)
|
python
|
{
"resource": ""
}
|
q25503
|
OutputModuleOptions._GetOutputModulesInformation
|
train
|
def _GetOutputModulesInformation(self):
"""Retrieves the output modules information.
Returns:
list[tuple[str, str]]: pairs of output module names and descriptions.
"""
output_modules_information = []
for name, output_class in output_manager.OutputManager.GetOutputClasses():
output_modules_information.append((name, output_class.DESCRIPTION))
return output_modules_information
|
python
|
{
"resource": ""
}
|
q25504
|
OutputModuleOptions.ListOutputModules
|
train
|
def ListOutputModules(self):
"""Lists the output modules."""
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title='Output Modules')
for name, output_class in output_manager.OutputManager.GetOutputClasses():
table_view.AddRow([name, output_class.DESCRIPTION])
table_view.Write(self._output_writer)
disabled_classes = list(
output_manager.OutputManager.GetDisabledOutputClasses())
if not disabled_classes:
return
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title='Disabled Output Modules')
for name, output_class in disabled_classes:
table_view.AddRow([name, output_class.DESCRIPTION])
table_view.Write(self._output_writer)
|
python
|
{
"resource": ""
}
|
q25505
|
ProfilingOptions.ListProfilers
|
train
|
def ListProfilers(self):
"""Lists information about the available profilers."""
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title='Profilers')
profilers_information = sorted(
profiling.ProfilingArgumentsHelper.PROFILERS_INFORMATION.items())
for name, description in profilers_information:
table_view.AddRow([name, description])
table_view.Write(self._output_writer)
|
python
|
{
"resource": ""
}
|
q25506
|
AppleAccountPlugin.Process
|
train
|
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
"""Check if it is a valid Apple account plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
Raises:
errors.WrongPlistPlugin: if the plist name does not start with PLIST_PATH.
"""
if not plist_name.startswith(self.PLIST_PATH):
raise errors.WrongPlistPlugin(self.NAME, plist_name)
super(AppleAccountPlugin, self).Process(
parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
|
python
|
{
"resource": ""
}
|
q25507
|
AppleAccountPlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Apple Account entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
accounts = match.get('Accounts', {})
for name_account, account in iter(accounts.items()):
first_name = account.get('FirstName', '<FirstName>')
last_name = account.get('LastName', '<LastName>')
general_description = '{0:s} ({1:s} {2:s})'.format(
name_account, first_name, last_name)
event_data = plist_event.PlistTimeEventData()
event_data.key = name_account
event_data.root = '/Accounts'
datetime_value = account.get('CreationDate', None)
if datetime_value:
event_data.desc = 'Configured Apple account {0:s}'.format(
general_description)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = account.get('LastSuccessfulConnect', None)
if datetime_value:
event_data.desc = 'Connected Apple account {0:s}'.format(
general_description)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = account.get('ValidationDate', None)
if datetime_value:
event_data.desc = 'Last validation Apple account {0:s}'.format(
general_description)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
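For reference, a minimal sketch of the plist layout that `GetEntries` above walks. The key names (`Accounts`, `FirstName`, `LastName`, `CreationDate`, `LastSuccessfulConnect`, `ValidationDate`) are taken from the code; the account identifier and all values are invented samples, not data from a real plist.

```python
# Hedged sketch: the top-level structure GetEntries expects, reconstructed
# from the keys it reads. Values below are illustrative only.
import datetime

match = {
    'Accounts': {
        'user@example.com': {
            'FirstName': 'Jane',
            'LastName': 'Doe',
            'CreationDate': datetime.datetime(2013, 6, 24, 19, 53, 36),
            'LastSuccessfulConnect': datetime.datetime(2013, 8, 1, 10, 2, 57),
            'ValidationDate': datetime.datetime(2013, 8, 1, 10, 2, 57),
        },
    },
}

# Each datetime value that is present yields one event, so an account with
# all three timestamps set produces three events.
```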
q25508
|
TerminalServerClientPlugin.ExtractEvents
|
train
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Terminal Server Client Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
mru_values_dict = {}
for subkey in registry_key.GetSubkeys():
username_value = subkey.GetValueByName('UsernameHint')
if (username_value and username_value.data and
username_value.DataIsString()):
username = username_value.GetDataAsObject()
else:
username = 'N/A'
mru_values_dict[subkey.name] = username
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = subkey.path
event_data.offset = subkey.offset
event_data.regvalue = {'Username hint': username}
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = mru_values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25509
|
BencodeParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a bencoded file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_object.seek(0, os.SEEK_SET)
header = file_object.read(2)
if not self.BENCODE_RE.match(header):
raise errors.UnableToParseFile('Not a valid Bencoded file.')
file_object.seek(0, os.SEEK_SET)
try:
data_object = bencode.bdecode(file_object.read())
except (IOError, bencode.BTFailure) as exception:
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(
self.NAME, parser_mediator.GetDisplayName(), exception))
if not data_object:
raise errors.UnableToParseFile(
'[{0:s}] missing decoded data for file: {1:s}'.format(
self.NAME, parser_mediator.GetDisplayName()))
for plugin in self._plugins:
try:
plugin.UpdateChainAndProcess(parser_mediator, data=data_object)
except errors.WrongBencodePlugin as exception:
logger.debug('[{0:s}] wrong plugin: {1!s}'.format(
self.NAME, exception))
|
python
|
{
"resource": ""
}
|
q25510
|
SkyDriveLogParser._ParseHeader
|
train
|
def _ParseHeader(self, parser_mediator, structure):
"""Parse header lines and store appropriate attributes.
['Logging started.', 'Version=', '17.0.2011.0627',
[2013, 7, 25], 16, 3, 23, 291, 'StartLocalTime', '<details>']
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=structure.header_date_time)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.header_date_time))
return
event_data = SkyDriveLogEventData()
# TODO: refactor detail to individual event data attributes.
event_data.detail = '{0:s} {1:s} {2:s} {3:s} {4:s}'.format(
structure.log_start, structure.version_string,
structure.version_number, structure.local_time_string,
structure.details)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25511
|
SkyDriveLogParser._ParseLine
|
train
|
def _ParseLine(self, parser_mediator, structure):
"""Parses a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
year += 2000
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = SkyDriveLogEventData()
# Replace newlines with spaces in structure.detail to preserve output.
# TODO: refactor detail to individual event data attributes.
event_data.detail = structure.detail.replace('\n', ' ')
event_data.log_level = structure.log_level
event_data.module = structure.module
event_data.source_code = structure.source_code
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25512
|
SkyDriveLogParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, lines):
"""Verify that this file is a SkyDrive log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
structure = self._SDF_HEADER.parseString(lines)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive log file')
return False
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=structure.header_date_time)
except ValueError:
logger.debug(
'Not a SkyDrive log file, invalid date and time: {0!s}'.format(
structure.header_date_time))
return False
return True
|
python
|
{
"resource": ""
}
|
q25513
|
SkyDriveOldLogParser._ParseLogline
|
train
|
def _ParseLogline(self, parser_mediator, structure):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = SkyDriveOldLogEventData()
event_data.log_level = structure.log_level
event_data.offset = self.offset
event_data.source_code = structure.source_code
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
self._last_date_time = date_time
self._last_event_data = event_data
|
python
|
{
"resource": ""
}
|
q25514
|
SkyDriveOldLogParser._ParseNoHeaderSingleLine
|
train
|
def _ParseNoHeaderSingleLine(self, parser_mediator, structure):
"""Parse an isolated header line and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
if not self._last_event_data:
logger.debug('SkyDrive, found isolated line with no previous events')
return
event_data = SkyDriveOldLogEventData()
event_data.offset = self._last_event_data.offset
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
# TODO think to a possible refactoring for the non-header lines.
self._last_date_time = None
self._last_event_data = None
|
python
|
{
"resource": ""
}
|
q25515
|
SkyDriveOldLogParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a SkyDrive old log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive old log file')
return False
day_of_month, month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a SkyDrive old log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
return True
|
python
|
{
"resource": ""
}
|
q25516
|
KodiMyVideosPlugin.ParseVideoRow
|
train
|
def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a Video row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = KodiVideoEventData()
event_data.filename = self._GetRowValue(query_hash, row, 'strFilename')
event_data.play_count = self._GetRowValue(query_hash, row, 'playCount')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'lastPlayed')
date_time = dfdatetime_time_elements.TimeElements()
date_time.CopyFromDateTimeString(timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25517
|
Session.CopyAttributesFromSessionCompletion
|
train
|
def CopyAttributesFromSessionCompletion(self, session_completion):
"""Copies attributes from a session completion.
Args:
session_completion (SessionCompletion): session completion attribute
container.
Raises:
ValueError: if the identifier of the session completion does not match
that of the session.
"""
if self.identifier != session_completion.identifier:
raise ValueError('Session identifier mismatch.')
self.aborted = session_completion.aborted
if session_completion.analysis_reports_counter:
self.analysis_reports_counter = (
session_completion.analysis_reports_counter)
self.completion_time = session_completion.timestamp
if session_completion.event_labels_counter:
self.event_labels_counter = session_completion.event_labels_counter
if session_completion.parsers_counter:
self.parsers_counter = session_completion.parsers_counter
|
python
|
{
"resource": ""
}
|
q25518
|
Session.CreateSessionCompletion
|
train
|
def CreateSessionCompletion(self):
"""Creates a session completion.
Returns:
SessionCompletion: session completion attribute container.
"""
self.completion_time = int(time.time() * 1000000)
session_completion = SessionCompletion()
session_completion.aborted = self.aborted
session_completion.analysis_reports_counter = self.analysis_reports_counter
session_completion.event_labels_counter = self.event_labels_counter
session_completion.identifier = self.identifier
session_completion.parsers_counter = self.parsers_counter
session_completion.timestamp = self.completion_time
return session_completion
|
python
|
{
"resource": ""
}
|
q25519
|
Session.CreateSessionStart
|
train
|
def CreateSessionStart(self):
"""Creates a session start.
Returns:
SessionStart: session start attribute container.
"""
session_start = SessionStart()
session_start.artifact_filters = self.artifact_filters
session_start.command_line_arguments = self.command_line_arguments
session_start.debug_mode = self.debug_mode
session_start.enabled_parser_names = self.enabled_parser_names
session_start.filter_file = self.filter_file
session_start.identifier = self.identifier
session_start.parser_filter_expression = self.parser_filter_expression
session_start.preferred_encoding = self.preferred_encoding
session_start.preferred_time_zone = self.preferred_time_zone
session_start.product_name = self.product_name
session_start.product_version = self.product_version
session_start.timestamp = self.start_time
return session_start
|
python
|
{
"resource": ""
}
|
q25520
|
ArgumentsHelper._ParseNumericOption
|
train
|
def _ParseNumericOption(cls, options, argument_name, default_value=None):
"""Parses a numeric command line argument.
Args:
options (argparse.Namespace): parser options.
argument_name (str): name of the command line argument.
default_value (Optional[int]): default value of the command line argument.
Returns:
int: command line argument value or the default value if the command line
argument is not set.
Raises:
BadConfigOption: if the command line argument value is not an integer.
"""
argument_value = getattr(options, argument_name, None)
if argument_value is None:
return default_value
if not isinstance(argument_value, py2to3.INTEGER_TYPES):
raise errors.BadConfigOption(
'Unsupported option: {0:s} integer type required.'.format(
argument_name))
return argument_value
|
python
|
{
"resource": ""
}
|
q25521
|
FseventsdParser._ParseDLSPageHeader
|
train
|
def _ParseDLSPageHeader(self, file_object, page_offset):
"""Parses a DLS page header from a file-like object.
Args:
file_object (file): file-like object to read the header from.
page_offset (int): offset of the start of the page header, relative
to the start of the file.
Returns:
tuple: containing:
dls_page_header: parsed record structure.
int: header size.
Raises:
ParseError: when the header cannot be parsed.
UnableToParseFile: if the page header signature is not supported.
"""
page_header_map = self._GetDataTypeMap('dls_page_header')
try:
page_header, page_size = self._ReadStructureFromFileObject(
file_object, page_offset, page_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse page header at offset: 0x{0:08x} '
'with error: {1!s}'.format(page_offset, exception))
if page_header.signature not in self._DLS_SIGNATURES:
raise errors.UnableToParseFile(
'Unsupported page header signature at offset: 0x{0:08x}'.format(
page_offset))
return page_header, page_size
|
python
|
{
"resource": ""
}
|
q25522
|
FseventsdParser._BuildEventData
|
train
|
def _BuildEventData(self, record):
"""Builds an FseventsdData object from a parsed structure.
Args:
record (dls_record_v1|dls_record_v2): parsed record structure.
Returns:
FseventsdEventData: event data attribute container.
"""
event_data = FseventsdEventData()
event_data.path = record.path
event_data.flags = record.event_flags
event_data.event_identifier = record.event_identifier
# Node identifier is only set in DLS V2 records.
event_data.node_identifier = getattr(record, 'node_identifier', None)
return event_data
|
python
|
{
"resource": ""
}
|
q25523
|
FseventsdParser._GetParentModificationTime
|
train
|
def _GetParentModificationTime(self, gzip_file_entry):
"""Retrieves the modification time of the file entry's parent file.
Note that this retrieves the time from the file entry of the parent of the
gzip file entry's path spec, which is different from trying to retrieve it
from the gzip file entry's parent file entry.
It would be preferable to retrieve the modification time from the metadata
in the gzip file itself, but it appears to not be set when the file is
written by fseventsd.
Args:
gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing
the fseventsd data.
Returns:
dfdatetime.DateTimeValues: parent modification time, or None if not
available.
"""
parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
gzip_file_entry.path_spec.parent)
if not parent_file_entry:
return None
return parent_file_entry.modification_time
|
python
|
{
"resource": ""
}
|
q25524
|
FseventsdParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an fseventsd file.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
"""
page_header_map = self._GetDataTypeMap('dls_page_header')
try:
page_header, file_offset = self._ReadStructureFromFileObject(
file_object, 0, page_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse page header with error: {0!s}'.format(
exception))
if page_header.signature not in self._DLS_SIGNATURES:
raise errors.UnableToParseFile('Invalid file signature')
current_page_end = page_header.page_size
file_entry = parser_mediator.GetFileEntry()
date_time = self._GetParentModificationTime(file_entry)
# TODO: Change this to use a more representative time definition (time span)
# when https://github.com/log2timeline/dfdatetime/issues/65 is resolved.
if date_time:
timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
file_size = file_object.get_size()
while file_offset < file_size:
if file_offset >= current_page_end:
try:
page_header, header_size = self._ParseDLSPageHeader(
file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to parse page header with error: {0!s}'.format(
exception))
break
current_page_end += page_header.page_size
file_offset += header_size
continue
if page_header.signature == self._DLS_V1_SIGNATURE:
record_map = self._GetDataTypeMap('dls_record_v1')
else:
record_map = self._GetDataTypeMap('dls_record_v2')
try:
record, record_length = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
file_offset += record_length
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to parse page record with error: {0!s}'.format(
exception))
break
event_data = self._BuildEventData(record)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25525
|
MacDocumentVersionsPlugin.DocumentVersionsRow
|
train
|
def DocumentVersionsRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a document versions row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
# version_path = "PerUser/UserID/xx/client_id/version_file"
# where PerUser and UserID are real directories.
version_path = self._GetRowValue(query_hash, row, 'version_path')
path = self._GetRowValue(query_hash, row, 'path')
paths = version_path.split('/')
if len(paths) < 2 or not paths[1].isdigit():
user_sid = ''
else:
user_sid = paths[1]
version_path = self.ROOT_VERSION_PATH + version_path
path, _, _ = path.rpartition('/')
event_data = MacDocumentVersionsEventData()
# TODO: shouldn't this be a separate event?
event_data.last_time = self._GetRowValue(query_hash, row, 'last_time')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.path = path
event_data.query = query
# Note that the user_sid value is expected to be a string.
event_data.user_sid = '{0!s}'.format(user_sid)
event_data.version_path = version_path
timestamp = self._GetRowValue(query_hash, row, 'version_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25526
|
UtmpParser._ReadEntry
|
train
|
def _ReadEntry(self, parser_mediator, file_object, file_offset):
"""Reads an utmp entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the data relative to the start of
the file-like object.
Returns:
tuple: containing:
int: timestamp, which contains the number of microseconds
since January 1, 1970, 00:00:00 UTC.
UtmpEventData: event data of the utmp entry read.
Raises:
ParseError: if the entry cannot be parsed.
UnableToParseFile: if the entry type is not supported.
"""
entry_map = self._GetDataTypeMap('linux_libc6_utmp_entry')
try:
entry, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse utmp entry at offset: 0x{0:08x} with error: '
'{1!s}.').format(file_offset, exception))
if entry.type not in self._SUPPORTED_TYPES:
raise errors.UnableToParseFile('Unsupported type: {0:d}'.format(
entry.type))
encoding = parser_mediator.codepage or 'utf-8'
try:
username = entry.username.split(b'\x00')[0]
username = username.decode(encoding)
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode username string')
username = None
try:
terminal = entry.terminal.split(b'\x00')[0]
terminal = terminal.decode(encoding)
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode terminal string')
terminal = None
if terminal == '~':
terminal = 'system boot'
try:
hostname = entry.hostname.split(b'\x00')[0]
hostname = hostname.decode(encoding)
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode hostname string')
hostname = None
if not hostname or hostname == ':0':
hostname = 'localhost'
if entry.ip_address[4:] == self._EMPTY_IP_ADDRESS[4:]:
ip_address = self._FormatPackedIPv4Address(entry.ip_address[:4])
else:
ip_address = self._FormatPackedIPv6Address(entry.ip_address)
# TODO: add termination status.
event_data = UtmpEventData()
event_data.hostname = hostname
event_data.exit_status = entry.exit_status
event_data.ip_address = ip_address
event_data.offset = file_offset
event_data.pid = entry.pid
event_data.terminal = terminal
event_data.terminal_identifier = entry.terminal_identifier
event_data.type = entry.type
event_data.username = username
timestamp = entry.microseconds + (
entry.timestamp * definitions.MICROSECONDS_PER_SECOND)
return timestamp, event_data
|
python
|
{
"resource": ""
}
|
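As a small arithmetic note, a hedged sketch of how `_ReadEntry` combines the utmp seconds and microseconds fields into the single microsecond-precision timestamp that `ParseFileObject` later wraps in a dfdatetime `PosixTimeInMicroseconds`. The sample field values are invented; the constant mirrors `definitions.MICROSECONDS_PER_SECOND`.

```python
# Hedged sketch (not plaso code): combining utmp entry fields into one
# microsecond timestamp, as _ReadEntry does. Sample values are invented.
MICROSECONDS_PER_SECOND = 1000000

entry_timestamp = 1567888276   # seconds since 1970-01-01 00:00:00 UTC
entry_microseconds = 123456    # sub-second remainder

timestamp = entry_microseconds + (entry_timestamp * MICROSECONDS_PER_SECOND)
print(timestamp)  # 1567888276123456
```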
q25527
|
UtmpParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an utmp file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
try:
timestamp, event_data = self._ReadEntry(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
raise errors.UnableToParseFile(
'Unable to parse first utmp entry with error: {0!s}'.format(
exception))
if not event_data.username:
raise errors.UnableToParseFile(
'Unable to parse first utmp entry with error: missing username')
if not timestamp:
raise errors.UnableToParseFile(
'Unable to parse first utmp entry with error: missing timestamp')
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell()
file_size = file_object.get_size()
while file_offset < file_size:
if parser_mediator.abort:
break
try:
timestamp, event_data = self._ReadEntry(
parser_mediator, file_object, file_offset)
except errors.ParseError:
# Note that the utmp file can contain trailing data.
break
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell()
|
python
|
{
"resource": ""
}
|
q25528
|
OperaTypedHistoryParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
data = file_object.read(self._HEADER_READ_SIZE)
if not data.startswith(b'<?xml'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [not a XML]')
_, _, data = data.partition(b'\n')
if not data.startswith(b'<typed_history'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [wrong XML root key]')
# For ElementTree to work we need to work on a file object seeked
# to the beginning.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
for history_item in xml.iterfind('typed_history_item'):
event_data = OperaTypedHistoryEventData()
event_data.entry_type = history_item.get('type', None)
event_data.url = history_item.get('content', None)
if event_data.entry_type == 'selected':
event_data.entry_selection = 'Filled from autocomplete.'
elif event_data.entry_type == 'text':
event_data.entry_selection = 'Manually typed.'
last_typed_time = history_item.get('last_typed', None)
if last_typed_time is None:
parser_mediator.ProduceExtractionWarning('missing last typed time.')
continue
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(last_typed_time)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported last typed time: {0:s} with error: {1!s}.'.format(
last_typed_time, exception))
continue
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25529
|
OperaGlobalHistoryParser._IsValidUrl
|
train
|
def _IsValidUrl(self, url):
"""Checks if an URL is considered valid.
Returns:
bool: True if the URL is valid.
"""
parsed_url = urlparse.urlparse(url)
return parsed_url.scheme in self._SUPPORTED_URL_SCHEMES
|
python
|
{
"resource": ""
}
|
q25530
|
OperaGlobalHistoryParser._ParseRecord
|
train
|
def _ParseRecord(self, parser_mediator, text_file_object):
"""Parses an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed.
"""
try:
title = text_file_object.readline()
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode title')
return False
if not title:
return False
try:
url = text_file_object.readline()
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode url')
return False
try:
timestamp = text_file_object.readline()
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode timestamp')
return False
try:
popularity_index = text_file_object.readline()
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode popularity index')
return False
event_data = OperaGlobalHistoryEventData()
event_data.url = url.strip()
title = title.strip()
if title != event_data.url:
event_data.title = title
popularity_index = popularity_index.strip()
try:
event_data.popularity_index = int(popularity_index, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unable to convert popularity index: {0:s}'.format(popularity_index))
if event_data.popularity_index < 0:
event_data.description = 'First and Only Visit'
else:
event_data.description = 'Last Visit'
timestamp = timestamp.strip()
try:
timestamp = int(timestamp, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unable to convert timestamp: {0:s}'.format(timestamp))
timestamp = None
if timestamp is None:
date_time = dfdatetime_semantic_time.SemanticTime('Invalid')
else:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
return True
|
python
|
{
"resource": ""
}
|
q25531
|
OperaGlobalHistoryParser._ParseAndValidateRecord
|
train
|
def _ParseAndValidateRecord(self, parser_mediator, text_file_object):
"""Parses and validates an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed.
"""
try:
title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
except UnicodeDecodeError:
return False
if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n':
return False
if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n':
return False
if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n':
return False
if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and
popularity_index[-1] != '\n'):
return False
title = title.strip()
url = url.strip()
timestamp = timestamp.strip()
popularity_index = popularity_index.strip()
if not title or not url or not timestamp or not popularity_index:
return False
event_data = OperaGlobalHistoryEventData()
if not self._IsValidUrl(url):
return False
event_data.url = url
if title != url:
event_data.title = title
try:
event_data.popularity_index = int(popularity_index, 10)
timestamp = int(timestamp, 10)
except ValueError:
return False
if event_data.popularity_index < 0:
event_data.description = 'First and Only Visit'
else:
event_data.description = 'Last Visit'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
return True
|
python
|
{
"resource": ""
}
|
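To make the record layout concrete, here is a minimal standalone sketch of the four-line record that `_ParseRecord` and `_ParseAndValidateRecord` above read (title, URL, POSIX timestamp, popularity index). Only the layout is taken from the code; the record content is a made-up sample and `io.StringIO` stands in for the dfvfs text file.

```python
# Hedged sketch (not plaso code): reading one Opera global history record the
# way the parsers above do. The sample record is invented.
import datetime
import io

sample_record = (
    'Example page title\n'
    'http://www.example.com/\n'
    '1453134391\n'
    '-1\n')

text_file_object = io.StringIO(sample_record)

title = text_file_object.readline().strip()
url = text_file_object.readline().strip()
timestamp = int(text_file_object.readline().strip(), 10)
popularity_index = int(text_file_object.readline().strip(), 10)

description = 'First and Only Visit' if popularity_index < 0 else 'Last Visit'
last_visited = datetime.datetime.utcfromtimestamp(timestamp)

print(title, url, last_visited, description)
```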
q25532
|
OperaGlobalHistoryParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Opera global history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
encoding = self._ENCODING or parser_mediator.codepage
text_file_object = text_file.TextFile(file_object, encoding=encoding)
if not self._ParseAndValidateRecord(parser_mediator, text_file_object):
raise errors.UnableToParseFile(
'Unable to parse as Opera global_history.dat.')
while self._ParseRecord(parser_mediator, text_file_object):
pass
|
python
|
{
"resource": ""
}
|
q25533
|
WinRecycleBinParser._ParseOriginalFilename
|
train
|
def _ParseOriginalFilename(self, file_object, format_version):
"""Parses the original filename.
Args:
file_object (FileIO): file-like object.
format_version (int): format version.
Returns:
str: original filename.
Raises:
ParseError: if the original filename cannot be read.
"""
file_offset = file_object.tell()
if format_version == 1:
data_type_map = self._GetDataTypeMap(
'recycle_bin_metadata_utf16le_string')
else:
data_type_map = self._GetDataTypeMap(
'recycle_bin_metadata_utf16le_string_with_size')
try:
original_filename, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse original filename with error: {0!s}'.format(
exception))
if format_version == 1:
return original_filename.rstrip('\x00')
return original_filename.string.rstrip('\x00')
|
python
|
{
"resource": ""
}
|
q25534
|
WinRecyclerInfo2Parser._ParseInfo2Record
|
train
|
def _ParseInfo2Record(
self, parser_mediator, file_object, record_offset, record_size):
"""Parses an INFO-2 record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
record_offset (int): record offset.
record_size (int): record size.
Raises:
ParseError: if the record cannot be read.
"""
record_data = self._ReadData(file_object, record_offset, record_size)
record_map = self._GetDataTypeMap('recycler_info2_file_entry')
try:
record = self._ReadStructureFromByteStream(
record_data, record_offset, record_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map record data at offset: 0x{0:08x} with error: '
'{1!s}').format(record_offset, exception))
codepage = parser_mediator.codepage or 'ascii'
# The original filename can contain remnant data after the end-of-string
# character.
ascii_filename = record.original_filename.split(b'\x00')[0]
try:
ascii_filename = ascii_filename.decode(codepage)
except UnicodeDecodeError:
ascii_filename = ascii_filename.decode(codepage, errors='replace')
parser_mediator.ProduceExtractionWarning(
'unable to decode original filename.')
unicode_filename = None
if record_size > 280:
record_offset += 280
utf16_string_map = self._GetDataTypeMap(
'recycler_info2_file_entry_utf16le_string')
try:
unicode_filename = self._ReadStructureFromByteStream(
record_data[280:], record_offset, utf16_string_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map record data at offset: 0x{0:08x} with error: '
'{1!s}').format(record_offset, exception))
unicode_filename = unicode_filename.rstrip('\x00')
if record.deletion_time == 0:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=record.deletion_time)
event_data = WinRecycleBinEventData()
event_data.drive_number = record.drive_number
event_data.original_filename = unicode_filename or ascii_filename
event_data.file_size = record.original_file_size
event_data.offset = record_offset
event_data.record_index = record.index
if ascii_filename != unicode_filename:
event_data.short_filename = ascii_filename
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_DELETED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25535
|
WinRecyclerInfo2Parser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Windows Recycler INFO2 file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
# Since this header value is really generic it is hard not to use filename
# as an indicator too.
# TODO: Rethink this and potentially make a better test.
filename = parser_mediator.GetFilename()
if not filename.startswith('INFO2'):
return
file_header_map = self._GetDataTypeMap('recycler_info2_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse Windows Recycler INFO2 file header with '
'error: {0!s}').format(exception))
if file_header.unknown1 != 5:
parser_mediator.ProduceExtractionWarning('unsupported format signature.')
return
file_entry_size = file_header.file_entry_size
if file_entry_size not in (280, 800):
parser_mediator.ProduceExtractionWarning(
'unsupported file entry size: {0:d}'.format(file_entry_size))
return
file_offset = file_object.get_offset()
file_size = file_object.get_size()
while file_offset < file_size:
self._ParseInfo2Record(
parser_mediator, file_object, file_offset, file_entry_size)
file_offset += file_entry_size
|
python
|
{
"resource": ""
}
|
q25536
|
StorageMediaTool._AddCredentialConfiguration
|
train
|
def _AddCredentialConfiguration(
self, path_spec, credential_type, credential_data):
"""Adds a credential configuration.
Args:
path_spec (dfvfs.PathSpec): path specification.
credential_type (str): credential type.
credential_data (bytes): credential data.
"""
credential_configuration = configurations.CredentialConfiguration(
credential_data=credential_data, credential_type=credential_type,
path_spec=path_spec)
self._credential_configurations.append(credential_configuration)
|
python
|
{
"resource": ""
}
|
q25537
|
StorageMediaTool._FormatHumanReadableSize
|
train
|
def _FormatHumanReadableSize(self, size):
"""Represents a number of bytes as a human readable string.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1000 = 0
size_1000 = float(size)
while size_1000 >= 1000:
size_1000 /= 1000
magnitude_1000 += 1
magnitude_1024 = 0
size_1024 = float(size)
while size_1024 >= 1024:
size_1024 /= 1024
magnitude_1024 += 1
size_string_1000 = None
if 0 < magnitude_1000 <= 7:
size_string_1000 = '{0:.1f}{1:s}'.format(
size_1000, self._UNITS_1000[magnitude_1000])
size_string_1024 = None
if 0 < magnitude_1024 <= 7:
size_string_1024 = '{0:.1f}{1:s}'.format(
size_1024, self._UNITS_1024[magnitude_1024])
if not size_string_1000 or not size_string_1024:
return '{0:d} B'.format(size)
return '{0:s} / {1:s} ({2:d} B)'.format(
size_string_1024, size_string_1000, size)
|
python
|
{
"resource": ""
}
|
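As a usage note, a self-contained sketch of the dual base-1000/base-1024 formatting performed by `_FormatHumanReadableSize`. The unit suffix lists are assumptions, since `_UNITS_1000` and `_UNITS_1024` are not part of the snippet above.

```python
# Hedged sketch (not plaso code): the same dual-magnitude formatting as
# _FormatHumanReadableSize, with assumed unit suffix lists.
UNITS_1000 = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']


def format_human_readable_size(size):
  """Formats a size in bytes in both base-1024 and base-1000 notation."""
  magnitude_1000, size_1000 = 0, float(size)
  while size_1000 >= 1000:
    size_1000 /= 1000
    magnitude_1000 += 1

  magnitude_1024, size_1024 = 0, float(size)
  while size_1024 >= 1024:
    size_1024 /= 1024
    magnitude_1024 += 1

  if not 0 < magnitude_1000 <= 7 or not 0 < magnitude_1024 <= 7:
    return '{0:d} B'.format(size)

  return '{0:.1f}{1:s} / {2:.1f}{3:s} ({4:d} B)'.format(
      size_1024, UNITS_1024[magnitude_1024],
      size_1000, UNITS_1000[magnitude_1000], size)


print(format_human_readable_size(209715200))  # 200.0MiB / 209.7MB (209715200 B)
```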
q25538
|
StorageMediaTool._ParseCredentialOptions
|
train
|
def _ParseCredentialOptions(self, options):
"""Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
credentials = getattr(options, 'credentials', [])
if not isinstance(credentials, list):
raise errors.BadConfigOption('Unsupported credentials value.')
for credential_string in credentials:
credential_type, _, credential_data = credential_string.partition(':')
if not credential_type or not credential_data:
raise errors.BadConfigOption(
'Badly formatted credential: {0:s}.'.format(credential_string))
if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:
raise errors.BadConfigOption(
'Unsupported credential type for: {0:s}.'.format(
credential_string))
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode('hex')
except TypeError:
raise errors.BadConfigOption(
'Unsupported credential data for: {0:s}.'.format(
credential_string))
self._credentials.append((credential_type, credential_data))
|
python
|
{
"resource": ""
}
|
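A small hedged sketch of the `type:data` credential parsing shown above. The supported type names come from `self._SUPPORTED_CREDENTIAL_TYPES`, which is not part of the snippet, so the sets below are assumptions; note also that `credential_data.decode('hex')` only exists on Python 2, where `bytes.fromhex()` is the Python 3 equivalent.

```python
# Hedged sketch (not plaso code): splitting "type:data" credential strings as
# _ParseCredentialOptions does. The type names below are assumptions, and
# bytes.fromhex() replaces the Python 2-only str.decode('hex').
SUPPORTED_CREDENTIAL_TYPES = frozenset([
    'key_data', 'password', 'recovery_password', 'startup_key'])
BINARY_DATA_CREDENTIAL_TYPES = frozenset(['key_data'])


def parse_credential_string(credential_string):
  credential_type, _, credential_data = credential_string.partition(':')
  if not credential_type or not credential_data:
    raise ValueError(
        'Badly formatted credential: {0:s}.'.format(credential_string))
  if credential_type not in SUPPORTED_CREDENTIAL_TYPES:
    raise ValueError(
        'Unsupported credential type for: {0:s}.'.format(credential_string))
  if credential_type in BINARY_DATA_CREDENTIAL_TYPES:
    credential_data = bytes.fromhex(credential_data)
  return credential_type, credential_data


print(parse_credential_string('password:BDE-test'))  # ('password', 'BDE-test')
print(parse_credential_string('key_data:0102abcd'))  # ('key_data', b'\x01\x02\xab\xcd')
```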
q25539
|
StorageMediaTool._ParseSourcePathOption
|
train
|
def _ParseSourcePathOption(self, options):
"""Parses the source path option.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)
if not self._source_path:
raise errors.BadConfigOption('Missing source path.')
self._source_path = os.path.abspath(self._source_path)
|
python
|
{
"resource": ""
}
|
q25540
|
StorageMediaTool._ParseStorageMediaOptions
|
train
|
def _ParseStorageMediaOptions(self, options):
"""Parses the storage media options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._ParseStorageMediaImageOptions(options)
self._ParseVSSProcessingOptions(options)
self._ParseCredentialOptions(options)
self._ParseSourcePathOption(options)
|
python
|
{
"resource": ""
}
|
q25541
|
StorageMediaTool._ParseStorageMediaImageOptions
|
train
|
def _ParseStorageMediaImageOptions(self, options):
"""Parses the storage media image options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._partitions = getattr(options, 'partitions', None)
if self._partitions:
try:
self._ParseVolumeIdentifiersString(self._partitions, prefix='p')
except ValueError:
raise errors.BadConfigOption('Unsupported partitions')
self._volumes = getattr(options, 'volumes', None)
if self._volumes:
try:
self._ParseVolumeIdentifiersString(self._volumes, prefix='apfs')
except ValueError:
raise errors.BadConfigOption('Unsupported volumes')
|
python
|
{
"resource": ""
}
|
q25542
|
StorageMediaTool._ParseVolumeIdentifiersString
|
train
|
def _ParseVolumeIdentifiersString(
self, volume_identifiers_string, prefix='v'):
"""Parses a user specified volume identifiers string.
Args:
volume_identifiers_string (str): user specified volume identifiers. A
range of volumes can be defined as: "3..5". Multiple volumes can be
defined as: "1,3,5" (a list of comma separated values). Ranges and
lists can also be combined as: "1,3..5". The first volume is 1. All
volumes can be defined as: "all".
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix or the string "all".
Raises:
ValueError: if the volume identifiers string is invalid.
"""
prefix_length = 0
if prefix:
prefix_length = len(prefix)
if not volume_identifiers_string:
return []
if volume_identifiers_string == 'all':
return ['all']
volume_identifiers = set()
for identifiers_range in volume_identifiers_string.split(','):
# Determine if the range is formatted as 1..3 otherwise it indicates
# a single volume identifier.
if '..' in identifiers_range:
first_identifier, last_identifier = identifiers_range.split('..')
if first_identifier.startswith(prefix):
first_identifier = first_identifier[prefix_length:]
if last_identifier.startswith(prefix):
last_identifier = last_identifier[prefix_length:]
try:
first_identifier = int(first_identifier, 10)
last_identifier = int(last_identifier, 10)
except ValueError:
raise ValueError('Invalid volume identifiers range: {0:s}.'.format(
identifiers_range))
for volume_identifier in range(first_identifier, last_identifier + 1):
if volume_identifier not in volume_identifiers:
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
volume_identifiers.add(volume_identifier)
else:
identifier = identifiers_range
if identifier.startswith(prefix):
identifier = identifiers_range[prefix_length:]
try:
volume_identifier = int(identifier, 10)
except ValueError:
raise ValueError('Invalid volume identifier range: {0:s}.'.format(
identifiers_range))
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
volume_identifiers.add(volume_identifier)
# Note that sorted will return a list.
return sorted(volume_identifiers)
|
python
|
{
"resource": ""
}
|
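To illustrate the range syntax documented above, a condensed, standalone re-implementation of the expansion performed by `_ParseVolumeIdentifiersString`; it is a sketch of the accepted syntax ("3..5", "1,3,5", "1,3..5", "all"), not the method itself, and it omits the error handling.

```python
# Hedged sketch (not plaso code): expanding a volume identifiers string the
# way _ParseVolumeIdentifiersString does, returning prefixed identifiers.
def expand_volume_identifiers(volume_identifiers_string, prefix='v'):
  if not volume_identifiers_string:
    return []
  if volume_identifiers_string == 'all':
    return ['all']

  volume_identifiers = set()
  for identifiers_range in volume_identifiers_string.split(','):
    if '..' in identifiers_range:
      first, _, last = identifiers_range.partition('..')
      numbers = range(
          int(first.lstrip(prefix), 10), int(last.lstrip(prefix), 10) + 1)
    else:
      numbers = [int(identifiers_range.lstrip(prefix), 10)]
    volume_identifiers.update(
        '{0:s}{1:d}'.format(prefix, number) for number in numbers)

  return sorted(volume_identifiers)


print(expand_volume_identifiers('1,3..5', prefix='p'))      # ['p1', 'p3', 'p4', 'p5']
print(expand_volume_identifiers('vss1,vss3', prefix='vss')) # ['vss1', 'vss3']
```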
q25543
|
StorageMediaTool._ParseVSSProcessingOptions
|
train
|
def _ParseVSSProcessingOptions(self, options):
"""Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
vss_only = False
vss_stores = None
self._process_vss = not getattr(options, 'no_vss', False)
if self._process_vss:
vss_only = getattr(options, 'vss_only', False)
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
try:
self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
except ValueError:
raise errors.BadConfigOption('Unsupported VSS stores')
self._vss_only = vss_only
self._vss_stores = vss_stores
|
python
|
{
"resource": ""
}
|
q25544
|
StorageMediaTool._PrintAPFSVolumeIdentifiersOverview
|
train
|
def _PrintAPFSVolumeIdentifiersOverview(
self, volume_system, volume_identifiers):
"""Prints an overview of APFS volume identifiers.
Args:
volume_system (dfvfs.APFSVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Raises:
SourceScannerError: if a volume cannot be resolved from the volume
identifier.
"""
header = 'The following Apple File System (APFS) volumes were found:\n'
self._output_writer.Write(header)
column_names = ['Identifier', 'Name']
table_view = views.CLITabularTableView(column_names=column_names)
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Volume missing for identifier: {0:s}.'.format(
volume_identifier))
volume_attribute = volume.GetAttribute('name')
table_view.AddRow([volume.identifier, volume_attribute.value])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
|
python
|
{
"resource": ""
}
|
q25545
|
StorageMediaTool._PrintTSKPartitionIdentifiersOverview
|
train
|
def _PrintTSKPartitionIdentifiersOverview(
self, volume_system, volume_identifiers):
"""Prints an overview of TSK partition identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Raises:
SourceScannerError: if a volume cannot be resolved from the volume
identifier.
"""
header = 'The following partitions were found:\n'
self._output_writer.Write(header)
column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)']
table_view = views.CLITabularTableView(column_names=column_names)
for volume_identifier in sorted(volume_identifiers):
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Partition missing for identifier: {0:s}.'.format(
volume_identifier))
volume_extent = volume.extents[0]
volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset)
volume_size = self._FormatHumanReadableSize(volume_extent.size)
table_view.AddRow([volume.identifier, volume_offset, volume_size])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
|
python
|
{
"resource": ""
}
|
q25546
|
StorageMediaTool._PrintVSSStoreIdentifiersOverview
|
train
|
def _PrintVSSStoreIdentifiersOverview(
self, volume_system, volume_identifiers):
"""Prints an overview of VSS store identifiers.
Args:
volume_system (dfvfs.VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Raises:
SourceScannerError: if a volume cannot be resolved from the volume
identifier.
"""
header = 'The following Volume Shadow Snapshots (VSS) were found:\n'
self._output_writer.Write(header)
column_names = ['Identifier', 'Creation Time']
table_view = views.CLITabularTableView(column_names=column_names)
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Volume missing for identifier: {0:s}.'.format(
volume_identifier))
volume_attribute = volume.GetAttribute('creation_time')
filetime = dfdatetime_filetime.Filetime(timestamp=volume_attribute.value)
creation_time = filetime.CopyToDateTimeString()
if volume.HasExternalData():
creation_time = '{0:s}\tWARNING: data stored outside volume'.format(
creation_time)
table_view.AddRow([volume.identifier, creation_time])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
|
python
|
{
"resource": ""
}
|
q25547
|
StorageMediaTool._PromptUserForAPFSVolumeIdentifiers
|
train
|
def _PromptUserForAPFSVolumeIdentifiers(
self, volume_system, volume_identifiers):
"""Prompts the user to provide APFS volume identifiers.
Args:
volume_system (dfvfs.APFSVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None.
"""
print_header = True
while True:
if print_header:
self._PrintAPFSVolumeIdentifiersOverview(
volume_system, volume_identifiers)
print_header = False
lines = self._textwrapper.wrap(self._USER_PROMPT_APFS)
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\nVolume identifiers: ')
try:
selected_volumes = self._ReadSelectedVolumes(
volume_system, prefix='apfs')
if (not selected_volumes or
not set(selected_volumes).difference(volume_identifiers)):
break
except ValueError:
pass
self._output_writer.Write('\n')
lines = self._textwrapper.wrap(
'Unsupported volume identifier(s), please try again or abort with '
'Ctrl^C.')
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\n')
return selected_volumes
|
python
|
{
"resource": ""
}
|
q25548
|
StorageMediaTool._PromptUserForPartitionIdentifiers
|
train
|
def _PromptUserForPartitionIdentifiers(
self, volume_system, volume_identifiers):
"""Prompts the user to provide partition identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None.
"""
print_header = True
while True:
if print_header:
self._PrintTSKPartitionIdentifiersOverview(
volume_system, volume_identifiers)
print_header = False
lines = self._textwrapper.wrap(self._USER_PROMPT_TSK)
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\nPartition identifiers: ')
try:
selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p')
if (selected_volumes and
not set(selected_volumes).difference(volume_identifiers)):
break
except ValueError:
pass
self._output_writer.Write('\n')
lines = self._textwrapper.wrap(
'Unsupported partition identifier(s), please try again or abort with '
'Ctrl^C.')
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\n')
return selected_volumes
|
python
|
{
"resource": ""
}
|
q25549
|
StorageMediaTool._PromptUserForVSSCurrentVolume
|
train
|
def _PromptUserForVSSCurrentVolume(self):
"""Prompts the user if the current volume with VSS should be processed.
Returns:
bool: True if the current volume with VSS should be processed.
"""
while True:
self._output_writer.Write(
          'Volume Shadow Snapshots (VSS) were selected, also process current\n'
'volume? [yes, no]\n')
process_current_volume = self._input_reader.Read()
process_current_volume = process_current_volume.strip()
process_current_volume = process_current_volume.lower()
if (not process_current_volume or
process_current_volume in ('no', 'yes')):
break
self._output_writer.Write(
'\n'
'Unsupported option, please try again or abort with Ctrl^C.\n'
'\n')
self._output_writer.Write('\n')
return not process_current_volume or process_current_volume == 'yes'
|
python
|
{
"resource": ""
}
|
q25550
|
StorageMediaTool._ReadSelectedVolumes
|
train
|
def _ReadSelectedVolumes(self, volume_system, prefix='v'):
"""Reads the selected volumes provided by the user.
Args:
volume_system (APFSVolumeSystem): volume system.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: selected volume identifiers including prefix.
Raises:
KeyboardInterrupt: if the user requested to abort.
ValueError: if the volume identifiers string could not be parsed.
"""
volume_identifiers_string = self._input_reader.Read()
volume_identifiers_string = volume_identifiers_string.strip()
if not volume_identifiers_string:
return []
selected_volumes = self._ParseVolumeIdentifiersString(
volume_identifiers_string, prefix=prefix)
if selected_volumes == ['all']:
return [
'{0:s}{1:d}'.format(prefix, volume_index)
for volume_index in range(1, volume_system.number_of_volumes + 1)]
return selected_volumes
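
# Hedged usage sketch (editorial addition, not part of the original source):
# when the user types "all", _ParseVolumeIdentifiersString (not shown here) is
# assumed to return ['all'], which this method expands to one identifier per
# volume. For example, with a volume system reporting 3 volumes and
# prefix='apfs' the result would be ['apfs1', 'apfs2', 'apfs3'].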
|
python
|
{
"resource": ""
}
|
q25551
|
StorageMediaTool.AddCredentialOptions
|
train
|
def AddCredentialOptions(self, argument_group):
"""Adds the credential options to the argument group.
    The credential options are used to unlock encrypted volumes.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--credential', action='append', default=[], type=str,
dest='credentials', metavar='TYPE:DATA', help=(
            'Define a credential that can be used to unlock encrypted '
'volumes e.g. BitLocker. The credential is defined as type:data '
'e.g. "password:BDE-test". Supported credential types are: '
'{0:s}. Binary key data is expected to be passed in BASE-16 '
'encoding (hexadecimal). WARNING credentials passed via command '
'line arguments can end up in logs, so use this option with '
'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
|
python
|
{
"resource": ""
}
|
q25552
|
StorageMediaTool.AddStorageMediaImageOptions
|
train
|
def AddStorageMediaImageOptions(self, argument_group):
"""Adds the storage media image options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--partitions', '--partition', dest='partitions', action='store',
type=str, default=None, help=(
'Define partitions to be processed. A range of '
'partitions can be defined as: "3..5". Multiple partitions can '
'be defined as: "1,3,5" (a list of comma separated values). '
'Ranges and lists can also be combined as: "1,3..5". The first '
'partition is 1. All partitions can be specified with: "all".'))
argument_group.add_argument(
'--volumes', '--volume', dest='volumes', action='store', type=str,
default=None, help=(
'Define volumes to be processed. A range of volumes can be defined '
'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list '
'of comma separated values). Ranges and lists can also be combined '
'as: "1,3..5". The first volume is 1. All volumes can be specified '
'with: "all".'))
|
python
|
{
"resource": ""
}
|
q25553
|
StorageMediaTool.AddVSSProcessingOptions
|
train
|
def AddVSSProcessingOptions(self, argument_group):
"""Adds the VSS processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--no_vss', '--no-vss', dest='no_vss', action='store_true',
default=False, help=(
'Do not scan for Volume Shadow Snapshots (VSS). This means that '
'Volume Shadow Snapshots (VSS) are not processed.'))
argument_group.add_argument(
'--vss_only', '--vss-only', dest='vss_only', action='store_true',
default=False, help=(
'Do not process the current volume if Volume Shadow Snapshots '
'(VSS) have been selected.'))
argument_group.add_argument(
'--vss_stores', '--vss-stores', dest='vss_stores', action='store',
type=str, default=None, help=(
            'Define Volume Shadow Snapshots (VSS) stores that need to be '
'processed. A range of stores can be defined as: "3..5". '
'Multiple stores can be defined as: "1,3,5" (a list of comma '
'separated values). Ranges and lists can also be combined as: '
'"1,3..5". The first store is 1. All stores can be defined as: '
'"all".'))
|
python
|
{
"resource": ""
}
|
q25554
|
StorageMediaTool.ScanSource
|
train
|
def ScanSource(self, source_path):
"""Scans the source path for volume and file systems.
This function sets the internal source path specification and source
type values.
Args:
source_path (str): path to the source.
Returns:
dfvfs.SourceScannerContext: source scanner context.
Raises:
SourceScannerError: if the format of or within the source is
not supported.
"""
# Symbolic links are resolved here and not earlier to preserve the user
# specified source path in storage and reporting.
if os.path.islink(source_path):
source_path = os.path.realpath(source_path)
if (not source_path.startswith('\\\\.\\') and
not os.path.exists(source_path)):
raise errors.SourceScannerError(
'No such device, file or directory: {0:s}.'.format(source_path))
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(source_path)
try:
self._source_scanner.Scan(scan_context)
except (ValueError, dfvfs_errors.BackEndError) as exception:
raise errors.SourceScannerError(
'Unable to scan source with error: {0!s}.'.format(exception))
if scan_context.source_type not in (
scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE):
scan_node = scan_context.GetRootScanNode()
self._source_path_specs.append(scan_node.path_spec)
return scan_context
    # Get the first node where we need to decide what to process.
scan_node = scan_context.GetRootScanNode()
while len(scan_node.sub_nodes) == 1:
scan_node = scan_node.sub_nodes[0]
base_path_specs = []
if scan_node.type_indicator != (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
self._ScanVolume(scan_context, scan_node, base_path_specs)
else:
# Determine which partition needs to be processed.
partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)
if not partition_identifiers:
raise errors.SourceScannerError('No partitions found.')
for partition_identifier in partition_identifiers:
location = '/{0:s}'.format(partition_identifier)
sub_scan_node = scan_node.GetSubNodeByLocation(location)
self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
if not base_path_specs:
raise errors.SourceScannerError(
'No supported file system found in source.')
self._source_path_specs = base_path_specs
return scan_context
|
python
|
{
"resource": ""
}
|
q25555
|
FileSystemWinRegistryFileReader._CreateWindowsPathResolver
|
train
|
def _CreateWindowsPathResolver(
self, file_system, mount_point, environment_variables):
"""Create a Windows path resolver and sets the environment variables.
Args:
file_system (dfvfs.FileSystem): file system.
mount_point (dfvfs.PathSpec): mount point path specification.
environment_variables (list[EnvironmentVariableArtifact]): environment
variables.
Returns:
dfvfs.WindowsPathResolver: Windows path resolver.
"""
if environment_variables is None:
environment_variables = []
path_resolver = windows_path_resolver.WindowsPathResolver(
file_system, mount_point)
for environment_variable in environment_variables:
name = environment_variable.name.lower()
if name not in ('systemroot', 'userprofile'):
continue
path_resolver.SetEnvironmentVariable(
environment_variable.name, environment_variable.value)
return path_resolver
|
python
|
{
"resource": ""
}
|
q25556
|
FileSystemWinRegistryFileReader._OpenPathSpec
|
train
|
def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):
"""Opens the Windows Registry file specified by the path specification.
Args:
path_specification (dfvfs.PathSpec): path specification.
ascii_codepage (Optional[str]): ASCII string codepage.
Returns:
WinRegistryFile: Windows Registry file or None.
"""
if not path_specification:
return None
file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)
if file_entry is None:
return None
file_object = file_entry.GetFileObject()
if file_object is None:
return None
registry_file = dfwinreg_regf.REGFWinRegistryFile(
ascii_codepage=ascii_codepage)
try:
registry_file.Open(file_object)
except IOError as exception:
logger.warning(
'Unable to open Windows Registry file with error: {0!s}'.format(
exception))
file_object.close()
return None
return registry_file
|
python
|
{
"resource": ""
}
|
q25557
|
FileSystemWinRegistryFileReader.Open
|
train
|
def Open(self, path, ascii_codepage='cp1252'):
"""Opens the Windows Registry file specified by the path.
Args:
path (str): path of the Windows Registry file.
ascii_codepage (Optional[str]): ASCII string codepage.
Returns:
WinRegistryFile: Windows Registry file or None.
"""
path_specification = self._path_resolver.ResolvePath(path)
if path_specification is None:
return None
return self._OpenPathSpec(path_specification)
|
python
|
{
"resource": ""
}
|
q25558
|
PreprocessPluginsManager.CollectFromKnowledgeBase
|
train
|
def CollectFromKnowledgeBase(cls, knowledge_base):
"""Collects values from knowledge base values.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
"""
for preprocess_plugin in cls._knowledge_base_plugins.values():
logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format(
preprocess_plugin.__class__.__name__))
try:
preprocess_plugin.Collect(knowledge_base)
except errors.PreProcessFail as exception:
logger.warning(
'Unable to collect knowledge base value with error: {0!s}'.format(
exception))
|
python
|
{
"resource": ""
}
|
q25559
|
PreprocessPluginsManager.DeregisterPlugin
|
train
|
def DeregisterPlugin(cls, plugin_class):
"""Deregisters an preprocess plugin class.
Args:
plugin_class (type): preprocess plugin class.
Raises:
KeyError: if plugin class is not set for the corresponding name.
TypeError: if the source type of the plugin class is not supported.
"""
name = getattr(
plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)
name = name.lower()
if name not in cls._plugins:
raise KeyError(
'Artifact plugin class not set for name: {0:s}.'.format(name))
del cls._plugins[name]
if name in cls._file_system_plugins:
del cls._file_system_plugins[name]
if name in cls._knowledge_base_plugins:
del cls._knowledge_base_plugins[name]
if name in cls._windows_registry_plugins:
del cls._windows_registry_plugins[name]
|
python
|
{
"resource": ""
}
|
q25560
|
PreprocessPluginsManager.GetNames
|
train
|
def GetNames(cls):
"""Retrieves the names of the registered artifact definitions.
Returns:
list[str]: registered artifact definitions names.
"""
names = []
for plugin_class in cls._plugins.values():
name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', None)
if name:
names.append(name)
return names
|
python
|
{
"resource": ""
}
|
q25561
|
PreprocessPluginsManager.RegisterPlugin
|
train
|
def RegisterPlugin(cls, plugin_class):
"""Registers an preprocess plugin class.
Args:
plugin_class (type): preprocess plugin class.
Raises:
KeyError: if plugin class is already set for the corresponding name.
TypeError: if the source type of the plugin class is not supported.
"""
name = getattr(
plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)
name = name.lower()
if name in cls._plugins:
raise KeyError(
'Artifact plugin class already set for name: {0:s}.'.format(name))
preprocess_plugin = plugin_class()
cls._plugins[name] = preprocess_plugin
if isinstance(
preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin):
cls._file_system_plugins[name] = preprocess_plugin
elif isinstance(
preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin):
cls._knowledge_base_plugins[name] = preprocess_plugin
elif isinstance(
preprocess_plugin,
interface.WindowsRegistryKeyArtifactPreprocessorPlugin):
cls._windows_registry_plugins[name] = preprocess_plugin
|
python
|
{
"resource": ""
}
|
q25562
|
PreprocessPluginsManager.RunPlugins
|
train
|
def RunPlugins(
cls, artifacts_registry, file_system, mount_point, knowledge_base):
"""Runs the preprocessing plugins.
Args:
artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts
definitions registry.
file_system (dfvfs.FileSystem): file system to be preprocessed.
mount_point (dfvfs.PathSpec): mount point path specification that refers
to the base location of the file system.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
"""
searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
cls.CollectFromFileSystem(
artifacts_registry, knowledge_base, searcher, file_system)
# Run the Registry plugins separately so we do not have to open
# Registry files for every preprocess plugin.
environment_variables = None
if knowledge_base:
environment_variables = knowledge_base.GetEnvironmentVariables()
registry_file_reader = FileSystemWinRegistryFileReader(
file_system, mount_point, environment_variables=environment_variables)
win_registry = dfwinreg_registry.WinRegistry(
registry_file_reader=registry_file_reader)
searcher = registry_searcher.WinRegistrySearcher(win_registry)
cls.CollectFromWindowsRegistry(
artifacts_registry, knowledge_base, searcher)
cls.CollectFromKnowledgeBase(knowledge_base)
if not knowledge_base.HasUserAccounts():
logger.warning('Unable to find any user accounts on the system.')
|
python
|
{
"resource": ""
}
|
q25563
|
CompoundZIPParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a compound ZIP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
display_name = parser_mediator.GetDisplayName()
if not zipfile.is_zipfile(file_object):
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(
self.NAME, display_name, 'Not a Zip file.'))
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
self._ProcessZipFileWithPlugins(parser_mediator, zip_file)
zip_file.close()
# Some non-ZIP files return true for is_zipfile but will fail with a
# negative seek (IOError) or another error.
except (zipfile.BadZipfile, struct.error) as exception:
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(
self.NAME, display_name, exception))
|
python
|
{
"resource": ""
}
|
q25564
|
CompoundZIPParser._ProcessZipFileWithPlugins
|
train
|
def _ProcessZipFileWithPlugins(self, parser_mediator, zip_file):
"""Processes a zip file using all compound zip files.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
zip_file (zipfile.ZipFile): the zip file. It should not be closed in
this method, but will be closed in ParseFileObject().
"""
archive_members = zip_file.namelist()
for plugin in self._plugins:
try:
plugin.UpdateChainAndProcess(
parser_mediator, zip_file=zip_file, archive_members=archive_members)
except errors.WrongCompoundZIPPlugin as exception:
logger.debug('[{0:s}] wrong plugin: {1!s}'.format(
self.NAME, exception))
|
python
|
{
"resource": ""
}
|
q25565
|
GetYearFromPosixTime
|
train
|
def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):
"""Gets the year from a POSIX timestamp
The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.
Args:
posix_time: An integer containing the number of seconds since
1970-01-01 00:00:00 UTC.
timezone: Optional timezone of the POSIX timestamp.
Returns:
The year of the POSIX timestamp.
Raises:
ValueError: If the posix timestamp is out of the range of supported values.
"""
datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)
return datetime_object.year
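
# Hedged usage sketch (editorial addition): with the datetime and pytz imports
# this function relies on, the POSIX epoch resolves to different years
# depending on the timezone applied.
#
#   GetYearFromPosixTime(0)                                        # 1970 (UTC)
#   GetYearFromPosixTime(0, timezone=pytz.timezone('US/Eastern'))  # 1969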
|
python
|
{
"resource": ""
}
|
q25566
|
Timestamp.CopyToDatetime
|
train
|
def CopyToDatetime(cls, timestamp, timezone, raise_error=False):
"""Copies the timestamp to a datetime object.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A datetime object (instance of datetime.datetime). A datetime object of
      January 1, 1970 00:00:00 UTC is returned on error if raise_error is
not set.
Raises:
      OverflowError: If raise_error is set to True and an overflow error
occurs.
      ValueError: If raise_error is set to True and no timestamp value is
provided.
"""
datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
if not timestamp:
if raise_error:
raise ValueError('Missing timestamp value')
return datetime_object
try:
datetime_object += datetime.timedelta(microseconds=timestamp)
return datetime_object.astimezone(timezone)
except OverflowError as exception:
if raise_error:
raise
logging.error((
'Unable to copy {0:d} to a datetime object with error: '
'{1!s}').format(timestamp, exception))
return datetime_object
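
# Hedged usage sketch (editorial addition): assuming CopyToDatetime is exposed
# as a classmethod and the timestamp is in microseconds since the POSIX epoch.
#
#   Timestamp.CopyToDatetime(3600 * 1000000, pytz.UTC)
#   # -> datetime.datetime(1970, 1, 1, 1, 0, tzinfo=<UTC>)
#   Timestamp.CopyToDatetime(0, pytz.UTC)
#   # -> the epoch datetime, since a falsy timestamp is treated as an error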
|
python
|
{
"resource": ""
}
|
q25567
|
Timestamp.CopyToIsoFormat
|
train
|
def CopyToIsoFormat(cls, timestamp, timezone=pytz.UTC, raise_error=False):
"""Copies the timestamp to an ISO 8601 formatted string.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: Optional timezone (instance of pytz.timezone).
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A string containing an ISO 8601 formatted date and time.
"""
datetime_object = cls.CopyToDatetime(
timestamp, timezone, raise_error=raise_error)
return datetime_object.isoformat()
|
python
|
{
"resource": ""
}
|
q25568
|
Timestamp.FromTimeString
|
train
|
def FromTimeString(
cls, time_string, dayfirst=False, gmt_as_timezone=True,
timezone=pytz.UTC):
"""Converts a string containing a date and time value into a timestamp.
Args:
time_string: String that contains a date and time value.
dayfirst: An optional boolean argument. If set to true then the
parser will change the precedence in which it parses timestamps
from MM-DD-YYYY to DD-MM-YYYY (and YYYY-MM-DD will be
YYYY-DD-MM, etc).
gmt_as_timezone: Sometimes the dateutil parser will interpret GMT and UTC
                       the same way, that is, not make a distinction. By
                       default this is set to True, meaning GMT can be
                       interpreted differently than UTC. If that is not the
                       expected result this attribute can be set to False.
timezone: Optional timezone object (instance of pytz.timezone) that
the data and time value in the string represents. This value
is used when the timezone cannot be determined from the string.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
Raises:
TimestampError: if the time string could not be parsed.
"""
if not gmt_as_timezone and time_string.endswith(' GMT'):
time_string = '{0:s}UTC'.format(time_string[:-3])
try:
# TODO: deprecate the use of dateutil parser.
datetime_object = dateutil.parser.parse(time_string, dayfirst=dayfirst)
except (TypeError, ValueError) as exception:
raise errors.TimestampError((
'Unable to convert time string: {0:s} in to a datetime object '
'with error: {1!s}').format(time_string, exception))
if datetime_object.tzinfo:
datetime_object = datetime_object.astimezone(pytz.UTC)
else:
datetime_object = timezone.localize(datetime_object)
posix_time = int(calendar.timegm(datetime_object.utctimetuple()))
timestamp = posix_time * definitions.MICROSECONDS_PER_SECOND
return timestamp + datetime_object.microsecond
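
# Hedged usage sketch (editorial addition): a time string without an explicit
# timezone is interpreted in the supplied timezone (UTC by default) and
# converted to microseconds since the POSIX epoch, assuming
# definitions.MICROSECONDS_PER_SECOND is 1000000.
#
#   Timestamp.FromTimeString('1970-01-01 00:00:01')        # -> 1000000
#   Timestamp.FromTimeString('01-02-1970', dayfirst=True)  # 1 February 1970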
|
python
|
{
"resource": ""
}
|
q25569
|
Timestamp.LocaltimeToUTC
|
train
|
def LocaltimeToUTC(cls, timestamp, timezone, is_dst=False):
"""Converts the timestamp in localtime of the timezone to UTC.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
      is_dst: A boolean to indicate the timestamp is corrected for daylight
        saving time (DST); only used during the DST transition period.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
"""
if timezone and timezone != pytz.UTC:
datetime_object = (
datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=None) +
datetime.timedelta(microseconds=timestamp))
# Check if timezone is UTC since utcoffset() does not support is_dst
# for UTC and will raise.
datetime_delta = timezone.utcoffset(datetime_object, is_dst=is_dst)
seconds_delta = int(datetime_delta.total_seconds())
timestamp -= seconds_delta * definitions.MICROSECONDS_PER_SECOND
return timestamp
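
# Hedged usage sketch (editorial addition): converting a local time in a fixed
# UTC+1 zone shifts the timestamp back by one hour, assuming
# definitions.MICROSECONDS_PER_SECOND is 1000000. Note the POSIX-style sign
# convention of the 'Etc/GMT-1' zone name (it denotes UTC+1).
#
#   Timestamp.LocaltimeToUTC(3600 * 1000000, pytz.timezone('Etc/GMT-1'))
#   # -> 0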
|
python
|
{
"resource": ""
}
|
q25570
|
Timestamp.RoundToSeconds
|
train
|
def RoundToSeconds(cls, timestamp):
"""Takes a timestamp value and rounds it to a second precision."""
leftovers = timestamp % definitions.MICROSECONDS_PER_SECOND
scrubbed = timestamp - leftovers
rounded = round(float(leftovers) / definitions.MICROSECONDS_PER_SECOND)
return int(scrubbed + rounded * definitions.MICROSECONDS_PER_SECOND)
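
# Hedged worked example (editorial addition): assuming
# definitions.MICROSECONDS_PER_SECOND is 1000000, 1.6 seconds rounds up and
# 1.4 seconds rounds down.
#
#   Timestamp.RoundToSeconds(1600000)  # -> 2000000
#   Timestamp.RoundToSeconds(1400000)  # -> 1000000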
|
python
|
{
"resource": ""
}
|
q25571
|
XChatScrollbackParser.ParseRecord
|
train
|
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure parsed from the log file.
"""
if key != 'logline':
logger.warning(
'Unable to parse record, unknown structure: {0:s}'.format(key))
return
try:
timestamp = int(structure.timestamp)
except ValueError:
logger.debug('Invalid timestamp string {0:s}, skipping record'.format(
structure.timestamp))
return
try:
nickname, text = self._StripThenGetNicknameAndText(structure.text)
except pyparsing.ParseException:
logger.debug('Error parsing entry at offset {0:d}'.format(self._offset))
return
event_data = XChatScrollbackEventData()
event_data.nickname = nickname
event_data.offset = self._offset
event_data.text = text
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25572
|
XChatScrollbackParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a XChat scrollback log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line was successfully parsed.
"""
structure = self.LOG_LINE
try:
parsed_structure = structure.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a XChat scrollback log file')
return False
try:
int(parsed_structure.timestamp, 10)
except ValueError:
logger.debug('Not a XChat scrollback log file, invalid timestamp string')
return False
return True
|
python
|
{
"resource": ""
}
|
q25573
|
MsieWebCacheESEDBPlugin.ParseContainersTable
|
train
|
def ParseContainersTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheContainersEventData()
event_data.container_identifier = record_values.get('ContainerId', None)
event_data.directory = record_values.get('Directory', None)
event_data.name = record_values.get('Name', None)
event_data.set_identifier = record_values.get('SetId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('LastAccessTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
container_identifier = record_values.get('ContainerId', None)
container_name = record_values.get('Name', None)
if not container_identifier or not container_name:
continue
table_name = 'Container_{0:d}'.format(container_identifier)
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'Missing table: {0:s}'.format(table_name))
continue
self._ParseContainerTable(parser_mediator, esedb_table, container_name)
|
python
|
{
"resource": ""
}
|
q25574
|
MsieWebCacheESEDBPlugin.ParseLeakFilesTable
|
train
|
def ParseLeakFilesTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the LeakFiles table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheLeakFilesEventData()
event_data.cached_filename = record_values.get('Filename', None)
event_data.leak_identifier = record_values.get('LeakId', None)
timestamp = record_values.get('CreationTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25575
|
MsieWebCacheESEDBPlugin.ParsePartitionsTable
|
train
|
def ParsePartitionsTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the Partitions table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCachePartitionsEventData()
event_data.directory = record_values.get('Directory', None)
event_data.partition_identifier = record_values.get('PartitionId', None)
event_data.partition_type = record_values.get('PartitionType', None)
event_data.table_identifier = record_values.get('TableId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25576
|
ASLParser._ParseRecord
|
train
|
def _ParseRecord(self, parser_mediator, file_object, record_offset):
"""Parses a record and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Returns:
int: next record offset.
Raises:
ParseError: if the record cannot be parsed.
"""
record_strings_data_offset = file_object.tell()
record_strings_data_size = record_offset - record_strings_data_offset
record_strings_data = self._ReadData(
file_object, record_strings_data_offset, record_strings_data_size)
record_map = self._GetDataTypeMap('asl_record')
try:
record, record_data_size = self._ReadStructureFromFileObject(
file_object, record_offset, record_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(record_offset, exception))
hostname = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record.hostname_string_offset)
sender = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record.sender_string_offset)
facility = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record.facility_string_offset)
message = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record.message_string_offset)
file_offset = record_offset + record_data_size
additional_data_size = record.data_size + 6 - record_data_size
if additional_data_size % 8 != 0:
raise errors.ParseError(
'Invalid record additional data size: {0:d}.'.format(
additional_data_size))
additional_data = self._ReadData(
file_object, file_offset, additional_data_size)
extra_fields = {}
for additional_data_offset in range(0, additional_data_size - 8, 16):
record_extra_field = self._ParseRecordExtraField(
additional_data[additional_data_offset:], file_offset)
file_offset += 16
name = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record_extra_field.name_string_offset)
value = self._ParseRecordString(
record_strings_data, record_strings_data_offset,
record_extra_field.value_string_offset)
if name is not None:
extra_fields[name] = value
# TODO: implement determine previous record offset
event_data = ASLEventData()
event_data.computer_name = hostname
event_data.extra_information = ', '.join([
'{0:s}: {1:s}'.format(name, value)
for name, value in sorted(extra_fields.items())])
event_data.facility = facility
event_data.group_id = record.group_identifier
event_data.level = record.alert_level
event_data.message_id = record.message_identifier
event_data.message = message
event_data.pid = record.process_identifier
event_data.read_gid = record.real_group_identifier
event_data.read_uid = record.real_user_identifier
event_data.record_position = record_offset
event_data.sender = sender
# Note that the user_sid value is expected to be a string.
event_data.user_sid = '{0:d}'.format(record.user_identifier)
microseconds, _ = divmod(record.written_time_nanoseconds, 1000)
timestamp = (record.written_time * 1000000) + microseconds
# TODO: replace by PosixTimeInNanoseconds.
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
# TODO: replace by written time.
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
return record.next_record_offset
|
python
|
{
"resource": ""
}
|
q25577
|
ASLParser._ParseRecordExtraField
|
train
|
def _ParseRecordExtraField(self, byte_stream, file_offset):
"""Parses a record extra field.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the record extra field relative to
the start of the file.
Returns:
asl_record_extra_field: record extra field.
Raises:
ParseError: if the record extra field cannot be parsed.
"""
extra_field_map = self._GetDataTypeMap('asl_record_extra_field')
try:
record_extra_field = self._ReadStructureFromByteStream(
byte_stream, file_offset, extra_field_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record extra field at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
return record_extra_field
|
python
|
{
"resource": ""
}
|
q25578
|
ASLParser._ParseRecordString
|
train
|
def _ParseRecordString(
self, record_strings_data, record_strings_data_offset, string_offset):
"""Parses a record string.
Args:
record_strings_data (bytes): record strings data.
record_strings_data_offset (int): offset of the record strings data
relative to the start of the file.
string_offset (int): offset of the string relative to the start of
the file.
Returns:
str: record string or None if string offset is 0.
Raises:
ParseError: if the record string cannot be parsed.
"""
if string_offset == 0:
return None
if string_offset & self._STRING_OFFSET_MSB:
if (string_offset >> 60) != 8:
raise errors.ParseError('Invalid inline record string flag.')
string_size = (string_offset >> 56) & 0x0f
if string_size >= 8:
raise errors.ParseError('Invalid inline record string size.')
string_data = bytes(bytearray([
string_offset >> (8 * byte_index) & 0xff
for byte_index in range(6, -1, -1)]))
try:
return string_data[:string_size].decode('utf-8')
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode inline record string with error: {0!s}.'.format(
exception))
data_offset = string_offset - record_strings_data_offset
record_string_map = self._GetDataTypeMap('asl_record_string')
try:
record_string = self._ReadStructureFromByteStream(
record_strings_data[data_offset:], string_offset, record_string_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record string at offset: 0x{0:08x} with error: '
'{1!s}').format(string_offset, exception))
return record_string.string.rstrip('\x00')
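
# Hedged worked example (editorial addition): for the inline-string branch,
# assuming _STRING_OFFSET_MSB is the top bit of the 64-bit value, the value
# 0x8241420000000000 carries flag nibble 8, size nibble 2 and the string bytes
# themselves, so it decodes to 'AB' without touching the record strings data.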
|
python
|
{
"resource": ""
}
|
q25579
|
ASLParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an ASL file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_header_map = self._GetDataTypeMap('asl_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.signature != self._FILE_SIGNATURE:
raise errors.UnableToParseFile('Invalid file signature.')
# TODO: generate event for creation time.
file_size = file_object.get_size()
if file_header.first_log_entry_offset > 0:
last_log_entry_offset = 0
file_offset = file_header.first_log_entry_offset
while file_offset < file_size:
last_log_entry_offset = file_offset
try:
file_offset = self._ParseRecord(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record with error: {0!s}'.format(exception))
return
if file_offset == 0:
break
if last_log_entry_offset != file_header.last_log_entry_offset:
parser_mediator.ProduceExtractionWarning(
'last log entry offset does not match value in file header.')
|
python
|
{
"resource": ""
}
|
q25580
|
_PathFilterTable._AddPathSegments
|
train
|
def _AddPathSegments(self, path, ignore_list):
"""Adds the path segments to the table.
Args:
path: a string containing the path.
ignore_list: a list of path segment indexes to ignore, where 0 is the
index of the first path segment relative from the root.
"""
path_segments = path.split(self._path_segment_separator)
for path_segment_index, path_segment in enumerate(path_segments):
if path_segment_index not in self.path_segments_per_index:
self.path_segments_per_index[path_segment_index] = {}
if path_segment_index not in ignore_list:
path_segments = self.path_segments_per_index[path_segment_index]
if path_segment not in path_segments:
path_segments[path_segment] = []
paths_per_segment_list = path_segments[path_segment]
paths_per_segment_list.append(path)
|
python
|
{
"resource": ""
}
|
q25581
|
_PathFilterTable.ToDebugString
|
train
|
def ToDebugString(self):
"""Converts the path filter table into a debug string."""
    text_parts = ['Path segment index\tPath segment(s)']
for index, path_segments in self.path_segments_per_index.items():
text_parts.append('{0:d}\t\t\t[{1:s}]'.format(
index, ', '.join(path_segments)))
text_parts.append('')
return '\n'.join(text_parts)
|
python
|
{
"resource": ""
}
|
q25582
|
_PathSegmentWeights.AddIndex
|
train
|
def AddIndex(self, path_segment_index):
"""Adds a path segment index and sets its weight to 0.
Args:
path_segment_index: an integer containing the path segment index.
Raises:
ValueError: if the path segment weights already contains
the path segment index.
"""
if path_segment_index in self._weight_per_index:
raise ValueError('Path segment index already set.')
self._weight_per_index[path_segment_index] = 0
|
python
|
{
"resource": ""
}
|
q25583
|
_PathSegmentWeights.AddWeight
|
train
|
def AddWeight(self, path_segment_index, weight):
"""Adds a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index.
"""
if path_segment_index not in self._weight_per_index:
raise ValueError('Path segment index not set.')
self._weight_per_index[path_segment_index] += weight
if weight not in self._indexes_per_weight:
self._indexes_per_weight[weight] = []
self._indexes_per_weight[weight].append(path_segment_index)
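
# Hedged worked example (editorial addition): after AddIndex(0),
# AddWeight(0, 2) and AddWeight(0, 3), the cumulative weight for index 0 is 5,
# while _indexes_per_weight records index 0 under both the weight-2 and
# weight-3 buckets (each call files the index under the raw weight it added).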
|
python
|
{
"resource": ""
}
|
q25584
|
_PathSegmentWeights.SetWeight
|
train
|
def SetWeight(self, path_segment_index, weight):
"""Sets a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index.
"""
if path_segment_index not in self._weight_per_index:
raise ValueError('Path segment index not set.')
self._weight_per_index[path_segment_index] = weight
if weight not in self._indexes_per_weight:
self._indexes_per_weight[weight] = []
self._indexes_per_weight[weight].append(path_segment_index)
|
python
|
{
"resource": ""
}
|
q25585
|
_PathSegmentWeights.ToDebugString
|
train
|
def ToDebugString(self):
"""Converts the path segment weights into a debug string."""
text_parts = ['Path segment index\tWeight']
for path_segment_index, weight in self._weight_per_index.items():
text_parts.append('{0:d}\t\t\t{1:d}'.format(
path_segment_index, weight))
text_parts.append('')
text_parts.append('Weight\t\t\tPath segment index(es)')
for weight, path_segment_indexes in self._indexes_per_weight.items():
text_parts.append('{0:d}\t\t\t{1!s}'.format(
weight, path_segment_indexes))
text_parts.append('')
return '\n'.join(text_parts)
|
python
|
{
"resource": ""
}
|
q25586
|
PathFilterScanTree._BuildScanTreeNode
|
train
|
def _BuildScanTreeNode(self, path_filter_table, ignore_list):
"""Builds a scan tree node.
Args:
path_filter_table: a path filter table object (instance of
_PathFilterTable).
ignore_list: a list of path segment indexes to ignore, where 0 is the
index of the first path segment relative from the root.
Returns:
A scan tree node (instance of PathFilterScanTreeNode).
Raises:
      ValueError: if the path segment index value or the number of path
segments value is out of bounds.
"""
# Make a copy of the lists because the function is going to alter them
# and the changes must remain in scope of the function.
paths_list = list(path_filter_table.paths)
ignore_list = list(ignore_list)
similarity_weights = _PathSegmentWeights()
occurrence_weights = _PathSegmentWeights()
value_weights = _PathSegmentWeights()
for path_segment_index in path_filter_table.path_segments_per_index.keys():
# Skip a path segment index for which no path segments are defined
# in the path filter table.
if not path_filter_table.path_segments_per_index[path_segment_index]:
continue
similarity_weights.AddIndex(path_segment_index)
occurrence_weights.AddIndex(path_segment_index)
value_weights.AddIndex(path_segment_index)
path_segments = path_filter_table.GetPathSegments(path_segment_index)
number_of_path_segments = len(path_segments.keys())
if number_of_path_segments > 1:
occurrence_weights.SetWeight(
path_segment_index, number_of_path_segments)
for paths_per_segment_list in path_segments.values():
path_segment_weight = len(paths_per_segment_list)
if path_segment_weight > 1:
similarity_weights.AddWeight(path_segment_index, path_segment_weight)
path_segment_index = self._GetMostSignificantPathSegmentIndex(
paths_list, similarity_weights, occurrence_weights, value_weights)
ignore_list.append(path_segment_index)
if path_segment_index < 0:
raise ValueError('Invalid path segment index value out of bounds.')
scan_tree_node = PathFilterScanTreeNode(path_segment_index)
path_segments = path_filter_table.GetPathSegments(path_segment_index)
for path_segment, paths_per_segment_list in path_segments.items():
if not paths_per_segment_list:
raise ValueError('Invalid number of paths value out of bounds.')
if len(paths_per_segment_list) == 1:
for path in paths_per_segment_list:
scan_tree_node.AddPathSegment(path_segment, path)
else:
sub_path_filter_table = _PathFilterTable(
paths_per_segment_list, ignore_list,
path_segment_separator=self._path_segment_separator)
scan_sub_node = self._BuildScanTreeNode(
sub_path_filter_table, ignore_list)
scan_tree_node.AddPathSegment(path_segment, scan_sub_node)
for path in paths_per_segment_list:
paths_list.remove(path)
number_of_paths = len(paths_list)
if number_of_paths == 1:
scan_tree_node.SetDefaultValue(paths_list[0])
elif number_of_paths > 1:
path_filter_table = _PathFilterTable(
paths_list, ignore_list,
path_segment_separator=self._path_segment_separator)
scan_sub_node = self._BuildScanTreeNode(path_filter_table, ignore_list)
scan_tree_node.SetDefaultValue(scan_sub_node)
return scan_tree_node
|
python
|
{
"resource": ""
}
|
q25587
|
PathFilterScanTree._GetMostSignificantPathSegmentIndex
|
train
|
def _GetMostSignificantPathSegmentIndex(
self, paths, similarity_weights, occurrence_weights, value_weights):
"""Retrieves the index of the most significant path segment.
Args:
paths: a list of strings containing the paths.
similarity_weights: the similarity weights object (instance of
_PathSegmentWeights).
occurrence_weights: the occurrence weights object (instance of
_PathSegmentWeights).
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
Raises:
ValueError: when paths is an empty list.
"""
if not paths:
raise ValueError('Missing paths.')
number_of_paths = len(paths)
path_segment_index = None
if number_of_paths == 1:
path_segment_index = self._GetPathSegmentIndexForValueWeights(
value_weights)
elif number_of_paths == 2:
path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(
occurrence_weights, value_weights)
elif number_of_paths > 2:
path_segment_index = self._GetPathSegmentIndexForSimilarityWeights(
similarity_weights, occurrence_weights, value_weights)
return path_segment_index
|
python
|
{
"resource": ""
}
|
q25588
|
PathFilterScanTree._GetPathSegmentIndexForOccurrenceWeights
|
train
|
def _GetPathSegmentIndexForOccurrenceWeights(
self, occurrence_weights, value_weights):
"""Retrieves the index of the path segment based on occurrence weights.
Args:
occurrence_weights: the occurrence weights object (instance of
_PathSegmentWeights).
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
"""
largest_weight = occurrence_weights.GetLargestWeight()
if largest_weight > 0:
occurrence_weight_indexes = occurrence_weights.GetIndexesForWeight(
largest_weight)
number_of_occurrence_indexes = len(occurrence_weight_indexes)
else:
number_of_occurrence_indexes = 0
path_segment_index = None
if number_of_occurrence_indexes == 0:
path_segment_index = self._GetPathSegmentIndexForValueWeights(
value_weights)
elif number_of_occurrence_indexes == 1:
path_segment_index = occurrence_weight_indexes[0]
else:
largest_weight = 0
for occurrence_index in occurrence_weight_indexes:
value_weight = value_weights.GetWeightForIndex(occurrence_index)
if not path_segment_index or largest_weight < value_weight:
largest_weight = value_weight
path_segment_index = occurrence_index
return path_segment_index
|
python
|
{
"resource": ""
}
|
q25589
|
PathFilterScanTree._GetPathSegmentIndexForSimilarityWeights
|
train
|
def _GetPathSegmentIndexForSimilarityWeights(
self, similarity_weights, occurrence_weights, value_weights):
"""Retrieves the index of the path segment based on similarity weights.
Args:
similarity_weights: the similarity weights object (instance of
_PathSegmentWeights).
occurrence_weights: the occurrence weights object (instance of
_PathSegmentWeights).
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
"""
largest_weight = similarity_weights.GetLargestWeight()
if largest_weight > 0:
similarity_weight_indexes = similarity_weights.GetIndexesForWeight(
largest_weight)
number_of_similarity_indexes = len(similarity_weight_indexes)
else:
number_of_similarity_indexes = 0
path_segment_index = None
if number_of_similarity_indexes == 0:
path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(
occurrence_weights, value_weights)
elif number_of_similarity_indexes == 1:
path_segment_index = similarity_weight_indexes[0]
else:
largest_weight = 0
largest_value_weight = 0
for similarity_index in similarity_weight_indexes:
occurrence_weight = occurrence_weights.GetWeightForIndex(
similarity_index)
if largest_weight > 0 and largest_weight == occurrence_weight:
value_weight = value_weights.GetWeightForIndex(similarity_index)
if largest_value_weight < value_weight:
largest_weight = 0
if not path_segment_index or largest_weight < occurrence_weight:
largest_weight = occurrence_weight
path_segment_index = similarity_index
largest_value_weight = value_weights.GetWeightForIndex(
similarity_index)
return path_segment_index
|
python
|
{
"resource": ""
}
|
q25590
|
PathFilterScanTree._GetPathSegmentIndexForValueWeights
|
train
|
def _GetPathSegmentIndexForValueWeights(self, value_weights):
"""Retrieves the index of the path segment based on value weights.
Args:
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
Raises:
      RuntimeError: if no path segment index can be found.
"""
largest_weight = value_weights.GetLargestWeight()
if largest_weight > 0:
value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight)
else:
value_weight_indexes = []
if value_weight_indexes:
path_segment_index = value_weight_indexes[0]
else:
path_segment_index = value_weights.GetFirstAvailableIndex()
if path_segment_index is None:
raise RuntimeError('No path segment index found.')
return path_segment_index
|
python
|
{
"resource": ""
}
|
q25591
|
PathFilterScanTree.CheckPath
|
train
|
def CheckPath(self, path, path_segment_separator=None):
"""Checks if a path matches the scan tree-based path filter.
Args:
path: a string containing the path.
path_segment_separator: optional string containing the path segment
separator. None defaults to the path segment
separator that was set when the path filter
scan tree was initialized.
Returns:
A boolean indicating if the path matches the filter.
"""
if not self._case_sensitive:
path = path.lower()
if path_segment_separator is None:
path_segment_separator = self._path_segment_separator
path_segments = path.split(path_segment_separator)
number_of_path_segments = len(path_segments)
scan_object = self._root_node
while scan_object:
if isinstance(scan_object, py2to3.STRING_TYPES):
break
if scan_object.path_segment_index >= number_of_path_segments:
scan_object = scan_object.default_value
continue
path_segment = path_segments[scan_object.path_segment_index]
scan_object = scan_object.GetScanObject(path_segment)
if not isinstance(scan_object, py2to3.STRING_TYPES):
return False
filter_path_segments = scan_object.split(self._path_segment_separator)
return filter_path_segments == path_segments
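
# Hedged usage sketch (editorial addition): the constructor is not shown here,
# but assuming it accepts the list of filter paths, matching is exact per
# path segment.
#
#   scan_tree = PathFilterScanTree(['/etc/passwd', '/etc/hosts'])
#   scan_tree.CheckPath('/etc/passwd')  # expected True
#   scan_tree.CheckPath('/etc/shadow')  # expected False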
|
python
|
{
"resource": ""
}
|
q25592
|
PathFilterScanTreeNode.AddPathSegment
|
train
|
def AddPathSegment(self, path_segment, scan_object):
"""Adds a path segment.
Args:
path_segment: a string containing the path segment.
scan_object: a scan object, either a scan tree sub node (instance of
PathFilterScanTreeNode) or a string containing a path.
Raises:
ValueError: if the node already contains a scan object for
the path segment.
"""
if path_segment in self._path_segments:
raise ValueError('Path segment already set.')
if isinstance(scan_object, PathFilterScanTreeNode):
scan_object.parent = self
self._path_segments[path_segment] = scan_object
|
python
|
{
"resource": ""
}
|
q25593
|
PathFilterScanTreeNode.ToDebugString
|
train
|
def ToDebugString(self, indentation_level=1):
"""Converts the path filter scan tree node into a debug string.
Args:
indentation_level: an integer containing the text indentation level.
Returns:
A string containing a debug representation of the path filter scan
tree node.
"""
indentation = ' ' * indentation_level
text_parts = ['{0:s}path segment index: {1:d}\n'.format(
indentation, self.path_segment_index)]
for path_segment, scan_object in self._path_segments.items():
text_parts.append('{0:s}path segment: {1:s}\n'.format(
indentation, path_segment))
if isinstance(scan_object, PathFilterScanTreeNode):
text_parts.append('{0:s}scan tree node:\n'.format(indentation))
text_parts.append(scan_object.ToDebugString(indentation_level + 1))
elif isinstance(scan_object, py2to3.STRING_TYPES):
text_parts.append('{0:s}path: {1:s}\n'.format(
indentation, scan_object))
text_parts.append('{0:s}default value:\n'.format(indentation))
if isinstance(self.default_value, PathFilterScanTreeNode):
text_parts.append('{0:s}scan tree node:\n'.format(indentation))
text_parts.append(self.default_value.ToDebugString(indentation_level + 1))
elif isinstance(self.default_value, py2to3.STRING_TYPES):
text_parts.append('{0:s}pattern: {1:s}\n'.format(
indentation, self.default_value))
text_parts.append('\n')
return ''.join(text_parts)
|
python
|
{
"resource": ""
}
|
q25594
|
BagMRUWindowsRegistryPlugin._ParseMRUListExValue
|
train
|
def _ParseMRUListExValue(self, registry_key):
"""Parses the MRUListEx value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
Returns:
mrulistex_entries: MRUListEx entries or None if not available.
"""
mrulistex_value = registry_key.GetValueByName('MRUListEx')
    # The key exists but does not contain a value named "MRUListEx".
if not mrulistex_value:
return None
mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')
context = dtfabric_data_maps.DataTypeMapContext(values={
'data_size': len(mrulistex_value.data)})
return self._ReadStructureFromByteStream(
mrulistex_value.data, 0, mrulistex_entries_map, context=context)
|
python
|
{
"resource": ""
}
|
q25595
|
SoftwareUpdatePlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant MacOS update entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
version = match.get('LastAttemptSystemVersion', 'N/A')
pending = match.get('LastUpdatesAvailable', None)
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Last MacOS {0:s} full update.'.format(version)
event_data.key = ''
event_data.root = '/'
datetime_value = match.get('LastFullSuccessfulDate', None)
if datetime_value:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = match.get('LastSuccessfulDate', None)
if datetime_value and pending:
software = []
for update in match.get('RecommendedUpdates', []):
identifier = update.get('Identifier', '<IDENTIFIER>')
product_key = update.get('Product Key', '<PRODUCT_KEY>')
software.append('{0:s}({1:s})'.format(identifier, product_key))
if not software:
return
software = ','.join(software)
event_data.desc = (
          'Last MacOS {0!s} partial update, pending {1!s}: '
'{2:s}.').format(version, pending, software)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25596
|
WindowsUserAccountsPlugin._GetUsernameFromProfilePath
|
train
|
def _GetUsernameFromProfilePath(self, path):
"""Retrieves the username from a Windows profile path.
    Trailing path segments are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment.
"""
    # Strip trailing path segment separators.
while path and path[-1] == '\\':
path = path[:-1]
if path:
_, _, path = path.rpartition('\\')
return path
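
# Hedged behavioral sketch (editorial addition), the surrounding plugin class
# is not shown here:
#
#   Given the profile path 'C:\Users\Administrator\', the trailing backslash
#   is stripped and 'Administrator' is returned; an empty or separator-only
#   path yields an empty string.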
|
python
|
{
"resource": ""
}
|
q25597
|
XMLRPCClient.CallFunction
|
train
|
def CallFunction(self):
"""Calls the function via RPC."""
if self._xmlrpc_proxy is None:
return None
rpc_call = getattr(self._xmlrpc_proxy, self._RPC_FUNCTION_NAME, None)
if rpc_call is None:
return None
try:
return rpc_call() # pylint: disable=not-callable
except (
expat.ExpatError, SocketServer.socket.error,
xmlrpclib.Fault) as exception:
logger.warning('Unable to make RPC call with error: {0!s}'.format(
exception))
return None
|
python
|
{
"resource": ""
}
|
q25598
|
XMLRPCClient.Open
|
train
|
def Open(self, hostname, port):
"""Opens a RPC communication channel to the server.
Args:
hostname (str): hostname or IP address to connect to for requests.
port (int): port to connect to for requests.
Returns:
bool: True if the communication channel was established.
"""
server_url = 'http://{0:s}:{1:d}'.format(hostname, port)
try:
self._xmlrpc_proxy = xmlrpclib.ServerProxy(
server_url, allow_none=True)
except SocketServer.socket.error as exception:
logger.warning((
'Unable to connect to RPC server on {0:s}:{1:d} with error: '
'{2!s}').format(hostname, port, exception))
return False
return True
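
# Hedged usage sketch (editorial addition): a concrete subclass is expected to
# define _RPC_FUNCTION_NAME; constructor details are not shown here.
#
#   client = XMLRPCClient()
#   if client.Open('localhost', 8080):
#     result = client.CallFunction()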
|
python
|
{
"resource": ""
}
|
q25599
|
ThreadedXMLRPCServer._Open
|
train
|
def _Open(self, hostname, port):
"""Opens the RPC communication channel for clients.
Args:
hostname (str): hostname or IP address to connect to for requests.
port (int): port to connect to for requests.
Returns:
bool: True if the communication channel was successfully opened.
"""
try:
self._xmlrpc_server = SimpleXMLRPCServer.SimpleXMLRPCServer(
(hostname, port), logRequests=False, allow_none=True)
except SocketServer.socket.error as exception:
logger.warning((
'Unable to bind a RPC server on {0:s}:{1:d} with error: '
'{2!s}').format(hostname, port, exception))
return False
self._xmlrpc_server.register_function(
self._callback, self._RPC_FUNCTION_NAME)
return True
|
python
|
{
"resource": ""
}
|