| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q25300
|
WinRegistryParser._ParseKeysFromFindSpecs
|
train
|
def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs):
"""Parses the Registry keys from FindSpecs.
Args:
parser_mediator (ParserMediator): parser mediator.
win_registry (dfwinreg.WinRegistry): Windows Registry.
find_specs (dfwinreg.FindSpecs): Keys to search for.
"""
searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry)
for registry_key_path in iter(searcher.Find(find_specs=find_specs)):
if parser_mediator.abort:
break
registry_key = searcher.GetKeyByPath(registry_key_path)
self._ParseKey(parser_mediator, registry_key)
|
python
|
{
"resource": ""
}
|
q25301
|
WinRegistryParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Windows Registry file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
"""
win_registry_reader = FileObjectWinRegistryFileReader()
try:
registry_file = win_registry_reader.Open(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open Windows Registry file with error: {0!s}'.format(
exception))
return
win_registry = dfwinreg_registry.WinRegistry()
key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)
registry_file.SetKeyPathPrefix(key_path_prefix)
root_key = registry_file.GetRootKey()
if not root_key:
return
registry_find_specs = getattr(
parser_mediator.artifacts_filter_helper, 'registry_find_specs', None)
if not registry_find_specs:
try:
self._ParseRecurseKeys(parser_mediator, root_key)
except IOError as exception:
parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))
else:
artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper
if not artifacts_filter_helper.CheckKeyCompatibility(key_path_prefix):
logger.warning((
'Artifacts filters are not supported for Windows Registry file '
'with key path prefix: "{0:s}".').format(key_path_prefix))
else:
try:
win_registry.MapFile(key_path_prefix, registry_file)
self._ParseKeysFromFindSpecs(
parser_mediator, win_registry, registry_find_specs)
except IOError as exception:
parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))
|
python
|
{
"resource": ""
}
|
q25302
|
StorageFactory.CreateStorageReaderForFile
|
train
|
def CreateStorageReaderForFile(cls, path):
"""Creates a storage reader based on the file.
Args:
path (str): path to the storage file.
Returns:
StorageReader: a storage reader or None if the storage file cannot be
opened or the storage format is not supported.
"""
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
path, check_readable_only=True):
return sqlite_reader.SQLiteStorageFileReader(path)
return None
|
python
|
{
"resource": ""
}
|
q25303
|
StorageFactory.CreateStorageWriter
|
train
|
def CreateStorageWriter(cls, storage_format, session, path):
"""Creates a storage writer.
Args:
session (Session): session the storage changes are part of.
path (str): path to the storage file.
storage_format (str): storage format.
Returns:
StorageWriter: a storage writer or None if the storage file cannot be
opened or the storage format is not supported.
"""
if storage_format == definitions.STORAGE_FORMAT_SQLITE:
return sqlite_writer.SQLiteStorageFileWriter(session, path)
return None
|
python
|
{
"resource": ""
}
|
q25304
|
StorageFactory.CreateStorageWriterForFile
|
train
|
def CreateStorageWriterForFile(cls, session, path):
"""Creates a storage writer based on the file.
Args:
session (Session): session the storage changes are part of.
path (str): path to the storage file.
Returns:
StorageWriter: a storage writer or None if the storage file cannot be
opened or the storage format is not supported.
"""
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path):
return sqlite_writer.SQLiteStorageFileWriter(session, path)
return None
|
python
|
{
"resource": ""
}
|
q25305
|
MySQL4n6TimeOutputModule.SetCredentials
|
train
|
def SetCredentials(self, password=None, username=None):
"""Sets the database credentials.
Args:
password (Optional[str]): password to access the database.
username (Optional[str]): username to access the database.
"""
if password:
self._password = password
if username:
self._user = username
|
python
|
{
"resource": ""
}
|
q25306
|
MySQL4n6TimeOutputModule.SetServerInformation
|
train
|
def SetServerInformation(self, server, port):
"""Sets the server information.
Args:
server (str): hostname or IP address of the database server.
port (int): port number of the database server.
"""
self._host = server
self._port = port
|
python
|
{
"resource": ""
}
|
q25307
|
CLITableView._WriteHeader
|
train
|
def _WriteHeader(self, output_writer):
"""Writes a header.
Args:
output_writer (OutputWriter): output writer.
"""
header_string = ''
if self._title:
header_string = ' {0:s} '.format(self._title)
header_string = self._HEADER_FORMAT_STRING.format(header_string)
output_writer.Write(header_string)
|
python
|
{
"resource": ""
}
|
q25308
|
CLITableView._WriteRow
|
train
|
def _WriteRow(self, output_writer, values):
"""Writes a row of values aligned to the column width.
Args:
output_writer (OutputWriter): output writer.
values (list[object]): values.
"""
maximum_row_width = self._MAXIMUM_WIDTH - self._column_width - 3
# The format string of the first line of the column value.
primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\n'.format(
self._column_width)
# The format string of successive lines of the column value.
secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\n'.format(
self._column_width + 3)
if isinstance(values[1], py2to3.STRING_TYPES):
value_string = values[1]
else:
value_string = '{0!s}'.format(values[1])
if len(value_string) < maximum_row_width:
output_writer.Write(primary_format_string.format(
values[0], value_string))
return
# Split the column value in words.
words = value_string.split()
current = 0
lines = []
word_buffer = []
for word in words:
current += len(word) + 1
if current >= maximum_row_width:
current = len(word)
lines.append(' '.join(word_buffer))
word_buffer = [word]
else:
word_buffer.append(word)
lines.append(' '.join(word_buffer))
# Split the column value across multiple lines.
output_writer.Write(
primary_format_string.format(values[0], lines[0]))
for line in lines[1:]:
output_writer.Write(secondary_format_string.format('', line))
|
python
|
{
"resource": ""
}
|
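The wrapping logic in _WriteRow splits the value into words and packs them greedily until the remaining row width is exceeded. Below is a minimal standalone sketch of that greedy word wrap; the function name and sample text are illustrative only and not part of plaso.

```python
def wrap_words(value_string, maximum_row_width):
  """Greedily wraps a string into lines of at most maximum_row_width."""
  lines = []
  word_buffer = []
  current = 0
  for word in value_string.split():
    current += len(word) + 1  # one extra character for the separating space
    if current >= maximum_row_width:
      current = len(word)
      lines.append(' '.join(word_buffer))
      word_buffer = [word]
    else:
      word_buffer.append(word)
  lines.append(' '.join(word_buffer))
  return lines


if __name__ == '__main__':
  for line in wrap_words('a fairly long value that no longer fits on one row', 20):
    print(line)
```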
q25309
|
ViewsFactory.GetTableView
|
train
|
def GetTableView(cls, format_type, column_names=None, title=None):
"""Retrieves a table view.
Args:
format_type (str): table view format type.
column_names (Optional[list[str]]): column names.
title (Optional[str]): title.
Returns:
BaseTableView: table view.
Raises:
ValueError: if the format type is not supported.
"""
view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None)
if not view_class:
raise ValueError('Unsupported format type: {0:s}'.format(format_type))
return view_class(column_names=column_names, title=title)
|
python
|
{
"resource": ""
}
|
q25310
|
DtFabricBasePlistPlugin._GetDataTypeMap
|
train
|
def _GetDataTypeMap(self, name):
"""Retrieves a data type map defined by the definition file.
The data type maps are cached for reuse.
Args:
name (str): name of the data type as defined by the definition file.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data.
"""
data_type_map = self._data_type_maps.get(name, None)
if not data_type_map:
data_type_map = self._fabric.CreateDataTypeMap(name)
self._data_type_maps[name] = data_type_map
return data_type_map
|
python
|
{
"resource": ""
}
|
q25311
|
DtFabricBasePlistPlugin._ReadDefinitionFile
|
train
|
def _ReadDefinitionFile(self, filename):
"""Reads a dtFabric definition file.
Args:
filename (str): name of the dtFabric definition file.
Returns:
dtfabric.DataTypeFabric: data type fabric which contains the data format
data type maps of the data type definition, such as a structure, that
can be mapped onto binary data or None if no filename is provided.
"""
if not filename:
return None
path = os.path.join(self._DEFINITION_FILES_PATH, filename)
with open(path, 'rb') as file_object:
definition = file_object.read()
return dtfabric_fabric.DataTypeFabric(yaml_definition=definition)
|
python
|
{
"resource": ""
}
|
q25312
|
RunSphinxAPIDoc
|
train
|
def RunSphinxAPIDoc(_):
"""Runs sphinx-apidoc to auto-generate documentation."""
current_directory = os.path.abspath(os.path.dirname(__file__))
module = os.path.join(current_directory, '..', 'plaso')
api_directory = os.path.join(current_directory, 'sources', 'api')
apidoc.main(['-o', api_directory, module, '--force'])
|
python
|
{
"resource": ""
}
|
q25313
|
setup
|
train
|
def setup(app):
"""Called at Sphinx initialization."""
# Triggers sphinx-apidoc to generate API documentation.
app.connect('builder-inited', RunSphinxAPIDoc)
app.add_config_value(
'recommonmark_config', {
'enable_auto_doc_ref': False},
True)
app.add_transform(AutoStructify)
app.add_transform(ProcessLink)
|
python
|
{
"resource": ""
}
|
q25314
|
FileHistoryESEDBPlugin._GetDictFromStringsTable
|
train
|
def _GetDictFromStringsTable(self, parser_mediator, table):
"""Build a dictionary of the value in the strings table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table (pyesedb.table): strings table.
Returns:
dict[str,object]: values per column name.
"""
if not table:
return {}
record_values = {}
for record in table.records:
if parser_mediator.abort:
break
if record.get_number_of_values() != 2:
continue
identification = self._GetRecordValue(record, 0)
filename = self._GetRecordValue(record, 1)
if not identification:
continue
record_values[identification] = filename
return record_values
|
python
|
{
"resource": ""
}
|
q25315
|
FileHistoryESEDBPlugin.ParseNameSpace
|
train
|
def ParseNameSpace(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
"""Parses the namespace table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
strings = cache.GetResults('strings')
if not strings:
esedb_table = database.get_table_by_name('string')
strings = self._GetDictFromStringsTable(parser_mediator, esedb_table)
cache.StoreDictInCache('strings', strings)
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = FileHistoryNamespaceEventData()
event_data.file_attribute = record_values.get('fileAttrib', None)
event_data.identifier = record_values.get('id', None)
event_data.parent_identifier = record_values.get('parentId', None)
event_data.usn_number = record_values.get('usn', None)
event_data.original_filename = strings.get(event_data.identifier, None)
created_timestamp = record_values.get('fileCreated')
if created_timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=created_timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modified_timestamp = record_values.get('fileModified')
if modified_timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=modified_timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if not created_timestamp and not modified_timestamp:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25316
|
AnalysisMediator.GetDisplayNameForPathSpec
|
train
|
def GetDisplayNameForPathSpec(self, path_spec):
"""Retrieves the display name for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
str: human readable version of the path specification.
"""
return path_helper.PathHelper.GetDisplayNameForPathSpec(
path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)
|
python
|
{
"resource": ""
}
|
q25317
|
AnalysisMediator.ProduceAnalysisReport
|
train
|
def ProduceAnalysisReport(self, plugin):
"""Produces an analysis report.
Args:
plugin (AnalysisPlugin): plugin.
"""
analysis_report = plugin.CompileReport(self)
if not analysis_report:
return
analysis_report.time_compiled = timelib.Timestamp.GetNow()
plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name)
if plugin_name:
analysis_report.plugin_name = plugin_name
if self._event_filter_expression:
# TODO: rename filter string when refactoring the analysis reports.
analysis_report.filter_string = self._event_filter_expression
self._storage_writer.AddAnalysisReport(analysis_report)
self.number_of_produced_analysis_reports += 1
self.number_of_produced_event_tags = (
self._storage_writer.number_of_event_tags)
self.last_activity_timestamp = time.time()
|
python
|
{
"resource": ""
}
|
q25318
|
AnalysisMediator.ProduceEventTag
|
train
|
def ProduceEventTag(self, event_tag):
"""Produces an event tag.
Args:
event_tag (EventTag): event tag.
"""
self._storage_writer.AddEventTag(event_tag)
self.number_of_produced_event_tags += 1
self.last_activity_timestamp = time.time()
|
python
|
{
"resource": ""
}
|
q25319
|
PsortEventHeap._GetEventIdentifiers
|
train
|
def _GetEventIdentifiers(self, event):
"""Retrieves different identifiers of the event.
Every event contains event data, which consists of attributes and values.
These attributes and values can be represented as a string and used for
sorting and uniquely identifying events. This function determines multiple
identifiers:
* an identifier of the attributes and values without the timestamp
description (or usage). This is referred to as the MACB group
identifier.
* an identifier of the attributes and values including the timestamp
description (or usage). This is referred to as the event content
identifier.
The identifier without the timestamp description can be used to group
events that have the same MACB (modification, access, change, birth)
timestamps. The PsortEventHeap will store these events individually and
relies on PsortMultiProcessEngine to do the actual grouping of events.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: identifier of the event MACB group or None if the event cannot
be grouped.
str: identifier of the event content.
"""
attributes = []
attribute_string = 'data_type: {0:s}'.format(event.data_type)
attributes.append(attribute_string)
for attribute_name, attribute_value in sorted(event.GetAttributes()):
if attribute_name in self._IDENTIFIER_EXCLUDED_ATTRIBUTES:
continue
if not attribute_value:
continue
if attribute_name == 'pathspec':
attribute_value = attribute_value.comparable
elif isinstance(attribute_value, dict):
attribute_value = sorted(attribute_value.items())
elif isinstance(attribute_value, set):
attribute_value = sorted(list(attribute_value))
elif isinstance(attribute_value, py2to3.BYTES_TYPE):
attribute_value = repr(attribute_value)
try:
attribute_string = '{0:s}: {1!s}'.format(
attribute_name, attribute_value)
except UnicodeDecodeError:
logger.error('Failed to decode attribute {0:s}'.format(
attribute_name))
attributes.append(attribute_string)
# The 'atime', 'ctime', 'crtime', 'mtime' are included for backwards
# compatibility with the filestat parser.
if event.timestamp_desc in (
'atime', 'ctime', 'crtime', 'mtime',
definitions.TIME_DESCRIPTION_LAST_ACCESS,
definitions.TIME_DESCRIPTION_CHANGE,
definitions.TIME_DESCRIPTION_CREATION,
definitions.TIME_DESCRIPTION_MODIFICATION):
macb_group_identifier = ', '.join(attributes)
else:
macb_group_identifier = None
attributes.insert(0, event.timestamp_desc)
content_identifier = ', '.join(attributes)
return macb_group_identifier, content_identifier
|
python
|
{
"resource": ""
}
|
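In essence _GetEventIdentifiers serializes the sorted event attributes into a comma separated string, once without the timestamp description (the MACB group identifier) and once with it prepended (the content identifier). The sketch below shows that idea with a plain dict in place of plaso's EventObject; it omits the MACB-only gating of the group identifier and the special handling of path specifications, and the attribute names are invented.

```python
def get_identifiers(event_attributes, timestamp_desc, excluded=('timestamp',)):
  """Builds (macb_group_identifier, content_identifier) for an event dict."""
  attributes = []
  for name, value in sorted(event_attributes.items()):
    if name in excluded or not value:
      continue
    attributes.append('{0:s}: {1!s}'.format(name, value))
  macb_group_identifier = ', '.join(attributes)
  content_identifier = ', '.join([timestamp_desc] + attributes)
  return macb_group_identifier, content_identifier


event = {'data_type': 'fs:stat', 'filename': '/tmp/example', 'timestamp': 123}
print(get_identifiers(event, 'Creation Time'))
```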
q25320
|
PsortEventHeap.PopEvents
|
train
|
def PopEvents(self):
"""Pops events from the heap.
Yields:
EventObject: event.
"""
event = self.PopEvent()
while event:
yield event
event = self.PopEvent()
|
python
|
{
"resource": ""
}
|
q25321
|
PsortMultiProcessEngine._CheckStatusAnalysisProcess
|
train
|
def _CheckStatusAnalysisProcess(self, pid):
"""Checks the status of an analysis process.
Args:
pid (int): process ID (PID) of a registered analysis process.
Raises:
KeyError: if the process is not registered with the engine.
"""
# TODO: Refactor this method, simplify and separate concerns (monitoring
# vs management).
self._RaiseIfNotRegistered(pid)
if pid in self._completed_analysis_processes:
status_indicator = definitions.STATUS_INDICATOR_COMPLETED
process_status = {
'processing_status': status_indicator}
used_memory = 0
else:
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
if process_status is None:
process_is_alive = False
else:
process_is_alive = True
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logger.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
if status_indicator == definitions.STATUS_INDICATOR_COMPLETED:
self._completed_analysis_processes.add(pid)
else:
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logger.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http://localhost:{2:d}').format(
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.STATUS_INDICATOR_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.STATUS_INDICATOR_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
if status_indicator in definitions.ERROR_STATUS_INDICATORS:
logger.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(
process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
|
python
|
{
"resource": ""
}
|
q25322
|
PsortMultiProcessEngine._ExportEvent
|
train
|
def _ExportEvent(self, output_module, event, deduplicate_events=True):
"""Exports an event using an output module.
Args:
output_module (OutputModule): output module.
event (EventObject): event.
deduplicate_events (Optional[bool]): True if events should be
deduplicated.
"""
if event.timestamp != self._export_event_timestamp:
self._FlushExportBuffer(
output_module, deduplicate_events=deduplicate_events)
self._export_event_timestamp = event.timestamp
self._export_event_heap.PushEvent(event)
|
python
|
{
"resource": ""
}
|
q25323
|
PsortMultiProcessEngine._FlushExportBuffer
|
train
|
def _FlushExportBuffer(self, output_module, deduplicate_events=True):
"""Flushes buffered events and writes them to the output module.
Args:
output_module (OutputModule): output module.
deduplicate_events (Optional[bool]): True if events should be
deduplicated.
"""
last_macb_group_identifier = None
last_content_identifier = None
macb_group = []
generator = self._export_event_heap.PopEvents()
for macb_group_identifier, content_identifier, event in generator:
if deduplicate_events and last_content_identifier == content_identifier:
self._events_status.number_of_duplicate_events += 1
continue
if macb_group_identifier is None:
if macb_group:
output_module.WriteEventMACBGroup(macb_group)
macb_group = []
output_module.WriteEvent(event)
else:
if (last_macb_group_identifier == macb_group_identifier or
not macb_group):
macb_group.append(event)
else:
output_module.WriteEventMACBGroup(macb_group)
macb_group = [event]
self._events_status.number_of_macb_grouped_events += 1
last_macb_group_identifier = macb_group_identifier
last_content_identifier = content_identifier
if macb_group:
output_module.WriteEventMACBGroup(macb_group)
|
python
|
{
"resource": ""
}
|
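The flush loop deduplicates consecutive events that share a content identifier and groups runs of events that share a MACB group identifier, writing a group whenever the identifier changes. A standalone sketch of the same pass over (group_id, content_id, event) tuples, with invented sample data and without the status counters:

```python
def group_events(entries):
  """Yields ('event', event) or ('macb_group', [events]) in output order."""
  last_group_id = None
  last_content_id = None
  macb_group = []
  for group_id, content_id, event in entries:
    if content_id == last_content_id:
      continue  # duplicate of the previous event, skip it
    if group_id is None:
      if macb_group:
        yield 'macb_group', macb_group
        macb_group = []
      yield 'event', event
    elif group_id == last_group_id or not macb_group:
      macb_group.append(event)
    else:
      yield 'macb_group', macb_group
      macb_group = [event]
    last_group_id = group_id
    last_content_id = content_id
  if macb_group:
    yield 'macb_group', macb_group


entries = [('g1', 'c1', 'e1'), ('g1', 'c2', 'e2'), (None, 'c3', 'e3')]
print(list(group_events(entries)))
# [('macb_group', ['e1', 'e2']), ('event', 'e3')]
```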
q25324
|
PsortMultiProcessEngine._MergeEventTag
|
train
|
def _MergeEventTag(self, storage_writer, attribute_container):
"""Merges an event tag with the last stored event tag.
If there is an existing event tag the provided event tag is updated with
the contents of the existing one, after which the event tag index is
updated.
Args:
storage_writer (StorageWriter): storage writer.
attribute_container (AttributeContainer): container.
"""
if attribute_container.CONTAINER_TYPE != 'event_tag':
return
event_identifier = attribute_container.GetEventIdentifier()
if not event_identifier:
return
# Check if the event has already been tagged on a previous occasion;
# if so, we need to append the new event tag to the last stored one.
stored_event_tag = self._event_tag_index.GetEventTagByIdentifier(
storage_writer, event_identifier)
if stored_event_tag:
attribute_container.AddComment(stored_event_tag.comment)
attribute_container.AddLabels(stored_event_tag.labels)
self._event_tag_index.SetEventTag(attribute_container)
|
python
|
{
"resource": ""
}
|
q25325
|
PsortMultiProcessEngine._StartAnalysisProcesses
|
train
|
def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):
"""Starts the analysis processes.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names.
"""
logger.info('Starting analysis plugins.')
for analysis_plugin in analysis_plugins.values():
self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin
process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)
if not process:
logger.error('Unable to create analysis process: {0:s}'.format(
analysis_plugin.NAME))
logger.info('Analysis plugins running')
|
python
|
{
"resource": ""
}
|
q25326
|
PsortMultiProcessEngine._StopAnalysisProcesses
|
train
|
def _StopAnalysisProcesses(self, abort=False):
"""Stops the analysis processes.
Args:
abort (bool): True to indicate the stop is issued on abort.
"""
logger.debug('Stopping analysis processes.')
self._StopMonitoringProcesses()
# Note that multiprocessing.Queue is very sensitive regarding
# blocking on either a get or a put. So we try to prevent using
# any blocking behavior.
if abort:
# Signal all the processes to abort.
self._AbortTerminate()
if not self._use_zeromq:
logger.debug('Emptying queues.')
for event_queue in self._event_queues.values():
event_queue.Empty()
# Wake the processes to make sure that they are not blocking
# waiting for new items on the queue.
for event_queue in self._event_queues.values():
event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
# Try waiting for the processes to exit normally.
self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
for event_queue in self._event_queues.values():
event_queue.Close(abort=abort)
if abort:
# Kill any remaining processes.
self._AbortKill()
else:
# Check if the processes are still alive and terminate them if necessary.
self._AbortTerminate()
self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
for event_queue in self._event_queues.values():
event_queue.Close(abort=True)
|
python
|
{
"resource": ""
}
|
q25327
|
PsortMultiProcessEngine._UpdateForemanProcessStatus
|
train
|
def _UpdateForemanProcessStatus(self):
"""Update the foreman process status."""
used_memory = self._process_information.GetUsedMemory() or 0
display_name = getattr(self._merge_task, 'identifier', '')
self._processing_status.UpdateForemanStatus(
self._name, self._status, self._pid, used_memory, display_name,
self._number_of_consumed_sources, self._number_of_produced_sources,
self._number_of_consumed_events, self._number_of_produced_events,
self._number_of_consumed_event_tags,
self._number_of_produced_event_tags,
self._number_of_consumed_warnings, self._number_of_produced_warnings,
self._number_of_consumed_reports, self._number_of_produced_reports)
self._processing_status.UpdateEventsStatus(self._events_status)
|
python
|
{
"resource": ""
}
|
q25328
|
WinLnkLinkFormatter._GetLinkedPath
|
train
|
def _GetLinkedPath(self, event):
"""Determines the linked path.
Args:
event (EventObject): event that contains a linked path.
Returns:
str: linked path.
"""
if hasattr(event, 'local_path'):
return event.local_path
if hasattr(event, 'network_path'):
return event.network_path
if hasattr(event, 'relative_path'):
paths = []
if hasattr(event, 'working_directory'):
paths.append(event.working_directory)
paths.append(event.relative_path)
return '\\'.join(paths)
return 'Unknown'
|
python
|
{
"resource": ""
}
|
q25329
|
BrowserSearchPlugin._DecodeURL
|
train
|
def _DecodeURL(self, url):
"""Decodes the URL, replaces %XX to their corresponding characters.
Args:
url (str): encoded URL.
Returns:
str: decoded URL.
"""
if not url:
return ''
decoded_url = urlparse.unquote(url)
if isinstance(decoded_url, py2to3.BYTES_TYPE):
try:
decoded_url = decoded_url.decode('utf-8')
except UnicodeDecodeError as exception:
decoded_url = decoded_url.decode('utf-8', errors='replace')
logger.warning(
'Unable to decode URL: {0:s} with error: {1!s}'.format(
url, exception))
return decoded_url
|
python
|
{
"resource": ""
}
|
q25330
|
BrowserSearchPlugin._ExtractGMailSearchQuery
|
train
|
def _ExtractGMailSearchQuery(self, url):
"""Extracts a search query from a GMail search URL.
GMail: https://mail.google.com/mail/u/0/#search/query[/?]
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'search/' not in url:
return None
_, _, line = url.partition('search/')
line, _, _ = line.partition('/')
line, _, _ = line.partition('?')
return line.replace('+', ' ')
|
python
|
{
"resource": ""
}
|
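A worked trace of the partition chain above on a made-up GMail search URL (the thread suffix is invented for illustration):

```python
url = 'https://mail.google.com/mail/u/0/#search/forensic+timeline/FMfcgxwJ'
_, _, line = url.partition('search/')  # 'forensic+timeline/FMfcgxwJ'
line, _, _ = line.partition('/')       # 'forensic+timeline'
line, _, _ = line.partition('?')       # unchanged, no '?' present
print(line.replace('+', ' '))          # forensic timeline
```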
q25331
|
BrowserSearchPlugin._ExtractGoogleDocsSearchQuery
|
train
|
def _ExtractGoogleDocsSearchQuery(self, url):
"""Extracts a search query from a Google docs URL.
Google Docs: https://docs.google.com/.*/u/0/?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ')
|
python
|
{
"resource": ""
}
|
q25332
|
BrowserSearchPlugin._ExtractGoogleSearchQuery
|
train
|
def _ExtractGoogleSearchQuery(self, url):
"""Extracts a search query from a Google URL.
Google Drive: https://drive.google.com/drive/search?q=query
Google Search: https://www.google.com/search?q=query
Google Sites: https://sites.google.com/site/.*/system/app/pages/
search?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'search' not in url or 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ')
|
python
|
{
"resource": ""
}
|
q25333
|
BrowserSearchPlugin._ExtractYahooSearchQuery
|
train
|
def _ExtractYahooSearchQuery(self, url):
"""Extracts a search query from a Yahoo search URL.
Examples:
https://search.yahoo.com/search?p=query
https://search.yahoo.com/search;?p=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'p=' not in url:
return None
_, _, line = url.partition('p=')
before_and, _, _ = line.partition('&')
if not before_and:
return None
yahoo_search_url = before_and.split()[0]
return yahoo_search_url.replace('+', ' ')
|
python
|
{
"resource": ""
}
|
q25334
|
BrowserSearchPlugin._ExtractYandexSearchQuery
|
train
|
def _ExtractYandexSearchQuery(self, url):
"""Extracts a search query from a Yandex search URL.
Yandex: https://www.yandex.com/search/?text=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'text=' not in url:
return None
_, _, line = url.partition('text=')
before_and, _, _ = line.partition('&')
if not before_and:
return None
yandex_search_url = before_and.split()[0]
return yandex_search_url.replace('+', ' ')
|
python
|
{
"resource": ""
}
|
q25335
|
BrowserSearchPlugin._GetBetweenQEqualsAndAmpersand
|
train
|
def _GetBetweenQEqualsAndAmpersand(self, url):
"""Retrieves the substring between the substrings 'q=' and '&'.
Args:
url (str): URL.
Returns:
str: search query, the value between 'q=' and '&', or an empty string
if no query was found.
"""
# Make sure we're analyzing the query part of the URL.
_, _, url = url.partition('?')
# Look for a key value pair named 'q'.
_, _, url = url.partition('q=')
if not url:
return ''
# Strip additional key value pairs.
url, _, _ = url.partition('&')
return url
|
python
|
{
"resource": ""
}
|
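For a well-formed query string the same parameter could also be pulled out with the standard library; this is an alternative sketch assuming Python 3's urllib, not what the plugin itself does, and the example URL is invented.

```python
from urllib.parse import parse_qs, urlsplit


def get_q_parameter(url):
  """Returns the first 'q' query parameter or an empty string."""
  values = parse_qs(urlsplit(url).query).get('q', [])
  return values[0] if values else ''


print(get_q_parameter('https://www.google.com/search?q=plaso+super+timeline&hl=en'))
# plaso super timeline
```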
q25336
|
TangoAndroidTCPlugin.ParseConversationRow
|
train
|
def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a conversation row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TangoAndroidConversationEventData()
event_data.conversation_identifier = self._GetRowValue(
query_hash, row, 'conv_id')
# TODO: payload is a base64 encoded binary blob, we need to find the
# structure to extract the relevant bits.
# event_data.payload = self._GetRowValue(query_hash, row, 'payload')
date_time = dfdatetime_semantic_time.NotSet()
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25337
|
TangoAndroidTCPlugin.ParseMessageRow
|
train
|
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a message row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TangoAndroidMessageEventData()
event_data.message_identifier = self._GetRowValue(
query_hash, row, 'msg_id')
# TODO: payload is a base64 encoded binary blob, we need to find the
# structure to extract the relevant bits.
# event_data.payload = self._GetRowValue(query_hash, row, 'payload')
event_data.direction = self._GetRowValue(query_hash, row, 'direction')
timestamp = self._GetRowValue(query_hash, row, 'create_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'send_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_SENT)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25338
|
NativePythonFormatterHelper.GetFormattedEventObject
|
train
|
def GetFormattedEventObject(cls, event):
"""Retrieves a string representation of the event.
Args:
event (EventObject): event.
Returns:
str: string representation of the event.
"""
time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)
lines_of_text = [
'+-' * 40,
'[Timestamp]:',
' {0:s}'.format(time_string)]
pathspec = getattr(event, 'pathspec', None)
if pathspec:
lines_of_text.append('[Pathspec]:')
attribute_string = pathspec.comparable.replace('\n', '\n ')
attribute_string = ' {0:s}\n'.format(attribute_string)
lines_of_text.append(attribute_string)
# TODO: add support for event tag after event clean up.
lines_of_text.append('[Reserved attributes]:')
out_additional = ['[Additional attributes]:']
for attribute_name, attribute_value in sorted(event.GetAttributes()):
if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:
attribute_string = ' {{{0!s}}} {1!s}'.format(
attribute_name, attribute_value)
out_additional.append(attribute_string)
elif attribute_name not in ('pathspec', 'tag'):
attribute_string = ' {{{0!s}}} {1!s}'.format(
attribute_name, attribute_value)
lines_of_text.append(attribute_string)
lines_of_text.append('')
out_additional.append('')
lines_of_text.extend(out_additional)
return '\n'.join(lines_of_text)
|
python
|
{
"resource": ""
}
|
q25339
|
_PendingMergeTaskHeap.PopTask
|
train
|
def PopTask(self):
"""Retrieves and removes the first task from the heap.
Returns:
Task: the task or None if the heap is empty.
"""
try:
_, task = heapq.heappop(self._heap)
except IndexError:
return None
self._task_identifiers.remove(task.identifier)
return task
|
python
|
{
"resource": ""
}
|
q25340
|
_PendingMergeTaskHeap.PushTask
|
train
|
def PushTask(self, task):
"""Pushes a task onto the heap.
Args:
task (Task): task.
Raises:
ValueError: if the size of the storage file is not set in the task.
"""
storage_file_size = getattr(task, 'storage_file_size', None)
if not storage_file_size:
raise ValueError('Task storage file size not set.')
if task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY:
weight = 1
else:
weight = storage_file_size
task.merge_priority = weight
heap_values = (weight, task)
heapq.heappush(self._heap, heap_values)
self._task_identifiers.add(task.identifier)
|
python
|
{
"resource": ""
}
|
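PushTask weights directory tasks at 1 so they are merged before file tasks, which are weighted by their storage file size; heapq then always pops the smallest weight first. A minimal standalone sketch of that priority scheme, with invented task identifiers in place of plaso's task attribute containers:

```python
import heapq

heap = []
# (weight, identifier): directories get weight 1, files their storage size.
heapq.heappush(heap, (4096, 'file_task_a'))
heapq.heappush(heap, (1, 'directory_task'))
heapq.heappush(heap, (1024, 'file_task_b'))

while heap:
  weight, identifier = heapq.heappop(heap)
  print(weight, identifier)
# 1 directory_task
# 1024 file_task_b
# 4096 file_task_a
```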
q25341
|
TaskManager._AbandonInactiveProcessingTasks
|
train
|
def _AbandonInactiveProcessingTasks(self):
"""Marks processing tasks that exceed the inactive time as abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock.
"""
if self._tasks_processing:
inactive_time = time.time() - self._TASK_INACTIVE_TIME
inactive_time = int(inactive_time * definitions.MICROSECONDS_PER_SECOND)
# Abandon all tasks after they're identified so as not to modify the
# dict while iterating over it.
tasks_to_abandon = []
for task_identifier, task in iter(self._tasks_processing.items()):
if task.last_processing_time < inactive_time:
logger.debug('Abandoned processing task: {0:s}.'.format(
task_identifier))
self.SampleTaskStatus(task, 'abandoned_processing')
tasks_to_abandon.append((task_identifier, task))
for task_identifier, task in tasks_to_abandon:
self._tasks_abandoned[task_identifier] = task
del self._tasks_processing[task_identifier]
|
python
|
{
"resource": ""
}
|
q25342
|
TaskManager._AbandonQueuedTasks
|
train
|
def _AbandonQueuedTasks(self):
"""Marks queued tasks abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock.
"""
# Abandon all tasks after they're identified so as not to modify the
# dict while iterating over it.
tasks_to_abandon = []
for task_identifier, task in iter(self._tasks_queued.items()):
logger.debug('Abandoned queued task: {0:s}.'.format(task_identifier))
tasks_to_abandon.append((task_identifier, task))
for task_identifier, task in tasks_to_abandon:
self._tasks_abandoned[task_identifier] = task
del self._tasks_queued[task_identifier]
|
python
|
{
"resource": ""
}
|
q25343
|
TaskManager.CheckTaskToMerge
|
train
|
def CheckTaskToMerge(self, task):
"""Checks if the task should be merged.
Args:
task (Task): task.
Returns:
bool: True if the task should be merged.
Raises:
KeyError: if the task was not queued, processing or abandoned.
"""
with self._lock:
is_abandoned = task.identifier in self._tasks_abandoned
is_processing = task.identifier in self._tasks_processing
is_queued = task.identifier in self._tasks_queued
if not is_queued and not is_processing and not is_abandoned:
raise KeyError('Status of task {0:s} is unknown.'.format(
task.identifier))
return is_queued or is_processing or is_abandoned and not task.has_retry
|
python
|
{
"resource": ""
}
|
q25344
|
TaskManager.CreateRetryTask
|
train
|
def CreateRetryTask(self):
"""Creates a task that to retry a previously abandoned task.
Returns:
Task: a task that was abandoned but should be retried or None if there are
no abandoned tasks that should be retried.
"""
with self._lock:
abandoned_task = self._GetTaskPendingRetry()
if not abandoned_task:
return None
# The abandoned task is kept in _tasks_abandoned so it can still be
# identified in CheckTaskToMerge and UpdateTaskAsPendingMerge.
retry_task = abandoned_task.CreateRetryTask()
logger.debug('Retrying task {0:s} as {1:s}.'.format(
abandoned_task.identifier, retry_task.identifier))
self._tasks_queued[retry_task.identifier] = retry_task
self._total_number_of_tasks += 1
self.SampleTaskStatus(retry_task, 'created_retry')
return retry_task
|
python
|
{
"resource": ""
}
|
q25345
|
TaskManager.CreateTask
|
train
|
def CreateTask(self, session_identifier):
"""Creates a task.
Args:
session_identifier (str): the identifier of the session the task is
part of.
Returns:
Task: task attribute container.
"""
task = tasks.Task(session_identifier)
logger.debug('Created task: {0:s}.'.format(task.identifier))
with self._lock:
self._tasks_queued[task.identifier] = task
self._total_number_of_tasks += 1
self.SampleTaskStatus(task, 'created')
return task
|
python
|
{
"resource": ""
}
|
q25346
|
TaskManager.CompleteTask
|
train
|
def CompleteTask(self, task):
"""Completes a task.
The task is complete and can be removed from the task manager.
Args:
task (Task): task.
Raises:
KeyError: if the task was not merging.
"""
with self._lock:
if task.identifier not in self._tasks_merging:
raise KeyError('Task {0:s} was not merging.'.format(task.identifier))
self.SampleTaskStatus(task, 'completed')
del self._tasks_merging[task.identifier]
logger.debug('Completed task {0:s}.'.format(task.identifier))
|
python
|
{
"resource": ""
}
|
q25347
|
TaskManager.GetFailedTasks
|
train
|
def GetFailedTasks(self):
"""Retrieves all failed tasks.
Failed tasks are tasks that were abandoned and have no retry task once
the foreman is done processing.
Returns:
list[Task]: tasks.
"""
# TODO: add check to determine foreman is done processing.
with self._lock:
return [task for task in self._tasks_abandoned.values()
if not task.has_retry]
|
python
|
{
"resource": ""
}
|
q25348
|
TaskManager.GetProcessedTaskByIdentifier
|
train
|
def GetProcessedTaskByIdentifier(self, task_identifier):
"""Retrieves a task that has been processed.
Args:
task_identifier (str): unique identifier of the task.
Returns:
Task: a task that has been processed.
Raises:
KeyError: if the task was not processing, queued or abandoned.
"""
with self._lock:
task = self._tasks_processing.get(task_identifier, None)
if not task:
task = self._tasks_queued.get(task_identifier, None)
if not task:
task = self._tasks_abandoned.get(task_identifier, None)
if not task:
raise KeyError('Status of task {0:s} is unknown'.format(
task_identifier))
return task
|
python
|
{
"resource": ""
}
|
q25349
|
TaskManager.GetStatusInformation
|
train
|
def GetStatusInformation(self):
"""Retrieves status information about the tasks.
Returns:
TasksStatus: tasks status information.
"""
status = processing_status.TasksStatus()
with self._lock:
status.number_of_abandoned_tasks = len(self._tasks_abandoned)
status.number_of_queued_tasks = len(self._tasks_queued)
status.number_of_tasks_pending_merge = (
len(self._tasks_pending_merge) + len(self._tasks_merging))
status.number_of_tasks_processing = len(self._tasks_processing)
status.total_number_of_tasks = self._total_number_of_tasks
return status
|
python
|
{
"resource": ""
}
|
q25350
|
TaskManager.GetTaskPendingMerge
|
train
|
def GetTaskPendingMerge(self, current_task):
"""Retrieves the first task that is pending merge or has a higher priority.
This function will check if there is a task with a higher merge priority
than the current_task being merged. If so, that task with the higher
priority is returned.
Args:
current_task (Task): current task being merged or None if no such task.
Returns:
Task: the next task to merge or None if there is no task pending merge or
with a higher priority.
"""
next_task = self._tasks_pending_merge.PeekTask()
if not next_task:
return None
if current_task and next_task.merge_priority > current_task.merge_priority:
return None
with self._lock:
next_task = self._tasks_pending_merge.PopTask()
self._tasks_merging[next_task.identifier] = next_task
return next_task
|
python
|
{
"resource": ""
}
|
q25351
|
TaskManager.HasPendingTasks
|
train
|
def HasPendingTasks(self):
"""Determines if there are tasks running or in need of retrying.
Returns:
bool: True if there are tasks that are active, ready to be merged or
need to be retried.
"""
with self._lock:
self._AbandonInactiveProcessingTasks()
if self._tasks_processing:
return True
# There are no tasks being processed, but we might be
# waiting for some tasks to be merged.
if self._HasTasksPendingMerge():
return True
# There are no tasks processing or pending merge, but there may
# still be some waiting to be retried, so we check that.
if self._HasTasksPendingRetry():
return True
# It is possible that a worker has processed a task and the foreman has
# not been informed about it, since there is no feedback from the worker
# when it pops a task from the queue.
# If we believe all the workers are idle for longer than the task
# inactive time (timeout) abandon all queued tasks. This ensures
# that processing actually stops when the foreman never gets an
# update from a worker.
if self._tasks_queued:
inactive_time = time.time() - self._TASK_INACTIVE_TIME
inactive_time = int(inactive_time * definitions.MICROSECONDS_PER_SECOND)
if self._latest_task_processing_time < inactive_time:
self._AbandonQueuedTasks()
if self._tasks_queued:
return True
if self._tasks_merging:
return True
# There are no tasks pending any work.
return False
|
python
|
{
"resource": ""
}
|
q25352
|
TaskManager.RemoveTask
|
train
|
def RemoveTask(self, task):
"""Removes an abandoned task.
Args:
task (Task): task.
Raises:
KeyError: if the task was not abandoned or the task was abandoned and
was not retried.
"""
with self._lock:
if task.identifier not in self._tasks_abandoned:
raise KeyError('Task {0:s} was not abandoned.'.format(task.identifier))
if not task.has_retry:
raise KeyError(
'Will not remove a task {0:s} without retry task.'.format(
task.identifier))
del self._tasks_abandoned[task.identifier]
logger.debug('Removed task {0:s}.'.format(task.identifier))
|
python
|
{
"resource": ""
}
|
q25353
|
TaskManager.SampleTaskStatus
|
train
|
def SampleTaskStatus(self, task, status):
"""Takes a sample of the status of the task for profiling.
Args:
task (Task): a task.
status (str): status.
"""
if self._tasks_profiler:
self._tasks_profiler.Sample(task, status)
|
python
|
{
"resource": ""
}
|
q25354
|
TaskManager.UpdateTaskAsPendingMerge
|
train
|
def UpdateTaskAsPendingMerge(self, task):
"""Updates the task manager to reflect the task is ready to be merged.
Args:
task (Task): task.
Raises:
KeyError: if the task was not queued, processing or abandoned, or
the task was abandoned and has a retry task.
"""
with self._lock:
is_abandoned = task.identifier in self._tasks_abandoned
is_processing = task.identifier in self._tasks_processing
is_queued = task.identifier in self._tasks_queued
if not is_queued and not is_processing and not is_abandoned:
raise KeyError('Status of task {0:s} is unknown.'.format(
task.identifier))
if is_abandoned and task.has_retry:
raise KeyError('Will not merge a task {0:s} with retry task.'.format(
task.identifier))
if is_queued:
logger.debug('Task {0:s} was queued, now merging.'.format(
task.identifier))
del self._tasks_queued[task.identifier]
if is_processing:
logger.debug('Task {0:s} was processing, now merging.'.format(
task.identifier))
del self._tasks_processing[task.identifier]
if is_abandoned:
logger.debug('Task {0:s} was abandoned, now merging.'.format(
task.identifier))
del self._tasks_abandoned[task.identifier]
self._tasks_pending_merge.PushTask(task)
self.SampleTaskStatus(task, 'pending_merge')
task.UpdateProcessingTime()
self._UpdateLatestProcessingTime(task)
|
python
|
{
"resource": ""
}
|
q25355
|
TaskManager.UpdateTaskAsProcessingByIdentifier
|
train
|
def UpdateTaskAsProcessingByIdentifier(self, task_identifier):
"""Updates the task manager to reflect the task is processing.
Args:
task_identifier (str): unique identifier of the task.
Raises:
KeyError: if the task is not known to the task manager.
"""
with self._lock:
task_processing = self._tasks_processing.get(task_identifier, None)
if task_processing:
task_processing.UpdateProcessingTime()
self._UpdateLatestProcessingTime(task_processing)
return
task_queued = self._tasks_queued.get(task_identifier, None)
if task_queued:
logger.debug('Task {0:s} was queued, now processing.'.format(
task_identifier))
self._tasks_processing[task_identifier] = task_queued
del self._tasks_queued[task_identifier]
task_queued.UpdateProcessingTime()
self._UpdateLatestProcessingTime(task_queued)
return
task_abandoned = self._tasks_abandoned.get(task_identifier, None)
if task_abandoned:
del self._tasks_abandoned[task_identifier]
self._tasks_processing[task_identifier] = task_abandoned
logger.debug('Task {0:s} was abandoned, but now processing.'.format(
task_identifier))
task_abandoned.UpdateProcessingTime()
self._UpdateLatestProcessingTime(task_abandoned)
return
if task_identifier in self._tasks_pending_merge:
# No need to update the processing time, as this task is already
# finished processing and is just waiting for merge.
return
# If we get here, we don't know what state the tasks is in, so raise.
raise KeyError('Status of task {0:s} is unknown.'.format(task_identifier))
|
python
|
{
"resource": ""
}
|
q25356
|
AndroidSMSPlugin.ParseSmsRow
|
train
|
def ParseSmsRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses an SMS row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
sms_read = self._GetRowValue(query_hash, row, 'read')
sms_type = self._GetRowValue(query_hash, row, 'type')
event_data = AndroidSMSEventData()
event_data.address = self._GetRowValue(query_hash, row, 'address')
event_data.body = self._GetRowValue(query_hash, row, 'body')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.sms_read = self.SMS_READ.get(sms_read, 'UNKNOWN')
event_data.sms_type = self.SMS_TYPE.get(sms_type, 'UNKNOWN')
timestamp = self._GetRowValue(query_hash, row, 'date')
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25357
|
PstealTool._GenerateStorageFileName
|
train
|
def _GenerateStorageFileName(self):
"""Generates a name for the storage file.
The result uses a timestamp and the basename of the source path.
Returns:
str: a filename for the storage file in the form <time>-<source>.plaso
Raises:
BadConfigOption: raised if the source path is not set.
"""
if not self._source_path:
raise errors.BadConfigOption('Please define a source (--source).')
timestamp = datetime.datetime.now()
datetime_string = timestamp.strftime('%Y%m%dT%H%M%S')
source_path = os.path.abspath(self._source_path)
if source_path.endswith(os.path.sep):
source_path = os.path.dirname(source_path)
source_name = os.path.basename(source_path)
if not source_name or source_name in ('/', '\\'):
# The user passed the filesystem's root as source
source_name = 'ROOT'
return '{0:s}-{1:s}.plaso'.format(datetime_string, source_name)
|
python
|
{
"resource": ""
}
|
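A short illustration of the <time>-<source>.plaso naming scheme produced above, using an invented source path and a fixed timestamp:

```python
import datetime
import os

source_path = '/cases/evidence/workstation.dd'        # hypothetical source
timestamp = datetime.datetime(2023, 5, 1, 9, 30, 0)   # fixed time for the example
datetime_string = timestamp.strftime('%Y%m%dT%H%M%S')
source_name = os.path.basename(os.path.abspath(source_path))
print('{0:s}-{1:s}.plaso'.format(datetime_string, source_name))
# 20230501T093000-workstation.dd.plaso
```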
q25358
|
PstealTool.AnalyzeEvents
|
train
|
def AnalyzeEvents(self):
"""Analyzes events from a plaso storage file and generate a report.
Raises:
BadConfigOption: when a configuration parameter fails validation.
RuntimeError: if a non-recoverable situation is encountered.
"""
session = engine.BaseEngine.CreateSession(
command_line_arguments=self._command_line_arguments,
preferred_encoding=self.preferred_encoding)
storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(
self._storage_file_path)
if not storage_reader:
logger.error('Format of storage file: {0:s} not supported'.format(
self._storage_file_path))
return
self._number_of_analysis_reports = (
storage_reader.GetNumberOfAnalysisReports())
storage_reader.Close()
configuration = self._CreateProcessingConfiguration(
self._knowledge_base)
counter = collections.Counter()
if self._output_format != 'null':
self._status_view.SetMode(self._status_view_mode)
self._status_view.SetStorageFileInformation(self._storage_file_path)
status_update_callback = (
self._status_view.GetAnalysisStatusUpdateCallback())
storage_reader = (
storage_factory.StorageFactory.CreateStorageReaderForFile(
self._storage_file_path))
# TODO: add single processing support.
analysis_engine = psort.PsortMultiProcessEngine(
use_zeromq=self._use_zeromq)
analysis_engine.ExportEvents(
self._knowledge_base, storage_reader, self._output_module,
configuration, deduplicate_events=self._deduplicate_events,
status_update_callback=status_update_callback,
time_slice=self._time_slice, use_time_slicer=self._use_time_slicer)
for item, value in iter(session.analysis_reports_counter.items()):
counter[item] = value
if self._quiet_mode:
return
self._output_writer.Write('Processing completed.\n')
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title='Counter')
for element, count in counter.most_common():
if not element:
element = 'N/A'
table_view.AddRow([element, count])
table_view.Write(self._output_writer)
storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(
self._storage_file_path)
self._PrintAnalysisReportsDetails(
storage_reader, self._number_of_analysis_reports)
self._output_writer.Write('Storage file is {0:s}\n'.format(
self._storage_file_path))
|
python
|
{
"resource": ""
}
|
q25359
|
PstealTool.ParseOptions
|
train
|
def ParseOptions(self, options):
"""Parses tool specific options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
# The extraction options are dependent on the data location.
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['data_location'])
self._ReadParserPresetsFromFile()
# The output modules options are dependent on the preferred language
# and preferred time zone options.
self._ParseTimezoneOption(options)
argument_helper_names = [
'artifact_definitions', 'hashers', 'language', 'parsers']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self.list_hashers = self._hasher_names_string == 'list'
self.list_language_identifiers = self._preferred_language == 'list'
self.list_parsers_and_plugins = self._parser_filter_expression == 'list'
# Check the list options first otherwise required options will raise.
if (self.list_hashers or self.list_language_identifiers or
self.list_parsers_and_plugins or self.list_timezones):
return
# Check output modules after the other listable options, otherwise
# it could raise with "requires an output file".
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['output_modules'])
self.list_output_modules = self._output_format == 'list'
if self.list_output_modules:
return
self._ParseInformationalOptions(options)
argument_helper_names = ['extraction', 'status_view']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._ParsePerformanceOptions(options)
self._ParseProcessingOptions(options)
self._storage_file_path = getattr(options, 'storage_file', None)
if not self._storage_file_path:
self._storage_file_path = self._GenerateStorageFileName()
self._output_filename = getattr(options, 'write', None)
if not self._output_filename:
raise errors.BadConfigOption((
'Output format: {0:s} requires an output file '
'(-w OUTPUT_FILE)').format(self._output_format))
if os.path.exists(self._output_filename):
raise errors.BadConfigOption(
'Output file already exists: {0:s}.'.format(self._output_filename))
self._EnforceProcessMemoryLimit(self._process_memory_limit)
self._output_module = self._CreateOutputModule(options)
|
python
|
{
"resource": ""
}
|
q25360
|
TLNBaseOutputModule._FormatDescription
|
train
|
def _FormatDescription(self, event):
"""Formats the description.
Args:
event (EventObject): event.
Returns:
str: formatted description field.
"""
date_time_string = timelib.Timestamp.CopyToIsoFormat(
event.timestamp, timezone=self._output_mediator.timezone)
timestamp_description = event.timestamp_desc or 'UNKNOWN'
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
description = '{0:s}; {1:s}; {2:s}'.format(
date_time_string, timestamp_description,
message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))
return self._SanitizeField(description)
|
python
|
{
"resource": ""
}
|
q25361
|
L2TTLNOutputModule._FormatNotes
|
train
|
def _FormatNotes(self, event):
"""Formats the notes.
Args:
event (EventObject): event.
Returns:
str: formatted notes field.
"""
inode = event.inode
if inode is None:
inode = '-'
notes = getattr(event, 'notes', '')
if not notes:
display_name = getattr(event, 'display_name', '')
notes = 'File: {0:s} inode: {1!s}'.format(display_name, inode)
return self._SanitizeField(notes)
|
python
|
{
"resource": ""
}
|
q25362
|
TwitterAndroidPlugin.ParseSearchRow
|
train
|
def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a search row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TwitterAndroidSearchEventData()
event_data.query = query
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.search_query = self._GetRowValue(query_hash, row, 'query')
timestamp = self._GetRowValue(query_hash, row, 'time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25363
|
JSONAttributeContainerSerializer._ConvertAttributeContainerToDict
|
train
|
def _ConvertAttributeContainerToDict(cls, attribute_container):
"""Converts an attribute container object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the container type and the rest of the elements
of the dictionary make up the attributes of the container.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of AttributeContainer.
ValueError: if the attribute container type is not supported.
"""
if not isinstance(
attribute_container, containers_interface.AttributeContainer):
raise TypeError('{0:s} is not an attribute container type.'.format(
type(attribute_container)))
container_type = getattr(attribute_container, 'CONTAINER_TYPE', None)
if not container_type:
raise ValueError('Unsupported attribute container type: {0:s}.'.format(
type(attribute_container)))
json_dict = {
'__type__': 'AttributeContainer',
'__container_type__': container_type,
}
for attribute_name, attribute_value in attribute_container.GetAttributes():
json_dict[attribute_name] = cls._ConvertAttributeValueToDict(
attribute_value)
return json_dict
|
python
|
{
"resource": ""
}
|
q25364
|
JSONAttributeContainerSerializer._ConvertAttributeValueToDict
|
train
|
def _ConvertAttributeValueToDict(cls, attribute_value):
"""Converts an attribute value into a JSON dictionary.
Args:
attribute_value (object): an attribute value.
Returns:
dict|list: The JSON serialized object which can be a dictionary or a list.
"""
if isinstance(attribute_value, py2to3.BYTES_TYPE):
encoded_value = binascii.b2a_qp(attribute_value)
encoded_value = codecs.decode(encoded_value, 'ascii')
attribute_value = {
'__type__': 'bytes',
'stream': '{0:s}'.format(encoded_value)
}
elif isinstance(attribute_value, (list, tuple)):
json_list = []
for list_element in attribute_value:
json_dict = cls._ConvertAttributeValueToDict(list_element)
json_list.append(json_dict)
if isinstance(attribute_value, list):
attribute_value = json_list
else:
attribute_value = {
'__type__': 'tuple',
'values': json_list
}
elif isinstance(attribute_value, collections.Counter):
attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)
elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):
attribute_value = cls._ConvertPathSpecToDict(attribute_value)
elif isinstance(attribute_value, containers_interface.AttributeContainer):
attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)
return attribute_value
|
python
|
{
"resource": ""
}
|
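The bytes branch in _ConvertAttributeValueToDict above relies on quoted-printable encoding so that arbitrary binary values survive JSON. A minimal standalone round-trip sketch of that idea with the standard library; the sample payload is arbitrary.

import binascii
import codecs
import json

original = b'\x00\x01plaso\xff'
# Encode the binary value as quoted-printable ASCII text so JSON can carry it.
encoded = codecs.decode(binascii.b2a_qp(original), 'ascii')
json_string = json.dumps({'__type__': 'bytes', 'stream': encoded})

# On read, the 'stream' value is decoded back into the original bytes.
decoded_dict = json.loads(json_string)
restored = binascii.a2b_qp(decoded_dict['stream'])
assert restored == original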
q25365
|
JSONAttributeContainerSerializer._ConvertCollectionsCounterToDict
|
train
|
def _ConvertCollectionsCounterToDict(cls, collections_counter):
"""Converts a collections.Counter object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'collections.Counter'
...
}
Here '__type__' indicates the object base type. In this case
'collections.Counter'. The rest of the elements of the dictionary make up
the collections.Counter object attributes.
Args:
collections_counter (collections.Counter): counter.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of collections.Counter.
"""
if not isinstance(collections_counter, collections.Counter):
raise TypeError
json_dict = {'__type__': 'collections.Counter'}
for attribute_name, attribute_value in iter(collections_counter.items()):
if attribute_value is None:
continue
if isinstance(attribute_value, py2to3.BYTES_TYPE):
attribute_value = {
'__type__': 'bytes',
'stream': '{0:s}'.format(binascii.b2a_qp(attribute_value))
}
json_dict[attribute_name] = attribute_value
return json_dict
|
python
|
{
"resource": ""
}
|
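_ConvertCollectionsCounterToDict flattens a counter into a plain dictionary with a '__type__' marker. A standalone sketch of the round trip, with made-up counter keys; the real deserialization lives in _ConvertDictToCollectionsCounter further down.

import collections

counter = collections.Counter({'events': 3, 'warnings': 1})

# Serialize: tag the dictionary and copy the counter items into it.
json_dict = {'__type__': 'collections.Counter'}
json_dict.update(counter)

# Deserialize: rebuild the counter while skipping the type marker.
restored = collections.Counter(
    {key: value for key, value in json_dict.items() if key != '__type__'})
assert restored == counter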
q25366
|
JSONAttributeContainerSerializer._ConvertDictToObject
|
train
|
def _ConvertDictToObject(cls, json_dict):
"""Converts a JSON dict into an object.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the attribute container type.
The rest of the elements of the dictionary make up the attributes.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer|dict|list|tuple: deserialized object.
Raises:
ValueError: if the class type or container type is not supported.
"""
# Use __type__ to indicate the object class type.
class_type = json_dict.get('__type__', None)
if not class_type:
# Dealing with a regular dict.
return json_dict
if class_type == 'bytes':
return binascii.a2b_qp(json_dict['stream'])
if class_type == 'tuple':
return tuple(cls._ConvertListToObject(json_dict['values']))
if class_type == 'collections.Counter':
return cls._ConvertDictToCollectionsCounter(json_dict)
if class_type == 'AttributeContainer':
# Use __container_type__ to indicate the attribute container type.
container_type = json_dict.get('__container_type__', None)
# Since we would like the JSON as flat as possible we handle decoding
# a path specification.
elif class_type == 'PathSpec':
return cls._ConvertDictToPathSpec(json_dict)
else:
raise ValueError('Unsupported class type: {0:s}'.format(class_type))
container_class = (
containers_manager.AttributeContainersManager.GetAttributeContainer(
container_type))
if not container_class:
raise ValueError('Unsupported container type: {0:s}'.format(
container_type))
container_object = container_class()
supported_attribute_names = container_object.GetAttributeNames()
for attribute_name, attribute_value in iter(json_dict.items()):
# Be strict about which attributes to set in non event values.
if (container_type not in ('event', 'event_data') and
attribute_name not in supported_attribute_names):
if attribute_name not in ('__container_type__', '__type__'):
logger.debug((
'[ConvertDictToObject] unsupported attribute name: '
'{0:s}.{1:s}').format(container_type, attribute_name))
continue
if isinstance(attribute_value, dict):
attribute_value = cls._ConvertDictToObject(attribute_value)
elif isinstance(attribute_value, list):
attribute_value = cls._ConvertListToObject(attribute_value)
setattr(container_object, attribute_name, attribute_value)
return container_object
|
python
|
{
"resource": ""
}
|
q25367
|
JSONAttributeContainerSerializer._ConvertDictToCollectionsCounter
|
train
|
def _ConvertDictToCollectionsCounter(cls, json_dict):
"""Converts a JSON dict into a collections.Counter.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'collections.Counter'
...
}
Here '__type__' indicates the object base type. In this case this should
be 'collections.Counter'. The rest of the elements of the dictionary make up
    the collections.Counter keys and values.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
collections.Counter: counter.
"""
collections_counter = collections.Counter()
for key, value in iter(json_dict.items()):
if key == '__type__':
continue
collections_counter[key] = value
return collections_counter
|
python
|
{
"resource": ""
}
|
q25368
|
JSONAttributeContainerSerializer._ConvertListToObject
|
train
|
def _ConvertListToObject(cls, json_list):
"""Converts a JSON list into an object.
Args:
json_list (list[object]): JSON serialized objects.
Returns:
list[object]: a deserialized list.
"""
list_value = []
for json_list_element in json_list:
if isinstance(json_list_element, dict):
list_value.append(cls._ConvertDictToObject(json_list_element))
elif isinstance(json_list_element, list):
list_value.append(cls._ConvertListToObject(json_list_element))
else:
list_value.append(json_list_element)
return list_value
|
python
|
{
"resource": ""
}
|
q25369
|
JSONAttributeContainerSerializer.ReadSerialized
|
train
|
def ReadSerialized(cls, json_string): # pylint: disable=arguments-differ
"""Reads an attribute container from serialized form.
Args:
json_string (str): JSON serialized attribute container.
Returns:
AttributeContainer: attribute container or None.
"""
if json_string:
json_dict = json.loads(json_string)
return cls.ReadSerializedDict(json_dict)
return None
|
python
|
{
"resource": ""
}
|
q25370
|
JSONAttributeContainerSerializer.ReadSerializedDict
|
train
|
def ReadSerializedDict(cls, json_dict):
"""Reads an attribute container from serialized dictionary form.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer: attribute container or None.
Raises:
TypeError: if the serialized dictionary does not contain an
AttributeContainer.
"""
if json_dict:
json_object = cls._ConvertDictToObject(json_dict)
if not isinstance(json_object, containers_interface.AttributeContainer):
raise TypeError('{0:s} is not an attribute container type.'.format(
type(json_object)))
return json_object
return None
|
python
|
{
"resource": ""
}
|
q25371
|
JSONAttributeContainerSerializer.WriteSerialized
|
train
|
def WriteSerialized(cls, attribute_container):
"""Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form.
"""
json_dict = cls.WriteSerializedDict(attribute_container)
return json.dumps(json_dict)
|
python
|
{
"resource": ""
}
|
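WriteSerialized and ReadSerialized above are thin wrappers around json.dumps and json.loads applied to the tagged dictionaries built by the helper methods earlier in this section. A minimal sketch of that round trip on a hand-written dictionary; the attribute names below only mimic the shape and are not produced by plaso itself.

import json

# Hand-written dictionary in the tagged shape the serializer produces.
json_dict = {
    '__type__': 'AttributeContainer',
    '__container_type__': 'event_data',
    'data_type': 'fs:stat',
    'filename': '/var/log/syslog',
}

json_string = json.dumps(json_dict)
restored = json.loads(json_string)
# The '__container_type__' tag is what drives reconstruction on read.
assert restored['__container_type__'] == 'event_data'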
q25372
|
OutputModule._ReportEventError
|
train
|
def _ReportEventError(self, event, error_message):
"""Reports an event related error.
Args:
event (EventObject): event.
error_message (str): error message.
"""
event_identifier = event.GetIdentifier()
event_identifier_string = event_identifier.CopyToString()
display_name = getattr(event, 'display_name', None) or 'N/A'
parser_chain = getattr(event, 'parser', None) or 'N/A'
error_message = (
'Event: {0!s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier_string, event.data_type, display_name,
parser_chain, error_message)
logger.error(error_message)
|
python
|
{
"resource": ""
}
|
q25373
|
OutputModule.WriteEvent
|
train
|
def WriteEvent(self, event):
"""Writes the event to the output.
Args:
event (EventObject): event.
"""
self.WriteEventStart()
try:
self.WriteEventBody(event)
except errors.NoFormatterFound as exception:
error_message = 'unable to retrieve formatter with error: {0!s}'.format(
exception)
self._ReportEventError(event, error_message)
except errors.WrongFormatter as exception:
error_message = 'wrong formatter with error: {0!s}'.format(exception)
self._ReportEventError(event, error_message)
self.WriteEventEnd()
|
python
|
{
"resource": ""
}
|
q25374
|
DpkgParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._DPKG_LOG_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug(
'Unable to parse Debian dpkg.log file with error: {0!s}'.format(
exception))
return False
return 'date_time' in structure and 'body' in structure
|
python
|
{
"resource": ""
}
|
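DpkgParser.VerifyStructure applies a pyparsing grammar to a single line and only accepts the file when the expected named results are present. A simplified standalone sketch of the same pattern, assuming pyparsing is installed; the grammar and the sample line are illustrative and much looser than the parser's real _DPKG_LOG_LINE definition.

import pyparsing

_DATE_TIME = pyparsing.Regex(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}')
_LOG_LINE = (
    _DATE_TIME.setResultsName('date_time') +
    pyparsing.restOfLine.setResultsName('body'))

def looks_like_dpkg_line(line):
  # Attempt the parse; any mismatch means this is not a dpkg.log style line.
  try:
    structure = _LOG_LINE.parseString(line)
  except pyparsing.ParseException:
    return False
  return 'date_time' in structure and 'body' in structure

print(looks_like_dpkg_line('2016-08-03 15:25:53 install base-passwd:amd64'))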
q25375
|
ParserPresetsManager._ReadOperatingSystemArtifactValues
|
train
|
def _ReadOperatingSystemArtifactValues(self, operating_system_values):
"""Reads an operating system artifact from a dictionary.
Args:
operating_system_values (dict[str, object]): operating system values.
Returns:
OperatingSystemArtifact: an operating system artifact attribute container.
Raises:
      MalformedPresetError: if the format of the operating system values is
not set or incorrect.
"""
if not operating_system_values:
raise errors.MalformedPresetError('Missing operating system values.')
family = operating_system_values.get('family', None)
product = operating_system_values.get('product', None)
version = operating_system_values.get('version', None)
if not family and not product:
raise errors.MalformedPresetError(
'Invalid operating system missing family and product.')
return artifacts.OperatingSystemArtifact(
family=family, product=product, version=version)
|
python
|
{
"resource": ""
}
|
q25376
|
ParserPresetsManager._ReadParserPresetValues
|
train
|
def _ReadParserPresetValues(self, preset_definition_values):
"""Reads a parser preset from a dictionary.
Args:
preset_definition_values (dict[str, object]): preset definition values.
Returns:
ParserPreset: a parser preset.
Raises:
MalformedPresetError: if the format of the preset definition is not set
or incorrect, or the preset of a specific operating system has already
been set.
"""
if not preset_definition_values:
raise errors.MalformedPresetError('Missing preset definition values.')
name = preset_definition_values.get('name', None)
if not name:
raise errors.MalformedPresetError(
'Invalid preset definition missing name.')
parsers = preset_definition_values.get('parsers', None)
if not parsers:
raise errors.MalformedPresetError(
'Invalid preset definition missing parsers.')
parser_preset = ParserPreset(name, parsers)
for operating_system_values in preset_definition_values.get(
'operating_systems', []):
operating_system = self._ReadOperatingSystemArtifactValues(
operating_system_values)
parser_preset.operating_systems.append(operating_system)
return parser_preset
|
python
|
{
"resource": ""
}
|
q25377
|
ParserPresetsManager._ReadPresetsFromFileObject
|
train
|
def _ReadPresetsFromFileObject(self, file_object):
"""Reads parser and parser plugin presets from a file-like object.
Args:
file_object (file): file-like object containing the parser and parser
plugin presets definitions.
Yields:
ParserPreset: a parser preset.
Raises:
MalformedPresetError: if one or more plugin preset definitions are
malformed.
"""
yaml_generator = yaml.safe_load_all(file_object)
last_preset_definition = None
for yaml_definition in yaml_generator:
try:
preset_definition = self._ReadParserPresetValues(yaml_definition)
except errors.MalformedPresetError as exception:
error_location = 'At start'
if last_preset_definition:
error_location = 'After: {0:s}'.format(last_preset_definition.name)
raise errors.MalformedPresetError(
'{0:s} {1!s}'.format(error_location, exception))
yield preset_definition
last_preset_definition = preset_definition
|
python
|
{
"resource": ""
}
|
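_ReadPresetsFromFileObject streams '---' separated YAML documents with yaml.safe_load_all and validates each one as it arrives. A standalone sketch of that streaming pattern, assuming PyYAML is installed; the two preset documents are invented for illustration.

import io
import yaml

yaml_data = io.StringIO(
    'name: linux\nparsers: [syslog]\n'
    '---\n'
    'name: macos\nparsers: [plist, sqlite]\n')

# Each document is parsed lazily and can be validated before moving on.
for document in yaml.safe_load_all(yaml_data):
  if not document or 'name' not in document:
    raise ValueError('malformed preset definition')
  print(document['name'], document['parsers'])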
q25378
|
ParserPresetsManager.GetPresetByName
|
train
|
def GetPresetByName(self, name):
"""Retrieves a specific preset definition by name.
Args:
name (str): name of the preset.
Returns:
ParserPreset: a parser preset or None if not available.
"""
name = name.lower()
return self._definitions.get(name, None)
|
python
|
{
"resource": ""
}
|
q25379
|
ParserPresetsManager.GetPresetsByOperatingSystem
|
train
|
def GetPresetsByOperatingSystem(self, operating_system):
"""Retrieves preset definitions for a specific operating system.
Args:
operating_system (OperatingSystemArtifact): an operating system artifact
attribute container.
Returns:
      list[ParserPreset]: parser presets that correspond with the
        operating system.
"""
preset_definitions = []
for preset_definition in self._definitions.values():
for preset_operating_system in preset_definition.operating_systems:
if preset_operating_system.IsEquivalent(operating_system):
preset_definitions.append(preset_definition)
return preset_definitions
|
python
|
{
"resource": ""
}
|
q25380
|
ParserPresetsManager.ReadFromFile
|
train
|
def ReadFromFile(self, path):
"""Reads parser and parser plugin presets from a file.
Args:
      path (str): path of file that contains the parser and parser plugin
presets configuration.
Raises:
MalformedPresetError: if one or more plugin preset definitions are
malformed.
"""
self._definitions = {}
with open(path, 'r') as file_object:
for preset_definition in self._ReadPresetsFromFileObject(file_object):
self._definitions[preset_definition.name] = preset_definition
|
python
|
{
"resource": ""
}
|
q25381
|
WorkerProcess._ProcessTask
|
train
|
def _ProcessTask(self, task):
"""Processes a task.
Args:
task (Task): task.
"""
logger.debug('Started processing task: {0:s}.'.format(task.identifier))
if self._tasks_profiler:
self._tasks_profiler.Sample(task, 'processing_started')
self._task = task
storage_writer = self._storage_writer.CreateTaskStorage(task)
if self._serializers_profiler:
storage_writer.SetSerializersProfiler(self._serializers_profiler)
storage_writer.Open()
self._parser_mediator.SetStorageWriter(storage_writer)
storage_writer.WriteTaskStart()
try:
# TODO: add support for more task types.
self._ProcessPathSpec(
self._extraction_worker, self._parser_mediator, task.path_spec)
self._number_of_consumed_sources += 1
if self._guppy_memory_profiler:
self._guppy_memory_profiler.Sample()
finally:
storage_writer.WriteTaskCompletion(aborted=self._abort)
self._parser_mediator.SetStorageWriter(None)
storage_writer.Close()
try:
self._storage_writer.FinalizeTaskStorage(task)
except IOError:
pass
self._task = None
if self._tasks_profiler:
self._tasks_profiler.Sample(task, 'processing_completed')
logger.debug('Completed processing task: {0:s}.'.format(task.identifier))
|
python
|
{
"resource": ""
}
|
q25382
|
OperatingSystemArtifact._GetNameFromProduct
|
train
|
def _GetNameFromProduct(self):
"""Determines the predefined operating system name from the product.
Returns:
str: operating system name, such as "macOS Mojave" or "Windows XP" or
None if the name cannot be determined. This value is used to
programmatically link a parser preset to an operating system and
therefore must be one of predefined values.
"""
product = self.product or ''
product = product.split(' ')
product_lower_case = [segment.lower() for segment in product]
number_of_segments = len(product)
if 'windows' in product_lower_case:
segment_index = product_lower_case.index('windows') + 1
if product_lower_case[segment_index] in ('(r)', 'server'):
segment_index += 1
# Check if the version has a suffix.
suffix_segment_index = segment_index + 1
if (suffix_segment_index < number_of_segments and
product_lower_case[suffix_segment_index] == 'r2'):
return 'Windows {0:s} R2'.format(product[segment_index])
return 'Windows {0:s}'.format(product[segment_index])
return None
|
python
|
{
"resource": ""
}
|
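_GetNameFromProduct walks the product string segment by segment to derive a predefined Windows name. A simplified standalone re-implementation of that walk, with extra bounds checks and invented sample product strings; the helper name windows_name_from_product is not part of plaso.

def windows_name_from_product(product):
  segments = (product or '').split(' ')
  lower = [segment.lower() for segment in segments]
  if 'windows' not in lower:
    return None
  # The version segment follows 'windows', optionally skipping '(r)'/'server'.
  index = lower.index('windows') + 1
  if index < len(lower) and lower[index] in ('(r)', 'server'):
    index += 1
  if index >= len(lower):
    return None
  # Append the 'R2' suffix when present.
  if index + 1 < len(lower) and lower[index + 1] == 'r2':
    return 'Windows {0:s} R2'.format(segments[index])
  return 'Windows {0:s}'.format(segments[index])

print(windows_name_from_product('Microsoft Windows Server 2012 R2 Standard'))
print(windows_name_from_product('Windows 7 Professional'))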
q25383
|
OperatingSystemArtifact.IsEquivalent
|
train
|
def IsEquivalent(self, other):
"""Determines if 2 operating system artifacts are equivalent.
    This function compares the operating systems, in order, based on:
* name derived from product
* family and version
* family
Args:
other (OperatingSystemArtifact): operating system artifact attribute
container to compare with.
Returns:
bool: True if the operating systems are considered equivalent, False if
        the most specific criteria do not match, or no criteria are available.
"""
if self.name and other.name:
return self.name == other.name
if self.name:
self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(
self.name, self._DEFAULT_FAMILY_AND_VERSION)
return (
self_family == other.family and
self_version_tuple == other.version_tuple)
if self.family and self.version:
if other.name:
other_family, other_version_tuple = (
self._FAMILY_AND_VERSION_PER_NAME.get(
other.name, self._DEFAULT_FAMILY_AND_VERSION))
else:
other_family = other.family
other_version_tuple = other.version_tuple
return (
self.family == other_family and
self.version_tuple == other_version_tuple)
if self.family:
if other.name:
other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(
other.name, self._DEFAULT_FAMILY_AND_VERSION)
else:
other_family = other.family
return self.family == other_family
return False
|
python
|
{
"resource": ""
}
|
q25384
|
BaseEngine._DetermineOperatingSystem
|
train
|
def _DetermineOperatingSystem(self, searcher):
"""Tries to determine the underlying operating system.
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
Returns:
      str: operating system, for example "Windows". This should be one of
the values in definitions.OPERATING_SYSTEM_FAMILIES.
"""
find_specs = [
file_system_searcher.FindSpec(
location='/etc', case_sensitive=False),
file_system_searcher.FindSpec(
location='/System/Library', case_sensitive=False),
file_system_searcher.FindSpec(
location='/Windows/System32', case_sensitive=False),
file_system_searcher.FindSpec(
location='/WINNT/System32', case_sensitive=False),
file_system_searcher.FindSpec(
location='/WINNT35/System32', case_sensitive=False),
file_system_searcher.FindSpec(
location='/WTSRV/System32', case_sensitive=False)]
locations = []
for path_spec in searcher.Find(find_specs=find_specs):
relative_path = searcher.GetRelativePath(path_spec)
if relative_path:
locations.append(relative_path.lower())
# We need to check for both forward and backward slashes since the path
# spec will be OS dependent, as in running the tool on Windows will return
# Windows paths (backward slash) vs. forward slash on *NIX systems.
windows_locations = set([
'/windows/system32', '\\windows\\system32', '/winnt/system32',
'\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32',
'\\wtsrv\\system32', '/wtsrv/system32'])
operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
if windows_locations.intersection(set(locations)):
operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT
elif '/system/library' in locations:
operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS
elif '/etc' in locations:
operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX
return operating_system
|
python
|
{
"resource": ""
}
|
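_DetermineOperatingSystem reduces operating system detection to a set intersection over the lowercased paths found on the image. A standalone sketch of that check; the classify helper, the trimmed marker set and the return strings are illustrative stand-ins for the definitions constants used in plaso.

# Known marker locations, in both path separator styles.
WINDOWS_LOCATIONS = {
    '/windows/system32', '\\windows\\system32', '/winnt/system32',
    '\\winnt\\system32'}

def classify(found_locations):
  locations = {location.lower() for location in found_locations}
  if WINDOWS_LOCATIONS & locations:
    return 'Windows NT'
  if '/system/library' in locations:
    return 'MacOS'
  if '/etc' in locations:
    return 'Linux'
  return 'Unknown'

print(classify(['/Windows/System32', '/Windows/Temp']))  # Windows NT
print(classify(['/etc', '/usr/lib']))                    # Linux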
q25385
|
BaseEngine.CreateSession
|
train
|
def CreateSession(
cls, artifact_filter_names=None, command_line_arguments=None,
debug_mode=False, filter_file_path=None, preferred_encoding='utf-8',
preferred_time_zone=None, preferred_year=None):
"""Creates a session attribute container.
Args:
artifact_filter_names (Optional[list[str]]): names of artifact definitions
that are used for filtering file system and Windows Registry
key paths.
command_line_arguments (Optional[str]): the command line arguments.
debug_mode (bool): True if debug mode was enabled.
filter_file_path (Optional[str]): path to a file with find specifications.
preferred_encoding (Optional[str]): preferred encoding.
preferred_time_zone (Optional[str]): preferred time zone.
preferred_year (Optional[int]): preferred year.
Returns:
Session: session attribute container.
"""
session = sessions.Session()
session.artifact_filters = artifact_filter_names
session.command_line_arguments = command_line_arguments
session.debug_mode = debug_mode
session.filter_file = filter_file_path
session.preferred_encoding = preferred_encoding
session.preferred_time_zone = preferred_time_zone
session.preferred_year = preferred_year
return session
|
python
|
{
"resource": ""
}
|
q25386
|
BaseEngine.BuildFilterFindSpecs
|
train
|
def BuildFilterFindSpecs(
self, artifact_definitions_path, custom_artifacts_path,
knowledge_base_object, artifact_filter_names=None, filter_file_path=None):
"""Builds find specifications from artifacts or filter file if available.
Args:
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
knowledge_base_object (KnowledgeBase): knowledge base.
artifact_filter_names (Optional[list[str]]): names of artifact
definitions that are used for filtering file system and Windows
Registry key paths.
filter_file_path (Optional[str]): path of filter file.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
Raises:
InvalidFilter: if no valid FindSpecs are built.
"""
environment_variables = knowledge_base_object.GetEnvironmentVariables()
find_specs = None
if artifact_filter_names:
logger.debug(
'building find specification based on artifacts: {0:s}'.format(
', '.join(artifact_filter_names)))
artifacts_registry_object = BaseEngine.BuildArtifactsRegistry(
artifact_definitions_path, custom_artifacts_path)
self._artifacts_filter_helper = (
artifact_filters.ArtifactDefinitionsFilterHelper(
artifacts_registry_object, knowledge_base_object))
self._artifacts_filter_helper.BuildFindSpecs(
artifact_filter_names, environment_variables=environment_variables)
# If the user selected Windows Registry artifacts we have to ensure
# the Windows Registry files are parsed.
if self._artifacts_filter_helper.registry_find_specs:
self._artifacts_filter_helper.BuildFindSpecs(
self._WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES,
environment_variables=environment_variables)
find_specs = self._artifacts_filter_helper.file_system_find_specs
if not find_specs:
raise errors.InvalidFilter(
'No valid file system find specifications were built from '
'artifacts.')
elif filter_file_path:
logger.debug(
'building find specification based on filter file: {0:s}'.format(
filter_file_path))
filter_file_object = filter_file.FilterFile(filter_file_path)
find_specs = filter_file_object.BuildFindSpecs(
environment_variables=environment_variables)
if not find_specs:
raise errors.InvalidFilter(
'No valid file system find specifications were built from filter '
'file.')
return find_specs
|
python
|
{
"resource": ""
}
|
q25387
|
BaseEngine.BuildArtifactsRegistry
|
train
|
def BuildArtifactsRegistry(
cls, artifact_definitions_path, custom_artifacts_path):
"""Build Find Specs from artifacts or filter file if available.
Args:
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
Returns:
artifacts.ArtifactDefinitionsRegistry: artifact definitions registry.
Raises:
      BadConfigOption: if the artifact definitions cannot be read.
"""
if artifact_definitions_path and not os.path.isdir(
artifact_definitions_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(
artifact_definitions_path))
if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))
registry = artifacts_registry.ArtifactDefinitionsRegistry()
reader = artifacts_reader.YamlArtifactsReader()
try:
registry.ReadFromDirectory(reader, artifact_definitions_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(artifact_definitions_path, exception))
if custom_artifacts_path:
try:
registry.ReadFromFile(reader, custom_artifacts_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(custom_artifacts_path, exception))
return registry
|
python
|
{
"resource": ""
}
|
q25388
|
OLECFPlugin._GetTimestamps
|
train
|
def _GetTimestamps(self, olecf_item):
"""Retrieves the timestamps from an OLECF item.
Args:
olecf_item (pyolecf.item): OLECF item.
Returns:
tuple[int, int]: creation and modification FILETIME timestamp.
"""
if not olecf_item:
return None, None
try:
creation_time = olecf_item.get_creation_time_as_integer()
except OverflowError as exception:
logger.warning(
'Unable to read the creation time with error: {0!s}'.format(
exception))
creation_time = 0
try:
modification_time = olecf_item.get_modification_time_as_integer()
except OverflowError as exception:
logger.warning(
'Unable to read the modification time with error: {0!s}'.format(
exception))
modification_time = 0
# If no useful events, return early.
if not creation_time and not modification_time:
return None, None
# Office template documents sometimes contain a creation time
# of -1 (0xffffffffffffffff).
if creation_time == 0xffffffffffffffff:
creation_time = 0
return creation_time, modification_time
|
python
|
{
"resource": ""
}
|
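_GetTimestamps returns raw FILETIME integers, that is counts of 100-nanosecond intervals since 1601-01-01. A standalone sketch of what such a value means, converted with the standard library; the helper name and the sample integer are illustrative.

import datetime

def filetime_to_datetime(filetime):
  # 0 and the all-ones sentinel mean "no usable timestamp".
  if not filetime or filetime == 0xffffffffffffffff:
    return None
  # FILETIME counts 100ns intervals; drop the sub-microsecond remainder.
  microseconds, _ = divmod(filetime, 10)
  return datetime.datetime(1601, 1, 1) + datetime.timedelta(
      microseconds=microseconds)

print(filetime_to_datetime(130286328824242630))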
q25389
|
HashersManager.GetHasherNamesFromString
|
train
|
def GetHasherNamesFromString(cls, hasher_names_string):
"""Retrieves a list of a hasher names from a comma separated string.
Takes a string of comma separated hasher names transforms it to a list of
hasher names.
Args:
hasher_names_string (str): comma separated names of hashers to enable,
the string 'all' to enable all hashers or 'none' to disable all
hashers.
Returns:
list[str]: names of valid hashers from the string, or an empty list if no
valid names are found.
"""
hasher_names = []
if not hasher_names_string or hasher_names_string.strip() == 'none':
return hasher_names
if hasher_names_string.strip() == 'all':
return cls.GetHasherNames()
for hasher_name in hasher_names_string.split(','):
hasher_name = hasher_name.strip()
if not hasher_name:
continue
hasher_name = hasher_name.lower()
if hasher_name in cls._hasher_classes:
hasher_names.append(hasher_name)
return hasher_names
|
python
|
{
"resource": ""
}
|
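GetHasherNamesFromString normalizes a comma separated string against the registered hasher classes, with 'all' and 'none' shortcuts. A standalone sketch of the same parsing over a stand-in registry; REGISTERED_HASHERS and hasher_names_from_string are invented for the example.

REGISTERED_HASHERS = {'md5': None, 'sha1': None, 'sha256': None}

def hasher_names_from_string(names_string):
  if not names_string or names_string.strip() == 'none':
    return []
  if names_string.strip() == 'all':
    return sorted(REGISTERED_HASHERS)
  names = []
  for name in names_string.split(','):
    # Normalize and keep only names that are actually registered.
    name = name.strip().lower()
    if name and name in REGISTERED_HASHERS:
      names.append(name)
  return names

print(hasher_names_from_string('MD5, sha256, unknown'))  # ['md5', 'sha256']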
q25390
|
HashersManager.GetHashersInformation
|
train
|
def GetHashersInformation(cls):
"""Retrieves the hashers information.
Returns:
list[tuple]: containing:
str: hasher name.
str: hasher description.
"""
hashers_information = []
for _, hasher_class in cls.GetHasherClasses():
description = getattr(hasher_class, 'DESCRIPTION', '')
hashers_information.append((hasher_class.NAME, description))
return hashers_information
|
python
|
{
"resource": ""
}
|
q25391
|
HashersManager.GetHasher
|
train
|
def GetHasher(cls, hasher_name):
"""Retrieves an instance of a specific hasher.
Args:
hasher_name (str): the name of the hasher to retrieve.
Returns:
BaseHasher: hasher.
Raises:
KeyError: if hasher class is not set for the corresponding name.
"""
hasher_name = hasher_name.lower()
if hasher_name not in cls._hasher_classes:
raise KeyError(
'hasher class not set for name: {0:s}.'.format(hasher_name))
hasher_class = cls._hasher_classes[hasher_name]
return hasher_class()
|
python
|
{
"resource": ""
}
|
q25392
|
HashersManager.GetHashers
|
train
|
def GetHashers(cls, hasher_names):
"""Retrieves instances for all the specified hashers.
Args:
hasher_names (list[str]): names of the hashers to retrieve.
Returns:
list[BaseHasher]: hashers.
"""
hashers = []
for hasher_name, hasher_class in iter(cls._hasher_classes.items()):
if hasher_name in hasher_names:
hashers.append(hasher_class())
return hashers
|
python
|
{
"resource": ""
}
|
q25393
|
HashersManager.GetHasherClasses
|
train
|
def GetHasherClasses(cls, hasher_names=None):
"""Retrieves the registered hashers.
Args:
hasher_names (list[str]): names of the hashers to retrieve.
Yields:
tuple: containing:
        str: hasher name.
type: next hasher class.
"""
for hasher_name, hasher_class in iter(cls._hasher_classes.items()):
if not hasher_names or hasher_name in hasher_names:
yield hasher_name, hasher_class
|
python
|
{
"resource": ""
}
|
q25394
|
HashersManager.RegisterHasher
|
train
|
def RegisterHasher(cls, hasher_class):
"""Registers a hasher class.
The hasher classes are identified based on their lower case name.
Args:
hasher_class (type): class object of the hasher.
Raises:
KeyError: if hasher class is already set for the corresponding name.
"""
hasher_name = hasher_class.NAME.lower()
if hasher_name in cls._hasher_classes:
raise KeyError((
'hasher class already set for name: {0:s}.').format(
hasher_class.NAME))
cls._hasher_classes[hasher_name] = hasher_class
|
python
|
{
"resource": ""
}
|
q25395
|
SCCMParser._GetISO8601String
|
train
|
def _GetISO8601String(self, structure):
"""Retrieves an ISO8601 date time string from the structure.
The date and time values in the SCCM log are formatted as:
time="19:33:19.766-330" date="11-28-2014"
Args:
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
"""
fraction_of_second_length = len(structure.fraction_of_second)
if fraction_of_second_length not in (3, 6, 7):
raise ValueError(
'unsupported time fraction of second length: {0:d}'.format(
fraction_of_second_length))
try:
fraction_of_second = int(structure.fraction_of_second, 10)
except (TypeError, ValueError) as exception:
raise ValueError(
'unable to determine fraction of second with error: {0!s}'.format(
exception))
# TODO: improve precision support, but for now ignore the 100ns precision.
if fraction_of_second_length == 7:
fraction_of_second, _ = divmod(fraction_of_second, 10)
date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(
structure.year, structure.month, structure.day, structure.hour,
structure.minute, structure.second)
if fraction_of_second_length > 0:
date_time_string = '{0:s}.{1:d}'.format(
date_time_string, fraction_of_second)
utc_offset_minutes = structure.get('utc_offset_minutes', None)
if utc_offset_minutes is not None:
try:
time_zone_offset = int(utc_offset_minutes[1:], 10)
except (IndexError, ValueError) as exception:
raise ValueError(
'Unable to parse time zone offset with error: {0!s}.'.format(
exception))
time_zone_hours, time_zone_minutes = divmod(time_zone_offset, 60)
date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format(
date_time_string, utc_offset_minutes[0], time_zone_hours,
time_zone_minutes)
return date_time_string
|
python
|
{
"resource": ""
}
|
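_GetISO8601String assembles an ISO 8601 string from the SCCM fields: a fraction of a second plus a signed UTC offset expressed in minutes. A simplified standalone sketch of that assembly for the millisecond case; build_iso8601 is an invented helper and the sample values come from the docstring example above.

def build_iso8601(year, month, day, hour, minute, second, fraction, offset):
  date_time_string = (
      '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}'.format(
          year, month, day, hour, minute, second, fraction))
  # The offset arrives as a signed number of minutes, such as '-330'.
  sign = offset[0]
  offset_minutes = int(offset[1:], 10)
  hours, minutes = divmod(offset_minutes, 60)
  return '{0:s}{1:s}{2:02d}:{3:02d}'.format(
      date_time_string, sign, hours, minutes)

# time="19:33:19.766-330" date="11-28-2014" becomes:
print(build_iso8601(2014, 11, 28, 19, 33, 19, 766, '-330'))
# 2014-11-28T19:33:19.766-05:30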
q25396
|
SCCMParser.ParseRecord
|
train
|
def ParseRecord(self, parser_mediator, key, structure):
"""Parse the record and return an SCCM log event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in (
'log_entry', 'log_entry_at_end', 'log_entry_offset',
'log_entry_offset_at_end'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
try:
date_time_string = self._GetISO8601String(structure)
except ValueError as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to determine date time string with error: {0!s}'.format(
              exception))
      return
fraction_of_second_length = len(structure.fraction_of_second)
if fraction_of_second_length == 3:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
elif fraction_of_second_length in (6, 7):
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
try:
date_time.CopyFromStringISO8601(date_time_string)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse date time value: {0:s} with error: {1!s}'.format(
date_time_string, exception))
return
event_data = SCCMLogEventData()
event_data.component = structure.component
# TODO: pass line number to offset or remove.
event_data.offset = 0
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25397
|
SCCMParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, lines):
"""Verifies whether content corresponds to an SCCM log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
# Identify the token to which we attempt a match.
match = self._PARSING_COMPONENTS['msg_left_delimiter'].match
# Because logs files can lead with a partial event,
# we can't assume that the first character (post-BOM)
# in the file is the beginning of our match - so we
# look for match anywhere in lines.
return match in lines
|
python
|
{
"resource": ""
}
|
q25398
|
SelfFeederMixIn.Feed
|
train
|
def Feed(self, size=512):
"""Feed data into the buffer.
Args:
      size: optional data size to read from the file-like object.
    Returns:
      int: number of bytes read.
    """
data = self.file_object.read(size)
Lexer.Feed(self, data)
return len(data)
|
python
|
{
"resource": ""
}
|
q25399
|
SelfFeederMixIn.NextToken
|
train
|
def NextToken(self):
"""Retrieves the next token.
Returns:
The next token (instance of Token) or None.
"""
# If we don't have enough data - feed ourselves: We assume
# that we must have at least one sector in our buffer.
if len(self.buffer) < 512:
if self.Feed() == 0 and not self.buffer:
return None
return Lexer.NextToken(self)
|
python
|
{
"resource": ""
}
|
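Feed and NextToken above implement a self-feeding buffer: keep at least one 512 byte sector available before the lexer consumes it. A standalone sketch of that pattern with an in-memory stream; BufferedFeeder and NextChunk are invented names and the chunk slicing only stands in for the real tokenizer.

import io

class BufferedFeeder(object):
  def __init__(self, file_object):
    self.buffer = b''
    self.file_object = file_object

  def Feed(self, size=512):
    # Read more data from the source and append it to the buffer.
    data = self.file_object.read(size)
    self.buffer += data
    return len(data)

  def NextChunk(self):
    # Refill when below one sector; stop when the source is exhausted.
    if len(self.buffer) < 512:
      if self.Feed() == 0 and not self.buffer:
        return None
    chunk, self.buffer = self.buffer[:512], self.buffer[512:]
    return chunk

feeder = BufferedFeeder(io.BytesIO(b'\x00' * 1500))
while True:
  chunk = feeder.NextChunk()
  if chunk is None:
    break
  print(len(chunk))  # 512, 512, 476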