Dataset schema:
  _id: string (length 2 to 7)
  title: string (length 1 to 88)
  partition: string (3 classes)
  text: string (length 75 to 19.8k)
  language: string (1 class)
  meta_information: dict
q25600
AnalysisPluginManager.GetAllPluginInformation
train
def GetAllPluginInformation(cls, show_all=True):
  """Retrieves a list of the registered analysis plugins.

  Args:
    show_all (Optional[bool]): True if all analysis plugin names should
        be listed.

  Returns:
    list[tuple[str, str, str]]: the name, docstring and type string of each
        analysis plugin in alphabetical order.
  """
  results = []
  for plugin_class in iter(cls._plugin_classes.values()):
    plugin_object = plugin_class()
    if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:
      continue

    # TODO: Use a specific description variable, not the docstring.
    doc_string, _, _ = plugin_class.__doc__.partition('\n')

    type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)
    information_tuple = (plugin_object.plugin_name, doc_string, type_string)
    results.append(information_tuple)

  return sorted(results)
python
{ "resource": "" }
q25601
AnalysisPluginManager.GetPluginObjects
train
def GetPluginObjects(cls, plugin_names):
  """Retrieves the plugin objects.

  Args:
    plugin_names (list[str]): names of plugins that should be retrieved.

  Returns:
    dict[str, AnalysisPlugin]: analysis plugins per name.
  """
  plugin_objects = {}
  for plugin_name, plugin_class in iter(cls._plugin_classes.items()):
    if plugin_name not in plugin_names:
      continue

    plugin_objects[plugin_name] = plugin_class()

  return plugin_objects
python
{ "resource": "" }
q25602
CompoundZIPPlugin.Process
train
def Process(self, parser_mediator, zip_file, archive_members):
  """Determines if this is the correct plugin; if so proceed with processing.

  This method checks if the zip file being processed contains the paths
  specified in REQUIRED_PATHS. If all paths are present, the plugin logic
  processing continues in InspectZipFile.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    zip_file (zipfile.ZipFile): the zip file. It should not be closed in
        this method, but will be closed by the parser logic in czip.py.
    archive_members (list[str]): file paths in the archive.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
    ValueError: if a subclass has not specified REQUIRED_PATHS.
  """
  if not self.REQUIRED_PATHS:
    raise ValueError('REQUIRED_PATHS not specified')

  if not set(archive_members).issuperset(self.REQUIRED_PATHS):
    raise errors.WrongCompoundZIPPlugin(self.NAME)

  logger.debug('Compound ZIP Plugin used: {0:s}'.format(self.NAME))

  self.InspectZipFile(parser_mediator, zip_file)
python
{ "resource": "" }
q25603
PinfoTool._CalculateStorageCounters
train
def _CalculateStorageCounters(self, storage_reader):
  """Calculates the counters of the entire storage.

  Args:
    storage_reader (StorageReader): storage reader.

  Returns:
    dict[str, collections.Counter]: storage counters.
  """
  analysis_reports_counter = collections.Counter()
  analysis_reports_counter_error = False
  event_labels_counter = collections.Counter()
  event_labels_counter_error = False
  parsers_counter = collections.Counter()
  parsers_counter_error = False

  for session in storage_reader.GetSessions():
    # Check for a dict for backwards compatibility.
    if isinstance(session.analysis_reports_counter, dict):
      analysis_reports_counter += collections.Counter(
          session.analysis_reports_counter)
    elif isinstance(session.analysis_reports_counter, collections.Counter):
      analysis_reports_counter += session.analysis_reports_counter
    else:
      analysis_reports_counter_error = True

    # Check for a dict for backwards compatibility.
    if isinstance(session.event_labels_counter, dict):
      event_labels_counter += collections.Counter(
          session.event_labels_counter)
    elif isinstance(session.event_labels_counter, collections.Counter):
      event_labels_counter += session.event_labels_counter
    else:
      event_labels_counter_error = True

    # Check for a dict for backwards compatibility.
    if isinstance(session.parsers_counter, dict):
      parsers_counter += collections.Counter(session.parsers_counter)
    elif isinstance(session.parsers_counter, collections.Counter):
      parsers_counter += session.parsers_counter
    else:
      parsers_counter_error = True

  storage_counters = {}

  warnings_by_path_spec = collections.Counter()
  warnings_by_parser_chain = collections.Counter()

  for warning in list(storage_reader.GetWarnings()):
    warnings_by_path_spec[warning.path_spec.comparable] += 1
    warnings_by_parser_chain[warning.parser_chain] += 1

  storage_counters['warnings_by_path_spec'] = warnings_by_path_spec
  storage_counters['warnings_by_parser_chain'] = warnings_by_parser_chain

  if not analysis_reports_counter_error:
    storage_counters['analysis_reports'] = analysis_reports_counter

  if not event_labels_counter_error:
    storage_counters['event_labels'] = event_labels_counter

  if not parsers_counter_error:
    storage_counters['parsers'] = parsers_counter

  return storage_counters
python
{ "resource": "" }
q25604
PinfoTool._PrintAnalysisReportsDetails
train
def _PrintAnalysisReportsDetails(self, storage_reader):
  """Prints the details of the analysis reports.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  if not storage_reader.HasAnalysisReports():
    self._output_writer.Write('No analysis reports stored.\n\n')
    return

  for index, analysis_report in enumerate(
      storage_reader.GetAnalysisReports()):
    title = 'Analysis report: {0:d}'.format(index)
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title=title)

    table_view.AddRow(['String', analysis_report.GetString()])

    table_view.Write(self._output_writer)
python
{ "resource": "" }
q25605
PinfoTool._PrintWarningCounters
train
def _PrintWarningCounters(self, storage_counters):
  """Prints a summary of the warnings.

  Args:
    storage_counters (dict): storage counters.
  """
  warnings_by_pathspec = storage_counters.get('warnings_by_path_spec', {})
  warnings_by_parser_chain = storage_counters.get(
      'warnings_by_parser_chain', {})
  if not warnings_by_parser_chain:
    self._output_writer.Write('No warnings stored.\n\n')
    return

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title='Warnings generated per parser',
      column_names=['Parser (plugin) name', 'Number of warnings'])
  for parser_chain, count in warnings_by_parser_chain.items():
    parser_chain = parser_chain or '<No parser>'
    table_view.AddRow([parser_chain, '{0:d}'.format(count)])

  table_view.Write(self._output_writer)

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title='Pathspecs with most warnings',
      column_names=['Number of warnings', 'Pathspec'])

  top_pathspecs = warnings_by_pathspec.most_common(10)
  for pathspec, count in top_pathspecs:
    for path_index, line in enumerate(pathspec.split('\n')):
      if not line:
        continue

      if path_index == 0:
        table_view.AddRow(['{0:d}'.format(count), line])
      else:
        table_view.AddRow(['', line])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25606
PinfoTool._PrintWarningsDetails
train
def _PrintWarningsDetails(self, storage):
  """Prints the details of the warnings.

  Args:
    storage (BaseStore): storage.
  """
  if not storage.HasWarnings():
    self._output_writer.Write('No warnings stored.\n\n')
    return

  for index, warning in enumerate(storage.GetWarnings()):
    title = 'Warning: {0:d}'.format(index)
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title=title)

    table_view.AddRow(['Message', warning.message])
    table_view.AddRow(['Parser chain', warning.parser_chain])

    path_specification = warning.path_spec.comparable
    for path_index, line in enumerate(path_specification.split('\n')):
      if not line:
        continue

      if path_index == 0:
        table_view.AddRow(['Path specification', line])
      else:
        table_view.AddRow(['', line])

    table_view.Write(self._output_writer)
python
{ "resource": "" }
q25607
PinfoTool._PrintEventLabelsCounter
train
def _PrintEventLabelsCounter(
    self, event_labels_counter, session_identifier=None):
  """Prints the event labels counter.

  Args:
    event_labels_counter (collections.Counter): number of event tags per
        label.
    session_identifier (Optional[str]): session identifier.
  """
  if not event_labels_counter:
    return

  title = 'Event tags generated per label'
  if session_identifier:
    title = '{0:s}: {1:s}'.format(title, session_identifier)

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type,
      column_names=['Label', 'Number of event tags'], title=title)

  for key, value in sorted(event_labels_counter.items()):
    if key == 'total':
      continue
    table_view.AddRow([key, value])

  try:
    total = event_labels_counter['total']
  except KeyError:
    total = 'N/A'

  table_view.AddRow(['Total', total])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25608
PinfoTool._PrintParsersCounter
train
def _PrintParsersCounter(self, parsers_counter, session_identifier=None):
  """Prints the parsers counter.

  Args:
    parsers_counter (collections.Counter): number of events per parser or
        parser plugin.
    session_identifier (Optional[str]): session identifier.
  """
  if not parsers_counter:
    return

  title = 'Events generated per parser'
  if session_identifier:
    title = '{0:s}: {1:s}'.format(title, session_identifier)

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type,
      column_names=['Parser (plugin) name', 'Number of events'], title=title)

  for key, value in sorted(parsers_counter.items()):
    if key == 'total':
      continue
    table_view.AddRow([key, value])

  table_view.AddRow(['Total', parsers_counter['total']])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25609
PinfoTool._PrintPreprocessingInformation
train
def _PrintPreprocessingInformation(self, storage_reader, session_number=None):
  """Prints the details of the preprocessing information.

  Args:
    storage_reader (StorageReader): storage reader.
    session_number (Optional[int]): session number.
  """
  knowledge_base_object = knowledge_base.KnowledgeBase()

  storage_reader.ReadPreprocessingInformation(knowledge_base_object)

  # TODO: replace session_number by session_identifier.
  system_configuration = knowledge_base_object.GetSystemConfigurationArtifact(
      session_identifier=session_number)
  if not system_configuration:
    return

  title = 'System configuration'
  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title=title)

  hostname = 'N/A'
  if system_configuration.hostname:
    hostname = system_configuration.hostname.name

  operating_system = system_configuration.operating_system or 'N/A'
  operating_system_product = (
      system_configuration.operating_system_product or 'N/A')
  operating_system_version = (
      system_configuration.operating_system_version or 'N/A')
  code_page = system_configuration.code_page or 'N/A'
  keyboard_layout = system_configuration.keyboard_layout or 'N/A'
  time_zone = system_configuration.time_zone or 'N/A'

  table_view.AddRow(['Hostname', hostname])
  table_view.AddRow(['Operating system', operating_system])
  table_view.AddRow(['Operating system product', operating_system_product])
  table_view.AddRow(['Operating system version', operating_system_version])
  table_view.AddRow(['Code page', code_page])
  table_view.AddRow(['Keyboard layout', keyboard_layout])
  table_view.AddRow(['Time zone', time_zone])

  table_view.Write(self._output_writer)

  title = 'User accounts'
  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type,
      column_names=['Username', 'User directory'], title=title)

  for user_account in system_configuration.user_accounts:
    table_view.AddRow([
        user_account.username, user_account.user_directory])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25610
PinfoTool._PrintSessionsDetails
train
def _PrintSessionsDetails(self, storage_reader):
  """Prints the details of the sessions.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  for session_number, session in enumerate(storage_reader.GetSessions()):
    session_identifier = uuid.UUID(hex=session.identifier)
    session_identifier = '{0!s}'.format(session_identifier)

    start_time = 'N/A'
    if session.start_time is not None:
      start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)

    completion_time = 'N/A'
    if session.completion_time is not None:
      completion_time = timelib.Timestamp.CopyToIsoFormat(
          session.completion_time)

    enabled_parser_names = 'N/A'
    if session.enabled_parser_names:
      enabled_parser_names = ', '.join(sorted(session.enabled_parser_names))

    command_line_arguments = session.command_line_arguments or 'N/A'
    parser_filter_expression = session.parser_filter_expression or 'N/A'
    preferred_encoding = session.preferred_encoding or 'N/A'

    # Workaround for some older Plaso releases writing preferred encoding as
    # bytes.
    if isinstance(preferred_encoding, py2to3.BYTES_TYPE):
      preferred_encoding = preferred_encoding.decode('utf-8')

    if session.artifact_filters:
      artifact_filters_string = ', '.join(session.artifact_filters)
    else:
      artifact_filters_string = 'N/A'
    filter_file = session.filter_file or 'N/A'

    title = 'Session: {0:s}'.format(session_identifier)
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title=title)

    table_view.AddRow(['Start time', start_time])
    table_view.AddRow(['Completion time', completion_time])
    table_view.AddRow(['Product name', session.product_name])
    table_view.AddRow(['Product version', session.product_version])
    table_view.AddRow(['Command line arguments', command_line_arguments])
    table_view.AddRow(['Parser filter expression', parser_filter_expression])
    table_view.AddRow(['Enabled parser and plugins', enabled_parser_names])
    table_view.AddRow(['Preferred encoding', preferred_encoding])
    table_view.AddRow(['Debug mode', session.debug_mode])
    table_view.AddRow(['Artifact filters', artifact_filters_string])
    table_view.AddRow(['Filter file', filter_file])

    table_view.Write(self._output_writer)

    if self._verbose:
      self._PrintPreprocessingInformation(storage_reader, session_number + 1)

      self._PrintParsersCounter(
          session.parsers_counter, session_identifier=session_identifier)

      self._PrintAnalysisReportCounter(
          session.analysis_reports_counter,
          session_identifier=session_identifier)

      self._PrintEventLabelsCounter(
          session.event_labels_counter,
          session_identifier=session_identifier)
python
{ "resource": "" }
q25611
PinfoTool._PrintSessionsOverview
train
def _PrintSessionsOverview(self, storage_reader):
  """Prints a sessions overview.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title='Sessions')

  for session in storage_reader.GetSessions():
    start_time = timelib.Timestamp.CopyToIsoFormat(
        session.start_time)

    session_identifier = uuid.UUID(hex=session.identifier)
    session_identifier = '{0!s}'.format(session_identifier)
    table_view.AddRow([session_identifier, start_time])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25612
PinfoTool._PrintStorageInformationAsText
train
def _PrintStorageInformationAsText(self, storage_reader):
  """Prints information about the store as human-readable text.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title='Plaso Storage Information')
  table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)])
  table_view.AddRow(['Format version', storage_reader.format_version])
  table_view.AddRow(
      ['Serialization format', storage_reader.serialization_format])
  table_view.Write(self._output_writer)

  if storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION:
    self._PrintSessionsOverview(storage_reader)
    self._PrintSessionsDetails(storage_reader)

    storage_counters = self._CalculateStorageCounters(storage_reader)

    if 'parsers' not in storage_counters:
      self._output_writer.Write(
          'Unable to determine number of events generated per parser.\n')
    else:
      self._PrintParsersCounter(storage_counters['parsers'])

    if 'analysis_reports' not in storage_counters:
      self._output_writer.Write(
          'Unable to determine number of reports generated per plugin.\n')
    else:
      self._PrintAnalysisReportCounter(storage_counters['analysis_reports'])

    if 'event_labels' not in storage_counters:
      self._output_writer.Write(
          'Unable to determine number of event tags generated per label.\n')
    else:
      self._PrintEventLabelsCounter(storage_counters['event_labels'])

    self._PrintWarningCounters(storage_counters)

    if self._verbose:
      self._PrintWarningsDetails(storage_reader)

    self._PrintAnalysisReportsDetails(storage_reader)

  elif storage_reader.storage_type == definitions.STORAGE_TYPE_TASK:
    self._PrintTasksInformation(storage_reader)
python
{ "resource": "" }
q25613
PinfoTool._PrintStorageInformationAsJSON
train
def _PrintStorageInformationAsJSON(self, storage_reader):
  """Writes a summary of sessions as machine-readable JSON.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  serializer = json_serializer.JSONAttributeContainerSerializer
  storage_counters = self._CalculateStorageCounters(storage_reader)
  storage_counters_json = json.dumps(storage_counters)
  self._output_writer.Write('{')
  self._output_writer.Write('"storage_counters": {0:s}'.format(
      storage_counters_json))
  self._output_writer.Write(',\n')
  self._output_writer.Write(' "sessions": {')
  for index, session in enumerate(storage_reader.GetSessions()):
    json_string = serializer.WriteSerialized(session)
    if index != 0:
      self._output_writer.Write(',\n')
    self._output_writer.Write('"session_{0:s}": {1:s} '.format(
        session.identifier, json_string))
  self._output_writer.Write('}}')
python
{ "resource": "" }
q25614
PinfoTool._PrintTasksInformation
train
def _PrintTasksInformation(self, storage_reader):
  """Prints information about the tasks.

  Args:
    storage_reader (StorageReader): storage reader.
  """
  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, title='Tasks')

  for task_start, _ in storage_reader.GetSessions():
    start_time = timelib.Timestamp.CopyToIsoFormat(
        task_start.timestamp)

    task_identifier = uuid.UUID(hex=task_start.identifier)
    task_identifier = '{0!s}'.format(task_identifier)
    table_view.AddRow([task_identifier, start_time])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25615
PinfoTool.PrintStorageInformation
train
def PrintStorageInformation(self):
  """Prints the storage information."""
  storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(
      self._storage_file_path)
  if not storage_reader:
    logger.error(
        'Format of storage file: {0:s} not supported'.format(
            self._storage_file_path))
    return

  try:
    if self._output_format == 'json':
      self._PrintStorageInformationAsJSON(storage_reader)
    elif self._output_format == 'text':
      self._PrintStorageInformationAsText(storage_reader)
  finally:
    storage_reader.Close()
python
{ "resource": "" }
q25616
FileStatParser._GetFileSystemTypeFromFileEntry
train
def _GetFileSystemTypeFromFileEntry(self, file_entry):
  """Retrieves the file system type indicator of a file entry.

  Args:
    file_entry (dfvfs.FileEntry): a file entry.

  Returns:
    str: file system type.
  """
  if file_entry.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK:
    return file_entry.type_indicator

  # TODO: Implement fs_type in dfVFS and remove this implementation
  # once that is in place.
  file_system = file_entry.GetFileSystem()
  fs_info = file_system.GetFsInfo()
  if fs_info.info:
    type_string = '{0!s}'.format(fs_info.info.ftype)
    if type_string.startswith('TSK_FS_TYPE_'):
      type_string = type_string[12:]
    if type_string.endswith('_DETECT'):
      type_string = type_string[:-7]

  return type_string
python
{ "resource": "" }
q25617
FileStatParser.ParseFileEntry
train
def ParseFileEntry(self, parser_mediator, file_entry):
  """Parses a file entry.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_entry (dfvfs.FileEntry): a file entry.
  """
  stat_object = file_entry.GetStat()
  if not stat_object:
    return

  file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)

  event_data = FileStatEventData()
  event_data.file_entry_type = stat_object.type
  event_data.file_size = getattr(stat_object, 'size', None)
  event_data.file_system_type = file_system_type
  event_data.is_allocated = file_entry.IsAllocated()

  if file_entry.access_time:
    event = time_events.DateTimeValuesEvent(
        file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  if file_entry.creation_time:
    event = time_events.DateTimeValuesEvent(
        file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  if file_entry.change_time:
    event = time_events.DateTimeValuesEvent(
        file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  if file_entry.modification_time:
    event = time_events.DateTimeValuesEvent(
        file_entry.modification_time,
        definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():
    posix_time = getattr(stat_object, time_attribute, None)
    if posix_time is None:
      continue

    nano_time_attribute = '{0:s}_nano'.format(time_attribute)
    nano_time_attribute = getattr(stat_object, nano_time_attribute, None)

    timestamp = posix_time * 1000000
    if nano_time_attribute is not None:
      # Note that the _nano values are in intervals of 100th nano seconds.
      micro_time_attribute, _ = divmod(nano_time_attribute, 10)
      timestamp += micro_time_attribute

    # TSK will return 0 if the timestamp is not set.
    if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
        not timestamp):
      continue

    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, usage)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25618
BSMParser._FormatArgToken
train
def _FormatArgToken(self, token_data):
  """Formats an argument token as a dictionary of values.

  Args:
    token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or
        AUT_ARG64 token data.

  Returns:
    dict[str, str]: token values.
  """
  return {
      'string': token_data.argument_value.rstrip('\x00'),
      'num_arg': token_data.argument_index,
      'is': token_data.argument_name}
python
{ "resource": "" }
q25619
BSMParser._FormatAttrToken
train
def _FormatAttrToken(self, token_data):
  """Formats an attribute token as a dictionary of values.

  Args:
    token_data (bsm_token_data_attr32|bsm_token_data_attr64): AUT_ATTR32 or
        AUT_ATTR64 token data.

  Returns:
    dict[str, str]: token values.
  """
  return {
      'mode': token_data.file_mode,
      'uid': token_data.user_identifier,
      'gid': token_data.group_identifier,
      'system_id': token_data.file_system_identifier,
      'node_id': token_data.file_identifier,
      'device': token_data.device}
python
{ "resource": "" }
q25620
BSMParser._FormatDataToken
train
def _FormatDataToken(self, token_data):
  """Formats a data token as a dictionary of values.

  Args:
    token_data (bsm_token_data_data): AUT_DATA token data.

  Returns:
    dict[str, str]: token values.
  """
  format_string = bsmtoken.BSM_TOKEN_DATA_PRINT.get(
      token_data.data_format, 'UNKNOWN')

  if token_data.data_format == 4:
    data = bytes(bytearray(token_data.data)).split(b'\x00')[0]
    data = data.decode('utf-8')
  else:
    data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])

  return {
      'format': format_string,
      'data': data}
python
{ "resource": "" }
q25621
BSMParser._FormatInAddrExToken
train
def _FormatInAddrExToken(self, token_data):
  """Formats an extended IPv4 address token as a dictionary of values.

  Args:
    token_data (bsm_token_data_in_addr_ex): AUT_IN_ADDR_EX token data.

  Returns:
    dict[str, str]: token values.
  """
  protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')
  if token_data.net_type == 4:
    ip_address = self._FormatPackedIPv6Address(token_data.ip_address[:4])
  elif token_data.net_type == 16:
    ip_address = self._FormatPackedIPv6Address(token_data.ip_address)

  return {
      'protocols': protocol,
      'net_type': token_data.net_type,
      'address': ip_address}
python
{ "resource": "" }
q25622
BSMParser._FormatIPCPermToken
train
def _FormatIPCPermToken(self, token_data):
  """Formats an IPC permissions token as a dictionary of values.

  Args:
    token_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.

  Returns:
    dict[str, str]: token values.
  """
  return {
      'user_id': token_data.user_identifier,
      'group_id': token_data.group_identifier,
      'creator_user_id': token_data.creator_user_identifier,
      'creator_group_id': token_data.creator_group_identifier,
      'access': token_data.access_mode}
python
{ "resource": "" }
q25623
BSMParser._FormatIPToken
train
def _FormatIPToken(self, token_data):
  """Formats an IPv4 packet header token as a dictionary of values.

  Args:
    token_data (bsm_token_data_ip): AUT_IP token data.

  Returns:
    dict[str, str]: token values.
  """
  data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
  return {'IPv4_Header': data}
python
{ "resource": "" }
q25624
BSMParser._FormatOpaqueToken
train
def _FormatOpaqueToken(self, token_data):
  """Formats an opaque token as a dictionary of values.

  Args:
    token_data (bsm_token_data_opaque): AUT_OPAQUE token data.

  Returns:
    dict[str, str]: token values.
  """
  data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
  return {'data': data}
python
{ "resource": "" }
q25625
BSMParser._FormatOtherFileToken
train
def _FormatOtherFileToken(self, token_data):
  """Formats an other file token as a dictionary of values.

  Args:
    token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.

  Returns:
    dict[str, str]: token values.
  """
  # TODO: if this timestamp is useful, it must be extracted as a separate
  # event object.
  timestamp = token_data.microseconds + (
      token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
  date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
      timestamp=timestamp)
  date_time_string = date_time.CopyToDateTimeString()

  return {
      'string': token_data.name.rstrip('\x00'),
      'timestamp': date_time_string}
python
{ "resource": "" }
q25626
BSMParser._FormatReturnOrExitToken
train
def _FormatReturnOrExitToken(self, token_data):
  """Formats a return or exit token as a dictionary of values.

  Args:
    token_data (bsm_token_data_exit|bsm_token_data_return32|
        bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or AUT_RETURN64
        token data.

  Returns:
    dict[str, str]: token values.
  """
  error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN')
  return {
      'error': error_string,
      'token_status': token_data.status,
      'call_status': token_data.return_value}
python
{ "resource": "" }
q25627
BSMParser._FormatSocketExToken
train
def _FormatSocketExToken(self, token_data):
  """Formats an extended socket token as a dictionary of values.

  Args:
    token_data (bsm_token_data_socket_ex): AUT_SOCKET_EX token data.

  Returns:
    dict[str, str]: token values.
  """
  if token_data.socket_domain == 10:
    local_ip_address = self._FormatPackedIPv6Address(
        token_data.local_ip_address)
    remote_ip_address = self._FormatPackedIPv6Address(
        token_data.remote_ip_address)
  else:
    local_ip_address = self._FormatPackedIPv4Address(
        token_data.local_ip_address)
    remote_ip_address = self._FormatPackedIPv4Address(
        token_data.remote_ip_address)

  return {
      'from': local_ip_address,
      'from_port': token_data.local_port,
      'to': remote_ip_address,
      'to_port': token_data.remote_port}
python
{ "resource": "" }
q25628
BSMParser._FormatSocketUnixToken
train
def _FormatSocketUnixToken(self, token_data):
  """Formats a Unix socket token as a dictionary of values.

  Args:
    token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.

  Returns:
    dict[str, str]: token values.
  """
  protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
  return {
      'protocols': protocol,
      'family': token_data.socket_family,
      'path': token_data.socket_path}
python
{ "resource": "" }
q25629
BSMParser._FormatTokenData
train
def _FormatTokenData(self, token_type, token_data):
  """Formats the token data as a dictionary of values.

  Args:
    token_type (int): token type.
    token_data (object): token data.

  Returns:
    dict[str, str]: formatted token values or an empty dictionary if no
        formatted token values could be determined.
  """
  token_data_format_function = self._TOKEN_DATA_FORMAT_FUNCTIONS.get(
      token_type)
  if token_data_format_function:
    token_data_format_function = getattr(
        self, token_data_format_function, None)

  if not token_data_format_function:
    return {}

  return token_data_format_function(token_data)
python
{ "resource": "" }
q25630
BSMParser._ParseRecord
train
def _ParseRecord(self, parser_mediator, file_object):
  """Parses an event record.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): file-like object.

  Raises:
    ParseError: if the event record cannot be read.
  """
  header_record_offset = file_object.tell()

  # Check the header token type before reading the token data to prevent
  # variable size tokens to consume a large amount of memory.
  token_type = self._ParseTokenType(file_object, header_record_offset)
  if token_type not in self._HEADER_TOKEN_TYPES:
    raise errors.ParseError(
        'Unsupported header token type: 0x{0:02x}'.format(token_type))

  token_type, token_data = self._ParseToken(file_object, header_record_offset)

  if token_data.format_version != 11:
    raise errors.ParseError('Unsupported format version type: {0:d}'.format(
        token_data.format_version))

  timestamp = token_data.microseconds + (
      token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)

  event_type = token_data.event_type
  header_record_size = token_data.record_size
  record_end_offset = header_record_offset + header_record_size

  event_tokens = []
  return_token_values = None

  file_offset = file_object.tell()
  while file_offset < record_end_offset:
    token_type, token_data = self._ParseToken(file_object, file_offset)
    if not token_data:
      raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(
          token_type))

    file_offset = file_object.tell()

    if token_type == self._TOKEN_TYPE_AUT_TRAILER:
      break

    token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')
    token_values = self._FormatTokenData(token_type, token_data)
    event_tokens.append({token_type_string: token_values})

    if token_type in (
        self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64):
      return_token_values = token_values

  if token_data.signature != self._TRAILER_TOKEN_SIGNATURE:
    raise errors.ParseError('Unsupported signature in trailer token.')

  if token_data.record_size != header_record_size:
    raise errors.ParseError(
        'Mismatch of event record size between header and trailer token.')

  event_data = BSMEventData()
  event_data.event_type = event_type
  event_data.extra_tokens = event_tokens
  event_data.offset = header_record_offset
  event_data.record_length = header_record_size
  event_data.return_value = return_token_values

  date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
      timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_CREATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25631
BSMParser._ParseToken
train
def _ParseToken(self, file_object, file_offset):
  """Parses a token.

  Args:
    file_object (dfvfs.FileIO): file-like object.
    file_offset (int): offset of the token relative to the start of
        the file-like object.

  Returns:
    tuple: containing:

      int: token type
      object: token data or None if the token type is not supported.
  """
  token_type = self._ParseTokenType(file_object, file_offset)

  token_data = None
  token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(
      token_type, None)
  if token_data_map_name:
    token_data_map = self._GetDataTypeMap(token_data_map_name)
    token_data, _ = self._ReadStructureFromFileObject(
        file_object, file_offset + 1, token_data_map)

  return token_type, token_data
python
{ "resource": "" }
q25632
BSMParser._ParseTokenType
train
def _ParseTokenType(self, file_object, file_offset):
  """Parses a token type.

  Args:
    file_object (dfvfs.FileIO): file-like object.
    file_offset (int): offset of the token relative to the start of
        the file-like object.

  Returns:
    int: token type
  """
  token_type_map = self._GetDataTypeMap('uint8')

  token_type, _ = self._ReadStructureFromFileObject(
      file_object, file_offset, token_type_map)

  return token_type
python
{ "resource": "" }
q25633
BSMParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object):
  """Parses a BSM file-like object.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): a file-like object.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  file_offset = file_object.get_offset()
  file_size = file_object.get_size()

  while file_offset < file_size:
    try:
      self._ParseRecord(parser_mediator, file_object)
    except errors.ParseError as exception:
      if file_offset == 0:
        raise errors.UnableToParseFile(
            'Unable to parse first event record with error: {0!s}'.format(
                exception))

      # TODO: skip to next event record.

    file_offset = file_object.get_offset()
python
{ "resource": "" }
q25634
SymantecParser._GetTimeElementsTuple
train
def _GetTimeElementsTuple(self, timestamp):
  """Retrieves a time elements tuple from the timestamp.

  A Symantec log timestamp consists of six hexadecimal octets, that
  represent:

    First octet: Number of years since 1970
    Second octet: Month, where January is represented by 0
    Third octet: Day of the month
    Fourth octet: Number of hours
    Fifth octet: Number of minutes
    Sixth octet: Number of seconds

  For example, 200A13080122 represents November 19, 2002, 8:01:34 AM.

  Args:
    timestamp (str): hexadecimal encoded date and time values.

  Returns:
    tuple: containing:
      year (int): year.
      month (int): month, where 1 represents January.
      day_of_month (int): day of month, where 1 is the first day of the
          month.
      hours (int): hours.
      minutes (int): minutes.
      seconds (int): seconds.
  """
  year, month, day_of_month, hours, minutes, seconds = (
      int(hexdigit[0] + hexdigit[1], 16) for hexdigit in zip(
          timestamp[::2], timestamp[1::2]))

  return (year + 1970, month + 1, day_of_month, hours, minutes, seconds)
python
{ "resource": "" }
q25635
SpotlightVolumePlugin.GetEntries
train
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
  """Extracts relevant Volume Configuration Spotlight entries.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
  """
  stores = match.get('Stores', {})
  for volume_name, volume in iter(stores.items()):
    datetime_value = volume.get('CreationDate', None)
    if not datetime_value:
      continue

    partial_path = volume['PartialPath']

    event_data = plist_event.PlistTimeEventData()
    event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
        volume_name, partial_path)
    event_data.key = ''
    event_data.root = '/Stores'

    event = time_events.PythonDatetimeEvent(
        datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25636
TransmissionPlugin.GetEntries
train
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
  """Extract data from Transmission's resume folder files.

  This is the main parsing engine for the parser. It determines if
  the selected file is the proper file to parse and extracts current
  running torrents.

  Transmission stores an individual Bencoded file for each active download
  in a folder named resume under the user's application data folder.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    data (Optional[dict[str, object]]): bencode data values.
  """
  seeding_time = data.get('seeding-time-seconds', None)

  event_data = TransmissionEventData()
  event_data.destination = data.get('destination', None)
  # Convert seconds to minutes.
  event_data.seedtime, _ = divmod(seeding_time, 60)

  # Create timeline events based on extracted values.
  timestamp = data.get('added-date', None)
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = data.get('done-date', None)
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = data.get('activity-date', None)
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25637
ExtractionTool._CreateProcessingConfiguration
train
def _CreateProcessingConfiguration(self, knowledge_base):
  """Creates a processing configuration.

  Args:
    knowledge_base (KnowledgeBase): contains information from the source
        data needed for parsing.

  Returns:
    ProcessingConfiguration: processing configuration.

  Raises:
    BadConfigOption: if more than 1 parser and parser plugins preset was
        found for the detected operating system.
  """
  # TODO: pass preferred_encoding.
  configuration = configurations.ProcessingConfiguration()
  configuration.artifact_filters = self._artifact_filters
  configuration.credentials = self._credential_configurations
  configuration.debug_output = self._debug_mode
  configuration.event_extraction.text_prepend = self._text_prepend
  configuration.extraction.hasher_file_size_limit = (
      self._hasher_file_size_limit)
  configuration.extraction.hasher_names_string = self._hasher_names_string
  configuration.extraction.process_archives = self._process_archives
  configuration.extraction.process_compressed_streams = (
      self._process_compressed_streams)
  configuration.extraction.yara_rules_string = self._yara_rules_string
  configuration.filter_file = self._filter_file
  configuration.input_source.mount_path = self._mount_path
  configuration.log_filename = self._log_file
  configuration.parser_filter_expression = self._parser_filter_expression
  configuration.preferred_year = self._preferred_year
  configuration.profiling.directory = self._profiling_directory
  configuration.profiling.sample_rate = self._profiling_sample_rate
  configuration.profiling.profilers = self._profilers
  configuration.temporary_directory = self._temporary_directory

  if not configuration.parser_filter_expression:
    operating_system = knowledge_base.GetValue('operating_system')
    operating_system_product = knowledge_base.GetValue(
        'operating_system_product')
    operating_system_version = knowledge_base.GetValue(
        'operating_system_version')

    preset_definitions = (
        parsers_manager.ParsersManager.GetPresetsForOperatingSystem(
            operating_system, operating_system_product,
            operating_system_version))
    if preset_definitions:
      preset_names = [
          preset_definition.name
          for preset_definition in preset_definitions]
      filter_expression = ','.join(preset_names)

      logger.info('Parser filter expression set to: {0:s}'.format(
          filter_expression))
      configuration.parser_filter_expression = filter_expression

  return configuration
python
{ "resource": "" }
q25638
ExtractionTool._ParsePerformanceOptions
train
def _ParsePerformanceOptions(self, options):
  """Parses the performance options.

  Args:
    options (argparse.Namespace): command line arguments.

  Raises:
    BadConfigOption: if the options are invalid.
  """
  self._buffer_size = getattr(options, 'buffer_size', 0)
  if self._buffer_size:
    # TODO: turn this into a generic function that supports more size
    # suffixes both MB and MiB and also that does not allow m as a valid
    # indicator for MiB since m represents milli not Mega.
    try:
      if self._buffer_size[-1].lower() == 'm':
        self._buffer_size = int(self._buffer_size[:-1], 10)
        self._buffer_size *= self._BYTES_IN_A_MIB
      else:
        self._buffer_size = int(self._buffer_size, 10)
    except ValueError:
      raise errors.BadConfigOption(
          'Invalid buffer size: {0!s}.'.format(self._buffer_size))

  self._queue_size = self.ParseNumericOption(options, 'queue_size')
python
{ "resource": "" }
q25639
ExtractionTool._ReadParserPresetsFromFile
train
def _ReadParserPresetsFromFile(self):
  """Reads the parser presets from the presets.yaml file.

  Raises:
    BadConfigOption: if the parser presets file cannot be read.
  """
  self._presets_file = os.path.join(
      self._data_location, self._PRESETS_FILE_NAME)
  if not os.path.isfile(self._presets_file):
    raise errors.BadConfigOption(
        'No such parser presets file: {0:s}.'.format(self._presets_file))

  try:
    parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)
  except errors.MalformedPresetError as exception:
    raise errors.BadConfigOption(
        'Unable to read presets from file with error: {0!s}'.format(
            exception))
python
{ "resource": "" }
q25640
ExtractionTool._SetExtractionParsersAndPlugins
train
def _SetExtractionParsersAndPlugins(self, configuration, session):
  """Sets the parsers and plugins before extraction.

  Args:
    configuration (ProcessingConfiguration): processing configuration.
    session (Session): session.
  """
  names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(
      parser_filter_expression=configuration.parser_filter_expression)

  session.enabled_parser_names = list(names_generator)
  session.parser_filter_expression = configuration.parser_filter_expression
python
{ "resource": "" }
q25641
ExtractionTool._SetExtractionPreferredTimeZone
train
def _SetExtractionPreferredTimeZone(self, knowledge_base):
  """Sets the preferred time zone before extraction.

  Args:
    knowledge_base (KnowledgeBase): contains information from the source
        data needed for parsing.
  """
  # Note session.preferred_time_zone will default to UTC but
  # self._preferred_time_zone is None when not set.
  if self._preferred_time_zone:
    try:
      knowledge_base.SetTimeZone(self._preferred_time_zone)
    except ValueError:
      # pylint: disable=protected-access
      logger.warning(
          'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
              self._preferred_time_zone, knowledge_base._time_zone.zone))
python
{ "resource": "" }
q25642
ExtractionTool.AddPerformanceOptions
train
def AddPerformanceOptions(self, argument_group):
  """Adds the performance options to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  argument_group.add_argument(
      '--buffer_size', '--buffer-size', '--bs', dest='buffer_size',
      action='store', default=0, help=(
          'The buffer size for the output (defaults to 196MiB).'))

  argument_group.add_argument(
      '--queue_size', '--queue-size', dest='queue_size', action='store',
      default=0, help=(
          'The maximum number of queued items per worker '
          '(defaults to {0:d})').format(self._DEFAULT_QUEUE_SIZE))
python
{ "resource": "" }
q25643
ExtractionTool.AddProcessingOptions
train
def AddProcessingOptions(self, argument_group):
  """Adds the processing options to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  argument_group.add_argument(
      '--single_process', '--single-process', dest='single_process',
      action='store_true', default=False, help=(
          'Indicate that the tool should run in a single process.'))

  argument_helper_names = ['temporary_directory', 'workers', 'zeromq']
  if self._CanEnforceProcessMemoryLimit():
    argument_helper_names.append('process_resources')
  helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
      argument_group, names=argument_helper_names)
python
{ "resource": "" }
q25644
ExtractionTool.ListParsersAndPlugins
train
def ListParsersAndPlugins(self):
  """Lists information about the available parsers and plugins."""
  parsers_information = parsers_manager.ParsersManager.GetParsersInformation()

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type, column_names=['Name', 'Description'],
      title='Parsers')

  for name, description in sorted(parsers_information):
    table_view.AddRow([name, description])

  table_view.Write(self._output_writer)

  parser_names = parsers_manager.ParsersManager.GetNamesOfParsersWithPlugins()
  for parser_name in parser_names:
    plugins_information = (
        parsers_manager.ParsersManager.GetParserPluginsInformation(
            parser_filter_expression=parser_name))

    table_title = 'Parser plugins: {0:s}'.format(parser_name)
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Name', 'Description'],
        title=table_title)

    for name, description in sorted(plugins_information):
      table_view.AddRow([name, description])

    table_view.Write(self._output_writer)

  title = 'Parser presets'
  if self._presets_file:
    source_path = os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__))))

    presets_file = self._presets_file
    if presets_file.startswith(source_path):
      presets_file = presets_file[len(source_path) + 1:]

    title = '{0:s} ({1:s})'.format(title, presets_file)

  presets_information = parsers_manager.ParsersManager.GetPresetsInformation()

  table_view = views.ViewsFactory.GetTableView(
      self._views_format_type,
      column_names=['Name', 'Parsers and plugins'], title=title)

  for name, description in sorted(presets_information):
    table_view.AddRow([name, description])

  table_view.Write(self._output_writer)
python
{ "resource": "" }
q25645
XChatLogParser._ParseHeader
train
def _ParseHeader(self, parser_mediator, structure):
  """Parses a log header.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    structure (pyparsing.ParseResults): structure of tokens derived from
        a line of a text file.
  """
  _, month, day, hours, minutes, seconds, year = structure.date_time

  month = timelib.MONTH_DICT.get(month.lower(), 0)

  time_elements_tuple = (year, month, day, hours, minutes, seconds)

  try:
    date_time = dfdatetime_time_elements.TimeElements(
        time_elements_tuple=time_elements_tuple)
    date_time.is_local_time = True
  except ValueError:
    parser_mediator.ProduceExtractionWarning(
        'invalid date time value: {0!s}'.format(structure.date_time))
    return

  self._last_month = month

  event_data = XChatLogEventData()

  if structure.log_action[0] == 'BEGIN':
    self._xchat_year = year
    event_data.text = 'XChat start logging'

  elif structure.log_action[0] == 'END':
    self._xchat_year = None
    event_data.text = 'XChat end logging'

  else:
    logger.debug('Unknown log action: {0:s}.'.format(
        ' '.join(structure.log_action)))
    return

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_ADDED,
      time_zone=parser_mediator.timezone)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25646
XChatLogParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, line):
  """Verify that this file is an XChat log file.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    line (str): line from a text file.

  Returns:
    bool: True if the line is in the expected format, False if not.
  """
  try:
    structure = self._HEADER.parseString(line)
  except pyparsing.ParseException:
    logger.debug('Not a XChat log file')
    return False

  _, month, day, hours, minutes, seconds, year = structure.date_time

  month = timelib.MONTH_DICT.get(month.lower(), 0)

  time_elements_tuple = (year, month, day, hours, minutes, seconds)

  try:
    dfdatetime_time_elements.TimeElements(
        time_elements_tuple=time_elements_tuple)
  except ValueError:
    logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(
        structure.date_time))
    return False

  return True
python
{ "resource": "" }
q25647
CustomDestinationsParser._ParseLNKFile
train
def _ParseLNKFile(
    self, parser_mediator, file_entry, file_offset, remaining_file_size):
  """Parses a LNK file stored within the .customDestinations-ms file.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_entry (dfvfs.FileEntry): a file entry.
    file_offset (int): offset to the LNK file, relative to the start of the
        .customDestinations-ms file.
    remaining_file_size (int): size of the data remaining in the
        .customDestinations-ms file.

  Returns:
    int: size of the LNK file data or 0 if the LNK file could not be read.
  """
  path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_DATA_RANGE, range_offset=file_offset,
      range_size=remaining_file_size, parent=file_entry.path_spec)

  display_name = '{0:s} # 0x{1:08x}'.format(file_entry.name, file_offset)

  try:
    lnk_file_object = resolver.Resolver.OpenFileObject(path_spec)
  except (dfvfs_errors.BackEndError, RuntimeError) as exception:
    message = (
        'unable to open LNK file: {0:s} with error: {1!s}').format(
            display_name, exception)
    parser_mediator.ProduceExtractionWarning(message)
    return 0

  parser_mediator.AppendToParserChain(self._WINLNK_PARSER)
  try:
    lnk_file_object.seek(0, os.SEEK_SET)
    self._WINLNK_PARSER.ParseFileLNKFile(
        parser_mediator, lnk_file_object, display_name)
  finally:
    parser_mediator.PopFromParserChain()

  # We cannot trust the file size in the LNK data so we get the last offset
  # that was read instead.
  lnk_file_size = lnk_file_object.get_offset()

  lnk_file_object.close()

  return lnk_file_size
python
{ "resource": "" }
q25648
HangoutsMessagePlugin.ParseMessagesRow
train
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a Messages row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)

  event_data = HangoutsMessageData()
  event_data.sender = self._GetRowValue(query_hash, row, 'full_name')
  event_data.body = self._GetRowValue(query_hash, row, 'text')
  event_data.offset = self._GetRowValue(query_hash, row, '_id')
  event_data.query = query
  event_data.message_status = self._GetRowValue(query_hash, row, 'status')
  event_data.message_type = self._GetRowValue(query_hash, row, 'type')

  timestamp = self._GetRowValue(query_hash, row, 'timestamp')
  date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
      timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_CREATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25649
SharedElasticsearchOutputModule._Connect
train
def _Connect(self):
  """Connects to an Elasticsearch server."""
  elastic_host = {'host': self._host, 'port': self._port}

  if self._url_prefix:
    elastic_host['url_prefix'] = self._url_prefix

  elastic_http_auth = None
  if self._username is not None:
    elastic_http_auth = (self._username, self._password)

  self._client = elasticsearch.Elasticsearch(
      [elastic_host], http_auth=elastic_http_auth, use_ssl=self._use_ssl,
      ca_certs=self._ca_certs)

  logger.debug(
      ('Connected to Elasticsearch server: {0:s} port: {1:d} '
       'URL prefix {2!s}.').format(self._host, self._port, self._url_prefix))
python
{ "resource": "" }
q25650
SharedElasticsearchOutputModule._CreateIndexIfNotExists
train
def _CreateIndexIfNotExists(self, index_name, mappings):
  """Creates an Elasticsearch index if it does not exist.

  Args:
    index_name (str): name of the index.
    mappings (dict[str, object]): mappings of the index.

  Raises:
    RuntimeError: if the Elasticsearch index cannot be created.
  """
  try:
    if not self._client.indices.exists(index_name):
      self._client.indices.create(
          body={'mappings': mappings}, index=index_name)
  except elasticsearch.exceptions.ConnectionError as exception:
    raise RuntimeError(
        'Unable to create Elasticsearch index with error: {0!s}'.format(
            exception))
python
{ "resource": "" }
q25651
SharedElasticsearchOutputModule._FlushEvents
train
def _FlushEvents(self):
  """Inserts the buffered event documents into Elasticsearch."""
  try:
    # pylint: disable=unexpected-keyword-arg
    # pylint does not recognize request_timeout as a valid kwarg. According
    # to http://elasticsearch-py.readthedocs.io/en/master/api.html#timeout
    # it should be supported.
    self._client.bulk(
        body=self._event_documents, doc_type=self._document_type,
        index=self._index_name,
        request_timeout=self._DEFAULT_REQUEST_TIMEOUT)

  except ValueError as exception:
    # Ignore problematic events
    logger.warning('Unable to bulk insert with error: {0!s}'.format(
        exception))

  logger.debug('Inserted {0:d} events into Elasticsearch'.format(
      self._number_of_buffered_events))

  self._event_documents = []
  self._number_of_buffered_events = 0
python
{ "resource": "" }
q25652
SharedElasticsearchOutputModule._GetSanitizedEventValues
train
def _GetSanitizedEventValues(self, event):
  """Sanitizes the event for use in Elasticsearch.

  The event values need to be sanitized to prevent certain values from
  causing problems when indexing with Elasticsearch. For example the path
  specification is a nested dictionary which will cause problems for
  Elasticsearch automatic indexing.

  Args:
    event (EventObject): event.

  Returns:
    dict[str, object]: sanitized event values.

  Raises:
    NoFormatterFound: if no event formatter can be found to match the data
        type in the event.
  """
  event_values = {}
  for attribute_name, attribute_value in event.GetAttributes():
    # Ignore the regvalue attribute as it causes issues when indexing.
    if attribute_name == 'regvalue':
      continue

    if attribute_name == 'pathspec':
      try:
        attribute_value = JsonPathSpecSerializer.WriteSerialized(
            attribute_value)
      except TypeError:
        continue
    event_values[attribute_name] = attribute_value

  # Add a string representation of the timestamp.
  try:
    attribute_value = timelib.Timestamp.RoundToSeconds(event.timestamp)
  except TypeError as exception:
    logger.warning((
        'Unable to round timestamp {0!s}. error: {1!s}. '
        'Defaulting to 0').format(event.timestamp, exception))
    attribute_value = 0

  attribute_value = timelib.Timestamp.CopyToIsoFormat(
      attribute_value, timezone=self._output_mediator.timezone)
  event_values['datetime'] = attribute_value

  message, _ = self._output_mediator.GetFormattedMessages(event)
  if message is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  event_values['message'] = message

  # Tags needs to be a list for Elasticsearch to index correctly.
  try:
    labels = list(event_values['tag'].labels)
  except (KeyError, AttributeError):
    labels = []
  event_values['tag'] = labels

  source_short, source = self._output_mediator.GetFormattedSources(event)
  if source is None or source_short is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  event_values['source_short'] = source_short
  event_values['source_long'] = source

  return event_values
python
{ "resource": "" }
q25653
SharedElasticsearchOutputModule._InsertEvent
train
def _InsertEvent(self, event, force_flush=False):
  """Inserts an event.

  Events are buffered in the form of documents and inserted to Elasticsearch
  when either forced to flush or when the flush interval (threshold) has
  been reached.

  Args:
    event (EventObject): event.
    force_flush (bool): True if buffered event documents should be inserted
        into Elasticsearch.
  """
  if event:
    event_document = {'index': {
        '_index': self._index_name, '_type': self._document_type}}
    event_values = self._GetSanitizedEventValues(event)

    self._event_documents.append(event_document)
    self._event_documents.append(event_values)
    self._number_of_buffered_events += 1

  if force_flush or self._number_of_buffered_events > self._flush_interval:
    self._FlushEvents()
python
{ "resource": "" }
q25654
SharedElasticsearchOutputModule.SetDocumentType
train
def SetDocumentType(self, document_type):
  """Sets the document type.

  Args:
    document_type (str): document type.
  """
  self._document_type = document_type
  logger.debug('Elasticsearch document type: {0:s}'.format(document_type))
python
{ "resource": "" }
q25655
SharedElasticsearchOutputModule.SetFlushInterval
train
def SetFlushInterval(self, flush_interval):
  """Sets the flush interval.

  Args:
    flush_interval (int): number of events to buffer before doing a bulk
        insert.
  """
  self._flush_interval = flush_interval
  logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))
python
{ "resource": "" }
q25656
SharedElasticsearchOutputModule.SetIndexName
train
def SetIndexName(self, index_name):
  """Sets the index name.

  Args:
    index_name (str): name of the index.
  """
  self._index_name = index_name
  logger.debug('Elasticsearch index name: {0:s}'.format(index_name))
python
{ "resource": "" }
q25657
SharedElasticsearchOutputModule.SetServerInformation
train
def SetServerInformation(self, server, port):
  """Sets the server information.

  Args:
    server (str): IP address or hostname of the server.
    port (int): port number of the server.
  """
  self._host = server
  self._port = port
  logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(
      server, port))
python
{ "resource": "" }
q25658
SharedElasticsearchOutputModule.SetUsername
train
def SetUsername(self, username):
  """Sets the username.

  Args:
    username (str): username to authenticate with.
  """
  self._username = username
  logger.debug('Elasticsearch username: {0!s}'.format(username))
python
{ "resource": "" }
q25659
SharedElasticsearchOutputModule.SetUseSSL
train
def SetUseSSL(self, use_ssl):
  """Sets the use of SSL.

  Args:
    use_ssl (bool): enforces use of SSL.
  """
  self._use_ssl = use_ssl
  logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))
python
{ "resource": "" }
q25660
SharedElasticsearchOutputModule.SetCACertificatesPath
train
def SetCACertificatesPath(self, ca_certificates_path): """Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist. """ if not ca_certificates_path: return if not os.path.exists(ca_certificates_path): raise errors.BadConfigOption( 'No such certificate file: {0:s}.'.format(ca_certificates_path)) self._ca_certs = ca_certificates_path logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))
python
{ "resource": "" }
q25661
LsQuarantinePlugin.ParseLSQuarantineRow
train
def ParseLSQuarantineRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a launch services quarantine event row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = LsQuarantineEventData() event_data.agent = self._GetRowValue(query_hash, row, 'Agent') event_data.data = self._GetRowValue(query_hash, row, 'Data') event_data.query = query event_data.url = self._GetRowValue(query_hash, row, 'URL') timestamp = self._GetRowValue(query_hash, row, 'Time') date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25662
MacOSSecuritydLogParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, line): """Verify that this file is a securityd log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not. """ self._last_month = 0 self._year_use = parser_mediator.GetEstimatedYear() try: structure = self.SECURITYD_LINE.parseString(line) except pyparsing.ParseException: logger.debug('Not a MacOS securityd log file') return False time_elements_tuple = self._GetTimeElementsTuple(structure) try: dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: logger.debug( 'Not a MacOS securityd log file, invalid date and time: {0!s}'.format( structure.date_time)) return False self._last_month = time_elements_tuple[1] return True
python
{ "resource": "" }
q25663
FormatterMediator._GetWinevtRcDatabaseReader
train
def _GetWinevtRcDatabaseReader(self): """Opens the Windows Event Log resource database reader. Returns: WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource database reader or None. """ if not self._winevt_database_reader and self._data_location: database_path = os.path.join( self._data_location, self._WINEVT_RC_DATABASE) if not os.path.isfile(database_path): return None self._winevt_database_reader = ( winevt_rc.WinevtResourcesSqlite3DatabaseReader()) if not self._winevt_database_reader.Open(database_path): self._winevt_database_reader = None return self._winevt_database_reader
python
{ "resource": "" }
q25664
FormatterMediator.GetWindowsEventMessage
train
def GetWindowsEventMessage(self, log_source, message_identifier): """Retrieves the message string for a specific Windows Event Log source. Args: log_source (str): Event Log source, such as "Application Error". message_identifier (int): message identifier. Returns: str: message string or None if not available. """ database_reader = self._GetWinevtRcDatabaseReader() if not database_reader: return None if self._lcid != self.DEFAULT_LCID: message_string = database_reader.GetMessage( log_source, self.lcid, message_identifier) if message_string: return message_string return database_reader.GetMessage( log_source, self.DEFAULT_LCID, message_identifier)
python
{ "resource": "" }
q25665
FormatterMediator.SetPreferredLanguageIdentifier
train
def SetPreferredLanguageIdentifier(self, language_identifier): """Sets the preferred language identifier. Args: language_identifier (str): language identifier string such as "en-US" for US English or "is-IS" for Icelandic. Raises: KeyError: if the language identifier is not defined. ValueError: if the language identifier is not a string type. """ if not isinstance(language_identifier, py2to3.STRING_TYPES): raise ValueError('Language identifier is not a string.') values = language_ids.LANGUAGE_IDENTIFIERS.get( language_identifier.lower(), None) if not values: raise KeyError('Language identifier: {0:s} is not defined.'.format( language_identifier)) self._language_identifier = language_identifier self._lcid = values[0]
python
{ "resource": "" }
q25666
ArgumentHelperManager.AddCommandLineArguments
train
def AddCommandLineArguments( cls, argument_group, category=None, names=None): """Adds command line arguments to a configuration object. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. category (Optional[str]): category of helpers to apply to the group, such as storage, output, where None will apply the arguments to all helpers. The category can be used to add arguments to a specific group of registered helpers. names (Optional[list[str]]): names of argument helpers to apply, where None will apply the arguments to all helpers. """ # Process the helper classes in alphabetical order this is needed to # keep the argument order consistent. for helper_name, helper_class in sorted(cls._helper_classes.items()): if ((category and helper_class.CATEGORY != category) or (names and helper_name not in names)): continue helper_class.AddArguments(argument_group)
python
{ "resource": "" }
q25667
ArgumentHelperManager.DeregisterHelper
train
def DeregisterHelper(cls, helper_class): """Deregisters a helper class. The helper classes are identified based on their lower case name. Args: helper_class (type): class object of the argument helper. Raises: KeyError: if helper class is not set for the corresponding name. """ helper_name = helper_class.NAME.lower() if helper_name not in cls._helper_classes: raise KeyError('Helper class not set for name: {0:s}.'.format( helper_class.NAME)) del cls._helper_classes[helper_name]
python
{ "resource": "" }
q25668
ArgumentHelperManager.ParseOptions
train
def ParseOptions(cls, options, config_object, category=None, names=None): """Parses and validates arguments using the appropriate helpers. Args: options (argparse.Namespace): parser options. config_object (object): object to be configured by an argument helper. category (Optional[str]): category of helpers to apply to the group, such as storage, output, where None will apply the arguments to all helpers. The category can be used to add arguments to a specific group of registered helpers. names (Optional[list[str]]): names of argument helpers to apply, where None will apply the arguments to all helpers. """ for helper_name, helper_class in cls._helper_classes.items(): if ((category and helper_class.CATEGORY != category) or (names and helper_name not in names)): continue try: helper_class.ParseOptions(options, config_object) except errors.BadConfigObject: pass
python
{ "resource": "" }
q25669
ViperAnalyzer._QueryHash
train
def _QueryHash(self, digest):
  """Queries the Viper Server for a specific hash.

  Args:
    digest (str): hash to look up.

  Returns:
    dict[str, object]: JSON response or None on error.
  """
  if not self._url:
    self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
        self._protocol, self._host, self._port)

  request_data = {self.lookup_hash: digest}

  try:
    json_response = self.MakeRequestAndDecodeJSON(
        self._url, 'POST', data=request_data)

  except errors.ConnectionError as exception:
    json_response = None
    logger.error('Unable to query Viper with error: {0!s}.'.format(
        exception))

  return json_response
python
{ "resource": "" }
q25670
ViperAnalyzer.Analyze
train
def Analyze(self, hashes): """Looks up hashes in Viper using the Viper HTTP API. Args: hashes (list[str]): hashes to look up. Returns: list[HashAnalysis]: hash analysis. Raises: RuntimeError: If no host has been set for Viper. """ hash_analyses = [] for digest in hashes: json_response = self._QueryHash(digest) hash_analysis = interface.HashAnalysis(digest, json_response) hash_analyses.append(hash_analysis) return hash_analyses
python
{ "resource": "" }
q25671
KeychainParser._ReadAttributeValueDateTime
train
def _ReadAttributeValueDateTime( self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset): """Reads a date time attribute value. Args: attribute_values_data (bytes): attribute values data. record_offset (int): offset of the record relative to the start of the file. attribute_values_data_offset (int): offset of the attribute values data relative to the start of the record. attribute_value_offset (int): offset of the attribute relative to the start of the record. Returns: str: date and time values. Raises: ParseError: if the attribute value cannot be read. """ if attribute_value_offset == 0: return None data_type_map = self._GetDataTypeMap('keychain_date_time') file_offset = ( record_offset + attribute_values_data_offset + attribute_value_offset) attribute_value_offset -= attribute_values_data_offset + 1 attribute_value_data = attribute_values_data[attribute_value_offset:] try: date_time_attribute_value = self._ReadStructureFromByteStream( attribute_value_data, file_offset, data_type_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map date time attribute value data at offset: 0x{0:08x} ' 'with error: {1!s}').format(file_offset, exception)) return date_time_attribute_value.date_time.rstrip('\x00')
python
{ "resource": "" }
q25672
KeychainParser._ReadAttributeValueInteger
train
def _ReadAttributeValueInteger( self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset): """Reads an integer attribute value. Args: attribute_values_data (bytes): attribute values data. record_offset (int): offset of the record relative to the start of the file. attribute_values_data_offset (int): offset of the attribute values data relative to the start of the record. attribute_value_offset (int): offset of the attribute relative to the start of the record. Returns: int: integer value or None if attribute value offset is not set. Raises: ParseError: if the attribute value cannot be read. """ if attribute_value_offset == 0: return None data_type_map = self._GetDataTypeMap('uint32be') file_offset = ( record_offset + attribute_values_data_offset + attribute_value_offset) attribute_value_offset -= attribute_values_data_offset + 1 attribute_value_data = attribute_values_data[attribute_value_offset:] try: return self._ReadStructureFromByteStream( attribute_value_data, file_offset, data_type_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map integer attribute value data at offset: 0x{0:08x} ' 'with error: {1!s}').format(file_offset, exception))
python
{ "resource": "" }
q25673
KeychainParser._ReadAttributeValueString
train
def _ReadAttributeValueString( self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset): """Reads a string attribute value. Args: attribute_values_data (bytes): attribute values data. record_offset (int): offset of the record relative to the start of the file. attribute_values_data_offset (int): offset of the attribute values data relative to the start of the record. attribute_value_offset (int): offset of the attribute relative to the start of the record. Returns: str: string value or None if attribute value offset is not set. Raises: ParseError: if the attribute value cannot be read. """ if attribute_value_offset == 0: return None data_type_map = self._GetDataTypeMap('keychain_string') file_offset = ( record_offset + attribute_values_data_offset + attribute_value_offset) attribute_value_offset -= attribute_values_data_offset + 1 attribute_value_data = attribute_values_data[attribute_value_offset:] try: string_attribute_value = self._ReadStructureFromByteStream( attribute_value_data, file_offset, data_type_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map string attribute value data at offset: 0x{0:08x} ' 'with error: {1!s}').format(file_offset, exception)) return string_attribute_value.string
python
{ "resource": "" }
q25674
KeychainParser._ReadFileHeader
train
def _ReadFileHeader(self, file_object):
  """Reads the file header.

  Args:
    file_object (file): file-like object.

  Returns:
    keychain_file_header: file header.

  Raises:
    ParseError: if the file header cannot be read.
  """
  data_type_map = self._GetDataTypeMap('keychain_file_header')

  file_header, _ = self._ReadStructureFromFileObject(
      file_object, 0, data_type_map)

  if file_header.signature != self._FILE_SIGNATURE:
    raise errors.ParseError('Unsupported file signature.')

  if (file_header.major_format_version != self._MAJOR_VERSION or
      file_header.minor_format_version != self._MINOR_VERSION):
    raise errors.ParseError('Unsupported format version: {0:d}.{1:d}'.format(
        file_header.major_format_version, file_header.minor_format_version))

  return file_header
python
{ "resource": "" }
q25675
KeychainParser._ReadRecord
train
def _ReadRecord(self, tables, file_object, record_offset, record_type): """Reads the record. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. record_type (int): record type, which should correspond to a relation identifier of a table defined in the schema. Raises: ParseError: if the record cannot be read. """ table = tables.get(record_type, None) if not table: raise errors.ParseError( 'Missing table for relation identifier: 0x{0:08}'.format(record_type)) record_header = self._ReadRecordHeader(file_object, record_offset) record = collections.OrderedDict() if table.columns: attribute_value_offsets = self._ReadRecordAttributeValueOffset( file_object, record_offset + 24, len(table.columns)) file_offset = file_object.tell() record_data_offset = file_offset - record_offset record_data_size = record_header.data_size - (file_offset - record_offset) record_data = file_object.read(record_data_size) if record_header.key_data_size > 0: record['_key_'] = record_data[:record_header.key_data_size] if table.columns: for index, column in enumerate(table.columns): attribute_data_read_function = self._ATTRIBUTE_DATA_READ_FUNCTIONS.get( column.attribute_data_type, None) if attribute_data_read_function: attribute_data_read_function = getattr( self, attribute_data_read_function, None) if not attribute_data_read_function: attribute_value = None else: attribute_value = attribute_data_read_function( record_data, record_offset, record_data_offset, attribute_value_offsets[index]) record[column.attribute_name] = attribute_value table.records.append(record)
python
{ "resource": "" }
q25676
KeychainParser._ReadRecordAttributeValueOffset
train
def _ReadRecordAttributeValueOffset( self, file_object, file_offset, number_of_attribute_values): """Reads the record attribute value offsets. Args: file_object (file): file-like object. file_offset (int): offset of the record attribute values offsets relative to the start of the file. number_of_attribute_values (int): number of attribute values. Returns: keychain_record_attribute_value_offsets: record attribute value offsets. Raises: ParseError: if the record attribute value offsets cannot be read. """ offsets_data_size = number_of_attribute_values * 4 offsets_data = file_object.read(offsets_data_size) context = dtfabric_data_maps.DataTypeMapContext(values={ 'number_of_attribute_values': number_of_attribute_values}) data_type_map = self._GetDataTypeMap( 'keychain_record_attribute_value_offsets') try: attribute_value_offsets = self._ReadStructureFromByteStream( offsets_data, file_offset, data_type_map, context=context) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map record attribute value offsets data at offset: ' '0x{0:08x} with error: {1!s}').format(file_offset, exception)) return attribute_value_offsets
python
{ "resource": "" }
q25677
KeychainParser._ReadRecordHeader
train
def _ReadRecordHeader(self, file_object, record_header_offset): """Reads the record header. Args: file_object (file): file-like object. record_header_offset (int): offset of the record header relative to the start of the file. Returns: keychain_record_header: record header. Raises: ParseError: if the record header cannot be read. """ data_type_map = self._GetDataTypeMap('keychain_record_header') record_header, _ = self._ReadStructureFromFileObject( file_object, record_header_offset, data_type_map) return record_header
python
{ "resource": "" }
q25678
KeychainParser._ReadTable
train
def _ReadTable(self, tables, file_object, table_offset): """Reads the table. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. table_offset (int): offset of the table relative to the start of the file. Raises: ParseError: if the table cannot be read. """ table_header = self._ReadTableHeader(file_object, table_offset) for record_offset in table_header.record_offsets: if record_offset == 0: continue record_offset += table_offset if table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO: self._ReadRecordSchemaInformation(tables, file_object, record_offset) elif table_header.record_type == ( self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES): self._ReadRecordSchemaIndexes(tables, file_object, record_offset) elif table_header.record_type == ( self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES): self._ReadRecordSchemaAttributes(tables, file_object, record_offset) else: self._ReadRecord( tables, file_object, record_offset, table_header.record_type)
python
{ "resource": "" }
q25679
KeychainParser._ReadTableHeader
train
def _ReadTableHeader(self, file_object, table_header_offset):
  """Reads the table header.

  Args:
    file_object (file): file-like object.
    table_header_offset (int): offset of the table header relative to
        the start of the file.

  Returns:
    keychain_table_header: table header.

  Raises:
    ParseError: if the table header cannot be read.
  """
  data_type_map = self._GetDataTypeMap('keychain_table_header')

  table_header, _ = self._ReadStructureFromFileObject(
      file_object, table_header_offset, data_type_map)

  return table_header
python
{ "resource": "" }
q25680
KeychainParser._ReadTablesArray
train
def _ReadTablesArray(self, file_object, tables_array_offset): """Reads the tables array. Args: file_object (file): file-like object. tables_array_offset (int): offset of the tables array relative to the start of the file. Returns: dict[int, KeychainDatabaseTable]: tables per identifier. Raises: ParseError: if the tables array cannot be read. """ # TODO: implement https://github.com/libyal/dtfabric/issues/12 and update # keychain_tables_array definition. data_type_map = self._GetDataTypeMap('keychain_tables_array') tables_array, _ = self._ReadStructureFromFileObject( file_object, tables_array_offset, data_type_map) tables = collections.OrderedDict() for table_offset in tables_array.table_offsets: self._ReadTable(tables, file_object, tables_array_offset + table_offset) return tables
python
{ "resource": "" }
q25681
KeychainParser._ParseDateTimeValue
train
def _ParseDateTimeValue(self, parser_mediator, date_time_value):
  """Parses a date time value.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    date_time_value (str): date time value
        (CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".

  Returns:
    dfdatetime.TimeElements: date and time extracted from the value or None
        if the value does not represent a valid date and time value.
  """
  if date_time_value[14] != 'Z':
    parser_mediator.ProduceExtractionWarning(
        'invalid date and time value: {0!s}'.format(date_time_value))
    return None

  try:
    year = int(date_time_value[0:4], 10)
    month = int(date_time_value[4:6], 10)
    day_of_month = int(date_time_value[6:8], 10)
    hours = int(date_time_value[8:10], 10)
    minutes = int(date_time_value[10:12], 10)
    seconds = int(date_time_value[12:14], 10)
  except (TypeError, ValueError):
    parser_mediator.ProduceExtractionWarning(
        'invalid date and time value: {0!s}'.format(date_time_value))
    return None

  time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)

  try:
    return dfdatetime_time_elements.TimeElements(
        time_elements_tuple=time_elements_tuple)
  except ValueError:
    parser_mediator.ProduceExtractionWarning(
        'invalid date and time value: {0!s}'.format(date_time_value))
    return None
python
{ "resource": "" }
q25682
KeychainParser._ParseBinaryDataAsString
train
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):
  """Parses a binary data value as a string.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    binary_data_value (bytes): binary data value
        (CSSM_DB_ATTRIBUTE_FORMAT_BLOB).

  Returns:
    str: binary data value formatted as a string or None if no string could
        be extracted or the binary data value is None (NULL).
  """
  if not binary_data_value:
    return None

  try:
    return binary_data_value.decode('utf-8')
  except UnicodeDecodeError:
    parser_mediator.ProduceExtractionWarning(
        'invalid binary data string value: {0:s}'.format(
            repr(binary_data_value)))
    return None
python
{ "resource": "" }
q25683
KeychainParser._ParseApplicationPasswordRecord
train
def _ParseApplicationPasswordRecord(self, parser_mediator, record):
  """Extracts the information from an application password record.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    record (dict[str, object]): database record.

  Raises:
    ParseError: if the application password record cannot be parsed.
  """
  key = record.get('_key_', None)
  if not key or not key.startswith(b'ssgp'):
    raise errors.ParseError((
        'Unsupported application password record key value does not start '
        'with: "ssgp".'))

  event_data = KeychainApplicationRecordEventData()
  event_data.account_name = self._ParseBinaryDataAsString(
      parser_mediator, record['acct'])
  event_data.comments = self._ParseBinaryDataAsString(
      parser_mediator, record['crtr'])
  event_data.entry_name = self._ParseBinaryDataAsString(
      parser_mediator, record['PrintName'])
  ssgp_hash = codecs.encode(key[4:], 'hex')
  event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
  event_data.text_description = self._ParseBinaryDataAsString(
      parser_mediator, record['desc'])

  date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
  if date_time:
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
  if date_time:
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25684
KeychainParser._ParseInternetPasswordRecord
train
def _ParseInternetPasswordRecord(self, parser_mediator, record): """Extracts the information from an Internet password record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record (dict[str, object]): database record. Raises: ParseError: if Internet password record cannot be parsed. """ key = record.get('_key_', None) if not key or not key.startswith(b'ssgp'): raise errors.ParseError(( 'Unsupported Internet password record key value does not start ' 'with: "ssgp".')) protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex') protocol_string = codecs.decode(protocol_string, 'utf-8') event_data = KeychainInternetRecordEventData() event_data.account_name = self._ParseBinaryDataAsString( parser_mediator, record['acct']) event_data.comments = self._ParseBinaryDataAsString( parser_mediator, record['crtr']) event_data.entry_name = self._ParseBinaryDataAsString( parser_mediator, record['PrintName']) event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get( protocol_string, protocol_string) ssgp_hash = codecs.encode(key[4:], 'hex') event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8') event_data.text_description = self._ParseBinaryDataAsString( parser_mediator, record['desc']) event_data.type_protocol = self._ParseBinaryDataAsString( parser_mediator, record['atyp']) event_data.where = self._ParseBinaryDataAsString( parser_mediator, record['srvr']) date_time = self._ParseDateTimeValue(parser_mediator, record['cdat']) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) date_time = self._ParseDateTimeValue(parser_mediator, record['mdat']) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25685
KeychainParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object): """Parses a MacOS keychain file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ try: file_header = self._ReadFileHeader(file_object) except (ValueError, errors.ParseError): raise errors.UnableToParseFile('Unable to parse file header.') tables = self._ReadTablesArray(file_object, file_header.tables_array_offset) table = tables.get(self._RECORD_TYPE_APPLICATION_PASSWORD, None) if table: for record in table.records: self._ParseApplicationPasswordRecord(parser_mediator, record) table = tables.get(self._RECORD_TYPE_INTERNET_PASSWORD, None) if table: for record in table.records: self._ParseInternetPasswordRecord(parser_mediator, record)
python
{ "resource": "" }
q25686
Shared4n6TimeOutputModule._FormatDateTime
train
def _FormatDateTime(self, event): """Formats the date and time. Args: event (EventObject): event. Returns: str: date and time string or "N/A" if no event timestamp is available. """ if not event.timestamp: return 'N/A' # TODO: preserve dfdatetime as an object. # TODO: add support for self._output_mediator.timezone date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=event.timestamp) year, month, day_of_month = date_time.GetDate() hours, minutes, seconds = date_time.GetTimeOfDay() try: return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format( year, month, day_of_month, hours, minutes, seconds) except (TypeError, ValueError): self._ReportEventError(event, ( 'unable to copy timestamp: {0!s} to a human readable date and ' 'time. Defaulting to: "0000-00-00 00:00:00"').format(event.timestamp)) return '0000-00-00 00:00:00'
python
{ "resource": "" }
q25687
SQLite4n6TimeOutputModule._GetDistinctValues
train
def _GetDistinctValues(self, field_name):
  """Queries the database for unique field types.

  Args:
    field_name (str): name of the field to retrieve.

  Returns:
    dict[str, int]: counts of field types by name.
  """
  self._cursor.execute(
      'SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(
          field_name))

  result = {}
  row = self._cursor.fetchone()
  while row:
    if row[0]:
      result[row[0]] = row[1]

    row = self._cursor.fetchone()

  return result
python
{ "resource": "" }
q25688
SQLite4n6TimeOutputModule._ListTags
train
def _ListTags(self): """Query database for unique tag types.""" all_tags = [] self._cursor.execute('SELECT DISTINCT tag FROM log2timeline') # This cleans up the messy SQL return. tag_row = self._cursor.fetchone() while tag_row: tag_string = tag_row[0] if tag_string: tags = tag_string.split(',') for tag in tags: if tag not in all_tags: all_tags.append(tag) tag_row = self._cursor.fetchone() # TODO: make this method an iterator. return all_tags
python
{ "resource": "" }
q25689
SQLite4n6TimeOutputModule.Close
train
def Close(self): """Disconnects from the database. This method will create the necessary indices and commit outstanding transactions before disconnecting. """ # Build up indices for the fields specified in the args. # It will commit the inserts automatically before creating index. if not self._append: for field_name in self._fields: query = 'CREATE INDEX {0:s}_idx ON log2timeline ({0:s})'.format( field_name) self._cursor.execute(query) if self._set_status: self._set_status('Created index: {0:s}'.format(field_name)) # Get meta info and save into their tables. if self._set_status: self._set_status('Creating metadata...') for field in self._META_FIELDS: values = self._GetDistinctValues(field) self._cursor.execute('DELETE FROM l2t_{0:s}s'.format(field)) for name, frequency in iter(values.items()): self._cursor.execute(( 'INSERT INTO l2t_{0:s}s ({0:s}s, frequency) ' 'VALUES("{1:s}", {2:d}) ').format(field, name, frequency)) self._cursor.execute('DELETE FROM l2t_tags') for tag in self._ListTags(): self._cursor.execute('INSERT INTO l2t_tags (tag) VALUES (?)', [tag]) if self._set_status: self._set_status('Database created.') self._connection.commit() self._cursor.close() self._connection.close() self._cursor = None self._connection = None
python
{ "resource": "" }
q25690
FilterFile.BuildFindSpecs
train
def BuildFindSpecs(self, environment_variables=None): """Build find specification from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specification. """ path_attributes = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.lower() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue # Remove the drive letter. if len(attribute_value) > 2 and attribute_value[1] == ':': _, _, attribute_value = attribute_value.rpartition(':') if attribute_value.startswith('\\'): attribute_value = attribute_value.replace('\\', '/') path_attributes[attribute_name] = attribute_value find_specs = [] with open(self._path, 'r') as file_object: for line in file_object: line = line.strip() if line.startswith('#'): continue if path_attributes: try: line = line.format(**path_attributes) except KeyError as exception: logger.error(( 'Unable to expand path filter: {0:s} with error: ' '{1!s}').format(line, exception)) continue if not line.startswith('/'): logger.warning(( 'The path filter must be defined as an absolute path: ' '{0:s}').format(line)) continue # Convert the path filters into a list of path segments and strip # the root path segment. path_segments = line.split('/') path_segments.pop(0) if not path_segments[-1]: logger.warning( 'Empty last path segment in path filter: {0:s}'.format(line)) continue find_spec = file_system_searcher.FindSpec( location_regex=path_segments, case_sensitive=False) find_specs.append(find_spec) return find_specs
python
{ "resource": "" }
q25691
ParsersManager._GetParserFilters
train
def _GetParserFilters(cls, parser_filter_expression): """Retrieves the parsers and plugins to include and exclude. Takes a comma separated string and splits it up into two dictionaries, of parsers and plugins to include and to exclude from selection. If a particular filter is prepended with an exclamation point it will be added to the exclude section, otherwise in the include. Args: parser_filter_expression (str): parser filter expression, where None represents all parsers and plugins. Returns: tuple: containing: * dict[str, BaseParser]: included parsers and plugins by name. * dict[str, BaseParser]: excluded parsers and plugins by name. """ if not parser_filter_expression: return {}, {} includes = {} excludes = {} preset_names = cls._presets.GetNames() for parser_filter in parser_filter_expression.split(','): parser_filter = parser_filter.strip() if not parser_filter: continue if parser_filter.startswith('!'): parser_filter = parser_filter[1:] active_dict = excludes else: active_dict = includes parser_filter = parser_filter.lower() if parser_filter in preset_names: for parser_in_category in cls._GetParsersFromPresetCategory( parser_filter): parser, _, plugin = parser_in_category.partition('/') active_dict.setdefault(parser, []) if plugin: active_dict[parser].append(plugin) else: parser, _, plugin = parser_filter.partition('/') active_dict.setdefault(parser, []) if plugin: active_dict[parser].append(plugin) cls._ReduceParserFilters(includes, excludes) return includes, excludes
python
{ "resource": "" }
q25692
ParsersManager._GetParsersFromPresetCategory
train
def _GetParsersFromPresetCategory(cls, category): """Retrieves the parser names of specific preset category. Args: category (str): parser preset categories. Returns: list[str]: parser names in alphabetical order. """ preset_definition = cls._presets.GetPresetByName(category) if preset_definition is None: return [] preset_names = cls._presets.GetNames() parser_names = set() for element_name in preset_definition.parsers: if element_name in preset_names: category_parser_names = cls._GetParsersFromPresetCategory(element_name) parser_names.update(category_parser_names) else: parser_names.add(element_name) return sorted(parser_names)
python
{ "resource": "" }
q25693
ParsersManager._ReduceParserFilters
train
def _ReduceParserFilters(cls, includes, excludes):
  """Reduces the parsers and plugins to include and exclude.

  If an intersection is found, the parser or plugin is removed from
  the inclusion set. If a parser is not in the inclusion set there is no
  need to have it in the exclusion set.

  Args:
    includes (dict[str, BaseParser]): included parsers and plugins by name.
    excludes (dict[str, BaseParser]): excluded parsers and plugins by name.
  """
  if not includes or not excludes:
    return

  for parser_name in set(includes).intersection(excludes):
    # Check parser and plugin list for exact equivalence.
    if includes[parser_name] == excludes[parser_name]:
      logger.warning(
          'Parser {0:s} was in both the inclusion and exclusion lists. '
          'Ignoring included parser.'.format(parser_name))
      includes.pop(parser_name)
      continue

    # Remove plugins that are defined in both the inclusion and exclusion
    # lists.
    plugin_includes = includes[parser_name]
    plugin_excludes = excludes[parser_name]
    intersection = set(plugin_includes).intersection(plugin_excludes)
    if not intersection:
      continue

    logger.warning(
        'Parser {0:s} plugins: {1:s} in both the inclusion and exclusion '
        'lists. Ignoring included plugins.'.format(
            parser_name, ', '.join(intersection)))
    plugins_list = list(set(plugin_includes).difference(intersection))
    includes[parser_name] = plugins_list

  # Remove excluded parsers that do not run.
  parsers_to_pop = []
  for parser_name in excludes:
    if parser_name in includes:
      continue

    logger.warning(
        'The excluded parser: {0:s} is not associated with the included '
        'parsers: {1:s}. Ignoring excluded parser.'.format(
            parser_name, ', '.join(includes.keys())))

    parsers_to_pop.append(parser_name)

  for parser_name in parsers_to_pop:
    excludes.pop(parser_name)
python
{ "resource": "" }
q25694
ParsersManager.CreateSignatureScanner
train
def CreateSignatureScanner(cls, specification_store): """Creates a signature scanner for format specifications with signatures. Args: specification_store (FormatSpecificationStore): format specifications with signatures. Returns: pysigscan.scanner: signature scanner. """ scanner_object = pysigscan.scanner() for format_specification in specification_store.specifications: for signature in format_specification.signatures: pattern_offset = signature.offset if pattern_offset is None: signature_flags = pysigscan.signature_flags.NO_OFFSET elif pattern_offset < 0: pattern_offset *= -1 signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END else: signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START scanner_object.add_signature( signature.identifier, pattern_offset, signature.pattern, signature_flags) return scanner_object
python
{ "resource": "" }
q25695
ParsersManager.GetFormatsWithSignatures
train
def GetFormatsWithSignatures(cls, parser_filter_expression=None): """Retrieves the format specifications that have signatures. This method will create a specification store for parsers that define a format specification with signatures and a list of parser names for those that do not. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: tuple: containing: * FormatSpecificationStore: format specifications with signatures. * list[str]: names of parsers that do not have format specifications with signatures, or have signatures but also need to be applied 'brute force'. """ specification_store = specification.FormatSpecificationStore() remainder_list = [] for parser_name, parser_class in cls.GetParsers( parser_filter_expression=parser_filter_expression): format_specification = parser_class.GetFormatSpecification() if format_specification and format_specification.signatures: specification_store.AddSpecification(format_specification) # The plist parser is a special case, where it both defines a signature # and also needs to be applied 'brute-force' to non-matching files, # as the signature matches binary plists, but not XML or JSON plists. if parser_name == 'plist': remainder_list.append(parser_name) else: remainder_list.append(parser_name) return specification_store, remainder_list
python
{ "resource": "" }
q25696
ParsersManager.GetNamesOfParsersWithPlugins
train
def GetNamesOfParsersWithPlugins(cls): """Retrieves the names of all parsers with plugins. Returns: list[str]: names of all parsers with plugins. """ parser_names = [] for parser_name, parser_class in cls.GetParsers(): if parser_class.SupportsPlugins(): parser_names.append(parser_name) return sorted(parser_names)
python
{ "resource": "" }
q25697
ParsersManager.GetParserAndPluginNames
train
def GetParserAndPluginNames(cls, parser_filter_expression=None): """Retrieves the parser and parser plugin names. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[str]: parser and parser plugin names. """ parser_and_plugin_names = [] for parser_name, parser_class in cls.GetParsers( parser_filter_expression=parser_filter_expression): parser_and_plugin_names.append(parser_name) if parser_class.SupportsPlugins(): for plugin_name, _ in parser_class.GetPlugins(): parser_and_plugin_names.append( '{0:s}/{1:s}'.format(parser_name, plugin_name)) return parser_and_plugin_names
python
{ "resource": "" }
q25698
ParsersManager.GetParserPluginsInformation
train
def GetParserPluginsInformation(cls, parser_filter_expression=None): """Retrieves the parser plugins information. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[tuple[str, str]]: pairs of parser plugin names and descriptions. """ parser_plugins_information = [] for _, parser_class in cls.GetParsers( parser_filter_expression=parser_filter_expression): if parser_class.SupportsPlugins(): for plugin_name, plugin_class in parser_class.GetPlugins(): description = getattr(plugin_class, 'DESCRIPTION', '') parser_plugins_information.append((plugin_name, description)) return parser_plugins_information
python
{ "resource": "" }
q25699
ParsersManager.GetParserObjectByName
train
def GetParserObjectByName(cls, parser_name): """Retrieves a specific parser object by its name. Args: parser_name (str): name of the parser. Returns: BaseParser: parser object or None. """ parser_class = cls._parser_classes.get(parser_name, None) if parser_class: return parser_class() return None
python
{ "resource": "" }