_id: stringlengths, 2 to 7
title: stringlengths, 1 to 88
partition: stringclasses, 3 values
text: stringlengths, 75 to 19.8k
language: stringclasses, 1 value
meta_information: dict
q25900
OutputManager.GetOutputClass
train
def GetOutputClass(cls, name): """Retrieves the output class for a specific name. Args: name (str): name of the output module. Returns: type: output module class. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string. """ if not isinstance(name, py2to3.STRING_TYPES): raise ValueError('Name attribute is not a string.') name = name.lower() if name not in cls._output_classes: raise KeyError( 'Name: [{0:s}] not registered as an output module.'.format(name)) return cls._output_classes[name]
python
{ "resource": "" }
q25901
OutputManager.GetOutputClasses
train
def GetOutputClasses(cls): """Retrieves the available output classes and their associated names. Yields: tuple[str, type]: output class name and type object. """ for _, output_class in iter(cls._output_classes.items()): yield output_class.NAME, output_class
python
{ "resource": "" }
q25902
OutputManager.HasOutputClass
train
def HasOutputClass(cls, name): """Determines if a specific output class is registered with the manager. Args: name (str): name of the output module. Returns: bool: True if the output class is registered. """ if not isinstance(name, py2to3.STRING_TYPES): return False return name.lower() in cls._output_classes
python
{ "resource": "" }
q25903
OutputManager.IsLinearOutputModule
train
def IsLinearOutputModule(cls, name): """Determines if a specific output class is a linear output module. Args: name (str): name of the output module. Returns: bool: True if the output module is linear. """ name = name.lower() output_class = cls._output_classes.get(name, None) if not output_class: output_class = cls._disabled_output_classes.get(name, None) if output_class: return issubclass(output_class, interface.LinearOutputModule) return False
python
{ "resource": "" }
q25904
OutputManager.NewOutputModule
train
def NewOutputModule(cls, name, output_mediator): """Creates a new output module object for the specified output format. Args: name (str): name of the output module. output_mediator (OutputMediator): output mediator. Returns: OutputModule: output module. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string. """ output_class = cls.GetOutputClass(name) return output_class(output_mediator)
python
{ "resource": "" }
q25905
OutputManager.RegisterOutput
train
def RegisterOutput(cls, output_class, disabled=False): """Registers an output class. The output classes are identified based on their NAME attribute. Args: output_class (type): output module class. disabled (Optional[bool]): True if the output module is disabled, for example because the module did not load correctly. Raises: KeyError: if output class is already set for the corresponding name. """ output_name = output_class.NAME.lower() if disabled: class_dict = cls._disabled_output_classes else: class_dict = cls._output_classes if output_name in class_dict: raise KeyError(( 'Output class already set for name: {0:s}.').format( output_class.NAME)) class_dict[output_name] = output_class
python
{ "resource": "" }
q25906
OutputManager.RegisterOutputs
train
def RegisterOutputs(cls, output_classes, disabled=False): """Registers output classes. The output classes are identified based on their NAME attribute. Args: output_classes (list[type]): output module classes. disabled (Optional[bool]): True if the output modules are disabled, for example because the modules did not load correctly. Raises: KeyError: if output class is already set for the corresponding name. """ for output_class in output_classes: cls.RegisterOutput(output_class, disabled)
python
{ "resource": "" }
q25907
CPUTimeMeasurement.SampleStart
train
def SampleStart(self): """Starts measuring the CPU time.""" self._start_cpu_time = time.clock() self.start_sample_time = time.time() self.total_cpu_time = 0
python
{ "resource": "" }
q25908
CPUTimeMeasurement.SampleStop
train
def SampleStop(self): """Stops measuring the CPU time.""" if self._start_cpu_time is not None: self.total_cpu_time += time.clock() - self._start_cpu_time
python
{ "resource": "" }
q25909
SampleFileProfiler._WritesString
train
def _WritesString(self, content): """Writes a string to the sample file. Args: content (str): content to write to the sample file. """ content_bytes = codecs.encode(content, 'utf-8') self._sample_file.write(content_bytes)
python
{ "resource": "" }
q25910
CPUTimeProfiler.StartTiming
train
def StartTiming(self, profile_name): """Starts timing CPU time. Args: profile_name (str): name of the profile to sample. """ if profile_name not in self._profile_measurements: self._profile_measurements[profile_name] = CPUTimeMeasurement() self._profile_measurements[profile_name].SampleStart()
python
{ "resource": "" }
q25911
CPUTimeProfiler.StopTiming
train
def StopTiming(self, profile_name): """Stops timing CPU time. Args: profile_name (str): name of the profile to sample. """ measurements = self._profile_measurements.get(profile_name) if measurements: measurements.SampleStop() sample = '{0:f}\t{1:s}\t{2:f}\n'.format( measurements.start_sample_time, profile_name, measurements.total_cpu_time) self._WritesString(sample)
python
{ "resource": "" }
q25912
StorageProfiler.Sample
train
def Sample(self, operation, description, data_size, compressed_data_size): """Takes a sample of data read or written for profiling. Args: operation (str): operation, either 'read' or 'write'. description (str): description of the data read or written. data_size (int): size of the data in bytes. compressed_data_size (int): size of the compressed data in bytes. """ sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format( sample_time, operation, description, data_size, compressed_data_size) self._WritesString(sample)
python
{ "resource": "" }
q25913
TaskQueueProfiler.Sample
train
def Sample(self, tasks_status): """Takes a sample of the status of queued tasks for profiling. Args: tasks_status (TasksStatus): status information about tasks. """ sample_time = time.time() sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format( sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks) self._WritesString(sample)
python
{ "resource": "" }
q25914
TasksProfiler.Sample
train
def Sample(self, task, status): """Takes a sample of the status of a task for profiling. Args: task (Task): a task. status (str): status. """ sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\n'.format( sample_time, task.identifier, status) self._WritesString(sample)
python
{ "resource": "" }
q25915
TimesketchOutputModule.Close
train
def Close(self): """Closes the connection to TimeSketch Elasticsearch database. Sends the remaining events for indexing and removes the processing status on the Timesketch search index object. """ super(TimesketchOutputModule, self).Close() with self._timesketch.app_context(): search_index = timesketch_sketch.SearchIndex.query.filter_by( index_name=self._index_name).first() search_index.status.remove(search_index.status[0]) timesketch_db_session.add(search_index) timesketch_db_session.commit()
python
{ "resource": "" }
q25916
TimesketchOutputModule.SetTimelineName
train
def SetTimelineName(self, timeline_name): """Sets the timeline name. Args: timeline_name (str): timeline name. """ self._timeline_name = timeline_name logger.info('Timeline name: {0:s}'.format(self._timeline_name))
python
{ "resource": "" }
q25917
TimesketchOutputModule.SetTimelineOwner
train
def SetTimelineOwner(self, username): """Sets the username of the user that should own the timeline. Args: username (str): username. """ self._timeline_owner = username logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))
python
{ "resource": "" }
q25918
TimesketchOutputModule.WriteHeader
train
def WriteHeader(self): """Sets up the Elasticsearch index and the Timesketch database object. Creates the Elasticsearch index with Timesketch specific settings and the Timesketch SearchIndex database object. """ # This cannot be static because we use the value of self._document_type # from arguments. mappings = { self._document_type: { 'properties': { 'timesketch_label': { 'type': 'nested' } } } } # Get Elasticsearch host and port from Timesketch configuration. with self._timesketch.app_context(): self._host = current_app.config['ELASTIC_HOST'] self._port = current_app.config['ELASTIC_PORT'] self._Connect() self._CreateIndexIfNotExists(self._index_name, mappings) user = None if self._timeline_owner: user = timesketch_user.User.query.filter_by( username=self._timeline_owner).first() if not user: raise RuntimeError( 'Unknown Timesketch user: {0:s}'.format(self._timeline_owner)) else: logger.warning('Timeline will be visible to all Timesketch users') with self._timesketch.app_context(): search_index = timesketch_sketch.SearchIndex.get_or_create( name=self._timeline_name, description=self._timeline_name, user=user, index_name=self._index_name) # Grant the user read permission on the mapping object and set status. # If user is None the timeline becomes visible to all users. search_index.grant_permission(user=user, permission='read') # In case we have a user grant additional permissions. if user: search_index.grant_permission(user=user, permission='write') search_index.grant_permission(user=user, permission='delete') # Let the Timesketch UI know that the timeline is processing. search_index.set_status('processing') # Save the mapping object to the Timesketch database. timesketch_db_session.add(search_index) timesketch_db_session.commit() logger.debug('Adding events to Timesketch.')
python
{ "resource": "" }
q25919
BaseFirefoxCacheParser._ParseHTTPHeaders
train
def _ParseHTTPHeaders(self, header_data, offset, display_name): """Extract relevant information from HTTP header. Args: header_data (bytes): HTTP header data. offset (int): offset of the cache record, relative to the start of the Firefox cache file. display_name (str): display name of the Firefox cache file. Returns: tuple: containing: str: HTTP request method or None if the value cannot be extracted. str: HTTP response code or None if the value cannot be extracted. """ header_string = header_data.decode('ascii', errors='replace') try: http_header_start = header_string.index('request-method') except ValueError: logger.debug('No request method in header: "{0:s}"'.format(header_string)) return None, None # HTTP request and response headers. http_headers = header_string[http_header_start::] header_parts = http_headers.split('\x00') # TODO: check len(header_parts). request_method = header_parts[1] if request_method not in self._REQUEST_METHODS: logger.debug(( '[{0:s}] {1:s}:{2:d}: Unknown HTTP method \'{3:s}\'. Response ' 'headers: \'{4:s}\'').format( self.NAME, display_name, offset, request_method, header_string)) try: response_head_start = http_headers.index('response-head') except ValueError: logger.debug('No response head in header: "{0:s}"'.format(header_string)) return request_method, None # HTTP response headers. response_head = http_headers[response_head_start::] response_head_parts = response_head.split('\x00') # Response code, followed by other response header key-value pairs, # separated by newline. # TODO: check len(response_head_parts). response_head_text = response_head_parts[1] response_head_text_parts = response_head_text.split('\r\n') # The first line contains response code. # TODO: check len(response_head_text_parts). response_code = response_head_text_parts[0] if not response_code.startswith('HTTP'): logger.debug(( '[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. ' 'Response headers: \'{3:s}\'.').format( self.NAME, display_name, offset, header_string)) return request_method, response_code
python
{ "resource": "" }
q25920
FirefoxCacheParser._GetFirefoxConfig
train
def _GetFirefoxConfig(self, file_object, display_name): """Determine cache file block size. Args: file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. Returns: firefox_cache_config: namedtuple containing the block size and first record offset. Raises: UnableToParseFile: if no valid cache record could be found. """ # There ought to be a valid record within the first 4 MiB. We use this # limit to prevent reading large invalid files. to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE) while file_object.get_offset() < to_read: offset = file_object.get_offset() try: cache_entry, _ = self._ReadCacheEntry( file_object, display_name, self._MINIMUM_BLOCK_SIZE) # We have not yet determined the block size, so we use the smallest # possible size. record_size = ( self._CACHE_ENTRY_HEADER_SIZE + cache_entry.request_size + cache_entry.information_size) if record_size >= 4096: # _CACHE_003_ block_size = 4096 elif record_size >= 1024: # _CACHE_002_ block_size = 1024 else: # _CACHE_001_ block_size = 256 return self.FIREFOX_CACHE_CONFIG(block_size, offset) except IOError: logger.debug('[{0:s}] {1:s}:{2:d}: Invalid record.'.format( self.NAME, display_name, offset)) raise errors.UnableToParseFile( 'Could not find a valid cache record. Not a Firefox cache file.')
python
{ "resource": "" }
q25921
FirefoxCacheParser._ReadCacheEntry
train
def _ReadCacheEntry(self, file_object, display_name, block_size): """Reads a cache entry. Args: file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. block_size (int): block size. Returns: tuple: containing: firefox_cache1_entry_header: cache record header structure. FirefoxCacheEventData: event data. Raises: IOError: if the cache record header cannot be validated. OSError: if the cache record header cannot be validated. ParseError: if the cache record header cannot be parsed. """ file_offset = file_object.get_offset() # TODO: merge reading the cache entry header and body by having dtFabric # implement the sanity checks done in _ValidateCacheEntryHeader. # Seeing that this parser tries to read each block for a possible # cache entry, we read the fixed-size values first. cache_entry_header_map = self._GetDataTypeMap('firefox_cache1_entry_header') try: cache_entry_header, header_data_size = self._ReadStructureFromFileObject( file_object, file_offset, cache_entry_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError( 'Unable to parse Firefox cache entry header with error: {0!s}'.format( exception)) if not self._ValidateCacheEntryHeader(cache_entry_header): # Skip to the next block potentially containing a cache entry. file_offset = block_size - header_data_size file_object.seek(file_offset, os.SEEK_CUR) raise IOError('Not a valid Firefox cache record.') body_data_size = ( cache_entry_header.request_size + cache_entry_header.information_size) cache_entry_body_data = self._ReadData( file_object, file_offset + header_data_size, body_data_size) url = cache_entry_body_data[:cache_entry_header.request_size].decode( 'ascii').rstrip('\x00') request_method, response_code = self._ParseHTTPHeaders( cache_entry_body_data[cache_entry_header.request_size:], file_offset, display_name) # A request can span multiple blocks, so we use modulo. cache_entry_data_size = header_data_size + body_data_size _, remaining_data_size = divmod(cache_entry_data_size, block_size) if remaining_data_size > 0: file_object.seek(block_size - remaining_data_size, os.SEEK_CUR) event_data = FirefoxCacheEventData() event_data.data_size = cache_entry_header.cached_data_size event_data.fetch_count = cache_entry_header.fetch_count event_data.info_size = cache_entry_header.information_size event_data.location = cache_entry_header.location event_data.request_method = request_method event_data.request_size = cache_entry_header.request_size event_data.response_code = response_code event_data.url = url event_data.version = '{0:d}.{1:d}'.format( cache_entry_header.major_format_version, cache_entry_header.minor_format_version) return cache_entry_header, event_data
python
{ "resource": "" }
q25922
FirefoxCacheParser._ValidateCacheEntryHeader
train
def _ValidateCacheEntryHeader(self, cache_entry_header): """Determines whether the values in the cache entry header are valid. Args: cache_entry_header (firefox_cache1_entry_header): cache entry header. Returns: bool: True if the cache entry header is valid. """ return ( cache_entry_header.request_size > 0 and cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and cache_entry_header.major_format_version == 1 and cache_entry_header.last_fetched_time > 0 and cache_entry_header.fetch_count > 0)
python
{ "resource": "" }
q25923
FirefoxCache2Parser._GetCacheFileMetadataHeaderOffset
train
def _GetCacheFileMetadataHeaderOffset(self, file_object): """Determines the offset of the cache file metadata header. This method is inspired by the work of James Habben: https://github.com/JamesHabben/FirefoxCache2 Args: file_object (dfvfs.FileIO): a file-like object. Returns: int: offset of the file cache metadata header relative to the start of the file. Raises: UnableToParseFile: if the start of the cache file metadata could not be determined. """ file_object.seek(-4, os.SEEK_END) file_offset = file_object.tell() metadata_size_map = self._GetDataTypeMap('uint32be') try: metadata_size, _ = self._ReadStructureFromFileObject( file_object, file_offset, metadata_size_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse cache file metadata size with error: {0!s}'.format( exception)) # Firefox splits the content into chunks. number_of_chunks, remainder = divmod(metadata_size, self._CHUNK_SIZE) if remainder != 0: number_of_chunks += 1 # Each chunk in the cached record is padded with two bytes. # Skip the first 4 bytes which contains a hash value of the cached content. return metadata_size + (number_of_chunks * 2) + 4
python
{ "resource": "" }
q25924
FirefoxCache2Parser._ValidateCacheFileMetadataHeader
train
def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header): """Determines whether the cache file metadata header is valid. Args: cache_file_metadata_header (firefox_cache2_file_metadata_header): cache file metadata header. Returns: bool: True if the cache file metadata header is valid. """ # TODO: add support for format version 2 and 3 return ( cache_file_metadata_header.key_size > 0 and cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and cache_file_metadata_header.format_version == 1 and cache_file_metadata_header.last_fetched_time > 0 and cache_file_metadata_header.fetch_count > 0)
python
{ "resource": "" }
q25925
PsortTool._CheckStorageFile
train
def _CheckStorageFile(self, storage_file_path): # pylint: disable=arguments-differ """Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid. """ if os.path.exists(storage_file_path): if not os.path.isfile(storage_file_path): raise errors.BadConfigOption( 'Storage file: {0:s} already exists and is not a file.'.format( storage_file_path)) logger.warning('Appending to an already existing storage file.') dirname = os.path.dirname(storage_file_path) if not dirname: dirname = '.' # TODO: add a more thorough check to see if the storage file really is # a plaso storage file. if not os.access(dirname, os.W_OK): raise errors.BadConfigOption( 'Unable to write to storage file: {0:s}'.format(storage_file_path))
python
{ "resource": "" }
q25926
PsortTool._GetAnalysisPlugins
train
def _GetAnalysisPlugins(self, analysis_plugins_string): """Retrieves analysis plugins. Args: analysis_plugins_string (str): comma separated names of analysis plugins to enable. Returns: list[AnalysisPlugin]: analysis plugins. """ if not analysis_plugins_string: return [] analysis_plugins_list = [ name.strip() for name in analysis_plugins_string.split(',')] analysis_plugins = self._analysis_manager.GetPluginObjects( analysis_plugins_list) return analysis_plugins.values()
python
{ "resource": "" }
q25927
PsortTool._ParseAnalysisPluginOptions
train
def _ParseAnalysisPluginOptions(self, options): """Parses the analysis plugin options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if non-existent analysis plugins are specified. """ # Get a list of all available plugins. analysis_plugin_info = self._analysis_manager.GetAllPluginInformation() # Use set-comprehension to create a set of the analysis plugin names. analysis_plugin_names = { name.lower() for name, _, _ in analysis_plugin_info} analysis_plugins = self.ParseStringOption(options, 'analysis_plugins') if not analysis_plugins: return # Use set-comprehension to create a set of the requested plugin names. requested_plugin_names = { name.strip().lower() for name in analysis_plugins.split(',')} # Check to see if we are trying to load plugins that do not exist. difference = requested_plugin_names.difference(analysis_plugin_names) if difference: raise errors.BadConfigOption( 'Non-existent analysis plugins specified: {0:s}'.format( ' '.join(difference))) self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins) for analysis_plugin in self._analysis_plugins: helpers_manager.ArgumentHelperManager.ParseOptions( options, analysis_plugin)
python
{ "resource": "" }
q25928
PsortTool.AddProcessingOptions
train
def AddProcessingOptions(self, argument_group): """Adds processing options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group. """ argument_helper_names = ['temporary_directory', 'zeromq'] if self._CanEnforceProcessMemoryLimit(): argument_helper_names.append('process_resources') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( argument_group, names=argument_helper_names) argument_group.add_argument( '--worker-memory-limit', '--worker_memory_limit', dest='worker_memory_limit', action='store', type=int, metavar='SIZE', help=( 'Maximum amount of memory (data segment and shared memory) ' 'a worker process is allowed to consume in bytes, where 0 ' 'represents no limit. The default limit is 2147483648 (2 GiB). ' 'If a worker process exceeds this limit it is killed by the main ' '(foreman) process.'))
python
{ "resource": "" }
q25929
PsortTool.ProcessStorage
train
def ProcessStorage(self): """Processes a plaso storage file. Raises: BadConfigOption: when a configuration parameter fails validation. RuntimeError: if a non-recoverable situation is encountered. """ self._CheckStorageFile(self._storage_file_path) self._status_view.SetMode(self._status_view_mode) self._status_view.SetStorageFileInformation(self._storage_file_path) status_update_callback = ( self._status_view.GetAnalysisStatusUpdateCallback()) session = engine.BaseEngine.CreateSession( command_line_arguments=self._command_line_arguments, preferred_encoding=self.preferred_encoding) storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile( self._storage_file_path) if not storage_reader: logger.error('Format of storage file: {0:s} not supported'.format( self._storage_file_path)) return self._number_of_analysis_reports = ( storage_reader.GetNumberOfAnalysisReports()) storage_reader.Close() configuration = configurations.ProcessingConfiguration() configuration.data_location = self._data_location configuration.profiling.directory = self._profiling_directory configuration.profiling.sample_rate = self._profiling_sample_rate configuration.profiling.profilers = self._profilers analysis_counter = None if self._analysis_plugins: storage_writer = ( storage_factory.StorageFactory.CreateStorageWriterForFile( session, self._storage_file_path)) # TODO: add single processing support. analysis_engine = psort.PsortMultiProcessEngine( use_zeromq=self._use_zeromq) analysis_engine.AnalyzeEvents( self._knowledge_base, storage_writer, self._data_location, self._analysis_plugins, configuration, event_filter=self._event_filter, event_filter_expression=self._event_filter_expression, status_update_callback=status_update_callback, worker_memory_limit=self._worker_memory_limit) analysis_counter = collections.Counter() for item, value in iter(session.analysis_reports_counter.items()): analysis_counter[item] = value if self._output_format != 'null': storage_reader = ( storage_factory.StorageFactory.CreateStorageReaderForFile( self._storage_file_path)) # TODO: add single processing support. analysis_engine = psort.PsortMultiProcessEngine( use_zeromq=self._use_zeromq) analysis_engine.ExportEvents( self._knowledge_base, storage_reader, self._output_module, configuration, deduplicate_events=self._deduplicate_events, event_filter=self._event_filter, status_update_callback=status_update_callback, time_slice=self._time_slice, use_time_slicer=self._use_time_slicer) if self._quiet_mode: return self._output_writer.Write('Processing completed.\n') if analysis_counter: table_view = views.ViewsFactory.GetTableView( self._views_format_type, title='Analysis reports generated') for element, count in analysis_counter.most_common(): if element != 'total': table_view.AddRow([element, count]) table_view.AddRow(['Total', analysis_counter['total']]) table_view.Write(self._output_writer) storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile( self._storage_file_path) self._PrintAnalysisReportsDetails(storage_reader)
python
{ "resource": "" }
q25930
BencodePlugin._GetKeys
train
def _GetKeys(self, data, keys, depth=1): """Helper function to return keys nested in a bencode dict. By default this function will return the values for the named keys requested by a plugin in match{}. The default setting is to look a single layer down from the root (same as the check for plugin applicability). This level is suitable for most cases. For cases where there is variability in the name at the first level (e.g. it is the MAC addresses of a device, or a UUID) it is possible to override the depth limit and use _GetKeys to fetch from a deeper level. Args: data (dict[str, object]): bencode data values. keys (list[str]): keys that should be returned. depth (int): how many levels deep to check for a match. Returns: dict[str, object]: a dictionary with just the keys requested. """ keys = set(keys) match = {} if depth == 1: for key in keys: match[key] = data[key] else: for _, parsed_key, parsed_value in self._RecurseKey( data, depth=depth): if parsed_key in keys: match[parsed_key] = parsed_value if set(match.keys()) == keys: return match return match
python
{ "resource": "" }
q25931
BencodePlugin._RecurseKey
train
def _RecurseKey(self, recur_item, root='', depth=15): """Flattens nested dictionaries and lists by yielding their values. The hierarchy of a bencode file is a series of nested dictionaries and lists. This helper function helps plugins navigate the structure without having to reimplement their own recursive methods. This method implements an overridable depth limit to prevent processing extremely deeply nested dictionaries. If the limit is reached a debug message is logged indicating which key processing stopped on. Args: recur_item (object): object to be checked for additional nested items. root (str): the pathname of the current working key. depth (int): a counter to ensure we stop at the maximum recursion depth. Yields: tuple: containing: str: root str: key str: value """ if depth < 1: logger.debug('Recursion limit hit for key: {0:s}'.format(root)) return if isinstance(recur_item, (list, tuple)): for recur in recur_item: for key in self._RecurseKey(recur, root, depth): yield key return if not hasattr(recur_item, 'iteritems'): return for key, value in iter(recur_item.items()): yield root, key, value if isinstance(value, dict): value = [value] if isinstance(value, list): for item in value: if isinstance(item, dict): for keyval in self._RecurseKey( item, root=root + '/' + key, depth=depth - 1): yield keyval
python
{ "resource": "" }
q25932
SQLiteStorageMergeReader._GetContainerTypes
train
def _GetContainerTypes(self): """Retrieves the container types to merge. Container types not defined in _CONTAINER_TYPES are ignored and not merged. Specific container types reference other container types, such as event referencing event data. The names are ordered to ensure the attribute containers are merged in the correct order. Returns: list[str]: names of the container types to merge. """ self._cursor.execute(self._TABLE_NAMES_QUERY) table_names = [row[0] for row in self._cursor.fetchall()] return [ table_name for table_name in self._CONTAINER_TYPES if table_name in table_names]
python
{ "resource": "" }
q25933
SQLiteStorageMergeReader._Open
train
def _Open(self): """Opens the task storage for reading.""" self._connection = sqlite3.connect( self._path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) self._cursor = self._connection.cursor()
python
{ "resource": "" }
q25934
SQLiteStorageMergeReader._ReadStorageMetadata
train
def _ReadStorageMetadata(self): """Reads the task storage metadata.""" query = 'SELECT key, value FROM metadata' self._cursor.execute(query) metadata_values = {row[0]: row[1] for row in self._cursor.fetchall()} self._compression_format = metadata_values['compression_format']
python
{ "resource": "" }
q25935
SQLiteStorageMergeReader._PrepareForNextContainerType
train
def _PrepareForNextContainerType(self): """Prepares for the next container type. This method prepares the task storage for merging the next container type. It sets the active container type, its add method and the active cursor accordingly. """ self._active_container_type = self._container_types.pop(0) self._add_active_container_method = self._add_container_type_methods.get( self._active_container_type) query = 'SELECT _identifier, _data FROM {0:s}'.format( self._active_container_type) self._cursor.execute(query) self._active_cursor = self._cursor
python
{ "resource": "" }
q25936
SQLiteStorageMergeReader.MergeAttributeContainers
train
def MergeAttributeContainers( self, callback=None, maximum_number_of_containers=0): """Reads attribute containers from a task storage file into the writer. Args: callback (function[StorageWriter, AttributeContainer]): function to call after each attribute container is deserialized. maximum_number_of_containers (Optional[int]): maximum number of containers to merge, where 0 represents no limit. Returns: bool: True if the entire task storage file has been merged. Raises: RuntimeError: if the add method for the active attribute container type is missing. OSError: if the task storage file cannot be deleted. ValueError: if the maximum number of containers is a negative value. """ if maximum_number_of_containers < 0: raise ValueError('Invalid maximum number of containers') if not self._cursor: self._Open() self._ReadStorageMetadata() self._container_types = self._GetContainerTypes() number_of_containers = 0 while self._active_cursor or self._container_types: if not self._active_cursor: self._PrepareForNextContainerType() if maximum_number_of_containers == 0: rows = self._active_cursor.fetchall() else: number_of_rows = maximum_number_of_containers - number_of_containers rows = self._active_cursor.fetchmany(size=number_of_rows) if not rows: self._active_cursor = None continue for row in rows: identifier = identifiers.SQLTableIdentifier( self._active_container_type, row[0]) if self._compression_format == definitions.COMPRESSION_FORMAT_ZLIB: serialized_data = zlib.decompress(row[1]) else: serialized_data = row[1] attribute_container = self._DeserializeAttributeContainer( self._active_container_type, serialized_data) attribute_container.SetIdentifier(identifier) if self._active_container_type == self._CONTAINER_TYPE_EVENT_TAG: event_identifier = identifiers.SQLTableIdentifier( self._CONTAINER_TYPE_EVENT, attribute_container.event_row_identifier) attribute_container.SetEventIdentifier(event_identifier) del attribute_container.event_row_identifier if callback: callback(self._storage_writer, attribute_container) self._add_active_container_method(attribute_container) number_of_containers += 1 if (maximum_number_of_containers != 0 and number_of_containers >= maximum_number_of_containers): return False self._Close() os.remove(self._path) return True
python
{ "resource": "" }
q25937
MacWifiLogParser._GetAction
train
def _GetAction(self, action, text): """Parse the well known actions for easy reading. Args: action (str): the function or action called by the agent. text (str): mac Wifi log text. Returns: str: a formatted string representing the known (or common) action. If the action is not known the original log text is returned. """ # TODO: replace "x in y" checks by startswith if possible. if 'airportdProcessDLILEvent' in action: interface = text.split()[0] return 'Interface {0:s} turn up.'.format(interface) if 'doAutoJoin' in action: match = self._CONNECTED_RE.match(text) if match: ssid = match.group(1)[1:-1] else: ssid = 'Unknown' return 'Wifi connected to SSID {0:s}'.format(ssid) if 'processSystemPSKAssoc' in action: wifi_parameters = self._WIFI_PARAMETERS_RE.search(text) if wifi_parameters: ssid = wifi_parameters.group(1) bssid = wifi_parameters.group(2) security = wifi_parameters.group(3) if not ssid: ssid = 'Unknown' if not bssid: bssid = 'Unknown' if not security: security = 'Unknown' return ( 'New wifi configured. BSSID: {0:s}, SSID: {1:s}, ' 'Security: {2:s}.').format(bssid, ssid, security) return text
python
{ "resource": "" }
q25938
MacWifiLogParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, line): """Verify that this file is a Mac Wifi log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not. """ self._last_month = 0 self._year_use = parser_mediator.GetEstimatedYear() key = 'header' try: structure = self._MAC_WIFI_HEADER.parseString(line) except pyparsing.ParseException: structure = None if not structure: key = 'turned_over_header' try: structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line) except pyparsing.ParseException: structure = None if not structure: logger.debug('Not a Mac Wifi log file') return False time_elements_tuple = self._GetTimeElementsTuple(key, structure) try: dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=time_elements_tuple) except ValueError: logger.debug( 'Not a Mac Wifi log file, invalid date and time: {0!s}'.format( structure.date_time)) return False self._last_month = time_elements_tuple[1] return True
python
{ "resource": "" }
q25939
AutomaticDestinationsOLECFPlugin._ParseDistributedTrackingIdentifier
train
def _ParseDistributedTrackingIdentifier( self, parser_mediator, uuid_object, origin): """Extracts data from a Distributed Tracking identifier. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. uuid_object (uuid.UUID): UUID of the Distributed Tracking identifier. origin (str): origin of the event (event source). Returns: str: UUID string of the Distributed Tracking identifier. """ if uuid_object.version == 1: event_data = windows_events.WindowsDistributedLinkTrackingEventData( uuid_object, origin) date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) return '{{{0!s}}}'.format(uuid_object)
python
{ "resource": "" }
q25940
AutomaticDestinationsOLECFPlugin.ParseDestList
train
def ParseDestList(self, parser_mediator, olecf_item): """Parses the DestList OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. olecf_item (pyolecf.item): OLECF item. Raises: UnableToParseFile: if the DestList cannot be parsed. """ header_map = self._GetDataTypeMap('dest_list_header') try: header, entry_offset = self._ReadStructureFromFileObject( olecf_item, 0, header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse DestList header with error: {0!s}'.format( exception)) if header.format_version == 1: entry_map = self._GetDataTypeMap('dest_list_entry_v1') elif header.format_version in (3, 4): entry_map = self._GetDataTypeMap('dest_list_entry_v3') else: parser_mediator.ProduceExtractionWarning( 'unsupported format version: {0:d}.'.format(header.format_version)) return while entry_offset < olecf_item.size: try: entry, entry_data_size = self._ReadStructureFromFileObject( olecf_item, entry_offset, entry_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse DestList entry with error: {0!s}'.format( exception)) display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset) try: droid_volume_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.droid_volume_identifier, display_name) except (TypeError, ValueError) as exception: droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning( 'unable to read droid volume identifier with error: {0!s}'.format( exception)) try: droid_file_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.droid_file_identifier, display_name) except (TypeError, ValueError) as exception: droid_file_identifier = '' parser_mediator.ProduceExtractionWarning( 'unable to read droid file identifier with error: {0!s}'.format( exception)) try: birth_droid_volume_identifier = ( self._ParseDistributedTrackingIdentifier( parser_mediator, entry.birth_droid_volume_identifier, display_name)) except (TypeError, ValueError) as exception: birth_droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning(( 'unable to read birth droid volume identifier with error: ' '{0!s}').format( exception)) try: birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.birth_droid_file_identifier, display_name) except (TypeError, ValueError) as exception: birth_droid_file_identifier = '' parser_mediator.ProduceExtractionWarning(( 'unable to read birth droid file identifier with error: ' '{0!s}').format( exception)) if entry.last_modification_time == 0: date_time = dfdatetime_semantic_time.SemanticTime('Not set') else: date_time = dfdatetime_filetime.Filetime( timestamp=entry.last_modification_time) event_data = AutomaticDestinationsDestListEntryEventData() event_data.birth_droid_file_identifier = birth_droid_file_identifier event_data.birth_droid_volume_identifier = birth_droid_volume_identifier event_data.droid_file_identifier = droid_file_identifier event_data.droid_volume_identifier = droid_volume_identifier event_data.entry_number = entry.entry_number event_data.hostname = entry.hostname.rstrip('\x00') event_data.offset = entry_offset event_data.path = entry.path.rstrip('\x00') event_data.pin_status = entry.pin_status event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) entry_offset += entry_data_size
python
{ "resource": "" }
q25941
ParserMediator._GetEarliestYearFromFileEntry
train
def _GetEarliestYearFromFileEntry(self): """Retrieves the year from the file entry date and time values. This function uses the creation time if available otherwise the change time (metadata last modification time) is used. Returns: int: year of the file entry or None. """ file_entry = self.GetFileEntry() if not file_entry: return None stat_object = file_entry.GetStat() posix_time = getattr(stat_object, 'crtime', None) if posix_time is None: posix_time = getattr(stat_object, 'ctime', None) # Gzip files don't store the creation or metadata modification times, # but the modification time stored in the file is a good proxy. if file_entry.TYPE_INDICATOR == dfvfs_definitions.TYPE_INDICATOR_GZIP: posix_time = getattr(stat_object, 'mtime', None) if posix_time is None: logger.warning( 'Unable to determine earliest year from file stat information.') return None try: year = timelib.GetYearFromPosixTime( posix_time, timezone=self._knowledge_base.timezone) return year except ValueError as exception: logger.error(( 'Unable to determine earliest year from file stat information with ' 'error: {0!s}').format(exception)) return None
python
{ "resource": "" }
q25942
ParserMediator._GetInode
train
def _GetInode(self, inode_value): """Retrieves the inode from the inode value. Args: inode_value (int|str): inode, such as 1 or '27-128-1'. Returns: int: inode or -1 if the inode value cannot be converted to an integer. """ if isinstance(inode_value, py2to3.INTEGER_TYPES): return inode_value if isinstance(inode_value, float): return int(inode_value) if not isinstance(inode_value, py2to3.STRING_TYPES): return -1 if b'-' in inode_value: inode_value, _, _ = inode_value.partition(b'-') try: return int(inode_value, 10) except ValueError: return -1
python
{ "resource": "" }
q25943
ParserMediator.AddEventAttribute
train
def AddEventAttribute(self, attribute_name, attribute_value): """Adds an attribute that will be set on all events produced. Setting attributes using this method will cause events produced via this mediator to have an attribute with the provided name set with the provided value. Args: attribute_name (str): name of the attribute to add. attribute_value (str): value of the attribute to add. Raises: KeyError: if the event attribute is already set. """ if attribute_name in self._extra_event_attributes: raise KeyError('Event attribute {0:s} already set'.format( attribute_name)) self._extra_event_attributes[attribute_name] = attribute_value
python
{ "resource": "" }
q25944
ParserMediator.GetDisplayName
train
def GetDisplayName(self, file_entry=None): """Retrieves the display name for a file entry. Args: file_entry (Optional[dfvfs.FileEntry]): file entry object, where None will return the display name of self._file_entry. Returns: str: human readable string that describes the path to the file entry. Raises: ValueError: if the file entry is missing. """ if file_entry is None: file_entry = self._file_entry if file_entry is None: raise ValueError('Missing file entry') path_spec = getattr(file_entry, 'path_spec', None) relative_path = path_helper.PathHelper.GetRelativePathForPathSpec( path_spec, mount_path=self._mount_path) if not relative_path: return file_entry.name return self.GetDisplayNameForPathSpec(path_spec)
python
{ "resource": "" }
q25945
ParserMediator.GetEstimatedYear
train
def GetEstimatedYear(self): """Retrieves an estimate of the year. This function determines the year in the following manner: * see if the user provided a preferred year; * see if knowledge base defines a year e.g. derived from preprocessing; * determine the year based on the file entry metadata; * default to the current year; Returns: int: estimated year. """ # TODO: improve this method to get a more reliable estimate. # Preserve the year-less date and sort this out in the psort phase. if self._preferred_year: return self._preferred_year if self._knowledge_base.year: return self._knowledge_base.year # TODO: Find a decent way to actually calculate the correct year # instead of relying on stats object. year = self._GetEarliestYearFromFileEntry() if not year: year = self._GetLatestYearFromFileEntry() if not year: year = timelib.GetCurrentYear() return year
python
{ "resource": "" }
q25946
ParserMediator.GetFilename
train
def GetFilename(self): """Retrieves the name of the active file entry. Returns: str: name of the active file entry or None. """ if not self._file_entry: return None data_stream = getattr(self._file_entry.path_spec, 'data_stream', None) if data_stream: return '{0:s}:{1:s}'.format(self._file_entry.name, data_stream) return self._file_entry.name
python
{ "resource": "" }
q25947
ParserMediator.ProcessEvent
train
def ProcessEvent( self, event, parser_chain=None, file_entry=None, query=None): """Processes an event before it is written to the storage. Args: event (EventObject|EventData): event or event data. parser_chain (Optional[str]): parsing chain up to this point. file_entry (Optional[dfvfs.FileEntry]): file entry, where None will use the current file entry set in the mediator. query (Optional[str]): query that was used to obtain the event. Raises: KeyError: if there's an attempt to add a duplicate attribute value to the event. """ # TODO: rename this to event.parser_chain or equivalent. if not getattr(event, 'parser', None) and parser_chain: event.parser = parser_chain # TODO: deprecate text_prepend in favor of an event tag. if not getattr(event, 'text_prepend', None) and self._text_prepend: event.text_prepend = self._text_prepend if file_entry is None: file_entry = self._file_entry display_name = None if file_entry: event.pathspec = file_entry.path_spec if not getattr(event, 'filename', None): path_spec = getattr(file_entry, 'path_spec', None) event.filename = path_helper.PathHelper.GetRelativePathForPathSpec( path_spec, mount_path=self._mount_path) if not display_name: # TODO: dfVFS refactor: move display name to output since the path # specification contains the full information. display_name = self.GetDisplayName(file_entry) stat_object = file_entry.GetStat() inode_value = getattr(stat_object, 'ino', None) # TODO: refactor to ProcessEventData. # Note that we use getattr here since event can be either EventObject # or EventData. if getattr(event, 'inode', None) is None and inode_value is not None: event.inode = self._GetInode(inode_value) if not getattr(event, 'display_name', None) and display_name: event.display_name = display_name if not getattr(event, 'hostname', None) and self.hostname: event.hostname = self.hostname if not getattr(event, 'username', None): user_sid = getattr(event, 'user_sid', None) username = self._knowledge_base.GetUsernameByIdentifier(user_sid) if username: event.username = username if not getattr(event, 'query', None) and query: event.query = query for attribute, value in iter(self._extra_event_attributes.items()): if hasattr(event, attribute): raise KeyError('Event already has a value for {0:s}'.format(attribute)) setattr(event, attribute, value)
python
{ "resource": "" }
q25948
ParserMediator.ProduceEventSource
train
def ProduceEventSource(self, event_source): """Produces an event source. Args: event_source (EventSource): an event source. Raises: RuntimeError: when storage writer is not set. """ if not self._storage_writer: raise RuntimeError('Storage writer not set.') self._storage_writer.AddEventSource(event_source) self._number_of_event_sources += 1 self.last_activity_timestamp = time.time()
python
{ "resource": "" }
q25949
ParserMediator.ProduceExtractionWarning
train
def ProduceExtractionWarning(self, message, path_spec=None): """Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set. """ if not self._storage_writer: raise RuntimeError('Storage writer not set.') if not path_spec and self._file_entry: path_spec = self._file_entry.path_spec parser_chain = self.GetParserChain() warning = warnings.ExtractionWarning( message=message, parser_chain=parser_chain, path_spec=path_spec) self._storage_writer.AddWarning(warning) self._number_of_warnings += 1 self.last_activity_timestamp = time.time()
python
{ "resource": "" }
q25950
ParserMediator.RemoveEventAttribute
train
def RemoveEventAttribute(self, attribute_name): """Removes an attribute from being set on all events produced. Args: attribute_name (str): name of the attribute to remove. Raises: KeyError: if the event attribute is not set. """ if attribute_name not in self._extra_event_attributes: raise KeyError('Event attribute: {0:s} not set'.format(attribute_name)) del self._extra_event_attributes[attribute_name]
python
{ "resource": "" }
q25951
ParserMediator.SampleMemoryUsage
train
def SampleMemoryUsage(self, parser_name): """Takes a sample of the memory usage for profiling. Args: parser_name (str): name of the parser. """ if self._memory_profiler: used_memory = self._process_information.GetUsedMemory() or 0 self._memory_profiler.Sample(parser_name, used_memory)
python
{ "resource": "" }
q25952
ParserMediator.SetInputSourceConfiguration
train
def SetInputSourceConfiguration(self, configuration): """Sets the input source configuration settings. Args: configuration (InputSourceConfiguration): input source configuration. """ mount_path = configuration.mount_path # Remove a trailing path separator from the mount path so the relative # paths will start with a path separator. if mount_path and mount_path.endswith(os.sep): mount_path = mount_path[:-1] self._mount_path = mount_path
python
{ "resource": "" }
q25953
ParserMediator.SetStorageWriter
train
def SetStorageWriter(self, storage_writer): """Sets the storage writer. Args: storage_writer (StorageWriter): storage writer. """ self._storage_writer = storage_writer # Reset the last event data information. Each storage file should # contain event data for its events. self._last_event_data_hash = None self._last_event_data_identifier = None
python
{ "resource": "" }
q25954
EventObjectFilter.CompileFilter
train
def CompileFilter(self, filter_expression): """Compiles the filter expression. The filter expression contains an object filter expression. Args: filter_expression (str): filter expression. Raises: ParseError: if the filter expression cannot be parsed. """ filter_parser = pfilter.BaseParser(filter_expression).Parse() matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation) self._filter_expression = filter_expression self._matcher = matcher
python
{ "resource": "" }
q25955
EventObjectFilter.Match
train
def Match(self, event): """Determines if an event matches the filter. Args: event (EventObject): an event. Returns: bool: True if the event matches the filter. """ if not self._matcher: return True self._decision = self._matcher.Matches(event) return self._decision
python
{ "resource": "" }
q25956
StatusView._AddsAnalysisProcessStatusTableRow
train
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view): """Adds an analysis process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view. """ used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) events = '' if (process_status.number_of_consumed_events is not None and process_status.number_of_consumed_events_delta is not None): events = '{0:d} ({1:d})'.format( process_status.number_of_consumed_events, process_status.number_of_consumed_events_delta) event_tags = '' if (process_status.number_of_produced_event_tags is not None and process_status.number_of_produced_event_tags_delta is not None): event_tags = '{0:d} ({1:d})'.format( process_status.number_of_produced_event_tags, process_status.number_of_produced_event_tags_delta) reports = '' if (process_status.number_of_produced_reports is not None and process_status.number_of_produced_reports_delta is not None): reports = '{0:d} ({1:d})'.format( process_status.number_of_produced_reports, process_status.number_of_produced_reports_delta) table_view.AddRow([ process_status.identifier, process_status.pid, process_status.status, used_memory, events, event_tags, reports])
python
{ "resource": "" }
q25957
StatusView._AddExtractionProcessStatusTableRow
train
def _AddExtractionProcessStatusTableRow(self, process_status, table_view): """Adds an extraction process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view. """ used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) sources = '' if (process_status.number_of_produced_sources is not None and process_status.number_of_produced_sources_delta is not None): sources = '{0:d} ({1:d})'.format( process_status.number_of_produced_sources, process_status.number_of_produced_sources_delta) events = '' if (process_status.number_of_produced_events is not None and process_status.number_of_produced_events_delta is not None): events = '{0:d} ({1:d})'.format( process_status.number_of_produced_events, process_status.number_of_produced_events_delta) # TODO: shorten display name to fit in 80 chars and show the filename. table_view.AddRow([ process_status.identifier, process_status.pid, process_status.status, used_memory, sources, events, process_status.display_name])
python
{ "resource": "" }
q25958
StatusView._FormatSizeInUnitsOf1024
train
def _FormatSizeInUnitsOf1024(self, size): """Represents a number of bytes in units of 1024. Args: size (int): size in bytes. Returns: str: human readable string of the size. """ magnitude_1024 = 0 used_memory_1024 = float(size) while used_memory_1024 >= 1024: used_memory_1024 /= 1024 magnitude_1024 += 1 if 0 < magnitude_1024 <= 7: return '{0:.1f} {1:s}'.format( used_memory_1024, self._UNITS_1024[magnitude_1024]) return '{0:d} B'.format(size)
python
{ "resource": "" }
q25959
StatusView._PrintAnalysisStatusHeader
train
def _PrintAnalysisStatusHeader(self, processing_status): """Prints the analysis status header. Args: processing_status (ProcessingStatus): processing status. """ self._output_writer.Write( 'Storage file\t\t: {0:s}\n'.format(self._storage_file_path)) self._PrintProcessingTime(processing_status) if processing_status and processing_status.events_status: self._PrintEventsStatus(processing_status.events_status) self._output_writer.Write('\n')
python
{ "resource": "" }
q25960
StatusView._PrintAnalysisStatusUpdateLinear
train
def _PrintAnalysisStatusUpdateLinear(self, processing_status): """Prints an analysis status update in linear mode. Args: processing_status (ProcessingStatus): processing status. """ for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events consumed: {2:d} - running: ' '{3!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_consumed_events, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
python
{ "resource": "" }
q25961
StatusView._PrintAnalysisStatusUpdateWindow
train
def _PrintAnalysisStatusUpdateWindow(self, processing_status): """Prints an analysis status update in window mode. Args: processing_status (ProcessingStatus): processing status. """ if self._stdout_output_writer: self._ClearScreen() output_text = 'plaso - {0:s} version {1:s}\n\n'.format( self._tool_name, plaso.__version__) self._output_writer.Write(output_text) self._PrintAnalysisStatusHeader(processing_status) table_view = views.CLITabularTableView(column_names=[ 'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags', 'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0]) self._AddsAnalysisProcessStatusTableRow( processing_status.foreman_status, table_view) for worker_status in processing_status.workers_status: self._AddsAnalysisProcessStatusTableRow(worker_status, table_view) table_view.Write(self._output_writer) self._output_writer.Write('\n') if processing_status.aborted: self._output_writer.Write( 'Processing aborted - waiting for clean up.\n\n') if self._stdout_output_writer: # We need to explicitly flush stdout to prevent partial status updates. sys.stdout.flush()
python
{ "resource": "" }
q25962
StatusView._PrintExtractionStatusUpdateLinear
train
def _PrintExtractionStatusUpdateLinear(self, processing_status): """Prints an extraction status update in linear mode. Args: processing_status (ProcessingStatus): processing status. """ for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} ' '- running: {4!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_produced_events, worker_status.display_name, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
python
{ "resource": "" }
q25963
StatusView._PrintExtractionStatusUpdateWindow
train
def _PrintExtractionStatusUpdateWindow(self, processing_status): """Prints an extraction status update in window mode. Args: processing_status (ProcessingStatus): processing status. """ if self._stdout_output_writer: self._ClearScreen() output_text = 'plaso - {0:s} version {1:s}\n\n'.format( self._tool_name, plaso.__version__) self._output_writer.Write(output_text) self.PrintExtractionStatusHeader(processing_status) table_view = views.CLITabularTableView(column_names=[ 'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events', 'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0]) self._AddExtractionProcessStatusTableRow( processing_status.foreman_status, table_view) for worker_status in processing_status.workers_status: self._AddExtractionProcessStatusTableRow(worker_status, table_view) table_view.Write(self._output_writer) self._output_writer.Write('\n') if processing_status.aborted: self._output_writer.Write( 'Processing aborted - waiting for clean up.\n\n') # TODO: remove update flicker. For win32console we could set the cursor # top left, write the table, clean the remainder of the screen buffer # and set the cursor at the end of the table. if self._stdout_output_writer: # We need to explicitly flush stdout to prevent partial status updates. sys.stdout.flush()
python
{ "resource": "" }
q25964
StatusView._PrintEventsStatus
train
def _PrintEventsStatus(self, events_status):
    """Prints the status of the events.

    Args:
      events_status (EventsStatus): events status.
    """
    if events_status:
      table_view = views.CLITabularTableView(
          column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
                        'MACB grouped', 'Total'],
          column_sizes=[15, 15, 15, 15, 15, 0])

      table_view.AddRow([
          '', events_status.number_of_filtered_events,
          events_status.number_of_events_from_time_slice,
          events_status.number_of_duplicate_events,
          events_status.number_of_macb_grouped_events,
          events_status.total_number_of_events])

      self._output_writer.Write('\n')
      table_view.Write(self._output_writer)
python
{ "resource": "" }
q25965
StatusView._PrintProcessingTime
train
def _PrintProcessingTime(self, processing_status):
    """Prints the processing time.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    if not processing_status:
      processing_time = '00:00:00'
    else:
      processing_time = time.time() - processing_status.start_time
      time_struct = time.gmtime(processing_time)
      processing_time = time.strftime('%H:%M:%S', time_struct)

    self._output_writer.Write(
        'Processing time\t\t: {0:s}\n'.format(processing_time))
python
{ "resource": "" }
q25966
StatusView._PrintTasksStatus
train
def _PrintTasksStatus(self, processing_status):
    """Prints the status of the tasks.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    if processing_status and processing_status.tasks_status:
      tasks_status = processing_status.tasks_status

      table_view = views.CLITabularTableView(
          column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
                        'Abandoned', 'Total'],
          column_sizes=[15, 7, 15, 15, 15, 0])

      table_view.AddRow([
          '', tasks_status.number_of_queued_tasks,
          tasks_status.number_of_tasks_processing,
          tasks_status.number_of_tasks_pending_merge,
          tasks_status.number_of_abandoned_tasks,
          tasks_status.total_number_of_tasks])

      self._output_writer.Write('\n')
      table_view.Write(self._output_writer)
python
{ "resource": "" }
q25967
StatusView.GetAnalysisStatusUpdateCallback
train
def GetAnalysisStatusUpdateCallback(self):
    """Retrieves the analysis status update callback function.

    Returns:
      function: status update callback function or None if not available.
    """
    if self._mode == self.MODE_LINEAR:
      return self._PrintAnalysisStatusUpdateLinear

    if self._mode == self.MODE_WINDOW:
      return self._PrintAnalysisStatusUpdateWindow

    return None
python
{ "resource": "" }
q25968
StatusView.GetExtractionStatusUpdateCallback
train
def GetExtractionStatusUpdateCallback(self):
    """Retrieves the extraction status update callback function.

    Returns:
      function: status update callback function or None if not available.
    """
    if self._mode == self.MODE_LINEAR:
      return self._PrintExtractionStatusUpdateLinear

    if self._mode == self.MODE_WINDOW:
      return self._PrintExtractionStatusUpdateWindow

    return None
python
{ "resource": "" }
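A minimal usage sketch of the callback getters above. The StatusView constructor arguments and the stand-in writer are assumptions based on the attributes the methods reference (_output_writer, _tool_name, _mode); the extraction engine would invoke the returned callback periodically with a ProcessingStatus.

import sys

class _StdoutWriter(object):
  """Hypothetical stand-in writer exposing the Write() interface used above."""

  def Write(self, text):
    sys.stdout.write(text)

# Assumed constructor signature: StatusView(output_writer, tool_name).
status_view = StatusView(_StdoutWriter(), 'log2timeline')
status_callback = status_view.GetExtractionStatusUpdateCallback()
if status_callback:
  # The engine calls the callback with a ProcessingStatus instance, e.g.:
  # status_callback(processing_status)
  pass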
q25969
StatusView.PrintExtractionStatusHeader
train
def PrintExtractionStatusHeader(self, processing_status):
    """Prints the extraction status header.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    self._output_writer.Write(
        'Source path\t\t: {0:s}\n'.format(self._source_path))
    self._output_writer.Write(
        'Source type\t\t: {0:s}\n'.format(self._source_type))

    if self._artifact_filters:
      artifacts_string = ', '.join(self._artifact_filters)
      self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
          artifacts_string))

    if self._filter_file:
      self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
          self._filter_file))

    self._PrintProcessingTime(processing_status)
    self._PrintTasksStatus(processing_status)

    self._output_writer.Write('\n')
python
{ "resource": "" }
q25970
StatusView.PrintExtractionSummary
train
def PrintExtractionSummary(self, processing_status):
    """Prints a summary of the extraction.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    if not processing_status:
      self._output_writer.Write(
          'WARNING: missing processing status information.\n')

    elif not processing_status.aborted:
      if processing_status.error_path_specs:
        self._output_writer.Write('Processing completed with errors.\n')
      else:
        self._output_writer.Write('Processing completed.\n')

      number_of_warnings = (
          processing_status.foreman_status.number_of_produced_warnings)
      if number_of_warnings:
        output_text = '\n'.join([
            '',
            ('Number of warnings generated while extracting events: '
             '{0:d}.').format(number_of_warnings),
            '',
            'Use pinfo to inspect warnings in more detail.',
            ''])
        self._output_writer.Write(output_text)

      if processing_status.error_path_specs:
        output_text = '\n'.join([
            '',
            'Path specifications that could not be processed:',
            ''])
        self._output_writer.Write(output_text)
        for path_spec in processing_status.error_path_specs:
          self._output_writer.Write(path_spec.comparable)
          self._output_writer.Write('\n')

    self._output_writer.Write('\n')
python
{ "resource": "" }
q25971
StatusView.SetSourceInformation
train
def SetSourceInformation(
      self, source_path, source_type, artifact_filters=None, filter_file=None):
    """Sets the source information.

    Args:
      source_path (str): path of the source.
      source_type (str): source type.
      artifact_filters (Optional[list[str]]): names of artifact definitions
          to use as filters.
      filter_file (Optional[str]): filter file.
    """
    self._artifact_filters = artifact_filters
    self._filter_file = filter_file
    self._source_path = source_path
    self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
python
{ "resource": "" }
q25972
EventTagIndex._Build
train
def _Build(self, storage_file):
    """Builds the event tag index.

    Args:
      storage_file (BaseStorageFile): storage file.
    """
    self._index = {}
    for event_tag in storage_file.GetEventTags():
      self.SetEventTag(event_tag)
python
{ "resource": "" }
q25973
EventTagIndex.GetEventTagByIdentifier
train
def GetEventTagByIdentifier(self, storage_file, event_identifier):
    """Retrieves the most recently updated event tag for an event.

    Args:
      storage_file (BaseStorageFile): storage file.
      event_identifier (AttributeContainerIdentifier): event attribute
          container identifier.

    Returns:
      EventTag: event tag or None if the event has no event tag.
    """
    if not self._index:
      self._Build(storage_file)

    lookup_key = event_identifier.CopyToString()
    event_tag_identifier = self._index.get(lookup_key, None)
    if not event_tag_identifier:
      return None

    return storage_file.GetEventTagByIdentifier(event_tag_identifier)
python
{ "resource": "" }
q25974
EventTagIndex.SetEventTag
train
def SetEventTag(self, event_tag):
    """Sets an event tag in the index.

    Args:
      event_tag (EventTag): event tag.
    """
    event_identifier = event_tag.GetEventIdentifier()
    lookup_key = event_identifier.CopyToString()

    self._index[lookup_key] = event_tag.GetIdentifier()
python
{ "resource": "" }
q25975
PathHelper._ExpandUsersHomeDirectoryPathSegments
train
def _ExpandUsersHomeDirectoryPathSegments(
      cls, path_segments, path_separator, user_accounts):
    """Expands a path to contain all users home or profile directories.

    Expands the artifacts path variable "%%users.homedir%%" or
    "%%users.userprofile%%".

    Args:
      path_segments (list[str]): path segments.
      path_separator (str): path segment separator.
      user_accounts (list[UserAccountArtifact]): user accounts.

    Returns:
      list[str]: paths returned for user accounts without a drive indicator.
    """
    if not path_segments:
      return []

    user_paths = []

    first_path_segment = path_segments[0].lower()
    if first_path_segment not in (
        '%%users.homedir%%', '%%users.userprofile%%'):
      if cls._IsWindowsDrivePathSegment(path_segments[0]):
        path_segments[0] = ''

      user_path = path_separator.join(path_segments)
      user_paths.append(user_path)

    else:
      for user_account in user_accounts:
        user_path_segments = user_account.GetUserDirectoryPathSegments()
        if not user_path_segments:
          continue

        if cls._IsWindowsDrivePathSegment(user_path_segments[0]):
          user_path_segments[0] = ''

        # Prevent concatenating two consecutive path segment separators.
        if not user_path_segments[-1]:
          user_path_segments.pop()

        user_path_segments.extend(path_segments[1:])

        user_path = path_separator.join(user_path_segments)
        user_paths.append(user_path)

    return user_paths
python
{ "resource": "" }
q25976
PathHelper._IsWindowsDrivePathSegment
train
def _IsWindowsDrivePathSegment(cls, path_segment):
    """Determines if the path segment contains a Windows Drive indicator.

    A drive indicator can be a drive letter or %SystemDrive%.

    Args:
      path_segment (str): path segment.

    Returns:
      bool: True if the path segment contains a Windows Drive indicator.
    """
    if (len(path_segment) == 2 and path_segment[1] == ':' and
        path_segment[0].isalpha()):
      return True

    path_segment = path_segment.upper()
    return path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')
python
{ "resource": "" }
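Illustrative values for the drive-indicator check above, calling the private classmethod directly as a test would; the module path in the import is an assumption.

from plaso.engine.path_helper import PathHelper  # assumed module path

PathHelper._IsWindowsDrivePathSegment('C:')                       # True
PathHelper._IsWindowsDrivePathSegment('%SystemDrive%')            # True
PathHelper._IsWindowsDrivePathSegment('%%environ_systemdrive%%')  # True
PathHelper._IsWindowsDrivePathSegment('Windows')                  # False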
q25977
PathHelper.AppendPathEntries
train
def AppendPathEntries(
      cls, path, path_separator, number_of_wildcards, skip_first):
    """Appends glob wildcards to a path.

    This function appends glob wildcards "*" to a path, returning paths with
    an additional glob wildcard up to the specified number. For example,
    given the path "/tmp" and 2 wildcards, this function returns "/tmp/*" and
    "/tmp/*/*". When skip_first is True the path with the first wildcard is
    not returned as a result.

    Args:
      path (str): path to append glob wildcards to.
      path_separator (str): path segment separator.
      number_of_wildcards (int): number of glob wildcards to append.
      skip_first (bool): True if the first path with glob wildcard should be
          skipped as a result.

    Returns:
      list[str]: paths with glob wildcards.
    """
    if path[-1] == path_separator:
      path = path[:-1]

    if skip_first:
      path = ''.join([path, path_separator, '*'])
      number_of_wildcards -= 1

    paths = []
    for _ in range(0, number_of_wildcards):
      path = ''.join([path, path_separator, '*'])
      paths.append(path)

    return paths
python
{ "resource": "" }
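A small sketch of the wildcard expansion described in the docstring, derived directly from the code above.

PathHelper.AppendPathEntries('/tmp', '/', 2, False)
# ['/tmp/*', '/tmp/*/*']

PathHelper.AppendPathEntries('/tmp', '/', 2, True)
# skip_first consumes the first wildcard level: ['/tmp/*/*']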
q25978
PathHelper.ExpandRecursiveGlobs
train
def ExpandRecursiveGlobs(cls, path, path_separator):
    """Expands recursive like globs present in an artifact path.

    If a path ends in '**', with up to two optional digits such as '**10',
    the '**' will recursively match all files and zero or more directories
    from the specified path. The optional digits indicate the recursion
    depth. By default recursion depth is 10 directories. If the glob is
    followed by the specified path segment separator, only directories and
    subdirectories will be matched.

    Args:
      path (str): path to be expanded.
      path_separator (str): path segment separator.

    Returns:
      list[str]: paths expanded for each glob.
    """
    glob_regex = r'(.*)?{0:s}\*\*(\d{{1,2}})?({0:s})?$'.format(
        re.escape(path_separator))

    match = re.search(glob_regex, path)
    if not match:
      return [path]

    skip_first = False
    if match.group(3):
      skip_first = True

    if match.group(2):
      iterations = int(match.group(2))
    else:
      iterations = cls._RECURSIVE_GLOB_LIMIT
      logger.warning((
          'Path "{0:s}" contains fully recursive glob, limiting to 10 '
          'levels').format(path))

    return cls.AppendPathEntries(
        match.group(1), path_separator, iterations, skip_first)
python
{ "resource": "" }
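A sketch of the recursive glob expansion, traced from the regular expression and AppendPathEntries above.

PathHelper.ExpandRecursiveGlobs('/var/log/**2', '/')
# recursion depth 2: ['/var/log/*', '/var/log/*/*']

PathHelper.ExpandRecursiveGlobs('/var/log', '/')
# no recursive glob present: ['/var/log']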
q25979
PathHelper.ExpandWindowsPath
train
def ExpandWindowsPath(cls, path, environment_variables):
    """Expands a Windows path containing environment variables.

    Args:
      path (str): Windows path with environment variables.
      environment_variables (list[EnvironmentVariableArtifact]): environment
          variables.

    Returns:
      str: expanded Windows path.
    """
    if environment_variables is None:
      environment_variables = []

    lookup_table = {}
    if environment_variables:
      for environment_variable in environment_variables:
        attribute_name = environment_variable.name.upper()
        attribute_value = environment_variable.value
        if not isinstance(attribute_value, py2to3.STRING_TYPES):
          continue

        lookup_table[attribute_name] = attribute_value

    path_segments = path.split('\\')

    # Make a copy of path_segments since this loop can change it.
    for index, path_segment in enumerate(list(path_segments)):
      if (len(path_segment) <= 2 or not path_segment.startswith('%') or
          not path_segment.endswith('%')):
        continue

      path_segment_upper_case = path_segment.upper()
      if path_segment_upper_case.startswith('%%ENVIRON_'):
        lookup_key = path_segment_upper_case[10:-2]
      else:
        lookup_key = path_segment_upper_case[1:-1]
      path_segment = lookup_table.get(lookup_key, path_segment)
      path_segment = path_segment.split('\\')

      expanded_path_segments = list(path_segments[:index])
      expanded_path_segments.extend(path_segment)
      expanded_path_segments.extend(path_segments[index + 1:])

      path_segments = expanded_path_segments

    if cls._IsWindowsDrivePathSegment(path_segments[0]):
      path_segments[0] = ''

    return '\\'.join(path_segments)
python
{ "resource": "" }
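A minimal sketch of environment variable expansion, assuming EnvironmentVariableArtifact from plaso.containers.artifacts supplies the name/value pair looked up above.

from plaso.containers import artifacts

system_root = artifacts.EnvironmentVariableArtifact(
    case_sensitive=False, name='SystemRoot', value='C:\\Windows')

PathHelper.ExpandWindowsPath('%SystemRoot%\\System32', [system_root])
# '\\Windows\\System32' - the drive indicator is stripped by
# _IsWindowsDrivePathSegment at the end of the method.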
q25980
PathHelper.GetDisplayNameForPathSpec
train
def GetDisplayNameForPathSpec(
      cls, path_spec, mount_path=None, text_prepend=None):
    """Retrieves the display name of a path specification.

    Args:
      path_spec (dfvfs.PathSpec): path specification.
      mount_path (Optional[str]): path where the file system that is used
          by the path specification is mounted, such as "/mnt/image". The
          mount path will be stripped from the absolute path defined by
          the path specification.
      text_prepend (Optional[str]): text to prepend.

    Returns:
      str: human readable version of the path specification or None.
    """
    if not path_spec:
      return None

    relative_path = cls.GetRelativePathForPathSpec(
        path_spec, mount_path=mount_path)
    if not relative_path:
      return path_spec.type_indicator

    if text_prepend:
      relative_path = '{0:s}{1:s}'.format(text_prepend, relative_path)

    parent_path_spec = path_spec.parent
    if parent_path_spec and path_spec.type_indicator in (
        dfvfs_definitions.TYPE_INDICATOR_BZIP2,
        dfvfs_definitions.TYPE_INDICATOR_GZIP):
      parent_path_spec = parent_path_spec.parent

    if parent_path_spec and parent_path_spec.type_indicator == (
        dfvfs_definitions.TYPE_INDICATOR_VSHADOW):
      store_index = getattr(path_spec.parent, 'store_index', None)
      if store_index is not None:
        return 'VSS{0:d}:{1:s}:{2:s}'.format(
            store_index + 1, path_spec.type_indicator, relative_path)

    return '{0:s}:{1:s}'.format(path_spec.type_indicator, relative_path)
python
{ "resource": "" }
q25981
PathHelper.GetRelativePathForPathSpec
train
def GetRelativePathForPathSpec(cls, path_spec, mount_path=None):
    """Retrieves the relative path of a path specification.

    If a mount path is defined the path will be relative to the mount point,
    otherwise the path is relative to the root of the file system that is
    used by the path specification.

    Args:
      path_spec (dfvfs.PathSpec): path specification.
      mount_path (Optional[str]): path where the file system that is used
          by the path specification is mounted, such as "/mnt/image". The
          mount path will be stripped from the absolute path defined by
          the path specification.

    Returns:
      str: relative path or None.
    """
    if not path_spec:
      return None

    # TODO: Solve this differently, quite possibly inside dfVFS using mount
    # path spec.
    location = getattr(path_spec, 'location', None)
    if not location and path_spec.HasParent():
      location = getattr(path_spec.parent, 'location', None)

    if not location:
      return None

    data_stream = getattr(path_spec, 'data_stream', None)
    if data_stream:
      location = '{0:s}:{1:s}'.format(location, data_stream)

    if path_spec.type_indicator != dfvfs_definitions.TYPE_INDICATOR_OS:
      return location

    # If we are parsing a mount point we don't want to include the full
    # path to file's location here, we are only interested in the path
    # relative to the mount point.
    if mount_path and location.startswith(mount_path):
      location = location[len(mount_path):]

    return location
python
{ "resource": "" }
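An illustrative call using a dfVFS OS path specification; the dfVFS factory import paths are assumptions based on typical dfVFS usage.

from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory

path_spec = path_spec_factory.Factory.NewPathSpec(
    dfvfs_definitions.TYPE_INDICATOR_OS,
    location='/mnt/image/var/log/syslog')

PathHelper.GetRelativePathForPathSpec(path_spec, mount_path='/mnt/image')
# '/var/log/syslog'

PathHelper.GetRelativePathForPathSpec(path_spec)
# '/mnt/image/var/log/syslog'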
q25982
GetUnicodeString
train
def GetUnicodeString(value):
  """Attempts to convert the argument to a Unicode string.

  Args:
    value (list|int|bytes|str): value to convert.

  Returns:
    str: string representation of the argument.
  """
  if isinstance(value, list):
    value = [GetUnicodeString(item) for item in value]
    return ''.join(value)

  if isinstance(value, py2to3.INTEGER_TYPES):
    value = '{0:d}'.format(value)

  if not isinstance(value, py2to3.UNICODE_TYPE):
    return codecs.decode(value, 'utf8', 'ignore')
  return value
python
{ "resource": "" }
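Illustrative conversions for the helper above, derived from its branches.

GetUnicodeString('already text')   # 'already text'
GetUnicodeString(42)               # '42'
GetUnicodeString(b'bytes')         # 'bytes' (decoded as UTF-8, errors ignored)
GetUnicodeString(['a', 7, b'c'])   # 'a7c' (items converted, then joined)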
q25983
GenericBinaryOperator.Operate
train
def Operate(self, values):
    """Takes a list of values and if at least one matches, returns True."""
    for val in values:
      try:
        if self.Operation(val, self.right_operand):
          return True
      except (TypeError, ValueError):
        pass

    return False
python
{ "resource": "" }
q25984
ValueExpander.Expand
train
def Expand(self, obj, path):
    """Returns a list of all the values for the given path in the object obj.

    Given a path such as ["sub1", "sub2"] it returns all the values available
    in obj.sub1.sub2 as a list.

    sub1 and sub2 must be data attributes or properties.

    If sub1 returns a list of objects, or a generator, Expand aggregates the
    values for the remaining path for each of the objects, thus returning a
    list of all the values under the given path for the input object.

    Args:
      obj: An object that will be traversed for the given path
      path: A list of strings

    Yields:
      The values once the object is traversed.
    """
    if isinstance(path, py2to3.STRING_TYPES):
      path = path.split(self.FIELD_SEPARATOR)

    attr_name = self._GetAttributeName(path)
    attr_value = self._GetValue(obj, attr_name)
    if attr_value is None:
      return

    if len(path) == 1:
      for value in self._AtLeaf(attr_value):
        yield value
    else:
      for value in self._AtNonLeaf(attr_value, path):
        yield value
python
{ "resource": "" }
q25985
ContextExpression.SetExpression
train
def SetExpression(self, expression):
    """Set the expression."""
    if isinstance(expression, lexer.Expression):
      self.args = [expression]
    else:
      raise errors.ParseError(
          'Expected expression, got {0:s}.'.format(expression))
python
{ "resource": "" }
q25986
ContextExpression.Compile
train
def Compile(self, filter_implementation):
    """Compile the expression."""
    arguments = [self.attribute]
    for argument in self.args:
      arguments.append(argument.Compile(filter_implementation))

    expander = filter_implementation.FILTERS['ValueExpander']
    context_cls = filter_implementation.FILTERS['Context']
    return context_cls(arguments=arguments, value_expander=expander)
python
{ "resource": "" }
q25987
Parser.FlipAllowed
train
def FlipAllowed(self):
    """Raise an error if the not keyword is used where it is not allowed."""
    if not hasattr(self, 'flipped'):
      raise errors.ParseError('Not defined.')

    if not self.flipped:
      return

    if self.current_expression.operator:
      if not self.current_expression.operator.lower() in (
          'is', 'contains', 'inset', 'equals'):
        raise errors.ParseError(
            'Keyword \'not\' does not work against operator: {0:s}'.format(
                self.current_expression.operator))
python
{ "resource": "" }
q25988
Parser.FlipLogic
train
def FlipLogic(self, **unused_kwargs):
    """Flip the boolean logic of the expression.

    If an expression is configured to return True when the condition
    is met this logic will flip that to False, and vice versa.
    """
    if hasattr(self, 'flipped') and self.flipped:
      raise errors.ParseError(
          'The operator \'not\' can only be expressed once.')

    if self.current_expression.args:
      raise errors.ParseError(
          'Unable to place the keyword \'not\' after an argument.')

    self.flipped = True

    # Check if this flip operation should be allowed.
    self.FlipAllowed()

    if hasattr(self.current_expression, 'FlipBool'):
      self.current_expression.FlipBool()
      logging.debug('Negative matching [flipping boolean logic].')
    else:
      logging.warning(
          'Unable to perform a negative match, issuing a positive one.')
python
{ "resource": "" }
q25989
Parser.InsertIntArg
train
def InsertIntArg(self, string='', **unused_kwargs):
    """Inserts an Integer argument."""
    try:
      int_value = int(string)
    except (TypeError, ValueError):
      raise errors.ParseError('{0:s} is not a valid integer.'.format(string))
    return self.InsertArg(int_value)
python
{ "resource": "" }
q25990
Parser.Reduce
train
def Reduce(self):
    """Reduce the token stack into an AST."""
    # Check for sanity.
    if self.state != 'INITIAL' and self.state != 'BINARY':
      self.Error('Premature end of expression')

    length = len(self.stack)
    while length > 1:
      # Precedence order.
      self._CombineParenthesis()
      self._CombineBinaryExpressions('and')
      self._CombineBinaryExpressions('or')
      self._CombineContext()

      # No change.
      if len(self.stack) == length:
        break
      length = len(self.stack)

    if length != 1:
      self.Error('Illegal query expression')

    return self.stack[0]
python
{ "resource": "" }
q25991
SpotlightPlugin.GetEntries
train
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Spotlight entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    shortcuts = match.get('UserShortcuts', {})
    for search_text, data in iter(shortcuts.items()):
      datetime_value = data.get('LAST_USED', None)
      if not datetime_value:
        continue

      display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>')
      path = data.get('PATH', '<PATH>')

      event_data = plist_event.PlistTimeEventData()
      event_data.desc = (
          'Spotlight term searched "{0:s}" associate to {1:s} '
          '({2:s})').format(search_text, display_name, path)
      event_data.key = search_text
      event_data.root = '/UserShortcuts'

      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25992
SQLitePlugin._GetRowValue
train
def _GetRowValue(self, query_hash, row, value_name):
    """Retrieves a value from the row.

    Args:
      query_hash (int): hash of the query, that uniquely identifies the query
          that produced the row.
      row (sqlite3.Row): row.
      value_name (str): name of the value.

    Returns:
      object: value.
    """
    keys_name_to_index_map = self._keys_per_query.get(query_hash, None)
    if not keys_name_to_index_map:
      keys_name_to_index_map = {
          name: index for index, name in enumerate(row.keys())}
      self._keys_per_query[query_hash] = keys_name_to_index_map

    value_index = keys_name_to_index_map.get(value_name)

    # Note that pysqlite does not accept a Unicode string in row['string']
    # and will raise "IndexError: Index must be int or string".
    return row[value_index]
python
{ "resource": "" }
q25993
SQLitePlugin._HashRow
train
def _HashRow(cls, row):
    """Hashes the given row.

    Args:
      row (sqlite3.Row): row.

    Returns:
      int: hash value of the given row.
    """
    values = []
    for value in row:
      try:
        value = '{0!s}'.format(value)
      except UnicodeDecodeError:
        # In Python 2, blobs are "read-write buffer" and will cause a
        # UnicodeDecodeError exception if we try to format it as a string.
        # Since Python 3 does not support the buffer type we cannot check
        # the type of value.
        value = repr(value)
      values.append(value)

    return hash(' '.join(values))
python
{ "resource": "" }
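A sketch of the row hashing used for de-duplication, using an in-memory database so the example is self-contained; it calls the classmethod on the plugin interface class defined above.

import sqlite3

connection = sqlite3.connect(':memory:')
connection.row_factory = sqlite3.Row
connection.execute('CREATE TABLE logs (message TEXT, created INTEGER)')
connection.execute('INSERT INTO logs VALUES (?, ?)', ('start', 1567000000))

row = connection.execute('SELECT message, created FROM logs').fetchone()
# Identical rows hash to the same value, so _ParseQuery can skip rows it has
# already seen via the per-query row cache.
row_hash = SQLitePlugin._HashRow(row)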
q25994
SQLitePlugin._ParseQuery
train
def _ParseQuery(self, parser_mediator, database, query, callback, cache):
    """Queries a database and parses the results.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      database (SQLiteDatabase): database.
      query (str): query.
      callback (function): function to invoke to parse an individual row.
      cache (SQLiteCache): cache.
    """
    row_cache = cache.GetRowCache(query)

    try:
      rows = database.Query(query)
    except sqlite3.DatabaseError as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to run query: {0:s} on database with error: {1!s}'.format(
              query, exception))
      return

    for index, row in enumerate(rows):
      if parser_mediator.abort:
        break

      row_hash = self._HashRow(row)
      if row_hash in row_cache:
        continue

      try:
        callback(parser_mediator, query, row, cache=cache, database=database)
      except Exception as exception:  # pylint: disable=broad-except
        parser_mediator.ProduceExtractionWarning((
            'unable to parse row: {0:d} with callback: {1:s} on database '
            'with error: {2!s}').format(
                index, callback.__name__, exception))
        # TODO: consider removing return.
        return

      row_cache.add(row_hash)
python
{ "resource": "" }
q25995
SQLitePlugin.CheckSchema
train
def CheckSchema(self, database):
    """Checks the schema of a database with that defined in the plugin.

    Args:
      database (SQLiteDatabase): database.

    Returns:
      bool: True if the schema of the database matches that defined by the
          plugin, or False if the schemas do not match or no schema is
          defined by the plugin.
    """
    schema_match = False
    if self.SCHEMAS:
      for schema in self.SCHEMAS:
        if database and database.schema == schema:
          schema_match = True

    return schema_match
python
{ "resource": "" }
q25996
SQLitePlugin.Process
train
def Process(
      self, parser_mediator, cache=None, database=None, **unused_kwargs):
    """Determine if this is the right plugin for this database.

    This function takes a SQLiteDatabase object and compares the list of
    required tables against the available tables in the database. If all the
    tables defined in REQUIRED_TABLES are present in the database then this
    plugin is considered to be the correct plugin and the queries defined in
    QUERIES are run against the database to produce events.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cache (Optional[SQLiteCache]): cache.
      database (Optional[SQLiteDatabase]): database.

    Raises:
      ValueError: if the database or cache value is missing.
    """
    if cache is None:
      raise ValueError('Missing cache value.')

    if database is None:
      raise ValueError('Missing database value.')

    # This will raise if unhandled keyword arguments are passed.
    super(SQLitePlugin, self).Process(parser_mediator)

    for query, callback_method in self.QUERIES:
      if parser_mediator.abort:
        break

      callback = getattr(self, callback_method, None)
      if callback is None:
        logger.warning(
            '[{0:s}] missing callback method: {1:s} for query: {2:s}'.format(
                self.NAME, callback_method, query))
        continue

      self._ParseQuery(parser_mediator, database, query, callback, cache)
python
{ "resource": "" }
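A minimal sketch of a plugin subclass showing how QUERIES, REQUIRED_TABLES and a row callback fit together; the table and column names are hypothetical, not from any real plugin.

class ExampleHistoryPlugin(SQLitePlugin):
  """Hypothetical SQLite plugin for an example history database."""

  NAME = 'example_history'
  DESCRIPTION = 'Parser for a hypothetical history SQLite database.'

  QUERIES = [
      ('SELECT url, visit_time FROM visits', 'ParseVisitRow')]

  REQUIRED_TABLES = frozenset(['visits'])

  def ParseVisitRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a visit row."""
    query_hash = hash(query)
    url = self._GetRowValue(query_hash, row, 'url')
    visit_time = self._GetRowValue(query_hash, row, 'visit_time')
    # An event would normally be produced here, for example via
    # parser_mediator.ProduceEventWithEventData(event, event_data).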
q25997
SQLiteStorageFile._AddAttributeContainer
train
def _AddAttributeContainer(self, container_type, attribute_container):
    """Adds an attribute container.

    Args:
      container_type (str): attribute container type.
      attribute_container (AttributeContainer): attribute container.

    Raises:
      IOError: if the attribute container cannot be serialized.
      OSError: if the attribute container cannot be serialized.
    """
    container_list = self._GetSerializedAttributeContainerList(container_type)

    identifier = identifiers.SQLTableIdentifier(
        container_type, container_list.next_sequence_number + 1)
    attribute_container.SetIdentifier(identifier)

    serialized_data = self._SerializeAttributeContainer(attribute_container)
    container_list.PushAttributeContainer(serialized_data)

    if container_list.data_size > self._maximum_buffer_size:
      self._WriteSerializedAttributeContainerList(container_type)
python
{ "resource": "" }
q25998
SQLiteStorageFile._AddSerializedEvent
train
def _AddSerializedEvent(self, event):
    """Adds a serialized event.

    Args:
      event (EventObject): event.

    Raises:
      IOError: if the event cannot be serialized.
      OSError: if the event cannot be serialized.
    """
    identifier = identifiers.SQLTableIdentifier(
        self._CONTAINER_TYPE_EVENT,
        self._serialized_event_heap.number_of_events + 1)
    event.SetIdentifier(identifier)

    serialized_data = self._SerializeAttributeContainer(event)
    self._serialized_event_heap.PushEvent(event.timestamp, serialized_data)

    if self._serialized_event_heap.data_size > self._maximum_buffer_size:
      self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)
python
{ "resource": "" }
q25999
SQLiteStorageFile._CheckStorageMetadata
train
def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):
    """Checks the storage metadata.

    Args:
      metadata_values (dict[str, str]): metadata values per key.
      check_readable_only (Optional[bool]): whether the store should only be
          checked to see if it can be read. If False, the store will be
          checked to see if it can be read and written to.

    Raises:
      IOError: if the format version or the serializer format is not
          supported.
      OSError: if the format version or the serializer format is not
          supported.
    """
    format_version = metadata_values.get('format_version', None)

    if not format_version:
      raise IOError('Missing format version.')

    try:
      format_version = int(format_version, 10)
    except (TypeError, ValueError):
      raise IOError('Invalid format version: {0!s}.'.format(format_version))

    if not check_readable_only and format_version != cls._FORMAT_VERSION:
      raise IOError('Format version: {0:d} is not supported.'.format(
          format_version))

    if format_version < cls._COMPATIBLE_FORMAT_VERSION:
      raise IOError(
          'Format version: {0:d} is too old and no longer supported.'.format(
              format_version))

    if format_version > cls._FORMAT_VERSION:
      raise IOError(
          'Format version: {0:d} is too new and not yet supported.'.format(
              format_version))

    metadata_values['format_version'] = format_version

    compression_format = metadata_values.get('compression_format', None)
    if compression_format not in definitions.COMPRESSION_FORMATS:
      raise IOError('Unsupported compression format: {0:s}'.format(
          compression_format))

    serialization_format = metadata_values.get('serialization_format', None)
    if serialization_format != definitions.SERIALIZER_FORMAT_JSON:
      raise IOError('Unsupported serialization format: {0:s}'.format(
          serialization_format))

    storage_type = metadata_values.get('storage_type', None)
    if storage_type not in definitions.STORAGE_TYPES:
      raise IOError('Unsupported storage type: {0:s}'.format(
          storage_type))
python
{ "resource": "" }
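An illustrative metadata check, assuming the compression/serialization/storage type constants in plaso.lib.definitions; the format version is read from the class itself to avoid hard-coding a value.

metadata_values = {
    'format_version': '{0:d}'.format(SQLiteStorageFile._FORMAT_VERSION),
    'compression_format': definitions.COMPRESSION_FORMAT_ZLIB,
    'serialization_format': definitions.SERIALIZER_FORMAT_JSON,
    'storage_type': definitions.STORAGE_TYPE_SESSION}

# Raises IOError when a value is unsupported; otherwise normalizes
# format_version to an integer in place.
SQLiteStorageFile._CheckStorageMetadata(metadata_values)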