Dataset columns:
  _id               string, lengths 2-7
  title             string, lengths 1-88
  partition         string, 3 classes
  text              string, lengths 75-19.8k
  language          string, 1 class
  meta_information  dict
q25200
Event.invoke_webhook_handlers
train
def invoke_webhook_handlers(self):
    """
    Invokes any webhook handlers that have been registered for this event
    based on event type or event sub-type.

    See event handlers registered in the ``djstripe.event_handlers`` module
    (or handlers registered in djstripe plugins or contrib packages).
    """
    webhooks.call_handlers(event=self)

    signal = WEBHOOK_SIGNALS.get(self.type)
    if signal:
        return signal.send(sender=Event, event=self)
python
{ "resource": "" }
q25201
sync_subscriber
train
def sync_subscriber(subscriber):
    """Sync a Customer with Stripe api data."""
    customer, _created = Customer.get_or_create(subscriber=subscriber)
    try:
        customer.sync_from_stripe_data(customer.api_retrieve())
        customer._sync_subscriptions()
        customer._sync_invoices()
        customer._sync_cards()
        customer._sync_charges()
    except InvalidRequestError as e:
        print("ERROR: " + str(e))
    return customer
python
{ "resource": "" }
q25202
get_callback_function
train
def get_callback_function(setting_name, default=None):
    """
    Resolve a callback function based on a setting name.

    If the setting value isn't set, default is returned.

    If the setting value is already a callable function, that value is used -
    If the setting value is a string, an attempt is made to import it.
    Anything else will result in a failed import causing ImportError to be raised.

    :param setting_name: The name of the setting to resolve a callback from.
    :type setting_name: string (``str``/``unicode``)
    :param default: The default to return if setting isn't populated.
    :type default: ``bool``
    :returns: The resolved callback function (if any).
    :type: ``callable``
    """
    func = getattr(settings, setting_name, None)
    if not func:
        return default

    if callable(func):
        return func

    if isinstance(func, str):
        func = import_string(func)

    if not callable(func):
        raise ImproperlyConfigured("{name} must be callable.".format(name=setting_name))

    return func
python
{ "resource": "" }
q25203
get_subscriber_model
train
def get_subscriber_model():
    """
    Attempt to pull settings.DJSTRIPE_SUBSCRIBER_MODEL.

    Users have the option of specifying a custom subscriber model via the
    DJSTRIPE_SUBSCRIBER_MODEL setting.

    This methods falls back to AUTH_USER_MODEL if DJSTRIPE_SUBSCRIBER_MODEL is not set.

    Returns the subscriber model that is active in this project.
    """
    model_name = get_subscriber_model_string()

    # Attempt a Django 1.7 app lookup
    try:
        subscriber_model = django_apps.get_model(model_name)
    except ValueError:
        raise ImproperlyConfigured(
            "DJSTRIPE_SUBSCRIBER_MODEL must be of the form 'app_label.model_name'."
        )
    except LookupError:
        raise ImproperlyConfigured(
            "DJSTRIPE_SUBSCRIBER_MODEL refers to model '{model}' "
            "that has not been installed.".format(model=model_name)
        )

    if (
        "email" not in [field_.name for field_ in subscriber_model._meta.get_fields()]
    ) and not hasattr(subscriber_model, "email"):
        raise ImproperlyConfigured("DJSTRIPE_SUBSCRIBER_MODEL must have an email attribute.")

    if model_name != settings.AUTH_USER_MODEL:
        # Custom user model detected. Make sure the callback is configured.
        func = get_callback_function("DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK")
        if not func:
            raise ImproperlyConfigured(
                "DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK must be implemented "
                "if a DJSTRIPE_SUBSCRIBER_MODEL is defined."
            )

    return subscriber_model
python
{ "resource": "" }
q25204
set_stripe_api_version
train
def set_stripe_api_version(version=None, validate=True):
    """
    Set the desired API version to use for Stripe requests.

    :param version: The version to set for the Stripe API.
    :type version: ``str``
    :param validate: If True validate the value for the specified version).
    :type validate: ``bool``
    """
    version = version or get_stripe_api_version()

    if validate:
        valid = validate_stripe_api_version(version)
        if not valid:
            raise ValueError("Bad stripe API version: {}".format(version))

    stripe.api_version = version
python
{ "resource": "" }
q25205
Command.handle
train
def handle(self, *args, **options):
    """Call sync_subscriber on Subscribers without customers associated to them."""
    qs = get_subscriber_model().objects.filter(djstripe_customers__isnull=True)

    count = 0
    total = qs.count()
    for subscriber in qs:
        count += 1
        perc = int(round(100 * (float(count) / float(total))))
        print(
            "[{0}/{1} {2}%] Syncing {3} [{4}]".format(
                count, total, perc, subscriber.email, subscriber.pk
            )
        )
        sync_subscriber(subscriber)
python
{ "resource": "" }
q25206
AnalysisProcess._ProcessEvent
train
def _ProcessEvent(self, mediator, event):
  """Processes an event.

  Args:
    mediator (AnalysisMediator): mediates interactions between analysis
        plugins and other components, such as storage and dfvfs.
    event (EventObject): event.
  """
  try:
    self._analysis_plugin.ExamineEvent(mediator, event)

  except Exception as exception:  # pylint: disable=broad-except
    self.SignalAbort()

    # TODO: write analysis error.
    if self._debug_output:
      logger.warning('Unhandled exception while processing event object.')
      logger.exception(exception)
python
{ "resource": "" }
q25207
GoogleDriveSyncLogParser._GetISO8601String
train
def _GetISO8601String(self, structure): """Retrieves an ISO 8601 date time string from the structure. The date and time values in Google Drive Sync log files are formatted as: "2018-01-24 18:25:08,454 -0800". Args: structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Returns: str: ISO 8601 date time string. Raises: ValueError: if the structure cannot be converted into a date time string. """ time_zone_offset = structure.time_zone_offset try: time_zone_offset_hours = int(time_zone_offset[1:3], 10) time_zone_offset_minutes = int(time_zone_offset[3:5], 10) except (IndexError, TypeError, ValueError) as exception: raise ValueError( 'unable to parse time zone offset with error: {0!s}.'.format( exception)) try: iso8601 = ( '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}' '{7:s}{8:02d}:{9:02d}').format( structure.year, structure.month, structure.day, structure.hours, structure.minutes, structure.seconds, structure.microseconds, time_zone_offset[0], time_zone_offset_hours, time_zone_offset_minutes) except ValueError as exception: raise ValueError( 'unable to format date time string with error: {0!s}.'.format( exception)) return iso8601
python
{ "resource": "" }
q25208
GoogleDriveSyncLogParser._ParseRecordLogline
train
def _ParseRecordLogline(self, parser_mediator, structure): """Parses a logline record structure and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. """ date_time = dfdatetime_time_elements.TimeElementsInMilliseconds() try: datetime_iso8601 = self._GetISO8601String(structure.date_time) date_time.CopyFromStringISO8601(datetime_iso8601) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return event_data = GoogleDriveSyncLogEventData() event_data.log_level = structure.log_level event_data.pid = structure.pid event_data.thread = structure.thread event_data.source_code = structure.source_code # Replace newlines with spaces in structure.message to preserve output. event_data.message = structure.message.replace('\n', ' ') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25209
GoogleDriveSyncLogParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, lines): """Verify that this file is a Google Drive Sync log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise. """ try: structure = self._GDS_LINE.parseString(lines) except pyparsing.ParseException as exception: logger.debug('Not a Google Drive Sync log file: {0!s}'.format(exception)) return False date_time = dfdatetime_time_elements.TimeElementsInMilliseconds() try: datetime_iso8601 = self._GetISO8601String(structure.date_time) date_time.CopyFromStringISO8601(datetime_iso8601) except ValueError as exception: logger.debug(( 'Not a Google Drive Sync log file, invalid date/time: {0!s} ' 'with error: {1!s}').format(structure.date_time, exception)) return False return True
python
{ "resource": "" }
q25210
BaseGoogleChromeHistoryPlugin._GetVisitSource
train
def _GetVisitSource(self, visit_identifier, cache, database): """Retrieves a visit source type based on the identifier. Args: visit_identifier (str): identifier from the visits table for the particular record. cache (SQLiteCache): cache which contains cached results from querying the visit_source table. database (SQLiteDatabase): database. Returns: int: visit source type or None if no visit source type was found for the identifier. """ sync_cache_results = cache.GetResults('sync') if not sync_cache_results: result_set = database.Query(self._SYNC_CACHE_QUERY) cache.CacheQueryResults(result_set, 'sync', 'id', ('source',)) sync_cache_results = cache.GetResults('sync') if sync_cache_results and visit_identifier: results = sync_cache_results.get(visit_identifier, None) if results: return results[0] return None
python
{ "resource": "" }
q25211
BaseGoogleChromeHistoryPlugin.ParseLastVisitedRow
train
def ParseLastVisitedRow( self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): """Parses a last visited row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (SQLiteCache): cache which contains cached results from querying the visits and urls tables. database (Optional[SQLiteDatabase]): database. """ query_hash = hash(query) hidden = self._GetRowValue(query_hash, row, 'hidden') transition = self._GetRowValue(query_hash, row, 'transition') visit_identifier = self._GetRowValue(query_hash, row, 'visit_id') from_visit = self._GetRowValue(query_hash, row, 'from_visit') event_data = ChromeHistoryPageVisitedEventData() event_data.from_visit = self._GetUrl(from_visit, cache, database) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.page_transition_type = ( transition & self._PAGE_TRANSITION_CORE_MASK) event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count') event_data.url = self._GetRowValue(query_hash, row, 'url') event_data.url_hidden = hidden == '1' event_data.visit_source = self._GetVisitSource( visit_identifier, cache, database) timestamp = self._GetRowValue(query_hash, row, 'visit_time') date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25212
GoogleChrome27HistoryPlugin.ParseFileDownloadedRow
train
def ParseFileDownloadedRow( self, parser_mediator, query, row, **unused_kwargs): """Parses a file downloaded row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = ChromeHistoryFileDownloadedEventData() event_data.full_path = self._GetRowValue(query_hash, row, 'target_path') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.received_bytes = self._GetRowValue( query_hash, row, 'received_bytes') event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes') event_data.url = self._GetRowValue(query_hash, row, 'url') timestamp = self._GetRowValue(query_hash, row, 'start_time') date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25213
CLITool._EnforceProcessMemoryLimit
train
def _EnforceProcessMemoryLimit(self, memory_limit):
  """Enforces a process memory limit.

  Args:
    memory_limit (int): maximum number of bytes the process is allowed
        to allocate, where 0 represents no limit and None a default of
        4 GiB.
  """
  # Resource is not supported on Windows.
  if resource:
    if memory_limit is None:
      memory_limit = 4 * 1024 * 1024 * 1024
    elif memory_limit == 0:
      memory_limit = resource.RLIM_INFINITY

    resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
python
{ "resource": "" }
q25214
CLITool._ParseLogFileOptions
train
def _ParseLogFileOptions(self, options):
  """Parses the log file options.

  Args:
    options (argparse.Namespace): command line arguments.
  """
  self._log_file = self.ParseStringOption(options, 'log_file')
  if not self._log_file:
    local_date_time = datetime.datetime.now()
    self._log_file = (
        '{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz').format(
            self.NAME, local_date_time.year, local_date_time.month,
            local_date_time.day, local_date_time.hour, local_date_time.minute,
            local_date_time.second)
python
{ "resource": "" }
q25215
CLITool._ParseTimezoneOption
train
def _ParseTimezoneOption(self, options): """Parses the timezone options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid. """ time_zone_string = self.ParseStringOption(options, 'timezone') if isinstance(time_zone_string, py2to3.STRING_TYPES): if time_zone_string.lower() == 'list': self.list_timezones = True elif time_zone_string: try: pytz.timezone(time_zone_string) except pytz.UnknownTimeZoneError: raise errors.BadConfigOption( 'Unknown time zone: {0:s}'.format(time_zone_string)) self._preferred_time_zone = time_zone_string
python
{ "resource": "" }
q25216
CLITool._PromptUserForInput
train
def _PromptUserForInput(self, input_text):
  """Prompts user for an input.

  Args:
    input_text (str): text used for prompting the user for input.

  Returns:
    str: input read from the user.
  """
  self._output_writer.Write('{0:s}: '.format(input_text))
  return self._input_reader.Read()
python
{ "resource": "" }
q25217
CLITool.AddBasicOptions
train
def AddBasicOptions(self, argument_group):
  """Adds the basic options to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  version_string = self.GetVersionInformation()

  # We want a custom help message and not the default argparse one.
  argument_group.add_argument(
      '-h', '--help', action='help',
      help='Show this help message and exit.')

  argument_group.add_argument(
      '--troubles', dest='show_troubleshooting', action='store_true',
      default=False, help='Show troubleshooting information.')

  argument_group.add_argument(
      '-V', '--version', dest='version', action='version',
      version=version_string, help='Show the version information.')
python
{ "resource": "" }
q25218
CLITool.AddInformationalOptions
train
def AddInformationalOptions(self, argument_group):
  """Adds the informational options to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  argument_group.add_argument(
      '-d', '--debug', dest='debug', action='store_true', default=False,
      help='Enable debug output.')

  argument_group.add_argument(
      '-q', '--quiet', dest='quiet', action='store_true', default=False,
      help='Disable informational output.')
python
{ "resource": "" }
q25219
CLITool.AddLogFileOptions
train
def AddLogFileOptions(self, argument_group):
  """Adds the log file option to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  argument_group.add_argument(
      '--logfile', '--log_file', '--log-file', action='store',
      metavar='FILENAME', dest='log_file', type=str, default='', help=(
          'Path of the file in which to store log messages, by default '
          'this file will be named: "{0:s}-YYYYMMDDThhmmss.log.gz". Note '
          'that the file will be gzip compressed if the extension is '
          '".gz".').format(self.NAME))
python
{ "resource": "" }
q25220
CLITool.AddTimeZoneOption
train
def AddTimeZoneOption(self, argument_group):
  """Adds the time zone option to the argument group.

  Args:
    argument_group (argparse._ArgumentGroup): argparse argument group.
  """
  # Note the default here is None so we can determine if the time zone
  # option was set.
  argument_group.add_argument(
      '-z', '--zone', '--timezone', dest='timezone', action='store',
      type=str, default=None, help=(
          'explicitly define the timezone. Typically the timezone is '
          'determined automatically where possible otherwise it will '
          'default to UTC. Use "-z list" to see a list of available '
          'timezones.'))
python
{ "resource": "" }
q25221
CLITool.GetCommandLineArguments
train
def GetCommandLineArguments(self): """Retrieves the command line arguments. Returns: str: command line arguments. """ command_line_arguments = sys.argv if not command_line_arguments: return '' if isinstance(command_line_arguments[0], py2to3.BYTES_TYPE): encoding = sys.stdin.encoding # Note that sys.stdin.encoding can be None. if not encoding: encoding = self.preferred_encoding try: command_line_arguments = [ argument.decode(encoding) for argument in command_line_arguments] except UnicodeDecodeError: logger.error( 'Unable to properly read command line input due to encoding ' 'error. Replacing non Basic Latin (C0) characters with "?" or ' '"\\ufffd".') command_line_arguments = [ argument.decode(encoding, errors='replace') for argument in command_line_arguments] return ' '.join(command_line_arguments)
python
{ "resource": "" }
q25222
CLITool.ListTimeZones
train
def ListTimeZones(self): """Lists the timezones.""" max_length = 0 for timezone_name in pytz.all_timezones: if len(timezone_name) > max_length: max_length = len(timezone_name) utc_date_time = datetime.datetime.utcnow() table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Timezone', 'UTC Offset'], title='Zones') for timezone_name in pytz.all_timezones: try: local_timezone = pytz.timezone(timezone_name) except AssertionError as exception: logger.error(( 'Unable to determine information about timezone: {0:s} with ' 'error: {1!s}').format(timezone_name, exception)) continue local_date_string = '{0!s}'.format( local_timezone.localize(utc_date_time)) if '+' in local_date_string: _, _, diff = local_date_string.rpartition('+') diff_string = '+{0:s}'.format(diff) else: _, _, diff = local_date_string.rpartition('-') diff_string = '-{0:s}'.format(diff) table_view.AddRow([timezone_name, diff_string]) table_view.Write(self._output_writer)
python
{ "resource": "" }
q25223
CLITool.ParseNumericOption
train
def ParseNumericOption(self, options, name, base=10, default_value=None): """Parses a numeric option. If the option is not set the default value is returned. Args: options (argparse.Namespace): command line arguments. name (str): name of the numeric option. base (Optional[int]): base of the numeric value. default_value (Optional[object]): default value. Returns: int: numeric value. Raises: BadConfigOption: if the options are invalid. """ numeric_value = getattr(options, name, None) if not numeric_value: return default_value try: return int(numeric_value, base) except (TypeError, ValueError): name = name.replace('_', ' ') raise errors.BadConfigOption( 'Unsupported numeric value {0:s}: {1!s}.'.format( name, numeric_value))
python
{ "resource": "" }
q25224
CLITool.PrintSeparatorLine
train
def PrintSeparatorLine(self):
  """Prints a separator line."""
  self._output_writer.Write('-' * self._LINE_LENGTH)
  self._output_writer.Write('\n')
python
{ "resource": "" }
q25225
_ImportPythonModule
train
def _ImportPythonModule(module_name):
  """Imports a Python module.

  Args:
    module_name (str): name of the module.

  Returns:
    module: Python module or None if the module cannot be imported.
  """
  try:
    module_object = list(map(__import__, [module_name]))[0]
  except ImportError:
    return None

  # If the module name contains dots get the upper most module object.
  if '.' in module_name:
    for submodule_name in module_name.split('.')[1:]:
      module_object = getattr(module_object, submodule_name, None)

  return module_object
python
{ "resource": "" }
q25226
ChromeCacheIndexFileParser._ParseIndexTable
train
def _ParseIndexTable(self, file_object): """Parses the index table. Args: file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: if the index table cannot be read. """ cache_address_map = self._GetDataTypeMap('uint32le') file_offset = file_object.get_offset() cache_address_data = file_object.read(4) while len(cache_address_data) == 4: try: value = self._ReadStructureFromByteStream( cache_address_data, file_offset, cache_address_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map cache address at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) if value: cache_address = CacheAddress(value) self.index_table.append(cache_address) file_offset += 4 cache_address_data = file_object.read(4)
python
{ "resource": "" }
q25227
ChromeCacheIndexFileParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object): """Parses a file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: when the file cannot be parsed. """ try: self._ParseFileHeader(file_object) except errors.ParseError as exception: raise errors.ParseError( 'Unable to parse index file header with error: {0!s}'.format( exception)) # Skip over the LRU data, which is 112 bytes in size. file_object.seek(112, os.SEEK_CUR) self._ParseIndexTable(file_object)
python
{ "resource": "" }
q25228
ChromeCacheDataBlockFileParser._ParseFileHeader
train
def _ParseFileHeader(self, file_object): """Parses the file header. Args: file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: if the file header cannot be read. """ file_header_map = self._GetDataTypeMap( 'chrome_cache_data_block_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError( 'Unable to parse data block file header with error: {0!s}'.format( exception)) if file_header.signature != self._FILE_SIGNATURE: raise errors.ParseError('Unsupported data block file signature') format_version = '{0:d}.{1:d}'.format( file_header.major_version, file_header.minor_version) if format_version not in ('2.0', '2.1'): raise errors.ParseError( 'Unsupported data block file format version: {0:s}'.format( format_version)) if file_header.block_size not in (256, 1024, 4096): raise errors.ParseError( 'Unsupported data block file block size: {0:d}'.format( file_header.block_size))
python
{ "resource": "" }
q25229
ChromeCacheParser._ParseCacheEntries
train
def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files): """Parses Chrome Cache file entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. index_table (list[CacheAddress]): the cache addresses which are stored in the index file. data_block_files (dict[str: file]): look up table for the data block file-like object handles. """ # Parse the cache entries in the data block files. for cache_address in index_table: cache_address_chain_length = 0 while cache_address.value != 0: if cache_address_chain_length >= 64: parser_mediator.ProduceExtractionWarning( 'Maximum allowed cache address chain length reached.') break data_block_file_object = data_block_files.get( cache_address.filename, None) if not data_block_file_object: message = 'Cache address: 0x{0:08x} missing data file.'.format( cache_address.value) parser_mediator.ProduceExtractionWarning(message) break try: cache_entry = self._data_block_file_parser.ParseCacheEntry( data_block_file_object, cache_address.block_offset) except (IOError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'Unable to parse cache entry with error: {0!s}'.format( exception)) break event_data = ChromeCacheEntryEventData() event_data.original_url = cache_entry.original_url date_time = dfdatetime_webkit_time.WebKitTime( timestamp=cache_entry.creation_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) cache_address = cache_entry.next cache_address_chain_length += 1
python
{ "resource": "" }
q25230
ChromeCacheParser._ParseIndexTable
train
def _ParseIndexTable( self, parser_mediator, file_system, file_entry, index_table): """Parses a Chrome Cache index table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_system (dfvfs.FileSystem): file system. file_entry (dfvfs.FileEntry): file entry. index_table (list[CacheAddress]): the cache addresses which are stored in the index file. """ # Build a lookup table for the data block files. path_segments = file_system.SplitPath(file_entry.path_spec.location) data_block_files = {} for cache_address in index_table: if cache_address.filename not in data_block_files: # Remove the previous filename from the path segments list and # add one of the data block files. path_segments.pop() path_segments.append(cache_address.filename) # We need to pass only used arguments to the path specification # factory otherwise it will raise. kwargs = {} if file_entry.path_spec.parent: kwargs['parent'] = file_entry.path_spec.parent kwargs['location'] = file_system.JoinPath(path_segments) data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec( file_entry.path_spec.TYPE_INDICATOR, **kwargs) try: data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry( data_block_file_path_spec) except RuntimeError as exception: message = ( 'Unable to open data block file: {0:s} with error: ' '{1!s}'.format(kwargs['location'], exception)) parser_mediator.ProduceExtractionWarning(message) data_block_file_entry = None if not data_block_file_entry: message = 'Missing data block file: {0:s}'.format( cache_address.filename) parser_mediator.ProduceExtractionWarning(message) data_block_file_object = None else: data_block_file_object = data_block_file_entry.GetFileObject() try: self._data_block_file_parser.ParseFileObject( parser_mediator, data_block_file_object) except (IOError, errors.ParseError) as exception: message = ( 'Unable to parse data block file: {0:s} with error: ' '{1!s}').format(cache_address.filename, exception) parser_mediator.ProduceExtractionWarning(message) data_block_file_object.close() data_block_file_object = None data_block_files[cache_address.filename] = data_block_file_object try: self._ParseCacheEntries( parser_mediator, index_table, data_block_files) finally: for data_block_file_object in iter(data_block_files.values()): if data_block_file_object: data_block_file_object.close()
python
{ "resource": "" }
q25231
ChromeCacheParser.ParseFileEntry
train
def ParseFileEntry(self, parser_mediator, file_entry): """Parses Chrome Cache files. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_entry (dfvfs.FileEntry): file entry. Raises: UnableToParseFile: when the file cannot be parsed. """ index_file_parser = ChromeCacheIndexFileParser() file_object = file_entry.GetFileObject() try: index_file_parser.ParseFileObject(parser_mediator, file_object) except (IOError, errors.ParseError) as exception: file_object.close() display_name = parser_mediator.GetDisplayName() raise errors.UnableToParseFile( '[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format( self.NAME, display_name, exception)) # TODO: create event based on index file creation time. try: file_system = file_entry.GetFileSystem() self._ParseIndexTable( parser_mediator, file_system, file_entry, index_file_parser.index_table) finally: file_object.close()
python
{ "resource": "" }
q25232
XLSXOutputModule._FormatDateTime
train
def _FormatDateTime(self, event): """Formats the date to a datetime object without timezone information. Note: timezone information must be removed due to lack of support by xlsxwriter and Excel. Args: event (EventObject): event. Returns: datetime.datetime|str: date and time value or a string containing "ERROR" on OverflowError. """ try: datetime_object = datetime.datetime( 1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC) datetime_object += datetime.timedelta(microseconds=event.timestamp) datetime_object.astimezone(self._output_mediator.timezone) return datetime_object.replace(tzinfo=None) except (OverflowError, ValueError) as exception: self._ReportEventError(event, ( 'unable to copy timestamp: {0!s} to a human readable date and time ' 'with error: {1!s}. Defaulting to: "ERROR"').format( event.timestamp, exception)) return 'ERROR'
python
{ "resource": "" }
q25233
XLSXOutputModule._RemoveIllegalXMLCharacters
train
def _RemoveIllegalXMLCharacters(self, xml_string): """Removes illegal characters for XML. If the input is not a string it will be returned unchanged. Args: xml_string (str): XML with possible illegal characters. Returns: str: XML where all illegal characters have been removed. """ if not isinstance(xml_string, py2to3.STRING_TYPES): return xml_string return self._ILLEGAL_XML_RE.sub('\ufffd', xml_string)
python
{ "resource": "" }
q25234
XLSXOutputModule.Open
train
def Open(self): """Creates a new workbook. Raises: IOError: if the specified output file already exists. OSError: if the specified output file already exists. ValueError: if the filename is not set. """ if not self._filename: raise ValueError('Missing filename.') if os.path.isfile(self._filename): raise IOError(( 'Unable to use an already existing file for output ' '[{0:s}]').format(self._filename)) options = { 'constant_memory': True, 'strings_to_urls': False, 'strings_to_formulas': False, 'default_date_format': self._timestamp_format} self._workbook = xlsxwriter.Workbook(self._filename, options) self._sheet = self._workbook.add_worksheet('Sheet') self._current_row = 0
python
{ "resource": "" }
q25235
XLSXOutputModule.WriteEventBody
train
def WriteEventBody(self, event): """Writes the body of an event object to the spreadsheet. Args: event (EventObject): event. """ for field_name in self._fields: if field_name == 'datetime': output_value = self._FormatDateTime(event) else: output_value = self._dynamic_fields_helper.GetFormattedField( event, field_name) output_value = self._RemoveIllegalXMLCharacters(output_value) # Auto adjust the column width based on the length of the output value. column_index = self._fields.index(field_name) self._column_widths.setdefault(column_index, 0) if field_name == 'datetime': column_width = min( self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2) else: column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2) self._column_widths[column_index] = max( self._MIN_COLUMN_WIDTH, self._column_widths[column_index], column_width) self._sheet.set_column( column_index, column_index, self._column_widths[column_index]) if (field_name == 'datetime' and isinstance(output_value, datetime.datetime)): self._sheet.write_datetime( self._current_row, column_index, output_value) else: self._sheet.write(self._current_row, column_index, output_value) self._current_row += 1
python
{ "resource": "" }
q25236
XLSXOutputModule.WriteHeader
train
def WriteHeader(self): """Writes the header to the spreadsheet.""" self._column_widths = {} bold = self._workbook.add_format({'bold': True}) bold.set_align('center') for index, field_name in enumerate(self._fields): self._sheet.write(self._current_row, index, field_name, bold) self._column_widths[index] = len(field_name) + 2 self._current_row += 1 self._sheet.autofilter(0, len(self._fields) - 1, 0, 0) self._sheet.freeze_panes(1, 0)
python
{ "resource": "" }
q25237
ApacheAccessParser.VerifyStructure
train
def VerifyStructure(self, parser_mediator, line):
  """Verifies that this is an apache access log file.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    line (str): line from the text file.

  Returns:
    bool: True if this is the correct parser, False otherwise.
  """
  return max([parser.matches(line) for _, parser in self.LINE_STRUCTURES])
python
{ "resource": "" }
q25238
SkypePlugin.ParseAccountInformation
train
def ParseAccountInformation( self, parser_mediator, query, row, **unused_kwargs): """Parses account information. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row with account information. """ query_hash = hash(query) display_name = self._GetRowValue(query_hash, row, 'given_displayname') fullname = self._GetRowValue(query_hash, row, 'fullname') # TODO: Move this to the formatter, and ensure username is rendered # properly when fullname and/or display_name is None. username = '{0!s} <{1!s}>'.format(fullname, display_name) event_data = SkypeAccountEventData() event_data.country = self._GetRowValue(query_hash, row, 'country') event_data.display_name = display_name event_data.email = self._GetRowValue(query_hash, row, 'emails') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.username = username timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Authenticate Request') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Online') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Mood Event') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Used') parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25239
SkypePlugin.ParseChat
train
def ParseChat(self, parser_mediator, query, row, **unused_kwargs): """Parses a chat message. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query. """ query_hash = hash(query) participants = self._GetRowValue(query_hash, row, 'participants') author = self._GetRowValue(query_hash, row, 'author') dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner') from_displayname = self._GetRowValue(query_hash, row, 'from_displayname') accounts = [] participants = participants.split(' ') for participant in participants: if participant != author: accounts.append(participant) to_account = ', '.join(accounts) if not to_account: to_account = dialog_partner or 'Unknown User' from_account = '{0:s} <{1:s}>'.format(from_displayname, author) event_data = SkypeChatEventData() event_data.from_account = from_account event_data.query = query event_data.text = self._GetRowValue(query_hash, row, 'body_xml') event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.to_account = to_account timestamp = self._GetRowValue(query_hash, row, 'timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype') parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25240
SkypePlugin.ParseSMS
train
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs): """Parses an SMS. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query. """ query_hash = hash(query) phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms') if phone_number: phone_number = phone_number.replace(' ', '') event_data = SkypeSMSEventData() event_data.number = phone_number event_data.query = query event_data.text = self._GetRowValue(query_hash, row, 'msg_sms') timestamp = self._GetRowValue(query_hash, row, 'time_sms') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype') parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25241
SkypePlugin.ParseCall
train
def ParseCall(self, parser_mediator, query, row, **unused_kwargs): """Parses a call. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query. query (Optional[str]): query. """ query_hash = hash(query) guid = self._GetRowValue(query_hash, row, 'guid') is_incoming = self._GetRowValue(query_hash, row, 'is_incoming') videostatus = self._GetRowValue(query_hash, row, 'videostatus') try: aux = guid if aux: aux_list = aux.split('-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = 'Unknown [no GUID]' dst_aux = 'Unknown [no GUID]' except IndexError: src_aux = 'Unknown [{0:s}]'.format(guid) dst_aux = 'Unknown [{0:s}]'.format(guid) if is_incoming == '0': user_start_call = True source = src_aux ip_address = self._GetRowValue(query_hash, row, 'ip_address') if ip_address: destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux call_identifier = self._GetRowValue(query_hash, row, 'id') event_data = SkypeCallEventData() event_data.dst_call = destination event_data.offset = call_identifier event_data.query = query event_data.src_call = source event_data.user_start_call = user_start_call event_data.video_conference = videostatus == '3' timestamp = self._GetRowValue(query_hash, row, 'try_call') event_data.call_type = 'WAITING' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype') parser_mediator.ProduceEventWithEventData(event, event_data) try: timestamp = self._GetRowValue(query_hash, row, 'accept_call') timestamp = int(timestamp) except (ValueError, TypeError): timestamp = None if timestamp: event_data.call_type = 'ACCEPTED' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype') parser_mediator.ProduceEventWithEventData(event, event_data) try: call_duration = self._GetRowValue(query_hash, row, 'call_duration') call_duration = int(call_duration) except (ValueError, TypeError): parser_mediator.ProduceExtractionWarning( 'unable to determine when call: {0:s} was finished.'.format( call_identifier)) call_duration = None if call_duration: timestamp += call_duration event_data.call_type = 'FINISHED' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype') parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25242
ProcessInfo.GetUsedMemory
train
def GetUsedMemory(self):
  """Retrieves the amount of memory used by the process.

  Returns:
    int: amount of memory in bytes used by the process or None
        if not available.
  """
  try:
    memory_info = self._process.memory_info()
  except psutil.NoSuchProcess:
    return None

  # Psutil will return different memory information depending on what is
  # available in that platform.
  memory_data = getattr(memory_info, 'data', 0)
  memory_shared = getattr(memory_info, 'shared', 0)

  return memory_data + memory_shared
python
{ "resource": "" }
q25243
AirportPlugin.GetEntries
train
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): """Extracts relevant Airport entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. """ if 'RememberedNetworks' not in match: return for wifi in match['RememberedNetworks']: ssid = wifi.get('SSIDString', 'UNKNOWN_SSID') security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE') event_data = plist_event.PlistTimeEventData() event_data.desc = ( '[WiFi] Connected to network: <{0:s}> using security {1:s}').format( ssid, security_type) event_data.key = 'item' event_data.root = '/RememberedNetworks' datetime_value = wifi.get('LastConnected', None) if datetime_value: event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) else: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25244
MactimeParser._GetIntegerValue
train
def _GetIntegerValue(self, row, value_name):
  """Converts a specific value of the row to an integer.

  Args:
    row (dict[str, str]): fields of a single row, as specified in COLUMNS.
    value_name (str): name of the value within the row.

  Returns:
    int: value or None if the value cannot be converted.
  """
  value = row.get(value_name, None)
  try:
    return int(value, 10)
  except (TypeError, ValueError):
    return None
python
{ "resource": "" }
q25245
ChromeAutofillPlugin.ParseAutofillRow
train
def ParseAutofillRow( self, parser_mediator, query, row, **unused_kwargs): """Parses an autofill entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = ChromeAutofillEventData() event_data.field_name = self._GetRowValue(query_hash, row, 'name') event_data.value = self._GetRowValue(query_hash, row, 'value') event_data.usage_count = self._GetRowValue(query_hash, row, 'count') event_data.query = query # Create one event for the first time an autofill entry was used timestamp = self._GetRowValue(query_hash, row, 'date_created') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) # If the autofill value has been used more than once, create another # event for the most recent time it was used. if event_data.usage_count > 1: timestamp = self._GetRowValue(query_hash, row, 'date_last_used') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_USED) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25246
MultiProcessEngine._AbortJoin
train
def _AbortJoin(self, timeout=None):
  """Aborts all registered processes by joining with the parent process.

  Args:
    timeout (int): number of seconds to wait for processes to join, where
        None represents no timeout.
  """
  for pid, process in iter(self._processes_per_pid.items()):
    logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
        process.name, pid))
    process.join(timeout=timeout)
    if not process.is_alive():
      logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
          process.name, pid))
python
{ "resource": "" }
q25247
MultiProcessEngine._AbortKill
train
def _AbortKill(self):
  """Aborts all registered processes by sending a SIGKILL or equivalent."""
  for pid, process in iter(self._processes_per_pid.items()):
    if not process.is_alive():
      continue

    logger.warning('Killing process: {0:s} (PID: {1:d}).'.format(
        process.name, pid))
    self._KillProcess(pid)
python
{ "resource": "" }
q25248
MultiProcessEngine._AbortTerminate
train
def _AbortTerminate(self):
  """Aborts all registered processes by sending a SIGTERM or equivalent."""
  for pid, process in iter(self._processes_per_pid.items()):
    if not process.is_alive():
      continue

    logger.warning('Terminating process: {0:s} (PID: {1:d}).'.format(
        process.name, pid))
    process.terminate()
python
{ "resource": "" }
q25249
MultiProcessEngine._CheckStatusWorkerProcess
train
def _CheckStatusWorkerProcess(self, pid): """Checks the status of a worker process. If a worker process is not responding the process is terminated and a replacement process is started. Args: pid (int): process ID (PID) of a registered worker process. Raises: KeyError: if the process is not registered with the engine. """ # TODO: Refactor this method, simplify and separate concerns (monitoring # vs management). self._RaiseIfNotRegistered(pid) process = self._processes_per_pid[pid] process_status = self._QueryProcessStatus(process) if process_status is None: process_is_alive = False else: process_is_alive = True process_information = self._process_information_per_pid[pid] used_memory = process_information.GetUsedMemory() or 0 if self._worker_memory_limit and used_memory > self._worker_memory_limit: logger.warning(( 'Process: {0:s} (PID: {1:d}) killed because it exceeded the ' 'memory limit: {2:d}.').format( process.name, pid, self._worker_memory_limit)) self._KillProcess(pid) if isinstance(process_status, dict): self._rpc_errors_per_pid[pid] = 0 status_indicator = process_status.get('processing_status', None) else: rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1 self._rpc_errors_per_pid[pid] = rpc_errors if rpc_errors > self._MAXIMUM_RPC_ERRORS: process_is_alive = False if process_is_alive: rpc_port = process.rpc_port.value logger.warning(( 'Unable to retrieve process: {0:s} (PID: {1:d}) status via ' 'RPC socket: http://localhost:{2:d}').format( process.name, pid, rpc_port)) processing_status_string = 'RPC error' status_indicator = definitions.STATUS_INDICATOR_RUNNING else: processing_status_string = 'killed' status_indicator = definitions.STATUS_INDICATOR_KILLED process_status = { 'processing_status': processing_status_string} self._UpdateProcessingStatus(pid, process_status, used_memory) # _UpdateProcessingStatus can also change the status of the worker, # So refresh the status if applicable. for worker_status in self._processing_status.workers_status: if worker_status.pid == pid: status_indicator = worker_status.status break if status_indicator in definitions.ERROR_STATUS_INDICATORS: logger.error(( 'Process {0:s} (PID: {1:d}) is not functioning correctly. ' 'Status code: {2!s}.').format(process.name, pid, status_indicator)) self._TerminateProcessByPid(pid) replacement_process = None for replacement_process_attempt in range( self._MAXIMUM_REPLACEMENT_RETRIES): logger.info(( 'Attempt: {0:d} to start replacement worker process for ' '{1:s}').format(replacement_process_attempt + 1, process.name)) replacement_process = self._StartWorkerProcess( process.name, self._storage_writer) if replacement_process: break time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY) if not replacement_process: logger.error( 'Unable to create replacement worker process for: {0:s}'.format( process.name))
python
{ "resource": "" }
q25250
MultiProcessEngine._KillProcess
train
def _KillProcess(self, pid):
  """Issues a SIGKILL or equivalent to the process.

  Args:
    pid (int): process identifier (PID).
  """
  if sys.platform.startswith('win'):
    process_terminate = 1
    handle = ctypes.windll.kernel32.OpenProcess(
        process_terminate, False, pid)
    ctypes.windll.kernel32.TerminateProcess(handle, -1)
    ctypes.windll.kernel32.CloseHandle(handle)

  else:
    try:
      os.kill(pid, signal.SIGKILL)
    except OSError as exception:
      logger.error('Unable to kill process {0:d} with error: {1!s}'.format(
          pid, exception))
python
{ "resource": "" }
q25251
MultiProcessEngine._QueryProcessStatus
train
def _QueryProcessStatus(self, process):
  """Queries a process to determine its status.

  Args:
    process (MultiProcessBaseProcess): process to query for its status.

  Returns:
    dict[str, str]: status values received from the worker process.
  """
  process_is_alive = process.is_alive()
  if process_is_alive:
    rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
    process_status = rpc_client.CallFunction()
  else:
    process_status = None

  return process_status
python
{ "resource": "" }
q25252
MultiProcessEngine._RegisterProcess
train
def _RegisterProcess(self, process):
  """Registers a process with the engine.

  Args:
    process (MultiProcessBaseProcess): process.

  Raises:
    KeyError: if the process is already registered with the engine.
    ValueError: if the process is missing.
  """
  if process is None:
    raise ValueError('Missing process.')

  if process.pid in self._processes_per_pid:
    raise KeyError(
        'Already managing process: {0!s} (PID: {1:d})'.format(
            process.name, process.pid))

  self._processes_per_pid[process.pid] = process
python
{ "resource": "" }
q25253
MultiProcessEngine._StartMonitoringProcess
train
def _StartMonitoringProcess(self, process): """Starts monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: IOError: if the RPC client cannot connect to the server. KeyError: if the process is not registered with the engine or if the process is already being monitored. OSError: if the RPC client cannot connect to the server. ValueError: if the process is missing. """ if process is None: raise ValueError('Missing process.') pid = process.pid if pid in self._process_information_per_pid: raise KeyError( 'Already monitoring process (PID: {0:d}).'.format(pid)) if pid in self._rpc_clients_per_pid: raise KeyError( 'RPC client (PID: {0:d}) already exists'.format(pid)) rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient() # Make sure that a worker process has started its RPC server. # The RPC port will be 0 if no server is available. rpc_port = process.rpc_port.value time_waited_for_process = 0.0 while not rpc_port: time.sleep(0.1) rpc_port = process.rpc_port.value time_waited_for_process += 0.1 if time_waited_for_process >= self._RPC_SERVER_TIMEOUT: raise IOError( 'RPC client unable to determine server (PID: {0:d}) port.'.format( pid)) hostname = 'localhost' if not rpc_client.Open(hostname, rpc_port): raise IOError(( 'RPC client unable to connect to server (PID: {0:d}) ' 'http://{1:s}:{2:d}').format(pid, hostname, rpc_port)) self._rpc_clients_per_pid[pid] = rpc_client self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
python
{ "resource": "" }
q25254
MultiProcessEngine._StartStatusUpdateThread
train
def _StartStatusUpdateThread(self):
  """Starts the status update thread."""
  self._status_update_active = True
  self._status_update_thread = threading.Thread(
      name='Status update', target=self._StatusUpdateThreadMain)
  self._status_update_thread.start()
python
{ "resource": "" }
q25255
MultiProcessEngine._StopMonitoringProcess
train
def _StopMonitoringProcess(self, process): """Stops monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is not monitored. ValueError: if the process is missing. """ if process is None: raise ValueError('Missing process.') pid = process.pid self._RaiseIfNotMonitored(pid) del self._process_information_per_pid[pid] rpc_client = self._rpc_clients_per_pid.get(pid, None) if rpc_client: rpc_client.Close() del self._rpc_clients_per_pid[pid] if pid in self._rpc_errors_per_pid: del self._rpc_errors_per_pid[pid] logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format( process.name, pid))
python
{ "resource": "" }
q25256
MultiProcessEngine._StopMonitoringProcesses
train
def _StopMonitoringProcesses(self):
  """Stops monitoring all processes."""
  # We need to make a copy of the list of pids since we are changing
  # the dict in the loop.
  for pid in list(self._process_information_per_pid.keys()):
    self._RaiseIfNotRegistered(pid)
    process = self._processes_per_pid[pid]

    self._StopMonitoringProcess(process)
python
{ "resource": "" }
q25257
MultiProcessEngine._StopStatusUpdateThread
train
def _StopStatusUpdateThread(self):
  """Stops the status update thread."""
  self._status_update_active = False
  if self._status_update_thread.isAlive():
    self._status_update_thread.join()
  self._status_update_thread = None
python
{ "resource": "" }
q25258
MultiProcessEngine._TerminateProcessByPid
train
def _TerminateProcessByPid(self, pid):
  """Terminate a process that's monitored by the engine.

  Args:
    pid (int): process identifier (PID).

  Raises:
    KeyError: if the process is not registered with and monitored by the
        engine.
  """
  self._RaiseIfNotRegistered(pid)

  process = self._processes_per_pid[pid]
  self._TerminateProcess(process)
  self._StopMonitoringProcess(process)
python
{ "resource": "" }
q25259
MultiProcessEngine._TerminateProcess
train
def _TerminateProcess(self, process):
  """Terminate a process.

  Args:
    process (MultiProcessBaseProcess): process to terminate.
  """
  pid = process.pid
  logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
  process.terminate()

  # Wait for the process to exit.
  process.join(timeout=self._PROCESS_JOIN_TIMEOUT)

  if process.is_alive():
    logger.warning('Killing process: (PID: {0:d}).'.format(pid))
    self._KillProcess(pid)
python
{ "resource": "" }
q25260
PyParseRangeCheck
train
def PyParseRangeCheck(lower_bound, upper_bound):
  """Verify that a number is within a defined range.

  This is a callback method for pyparsing setParseAction that verifies
  that a read number is within a certain range.

  To use this method it needs to be defined as a callback method
  in setParseAction with the upper and lower bound set as parameters.

  Args:
    lower_bound (int): lower bound of the range.
    upper_bound (int): upper bound of the range.

  Returns:
    Function: callback method that can be used by pyparsing setParseAction.
  """
  # pylint: disable=unused-argument
  def CheckRange(string, location, tokens):
    """Parse the arguments.

    Args:
      string (str): original string.
      location (int): location in the string where the match was made
      tokens (list[str]): tokens.
    """
    try:
      check_number = tokens[0]
    except IndexError:
      check_number = -1

    if check_number < lower_bound:
      raise pyparsing.ParseException(
          'Value: {0:d} precedes lower bound: {1:d}'.format(
              check_number, lower_bound))

    if check_number > upper_bound:
      raise pyparsing.ParseException(
          'Value: {0:d} exceeds upper bound: {1:d}'.format(
              check_number, upper_bound))

  # Since callback methods for pyparsing need to accept certain parameters
  # and there is no way to define conditions, like upper and lower bounds
  # we need to return here a method that accepts those pyparsing parameters.
  return CheckRange
python
{ "resource": "" }
q25261
PyParseIntCast
train
def PyParseIntCast(string, location, tokens):
  """Return an integer from a string.

  This is a pyparsing callback method that converts the matched string
  into an integer.

  The method modifies the content of the tokens list and converts them
  all to an integer value.

  Args:
    string (str): original string.
    location (int): location in the string where the match was made.
    tokens (list[str]): extracted tokens, where the string to be converted
        is stored.
  """
  # Cast the regular tokens.
  for index, token in enumerate(tokens):
    try:
      tokens[index] = int(token)
    except ValueError:
      logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(
          token))
      tokens[index] = 0

  # We also need to cast the dictionary built tokens.
  for key in tokens.keys():
    try:
      tokens[key] = int(tokens[key], 10)
    except ValueError:
      logger.error(
          'Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format(
              key, tokens[key]))
      tokens[key] = 0
python
{ "resource": "" }
q25262
PyParseJoinList
train
def PyParseJoinList(string, location, tokens): """Return a joined token from a list of tokens. This is a callback method for pyparsing setParseAction that modifies the returned token list to join all the elements in the list to a single token. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored. """ join_list = [] for token in tokens: try: join_list.append(str(token)) except UnicodeDecodeError: join_list.append(repr(token)) tokens[0] = ''.join(join_list) del tokens[1:]
python
{ "resource": "" }
q25263
PyparsingSingleLineTextParser._IsText
train
def _IsText(self, bytes_in, encoding=None):
  """Examine the bytes in and determine if they are indicative of text.

  Parsers need a quick and at least semi-reliable method of discovering
  whether or not a particular byte stream is text or resembles text. This
  can, for instance, be used in text parsers to determine if a file is a
  text file.

  The method assumes the byte sequence is either ASCII, UTF-8 or the
  supplied character encoding. Otherwise it assumes the byte sequence is
  not text, but an arbitrary byte sequence.

  Args:
    bytes_in (bytes|str): byte stream to examine.
    encoding (Optional[str]): encoding to test; if not defined only ASCII
        and UTF-8 are tried.

  Returns:
    bool: True if the bytes stream contains text.
  """
  # TODO: Improve speed and accuracy of this method.
  # Start with the assumption we are dealing with text.
  is_text = True

  if isinstance(bytes_in, py2to3.UNICODE_TYPE):
    return is_text

  # Check if this is an ASCII text string.
  for value in bytes_in:
    if py2to3.PY_2:
      value = ord(value)
    if not 31 < value < 128:
      is_text = False
      break

  # We have an ASCII string.
  if is_text:
    return is_text

  # Check if this is UTF-8.
  try:
    bytes_in.decode('utf-8')
    return True
  except UnicodeDecodeError:
    pass

  if encoding:
    try:
      bytes_in.decode(encoding)
      return True
    except LookupError:
      logger.error('Unsupported encoding: {0:s}'.format(encoding))
    except UnicodeDecodeError:
      pass

  return False
python
{ "resource": "" }
q25264
PyparsingSingleLineTextParser._ReadLine
train
def _ReadLine(self, text_file_object, max_len=None, depth=0):
  """Reads a line from a text file.

  Args:
    text_file_object (dfvfs.TextFile): text file.
    max_len (Optional[int]): maximum number of bytes a single line can take,
        where None means all remaining bytes should be read.
    depth (Optional[int]): number of consecutive empty lines the parser has
        already encountered.

  Returns:
    str: single line read from the file-like object, or at most max_len
        characters if max_len is defined and the line is longer than the
        defined size.

  Raises:
    UnicodeDecodeError: if the text cannot be decoded using the specified
        encoding.
  """
  line = text_file_object.readline(size=max_len)

  if not line:
    return ''

  if line in self._EMPTY_LINES:
    if depth == self._MAXIMUM_DEPTH:
      return ''

    return self._ReadLine(text_file_object, max_len=max_len, depth=depth + 1)

  return line.strip()
python
{ "resource": "" }
q25265
EncodedTextReader._ReadLine
train
def _ReadLine(self, file_object): """Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object. """ if len(self._buffer) < self._buffer_size: content = file_object.read(self._buffer_size) content = content.decode(self._encoding) self._buffer = ''.join([self._buffer, content]) line, new_line, self._buffer = self._buffer.partition('\n') if not line and not new_line: line = self._buffer self._buffer = '' self._current_offset += len(line) # Strip carriage returns from the text. if line.endswith('\r'): line = line[:-len('\r')] if new_line: line = ''.join([line, '\n']) self._current_offset += len('\n') return line
python
{ "resource": "" }
q25266
EncodedTextReader.ReadLine
train
def ReadLine(self, file_object): """Reads a line. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the lines buffer. """ line, _, self.lines = self.lines.partition('\n') if not line: self.ReadLines(file_object) line, _, self.lines = self.lines.partition('\n') return line
python
{ "resource": "" }
q25267
EncodedTextReader.ReadLines
train
def ReadLines(self, file_object): """Reads lines into the lines buffer. Args: file_object (dfvfs.FileIO): file-like object. """ lines_size = len(self.lines) if lines_size < self._buffer_size: lines_size = self._buffer_size - lines_size while lines_size > 0: line = self._ReadLine(file_object) if not line: break self.lines = ''.join([self.lines, line]) lines_size -= len(line)
python
{ "resource": "" }
q25268
EncodedTextReader.SkipAhead
train
def SkipAhead(self, file_object, number_of_characters): """Skips ahead a number of characters. Args: file_object (dfvfs.FileIO): file-like object. number_of_characters (int): number of characters. """ lines_size = len(self.lines) while number_of_characters >= lines_size: number_of_characters -= lines_size self.lines = '' self.ReadLines(file_object) lines_size = len(self.lines) if lines_size == 0: return self.lines = self.lines[number_of_characters:]
python
{ "resource": "" }
q25269
UTorrentPlugin.GetEntries
train
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
  """Extracts uTorrent active torrents.

  This is the main parsing engine for the plugin. It determines if
  the selected file is the proper file to parse and extracts currently
  running torrents.

  interface.Process() checks for the given BENCODE_KEYS set, ensures
  that it matches, and then passes the bencoded data to this function for
  parsing. This plugin then parses the entire set of bencoded data to
  extract the variable file-name keys to retrieve their values.

  uTorrent creates a file, resume.dat, and a backup, resume.dat.old, for
  all active torrents. These are typically stored in the user's
  application data folder.

  These files, at a minimum, contain a '.fileguard' key and a dictionary
  with a key name for a particular download with a '.torrent' file
  extension.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    data (Optional[dict[str, object]]): bencode data values.
  """
  # Walk through one of the torrent keys to ensure it's from a valid file.
  for key, value in iter(data.items()):
    if '.torrent' not in key:
      continue

    caption = value.get('caption')
    path = value.get('path')
    seedtime = value.get('seedtime')
    if not caption or not path or seedtime is None or seedtime < 0:
      raise errors.WrongBencodePlugin(self.NAME)

  for torrent, value in iter(data.items()):
    if '.torrent' not in torrent:
      continue

    event_data = UTorrentEventData()
    event_data.caption = value.get('caption', None)
    event_data.path = value.get('path', None)

    # Convert seconds to minutes.
    seedtime = value.get('seedtime', None)
    event_data.seedtime, _ = divmod(seedtime, 60)

    # Create timeline events based on extracted values.
    for event_key, event_value in iter(value.items()):
      if event_key == 'added_on':
        date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)

      elif event_key == 'completed_on':
        date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
        parser_mediator.ProduceEventWithEventData(event, event_data)

      elif event_key == 'modtimes':
        for modtime in event_value:
          # Some values are stored as 0, skip those.
          if not modtime:
            continue

          date_time = dfdatetime_posix_time.PosixTime(timestamp=modtime)
          event = time_events.DateTimeValuesEvent(
              date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
          parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25270
SafariHistoryPlugin.GetEntries
train
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
  """Extracts Safari history items.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    match (Optional[dict[str, object]]): keys extracted from PLIST_KEYS.
  """
  format_version = match.get('WebHistoryFileVersion', None)
  if format_version != 1:
    parser_mediator.ProduceExtractionWarning(
        'unsupported Safari history version: {0!s}'.format(format_version))
    return

  if 'WebHistoryDates' not in match:
    return

  for history_entry in match.get('WebHistoryDates', {}):
    last_visited_date = history_entry.get('lastVisitedDate', None)
    if last_visited_date is None:
      parser_mediator.ProduceExtractionWarning('missing last visited date')
      continue

    try:
      # Last visited date is a string containing a floating point value.
      timestamp = float(last_visited_date)
    except (TypeError, ValueError):
      parser_mediator.ProduceExtractionWarning(
          'unable to convert last visited date {0:s}'.format(
              last_visited_date))
      continue

    event_data = SafariHistoryEventData()
    event_data.title = history_entry.get('title', None)

    # Only store the display title when it differs from the title.
    display_title = history_entry.get('displayTitle', None)
    if display_title != event_data.title:
      event_data.display_title = display_title

    # The URL of a history entry is stored as the value of the empty
    # string key.
    event_data.url = history_entry.get('', None)
    event_data.visit_count = history_entry.get('visitCount', None)
    event_data.was_http_non_get = history_entry.get(
        'lastVisitWasHTTPNonGet', None)

    # Convert the floating point value to an integer.
    # TODO: add support for the fractional part of the floating point value.
    timestamp = int(timestamp)
    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25271
AnalyzersManager.DeregisterAnalyzer
train
def DeregisterAnalyzer(cls, analyzer_class):
  """Deregisters an analyzer class.

  The analyzer classes are identified based on their lower case name.

  Args:
    analyzer_class (type): class object of the analyzer.

  Raises:
    KeyError: if analyzer class is not set for the corresponding name.
  """
  analyzer_name = analyzer_class.NAME.lower()
  if analyzer_name not in cls._analyzer_classes:
    raise KeyError('analyzer class not set for name: {0:s}'.format(
        analyzer_class.NAME))

  del cls._analyzer_classes[analyzer_name]
python
{ "resource": "" }
q25272
AnalyzersManager.GetAnalyzersInformation
train
def GetAnalyzersInformation(cls): """Retrieves the analyzers information. Returns: list[tuple]: containing: str: analyzer name. str: analyzer description. """ analyzer_information = [] for _, analyzer_class in cls.GetAnalyzers(): description = getattr(analyzer_class, 'DESCRIPTION', '') analyzer_information.append((analyzer_class.NAME, description)) return analyzer_information
python
{ "resource": "" }
q25273
AnalyzersManager.GetAnalyzerInstance
train
def GetAnalyzerInstance(cls, analyzer_name): """Retrieves an instance of a specific analyzer. Args: analyzer_name (str): name of the analyzer to retrieve. Returns: BaseAnalyzer: analyzer instance. Raises: KeyError: if analyzer class is not set for the corresponding name. """ analyzer_name = analyzer_name.lower() if analyzer_name not in cls._analyzer_classes: raise KeyError( 'analyzer class not set for name: {0:s}.'.format(analyzer_name)) analyzer_class = cls._analyzer_classes[analyzer_name] return analyzer_class()
python
{ "resource": "" }
q25274
AnalyzersManager.GetAnalyzerInstances
train
def GetAnalyzerInstances(cls, analyzer_names): """Retrieves instances for all the specified analyzers. Args: analyzer_names (list[str]): names of the analyzers to retrieve. Returns: list[BaseAnalyzer]: analyzer instances. """ analyzer_instances = [] for analyzer_name, analyzer_class in iter(cls.GetAnalyzers()): if analyzer_name in analyzer_names: analyzer_instances.append(analyzer_class()) return analyzer_instances
python
{ "resource": "" }
q25275
AnalyzersManager.GetAnalyzers
train
def GetAnalyzers(cls): """Retrieves the registered analyzers. Yields: tuple: containing: str: the uniquely identifying name of the analyzer type: the analyzer class. """ for analyzer_name, analyzer_class in iter(cls._analyzer_classes.items()): yield analyzer_name, analyzer_class
python
{ "resource": "" }
q25276
FileHashesPlugin.ExamineEvent
train
def ExamineEvent(self, mediator, event):
  """Analyzes an event and extracts hashes as required.

  Args:
    mediator (AnalysisMediator): mediates interactions between
        analysis plugins and other components, such as storage and dfvfs.
    event (EventObject): event to examine.
  """
  pathspec = getattr(event, 'pathspec', None)
  if pathspec is None:
    return

  if self._paths_with_hashes.get(pathspec, None):
    # We've already processed an event with this pathspec and extracted the
    # hashes from it.
    return

  hash_attributes = {}
  for attribute_name, attribute_value in event.GetAttributes():
    if attribute_name.endswith('_hash'):
      hash_attributes[attribute_name] = attribute_value
  self._paths_with_hashes[pathspec] = hash_attributes
python
{ "resource": "" }
q25277
FileHashesPlugin._GeneratePathString
train
def _GeneratePathString(self, mediator, pathspec, hashes):
  """Generates a string containing a pathspec and its hashes.

  Args:
    mediator (AnalysisMediator): mediates interactions between analysis
        plugins and other components, such as storage and dfvfs.
    pathspec (dfvfs.Pathspec): the path specification to generate a string
        for.
    hashes (dict[str, str]): mapping of hash attribute names to the value of
        that hash for the path specification being processed.

  Returns:
    str: string of the form "display_name: hash_type=hash_value". For
        example, "OS:/path/spec: test_hash=4 other_hash=5".
  """
  display_name = mediator.GetDisplayNameForPathSpec(pathspec)
  path_string = '{0:s}:'.format(display_name)
  for hash_name, hash_value in sorted(hashes.items()):
    path_string = '{0:s} {1:s}={2:s}'.format(
        path_string, hash_name, hash_value)
  return path_string
python
{ "resource": "" }
q25278
WinJobParser._ParseEventData
train
def _ParseEventData(self, variable_length_section):
  """Parses the event data from a variable-length data section.

  Args:
    variable_length_section (job_variable_length_data_section): a Windows
        Scheduled Task job variable-length data section.

  Returns:
    WinJobEventData: event data of the job file.
  """
  event_data = WinJobEventData()

  event_data.application = (
      variable_length_section.application_name.rstrip('\x00'))
  event_data.comment = variable_length_section.comment.rstrip('\x00')
  event_data.parameters = (
      variable_length_section.parameters.rstrip('\x00'))
  event_data.username = variable_length_section.author.rstrip('\x00')
  event_data.working_directory = (
      variable_length_section.working_directory.rstrip('\x00'))

  return event_data
python
{ "resource": "" }
q25279
WinJobParser._ParseLastRunTime
train
def _ParseLastRunTime(self, parser_mediator, fixed_length_section): """Parses the last run time from a fixed-length data section. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. fixed_length_section (job_fixed_length_data_section): a Windows Scheduled Task job fixed-length data section. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available. """ systemtime_struct = fixed_length_section.last_run_time system_time_tuple = ( systemtime_struct.year, systemtime_struct.month, systemtime_struct.weekday, systemtime_struct.day_of_month, systemtime_struct.hours, systemtime_struct.minutes, systemtime_struct.seconds, systemtime_struct.milliseconds) date_time = None if system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE: try: date_time = dfdatetime_systemtime.Systemtime( system_time_tuple=system_time_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid last run time: {0!s}'.format(system_time_tuple)) return date_time
python
{ "resource": "" }
q25280
WinJobParser._ParseTriggerEndTime
train
def _ParseTriggerEndTime(self, parser_mediator, trigger):
  """Parses the end time from a trigger.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    trigger (job_trigger): a trigger.

  Returns:
    dfdatetime.DateTimeValues: trigger end date and time or None if not
        available.
  """
  time_elements_tuple = (
      trigger.end_date.year, trigger.end_date.month,
      trigger.end_date.day_of_month, 0, 0, 0)

  date_time = None
  if time_elements_tuple != (0, 0, 0, 0, 0, 0):
    try:
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=time_elements_tuple)
      date_time.is_local_time = True
      # TODO: add functionality to dfdatetime to control precision.
      date_time._precision = dfdatetime_definitions.PRECISION_1_DAY  # pylint: disable=protected-access
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid trigger end time: {0!s}'.format(time_elements_tuple))

  return date_time
python
{ "resource": "" }
q25281
WinJobParser._ParseTriggerStartTime
train
def _ParseTriggerStartTime(self, parser_mediator, trigger):
  """Parses the start time from a trigger.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    trigger (job_trigger): a trigger.

  Returns:
    dfdatetime.DateTimeValues: trigger start date and time or None if not
        available.
  """
  time_elements_tuple = (
      trigger.start_date.year, trigger.start_date.month,
      trigger.start_date.day_of_month, trigger.start_time.hours,
      trigger.start_time.minutes, 0)

  date_time = None
  if time_elements_tuple != (0, 0, 0, 0, 0, 0):
    try:
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=time_elements_tuple)
      date_time.is_local_time = True
      # TODO: add functionality to dfdatetime to control precision.
      date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE  # pylint: disable=protected-access
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid trigger start time: {0!s}'.format(time_elements_tuple))

  return date_time
python
{ "resource": "" }
q25282
WinJobParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object):
  """Parses a Windows job file-like object.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): a file-like object.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  fixed_section_data_map = self._GetDataTypeMap(
      'job_fixed_length_data_section')

  try:
    fixed_length_section, file_offset = self._ReadStructureFromFileObject(
        file_object, 0, fixed_section_data_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.UnableToParseFile(
        'Unable to parse fixed-length data section with error: {0!s}'.format(
            exception))

  if fixed_length_section.product_version not in self._PRODUCT_VERSIONS:
    raise errors.UnableToParseFile(
        'Unsupported product version in: 0x{0:04x}'.format(
            fixed_length_section.product_version))

  if fixed_length_section.format_version != 1:
    raise errors.UnableToParseFile(
        'Unsupported format version in: {0:d}'.format(
            fixed_length_section.format_version))

  variable_section_data_map = self._GetDataTypeMap(
      'job_variable_length_data_section')

  try:
    variable_length_section, data_size = self._ReadStructureFromFileObject(
        file_object, file_offset, variable_section_data_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.UnableToParseFile((
        'Unable to parse variable-length data section with error: '
        '{0!s}').format(exception))

  file_offset += data_size

  event_data = self._ParseEventData(variable_length_section)

  date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section)
  if date_time:
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  trigger_data_map = self._GetDataTypeMap('job_trigger')

  for trigger_index in range(0, variable_length_section.number_of_triggers):
    try:
      trigger, data_size = self._ReadStructureFromFileObject(
          file_object, file_offset, trigger_data_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.UnableToParseFile((
          'Unable to parse trigger: {0:d} with error: {1!s}').format(
              trigger_index, exception))

    file_offset += data_size

    event_data.trigger_type = trigger.trigger_type

    date_time = self._ParseTriggerStartTime(parser_mediator, trigger)
    if date_time:
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START,
          time_zone=parser_mediator.timezone)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    date_time = self._ParseTriggerEndTime(parser_mediator, trigger)
    if date_time:
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START,
          time_zone=parser_mediator.timezone)
      parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25283
EventTag.AddComment
train
def AddComment(self, comment): """Adds a comment to the event tag. Args: comment (str): comment. """ if not comment: return if not self.comment: self.comment = comment else: self.comment = ''.join([self.comment, comment])
python
{ "resource": "" }
q25284
EventTag.AddLabel
train
def AddLabel(self, label):
  """Adds a label to the event tag.

  Args:
    label (str): label.

  Raises:
    TypeError: if the label provided is not a string.
    ValueError: if a label is malformed.
  """
  if not isinstance(label, py2to3.STRING_TYPES):
    raise TypeError('label is not a string type. Is {0!s}'.format(
        type(label)))

  if not self._VALID_LABEL_REGEX.match(label):
    raise ValueError((
        'Unsupported label: "{0:s}". A label must only consist of '
        'alphanumeric characters or underscores.').format(label))

  if label not in self.labels:
    self.labels.append(label)
python
{ "resource": "" }
q25285
EventTag.AddLabels
train
def AddLabels(self, labels): """Adds labels to the event tag. Args: labels (list[str]): labels. Raises: ValueError: if a label is malformed. """ for label in labels: if not self._VALID_LABEL_REGEX.match(label): raise ValueError(( 'Unsupported label: "{0:s}". A label must only consist of ' 'alphanumeric characters or underscores.').format(label)) for label in labels: if label not in self.labels: self.labels.append(label)
python
{ "resource": "" }
q25286
EventTag.CopyToDict
train
def CopyToDict(self): """Copies the event tag to a dictionary. Returns: dict[str, object]: event tag attributes. """ result_dict = { 'labels': self.labels } if self.comment: result_dict['comment'] = self.comment return result_dict
python
{ "resource": "" }
q25287
EventTag.CopyTextToLabel
train
def CopyTextToLabel(cls, text, prefix=''): """Copies a string to a label. A label only supports a limited set of characters therefore unsupported characters are replaced with an underscore. Args: text (str): label text. prefix (Optional[str]): label prefix. Returns: str: label. """ text = '{0:s}{1:s}'.format(prefix, text) return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)
python
{ "resource": "" }
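A minimal standalone illustration, not part of plaso, of the sanitization idea behind EventTag.CopyTextToLabel above. The class constant _INVALID_LABEL_CHARACTERS_REGEX is not included in this record, so the regular expression below is an assumption that only alphanumeric characters and underscores are kept; the tag text is made up:

import re

# Assumed stand-in for EventTag._INVALID_LABEL_CHARACTERS_REGEX.
invalid_label_characters = re.compile(r'[^A-Za-z0-9_]')

text = 'Malicious: PDF!'
label = invalid_label_characters.sub('_', 'tag_{0:s}'.format(text))
print(label)  # tag_Malicious__PDF_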
q25288
SSHSyslogPlugin.ParseMessage
train
def ParseMessage(self, parser_mediator, key, date_time, tokens): """Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided. """ if key not in ('failed_connection', 'login', 'opened_connection'): raise ValueError('Unknown grammar key: {0:s}'.format(key)) if key == 'login': event_data = SSHLoginEventData() elif key == 'failed_connection': event_data = SSHFailedConnectionEventData() elif key == 'opened_connection': event_data = SSHOpenedConnectionEventData() event_data.address = tokens.get('address', None) event_data.authentication_method = tokens.get( 'authentication_method', None) event_data.body = tokens.get('body', None) event_data.fingerprint = tokens.get('fingerprint', None) event_data.hostname = tokens.get('hostname', None) # TODO: pass line number to offset or remove. event_data.offset = 0 event_data.pid = tokens.get('pid', None) event_data.protocol = tokens.get('protocol', None) event_data.port = tokens.get('port', None) event_data.reporter = tokens.get('reporter', None) event_data.severity = tokens.get('severity', None) event_data.username = tokens.get('username', None) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25289
ShutdownWindowsRegistryPlugin._ParseFiletime
train
def _ParseFiletime(self, byte_stream): """Parses a FILETIME date and time value from a byte stream. Args: byte_stream (bytes): byte stream. Returns: dfdatetime.Filetime: FILETIME date and time value or None if no value is set. Raises: ParseError: if the FILETIME could not be parsed. """ filetime_map = self._GetDataTypeMap('filetime') try: filetime = self._ReadStructureFromByteStream( byte_stream, 0, filetime_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError( 'Unable to parse FILETIME value with error: {0!s}'.format( exception)) if filetime == 0: return None try: return dfdatetime_filetime.Filetime(timestamp=filetime) except ValueError: raise errors.ParseError( 'Invalid FILETIME value: 0x{0:08x}'.format(filetime))
python
{ "resource": "" }
q25290
ShutdownWindowsRegistryPlugin.ExtractEvents
train
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): """Extracts events from a ShutdownTime Windows Registry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. """ shutdown_value = registry_key.GetValueByName('ShutdownTime') if not shutdown_value: return try: date_time = self._ParseFiletime(shutdown_value.data) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning( 'unable to determine shutdown timestamp with error: {0!s}'.format( exception)) return if not date_time: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = ShutdownWindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = shutdown_value.offset event_data.value_name = shutdown_value.name event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25291
BaseCookiePlugin.Process
train
def Process(self, parser_mediator, cookie_name, cookie_data, url, **kwargs): """Determine if this is the right plugin for this cookie. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cookie_name (str): the name of the cookie value. cookie_data (bytes): the cookie data, as a byte sequence. url (str): the full URL or path where the cookie was set. Raises: errors.WrongPlugin: If the cookie name differs from the one supplied in COOKIE_NAME. ValueError: If cookie_name or cookie_data are not set. """ if cookie_name is None or cookie_data is None: raise ValueError('Cookie name or data are not set.') if cookie_name != self.COOKIE_NAME: raise errors.WrongPlugin( 'Not the correct cookie plugin for: {0:s} [{1:s}]'.format( cookie_name, self.NAME)) # This will raise if unhandled keyword arguments are passed. super(BaseCookiePlugin, self).Process(parser_mediator) self.GetEntries(parser_mediator, cookie_data=cookie_data, url=url)
python
{ "resource": "" }
q25292
Log2TimelineTool._GetPluginData
train
def _GetPluginData(self): """Retrieves the version and various plugin information. Returns: dict[str, list[str]]: available parsers and plugins. """ return_dict = {} return_dict['Versions'] = [ ('plaso engine', plaso.__version__), ('python', sys.version)] hashers_information = hashers_manager.HashersManager.GetHashersInformation() parsers_information = parsers_manager.ParsersManager.GetParsersInformation() plugins_information = ( parsers_manager.ParsersManager.GetParserPluginsInformation()) presets_information = parsers_manager.ParsersManager.GetPresetsInformation() return_dict['Hashers'] = hashers_information return_dict['Parsers'] = parsers_information return_dict['Parser Plugins'] = plugins_information return_dict['Parser Presets'] = presets_information return return_dict
python
{ "resource": "" }
q25293
Log2TimelineTool.ExtractEventsFromSources
train
def ExtractEventsFromSources(self): """Processes the sources and extracts events. Raises: BadConfigOption: if the storage file path is invalid or the storage format not supported or an invalid filter was specified. SourceScannerError: if the source scanner could not find a supported file system. UserAbort: if the user initiated an abort. """ self._CheckStorageFile(self._storage_file_path, warn_about_existing=True) scan_context = self.ScanSource(self._source_path) self._source_type = scan_context.source_type self._status_view.SetMode(self._status_view_mode) self._status_view.SetSourceInformation( self._source_path, self._source_type, artifact_filters=self._artifact_filters, filter_file=self._filter_file) status_update_callback = ( self._status_view.GetExtractionStatusUpdateCallback()) self._output_writer.Write('\n') self._status_view.PrintExtractionStatusHeader(None) self._output_writer.Write('Processing started.\n') session = engine.BaseEngine.CreateSession( artifact_filter_names=self._artifact_filters, command_line_arguments=self._command_line_arguments, debug_mode=self._debug_mode, filter_file_path=self._filter_file, preferred_encoding=self.preferred_encoding, preferred_time_zone=self._preferred_time_zone, preferred_year=self._preferred_year) storage_writer = storage_factory.StorageFactory.CreateStorageWriter( self._storage_format, session, self._storage_file_path) if not storage_writer: raise errors.BadConfigOption( 'Unsupported storage format: {0:s}'.format(self._storage_format)) single_process_mode = self._single_process_mode if self._source_type == dfvfs_definitions.SOURCE_TYPE_FILE: # No need to multi process a single file source. single_process_mode = True if single_process_mode: extraction_engine = single_process_engine.SingleProcessEngine() else: extraction_engine = multi_process_engine.TaskMultiProcessEngine( use_zeromq=self._use_zeromq) # If the source is a directory or a storage media image # run pre-processing. if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS: self._PreprocessSources(extraction_engine) configuration = self._CreateProcessingConfiguration( extraction_engine.knowledge_base) self._SetExtractionParsersAndPlugins(configuration, session) self._SetExtractionPreferredTimeZone(extraction_engine.knowledge_base) try: filter_find_specs = extraction_engine.BuildFilterFindSpecs( self._artifact_definitions_path, self._custom_artifacts_path, extraction_engine.knowledge_base, self._artifact_filters, self._filter_file) except errors.InvalidFilter as exception: raise errors.BadConfigOption( 'Unable to build filter specification: {0!s}'.format(exception)) processing_status = None if single_process_mode: logger.debug('Starting extraction in single process mode.') processing_status = extraction_engine.ProcessSources( self._source_path_specs, storage_writer, self._resolver_context, configuration, filter_find_specs=filter_find_specs, status_update_callback=status_update_callback) else: logger.debug('Starting extraction in multi process mode.') processing_status = extraction_engine.ProcessSources( session.identifier, self._source_path_specs, storage_writer, configuration, enable_sigsegv_handler=self._enable_sigsegv_handler, filter_find_specs=filter_find_specs, number_of_worker_processes=self._number_of_extraction_workers, status_update_callback=status_update_callback, worker_memory_limit=self._worker_memory_limit) self._status_view.PrintExtractionSummary(processing_status)
python
{ "resource": "" }
q25294
Log2TimelineTool.ShowInfo
train
def ShowInfo(self): """Shows information about available hashers, parsers, plugins, etc.""" self._output_writer.Write( '{0:=^80s}\n'.format(' log2timeline/plaso information ')) plugin_list = self._GetPluginData() for header, data in plugin_list.items(): table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Name', 'Description'], title=header) for entry_header, entry_data in sorted(data): table_view.AddRow([entry_header, entry_data]) table_view.Write(self._output_writer)
python
{ "resource": "" }
q25295
AndroidAppUsageParser.ParseFileObject
train
def ParseFileObject(self, parser_mediator, file_object): """Parses an Android usage-history file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ data = file_object.read(self._HEADER_READ_SIZE) if not data.startswith(b'<?xml'): raise errors.UnableToParseFile( 'Not an Android usage history file [not XML]') _, _, data = data.partition(b'\n') if not data.startswith(b'<usage-history'): raise errors.UnableToParseFile( 'Not an Android usage history file [wrong XML root key]') # The current offset of the file-like object needs to point at # the start of the file for ElementTree to parse the XML data correctly. file_object.seek(0, os.SEEK_SET) xml = ElementTree.parse(file_object) root_node = xml.getroot() for application_node in root_node: package_name = application_node.get('name', None) for part_node in application_node.iter(): if part_node.tag != 'comp': continue last_resume_time = part_node.get('lrt', None) if last_resume_time is None: parser_mediator.ProduceExtractionWarning('missing last resume time.') continue try: last_resume_time = int(last_resume_time, 10) except ValueError: parser_mediator.ProduceExtractionWarning( 'unsupported last resume time: {0:s}.'.format(last_resume_time)) continue event_data = AndroidAppUsageEventData() event_data.component = part_node.get('name', None) event_data.package = package_name date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_RESUME) parser_mediator.ProduceEventWithEventData(event, event_data)
python
{ "resource": "" }
q25296
FileObjectWinRegistryFileReader.Open
train
def Open(self, file_object, ascii_codepage='cp1252'): """Opens a Windows Registry file-like object. Args: file_object (dfvfs.FileIO): Windows Registry file-like object. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None. """ registry_file = dfwinreg_regf.REGFWinRegistryFile( ascii_codepage=ascii_codepage) # We don't catch any IOErrors here since we want to produce a parse error # from the parser if this happens. registry_file.Open(file_object) return registry_file
python
{ "resource": "" }
q25297
WinRegistryParser._CanProcessKeyWithPlugin
train
def _CanProcessKeyWithPlugin(self, registry_key, plugin): """Determines if a plugin can process a Windows Registry key or its values. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. plugin (WindowsRegistryPlugin): Windows Registry plugin. Returns: bool: True if the Registry key can be processed with the plugin. """ for registry_key_filter in plugin.FILTERS: # Skip filters that define key paths since they are already # checked by the path filter. if getattr(registry_key_filter, 'key_paths', []): continue if registry_key_filter.Match(registry_key): return True return False
python
{ "resource": "" }
q25298
WinRegistryParser._NormalizeKeyPath
train
def _NormalizeKeyPath(self, key_path): """Normalizes a Windows Registry key path. Args: key_path (str): Windows Registry key path. Returns: str: normalized Windows Registry key path. """ normalized_key_path = key_path.lower() # The Registry key path should start with: # HKEY_LOCAL_MACHINE\System\ControlSet followed by 3 digits # which makes 39 characters. if (len(normalized_key_path) < 39 or not normalized_key_path.startswith(self._CONTROL_SET_PREFIX)): return normalized_key_path # Key paths that contain ControlSet### must be normalized to # CurrentControlSet. return ''.join([ self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])
python
{ "resource": "" }
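A quick standalone check, not part of plaso, of the normalization behavior described in _NormalizeKeyPath above. The class constants _CONTROL_SET_PREFIX and _NORMALIZED_CONTROL_SET_PREFIX are not included in this record, so their values below are assumptions derived from the 39-character comment in the method; the key path is a made-up example:

# Assumed values of the class constants.
control_set_prefix = 'hkey_local_machine\\system\\controlset'
normalized_control_set_prefix = 'hkey_local_machine\\system\\currentcontrolset'

key_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Services\\Tcpip'

normalized_key_path = key_path.lower()
if (len(normalized_key_path) >= 39 and
    normalized_key_path.startswith(control_set_prefix)):
  # Skip the 36 character prefix plus the 3 control set digits.
  normalized_key_path = ''.join([
      normalized_control_set_prefix, normalized_key_path[39:]])

print(normalized_key_path)
# hkey_local_machine\system\currentcontrolset\services\tcpip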
q25299
WinRegistryParser._ParseRecurseKeys
train
def _ParseRecurseKeys(self, parser_mediator, root_key): """Parses the Registry keys recursively. Args: parser_mediator (ParserMediator): parser mediator. root_key (dfwinreg.WinRegistryKey): root Windows Registry key. """ for registry_key in root_key.RecurseKeys(): if parser_mediator.abort: break self._ParseKey(parser_mediator, registry_key)
python
{ "resource": "" }