| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q25800
|
FormatSpecificationStore.AddNewSpecification
|
train
|
def AddNewSpecification(self, identifier):
"""Adds a new format specification.
Args:
identifier (str): format identifier, which should be unique for the store.
Returns:
FormatSpecification: format specification.
Raises:
KeyError: if the store already contains a specification with
the same identifier.
"""
if identifier in self._format_specifications:
raise KeyError(
'Format specification {0:s} is already defined in store.'.format(
identifier))
self._format_specifications[identifier] = FormatSpecification(identifier)
return self._format_specifications[identifier]
|
python
|
{
"resource": ""
}
|
q25801
|
FormatSpecificationStore.AddSpecification
|
train
|
def AddSpecification(self, specification):
"""Adds a format specification.
Args:
specification (FormatSpecification): format specification.
Raises:
KeyError: if the store already contains a specification with
the same identifier.
"""
if specification.identifier in self._format_specifications:
raise KeyError(
'Format specification {0:s} is already defined in store.'.format(
specification.identifier))
self._format_specifications[specification.identifier] = specification
for signature in specification.signatures:
signature_index = len(self._signature_map)
signature_identifier = '{0:s}:{1:d}'.format(
specification.identifier, signature_index)
if signature_identifier in self._signature_map:
raise KeyError('Signature {0:s} is already defined in map.'.format(
signature_identifier))
signature.SetIdentifier(signature_identifier)
self._signature_map[signature_identifier] = specification
|
python
|
{
"resource": ""
}
|
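Both `AddNewSpecification` and `AddSpecification` above guard against duplicate identifiers before storing a specification. A minimal, self-contained sketch of that guard, using stand-in classes rather than the actual Plaso `FormatSpecificationStore`/`FormatSpecification` types:

```python
class Specification(object):
  """Stand-in for FormatSpecification: an identifier plus its signatures."""

  def __init__(self, identifier):
    self.identifier = identifier
    self.signatures = []


class SpecificationStore(object):
  """Stand-in store that rejects duplicate identifiers, as in the methods above."""

  def __init__(self):
    self._format_specifications = {}

  def AddNewSpecification(self, identifier):
    if identifier in self._format_specifications:
      raise KeyError(
          'Format specification {0:s} is already defined in store.'.format(
              identifier))
    self._format_specifications[identifier] = Specification(identifier)
    return self._format_specifications[identifier]


store = SpecificationStore()
store.AddNewSpecification('olecf')

try:
  store.AddNewSpecification('olecf')
except KeyError as exception:
  # Format specification olecf is already defined in store.
  print(exception)
```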
q25802
|
IPodPlugin.GetEntries
|
train
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extract device information from the iPod plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str, object]]): keys extracted from PLIST_KEYS.
"""
devices = match.get('Devices', {})
for device_identifier, device_information in iter(devices.items()):
datetime_value = device_information.get('Connected', None)
if not datetime_value:
continue
event_data = IPodPlistEventData()
event_data.device_id = device_identifier
# TODO: refactor.
for key, value in iter(device_information.items()):
if key == 'Connected':
continue
attribute_name = key.lower().replace(' ', '_')
setattr(event_data, attribute_name, value)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25803
|
OutputMediator.GetEventFormatter
|
train
|
def GetEventFormatter(self, event):
"""Retrieves the event formatter for a specific event type.
Args:
event (EventObject): event.
Returns:
EventFormatter: event formatter or None.
"""
data_type = getattr(event, 'data_type', None)
if not data_type:
return None
return formatters_manager.FormattersManager.GetFormatterObject(
event.data_type)
|
python
|
{
"resource": ""
}
|
q25804
|
OutputMediator.GetFormattedMessages
|
train
|
def GetFormattedMessages(self, event):
"""Retrieves the formatted messages related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full message string or None if no event formatter was found.
str: short message string or None if no event formatter was found.
"""
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None, None
return event_formatter.GetMessages(self._formatter_mediator, event)
|
python
|
{
"resource": ""
}
|
q25805
|
OutputMediator.GetFormattedSources
|
train
|
def GetFormattedSources(self, event):
"""Retrieves the formatted sources related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full source string or None if no event formatter was found.
str: short source string or None if no event formatter was found.
"""
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None, None
return event_formatter.GetSources(event)
|
python
|
{
"resource": ""
}
|
q25806
|
OutputMediator.GetMACBRepresentation
|
train
|
def GetMACBRepresentation(self, event):
"""Retrieves the MACB representation.
Args:
event (EventObject): event.
Returns:
str: MACB representation.
"""
data_type = getattr(event, 'data_type', None)
if not data_type:
return '....'
# The filestat parser is somewhat limited.
if data_type == 'fs:stat':
descriptions = event.timestamp_desc.split(';')
return_characters = ['.', '.', '.', '.']
for description in descriptions:
if description in (
'mtime', definitions.TIME_DESCRIPTION_MODIFICATION):
return_characters[0] = 'M'
elif description in (
'atime', definitions.TIME_DESCRIPTION_LAST_ACCESS):
return_characters[1] = 'A'
elif description in (
'ctime', definitions.TIME_DESCRIPTION_CHANGE):
return_characters[2] = 'C'
elif description in (
'crtime', definitions.TIME_DESCRIPTION_CREATION):
return_characters[3] = 'B'
return ''.join(return_characters)
# Access time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_LAST_ACCESS,
definitions.TIME_DESCRIPTION_ACCOUNT_CREATED,
definitions.TIME_DESCRIPTION_LAST_VISITED,
definitions.TIME_DESCRIPTION_START,
definitions.TIME_DESCRIPTION_LAST_SHUTDOWN,
definitions.TIME_DESCRIPTION_LAST_LOGIN,
definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET,
definitions.TIME_DESCRIPTION_LAST_CONNECTED,
definitions.TIME_DESCRIPTION_LAST_RUN,
definitions.TIME_DESCRIPTION_LAST_PRINTED]:
return '.A..'
# Content modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_MODIFICATION,
definitions.TIME_DESCRIPTION_WRITTEN,
definitions.TIME_DESCRIPTION_DELETED]:
return 'M...'
# Content creation time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CREATION,
definitions.TIME_DESCRIPTION_ADDED,
definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
definitions.TIME_DESCRIPTION_FIRST_CONNECTED]:
return '...B'
# Metadata modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CHANGE,
definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION]:
return '..C.'
return '....'
|
python
|
{
"resource": ""
}
|
q25807
|
OutputMediator.GetMACBRepresentationFromDescriptions
|
train
|
def GetMACBRepresentationFromDescriptions(self, timestamp_descriptions):
"""Determines the MACB representation from the timestamp descriptions.
MACB representation is a shorthand for representing one or more of
modification, access, change, birth timestamp descriptions as the letters
"MACB" or a "." if the corresponding timestamp is not set.
Note that this is an output format shorthand and does not guarantee that
the timestamps represent the same occurrence.
Args:
timestamp_descriptions (list[str]): timestamp descriptions, which are
defined in definitions.TIME_DESCRIPTIONS.
Returns:
str: MACB representation.
"""
macb_representation = []
if ('mtime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_MODIFICATION in timestamp_descriptions):
macb_representation.append('M')
else:
macb_representation.append('.')
if ('atime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_LAST_ACCESS in timestamp_descriptions):
macb_representation.append('A')
else:
macb_representation.append('.')
if ('ctime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_CHANGE in timestamp_descriptions):
macb_representation.append('C')
else:
macb_representation.append('.')
if ('crtime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_CREATION in timestamp_descriptions):
macb_representation.append('B')
else:
macb_representation.append('.')
return ''.join(macb_representation)
|
python
|
{
"resource": ""
}
|
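The MACB shorthand described in the `GetMACBRepresentationFromDescriptions` docstring above maps each of the four timestamp descriptions to a fixed position in a 4-character string. A self-contained sketch, assuming only the plain 'mtime'/'atime'/'ctime'/'crtime' descriptions rather than the `definitions.TIME_DESCRIPTION_*` constants:

```python
def GetMACBRepresentationFromDescriptions(timestamp_descriptions):
  """Maps timestamp descriptions to the 4-character MACB shorthand."""
  macb_representation = []
  for letter, description in (
      ('M', 'mtime'), ('A', 'atime'), ('C', 'ctime'), ('B', 'crtime')):
    if description in timestamp_descriptions:
      macb_representation.append(letter)
    else:
      macb_representation.append('.')
  return ''.join(macb_representation)


print(GetMACBRepresentationFromDescriptions(['mtime', 'crtime']))  # M..B
print(GetMACBRepresentationFromDescriptions(['atime']))            # .A..
print(GetMACBRepresentationFromDescriptions([]))                   # ....
```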
q25808
|
OutputMediator.GetUsername
|
train
|
def GetUsername(self, event, default_username='-'):
"""Retrieves the username related to the event.
Args:
event (EventObject): event.
default_username (Optional[str]): default username.
Returns:
str: username.
"""
username = getattr(event, 'username', None)
if username and username != '-':
return username
session_identifier = event.GetSessionIdentifier()
if session_identifier is None:
return default_username
user_sid = getattr(event, 'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(
user_sid, session_identifier=session_identifier)
return username or default_username
|
python
|
{
"resource": ""
}
|
q25809
|
OutputMediator.SetTimezone
|
train
|
def SetTimezone(self, timezone):
"""Sets the timezone.
Args:
timezone (str): timezone.
Raises:
ValueError: if the timezone is not supported.
"""
if not timezone:
return
try:
self._timezone = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise ValueError('Unsupported timezone: {0:s}'.format(timezone))
|
python
|
{
"resource": ""
}
|
q25810
|
AttributeContainersManager.DeregisterAttributeContainer
|
train
|
def DeregisterAttributeContainer(cls, attribute_container_class):
"""Deregisters an attribute container class.
The attribute container classes are identified based on their lower case
container type.
Args:
attribute_container_class (type): attribute container class.
Raises:
KeyError: if attribute container class is not set for
the corresponding container type.
"""
container_type = attribute_container_class.CONTAINER_TYPE.lower()
if container_type not in cls._attribute_container_classes:
raise KeyError(
'Attribute container class not set for container type: '
'{0:s}.'.format(attribute_container_class.CONTAINER_TYPE))
del cls._attribute_container_classes[container_type]
|
python
|
{
"resource": ""
}
|
q25811
|
SyslogPlugin.Process
|
train
|
def Process(self, parser_mediator, date_time, syslog_tokens, **kwargs):
"""Processes the data structure produced by the parser.
Args:
parser_mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
date_time (dfdatetime.DateTimeValues): date and time values.
syslog_tokens (dict[str, str]): names and values of the fields extracted by
the syslog parser and the matching grammar.
Raises:
AttributeError: If the syslog_tokens do not include a 'body' attribute.
WrongPlugin: If the plugin is unable to parse the syslog tokens.
"""
body = syslog_tokens.get('body', None)
if not body:
raise AttributeError('Missing required attribute: body')
for key, grammar in iter(self.MESSAGE_GRAMMARS):
try:
tokens = grammar.parseString(body)
syslog_tokens.update(tokens.asDict())
self.ParseMessage(parser_mediator, key, date_time, syslog_tokens)
return
except pyparsing.ParseException:
pass
raise errors.WrongPlugin('Unable to create event from: {0:s}'.format(body))
|
python
|
{
"resource": ""
}
|
q25812
|
FSEventsdEventFormatter._GetFlagValues
|
train
|
def _GetFlagValues(self, flags):
"""Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
"""
event_types = []
for event_flag, description in self._FLAG_VALUES.items():
if event_flag & flags:
event_types.append(description)
return ', '.join(event_types)
|
python
|
{
"resource": ""
}
|
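`_GetFlagValues` above decodes a bitmask by testing each known flag against the record's flags. A short sketch with a hypothetical flag table (the real formatter defines its own `_FLAG_VALUES`):

```python
# Hypothetical flag table for illustration; the descriptions are made up.
FLAG_VALUES = {
    0x01: 'Created',
    0x02: 'Removed',
    0x40: 'Renamed',
}


def GetFlagValues(flags):
  """Returns a comma separated string of descriptions for the flags set."""
  event_types = []
  for event_flag, description in FLAG_VALUES.items():
    if event_flag & flags:
      event_types.append(description)
  return ', '.join(event_types)


print(GetFlagValues(0x41))  # Created, Renamed
print(GetFlagValues(0x02))  # Removed
```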
q25813
|
DefaultOLECFPlugin._ParseItem
|
train
|
def _ParseItem(self, parser_mediator, olecf_item):
"""Parses an OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
olecf_item (pyolecf.item): OLECF item.
Returns:
bool: True if an event was produced.
"""
result = False
event_data = OLECFItemEventData()
event_data.name = olecf_item.name
event_data.offset = 0
event_data.size = olecf_item.size
creation_time, modification_time = self._GetTimestamps(olecf_item)
if creation_time:
date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
result = True
if modification_time:
date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
result = True
for sub_item in olecf_item.sub_items:
if self._ParseItem(parser_mediator, sub_item):
result = True
return result
|
python
|
{
"resource": ""
}
|
q25814
|
TaggingFile.GetEventTaggingRules
|
train
|
def GetEventTaggingRules(self):
"""Retrieves the event tagging rules from the tagging file.
Returns:
dict[str, list[FilterObject]]: tagging rules, consisting of one or more
filter objects per label.
Raises:
TaggingFileError: if a filter expression cannot be compiled.
"""
tagging_rules = {}
label_name = None
with io.open(self._path, 'r', encoding='utf-8') as tagging_file:
for line in tagging_file.readlines():
line = line.rstrip()
stripped_line = line.lstrip()
if not stripped_line or stripped_line[0] == '#':
continue
if not line[0].isspace():
label_name = line
tagging_rules[label_name] = []
continue
if not label_name:
continue
filter_object = event_filter.EventObjectFilter()
try:
filter_object.CompileFilter(stripped_line)
except errors.ParseError as exception:
raise errors.TaggingFileError((
'Unable to compile filter for label: {0:s} with error: '
'{1!s}').format(label_name, exception))
if filter_object not in tagging_rules[label_name]:
tagging_rules[label_name].append(filter_object)
return tagging_rules
|
python
|
{
"resource": ""
}
|
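`GetEventTaggingRules` above expects a plain-text tagging file in which an unindented line starts a new label, indented lines are filter expressions for that label, and blank or '#' lines are skipped. A sketch of that layout and of the label/filter split, with made-up label names and filter expressions:

```python
# Illustrative tagging-file content; labels and expressions are examples only.
TAGGING_FILE_CONTENT = """\
# Comments and blank lines are ignored.
application_execution
  data_type is 'windows:prefetch:execution'

file_downloaded
  data_type is 'chrome:history:file_downloaded'
"""

for line in TAGGING_FILE_CONTENT.splitlines():
  line = line.rstrip()
  stripped_line = line.lstrip()
  if not stripped_line or stripped_line[0] == '#':
    continue
  if not line[0].isspace():
    print('label: {0:s}'.format(line))
  else:
    print('  filter expression: {0:s}'.format(stripped_line))
```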
q25815
|
CupsIppParser._GetStringValue
|
train
|
def _GetStringValue(self, data_dict, name, default_value=None):
"""Retrieves a specific string value from the data dict.
Args:
data_dict (dict[str, list[str]]): values per name.
name (str): name of the value to retrieve.
default_value (Optional[object]): value to return if the name has no value
set in data_dict.
Returns:
str: value represented as a string.
"""
values = data_dict.get(name, None)
if not values:
return default_value
for index, value in enumerate(values):
if ',' in value:
values[index] = '"{0:s}"'.format(value)
return ', '.join(values)
|
python
|
{
"resource": ""
}
|
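A quick illustration of the quoting behaviour in `_GetStringValue` above: any value containing a comma is wrapped in double quotes before the values are joined. Restated standalone with sample data:

```python
def GetStringValue(data_dict, name, default_value=None):
  """Joins the values for a name, quoting values that contain a comma."""
  values = data_dict.get(name, None)
  if not values:
    return default_value
  for index, value in enumerate(values):
    if ',' in value:
      values[index] = '"{0:s}"'.format(value)
  return ', '.join(values)


data_dict = {'user': ['alice'], 'doc_type': ['a4', 'letter, legal']}
print(GetStringValue(data_dict, 'user'))          # alice
print(GetStringValue(data_dict, 'doc_type'))      # a4, "letter, legal"
print(GetStringValue(data_dict, 'missing', '-'))  # -
```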
q25816
|
CupsIppParser._ParseAttribute
|
train
|
def _ParseAttribute(self, file_object):
"""Parses a CUPS IPP attribute from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attribute cannot be parsed.
"""
file_offset = file_object.tell()
attribute_map = self._GetDataTypeMap('cups_ipp_attribute')
try:
attribute, _ = self._ReadStructureFromFileObject(
file_object, file_offset, attribute_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse attribute with error: {0!s}'.format(exception))
value = None
if attribute.tag_value in self._INTEGER_TAG_VALUES:
# TODO: correct file offset to point to the start of value_data.
value = self._ParseIntegerValue(attribute.value_data, file_offset)
elif attribute.tag_value == self._TAG_VALUE_BOOLEAN:
value = self._ParseBooleanValue(attribute.value_data)
elif attribute.tag_value == self._TAG_VALUE_DATE_TIME:
# TODO: correct file offset to point to the start of value_data.
value = self._ParseDateTimeValue(attribute.value_data, file_offset)
elif attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES:
value = attribute.value_data.decode(self._last_charset_attribute)
elif attribute.tag_value in self._ASCII_STRING_VALUES:
value = attribute.value_data.decode('ascii')
if attribute.tag_value == self._TAG_VALUE_CHARSET:
self._last_charset_attribute = value
else:
value = attribute.value_data
return attribute.name, value
|
python
|
{
"resource": ""
}
|
q25817
|
CupsIppParser._ParseAttributesGroup
|
train
|
def _ParseAttributesGroup(self, file_object):
"""Parses a CUPS IPP attributes group from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Yields:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attributes group cannot be parsed.
"""
tag_value_map = self._GetDataTypeMap('int8')
tag_value = 0
while tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES:
file_offset = file_object.tell()
tag_value, _ = self._ReadStructureFromFileObject(
file_object, file_offset, tag_value_map)
if tag_value >= 0x10:
file_object.seek(file_offset, os.SEEK_SET)
yield self._ParseAttribute(file_object)
elif (tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES and
tag_value not in self._DELIMITER_TAGS):
raise errors.ParseError((
'Unsupported attributes groups start tag value: '
'0x{0:02x}.').format(tag_value))
|
python
|
{
"resource": ""
}
|
q25818
|
CupsIppParser._ParseBooleanValue
|
train
|
def _ParseBooleanValue(self, byte_stream):
"""Parses a boolean value.
Args:
byte_stream (bytes): byte stream.
Returns:
bool: boolean value.
Raises:
ParseError: when the boolean value cannot be parsed.
"""
if byte_stream == b'\x00':
return False
if byte_stream == b'\x01':
return True
raise errors.ParseError('Unsupported boolean value.')
|
python
|
{
"resource": ""
}
|
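The boolean attribute value parsed by `_ParseBooleanValue` above is a single byte. Restated standalone to show the mapping:

```python
def ParseBooleanValue(byte_stream):
  """Maps the single-byte CUPS IPP boolean encoding to a Python bool."""
  if byte_stream == b'\x00':
    return False
  if byte_stream == b'\x01':
    return True
  raise ValueError('Unsupported boolean value.')


print(ParseBooleanValue(b'\x01'))  # True
print(ParseBooleanValue(b'\x00'))  # False
```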
q25819
|
CupsIppParser._ParseDateTimeValue
|
train
|
def _ParseDateTimeValue(self, byte_stream, file_offset):
"""Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
"""
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(
byte_stream, file_offset, datetime_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse datetime value with error: {0!s}'.format(exception))
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (
value.year, value.month, value.day_of_month,
value.hours, value.minutes, value.seconds, value.deciseconds,
direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(
rfc2579_date_time_tuple=rfc2579_date_time_tuple)
|
python
|
{
"resource": ""
}
|
q25820
|
CupsIppParser._ParseIntegerValue
|
train
|
def _ParseIntegerValue(self, byte_stream, file_offset):
"""Parses an integer value.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
int: integer value.
Raises:
ParseError: when the integer value cannot be parsed.
"""
data_type_map = self._GetDataTypeMap('int32be')
try:
return self._ReadStructureFromByteStream(
byte_stream, file_offset, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(exception))
|
python
|
{
"resource": ""
}
|
q25821
|
CupsIppParser._ParseHeader
|
train
|
def _ParseHeader(self, parser_mediator, file_object):
"""Parses a CUPS IPP header from a file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
"""
header_map = self._GetDataTypeMap('cups_ipp_header')
try:
header, _ = self._ReadStructureFromFileObject(file_object, 0, header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse header with error: {1!s}'.format(
self.NAME, exception))
format_version = '{0:d}.{1:d}'.format(
header.major_version, header.minor_version)
if format_version not in self._SUPPORTED_FORMAT_VERSIONS:
raise errors.UnableToParseFile(
'[{0:s}] Unsupported format version {1:s}.'.format(
self.NAME, format_version))
if header.operation_identifier != 5:
# TODO: generate ExtractionWarning instead of printing debug output.
display_name = parser_mediator.GetDisplayName()
logger.debug((
'[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header '
'of: {2:s}.').format(
self.NAME, header.operation_identifier, display_name))
|
python
|
{
"resource": ""
}
|
q25822
|
CupsIppParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a CUPS IPP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
self._last_charset_attribute = 'ascii'
self._ParseHeader(parser_mediator, file_object)
data_dict = {}
time_dict = {}
try:
for name, value in self._ParseAttributesGroup(file_object):
name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)
if name in self._DATE_TIME_VALUE_NAMES:
time_dict.setdefault(name, []).append(value)
else:
data_dict.setdefault(name, []).append(value)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse attributes with error: {0!s}'.format(exception))
return
event_data = CupsIppEventData()
event_data.application = self._GetStringValue(data_dict, 'application')
event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')
event_data.copies = data_dict.get('copies', [0])[0]
event_data.data_dict = data_dict
event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
event_data.job_id = self._GetStringValue(data_dict, 'job_id')
event_data.job_name = self._GetStringValue(data_dict, 'job_name')
event_data.user = self._GetStringValue(data_dict, 'user')
event_data.owner = self._GetStringValue(data_dict, 'owner')
event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
event_data.uri = self._GetStringValue(data_dict, 'uri')
for name, usage in iter(self._DATE_TIME_VALUES.items()):
for date_time in time_dict.get(name, []):
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
for name, usage in iter(self._POSIX_TIME_VALUES.items()):
for time_value in time_dict.get(name, []):
date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25823
|
AnalysisPlugin._CreateEventTag
|
train
|
def _CreateEventTag(self, event, comment, labels):
"""Creates an event tag.
Args:
event (EventObject): event to tag.
comment (str): event tag comment.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
"""
event_identifier = event.GetIdentifier()
event_tag = events.EventTag(comment=comment)
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Created event tag: {0:s} for event: {1:s}'.format(
comment, event_identifier_string))
return event_tag
|
python
|
{
"resource": ""
}
|
q25824
|
HashTaggingAnalysisPlugin._HandleHashAnalysis
|
train
|
def _HandleHashAnalysis(self, hash_analysis):
"""Deals with the results of the analysis of a hash.
This method ensures that labels are generated for the hash,
then tags all events derived from files with that hash.
Args:
hash_analysis (HashAnalysis): hash analysis plugin's results for a given
hash.
Returns:
tuple: containing:
list[dfvfs.PathSpec]: pathspecs that had the hash value looked up.
list[str]: labels that correspond to the hash value that was looked up.
list[EventTag]: event tags for all events that were extracted from the
path specifications.
"""
tags = []
labels = self.GenerateLabels(hash_analysis.hash_information)
path_specifications = self._hash_pathspecs.pop(hash_analysis.subject_hash)
for path_specification in path_specifications:
event_identifiers = self._event_identifiers_by_pathspec.pop(
path_specification, [])
if not labels:
continue
for event_identifier in event_identifiers:
event_tag = events.EventTag(comment=self._comment)
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
tags.append(event_tag)
return path_specifications, labels, tags
|
python
|
{
"resource": ""
}
|
q25825
|
HashTaggingAnalysisPlugin._EnsureRequesterStarted
|
train
|
def _EnsureRequesterStarted(self):
"""Checks if the analyzer is running and starts it if not."""
if not self._analyzer_started:
self._analyzer.start()
self._analyzer_started = True
|
python
|
{
"resource": ""
}
|
q25826
|
HashTaggingAnalysisPlugin.ExamineEvent
|
train
|
def ExamineEvent(self, mediator, event):
"""Evaluates whether an event contains the right data for a hash lookup.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
"""
self._EnsureRequesterStarted()
path_spec = event.pathspec
event_identifiers = self._event_identifiers_by_pathspec[path_spec]
event_identifier = event.GetIdentifier()
event_identifiers.append(event_identifier)
if event.data_type not in self.DATA_TYPES or not self._analyzer.lookup_hash:
return
lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash)
lookup_hash = getattr(event, lookup_hash, None)
if not lookup_hash:
display_name = mediator.GetDisplayNameForPathSpec(path_spec)
logger.warning((
'Lookup hash attribute: {0:s}_hash missing from event that '
'originated from: {1:s}.').format(
self._analyzer.lookup_hash, display_name))
return
path_specs = self._hash_pathspecs[lookup_hash]
path_specs.append(path_spec)
# There may be multiple path specifications that have the same hash. We only
# want to look them up once.
if len(path_specs) == 1:
self.hash_queue.put(lookup_hash)
|
python
|
{
"resource": ""
}
|
q25827
|
HashTaggingAnalysisPlugin._ContinueReportCompilation
|
train
|
def _ContinueReportCompilation(self):
"""Determines if the plugin should continue trying to compile the report.
Returns:
bool: True if the plugin should continue, False otherwise.
"""
analyzer_alive = self._analyzer.is_alive()
hash_queue_has_tasks = self.hash_queue.unfinished_tasks > 0
analysis_queue = not self.hash_analysis_queue.empty()
# pylint: disable=consider-using-ternary
return (analyzer_alive and hash_queue_has_tasks) or analysis_queue
|
python
|
{
"resource": ""
}
|
q25828
|
HashTaggingAnalysisPlugin._LogProgressUpdateIfReasonable
|
train
|
def _LogProgressUpdateIfReasonable(self):
"""Prints a progress update if enough time has passed."""
next_log_time = (
self._time_of_last_status_log +
self.SECONDS_BETWEEN_STATUS_LOG_MESSAGES)
current_time = time.time()
if current_time < next_log_time:
return
completion_time = time.ctime(current_time + self.EstimateTimeRemaining())
log_message = (
'{0:s} hash analysis plugin running. {1:d} hashes in queue, '
'estimated completion time {2:s}.'.format(
self.NAME, self.hash_queue.qsize(), completion_time))
logger.info(log_message)
self._time_of_last_status_log = current_time
|
python
|
{
"resource": ""
}
|
q25829
|
HashTaggingAnalysisPlugin.EstimateTimeRemaining
|
train
|
def EstimateTimeRemaining(self):
"""Estimates how long until all hashes have been analyzed.
Returns:
int: estimated number of seconds until all hashes have been analyzed.
"""
number_of_hashes = self.hash_queue.qsize()
hashes_per_batch = self._analyzer.hashes_per_batch
wait_time_per_batch = self._analyzer.wait_after_analysis
analyses_performed = self._analyzer.analyses_performed
if analyses_performed == 0:
average_analysis_time = self._analyzer.seconds_spent_analyzing
else:
average_analysis_time, _ = divmod(
self._analyzer.seconds_spent_analyzing, analyses_performed)
batches_remaining, _ = divmod(number_of_hashes, hashes_per_batch)
estimated_seconds_per_batch = average_analysis_time + wait_time_per_batch
return batches_remaining * estimated_seconds_per_batch
|
python
|
{
"resource": ""
}
|
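A worked example of the arithmetic in `EstimateTimeRemaining` above, with made-up analyzer figures: the average analysis time is total seconds spent divided by analyses performed (integer division), the remaining batches are queue size divided by batch size, and the estimate is remaining batches times (average analysis time plus the post-batch wait):

```python
# Hypothetical figures for illustration only.
number_of_hashes = 125        # hash_queue.qsize()
hashes_per_batch = 10
wait_after_analysis = 2       # seconds to sleep after each batch
analyses_performed = 4
seconds_spent_analyzing = 30

average_analysis_time, _ = divmod(seconds_spent_analyzing, analyses_performed)
batches_remaining, _ = divmod(number_of_hashes, hashes_per_batch)
estimated_seconds_per_batch = average_analysis_time + wait_after_analysis

# 12 batches * (7 + 2) seconds = 108 seconds remaining.
print(batches_remaining * estimated_seconds_per_batch)
```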
q25830
|
HashAnalyzer._GetHashes
|
train
|
def _GetHashes(self, target_queue, max_hashes):
"""Retrieves a list of items from a queue.
Args:
target_queue (Queue.queue): queue to retrieve hashes from.
max_hashes (int): maximum number of items to retrieve from the
target_queue.
Returns:
list[object]: list of at most max_hashes elements from the target_queue.
The list may have no elements if the target_queue is empty.
"""
hashes = []
for _ in range(0, max_hashes):
try:
item = target_queue.get_nowait()
except Queue.Empty:
continue
hashes.append(item)
return hashes
|
python
|
{
"resource": ""
}
|
q25831
|
HashAnalyzer.run
|
train
|
def run(self):
"""The method called by the threading library to start the thread."""
while not self._abort:
hashes = self._GetHashes(self._hash_queue, self.hashes_per_batch)
if hashes:
time_before_analysis = time.time()
hash_analyses = self.Analyze(hashes)
current_time = time.time()
self.seconds_spent_analyzing += current_time - time_before_analysis
self.analyses_performed += 1
for hash_analysis in hash_analyses:
self._hash_analysis_queue.put(hash_analysis)
self._hash_queue.task_done()
time.sleep(self.wait_after_analysis)
else:
# Wait for some more hashes to be added to the queue.
time.sleep(self.EMPTY_QUEUE_WAIT_TIME)
|
python
|
{
"resource": ""
}
|
q25832
|
HashAnalyzer.SetLookupHash
|
train
|
def SetLookupHash(self, lookup_hash):
"""Sets the hash to query.
Args:
lookup_hash (str): name of the hash attribute to look up.
Raises:
ValueError: if the lookup hash is not supported.
"""
if lookup_hash not in self.SUPPORTED_HASHES:
raise ValueError('Unsupported lookup hash: {0!s}'.format(lookup_hash))
self.lookup_hash = lookup_hash
|
python
|
{
"resource": ""
}
|
q25833
|
HTTPHashAnalyzer._CheckPythonVersionAndDisableWarnings
|
train
|
def _CheckPythonVersionAndDisableWarnings(self):
"""Checks python version, and disables SSL warnings.
urllib3 will warn on each HTTPS request made by older versions of Python.
Rather than spamming the user, we print one warning message, then disable
warnings in urllib3.
"""
if self._checked_for_old_python_version:
return
if sys.version_info[0:3] < (2, 7, 9):
logger.warning(
'You are running a version of Python prior to 2.7.9. Your version '
'of Python has multiple weaknesses in its SSL implementation that '
'can allow an attacker to read or modify SSL encrypted data. '
'Please update. Further SSL warnings will be suppressed. See '
'https://www.python.org/dev/peps/pep-0466/ for more information.')
# Some distributions de-vendor urllib3 from requests, so we have to
# check if this has occurred and disable warnings in the correct
# package.
urllib3_module = urllib3
if not urllib3_module:
if hasattr(requests, 'packages'):
urllib3_module = getattr(requests.packages, 'urllib3')
if urllib3_module and hasattr(urllib3_module, 'disable_warnings'):
urllib3_module.disable_warnings()
self._checked_for_old_python_version = True
|
python
|
{
"resource": ""
}
|
q25834
|
HTTPHashAnalyzer.MakeRequestAndDecodeJSON
|
train
|
def MakeRequestAndDecodeJSON(self, url, method, **kwargs):
"""Make a HTTP request and decode the results as JSON.
Args:
url (str): URL to make a request to.
method (str): HTTP method to used to make the request. GET and POST are
supported.
kwargs: parameters to the requests .get() or post() methods, depending
on the value of the method parameter.
Returns:
dict[str, object]: body of the HTTP response, decoded from JSON.
Raises:
ConnectionError: If it is not possible to connect to the given URL, or if
the request returns an HTTP error.
ValueError: If an invalid HTTP method is specified.
"""
method_upper = method.upper()
if method_upper not in ('GET', 'POST'):
raise ValueError('Method {0:s} is not supported'.format(method))
if url.lower().startswith('https'):
self._CheckPythonVersionAndDisableWarnings()
try:
if method_upper == 'GET':
response = requests.get(url, **kwargs)
elif method_upper == 'POST':
response = requests.post(url, **kwargs)
response.raise_for_status()
except requests.ConnectionError as exception:
error_string = 'Unable to connect to {0:s} with error: {1!s}'.format(
url, exception)
raise errors.ConnectionError(error_string)
except requests.HTTPError as exception:
error_string = '{0:s} returned a HTTP error: {1!s}'.format(
url, exception)
raise errors.ConnectionError(error_string)
return response.json()
|
python
|
{
"resource": ""
}
|
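`MakeRequestAndDecodeJSON` above wraps the requests library's GET/POST calls, converts HTTP and connection failures into a single error type, and decodes the JSON body. A hedged sketch of the same pattern using requests directly; the URL and payload are placeholders, not a real lookup service:

```python
import requests


def MakeRequestAndDecodeJSON(url, method, **kwargs):
  """Minimal stand-in: issues a GET or POST and decodes the JSON body."""
  method_upper = method.upper()
  if method_upper not in ('GET', 'POST'):
    raise ValueError('Method {0:s} is not supported'.format(method))
  if method_upper == 'GET':
    response = requests.get(url, **kwargs)
  else:
    response = requests.post(url, **kwargs)
  # Raises requests.HTTPError for 4xx/5xx responses.
  response.raise_for_status()
  return response.json()


# Placeholder usage; a real plugin would post a batch of hashes to its service.
# json_body = MakeRequestAndDecodeJSON(
#     'https://example.com/api/lookup', 'POST', json={'hashes': ['abc123']})
```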
q25835
|
ApplicationUsagePlugin.ParseApplicationUsageRow
|
train
|
def ParseApplicationUsageRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
# TODO: replace usage by definition(s) in eventdata. Not sure which values
# it will hold here.
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25836
|
EventExtractionWorker._AnalyzeDataStream
|
train
|
def _AnalyzeDataStream(self, mediator, file_entry, data_stream_name):
"""Analyzes the contents of a specific data stream of a file entry.
The results of the analyzers are set in the parser mediator as attributes
that are added to produced event objects. Note that some file systems
allow directories to have data streams, e.g. NTFS.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry whose data stream is to be
analyzed.
data_stream_name (str): name of the data stream.
Raises:
RuntimeError: if the file-like object cannot be retrieved from
the file entry.
"""
display_name = mediator.GetDisplayName()
logger.debug('[AnalyzeDataStream] analyzing file: {0:s}'.format(
display_name))
if self._processing_profiler:
self._processing_profiler.StartTiming('analyzing')
try:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError((
'Unable to retrieve file-like object for file entry: '
'{0:s}.').format(display_name))
try:
self._AnalyzeFileObject(mediator, file_object)
finally:
file_object.close()
finally:
if self._processing_profiler:
self._processing_profiler.StopTiming('analyzing')
logger.debug(
'[AnalyzeDataStream] completed analyzing file: {0:s}'.format(
display_name))
|
python
|
{
"resource": ""
}
|
q25837
|
EventExtractionWorker._AnalyzeFileObject
|
train
|
def _AnalyzeFileObject(self, mediator, file_object):
"""Processes a file-like object with analyzers.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_object (dfvfs.FileIO): file-like object to process.
"""
maximum_read_size = max([
analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])
hashers_only = True
for analyzer_object in self._analyzers:
if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):
hashers_only = False
break
file_size = file_object.get_size()
if (hashers_only and self._hasher_file_size_limit and
file_size > self._hasher_file_size_limit):
return
file_object.seek(0, os.SEEK_SET)
data = file_object.read(maximum_read_size)
while data:
if self._abort:
break
for analyzer_object in self._analyzers:
if self._abort:
break
if (not analyzer_object.INCREMENTAL_ANALYZER and
file_size > analyzer_object.SIZE_LIMIT):
continue
if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and
self._hasher_file_size_limit and
file_size > self._hasher_file_size_limit):
continue
self.processing_status = analyzer_object.PROCESSING_STATUS_HINT
analyzer_object.Analyze(data)
self.last_activity_timestamp = time.time()
data = file_object.read(maximum_read_size)
display_name = mediator.GetDisplayName()
for analyzer_object in self._analyzers:
if self._abort:
break
for result in analyzer_object.GetResults():
logger.debug((
'[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '
'file: {2:s}.').format(
result.attribute_name, result.attribute_value, display_name))
mediator.AddEventAttribute(
result.attribute_name, result.attribute_value)
analyzer_object.Reset()
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
|
python
|
{
"resource": ""
}
|
q25838
|
EventExtractionWorker._CanSkipDataStream
|
train
|
def _CanSkipDataStream(self, file_entry, data_stream):
"""Determines if analysis and extraction of a data stream can be skipped.
This is used to prevent Plaso from trying to run analyzers or extract content
from a pipe or socket it encounters while processing a mounted filesystem.
Args:
file_entry (dfvfs.FileEntry): file entry to consider for skipping.
data_stream (dfvfs.DataStream): data stream to consider for skipping.
Returns:
bool: True if the data stream can be skipped.
"""
if file_entry.IsFile():
return False
if data_stream.IsDefault():
return True
return False
|
python
|
{
"resource": ""
}
|
q25839
|
EventExtractionWorker._CanSkipContentExtraction
|
train
|
def _CanSkipContentExtraction(self, file_entry):
"""Determines if content extraction of a file entry can be skipped.
Args:
file_entry (dfvfs.FileEntry): file entry for which to determine whether
content extraction can be skipped.
Returns:
bool: True if content extraction can be skipped.
"""
# TODO: make this filtering solution more generic. Also see:
# https://github.com/log2timeline/plaso/issues/467
location = getattr(file_entry.path_spec, 'location', None)
if not location:
return False
data_stream_name = getattr(file_entry.path_spec, 'data_stream', None)
if data_stream_name:
return False
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(location)
if not path_segments:
return False
if self._CHROME_CACHE_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-1]
location_segments.append('index')
location = file_system.JoinPath(location_segments)
index_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(index_path_spec):
# TODO: improve this check if "index" is a Chrome Cache index file.
return True
elif self._FIREFOX_CACHE_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-4]
location_segments.append('_CACHE_MAP_')
location = file_system.JoinPath(location_segments)
cache_map_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(cache_map_path_spec):
# TODO: improve this check if "_CACHE_MAP_" is a Firefox Cache
# version 1 cache map file.
return True
elif self._FIREFOX_CACHE2_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-2]
location_segments.append('index')
location = file_system.JoinPath(location_segments)
index_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(index_path_spec):
# TODO: improve this check if "index" is a Firefox Cache version 2
# index file.
return True
elif len(path_segments) == 1 and path_segments[0].lower() in (
'hiberfil.sys', 'pagefile.sys', 'swapfile.sys'):
return True
return False
|
python
|
{
"resource": ""
}
|
q25840
|
EventExtractionWorker._ExtractContentFromDataStream
|
train
|
def _ExtractContentFromDataStream(
self, mediator, file_entry, data_stream_name):
"""Extracts content from a data stream.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract its content.
data_stream_name (str): name of the data stream whose content is to be
extracted.
"""
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseDataStream(
mediator, file_entry, data_stream_name)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
self.last_activity_timestamp = time.time()
|
python
|
{
"resource": ""
}
|
q25841
|
EventExtractionWorker._ExtractMetadataFromFileEntry
|
train
|
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
"""Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
"""
# Do not extract metadata from the root file entry when it is virtual.
if file_entry.IsRoot() and file_entry.type_indicator not in (
self._TYPES_WITH_ROOT_METADATA):
return
# We always want to extract the file entry metadata but we only want
# to parse it once per file entry, so we only use it if we are
# processing the default data stream of regular files.
if data_stream and not data_stream.IsDefault():
return
display_name = mediator.GetDisplayName()
logger.debug(
'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(
display_name))
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
|
python
|
{
"resource": ""
}
|
q25842
|
EventExtractionWorker._IsMetadataFile
|
train
|
def _IsMetadataFile(self, file_entry):
"""Determines if the file entry is a metadata file.
Args:
file_entry (dfvfs.FileEntry): a file entry object.
Returns:
bool: True if the file entry is a metadata file.
"""
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK):
return True
return False
|
python
|
{
"resource": ""
}
|
q25843
|
EventExtractionWorker._ProcessDirectory
|
train
|
def _ProcessDirectory(self, mediator, file_entry):
"""Processes a directory file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry of the directory.
"""
self.processing_status = definitions.STATUS_INDICATOR_COLLECTING
if self._processing_profiler:
self._processing_profiler.StartTiming('collecting')
for sub_file_entry in file_entry.sub_file_entries:
if self._abort:
break
try:
if not sub_file_entry.IsAllocated():
continue
except dfvfs_errors.BackEndError as exception:
warning_message = (
'unable to process directory entry: {0:s} with error: '
'{1!s}').format(sub_file_entry.name, exception)
mediator.ProduceExtractionWarning(
warning_message, path_spec=file_entry.path_spec)
continue
# For TSK-based file entries only, ignore the virtual /$OrphanFiles
# directory.
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
event_source = event_sources.FileEntryEventSource(
path_spec=sub_file_entry.path_spec)
# TODO: move this into a dfVFS file entry property.
stat_object = sub_file_entry.GetStat()
if stat_object:
event_source.file_entry_type = stat_object.type
mediator.ProduceEventSource(event_source)
self.last_activity_timestamp = time.time()
if self._processing_profiler:
self._processing_profiler.StopTiming('collecting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
|
python
|
{
"resource": ""
}
|
q25844
|
EventExtractionWorker._ProcessFileEntry
|
train
|
def _ProcessFileEntry(self, mediator, file_entry):
"""Processes a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry.
"""
display_name = mediator.GetDisplayName()
logger.debug(
'[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))
reference_count = mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec)
try:
if self._IsMetadataFile(file_entry):
self._ProcessMetadataFile(mediator, file_entry)
else:
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
if self._CanSkipDataStream(file_entry, data_stream):
logger.debug((
'[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: '
'{2:s}').format(
data_stream.name, file_entry.type_indicator, display_name))
continue
self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)
file_entry_processed = True
if not file_entry_processed:
# For when the file entry does not contain a data stream.
self._ProcessFileEntryDataStream(mediator, file_entry, None)
finally:
new_reference_count = (
mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
if reference_count != new_reference_count:
# Clean up after parsers that do not call close explicitly.
if mediator.resolver_context.ForceRemoveFileObject(
file_entry.path_spec):
logger.warning(
'File-object not explicitly closed for file: {0:s}'.format(
display_name))
logger.debug(
'[ProcessFileEntry] done processing file entry: {0:s}'.format(
display_name))
|
python
|
{
"resource": ""
}
|
q25845
|
EventExtractionWorker._ProcessFileEntryDataStream
|
train
|
def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream):
"""Processes a specific data stream of a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry containing the data stream.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
"""
display_name = mediator.GetDisplayName()
data_stream_name = getattr(data_stream, 'name', '') or ''
logger.debug((
'[ProcessFileEntryDataStream] processing data stream: "{0:s}" of '
'file entry: {1:s}').format(data_stream_name, display_name))
mediator.ClearEventAttributes()
if data_stream and self._analyzers:
# Since AnalyzeDataStream generates event attributes it needs to be
# called before producing events.
self._AnalyzeDataStream(mediator, file_entry, data_stream.name)
self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream)
# Not every file entry has a data stream. In such cases we want to
# extract the metadata only.
if not data_stream:
return
# Determine if the content of the file entry should not be extracted.
skip_content_extraction = self._CanSkipContentExtraction(file_entry)
if skip_content_extraction:
display_name = mediator.GetDisplayName()
logger.debug(
'Skipping content extraction of: {0:s}'.format(display_name))
self.processing_status = definitions.STATUS_INDICATOR_IDLE
return
path_spec = copy.deepcopy(file_entry.path_spec)
if data_stream and not data_stream.IsDefault():
path_spec.data_stream = data_stream.name
archive_types = []
compressed_stream_types = []
if self._process_compressed_streams:
compressed_stream_types = self._GetCompressedStreamTypes(
mediator, path_spec)
if not compressed_stream_types:
archive_types = self._GetArchiveTypes(mediator, path_spec)
if archive_types:
if self._process_archives:
self._ProcessArchiveTypes(mediator, path_spec, archive_types)
if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types:
# ZIP files are the base of certain file formats like docx.
self._ExtractContentFromDataStream(
mediator, file_entry, data_stream.name)
elif compressed_stream_types:
self._ProcessCompressedStreamTypes(
mediator, path_spec, compressed_stream_types)
else:
self._ExtractContentFromDataStream(
mediator, file_entry, data_stream.name)
|
python
|
{
"resource": ""
}
|
q25846
|
EventExtractionWorker._ProcessMetadataFile
|
train
|
def _ProcessMetadataFile(self, mediator, file_entry):
"""Processes a metadata file.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry of the metadata file.
"""
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
for data_stream in file_entry.data_streams:
if self._abort:
break
self.last_activity_timestamp = time.time()
self._event_extractor.ParseMetadataFile(
mediator, file_entry, data_stream.name)
|
python
|
{
"resource": ""
}
|
q25847
|
EventExtractionWorker._SetHashers
|
train
|
def _SetHashers(self, hasher_names_string):
"""Sets the hasher names.
Args:
hasher_names_string (str): comma separated names of the hashers
to enable, where 'none' disables the hashing analyzer.
"""
if not hasher_names_string or hasher_names_string == 'none':
return
analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
'hashing')
analyzer_object.SetHasherNames(hasher_names_string)
self._analyzers.append(analyzer_object)
|
python
|
{
"resource": ""
}
|
q25848
|
EventExtractionWorker._SetYaraRules
|
train
|
def _SetYaraRules(self, yara_rules_string):
"""Sets the Yara rules.
Args:
yara_rules_string (str): unparsed Yara rule definitions.
"""
if not yara_rules_string:
return
analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
'yara')
analyzer_object.SetRules(yara_rules_string)
self._analyzers.append(analyzer_object)
|
python
|
{
"resource": ""
}
|
q25849
|
EventExtractionWorker.SetExtractionConfiguration
|
train
|
def SetExtractionConfiguration(self, configuration):
"""Sets the extraction configuration settings.
Args:
configuration (ExtractionConfiguration): extraction configuration.
"""
self._hasher_file_size_limit = configuration.hasher_file_size_limit
self._SetHashers(configuration.hasher_names_string)
self._process_archives = configuration.process_archives
self._process_compressed_streams = configuration.process_compressed_streams
self._SetYaraRules(configuration.yara_rules_string)
|
python
|
{
"resource": ""
}
|
q25850
|
MacAppFirewallParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.FIREWALL_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug((
'Unable to parse file as a Mac AppFirewall log file with error: '
'{0!s}').format(exception))
return False
if structure.action != 'creating /var/log/appfirewall.log':
logger.debug(
'Not a Mac AppFirewall log file, invalid action: {0!s}'.format(
structure.action))
return False
if structure.status != 'Error':
logger.debug(
'Not a Mac AppFirewall log file, invalid status: {0!s}'.format(
structure.status))
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug((
'Not a Mac AppFirewall log file, invalid date and time: '
'{0!s}').format(structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True
|
python
|
{
"resource": ""
}
|
q25851
|
EventExtractor._CheckParserCanProcessFileEntry
|
train
|
def _CheckParserCanProcessFileEntry(self, parser, file_entry):
"""Determines if a parser can process a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
parser (BaseParser): parser.
Returns:
bool: True if the file entry can be processed by the parser object.
"""
for filter_object in parser.FILTERS:
if filter_object.Match(file_entry):
return True
return False
|
python
|
{
"resource": ""
}
|
q25852
|
EventExtractor._GetSignatureMatchParserNames
|
train
|
def _GetSignatureMatchParserNames(self, file_object):
"""Determines if a file-like object matches one of the known signatures.
Args:
file_object (file): file-like object whose contents will be checked
for known signatures.
Returns:
list[str]: parser names for which the contents of the file-like object
matches their known signatures.
"""
parser_names = []
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
for scan_result in iter(scan_state.scan_results):
format_specification = (
self._formats_with_signatures.GetSpecificationBySignature(
scan_result.identifier))
if format_specification.identifier not in parser_names:
parser_names.append(format_specification.identifier)
return parser_names
|
python
|
{
"resource": ""
}
|
q25853
|
EventExtractor._InitializeParserObjects
|
train
|
def _InitializeParserObjects(self, parser_filter_expression=None):
"""Initializes the parser objects.
Args:
parser_filter_expression (Optional[str]): the parser filter expression,
None represents all parsers and plugins.
The parser filter expression is a comma separated value string that
denotes a list of parser names to include and/or exclude. Each entry
can have the value of:
* An exact match of a list of parsers, or a preset (see
data/presets.yaml for the list of predefined presets).
* A name of a single parser (case insensitive), e.g. msiecf.
* A glob name for a single parser, e.g. '*msie*' (case insensitive).
"""
self._formats_with_signatures, non_sigscan_parser_names = (
parsers_manager.ParsersManager.GetFormatsWithSignatures(
parser_filter_expression=parser_filter_expression))
self._non_sigscan_parser_names = []
for parser_name in non_sigscan_parser_names:
if parser_name not in ('filestat', 'usnjrnl'):
self._non_sigscan_parser_names.append(parser_name)
self._file_scanner = parsers_manager.ParsersManager.CreateSignatureScanner(
self._formats_with_signatures)
self._parsers = parsers_manager.ParsersManager.GetParserObjects(
parser_filter_expression=parser_filter_expression)
active_parser_names = ', '.join(sorted(self._parsers.keys()))
logger.debug('Active parsers: {0:s}'.format(active_parser_names))
self._filestat_parser = self._parsers.get('filestat', None)
if 'filestat' in self._parsers:
del self._parsers['filestat']
self._mft_parser = self._parsers.get('mft', None)
self._usnjrnl_parser = self._parsers.get('usnjrnl', None)
if 'usnjrnl' in self._parsers:
del self._parsers['usnjrnl']
|
python
|
{
"resource": ""
}
|
q25854
|
EventExtractor._ParseDataStreamWithParser
|
train
|
def _ParseDataStreamWithParser(
self, parser_mediator, parser, file_entry, data_stream_name):
"""Parses a data stream of a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object is missing.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError(
'Unable to retrieve file-like object from file entry.')
try:
self._ParseFileEntryWithParser(
parser_mediator, parser, file_entry, file_object=file_object)
finally:
file_object.close()
|
python
|
{
"resource": ""
}
|
q25855
|
EventExtractor._ParseFileEntryWithParser
|
train
|
def _ParseFileEntryWithParser(
self, parser_mediator, parser, file_entry, file_object=None):
"""Parses a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
UnableToParseFile was raised.
Raises:
TypeError: if parser object is not a supported parser type.
"""
if not isinstance(parser, (
parsers_interface.FileEntryParser, parsers_interface.FileObjectParser)):
raise TypeError('Unsupported parser object type.')
parser_mediator.ClearParserChain()
reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
parser_mediator.SampleStartTiming(parser.NAME)
try:
if isinstance(parser, parsers_interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, parsers_interface.FileObjectParser):
parser.Parse(parser_mediator, file_object)
result = self._PARSE_RESULT_SUCCESS
# We catch IOError so we can determine the parser that generated the error.
except (IOError, dfvfs_errors.BackEndError) as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_FAILURE
except errors.UnableToParseFile as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug(
'{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
parser.NAME, display_name, exception))
result = self._PARSE_RESULT_UNSUPPORTED
finally:
parser_mediator.SampleStopTiming(parser.NAME)
parser_mediator.SampleMemoryUsage(parser.NAME)
new_reference_count = (
parser_mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
if reference_count != new_reference_count:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning((
'[{0:s}] did not explicitly close file-object for file: '
'{1:s}.').format(parser.NAME, display_name))
return result
|
python
|
{
"resource": ""
}
|
q25856
|
EventExtractor._ParseFileEntryWithParsers
|
train
|
def _ParseFileEntryWithParsers(
self, parser_mediator, parser_names, file_entry, file_object=None):
"""Parses a file entry with a specific parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
parser_names (list[str]): names of parsers.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
          was successfully parsed or _PARSE_RESULT_UNSUPPORTED when
          UnableToParseFile was raised or no parser names were provided.
Raises:
RuntimeError: if the parser object is missing.
"""
parse_results = self._PARSE_RESULT_UNSUPPORTED
for parser_name in parser_names:
parser = self._parsers.get(parser_name, None)
if not parser:
raise RuntimeError(
'Parser object missing for parser: {0:s}'.format(parser_name))
if parser.FILTERS:
if not self._CheckParserCanProcessFileEntry(parser, file_entry):
parse_results = self._PARSE_RESULT_SUCCESS
continue
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug((
'[ParseFileEntryWithParsers] parsing file: {0:s} with parser: '
'{1:s}').format(display_name, parser_name))
parse_result = self._ParseFileEntryWithParser(
parser_mediator, parser, file_entry, file_object=file_object)
if parse_result == self._PARSE_RESULT_FAILURE:
return self._PARSE_RESULT_FAILURE
if parse_result == self._PARSE_RESULT_SUCCESS:
parse_results = self._PARSE_RESULT_SUCCESS
return parse_results
|
python
|
{
"resource": ""
}
|
q25857
|
EventExtractor.ParseDataStream
|
train
|
def ParseDataStream(self, parser_mediator, file_entry, data_stream_name):
"""Parses a data stream of a file entry with the enabled parsers.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
Raises:
RuntimeError: if the file-like object or the parser object is missing.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
raise RuntimeError(
'Unable to retrieve file-like object from file entry.')
try:
parser_names = self._GetSignatureMatchParserNames(file_object)
parse_with_non_sigscan_parsers = True
if parser_names:
parse_result = self._ParseFileEntryWithParsers(
parser_mediator, parser_names, file_entry, file_object=file_object)
if parse_result in (
self._PARSE_RESULT_FAILURE, self._PARSE_RESULT_SUCCESS):
parse_with_non_sigscan_parsers = False
if parse_with_non_sigscan_parsers:
self._ParseFileEntryWithParsers(
parser_mediator, self._non_sigscan_parser_names, file_entry,
file_object=file_object)
finally:
file_object.close()
|
python
|
{
"resource": ""
}
|
q25858
|
EventExtractor.ParseMetadataFile
|
train
|
def ParseMetadataFile(
self, parser_mediator, file_entry, data_stream_name):
"""Parses a metadata file.
Args:
parser_mediator (ParserMediator): parser mediator.
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): data stream name.
"""
parent_path_spec = getattr(file_entry.path_spec, 'parent', None)
filename_upper = file_entry.name.upper()
if (self._mft_parser and parent_path_spec and
filename_upper in ('$MFT', '$MFTMIRR') and not data_stream_name):
self._ParseDataStreamWithParser(
parser_mediator, self._mft_parser, file_entry, '')
elif (self._usnjrnl_parser and parent_path_spec and
filename_upper == '$USNJRNL' and data_stream_name == '$J'):
# To be able to ignore the sparse data ranges the UsnJrnl parser
# needs to read directly from the volume.
volume_file_object = path_spec_resolver.Resolver.OpenFileObject(
parent_path_spec, resolver_context=parser_mediator.resolver_context)
try:
self._ParseFileEntryWithParser(
parser_mediator, self._usnjrnl_parser, file_entry,
file_object=volume_file_object)
finally:
volume_file_object.close()
|
python
|
{
"resource": ""
}
|
q25859
|
PathSpecExtractor._CalculateNTFSTimeHash
|
train
|
def _CalculateNTFSTimeHash(self, file_entry):
"""Calculates an MD5 from the date and time value of a NTFS file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
Returns:
str: hexadecimal representation of the MD5 hash value of the date and
time values of the file entry.
"""
date_time_values = []
access_time = getattr(file_entry, 'access_time', None)
if access_time:
date_time_string = access_time.CopyToDateTimeString()
date_time_values.append('atime:{0:s}'.format(date_time_string))
creation_time = getattr(file_entry, 'creation_time', None)
if creation_time:
date_time_string = creation_time.CopyToDateTimeString()
date_time_values.append('crtime:{0:s}'.format(date_time_string))
modification_time = getattr(file_entry, 'modification_time', None)
if modification_time:
date_time_string = modification_time.CopyToDateTimeString()
date_time_values.append('mtime:{0:s}'.format(date_time_string))
# file_entry.change_time is an alias of file_entry.entry_modification_time.
change_time = getattr(file_entry, 'change_time', None)
if change_time:
date_time_string = change_time.CopyToDateTimeString()
date_time_values.append('ctime:{0:s}'.format(date_time_string))
date_time_values = ''.join(date_time_values)
date_time_values = date_time_values.encode('ascii')
hash_value = hashlib.md5()
hash_value.update(date_time_values)
return hash_value.hexdigest()
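# A minimal standalone sketch of the hashing scheme above, assuming
# hypothetical date and time strings rather than values read from a dfVFS
# file entry: concatenate the available "prefix:date_time_string" values and
# take the MD5 of the ASCII-encoded result.
import hashlib

example_date_time_values = [
    'atime:2019-01-01 10:00:00.0000000',
    'crtime:2018-12-31 09:00:00.0000000',
    'mtime:2019-01-01 10:00:00.0000000',
    'ctime:2019-01-01 10:00:00.0000000']

example_hash = hashlib.md5()
example_hash.update(''.join(example_date_time_values).encode('ascii'))
print(example_hash.hexdigest())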
|
python
|
{
"resource": ""
}
|
q25860
|
PathSpecExtractor._ExtractPathSpecsFromDirectory
|
train
|
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
"""Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
      dfvfs.PathSpec: path specification of a file entry found in the directory.
    Raises:
      MaximumRecursionDepth: if the maximum recursion depth is reached.
    """
if depth >= self._MAXIMUM_DEPTH:
raise errors.MaximumRecursionDepth('Maximum recursion depth reached.')
# Need to do a breadth-first search otherwise we'll hit the Python
# maximum recursion depth.
sub_directories = []
for sub_file_entry in file_entry.sub_file_entries:
try:
if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink():
continue
except dfvfs_errors.BackEndError as exception:
logger.warning(
'Unable to process file: {0:s} with error: {1!s}'.format(
sub_file_entry.path_spec.comparable.replace(
'\n', ';'), exception))
continue
# For TSK-based file entries only, ignore the virtual /$OrphanFiles
# directory.
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
if sub_file_entry.IsDirectory():
sub_directories.append(sub_file_entry)
elif sub_file_entry.IsFile():
# If we are dealing with a VSS we want to calculate a hash
# value based on available timestamps and compare that to previously
# calculated hash values, and only include the file into the queue if
# the hash does not match.
if self._duplicate_file_check:
hash_value = self._CalculateNTFSTimeHash(sub_file_entry)
inode = getattr(sub_file_entry.path_spec, 'inode', 0)
if inode in self._hashlist:
if hash_value in self._hashlist[inode]:
continue
self._hashlist.setdefault(inode, []).append(hash_value)
for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry):
yield path_spec
for sub_file_entry in sub_directories:
try:
for path_spec in self._ExtractPathSpecsFromDirectory(
sub_file_entry, depth=(depth + 1)):
yield path_spec
except (
IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception))
|
python
|
{
"resource": ""
}
|
q25861
|
PathSpecExtractor._ExtractPathSpecsFromFile
|
train
|
def _ExtractPathSpecsFromFile(self, file_entry):
"""Extracts path specification from a file.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the file.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the file.
"""
produced_main_path_spec = False
for data_stream in file_entry.data_streams:
# Make a copy so we don't make the changes on a path specification
# directly. Otherwise already produced path specifications can be
# altered in the process.
path_spec = copy.deepcopy(file_entry.path_spec)
if data_stream.name:
setattr(path_spec, 'data_stream', data_stream.name)
yield path_spec
if not data_stream.name:
produced_main_path_spec = True
if not produced_main_path_spec:
yield file_entry.path_spec
|
python
|
{
"resource": ""
}
|
q25862
|
PathSpecExtractor._ExtractPathSpecsFromFileSystem
|
train
|
def _ExtractPathSpecsFromFileSystem(
self, path_spec, find_specs=None, recurse_file_system=True,
resolver_context=None):
"""Extracts path specification from a file system within a specific source.
Args:
path_spec (dfvfs.PathSpec): path specification of the root of
the file system.
find_specs (Optional[list[dfvfs.FindSpec]]): find specifications.
recurse_file_system (Optional[bool]): True if extraction should
recurse into a file system.
resolver_context (Optional[dfvfs.Context]): resolver context.
Yields:
dfvfs.PathSpec: path specification of a file entry found in
the file system.
"""
try:
file_system = path_spec_resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=resolver_context)
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.error(
'Unable to open file system with error: {0!s}'.format(exception))
return
try:
if find_specs:
searcher = file_system_searcher.FileSystemSearcher(
file_system, path_spec)
for extracted_path_spec in searcher.Find(find_specs=find_specs):
yield extracted_path_spec
elif recurse_file_system:
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if file_entry:
for extracted_path_spec in self._ExtractPathSpecsFromDirectory(
file_entry):
yield extracted_path_spec
else:
yield path_spec
except (
dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception))
finally:
file_system.Close()
|
python
|
{
"resource": ""
}
|
q25863
|
OLECFPropertySetStream._GetValueAsObject
|
train
|
def _GetValueAsObject(self, property_value):
"""Retrieves the property value as a Python object.
Args:
property_value (pyolecf.property_value): OLECF property value.
Returns:
object: property value as a Python object.
"""
if property_value.type == pyolecf.value_types.BOOLEAN:
return property_value.data_as_boolean
if property_value.type in self._INTEGER_TYPES:
return property_value.data_as_integer
if property_value.type in self._STRING_TYPES:
return property_value.data_as_string
try:
data = property_value.data
except IOError:
data = None
return data
|
python
|
{
"resource": ""
}
|
q25864
|
OLECFPropertySetStream._ReadPropertySet
|
train
|
def _ReadPropertySet(self, property_set):
"""Reads properties from a property set.
Args:
property_set (pyolecf.property_set): OLECF property set.
"""
# Combine the values of multiple property sections
# but do not override properties that are already set.
for property_section in property_set.sections:
if property_section.class_identifier != self._CLASS_IDENTIFIER:
continue
for property_value in property_section.properties:
property_name = self._PROPERTY_NAMES.get(
property_value.identifier, None)
if not property_name:
          property_name = '0x{0:04x}'.format(property_value.identifier)
value = self._GetValueAsObject(property_value)
if self._PROPERTY_VALUE_MAPPINGS:
value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get(
property_name, None)
if value_callback_name:
value_callback_method = getattr(self, value_callback_name, None)
if value_callback_method:
value = value_callback_method(value)
if property_name in self._DATE_TIME_PROPERTIES:
properties_dict = self.date_time_properties
value = dfdatetime_filetime.Filetime(timestamp=value)
else:
properties_dict = self._properties
if property_name not in properties_dict:
properties_dict[property_name] = value
|
python
|
{
"resource": ""
}
|
q25865
|
OLECFPropertySetStream.GetEventData
|
train
|
def GetEventData(self, data_type):
"""Retrieves the properties as event data.
Args:
data_type (str): event data type.
Returns:
EventData: event data.
"""
event_data = events.EventData(data_type=data_type)
for property_name, property_value in iter(self._properties.items()):
if isinstance(property_value, py2to3.BYTES_TYPE):
property_value = repr(property_value)
setattr(event_data, property_name, property_value)
return event_data
|
python
|
{
"resource": ""
}
|
q25866
|
DocumentSummaryInformationOLECFPlugin.Process
|
train
|
def Process(self, parser_mediator, root_item=None, **kwargs):
"""Parses a document summary information OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root item is not set.
"""
# This will raise if unhandled keyword arguments are passed.
super(DocumentSummaryInformationOLECFPlugin, self).Process(
parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
root_creation_time, root_modification_time = self._GetTimestamps(root_item)
for item_name in self.REQUIRED_ITEMS:
item = root_item.get_sub_item_by_name(item_name)
if not item:
continue
summary_information = OLECFDocumentSummaryInformation(item)
event_data = summary_information.GetEventData(
data_type='olecf:document_summary_info')
event_data.name = 'Document Summary Information'
if root_creation_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_creation_time)
event = OLECFDocumentSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if root_modification_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_modification_time)
event = OLECFDocumentSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25867
|
SummaryInformationOLECFPlugin.Process
|
train
|
def Process(self, parser_mediator, root_item=None, **kwargs):
"""Parses a summary information OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root item is not set.
"""
# This will raise if unhandled keyword arguments are passed.
super(SummaryInformationOLECFPlugin, self).Process(
parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
root_creation_time, root_modification_time = self._GetTimestamps(root_item)
for item_name in self.REQUIRED_ITEMS:
item = root_item.get_sub_item_by_name(item_name)
if not item:
continue
summary_information = OLECFSummaryInformation(item)
event_data = summary_information.GetEventData(
data_type='olecf:summary_info')
event_data.name = 'Summary Information'
for property_name, date_time in iter(
summary_information.date_time_properties.items()):
date_time_description = self._DATE_TIME_DESCRIPTIONS.get(
property_name, definitions.TIME_DESCRIPTION_UNKNOWN)
event = OLECFSummaryInformationEvent(date_time, date_time_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
if root_creation_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_creation_time)
event = OLECFSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if root_modification_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_modification_time)
event = OLECFSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q25868
|
ArtifactDefinitionsFilterHelper.CheckKeyCompatibility
|
train
|
def CheckKeyCompatibility(cls, key_path):
"""Checks if a Windows Registry key path is supported by dfWinReg.
Args:
key_path (str): path of the Windows Registry key.
Returns:
bool: True if key is compatible or False if not.
"""
key_path_upper = key_path.upper()
for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:
if key_path_upper.startswith(key_path_prefix):
return True
logger.warning('Key path: "{0:s}" is currently not supported'.format(
key_path))
return False
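# A standalone illustration of the prefix check above. The prefix list here
# is a hypothetical stand-in; the real _COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES
# constant is defined on the class and not shown in this snippet.
_EXAMPLE_KEY_PATH_PREFIXES = frozenset([
    'HKEY_CURRENT_USER\\',
    'HKEY_LOCAL_MACHINE\\SYSTEM\\'])

def example_is_key_path_supported(key_path):
  """Returns True if the key path starts with a supported prefix."""
  key_path_upper = key_path.upper()
  return any(key_path_upper.startswith(prefix)
             for prefix in _EXAMPLE_KEY_PATH_PREFIXES)

print(example_is_key_path_supported('HKEY_CURRENT_USER\\Software\\Microsoft'))
print(example_is_key_path_supported('HKEY_USERS\\S-1-5-18\\Software'))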
|
python
|
{
"resource": ""
}
|
q25869
|
ArtifactDefinitionsFilterHelper.BuildFindSpecs
|
train
|
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):
"""Builds find specifications from artifact definitions.
Args:
artifact_filter_names (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables.
"""
find_specs = []
for name in artifact_filter_names:
definition = self._artifacts_registry.GetDefinitionByName(name)
if not definition:
logger.debug('undefined artifact definition: {0:s}'.format(name))
continue
logger.debug('building find spec from artifact definition: {0:s}'.format(
name))
artifact_find_specs = self._BuildFindSpecsFromArtifact(
definition, environment_variables)
find_specs.extend(artifact_find_specs)
for find_spec in find_specs:
if isinstance(find_spec, file_system_searcher.FindSpec):
self.file_system_find_specs.append(find_spec)
elif isinstance(find_spec, registry_searcher.FindSpec):
self.registry_find_specs.append(find_spec)
else:
        logger.warning('Unsupported find specification type: {0!s}'.format(
type(find_spec)))
|
python
|
{
"resource": ""
}
|
q25870
|
ArtifactDefinitionsFilterHelper._BuildFindSpecsFromArtifact
|
train
|
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
"""Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
"""
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(
path_entry, source.separator, environment_variables,
self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
# TODO: Handle Registry Values Once Supported in dfwinreg.
# https://github.com/log2timeline/dfwinreg/issues/98
# Use set-comprehension to create a set of the source key paths.
key_paths = {
key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning((
'Windows Registry values are not supported, extracting keys: '
'"{0!s}"').format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(
name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning(
'Unsupported artifact definition source type: "{0:s}"'.format(
source.type_indicator))
return find_specs
|
python
|
{
"resource": ""
}
|
q25871
|
ArtifactDefinitionsFilterHelper._BuildFindSpecsFromGroupName
|
train
|
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
"""Builds find specifications from a artifact group name.
Args:
group_name (str): artifact group name.
environment_variables (list[str]): environment variable attributes used to
dynamically populate environment variables in file and registry
artifacts.
Returns:
list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no
artifact with the given name can be retrieved.
"""
definition = self._artifacts_registry.GetDefinitionByName(group_name)
if not definition:
return None
return self._BuildFindSpecsFromArtifact(definition, environment_variables)
|
python
|
{
"resource": ""
}
|
q25872
|
ArtifactDefinitionsFilterHelper._BuildFindSpecsFromFileSourcePath
|
train
|
def _BuildFindSpecsFromFileSourcePath(
self, source_path, path_separator, environment_variables, user_accounts):
"""Builds find specifications from a file source type.
Args:
source_path (str): file system path defined by the source.
path_separator (str): file system path segment separator.
environment_variables (list[str]): environment variable attributes used to
        dynamically populate environment variables in the source path.
user_accounts (list[str]): identified user accounts stored in the
knowledge base.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
"""
find_specs = []
for path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(
source_path, path_separator):
logger.debug('building find spec from path glob: {0:s}'.format(
path_glob))
for path in path_helper.PathHelper.ExpandUsersVariablePath(
path_glob, path_separator, user_accounts):
logger.debug('building find spec from path: {0:s}'.format(path))
if '%' in path:
path = path_helper.PathHelper.ExpandWindowsPath(
path, environment_variables)
logger.debug('building find spec from expanded path: {0:s}'.format(
path))
if not path.startswith(path_separator):
logger.warning((
'The path filter must be defined as an absolute path: '
'"{0:s}"').format(path))
continue
# Convert the path filters into a list of path segments and
# strip the root path segment.
path_segments = path.split(path_separator)
# Remove initial root entry
path_segments.pop(0)
if not path_segments[-1]:
logger.warning(
'Empty last path segment in path filter: "{0:s}"'.format(path))
path_segments.pop(-1)
try:
find_spec = file_system_searcher.FindSpec(
location_glob=path_segments, case_sensitive=False)
except ValueError as exception:
logger.error((
'Unable to build find specification for path: "{0:s}" with '
'error: {1!s}').format(path, exception))
continue
find_specs.append(find_spec)
return find_specs
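# A standalone sketch of the path-to-segments handling above: split the path
# on the separator, drop the empty root segment, and drop a trailing empty
# segment. The paths are hypothetical examples.
def example_path_to_segments(path, path_separator='/'):
  """Splits an absolute path into segments for a find specification."""
  path_segments = path.split(path_separator)
  # Remove the initial root entry, which is an empty string.
  path_segments.pop(0)
  if path_segments and not path_segments[-1]:
    path_segments.pop(-1)
  return path_segments

print(example_path_to_segments('/var/log/*.log'))  # ['var', 'log', '*.log']
print(example_path_to_segments('/etc/hosts/'))  # ['etc', 'hosts']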
|
python
|
{
"resource": ""
}
|
q25873
|
ArtifactDefinitionsFilterHelper._BuildFindSpecsFromRegistrySourceKey
|
train
|
def _BuildFindSpecsFromRegistrySourceKey(self, key_path):
"""Build find specifications from a Windows Registry source type.
Args:
key_path (str): Windows Registry key path defined by the source.
Returns:
list[dfwinreg.FindSpec]: find specifications for the Windows Registry
source type.
"""
find_specs = []
for key_path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(
key_path, '\\'):
logger.debug('building find spec from key path glob: {0:s}'.format(
key_path_glob))
key_path_glob_upper = key_path_glob.upper()
if key_path_glob_upper.startswith('HKEY_USERS\\%%USERS.SID%%'):
key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])
find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)
find_specs.append(find_spec)
return find_specs
|
python
|
{
"resource": ""
}
|
q25874
|
ChromeExtensionPlugin._GetChromeWebStorePage
|
train
|
def _GetChromeWebStorePage(self, extension_identifier):
"""Retrieves the page for the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: page content or None.
"""
web_store_url = self._WEB_STORE_URL.format(xid=extension_identifier)
try:
response = requests.get(web_store_url)
except (requests.ConnectionError, requests.HTTPError) as exception:
logger.warning((
'[{0:s}] unable to retrieve URL: {1:s} with error: {2!s}').format(
self.NAME, web_store_url, exception))
return None
return response.text
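# A minimal sketch of the retrieval pattern above, using requests directly.
# The URL is a hypothetical placeholder; the real _WEB_STORE_URL template is
# defined on the plugin class and not shown in this snippet.
import requests

def example_fetch_page(url):
  """Returns the page content or None when the request fails."""
  try:
    response = requests.get(url)
  except (requests.ConnectionError, requests.HTTPError) as exception:
    print('unable to retrieve URL: {0:s} with error: {1!s}'.format(
        url, exception))
    return None
  return response.text

page_content = example_fetch_page('https://example.com/')
print(page_content is not None)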
|
python
|
{
"resource": ""
}
|
q25875
|
ChromeExtensionPlugin._GetPathSegmentSeparator
|
train
|
def _GetPathSegmentSeparator(self, path):
"""Given a path give back the path separator as a best guess.
Args:
path (str): path.
Returns:
str: path segment separator.
"""
if path.startswith('\\') or path[1:].startswith(':\\'):
return '\\'
if path.startswith('/'):
return '/'
    if '/' in path and '\\' in path:
# Let's count slashes and guess which one is the right one.
forward_count = len(path.split('/'))
backward_count = len(path.split('\\'))
if forward_count > backward_count:
return '/'
return '\\'
# Now we are sure there is only one type of separators yet
# the path does not start with one.
if '/' in path:
return '/'
return '\\'
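# A standalone sketch of the slash-counting heuristic above: when a path
# mixes both separators, the one that splits the path into more segments is
# assumed to be the separator. The example paths are hypothetical.
def example_guess_separator(path):
  """Returns the most likely path segment separator of a mixed path."""
  forward_count = len(path.split('/'))
  backward_count = len(path.split('\\'))
  if forward_count > backward_count:
    return '/'
  return '\\'

print(example_guess_separator('home/user/file\\name'))  # prints: /
print(example_guess_separator('C:\\Users\\user/file'))  # prints: \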
|
python
|
{
"resource": ""
}
|
q25876
|
ChromeExtensionPlugin._GetTitleFromChromeWebStore
|
train
|
def _GetTitleFromChromeWebStore(self, extension_identifier):
"""Retrieves the name of the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: name of the extension or None.
"""
# Check if we have already looked this extension up.
if extension_identifier in self._extensions:
return self._extensions.get(extension_identifier)
page_content = self._GetChromeWebStorePage(extension_identifier)
if not page_content:
logger.warning(
'[{0:s}] no data returned for extension identifier: {1:s}'.format(
self.NAME, extension_identifier))
return None
first_line, _, _ = page_content.partition('\n')
match = self._TITLE_RE.search(first_line)
name = None
if match:
title = match.group(1)
if title.startswith('Chrome Web Store - '):
name = title[19:]
elif title.endswith('- Chrome Web Store'):
name = title[:-19]
if not name:
self._extensions[extension_identifier] = 'UNKNOWN'
return None
self._extensions[extension_identifier] = name
return name
|
python
|
{
"resource": ""
}
|
q25877
|
PlasoValueExpander._GetMessage
|
train
|
def _GetMessage(self, event_object):
"""Returns a properly formatted message string.
Args:
      event_object: the event object (instance of EventObject).
Returns:
A formatted message string.
"""
# TODO: move this somewhere where the mediator can be instantiated once.
formatter_mediator = formatters_mediator.FormatterMediator()
result = ''
try:
result, _ = formatters_manager.FormattersManager.GetMessageStrings(
formatter_mediator, event_object)
except KeyError as exception:
logging.warning(
'Unable to correctly assemble event with error: {0!s}'.format(
exception))
return result
|
python
|
{
"resource": ""
}
|
q25878
|
PlasoValueExpander._GetSources
|
train
|
def _GetSources(self, event_object):
"""Returns properly formatted source strings.
Args:
      event_object: the event object (instance of EventObject).
    Returns:
      tuple[str, str]: short and long version of the source string.
    """
    source_short = source_long = ''
    try:
source_short, source_long = (
formatters_manager.FormattersManager.GetSourceStrings(event_object))
except KeyError as exception:
logging.warning(
'Unable to correctly assemble event with error: {0!s}'.format(
exception))
return source_short, source_long
|
python
|
{
"resource": ""
}
|
q25879
|
PlasoExpression.Compile
|
train
|
def Compile(self, filter_implementation):
"""Compiles the filter implementation.
Args:
filter_implementation: a filter object (instance of objectfilter.TODO).
Returns:
A filter operator (instance of TODO).
Raises:
ParserError: if an unknown operator is provided.
"""
self.attribute = self.swap_source.get(self.attribute, self.attribute)
arguments = [self.attribute]
op_str = self.operator.lower()
operator = filter_implementation.OPS.get(op_str, None)
if not operator:
raise errors.ParseError('Unknown operator {0:s} provided.'.format(
self.operator))
# Plaso specific implementation - if we are comparing a timestamp
# to a value, we use our specific implementation that compares
# timestamps in a "human readable" format.
if self.attribute == 'timestamp':
args = []
for argument in self.args:
args.append(DateCompareObject(argument))
self.args = args
for argument in self.args:
if isinstance(argument, DateCompareObject):
if 'Less' in str(operator):
TimeRangeCache.SetUpperTimestamp(argument.data)
else:
TimeRangeCache.SetLowerTimestamp(argument.data)
arguments.extend(self.args)
expander = filter_implementation.FILTERS['ValueExpander']
ops = operator(arguments=arguments, value_expander=expander)
if not self.bool_value:
if hasattr(ops, 'FlipBool'):
ops.FlipBool()
return ops
|
python
|
{
"resource": ""
}
|
q25880
|
TimeRangeCache.SetLowerTimestamp
|
train
|
def SetLowerTimestamp(cls, timestamp):
"""Sets the lower bound timestamp."""
if not hasattr(cls, '_lower'):
cls._lower = timestamp
return
if timestamp < cls._lower:
cls._lower = timestamp
|
python
|
{
"resource": ""
}
|
q25881
|
TimeRangeCache.SetUpperTimestamp
|
train
|
def SetUpperTimestamp(cls, timestamp):
"""Sets the upper bound timestamp."""
if not hasattr(cls, '_upper'):
cls._upper = timestamp
return
if timestamp > cls._upper:
cls._upper = timestamp
|
python
|
{
"resource": ""
}
|
q25882
|
TimeRangeCache.GetTimeRange
|
train
|
def GetTimeRange(cls):
"""Return the first and last timestamp of filter range."""
first = getattr(cls, '_lower', 0)
last = getattr(cls, '_upper', cls.MAX_INT64)
if first < last:
return first, last
return last, first
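# A usage sketch for the class methods above, assuming the TimeRangeCache
# class defined in this module is available in the current namespace; the
# values are hypothetical microsecond timestamps.
TimeRangeCache.SetLowerTimestamp(1546300800000000)
TimeRangeCache.SetLowerTimestamp(1514764800000000)  # lower bound shrinks
TimeRangeCache.SetUpperTimestamp(1548979200000000)  # upper bound grows
first_timestamp, last_timestamp = TimeRangeCache.GetTimeRange()
print(first_timestamp, last_timestamp)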
|
python
|
{
"resource": ""
}
|
q25883
|
SophosAVLogParser.VerifyStructure
|
train
|
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Sophos Anti-Virus log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._LOG_LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a Sophos Anti-Virus log file')
return False
    # Expect spaces at positions 9 and 16.
if ' ' not in (line[8], line[15]):
logger.debug('Not a Sophos Anti-Virus log file')
return False
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=structure.date_time)
except ValueError:
logger.debug((
'Not a Sophos Anti-Virus log file, invalid date and time: '
'{0!s}').format(structure.date_time))
return False
return True
|
python
|
{
"resource": ""
}
|
q25884
|
OpenXMLPlugin._GetPropertyValue
|
train
|
def _GetPropertyValue(self, parser_mediator, properties, property_name):
"""Retrieves a property value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
properties (dict[str, object]): properties.
property_name (str): name of the property.
Returns:
str: property value.
"""
property_value = properties.get(property_name, None)
if isinstance(property_value, py2to3.BYTES_TYPE):
try:
        # TODO: get encoding from XML metadata.
property_value = property_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode property: {0:s}'.format(property_name))
return property_value
|
python
|
{
"resource": ""
}
|
q25885
|
OpenXMLPlugin._FormatPropertyName
|
train
|
def _FormatPropertyName(self, property_name):
"""Formats a camel case property name as snake case.
Args:
property_name (str): property name in camel case.
Returns:
str: property name in snake case.
"""
# TODO: Add Unicode support.
fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()
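# A standalone sketch of the camel case to snake case conversion above; the
# property names are hypothetical examples.
import re

def example_to_snake_case(property_name):
  """Formats a camel case property name as snake case."""
  fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
  return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()

print(example_to_snake_case('TotalTime'))  # prints: total_time
print(example_to_snake_case('HyperlinksChanged'))  # prints: hyperlinks_changed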
|
python
|
{
"resource": ""
}
|
q25886
|
OpenXMLPlugin._ParsePropertiesXMLFile
|
train
|
def _ParsePropertiesXMLFile(self, xml_data):
"""Parses a properties XML file.
Args:
      xml_data (bytes): data of a properties XML file.
Returns:
dict[str, object]: properties.
Raises:
zipfile.BadZipfile: if the properties XML file cannot be read.
"""
xml_root = ElementTree.fromstring(xml_data)
properties = {}
for xml_element in xml_root.iter():
if not xml_element.text:
continue
# The property name is formatted as: {URL}name
# For example: {http://purl.org/dc/terms/}modified
_, _, name = xml_element.tag.partition('}')
      # Do not include the 'lpstr' attribute because it is very verbose.
if name == 'lpstr':
continue
property_name = self._PROPERTY_NAMES.get(name, None)
if not property_name:
property_name = self._FormatPropertyName(name)
properties[property_name] = xml_element.text
return properties
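# A standalone sketch of the tag handling above: ElementTree prefixes each
# tag with its XML namespace as "{URL}name", so partition('}') strips the
# namespace part. The XML snippet is a hypothetical example.
from xml.etree import ElementTree

example_xml_data = (
    b'<cp:coreProperties'
    b' xmlns:cp="http://schemas.openxmlformats.org/package/2006/'
    b'metadata/core-properties"'
    b' xmlns:dcterms="http://purl.org/dc/terms/">'
    b'<dcterms:modified>2019-01-01T10:00:00Z</dcterms:modified>'
    b'</cp:coreProperties>')

example_root = ElementTree.fromstring(example_xml_data)
for example_element in example_root.iter():
  if not example_element.text:
    continue
  _, _, example_name = example_element.tag.partition('}')
  print(example_name, example_element.text)  # prints: modified 2019-01-01T10:00:00Z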
|
python
|
{
"resource": ""
}
|
q25887
|
OpenXMLPlugin.InspectZipFile
|
train
|
def InspectZipFile(self, parser_mediator, zip_file):
"""Parses an OXML file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
zip_file (zipfile.ZipFile): the zip file containing OXML content. It is
        not closed in this method, but will be closed by the parser logic
in czip.py.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
xml_data = zip_file.read('_rels/.rels')
property_files = self._ParseRelationshipsXMLFile(xml_data)
except (IndexError, IOError, KeyError, OverflowError, ValueError,
zipfile.BadZipfile) as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse relationships XML file: _rels/.rels with error: '
'{0!s}').format(exception))
return
metadata = {}
for path in property_files:
try:
xml_data = zip_file.read(path)
properties = self._ParsePropertiesXMLFile(xml_data)
except (IndexError, IOError, KeyError, OverflowError, ValueError,
zipfile.BadZipfile) as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse properties XML file: {0:s} with error: '
'{1!s}').format(path, exception))
continue
metadata.update(properties)
event_data = OpenXMLEventData()
event_data.app_version = self._GetPropertyValue(
parser_mediator, metadata, 'app_version')
event_data.author = self._GetPropertyValue(
parser_mediator, metadata, 'author')
event_data.creating_app = self._GetPropertyValue(
parser_mediator, metadata, 'creating_app')
event_data.doc_security = self._GetPropertyValue(
parser_mediator, metadata, 'doc_security')
event_data.hyperlinks_changed = self._GetPropertyValue(
parser_mediator, metadata, 'hyperlinks_changed')
event_data.i4 = self._GetPropertyValue(
parser_mediator, metadata, 'i4')
event_data.last_saved_by = self._GetPropertyValue(
parser_mediator, metadata, 'last_saved_by')
event_data.links_up_to_date = self._GetPropertyValue(
parser_mediator, metadata, 'links_up_to_date')
event_data.number_of_characters = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_characters')
event_data.number_of_characters_with_spaces = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_characters_with_spaces')
event_data.number_of_lines = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_lines')
event_data.number_of_pages = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_pages')
event_data.number_of_paragraphs = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_paragraphs')
event_data.number_of_words = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_words')
event_data.revision_number = self._GetPropertyValue(
parser_mediator, metadata, 'revision_number')
event_data.scale_crop = self._GetPropertyValue(
parser_mediator, metadata, 'scale_crop')
event_data.shared_doc = self._GetPropertyValue(
parser_mediator, metadata, 'shared_doc')
event_data.template = self._GetPropertyValue(
parser_mediator, metadata, 'template')
event_data.total_time = self._GetPropertyValue(
parser_mediator, metadata, 'total_time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'created',
definitions.TIME_DESCRIPTION_CREATION, 'creation time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'modified',
definitions.TIME_DESCRIPTION_MODIFICATION, 'modification time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'last_printed',
definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')
|
python
|
{
"resource": ""
}
|
q25888
|
AttributeContainer.CopyFromDict
|
train
|
def CopyFromDict(self, attributes):
"""Copies the attribute container from a dictionary.
Args:
attributes (dict[str, object]): attribute values per name.
"""
for attribute_name, attribute_value in attributes.items():
# Not using startswith to improve performance.
if attribute_name[0] == '_':
continue
setattr(self, attribute_name, attribute_value)
|
python
|
{
"resource": ""
}
|
q25889
|
AttributeContainer.GetAttributeNames
|
train
|
def GetAttributeNames(self):
"""Retrieves the names of all attributes.
Returns:
list[str]: attribute names.
"""
attribute_names = []
for attribute_name in iter(self.__dict__.keys()):
# Not using startswith to improve performance.
if attribute_name[0] == '_':
continue
attribute_names.append(attribute_name)
return attribute_names
|
python
|
{
"resource": ""
}
|
q25890
|
AttributeContainer.GetAttributes
|
train
|
def GetAttributes(self):
"""Retrieves the attribute names and values.
Attributes that are set to None are ignored.
Yields:
tuple[str, object]: attribute name and value.
"""
for attribute_name, attribute_value in iter(self.__dict__.items()):
# Not using startswith to improve performance.
if attribute_name[0] == '_' or attribute_value is None:
continue
yield attribute_name, attribute_value
|
python
|
{
"resource": ""
}
|
q25891
|
AttributeContainer.GetAttributeValuesString
|
train
|
def GetAttributeValuesString(self):
"""Retrieves a comparable string of the attribute values.
Returns:
str: comparable string of the attribute values.
"""
attributes = []
for attribute_name, attribute_value in sorted(self.__dict__.items()):
# Not using startswith to improve performance.
if attribute_name[0] == '_' or attribute_value is None:
continue
if isinstance(attribute_value, dict):
attribute_value = sorted(attribute_value.items())
elif isinstance(attribute_value, py2to3.BYTES_TYPE):
attribute_value = repr(attribute_value)
attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)
attributes.append(attribute_string)
return ', '.join(attributes)
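# A usage sketch for the methods above, assuming the AttributeContainer class
# defined in this module is available in the current namespace; the attribute
# names and values are hypothetical.
container = AttributeContainer()
container.CopyFromDict({'data_type': 'example:event', 'offset': 16})

print(container.GetAttributeNames())  # ['data_type', 'offset']
print(dict(container.GetAttributes()))  # {'data_type': 'example:event', 'offset': 16}
print(container.GetAttributeValuesString())  # data_type: example:event, offset: 16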
|
python
|
{
"resource": ""
}
|
q25892
|
ZeroMQQueue._SendItem
|
train
|
def _SendItem(self, zmq_socket, item, block=True):
"""Attempts to send an item to a ZeroMQ socket.
Args:
      zmq_socket (zmq.Socket): used to send the item.
      item (object): item to send on the queue; it will be pickled prior to
          sending.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Returns:
bool: whether the item was sent successfully.
"""
try:
logger.debug('{0:s} sending item'.format(self.name))
if block:
zmq_socket.send_pyobj(item)
else:
zmq_socket.send_pyobj(item, zmq.DONTWAIT)
logger.debug('{0:s} sent item'.format(self.name))
return True
except zmq.error.Again:
logger.debug('{0:s} could not send an item'.format(self.name))
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}.'.format(
self.name))
return False
|
python
|
{
"resource": ""
}
|
q25893
|
ZeroMQQueue._ReceiveItemOnActivity
|
train
|
def _ReceiveItemOnActivity(self, zmq_socket):
"""Attempts to receive an item from a ZeroMQ socket.
Args:
      zmq_socket (zmq.Socket): used to receive the item.
Returns:
object: item from the socket.
Raises:
QueueEmpty: if no item could be received within the timeout.
zmq.error.ZMQError: if an error occurs in ZeroMQ
"""
events = zmq_socket.poll(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)
if events:
try:
        received_object = zmq_socket.recv_pyobj()
return received_object
except zmq.error.Again:
logger.error(
'{0:s}. Failed to receive item in time.'.format(
self.name))
raise
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(
self.name))
raise
raise errors.QueueEmpty
|
python
|
{
"resource": ""
}
|
q25894
|
ZeroMQQueue._SetSocketTimeouts
|
train
|
def _SetSocketTimeouts(self):
"""Sets the timeouts for socket send and receive."""
# Note that timeout must be an integer value. If timeout is a float
# it appears that zmq will not enforce the timeout.
timeout = int(self.timeout_seconds * 1000)
receive_timeout = min(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS, timeout)
send_timeout = min(self._ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS, timeout)
self._zmq_socket.setsockopt(zmq.RCVTIMEO, receive_timeout)
self._zmq_socket.setsockopt(zmq.SNDTIMEO, send_timeout)
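# A minimal standalone sketch of the timeout handling above: convert the
# timeout in seconds to integer milliseconds and apply it with setsockopt.
# The 4-minute cap is a hypothetical stand-in for the class constants.
import zmq

example_timeout_milliseconds = int(5.0 * 1000)
example_cap_milliseconds = 240000
example_receive_timeout = min(
    example_cap_milliseconds, example_timeout_milliseconds)
example_send_timeout = min(
    example_cap_milliseconds, example_timeout_milliseconds)

example_context = zmq.Context()
example_socket = example_context.socket(zmq.PULL)
example_socket.setsockopt(zmq.RCVTIMEO, example_receive_timeout)
example_socket.setsockopt(zmq.SNDTIMEO, example_send_timeout)
example_socket.close()
example_context.term()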
|
python
|
{
"resource": ""
}
|
q25895
|
ZeroMQQueue._CreateZMQSocket
|
train
|
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket."""
logger.debug('Creating socket for {0:s}'.format(self.name))
if not self._zmq_context:
self._zmq_context = zmq.Context()
# The terminate and close threading events need to be created when the
# socket is opened. Threading events are unpickleable objects and cannot
    # be passed in multiprocessing on Windows.
if not self._terminate_event:
self._terminate_event = threading.Event()
if not self._closed_event:
self._closed_event = threading.Event()
if self._zmq_socket:
logger.debug('Closing old socket for {0:s}'.format(self.name))
self._zmq_socket.close()
self._zmq_socket = None
self._zmq_socket = self._zmq_context.socket(self._SOCKET_TYPE)
self._SetSocketTimeouts()
self._SetSocketHighWaterMark()
if self.port:
address = '{0:s}:{1:d}'.format(self._SOCKET_ADDRESS, self.port)
if self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT:
self._zmq_socket.connect(address)
logger.debug('{0:s} connected to {1:s}'.format(self.name, address))
else:
self._zmq_socket.bind(address)
logger.debug(
'{0:s} bound to specified port {1:s}'.format(self.name, address))
else:
self.port = self._zmq_socket.bind_to_random_port(self._SOCKET_ADDRESS)
logger.debug(
'{0:s} bound to random port {1:d}'.format(self.name, self.port))
|
python
|
{
"resource": ""
}
|
q25896
|
ZeroMQBufferedQueue._CreateZMQSocket
|
train
|
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket as well as a regular queue and a thread."""
super(ZeroMQBufferedQueue, self)._CreateZMQSocket()
if not self._zmq_thread:
thread_name = '{0:s}_zmq_responder'.format(self.name)
self._zmq_thread = threading.Thread(
target=self._ZeroMQResponder, args=[self._queue], name=thread_name)
self._zmq_thread.start()
|
python
|
{
"resource": ""
}
|
q25897
|
ZeroMQBufferedReplyQueue._ZeroMQResponder
|
train
|
def _ZeroMQResponder(self, source_queue):
"""Listens for requests and replies to clients.
Args:
source_queue (Queue.queue): queue to use to pull items from.
Raises:
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug('{0:s} responder thread started'.format(self.name))
item = None
while not self._terminate_event.is_set():
if not item:
try:
if self._closed_event.is_set():
item = source_queue.get_nowait()
else:
item = source_queue.get(True, self._buffer_timeout_seconds)
except Queue.Empty:
if self._closed_event.is_set():
break
continue
try:
# We need to receive a request before we can reply with the item.
self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if self._closed_event.is_set() and self._queue.empty():
break
continue
sent_successfully = self._SendItem(self._zmq_socket, item)
item = None
if not sent_successfully:
logger.error('Queue {0:s} unable to send item.'.format(self.name))
break
logger.info('Queue {0:s} responder exiting.'.format(self.name))
self._zmq_socket.close(self._linger_seconds)
|
python
|
{
"resource": ""
}
|
q25898
|
OutputManager.DeregisterOutput
|
train
|
def DeregisterOutput(cls, output_class):
"""Deregisters an output class.
The output classes are identified based on their NAME attribute.
Args:
output_class (type): output module class.
Raises:
KeyError: if output class is not set for the corresponding data type.
"""
output_class_name = output_class.NAME.lower()
if output_class_name in cls._disabled_output_classes:
class_dict = cls._disabled_output_classes
else:
class_dict = cls._output_classes
if output_class_name not in class_dict:
raise KeyError(
'Output class not set for name: {0:s}.'.format(
output_class.NAME))
del class_dict[output_class_name]
|
python
|
{
"resource": ""
}
|
q25899
|
OutputManager.GetDisabledOutputClasses
|
train
|
def GetDisabledOutputClasses(cls):
"""Retrieves the disabled output classes and its associated name.
Yields:
tuple[str, type]: output module name and class.
"""
for _, output_class in iter(cls._disabled_output_classes.items()):
yield output_class.NAME, output_class
|
python
|
{
"resource": ""
}
|