Dataset columns:

  _id               stringlengths   (2 to 7)
  title             stringlengths   (1 to 88)
  partition         stringclasses   (3 values)
  text              stringlengths   (75 to 19.8k)
  language          stringclasses   (1 value)
  meta_information  dict
q24000
SubDomainEnumerator._extract_from_sans
train
def _extract_from_sans(self):
    """Looks for different TLDs as well as different sub-domains in SAN list"""
    self.logger.info("{} Trying to find Subdomains in SANs list".format(COLORED_COMBOS.NOTIFY))
    if self.host.naked:
        domain = self.host.naked
        tld_less = domain.split(".")[0]
    else:
        domain = self.host.target.split(".")
        tld_less = domain[1]
        domain = ".".join(domain[1:])

    for san in self.sans:
        if (tld_less in san or domain in san) and self.target != san and not san.startswith("*"):
            self.logger.info("{} Subdomain detected: {}".format(COLORED_COMBOS.GOOD, san))
python
{ "resource": "" }
q24001
RequestHandler.get_new_session
train
def get_new_session(self):
    """Returns a new session using the object's proxies and headers"""
    session = Session()
    session.headers = self.headers
    session.proxies = self._get_request_proxies()
    return session
python
{ "resource": "" }
q24002
HelpUtilities.validate_port_range
train
def validate_port_range(cls, port_range):
    """Validate port range for Nmap scan"""
    ports = port_range.split("-")
    # Exactly two non-empty parts, upper bound within the 16-bit port space
    if len(ports) == 2 and all(ports) and int(ports[-1]) <= 65535:
        return True
    raise ScannerException("Invalid port range {}".format(port_range))
python
{ "resource": "" }
q24003
HelpUtilities.create_output_directory
train
def create_output_directory(cls, outdir):
    """Tries to create base output directory"""
    cls.PATH = outdir
    try:
        os.mkdir(outdir)
    except FileExistsError:
        pass
python
{ "resource": "" }
q24004
FixParser.add_raw
train
def add_raw(self, length_tag, value_tag):
    """Define the tags used for a private raw data field.

    :param length_tag: tag number of length field.
    :param value_tag: tag number of value field.

    Data fields are not terminated by the SOH character as is usual for
    FIX, but instead have a second, preceding field that specifies the
    length of the value in bytes.  The parser is initialised with all
    the data fields defined in FIX.5.0, but if your application uses
    private data fields, you can add them here, and the parser will
    process them correctly."""
    self.raw_len_tags.append(length_tag)
    self.raw_data_tags.append(value_tag)
    return
python
{ "resource": "" }
q24005
FixParser.remove_raw
train
def remove_raw(self, length_tag, value_tag):
    """Remove the tags for a data type field.

    :param length_tag: tag number of the length field.
    :param value_tag: tag number of the value field.

    You can remove either private or standard data field definitions
    in case a particular application uses them for a field of a
    different type."""
    self.raw_len_tags.remove(length_tag)
    self.raw_data_tags.remove(value_tag)
    return
python
{ "resource": "" }
q24006
FixParser.get_message
train
def get_message(self):
    """Process the accumulated buffer and return the first message.

    If the buffer starts with FIX fields other than BeginString (8),
    these are discarded until the start of a message is found.

    If no BeginString (8) field is found, this function returns None.
    Similarly, if (after a BeginString) no Checksum (10) field is
    found, the function returns None.

    Otherwise, it returns a simplefix.FixMessage instance initialised
    with the fields from the first complete message found in the
    buffer."""

    # Break buffer into tag=value pairs.
    start = 0
    point = 0
    in_tag = True
    tag = 0

    while point < len(self.buf):
        if in_tag and self.buf[point] == EQUALS_BYTE:
            tag_string = self.buf[start:point]
            point += 1

            tag = int(tag_string)
            if tag in self.raw_data_tags and self.raw_len > 0:
                if self.raw_len > len(self.buf) - point:
                    break

                value = self.buf[point:point + self.raw_len]
                self.pairs.append((tag, value))
                self.buf = self.buf[point + self.raw_len + 1:]
                point = 0
                self.raw_len = 0
                start = point
            else:
                in_tag = False
                start = point

        elif self.buf[point] == SOH_BYTE:
            value = self.buf[start:point]
            self.pairs.append((tag, value))
            self.buf = self.buf[point + 1:]
            point = 0
            start = point
            in_tag = True

            if tag in self.raw_len_tags:
                self.raw_len = int(value)

        point += 1

    if len(self.pairs) == 0:
        return None

    # Check first pair is FIX BeginString.
    while self.pairs and self.pairs[0][0] != 8:
        # Discard pairs until we find the beginning of a message.
        self.pairs.pop(0)

    if len(self.pairs) == 0:
        return None

    # Look for checksum.
    index = 0
    while index < len(self.pairs) and self.pairs[index][0] != 10:
        index += 1

    if index == len(self.pairs):
        return None

    # Found checksum, so we have a complete message.
    m = FixMessage()
    pairs = self.pairs[:index + 1]
    for tag, value in pairs:
        m.append_pair(tag, value)
    self.pairs = self.pairs[index + 1:]

    return m
python
{ "resource": "" }
q24007
fix_val
train
def fix_val(value):
    """Make a FIX value from a string, bytes, or number."""
    if type(value) == bytes:
        return value

    if sys.version_info[0] == 2:
        return bytes(value)
    elif type(value) == str:
        return bytes(value, 'UTF-8')
    else:
        return bytes(str(value), 'ASCII')
python
{ "resource": "" }
q24008
fix_tag
train
def fix_tag(value):
    """Make a FIX tag value from string, bytes, or integer."""
    if sys.version_info[0] == 2:
        return bytes(value)
    else:
        if type(value) == bytes:
            return value
        elif type(value) == str:
            return value.encode('ASCII')
        return str(value).encode('ASCII')
python
{ "resource": "" }
q24009
FixMessage.append_pair
train
def append_pair(self, tag, value, header=False):
    """Append a tag=value pair to this message.

    :param tag: Integer or string FIX tag number.
    :param value: FIX tag value.
    :param header: Append to header if True; default to body.

    Both parameters are explicitly converted to strings before
    storage, so it's ok to pass integers if that's easier for
    your program logic.

    Note: a Python 'None' value will be silently ignored, and
    no field is appended."""
    if tag is None or value is None:
        return

    if int(tag) == 8:
        self.begin_string = fix_val(value)
    if int(tag) == 35:
        self.message_type = fix_val(value)

    if header:
        self.pairs.insert(self.header_index, (fix_tag(tag), fix_val(value)))
        self.header_index += 1
    else:
        self.pairs.append((fix_tag(tag), fix_val(value)))
    return
python
{ "resource": "" }
q24010
FixMessage.append_time
train
def append_time(self, tag, timestamp=None, precision=3, utc=True, header=False):
    """Append a time field to this message.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time (see below) value to append, or None for now.
    :param precision: Number of decimal digits.  Zero for seconds only,
        three for milliseconds, 6 for microseconds.  Defaults to
        milliseconds.
    :param utc: Use UTC if True, local time if False.
    :param header: Append to header if True; default to body.

    THIS METHOD IS DEPRECATED!
    USE append_utc_timestamp() OR append_tz_timestamp() INSTEAD.

    Append a timestamp in FIX format from a Python time.time or
    datetime.datetime value.

    Note that prior to FIX 5.0, precision must be zero or three to be
    compliant with the standard."""
    warnings.warn("simplefix.FixMessage.append_time() is deprecated. "
                  "Use append_utc_timestamp() or append_tz_timestamp() "
                  "instead.", DeprecationWarning)
    if not timestamp:
        t = datetime.datetime.utcnow()
    elif type(timestamp) is float:
        if utc:
            t = datetime.datetime.utcfromtimestamp(timestamp)
        else:
            t = datetime.datetime.fromtimestamp(timestamp)
    else:
        t = timestamp

    s = t.strftime("%Y%m%d-%H:%M:%S")
    if precision == 3:
        s += ".%03d" % (t.microsecond / 1000)
    elif precision == 6:
        s += ".%06d" % t.microsecond
    elif precision != 0:
        raise ValueError("Precision should be one of 0, 3 or 6 digits")

    return self.append_pair(tag, s, header=header)
python
{ "resource": "" }
q24011
FixMessage.append_utc_timestamp
train
def append_utc_timestamp(self, tag, timestamp=None, precision=3, header=False):
    """Append a field with a UTCTimestamp value.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time value, see below.
    :param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
    :param header: Append to FIX header if True; default to body.

    The `timestamp` value should be a datetime, such as created by
    datetime.datetime.utcnow(); a float, being the number of seconds
    since midnight 1 Jan 1970 UTC, such as returned by time.time();
    or, None, in which case datetime.datetime.utcnow() is used to
    get the current UTC time.

    Precision values other than zero (seconds), 3 (milliseconds),
    or 6 (microseconds) will raise an exception.  Note that prior
    to FIX 5.0, only values of 0 or 3 comply with the standard."""
    return self._append_utc_datetime(tag,
                                     "%Y%m%d-%H:%M:%S",
                                     timestamp,
                                     precision,
                                     header)
python
{ "resource": "" }
q24012
FixMessage.append_utc_time_only
train
def append_utc_time_only(self, tag, timestamp=None, precision=3, header=False):
    """Append a field with a UTCTimeOnly value.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time value, see below.
    :param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
    :param header: Append to FIX header if True; default to body.

    The `timestamp` value should be a datetime, such as created by
    datetime.datetime.utcnow(); a float, being the number of seconds
    since midnight 1 Jan 1970 UTC, such as returned by time.time();
    or, None, in which case datetime.datetime.utcnow() is used to
    get the current UTC time.

    Precision values other than zero (seconds), 3 (milliseconds),
    or 6 (microseconds) will raise an exception.  Note that prior
    to FIX 5.0, only values of 0 or 3 comply with the standard."""
    return self._append_utc_datetime(tag,
                                     "%H:%M:%S",
                                     timestamp,
                                     precision,
                                     header)
python
{ "resource": "" }
q24013
FixMessage.append_tz_timestamp
train
def append_tz_timestamp(self, tag, timestamp=None, precision=3, header=False):
    """Append a field with a TZTimestamp value, derived from local time.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time value, see below.
    :param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
    :param header: Append to FIX header if True; default to body.

    The `timestamp` value should be a local datetime, such as created
    by datetime.datetime.now(); a float, being the number of seconds
    since midnight 1 Jan 1970 UTC, such as returned by time.time();
    or, None, in which case datetime.datetime.now() is used to get
    the current local time.

    Precision values other than zero (seconds), 3 (milliseconds),
    or 6 (microseconds) will raise an exception.  Note that prior
    to FIX 5.0, only values of 0 or 3 comply with the standard."""

    # Get float offset from Unix epoch.
    if timestamp is None:
        now = time.time()
    elif type(timestamp) is float:
        now = timestamp
    else:
        now = time.mktime(timestamp.timetuple()) + \
              (timestamp.microsecond * 1e-6)

    # Get offset of local timezone east of UTC.
    utc = datetime.datetime.utcfromtimestamp(now)
    local = datetime.datetime.fromtimestamp(now)
    td = local - utc
    offset = int(((td.days * 86400) + td.seconds) / 60)

    s = local.strftime("%Y%m%d-%H:%M:%S")
    if precision == 3:
        s += ".%03u" % (local.microsecond / 1000)
    elif precision == 6:
        s += ".%06u" % local.microsecond
    elif precision != 0:
        raise ValueError("Precision (%u) should be one of "
                         "0, 3 or 6 digits" % precision)

    s += self._tz_offset_string(offset)
    return self.append_pair(tag, s, header=header)
python
{ "resource": "" }
q24014
FixMessage.append_tz_time_only
train
def append_tz_time_only(self, tag, timestamp=None, precision=3, header=False):
    """Append a field with a TZTimeOnly value.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time value, see below.
    :param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
    :param header: Append to FIX header if True; default to body.

    The `timestamp` value should be a local datetime, such as created
    by datetime.datetime.now(); a float, being the number of seconds
    since midnight 1 Jan 1970 UTC, such as returned by time.time();
    or, None, in which case datetime.datetime.now() is used to get
    the current local time.

    Precision values other than None (minutes), zero (seconds),
    3 (milliseconds), or 6 (microseconds) will raise an exception.
    Note that prior to FIX 5.0, only values of 0 or 3 comply with
    the standard."""
    if timestamp is None:
        t = datetime.datetime.now()
    elif type(timestamp) is float:
        t = datetime.datetime.fromtimestamp(timestamp)
    else:
        t = timestamp

    now = time.mktime(t.timetuple()) + (t.microsecond * 1e-6)
    utc = datetime.datetime.utcfromtimestamp(now)
    td = t - utc
    offset = int(((td.days * 86400) + td.seconds) / 60)

    s = t.strftime("%H:%M")
    if precision == 0:
        s += t.strftime(":%S")
    elif precision == 3:
        s += t.strftime(":%S")
        s += ".%03u" % (t.microsecond / 1000)
    elif precision == 6:
        s += t.strftime(":%S")
        s += ".%06u" % t.microsecond
    elif precision is not None:
        raise ValueError("Precision should be one of "
                         "None, 0, 3 or 6 digits")

    s += self._tz_offset_string(offset)
    return self.append_pair(tag, s, header=header)
python
{ "resource": "" }
q24015
FixMessage.append_tz_time_only_parts
train
def append_tz_time_only_parts(self, tag, h, m, s=None, ms=None, us=None, offset=0, header=False):
    """Append a field with a TZTimeOnly value from components.

    :param tag: Integer or string FIX tag number.
    :param h: Hours, in range 0 to 23.
    :param m: Minutes, in range 0 to 59.
    :param s: Optional seconds, in range 0 to 59 (60 for leap second).
    :param ms: Optional milliseconds, in range 0 to 999.
    :param us: Optional microseconds, in range 0 to 999.
    :param offset: Minutes east of UTC, in range -1439 to +1439.
    :param header: Append to FIX header if True; default to body.

    Formats the TZTimeOnly value from its components.  If `s`, `ms`
    or `us` are None, the precision is truncated at that point."""
    ih = int(h)
    if ih < 0 or ih > 23:
        raise ValueError("Hour value `h` (%u) out of range 0 to 23" % ih)
    im = int(m)
    if im < 0 or im > 59:
        raise ValueError("Minute value `m` (%u) out of range 0 to 59" % im)

    v = "%02u:%02u" % (ih, im)
    if s is not None:
        isec = int(s)
        if isec < 0 or isec > 60:
            raise ValueError("Seconds value `s` (%u) out of range "
                             "0 to 60" % isec)
        v += ":%02u" % isec

        if ms is not None:
            ims = int(ms)
            if ims < 0 or ims > 999:
                raise ValueError("Milliseconds value `ms` (%u) "
                                 "out of range 0 to 999" % ims)
            v += ".%03u" % ims

            if us is not None:
                ius = int(us)
                if ius < 0 or ius > 999:
                    raise ValueError("Microseconds value `us` (%u) "
                                     "out of range 0 to 999" % ius)
                v += "%03u" % ius

    v += self._tz_offset_string(offset)
    return self.append_pair(tag, v, header=header)
python
{ "resource": "" }
q24016
FixMessage.append_string
train
def append_string(self, field, header=False):
    """Append a tag=value pair in string format.

    :param field: String "tag=value" to be appended to this message.
    :param header: Append to header if True; default to body.

    The string is split at the first '=' character, and the resulting
    tag and value strings are appended to the message."""

    # Split into tag and value.
    bits = field.split('=', 1)
    if len(bits) != 2:
        raise ValueError("Field missing '=' separator.")

    # Check tag is an integer.
    try:
        tag_int = int(bits[0])
    except ValueError:
        raise ValueError("Tag value must be an integer")

    # Save.
    self.append_pair(tag_int, bits[1], header=header)
    return
python
{ "resource": "" }
q24017
FixMessage.append_strings
train
def append_strings(self, string_list, header=False):
    """Append a tag=value pair for each supplied string.

    :param string_list: List of "tag=value" strings.
    :param header: Append to header if True; default to body.

    Each string is split, and the resulting tag and value strings
    are appended to the message."""
    for s in string_list:
        self.append_string(s, header=header)
    return
python
{ "resource": "" }
q24018
FixMessage.append_data
train
def append_data(self, len_tag, val_tag, data, header=False):
    """Append raw data, possibly including an embedded SOH.

    :param len_tag: Tag number for length field.
    :param val_tag: Tag number for value field.
    :param data: Raw data byte string.
    :param header: Append to header if True; default to body.

    Appends two pairs: a length pair, followed by a data pair,
    containing the raw data supplied.  Example fields that should
    use this method include: 95/96, 212/213, 354/355, etc."""
    self.append_pair(len_tag, len(data), header=header)
    self.append_pair(val_tag, data, header=header)
    return
python
{ "resource": "" }
q24019
FixMessage.get
train
def get(self, tag, nth=1):
    """Return n-th value for tag.

    :param tag: FIX field tag number.
    :param nth: Index of tag if repeating, first is 1.
    :return: None if nothing found, otherwise value matching tag.

    Defaults to returning the first matching value of 'tag', but
    if the 'nth' parameter is overridden, can get repeated fields."""
    tag = fix_tag(tag)
    nth = int(nth)

    for t, v in self.pairs:
        if t == tag:
            nth -= 1
            if nth == 0:
                return v

    return None
python
{ "resource": "" }
q24020
FixMessage.remove
train
def remove(self, tag, nth=1):
    """Remove the n-th occurrence of tag in this message.

    :param tag: FIX field tag number to be removed.
    :param nth: Index of tag if repeating, first is 1.
    :returns: Value of the field if removed, None otherwise."""
    tag = fix_tag(tag)
    nth = int(nth)

    for i in range(len(self.pairs)):
        t, v = self.pairs[i]
        if t == tag:
            nth -= 1
            if nth == 0:
                self.pairs.pop(i)
                return v

    return None
python
{ "resource": "" }
q24021
FixMessage.encode
train
def encode(self, raw=False):
    """Convert message to on-the-wire FIX format.

    :param raw: If True, encode pairs exactly as provided.

    Unless 'raw' is set, this function will calculate and
    correctly set the BodyLength (9) and Checksum (10) fields, and
    ensure that the BeginString (8), Body Length (9), Message Type
    (35) and Checksum (10) fields are in the right positions.

    This function does no further validation of the message content."""

    buf = b""
    if raw:
        # Walk pairs, creating string.
        for tag, value in self.pairs:
            buf += tag + b'=' + value + SOH_STR
        return buf

    # Cooked.
    for tag, value in self.pairs:
        if int(tag) in (8, 9, 35, 10):
            continue
        buf += tag + b'=' + value + SOH_STR

    # Prepend the message type.
    if self.message_type is None:
        raise ValueError("No message type set")

    buf = b"35=" + self.message_type + SOH_STR + buf

    # Calculate body length.
    #
    # From first byte after body length field, to the delimiter
    # before the checksum (which shouldn't be there yet).
    body_length = len(buf)

    # Prepend begin-string and body-length.
    if not self.begin_string:
        raise ValueError("No begin string set")

    buf = b"8=" + self.begin_string + SOH_STR + \
          b"9=" + fix_val("%u" % body_length) + SOH_STR + \
          buf

    # Calculate and append the checksum.
    checksum = 0
    for c in buf:
        checksum += ord(c) if sys.version_info[0] == 2 else c
    buf += b"10=" + fix_val("%03u" % (checksum % 256,)) + SOH_STR

    return buf
python
{ "resource": "" }
q24022
is_suspicious
train
def is_suspicious(pe):
    """
    unusual locations of import tables
    non recognized section names
    presence of long ASCII strings
    """

    relocations_overlap_entry_point = False
    sequential_relocs = 0

    # If relocation data is found and the entries go over the entry point, and also are very
    # continuous or point outside section's boundaries => it might imply that an obfuscation
    # trick is being used or the relocations are corrupt (maybe intentionally)
    #
    if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
        for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
            last_reloc_rva = None
            for reloc in base_reloc.entries:
                if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
                    relocations_overlap_entry_point = True

                if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
                    sequential_relocs += 1

                last_reloc_rva = reloc.rva

    # If import tables or strings exist (are pointed to) to within the header or in the area
    # between the PE header and the first section that's suspicious
    #
    # IMPLEMENT

    warnings_while_parsing = False
    # If we have warnings, that's suspicious, some of those will be because out-of-ordinary
    # values are found in the PE header fields
    # Things that are reported in warnings:
    # (parsing problems, special section characteristics i.e. W & X, uncommon values of fields,
    # unusual entrypoint, suspicious imports)
    #
    warnings = pe.get_warnings()
    if warnings:
        warnings_while_parsing = True

    # If there are few or none (should come with a standard "density" of strings/kilobytes of
    # data) longer (>8) ascii sequences that might indicate packed data, (this is similar to
    # the entropy test in some ways but might help to discard cases of legitimate installer or
    # compressed data)

    # If compressed data (high entropy) and is_driver => uuuuhhh, nasty

    pass
python
{ "resource": "" }
q24023
is_probably_packed
train
def is_probably_packed(pe):
    """Returns True if there is a high likelihood that a file is packed or
    contains compressed data.

    The sections of the PE file will be analyzed, if enough sections
    look like containing compressed data and the data makes up for more
    than 20% of the total file size, the function will return True.
    """

    # Calculate the length of the data up to the end of the last section in the
    # file. Overlay data won't be taken into account
    #
    total_pe_data_length = len(pe.trim())
    # Assume that the file is packed when no data is available
    if not total_pe_data_length:
        return True
    has_significant_amount_of_compressed_data = False

    # If some of the sections have high entropy and they make for more than 20% of the
    # file's size it's assumed that it could be an installer or a packed file

    total_compressed_data = 0
    for section in pe.sections:
        s_entropy = section.get_entropy()
        s_length = len(section.get_data())
        # The value of 7.4 is empirical, based on looking at a few files packed
        # by different packers
        if s_entropy > 7.4:
            total_compressed_data += s_length

    if ((1.0 * total_compressed_data) / total_pe_data_length) > .2:
        has_significant_amount_of_compressed_data = True

    return has_significant_amount_of_compressed_data
python
{ "resource": "" }
q24024
SignatureDatabase.generate_section_signatures
train
def generate_section_signatures(self, pe, name, sig_length=512):
    """Generates signatures for all the sections in a PE file.

    If the section contains any data a signature will be created
    for it. The signature name will be a combination of the
    parameter 'name' and the section number and its name.
    """

    section_signatures = list()

    for idx, section in enumerate(pe.sections):
        if section.SizeOfRawData < sig_length:
            continue

        #offset = pe.get_offset_from_rva(section.VirtualAddress)
        offset = section.PointerToRawData

        sig_name = '%s Section(%d/%d,%s)' % (
            name, idx + 1, len(pe.sections),
            ''.join([c for c in section.Name if c in string.printable]))

        section_signatures.append(
            self.__generate_signature(
                pe, offset, sig_name, ep_only=False,
                section_start_only=True,
                sig_length=sig_length))

    return '\n'.join(section_signatures) + '\n'
python
{ "resource": "" }
q24025
SignatureDatabase.generate_ep_signature
train
def generate_ep_signature(self, pe, name, sig_length=512):
    """Generate a signature for the entry point of a PE file.

    Creates a signature whose name will be the parameter 'name'.
    """
    offset = pe.get_offset_from_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)

    return self.__generate_signature(
        pe, offset, name, ep_only=True, sig_length=sig_length)
python
{ "resource": "" }
q24026
SignatureDatabase.__match_signature_tree
train
def __match_signature_tree(self, signature_tree, data, depth=0):
    """Recursive function to find matches along the signature tree.

    signature_tree  is the part of the tree left to walk
    data            is the data being checked against the signature tree
    depth           keeps track of how far we have gone down the tree
    """

    matched_names = list()
    match = signature_tree

    # Walk the bytes in the data and match them
    # against the signature
    #
    for idx, byte in enumerate([b if isinstance(b, int) else ord(b) for b in data]):

        # If the tree is exhausted...
        #
        if match is None:
            break

        # Get the next byte in the tree
        #
        match_next = match.get(byte, None)

        # If None is among the values for the key
        # it means that a signature in the database
        # ends here and that there's an exact match.
        #
        if None in list(match.values()):
            # idx represent how deep we are in the tree
            #
            #names = [idx+depth]
            names = list()

            # For each of the item pairs we check
            # if it has an element other than None,
            # if not then we have an exact signature
            #
            for item in list(match.items()):
                if item[1] is None:
                    names.append(item[0])
            matched_names.append(names)

        # If a wildcard is found keep scanning the signature
        # ignoring the byte.
        #
        if '??' in match:
            match_tree_alternate = match.get('??', None)
            data_remaining = data[idx + 1:]
            if data_remaining:
                matched_names.extend(
                    self.__match_signature_tree(
                        match_tree_alternate, data_remaining, idx + depth + 1))

        match = match_next

    # If we have any more packer name in the end of the signature tree
    # add them to the matches
    #
    if match is not None and None in list(match.values()):
        #names = [idx + depth + 1]
        names = list()
        for item in list(match.items()):
            if item[1] is None:
                names.append(item[0])
        matched_names.append(names)

    return matched_names
python
{ "resource": "" }
q24027
SignatureDatabase.load
train
def load(self, filename=None, data=None):
    """Load a PEiD signature file.

    Invoking this method on different files combines the signatures.
    """
    self.__load(filename=filename, data=data)
python
{ "resource": "" }
q24028
_read_doc
train
def _read_doc():
    """
    Parse docstring from file 'pefile.py' and avoid importing
    this module directly.
    """
    if sys.version_info.major == 2:
        with open('pefile.py', 'r') as f:
            tree = ast.parse(f.read())
    else:
        with open('pefile.py', 'r', encoding='utf-8') as f:
            tree = ast.parse(f.read())
    return ast.get_docstring(tree)
python
{ "resource": "" }
q24029
_read_attr
train
def _read_attr(attr_name):
    """
    Parse attribute from file 'pefile.py' and avoid importing
    this module directly.

    __version__, __author__, __contact__,
    """
    regex = attr_name + r"\s+=\s+'(.+)'"
    if sys.version_info.major == 2:
        with open('pefile.py', 'r') as f:
            match = re.search(regex, f.read())
    else:
        with open('pefile.py', 'r', encoding='utf-8') as f:
            match = re.search(regex, f.read())
    # The first capture group holds the value of the attribute.
    return match.group(1)
python
{ "resource": "" }
q24030
ordLookup
train
def ordLookup(libname, ord_val, make_name=False):
    '''
    Lookup a name for the given ordinal if it's in our
    database.
    '''
    names = ords.get(libname.lower())
    if names is None:
        if make_name is True:
            return formatOrdString(ord_val)
        return None
    name = names.get(ord_val)
    if name is None:
        return formatOrdString(ord_val)
    return name
python
{ "resource": "" }
q24031
retrieve_flags
train
def retrieve_flags(flag_dict, flag_filter):
    """Read the flags from a dictionary and return them in a usable form.

    Will return a list of (flag, value) for all flags in "flag_dict"
    matching the filter "flag_filter".
    """
    return [(flag, value) for flag, value in list(flag_dict.items())
            if isinstance(flag, (str, bytes)) and flag.startswith(flag_filter)]
python
{ "resource": "" }
q24032
UnicodeStringWrapperPostProcessor.ask_unicode_16
train
def ask_unicode_16(self, next_rva_ptr):
    """The next RVA is taken to be the one immediately following this one.

    Such RVA could indicate the natural end of the string and will be
    checked to see if there's a Unicode NULL character there.
    """
    if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
        self.length = next_rva_ptr - self.rva_ptr
        return True

    return False
python
{ "resource": "" }
q24033
Dump.add_lines
train
def add_lines(self, txt, indent=0):
    """Adds a list of lines.

    The list can be indented with the optional argument 'indent'.
    """
    for line in txt:
        self.add_line(line, indent)
python
{ "resource": "" }
q24034
Dump.add
train
def add(self, txt, indent=0):
    """Adds some text, no newline will be appended.

    The text can be indented with the optional argument 'indent'.
    """
    self.text.append(u'{0}{1}'.format(' ' * indent, txt))
python
{ "resource": "" }
q24035
Dump.get_text
train
def get_text(self):
    """Get the text in its current state."""
    return u''.join(u'{0}'.format(b) for b in self.text)
python
{ "resource": "" }
q24036
Structure.dump_dict
train
def dump_dict(self):
    """Returns a dictionary representation of the structure."""

    dump_dict = dict()

    dump_dict['Structure'] = self.name

    # Refer to the __set_format__ method for an explanation
    # of the following construct.
    for keys in self.__keys__:
        for key in keys:

            val = getattr(self, key)
            if isinstance(val, (int, long)):
                if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                    try:
                        val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))
                    except ValueError as e:
                        val = '0x%-8X [INVALID TIME]' % val
            else:
                val = ''.join(
                    chr(d) if chr(d) in string.printable else "\\x%02x" % d
                    for d in [ord(c) if not isinstance(c, int) else c for c in val])

            dump_dict[key] = {
                'FileOffset': self.__field_offsets__[key] + self.__file_offset__,
                'Offset': self.__field_offsets__[key],
                'Value': val}

    return dump_dict
python
{ "resource": "" }
q24037
SectionStructure.get_data
train
def get_data(self, start=None, length=None):
    """Get data chunk from a section.

    Allows to query data from the section by passing the
    addresses where the PE file would be loaded by default.
    It is then possible to retrieve code and data by their real
    addresses as they would be if loaded.

    Returns bytes() under Python 3.x and str() under Python 2.7
    """
    PointerToRawData_adj = self.pe.adjust_FileAlignment(
        self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment)
    VirtualAddress_adj = self.pe.adjust_SectionAlignment(
        self.VirtualAddress,
        self.pe.OPTIONAL_HEADER.SectionAlignment,
        self.pe.OPTIONAL_HEADER.FileAlignment)

    if start is None:
        offset = PointerToRawData_adj
    else:
        offset = (start - VirtualAddress_adj) + PointerToRawData_adj

    if length is not None:
        end = offset + length
    else:
        end = offset + self.SizeOfRawData

    # PointerToRawData is not adjusted here as we might want to read any possible
    # extra bytes that might get cut off by aligning the start (and hence cutting
    # something off the end)
    #
    if end > self.PointerToRawData + self.SizeOfRawData:
        end = self.PointerToRawData + self.SizeOfRawData

    return self.pe.__data__[offset:end]
python
{ "resource": "" }
q24038
PE.full_load
train
def full_load(self):
    """Process the data directories.

    This method will load the data directories which might not have
    been loaded if the "fast_load" option was used.
    """

    self.parse_data_directories()

    class RichHeader(object):
        pass

    rich_header = self.parse_rich_header()
    if rich_header:
        self.RICH_HEADER = RichHeader()
        self.RICH_HEADER.checksum = rich_header.get('checksum', None)
        self.RICH_HEADER.values = rich_header.get('values', None)
        self.RICH_HEADER.key = rich_header.get('key', None)
        self.RICH_HEADER.raw_data = rich_header.get('raw_data', None)
        self.RICH_HEADER.clear_data = rich_header.get('clear_data', None)
    else:
        self.RICH_HEADER = None
python
{ "resource": "" }
q24039
PE.write
train
def write(self, filename=None):
    """Write the PE file.

    This function will process all headers and components
    of the PE file and include all changes made (by just
    assigning to attributes in the PE objects) and write
    the changes back to a file whose name is provided as
    an argument. The filename is optional, if not
    provided the modified data is returned instead.
    """

    file_data = bytearray(self.__data__)
    for structure in self.__structures__:
        struct_data = bytearray(structure.__pack__())
        offset = structure.get_file_offset()
        file_data[offset:offset + len(struct_data)] = struct_data

    if hasattr(self, 'VS_VERSIONINFO'):
        if hasattr(self, 'FileInfo'):
            for finfo in self.FileInfo:
                for entry in finfo:
                    if hasattr(entry, 'StringTable'):
                        for st_entry in entry.StringTable:
                            for key, entry in list(st_entry.entries.items()):

                                # Offsets and lengths of the keys and values.
                                # Each value in the dictionary is a tuple:
                                #   (key length, value length)
                                # The lengths are in characters, not in bytes.
                                offsets = st_entry.entries_offsets[key]
                                lengths = st_entry.entries_lengths[key]

                                if len(entry) > lengths[1]:
                                    l = entry.decode('utf-8').encode('utf-16le')
                                    file_data[offsets[1]:offsets[1] + lengths[1] * 2] = l[:lengths[1] * 2]
                                else:
                                    encoded_data = entry.decode('utf-8').encode('utf-16le')
                                    file_data[offsets[1]:offsets[1] + len(encoded_data)] = encoded_data

    new_file_data = file_data
    if not filename:
        return new_file_data

    f = open(filename, 'wb+')
    f.write(new_file_data)
    f.close()
    return
python
{ "resource": "" }
q24040
PE.parse_data_directories
train
def parse_data_directories(self, directories=None,
                           forwarded_exports_only=False,
                           import_dllnames_only=False):
    """Parse and process the PE file's data directories.

    If the optional argument 'directories' is given, only the
    directories at the specified indexes will be parsed. Such
    functionality allows parsing of areas of interest without the
    burden of having to parse all others.

    The directories can then be specified as:

    For export / import only:

      directories = [ 0, 1 ]

    or (more verbosely):

      directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
        DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]

    If 'directories' is a list, the ones that are processed will be
    removed, leaving only the ones that are not present in the image.

    If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT
    attribute will only contain exports that are forwarded to another DLL.

    If `import_dllnames_only` is True, symbols will not be parsed from
    the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT
    attribute will not have a `symbols` attribute.
    """

    directory_parsing = (
        ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
        ('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
        ('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
        ('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
        ('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
        ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
        ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports))

    if directories is not None:
        if not isinstance(directories, (tuple, list)):
            directories = [directories]

    for entry in directory_parsing:
        # OC Patch:
        #
        try:
            directory_index = DIRECTORY_ENTRY[entry[0]]
            dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
        except IndexError:
            break

        # Only process all the directories if no individual ones have
        # been chosen
        #
        if directories is None or directory_index in directories:
            if dir_entry.VirtualAddress:
                if forwarded_exports_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_EXPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, forwarded_only=True)
                elif import_dllnames_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_IMPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, dllnames_only=True)
                else:
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
                if value:
                    setattr(self, entry[0][6:], value)

        if (directories is not None) and isinstance(directories, list) and (entry[0] in directories):
            directories.remove(directory_index)
python
{ "resource": "" }
q24041
PE.parse_resource_data_entry
train
def parse_resource_data_entry(self, rva):
    """Parse a data entry from the resources directory."""

    try:
        # If the RVA is invalid all would blow up. Some EXEs seem to be
        # specially nasty and have an invalid RVA.
        data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof())
    except PEFormatError as excp:
        self.__warnings.append(
            'Error parsing a resource directory data entry, '
            'the RVA is invalid: 0x%x' % (rva))
        return None

    data_entry = self.__unpack_data__(
        self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
        file_offset=self.get_offset_from_rva(rva))

    return data_entry
python
{ "resource": "" }
q24042
PE.get_memory_mapped_image
train
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
    """Returns the data corresponding to the memory layout of the PE file.

    The data includes the PE header and the sections loaded at offsets
    corresponding to their relative virtual addresses. (the VirtualAddress
    section header member).
    Any offset in this data corresponds to the absolute memory address
    ImageBase+offset.

    The optional argument 'max_virtual_address' provides with means of
    limiting which sections are processed.
    Any section with their VirtualAddress beyond this value will be skipped.
    Normally, sections with values beyond this range are just there to
    confuse tools. It's a common trick to see in packed executables.

    If the 'ImageBase' optional argument is supplied, the file's relocations
    will be applied to the image by calling the 'relocate_image()' method.
    Beware that the relocation information is applied permanently.
    """

    # Rebase if requested
    #
    if ImageBase is not None:
        # Keep a copy of the image's data before modifying it by rebasing it
        #
        original_data = self.__data__

        self.relocate_image(ImageBase)

    # Collect all sections in one code block
    mapped_data = self.__data__[:]
    for section in self.sections:

        # Miscellaneous integrity tests.
        # Some packer will set these to bogus values to make tools go nuts.
        if section.Misc_VirtualSize == 0 and section.SizeOfRawData == 0:
            continue

        srd = section.SizeOfRawData
        prd = self.adjust_FileAlignment(
            section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment)
        VirtualAddress_adj = self.adjust_SectionAlignment(
            section.VirtualAddress,
            self.OPTIONAL_HEADER.SectionAlignment,
            self.OPTIONAL_HEADER.FileAlignment)

        if (srd > len(self.__data__) or
                prd > len(self.__data__) or
                srd + prd > len(self.__data__) or
                VirtualAddress_adj >= max_virtual_address):
            continue

        padding_length = VirtualAddress_adj - len(mapped_data)

        if padding_length > 0:
            mapped_data += b'\0' * padding_length
        elif padding_length < 0:
            mapped_data = mapped_data[:padding_length]

        mapped_data += section.get_data()

    # If the image was rebased, restore it to its original form
    #
    if ImageBase is not None:
        self.__data__ = original_data

    return mapped_data
python
{ "resource": "" }
q24043
PE.get_string_from_data
train
def get_string_from_data(self, offset, data):
    """Get an ASCII string from data."""
    s = self.get_bytes_from_data(offset, data)
    end = s.find(b'\0')
    if end >= 0:
        s = s[:end]
    return s
python
{ "resource": "" }
q24044
PE.get_section_by_offset
train
def get_section_by_offset(self, offset):
    """Get the section containing the given file offset."""
    sections = [s for s in self.sections if s.contains_offset(offset)]
    if sections:
        return sections[0]
    return None
python
{ "resource": "" }
q24045
PE.set_dword_at_rva
train
def set_dword_at_rva(self, rva, dword):
    """Set the double word value at the file offset corresponding to the given RVA."""
    return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
python
{ "resource": "" }
q24046
PE.set_dword_at_offset
train
def set_dword_at_offset(self, offset, dword):
    """Set the double word value at the given file offset."""
    return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
python
{ "resource": "" }
q24047
PE.set_word_at_rva
train
def set_word_at_rva(self, rva, word):
    """Set the word value at the file offset corresponding to the given RVA."""
    return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
python
{ "resource": "" }
q24048
PE.set_word_at_offset
train
def set_word_at_offset(self, offset, word):
    """Set the word value at the given file offset."""
    return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
python
{ "resource": "" }
q24049
PE.set_qword_at_rva
train
def set_qword_at_rva(self, rva, qword):
    """Set the quad-word value at the file offset corresponding to the given RVA."""
    return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
python
{ "resource": "" }
q24050
PE.set_qword_at_offset
train
def set_qword_at_offset(self, offset, qword):
    """Set the quad-word value at the given file offset."""
    return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
python
{ "resource": "" }
q24051
PE.set_bytes_at_rva
train
def set_bytes_at_rva(self, rva, data):
    """Overwrite, with the given string, the bytes at the file offset
    corresponding to the given RVA.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')

    offset = self.get_physical_by_rva(rva)
    if not offset:
        return False

    return self.set_bytes_at_offset(offset, data)
python
{ "resource": "" }
q24052
PE.set_bytes_at_offset
train
def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given string.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')

    if 0 <= offset < len(self.__data__):
        self.__data__ = (self.__data__[:offset] + data +
                         self.__data__[offset + len(data):])
    else:
        return False

    return True
python
{ "resource": "" }
q24053
PE.relocate_image
train
def relocate_image(self, new_ImageBase):
    """Apply the relocation information to the image using the provided new image base.

    This method will apply the relocation information to the image. Given the
    new base, all the relocations will be processed and both the raw data and
    the section's data will be fixed accordingly.
    The resulting image can be retrieved as well through the method:

        get_memory_mapped_image()

    In order to get something that would more closely match what could be found
    in memory once the Windows loader finished its work.
    """

    relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase

    if self.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size:
        if not hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
            self.parse_data_directories(
                directories=[DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_BASERELOC']])
        for reloc in self.DIRECTORY_ENTRY_BASERELOC:

            virtual_address = reloc.struct.VirtualAddress
            size_of_block = reloc.struct.SizeOfBlock

            # We iterate with an index because if the relocation is of type
            # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
            # at once and skip it for the next iteration
            #
            entry_idx = 0
            while entry_idx < len(reloc.entries):

                entry = reloc.entries[entry_idx]
                entry_idx += 1

                if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
                    # Nothing to do for this type of relocation
                    pass

                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
                    # Fix the high 16-bits of a relocation
                    #
                    # Add high 16-bits of relocation_difference to the
                    # 16-bit value at RVA=entry.rva
                    self.set_word_at_rva(
                        entry.rva,
                        (self.get_word_at_rva(entry.rva) + relocation_difference >> 16) & 0xffff)

                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
                    # Fix the low 16-bits of a relocation
                    #
                    # Add low 16 bits of relocation_difference to the 16-bit value
                    # at RVA=entry.rva
                    self.set_word_at_rva(
                        entry.rva,
                        (self.get_word_at_rva(entry.rva) + relocation_difference) & 0xffff)

                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
                    # Handle all high and low parts of a 32-bit relocation
                    #
                    # Add relocation_difference to the value at RVA=entry.rva
                    self.set_dword_at_rva(
                        entry.rva,
                        self.get_dword_at_rva(entry.rva) + relocation_difference)

                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
                    # Fix the high 16-bits of a relocation and adjust
                    #
                    # Add high 16-bits of relocation_difference to the 32-bit value
                    # composed from the (16-bit value at RVA=entry.rva)<<16 plus
                    # the 16-bit value at the next relocation entry.
                    #

                    # If the next entry is beyond the array's limits,
                    # abort... the table is corrupt
                    #
                    if entry_idx == len(reloc.entries):
                        break

                    next_entry = reloc.entries[entry_idx]
                    entry_idx += 1
                    self.set_word_at_rva(
                        entry.rva,
                        ((self.get_word_at_rva(entry.rva) << 16) + next_entry.rva +
                         relocation_difference & 0xffff0000) >> 16)

                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
                    # Apply the difference to the 64-bit value at the offset
                    # RVA=entry.rva
                    self.set_qword_at_rva(
                        entry.rva,
                        self.get_qword_at_rva(entry.rva) + relocation_difference)

    self.OPTIONAL_HEADER.ImageBase = new_ImageBase

    # Correct VA (virtual address) occurrences in directory information.
    # Note: the attribute names set by parse_data_directories lack the
    # IMAGE_ prefix, so that is what must be checked for here.
    if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        for dll in self.DIRECTORY_ENTRY_IMPORT:
            for func in dll.imports:
                func.address += relocation_difference
    if hasattr(self, 'DIRECTORY_ENTRY_TLS'):
        self.DIRECTORY_ENTRY_TLS.struct.StartAddressOfRawData += relocation_difference
        self.DIRECTORY_ENTRY_TLS.struct.EndAddressOfRawData += relocation_difference
        self.DIRECTORY_ENTRY_TLS.struct.AddressOfIndex += relocation_difference
        self.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks += relocation_difference
    if hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable += relocation_difference
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList += relocation_difference
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie += relocation_difference
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable += relocation_difference
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer += relocation_difference
        if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable:
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable += relocation_difference
python
{ "resource": "" }
q24054
PE.is_exe
train
def is_exe(self):
    """Check whether the file is a standard executable.

    This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE
    flag set and the IMAGE_FILE_DLL not set and the file does not appear to be
    a driver either.
    """

    EXE_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']

    if (not self.is_dll()) and (not self.is_driver()) and (
            EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag:
        return True

    return False
python
{ "resource": "" }
q24055
PE.is_dll
train
def is_dll(self):
    """Check whether the file is a standard DLL.

    This will return true only if the image has the IMAGE_FILE_DLL flag set.
    """

    DLL_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']

    if (DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag:
        return True

    return False
python
{ "resource": "" }
q24056
PE.is_driver
train
def is_driver(self):
    """Check whether the file is a Windows driver.

    This will return true only if there are reliable indicators of the image
    being a driver.
    """

    # Checking that the ImageBase field of the OptionalHeader is above or
    # equal to 0x80000000 (that is, whether it lies in the upper 2GB of
    # the address space, normally belonging to the kernel) is not a
    # reliable enough indicator.  For instance, PEs that play the invalid
    # ImageBase trick to get relocated could be incorrectly assumed to be
    # drivers.

    # This is not reliable either...
    #
    # if any((section.Characteristics &
    #         SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for
    #        section in self.sections):
    #     return True

    # If the import directory was not parsed (fast_load = True); do it now.
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        self.parse_data_directories(directories=[
            DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])

    # If there's still no import directory (the PE doesn't have one or it's
    # malformed), give up.
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        return False

    # self.DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty.
    # If it imports from "ntoskrnl.exe" or other kernel components it should
    # be a driver
    #
    system_DLLs = set(
        (b'ntoskrnl.exe', b'hal.dll', b'ndis.sys', b'bootvid.dll', b'kdcom.dll'))
    if system_DLLs.intersection(
            [imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT]):
        return True

    driver_like_section_names = set(('page', 'paged'))
    if driver_like_section_names.intersection(
            [section.Name.lower().rstrip(b'\x00') for section in self.sections]) and (
            self.OPTIONAL_HEADER.Subsystem in (
                SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE'],
                SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE_WINDOWS'])):
        return True

    return False
python
{ "resource": "" }
q24057
PE.trim
train
def trim(self):
    """Return just the data defined by the PE headers, removing any overlay data."""
    overlay_data_offset = self.get_overlay_data_start_offset()

    if overlay_data_offset is not None:
        return self.__data__[:overlay_data_offset]

    return self.__data__[:]
python
{ "resource": "" }
q24058
ImageExtractor.filter_bad_names
train
def filter_bad_names(self, images):
    """\
    takes a list of image elements and filters out
    the ones with bad names
    """
    good_images = []
    for image in images:
        if self.is_valid_filename(image):
            good_images.append(image)
    return good_images if len(good_images) > 0 else None
python
{ "resource": "" }
q24059
ImageExtractor.get_local_image
train
def get_local_image(self, src):
    """\
    returns the bytes of the image file on disk
    """
    local_image = ImageUtils.store_image(None, self.link_hash, src, self.config)
    return local_image
python
{ "resource": "" }
q24060
smart_unicode
train
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a unicode object representing 's'. Treats bytestrings using the
    'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # if isinstance(s, Promise):
    #     # The input is the result of a gettext_lazy() call.
    #     return s
    return force_unicode(s, encoding, strings_only, errors)
python
{ "resource": "" }
q24061
force_unicode
train
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting. (Note: this record is Python 2 source, hence the py2-only
    # `except ..., e` syntax and unicode/basestring built-ins.)
    if isinstance(s, unicode):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, basestring):
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = u' '.join([force_unicode(arg, encoding, strings_only,
                                                 errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError, e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = u' '.join([force_unicode(arg, encoding, strings_only,
                                         errors) for arg in s])
    return s
python
{ "resource": "" }
q24062
ContentExtractor.get_language
train
def get_language(self):
    """\
    Returns the language set by the article or
    the configuration language
    """
    # we don't want to force the target language
    # so we use the article.meta_lang
    if self.config.use_meta_language:
        if self.article.meta_lang:
            return self.article.meta_lang[:2]
    return self.config.target_language
python
{ "resource": "" }
q24063
ContentExtractor.get_siblings_score
train
def get_siblings_score(self, top_node):
    """\
    we could have long articles that have tons of paragraphs
    so if we tried to calculate the base score against
    the total text score of those paragraphs it would be unfair.
    So we need to normalize the score based on the average scoring
    of the paragraphs within the top node.
    For example if our total score of 10 paragraphs was 1000
    but each had an average value of 100 then 100 should be our base.
    """
    base = 100000
    paragraphs_number = 0
    paragraphs_score = 0
    nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')

    for node in nodes_to_check:
        text_node = self.parser.getText(node)
        word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
        high_link_density = self.is_highlink_density(node)
        if word_stats.get_stopword_count() > 2 and not high_link_density:
            paragraphs_number += 1
            paragraphs_score += word_stats.get_stopword_count()

    if paragraphs_number > 0:
        base = paragraphs_score / paragraphs_number

    return base
python
{ "resource": "" }
q24064
ContentExtractor.update_score
train
def update_score(self, node, addToScore):
    """\
    adds a score to the gravityScore Attribute we put on divs
    we'll get the current score then add the score
    we're passing in to the current
    """
    current_score = 0
    score_string = self.parser.getAttribute(node, 'gravityScore')
    if score_string:
        current_score = int(score_string)

    new_score = current_score + addToScore
    self.parser.setAttribute(node, "gravityScore", str(new_score))
python
{ "resource": "" }
q24065
ContentExtractor.update_node_count
train
def update_node_count(self, node, add_to_count):
    """\
    stores how many decent nodes are under a parent node
    """
    current_score = 0
    count_string = self.parser.getAttribute(node, 'gravityNodes')
    if count_string:
        current_score = int(count_string)

    new_score = current_score + add_to_count
    self.parser.setAttribute(node, "gravityNodes", str(new_score))
python
{ "resource": "" }
q24066
ContentExtractor.is_highlink_density
train
def is_highlink_density(self, e):
    """\
    checks the density of links within a node,
    is there not much text and most of it contains linky shit?
    if so it's no good
    """
    links = self.parser.getElementsByTag(e, tag='a')
    if links is None or len(links) == 0:
        return False

    text = self.parser.getText(e)
    words = text.split(' ')
    words_number = float(len(words))
    sb = []
    for link in links:
        sb.append(self.parser.getText(link))

    linkText = ''.join(sb)
    linkWords = linkText.split(' ')
    numberOfLinkWords = float(len(linkWords))
    numberOfLinks = float(len(links))
    linkDivisor = float(numberOfLinkWords / words_number)
    score = float(linkDivisor * numberOfLinks)

    if score >= 1.0:
        return True

    return False
python
{ "resource": "" }
q24067
ContentExtractor.post_cleanup
train
def post_cleanup(self):
    """\
    remove any divs that looks like non-content,
    clusters of links, or paras with no gusto
    """
    targetNode = self.article.top_node
    node = self.add_siblings(targetNode)
    for e in self.parser.getChildren(node):
        e_tag = self.parser.getTag(e)
        if e_tag != 'p':
            if self.is_highlink_density(e) \
                    or self.is_table_and_no_para_exist(e) \
                    or not self.is_nodescore_threshold_met(node, e):
                self.parser.remove(e)
    return node
python
{ "resource": "" }
q24068
VideoExtractor.get_video
train
def get_video(self, node):
    """
    Create a video object from a video embed
    """
    video = Video()
    video.embed_code = self.get_embed_code(node)
    video.embed_type = self.get_embed_type(node)
    video.width = self.get_width(node)
    video.height = self.get_height(node)
    video.src = self.get_src(node)
    video.provider = self.get_provider(video.src)
    return video
python
{ "resource": "" }
q24069
ImageUtils.store_image
train
def store_image(self, http_client, link_hash, src, config):
    """\
    Writes an image src http string to disk as a temporary file
    and returns the LocallyStoredImage object
    that has the info you should need on the image
    """
    # check for a cache hit already on disk
    image = self.read_localfile(link_hash, src, config)
    if image:
        return image

    # no cache found; download the image
    data = self.fetch(http_client, src)
    if data:
        image = self.write_localfile(data, link_hash, src, config)
        if image:
            return image

    return None
python
{ "resource": "" }
q24070
OutputFormatter.remove_negativescores_nodes
train
def remove_negativescores_nodes(self):
    """\
    if there are elements inside our top node
    that have a negative gravity score,
    let's give em the boot
    """
    gravity_items = self.parser.css_select(self.top_node, "*[gravityScore]")
    for item in gravity_items:
        score = self.parser.getAttribute(item, 'gravityScore')
        score = int(score, 0)
        if score < 1:
            item.getparent().remove(item)
python
{ "resource": "" }
q24071
OutputFormatter.remove_fewwords_paragraphs
train
def remove_fewwords_paragraphs(self):
    """\
    remove paragraphs that have less than x number of words,
    would indicate that it's some sort of link
    """
    all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['*'])
    all_nodes.reverse()
    for el in all_nodes:
        tag = self.parser.getTag(el)
        text = self.parser.getText(el)
        stop_words = self.stopwords_class(language=self.get_language()).get_stopword_count(text)
        if (tag != 'br' or text != '\\r') \
                and stop_words.get_stopword_count() < 3 \
                and len(self.parser.getElementsByTag(el, tag='object')) == 0 \
                and len(self.parser.getElementsByTag(el, tag='embed')) == 0:
            self.parser.remove(el)
        # TODO: check if it is in the right place
        else:
            trimmed = self.parser.getText(el)
            if trimmed.startswith("(") and trimmed.endswith(")"):
                self.parser.remove(el)
python
{ "resource": "" }
q24072
Goose.extract
train
def extract(self, url=None, raw_html=None):
        """\
        Main method to extract an article object from a URL,
        pass in a url and get back an Article
        """
        cc = CrawlCandidate(self.config, url, raw_html)
        return self.crawl(cc)
python
{ "resource": "" }
q24073
MetasExtractor.get_meta_lang
train
def get_meta_lang(self):
        """\
        Extract content language from meta
        """
        # we have a lang attribute in html
        attr = self.parser.getAttribute(self.article.doc, attr='lang')
        if attr is None:
            # look for a Content-Language in meta
            items = [
                {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},
                {'tag': 'meta', 'attr': 'name', 'value': 'lang'}
            ]
            for item in items:
                meta = self.parser.getElementsByTag(self.article.doc, **item)
                if meta:
                    attr = self.parser.getAttribute(meta[0], attr='content')
                    break
        if attr:
            value = attr[:2]
            if re.search(RE_LANG, value):
                return value.lower()
        return None
python
{ "resource": "" }
q24074
MetasExtractor.get_meta_content
train
def get_meta_content(self, metaName):
        """\
        Extract a given meta content from the document
        """
        meta = self.parser.css_select(self.article.doc, metaName)
        content = None
        if meta is not None and len(meta) > 0:
            content = self.parser.getAttribute(meta[0], 'content')
        if content:
            return content.strip()
        return ''
python
{ "resource": "" }
q24075
BaseSymbolic._verbose_reporter
train
def _verbose_reporter(self, run_details=None):
        """A report of the progress of the evolution process.

        Parameters
        ----------
        run_details : dict
            Information about the evolution.

        """
        if run_details is None:
            print('    |{:^25}|{:^42}|'.format('Population Average',
                                               'Best Individual'))
            print('-' * 4 + ' ' + '-' * 25 + ' ' + '-' * 42 + ' ' + '-' * 10)
            line_format = '{:>4} {:>8} {:>16} {:>8} {:>16} {:>16} {:>10}'
            print(line_format.format('Gen', 'Length', 'Fitness', 'Length',
                                     'Fitness', 'OOB Fitness', 'Time Left'))
        else:
            # Estimate remaining time for run
            gen = run_details['generation'][-1]
            generation_time = run_details['generation_time'][-1]
            remaining_time = (self.generations - gen - 1) * generation_time
            if remaining_time > 60:
                remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
            else:
                remaining_time = '{0:.2f}s'.format(remaining_time)

            oob_fitness = 'N/A'
            line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:>16} {:>10}'
            if self.max_samples < 1.0:
                oob_fitness = run_details['best_oob_fitness'][-1]
                line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:16g} {:>10}'

            print(line_format.format(run_details['generation'][-1],
                                     run_details['average_length'][-1],
                                     run_details['average_fitness'][-1],
                                     run_details['best_length'][-1],
                                     run_details['best_fitness'][-1],
                                     oob_fitness,
                                     remaining_time))
python
{ "resource": "" }
q24076
SymbolicRegressor.predict
train
def predict(self, X):
        """Perform regression on test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input vectors, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        y : array, shape = [n_samples]
            Predicted values for X.

        """
        if not hasattr(self, '_program'):
            raise NotFittedError('SymbolicRegressor not fitted.')

        X = check_array(X)
        _, n_features = X.shape
        if self.n_features_ != n_features:
            raise ValueError('Number of features of the model must match the '
                             'input. Model n_features is %s and input '
                             'n_features is %s.'
                             % (self.n_features_, n_features))

        y = self._program.execute(X)

        return y
python
{ "resource": "" }
q24077
SymbolicClassifier.predict_proba
train
def predict_proba(self, X):
        """Predict probabilities on test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input vectors, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        proba : array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.

        """
        if not hasattr(self, '_program'):
            raise NotFittedError('SymbolicClassifier not fitted.')

        X = check_array(X)
        _, n_features = X.shape
        if self.n_features_ != n_features:
            raise ValueError('Number of features of the model must match the '
                             'input. Model n_features is %s and input '
                             'n_features is %s.'
                             % (self.n_features_, n_features))

        scores = self._program.execute(X)
        proba = self._transformer(scores)
        proba = np.vstack([1 - proba, proba]).T
        return proba
python
{ "resource": "" }
q24078
SymbolicClassifier.predict
train
def predict(self, X):
        """Predict classes on test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input vectors, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        y : array, shape = [n_samples,]
            The predicted classes of the input samples.

        """
        proba = self.predict_proba(X)
        return self.classes_.take(np.argmax(proba, axis=1), axis=0)
python
{ "resource": "" }
q24079
SymbolicTransformer.transform
train
def transform(self, X):
        """Transform X according to the fitted transformer.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input vectors, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape = [n_samples, n_components]
            Transformed array.

        """
        if not hasattr(self, '_best_programs'):
            raise NotFittedError('SymbolicTransformer not fitted.')

        X = check_array(X)
        _, n_features = X.shape
        if self.n_features_ != n_features:
            raise ValueError('Number of features of the model must match the '
                             'input. Model n_features is %s and input '
                             'n_features is %s.'
                             % (self.n_features_, n_features))

        X_new = np.array([gp.execute(X) for gp in self._best_programs]).T

        return X_new
python
{ "resource": "" }
q24080
make_fitness
train
def make_fitness(function, greater_is_better):
    """Make a fitness measure, a metric scoring the quality of a program's fit.

    This factory function creates a fitness measure object which measures the
    quality of a program's fit and thus its likelihood to undergo genetic
    operations into the next generation. The resulting object is able to be
    called with NumPy vectorized arguments and return a resulting floating
    point score quantifying the quality of the program's representation of
    the true relationship.

    Parameters
    ----------
    function : callable
        A function with signature function(y, y_pred, sample_weight) that
        returns a floating point number. Where `y` is the input target y
        vector, `y_pred` is the predicted values from the genetic program,
        and sample_weight is the sample_weight vector.

    greater_is_better : bool
        Whether a higher value from `function` indicates a better fit. In
        general this would be False for metrics indicating the magnitude of
        the error, and True for metrics indicating the quality of fit.

    """
    if not isinstance(greater_is_better, bool):
        raise ValueError('greater_is_better must be bool, got %s'
                         % type(greater_is_better))
    if function.__code__.co_argcount != 3:
        raise ValueError('function requires 3 arguments (y, y_pred, w),'
                         ' got %d.' % function.__code__.co_argcount)
    if not isinstance(function(np.array([1, 1]),
                               np.array([2, 2]),
                               np.array([1, 1])), numbers.Number):
        raise ValueError('function must return a numeric.')

    return _Fitness(function, greater_is_better)
python
{ "resource": "" }
q24081
_weighted_pearson
train
def _weighted_pearson(y, y_pred, w):
    """Calculate the weighted Pearson correlation coefficient."""
    with np.errstate(divide='ignore', invalid='ignore'):
        y_pred_demean = y_pred - np.average(y_pred, weights=w)
        y_demean = y - np.average(y, weights=w)
        corr = ((np.sum(w * y_pred_demean * y_demean) / np.sum(w)) /
                np.sqrt((np.sum(w * y_pred_demean ** 2) *
                         np.sum(w * y_demean ** 2)) /
                        (np.sum(w) ** 2)))
    if np.isfinite(corr):
        return np.abs(corr)
    return 0.
python
{ "resource": "" }
q24082
_weighted_spearman
train
def _weighted_spearman(y, y_pred, w):
    """Calculate the weighted Spearman correlation coefficient."""
    y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred)
    y_ranked = np.apply_along_axis(rankdata, 0, y)
    return _weighted_pearson(y_pred_ranked, y_ranked, w)
python
{ "resource": "" }
q24083
_mean_absolute_error
train
def _mean_absolute_error(y, y_pred, w):
    """Calculate the mean absolute error."""
    return np.average(np.abs(y_pred - y), weights=w)
python
{ "resource": "" }
q24084
_mean_square_error
train
def _mean_square_error(y, y_pred, w):
    """Calculate the mean square error."""
    return np.average(((y_pred - y) ** 2), weights=w)
python
{ "resource": "" }
q24085
_root_mean_square_error
train
def _root_mean_square_error(y, y_pred, w):
    """Calculate the root mean square error."""
    return np.sqrt(np.average(((y_pred - y) ** 2), weights=w))
python
{ "resource": "" }
q24086
_log_loss
train
def _log_loss(y, y_pred, w):
    """Calculate the log loss."""
    eps = 1e-15
    inv_y_pred = np.clip(1 - y_pred, eps, 1 - eps)
    y_pred = np.clip(y_pred, eps, 1 - eps)
    score = y * np.log(y_pred) + (1 - y) * np.log(inv_y_pred)
    return np.average(-score, weights=w)
python
{ "resource": "" }
q24087
_Program.build_program
train
def build_program(self, random_state):
        """Build a naive random program.

        Parameters
        ----------
        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        program : list
            The flattened tree representation of the program.

        """
        if self.init_method == 'half and half':
            method = ('full' if random_state.randint(2) else 'grow')
        else:
            method = self.init_method
        max_depth = random_state.randint(*self.init_depth)

        # Start a program with a function to avoid degenerative programs
        function = random_state.randint(len(self.function_set))
        function = self.function_set[function]
        program = [function]
        terminal_stack = [function.arity]

        while terminal_stack:
            depth = len(terminal_stack)
            choice = self.n_features + len(self.function_set)
            choice = random_state.randint(choice)
            # Determine if we are adding a function or terminal
            if (depth < max_depth) and (method == 'full' or
                                        choice <= len(self.function_set)):
                function = random_state.randint(len(self.function_set))
                function = self.function_set[function]
                program.append(function)
                terminal_stack.append(function.arity)
            else:
                # We need a terminal, add a variable or constant
                if self.const_range is not None:
                    terminal = random_state.randint(self.n_features + 1)
                else:
                    terminal = random_state.randint(self.n_features)
                if terminal == self.n_features:
                    terminal = random_state.uniform(*self.const_range)
                    if self.const_range is None:
                        # We should never get here
                        raise ValueError('A constant was produced with '
                                         'const_range=None.')
                program.append(terminal)
                terminal_stack[-1] -= 1
                while terminal_stack[-1] == 0:
                    terminal_stack.pop()
                    if not terminal_stack:
                        return program
                    terminal_stack[-1] -= 1

        # We should never get here
        return None
python
{ "resource": "" }
q24088
_Program.validate_program
train
def validate_program(self):
        """Rough check that the embedded program in the object is valid."""
        terminals = [0]
        for node in self.program:
            if isinstance(node, _Function):
                terminals.append(node.arity)
            else:
                terminals[-1] -= 1
                while terminals[-1] == 0:
                    terminals.pop()
                    terminals[-1] -= 1
        return terminals == [-1]
python
{ "resource": "" }
q24089
_Program.export_graphviz
train
def export_graphviz(self, fade_nodes=None):
        """Returns a string, Graphviz script for visualizing the program.

        Parameters
        ----------
        fade_nodes : list, optional
            A list of node indices to fade out for showing which were removed
            during evolution.

        Returns
        -------
        output : string
            The Graphviz script to plot the tree representation of the
            program.

        """
        terminals = []
        if fade_nodes is None:
            fade_nodes = []
        output = 'digraph program {\nnode [style=filled]\n'
        for i, node in enumerate(self.program):
            fill = '#cecece'
            if isinstance(node, _Function):
                if i not in fade_nodes:
                    fill = '#136ed4'
                terminals.append([node.arity, i])
                output += ('%d [label="%s", fillcolor="%s"] ;\n'
                           % (i, node.name, fill))
            else:
                if i not in fade_nodes:
                    fill = '#60a6f6'
                if isinstance(node, int):
                    if self.feature_names is None:
                        feature_name = 'X%s' % node
                    else:
                        feature_name = self.feature_names[node]
                    output += ('%d [label="%s", fillcolor="%s"] ;\n'
                               % (i, feature_name, fill))
                else:
                    output += ('%d [label="%.3f", fillcolor="%s"] ;\n'
                               % (i, node, fill))
                if i == 0:
                    # A degenerative program of only one node
                    return output + '}'
                terminals[-1][0] -= 1
                terminals[-1].append(i)
                while terminals[-1][0] == 0:
                    output += '%d -> %d ;\n' % (terminals[-1][1],
                                                terminals[-1][-1])
                    terminals[-1].pop()
                    if len(terminals[-1]) == 2:
                        parent = terminals[-1][-1]
                        terminals.pop()
                        if not terminals:
                            return output + '}'
                        terminals[-1].append(parent)
                    terminals[-1][0] -= 1

        # We should never get here
        return None
python
{ "resource": "" }
q24090
_Program._depth
train
def _depth(self):
        """Calculates the maximum depth of the program tree."""
        terminals = [0]
        depth = 1
        for node in self.program:
            if isinstance(node, _Function):
                terminals.append(node.arity)
                depth = max(len(terminals), depth)
            else:
                terminals[-1] -= 1
                while terminals[-1] == 0:
                    terminals.pop()
                    terminals[-1] -= 1
        return depth - 1
python
{ "resource": "" }
q24091
_Program.execute
train
def execute(self, X):
        """Execute the program according to X.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        y_hats : array-like, shape = [n_samples]
            The result of executing the program on X.

        """
        # Check for single-node programs
        node = self.program[0]
        if isinstance(node, float):
            return np.repeat(node, X.shape[0])
        if isinstance(node, int):
            return X[:, node]

        apply_stack = []

        for node in self.program:

            if isinstance(node, _Function):
                apply_stack.append([node])
            else:
                # Lazily evaluate later
                apply_stack[-1].append(node)

            while len(apply_stack[-1]) == apply_stack[-1][0].arity + 1:
                # Apply functions that have sufficient arguments
                function = apply_stack[-1][0]
                terminals = [np.repeat(t, X.shape[0]) if isinstance(t, float)
                             else X[:, t] if isinstance(t, int)
                             else t for t in apply_stack[-1][1:]]
                intermediate_result = function(*terminals)
                if len(apply_stack) != 1:
                    apply_stack.pop()
                    apply_stack[-1].append(intermediate_result)
                else:
                    return intermediate_result

        # We should never get here
        return None
python
{ "resource": "" }
q24092
_Program.get_all_indices
train
def get_all_indices(self, n_samples=None, max_samples=None,
                    random_state=None):
        """Get the indices on which to evaluate the fitness of a program.

        Parameters
        ----------
        n_samples : int
            The number of samples.

        max_samples : int
            The maximum number of samples to use.

        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        indices : array-like, shape = [n_samples]
            The in-sample indices.

        not_indices : array-like, shape = [n_samples]
            The out-of-sample indices.

        """
        if self._indices_state is None and random_state is None:
            raise ValueError('The program has not been evaluated for fitness '
                             'yet, indices not available.')

        if n_samples is not None and self._n_samples is None:
            self._n_samples = n_samples
        if max_samples is not None and self._max_samples is None:
            self._max_samples = max_samples
        if random_state is not None and self._indices_state is None:
            self._indices_state = random_state.get_state()

        indices_state = check_random_state(None)
        indices_state.set_state(self._indices_state)

        not_indices = sample_without_replacement(
            self._n_samples,
            self._n_samples - self._max_samples,
            random_state=indices_state)
        sample_counts = np.bincount(not_indices, minlength=self._n_samples)
        indices = np.where(sample_counts == 0)[0]

        return indices, not_indices
python
{ "resource": "" }
q24093
_Program.raw_fitness
train
def raw_fitness(self, X, y, sample_weight):
        """Evaluate the raw fitness of the program according to X, y.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples]
            Weights applied to individual samples.

        Returns
        -------
        raw_fitness : float
            The raw fitness of the program.

        """
        y_pred = self.execute(X)
        if self.transformer:
            y_pred = self.transformer(y_pred)
        raw_fitness = self.metric(y, y_pred, sample_weight)

        return raw_fitness
python
{ "resource": "" }
q24094
_Program.fitness
train
def fitness(self, parsimony_coefficient=None):
        """Evaluate the penalized fitness of the program according to X, y.

        Parameters
        ----------
        parsimony_coefficient : float, optional
            If automatic parsimony is being used, the computed value according
            to the population. Otherwise the initialized value is used.

        Returns
        -------
        fitness : float
            The penalized fitness of the program.

        """
        if parsimony_coefficient is None:
            parsimony_coefficient = self.parsimony_coefficient
        penalty = parsimony_coefficient * len(self.program) * self.metric.sign
        return self.raw_fitness_ - penalty
python
{ "resource": "" }
q24095
_Program.get_subtree
train
def get_subtree(self, random_state, program=None):
        """Get a random subtree from the program.

        Parameters
        ----------
        random_state : RandomState instance
            The random number generator.

        program : list, optional (default=None)
            The flattened tree representation of the program. If None, the
            embedded tree in the object will be used.

        Returns
        -------
        start, end : tuple of two ints
            The indices of the start and end of the random subtree.

        """
        if program is None:
            program = self.program
        # Choice of crossover points follows Koza's (1992) widely used
        # approach of choosing functions 90% of the time and leaves 10% of
        # the time.
        probs = np.array([0.9 if isinstance(node, _Function) else 0.1
                          for node in program])
        probs = np.cumsum(probs / probs.sum())
        start = np.searchsorted(probs, random_state.uniform())

        stack = 1
        end = start
        while stack > end - start:
            node = program[end]
            if isinstance(node, _Function):
                stack += node.arity
            end += 1

        return start, end
python
{ "resource": "" }
q24096
_Program.crossover
train
def crossover(self, donor, random_state):
        """Perform the crossover genetic operation on the program.

        Crossover selects a random subtree from the embedded program to be
        replaced. A donor also has a subtree selected at random and this is
        inserted into the original parent to form an offspring.

        Parameters
        ----------
        donor : list
            The flattened tree representation of the donor program.

        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        program : list
            The flattened tree representation of the program.

        """
        # Get a subtree to replace
        start, end = self.get_subtree(random_state)
        removed = range(start, end)
        # Get a subtree to donate
        donor_start, donor_end = self.get_subtree(random_state, donor)
        donor_removed = list(set(range(len(donor))) -
                             set(range(donor_start, donor_end)))
        # Insert genetic material from donor
        return (self.program[:start] +
                donor[donor_start:donor_end] +
                self.program[end:]), removed, donor_removed
python
{ "resource": "" }
q24097
_Program.subtree_mutation
train
def subtree_mutation(self, random_state):
        """Perform the subtree mutation operation on the program.

        Subtree mutation selects a random subtree from the embedded program to
        be replaced. A donor subtree is generated at random and this is
        inserted into the original parent to form an offspring. This
        implementation uses the "headless chicken" method where the donor
        subtree is grown using the initialization methods and a subtree of it
        is selected to be donated to the parent.

        Parameters
        ----------
        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        program : list
            The flattened tree representation of the program.

        """
        # Build a new naive program
        chicken = self.build_program(random_state)
        # Do subtree mutation via the headless chicken method!
        return self.crossover(chicken, random_state)
python
{ "resource": "" }
q24098
_Program.hoist_mutation
train
def hoist_mutation(self, random_state):
        """Perform the hoist mutation operation on the program.

        Hoist mutation selects a random subtree from the embedded program to
        be replaced. A random subtree of that subtree is then selected and
        this is 'hoisted' into the original subtree's location to form an
        offspring. This method helps to control bloat.

        Parameters
        ----------
        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        program : list
            The flattened tree representation of the program.

        """
        # Get a subtree to replace
        start, end = self.get_subtree(random_state)
        subtree = self.program[start:end]
        # Get a subtree of the subtree to hoist
        sub_start, sub_end = self.get_subtree(random_state, subtree)
        hoist = subtree[sub_start:sub_end]
        # Determine which nodes were removed for plotting
        removed = list(set(range(start, end)) -
                       set(range(start + sub_start, start + sub_end)))
        return self.program[:start] + hoist + self.program[end:], removed
python
{ "resource": "" }
q24099
_Program.point_mutation
train
def point_mutation(self, random_state):
        """Perform the point mutation operation on the program.

        Point mutation selects random nodes from the embedded program to be
        replaced. Terminals are replaced by other terminals and functions are
        replaced by other functions that require the same number of arguments
        as the original node. The resulting tree forms an offspring.

        Parameters
        ----------
        random_state : RandomState instance
            The random number generator.

        Returns
        -------
        program : list
            The flattened tree representation of the program.

        """
        program = copy(self.program)

        # Get the nodes to modify
        mutate = np.where(random_state.uniform(size=len(program)) <
                          self.p_point_replace)[0]

        for node in mutate:
            if isinstance(program[node], _Function):
                arity = program[node].arity
                # Find a valid replacement with same arity
                replacement = len(self.arities[arity])
                replacement = random_state.randint(replacement)
                replacement = self.arities[arity][replacement]
                program[node] = replacement
            else:
                # We've got a terminal, add a const or variable
                if self.const_range is not None:
                    terminal = random_state.randint(self.n_features + 1)
                else:
                    terminal = random_state.randint(self.n_features)
                if terminal == self.n_features:
                    terminal = random_state.uniform(*self.const_range)
                    if self.const_range is None:
                        # We should never get here
                        raise ValueError('A constant was produced with '
                                         'const_range=None.')
                program[node] = terminal

        return program, list(mutate)
python
{ "resource": "" }