idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
12,600 | def get_all ( self ) : for name , values in self . _map . items ( ) : for value in values : yield ( name , value ) | Return an iterator of name - value pairs . |
12,601 | def to_str ( self ) : pairs = [ ] for name , value in self . get_all ( ) : if value and self . _wrap_width : pairs . append ( '{0}:{1}' . format ( name , '\r\n' . join ( textwrap . wrap ( value , width = self . _wrap_width , drop_whitespace = False , initial_indent = ' ' , subsequent_indent = ' ' ) ) ) ) elif value : pairs . append ( '{0}: {1}' . format ( name , value ) ) else : pairs . append ( '{0}:' . format ( name ) ) pairs . append ( '' ) return '\r\n' . join ( pairs ) | Convert to string . |
12,602 | def to_bytes ( self , errors = 'strict' ) : return str ( self ) . encode ( self . encoding , errors = errors ) | Convert to bytes . |
12,603 | def recycle ( self ) : for connection in self . _connections : self . _connection_pool . no_wait_release ( connection ) self . _connections . clear ( ) | Clean up and return connections back to the pool . |
12,604 | def session ( self ) -> SessionT : session = self . _session_class ( ) ( connection_pool = self . _connection_pool , ) self . event_dispatcher . notify ( self . ClientEvent . new_session , session ) return session | Return a new session . |
12,605 | def count_cookies ( self , domain ) : cookies = self . cookie_jar . _cookies if domain in cookies : return sum ( [ len ( cookie ) for cookie in cookies [ domain ] . values ( ) ] ) else : return 0 | Return the number of cookies for the given domain . |
12,606 | def cookie_length ( self , domain ) : cookies = self . cookie_jar . _cookies if domain not in cookies : return 0 length = 0 for path in cookies [ domain ] : for name in cookies [ domain ] [ path ] : cookie = cookies [ domain ] [ path ] [ name ] length += len ( path ) + len ( name ) + len ( cookie . value or '' ) return length | Return approximate length of all cookie key - values for a domain . |
12,607 | def guess_listing_type ( lines , threshold = 100 ) : scores = { 'unix' : 0 , 'msdos' : 0 , 'nlst' : 0 , } for line in lines : if not line : continue if re . search ( r'---|r--|rw-|rwx' , line ) : scores [ 'unix' ] += 1 if '<DIR>' in line or re . search ( r'^.{0,4}\d\d' , line ) : scores [ 'msdos' ] += 1 words = line . split ( ' ' , 1 ) if len ( words ) == 1 : scores [ 'nlst' ] += 1 if max ( scores . values ( ) ) > threshold : break top = max ( scores . items ( ) , key = lambda item : item [ 1 ] ) if top [ 1 ] : return top [ 0 ] else : return 'unknown' | Guess the style of directory listing . |
12,608 | def parse_unix_perm ( text ) : if len ( text ) != 9 : return 0 perms = 0 for triad_index in range ( 3 ) : string_index = triad_index * 3 perms <<= 3 if text [ string_index ] == 'r' : perms |= 1 << 2 if text [ string_index + 1 ] == 'w' : perms |= 1 << 1 if text [ string_index + 2 ] in 'xs' : perms |= 1 return perms | Parse a Unix permission string and return integer value . |
12,609 | def parse ( self , lines ) : if self . type == 'msdos' : return self . parse_msdos ( lines ) elif self . type == 'unix' : return self . parse_unix ( lines ) elif self . type == 'nlst' : return self . parse_nlst ( lines ) else : raise UnknownListingError ( 'Unsupported listing type.' ) | Parse the lines . |
12,610 | def parse_datetime ( self , text ) : return parse_datetime ( text , date_format = self . date_format , is_day_period = self . is_day_period ) | Parse datetime from line of text . |
12,611 | def parse_msdos ( self , lines ) : for line in lines : fields = line . split ( None , 4 ) date_str = fields [ 0 ] time_str = fields [ 1 ] datetime_str = '{} {}' . format ( date_str , time_str ) file_datetime = self . parse_datetime ( datetime_str ) [ 0 ] if fields [ 2 ] == '<DIR>' : file_size = None file_type = 'dir' else : file_size = parse_int ( fields [ 2 ] ) file_type = 'file' filename = fields [ 3 ] yield FileEntry ( filename , file_type , file_size , file_datetime ) | Parse lines from a MS - DOS format . |
12,612 | def parse_unix ( self , lines ) : for line in lines : original_line = line fields = line . split ( ' ' ) after_perm_index = 0 for field in fields : after_perm_index += len ( field ) if not field : continue if field [ 0 ] in 'bcdlps-' : if field [ 0 ] == 'd' : file_type = 'dir' elif field [ 0 ] == '-' : file_type = 'file' elif field [ 0 ] == 'l' : file_type = 'symlink' else : file_type = 'other' perms = parse_unix_perm ( field [ 1 : ] ) break else : raise ListingError ( 'Failed to parse file type.' ) line = line [ after_perm_index : ] while line : try : datetime_obj , start_index , end_index = self . parse_datetime ( line ) except ValueError : line = line [ 4 : ] else : break else : raise ListingError ( 'Could parse a date from {}' . format ( repr ( original_line ) ) ) file_size = int ( line [ : start_index ] . rstrip ( ) . rpartition ( ' ' ) [ - 1 ] ) filename = line [ end_index : ] . strip ( ) if file_type == 'symlink' : filename , sep , symlink_dest = filename . partition ( ' -> ' ) else : symlink_dest = None yield FileEntry ( filename , file_type , file_size , datetime_obj , symlink_dest , perm = perms ) | Parse listings from a Unix ls command format . |
12,613 | def parse_input ( self ) : if self . _text : lines = iter ( self . _text . splitlines ( ) ) elif self . _file : lines = self . _file else : lines = ( ) sample_lines = [ ] for line in lines : if len ( sample_lines ) > 100 : break sample_lines . append ( line ) lines = itertools . chain ( sample_lines , lines ) self . guess_type ( sample_lines ) datetime_format = wpull . protocol . ftp . ls . date . guess_datetime_format ( sample_lines ) self . set_datetime_format ( datetime_format ) return self . parse ( lines ) | Parse the listings . |
12,614 | def open_file ( cls , filename : str , response : BaseResponse , mode = 'wb+' ) : _logger . debug ( 'Saving file to {0}, mode={1}.' , filename , mode ) dir_path = os . path . dirname ( filename ) if dir_path and not os . path . exists ( dir_path ) : os . makedirs ( dir_path ) response . body = Body ( open ( filename , mode ) ) | Open a file object on to the Response Body . |
12,615 | def set_timestamp ( cls , filename : str , response : HTTPResponse ) : last_modified = response . fields . get ( 'Last-Modified' ) if not last_modified : return try : last_modified = email . utils . parsedate ( last_modified ) except ValueError : _logger . exception ( 'Failed to parse date.' ) return last_modified = time . mktime ( last_modified ) os . utime ( filename , ( time . time ( ) , last_modified ) ) | Set the Last - Modified timestamp onto the given file . |
12,616 | def save_headers ( cls , filename : str , response : HTTPResponse ) : new_filename = filename + '-new' with open ( 'wb' ) as new_file : new_file . write ( response . header ( ) ) with wpull . util . reset_file_offset ( response . body ) : response . body . seek ( 0 ) shutil . copyfileobj ( response . body , new_file ) os . remove ( filename ) os . rename ( new_filename , filename ) | Prepend the HTTP response header to the file . |
12,617 | def _compute_filename ( self , request : BaseRequest ) : path = self . _path_namer . get_filename ( request . url_info ) if os . path . isdir ( path ) : path += '.f' else : dir_name , name = os . path . split ( path ) path = os . path . join ( anti_clobber_dir_path ( dir_name ) , name ) return path | Get the appropriate filename from the request . |
12,618 | def _process_file_continue_request ( self , request : BaseRequest ) : if os . path . exists ( self . _filename ) : size = os . path . getsize ( self . _filename ) request . set_continue ( size ) self . _file_continue_requested = True _logger . debug ( 'Continue file from {0}.' , size ) else : _logger . debug ( 'No file to continue.' ) | Modify the request to resume downloading file . |
12,619 | def _process_file_continue_response ( self , response : HTTPResponse ) : code = response . status_code if code == http . client . PARTIAL_CONTENT : self . open_file ( self . _filename , response , mode = 'ab+' ) else : self . _raise_cannot_continue_error ( ) | Process a partial content response . |
12,620 | def _process_file_continue_ftp_response ( self , response : FTPResponse ) : if response . request . restart_value and response . restart_value : self . open_file ( self . _filename , response , mode = 'ab+' ) else : self . _raise_cannot_continue_error ( ) | Process a restarted content response . |
12,621 | def _rename_with_content_disposition ( self , response : HTTPResponse ) : if not self . _filename : return if response . request . url_info . scheme not in ( 'http' , 'https' ) : return header_value = response . fields . get ( 'Content-Disposition' ) if not header_value : return filename = parse_content_disposition ( header_value ) if filename : dir_path = os . path . dirname ( self . _filename ) new_filename = self . _path_namer . safe_filename ( filename ) self . _filename = os . path . join ( dir_path , new_filename ) | Rename using the Content - Disposition header . |
12,622 | def session ( self ) -> BaseFileWriterSession : return self . session_class ( self . _path_namer , self . _file_continuing , self . _headers_included , self . _local_timestamping , self . _adjust_extension , self . _content_disposition , self . _trust_server_names , ) | Return the File Writer Session . |
12,623 | def parse_charset ( header_string ) : match = re . search ( r , header_string , re . IGNORECASE ) if match : return match . group ( 1 ) | Parse a Content - Type string for the document encoding . |
12,624 | def should_close ( http_version , connection_field ) : connection_field = ( connection_field or '' ) . lower ( ) if http_version == 'HTTP/1.0' : return connection_field . replace ( '-' , '' ) != 'keepalive' else : return connection_field == 'close' | Return whether the connection should be closed . |
12,625 | def seek_file_end ( file ) : try : file . seek ( 0 , 2 ) except ValueError : while True : data = file . read ( 4096 ) if not data : break | Seek to the end of the file . |
12,626 | def parse_iso8601_str ( string ) : datetime_obj = datetime . datetime . strptime ( string , "%Y-%m-%dT%H:%M:%SZ" ) return int ( calendar . timegm ( datetime_obj . utctimetuple ( ) ) ) | Parse a fixed ISO8601 datetime string . |
12,627 | def python_version ( ) : major , minor , patch = sys . version_info [ 0 : 3 ] return '{0}.{1}.{2}' . format ( major , minor , patch ) | Return the Python version as a string . |
12,628 | def filter_pem ( data ) : assert isinstance ( data , bytes ) , 'Expect bytes. Got {}.' . format ( type ( data ) ) certs = set ( ) new_list = [ ] in_pem_block = False for line in re . split ( br'[\r\n]+' , data ) : if line == b'-----BEGIN CERTIFICATE-----' : assert not in_pem_block in_pem_block = True elif line == b'-----END CERTIFICATE-----' : assert in_pem_block in_pem_block = False content = b'' . join ( new_list ) content = rewrap_bytes ( content ) certs . add ( b'-----BEGIN CERTIFICATE-----\n' + content + b'\n-----END CERTIFICATE-----\n' ) new_list = [ ] elif in_pem_block : new_list . append ( line ) return certs | Processes the bytes for PEM certificates . |
12,629 | def rewrap_bytes ( data ) : return b'\n' . join ( data [ index : index + 70 ] for index in range ( 0 , len ( data ) , 70 ) ) | Rewrap characters to 70 character width . |
12,630 | def get_package_data ( filename , mode = 'rb' ) : if os . path . exists ( filename ) : with open ( filename , mode = mode ) as in_file : return in_file . read ( ) else : parts = os . path . normpath ( filename ) . split ( os . sep ) for part , index in zip ( parts , range ( len ( parts ) ) ) : if part . endswith ( '.zip' ) : zip_path = os . sep . join ( parts [ : index + 1 ] ) member_path = os . sep . join ( parts [ index + 1 : ] ) break if platform . system ( ) == 'Windows' : member_path = member_path . replace ( '\\' , '/' ) with zipfile . ZipFile ( zip_path ) as zip_file : return zip_file . read ( member_path ) | Return the contents of a real file or a zip file . |
12,631 | def get_package_filename ( filename , package_dir = None ) : if getattr ( sys , 'frozen' , False ) : package_dir = os . path . join ( sys . _MEIPASS , os . path . basename ( os . path . dirname ( __file__ ) ) ) elif not package_dir : package_dir = os . path . dirname ( __file__ ) return os . path . join ( package_dir , filename ) | Return the filename of the data file . |
12,632 | def get_exception_message ( instance ) : args = getattr ( instance , 'args' , None ) if args : return str ( instance ) try : return type ( instance ) . __name__ except AttributeError : return str ( instance ) | Try to get the exception message or the class name . |
12,633 | def dump ( self , obj ) : pickle . dump ( obj , self . _file , protocol = self . _protocol ) | Pickle an object . |
12,634 | def quoted_attribute_value ( self , value ) : quote_with = '"' if '"' in value : if "'" in value : replace_with = """ value = value . replace ( '"' , replace_with ) else : quote_with = "'" return quote_with + value + quote_with | Make a value into a quoted XML attribute possibly escaping it . |
12,635 | def encodings ( self ) : tried = set ( ) for e in self . override_encodings : if self . _usable ( e , tried ) : yield e if self . _usable ( self . sniffed_encoding , tried ) : yield self . sniffed_encoding if self . declared_encoding is None : self . declared_encoding = self . find_declared_encoding ( self . markup , self . is_html ) if self . _usable ( self . declared_encoding , tried ) : yield self . declared_encoding if self . chardet_encoding is None : self . chardet_encoding = chardet_dammit ( self . markup ) if self . _usable ( self . chardet_encoding , tried ) : yield self . chardet_encoding for e in ( 'utf-8' , 'windows-1252' ) : if self . _usable ( e , tried ) : yield e | Yield a number of encodings that might work for this markup . |
12,636 | def strip_byte_order_mark ( cls , data ) : encoding = None if ( len ( data ) >= 4 ) and ( data [ : 2 ] == b'\xfe\xff' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-16be' data = data [ 2 : ] elif ( len ( data ) >= 4 ) and ( data [ : 2 ] == b'\xff\xfe' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-16le' data = data [ 2 : ] elif data [ : 3 ] == b'\xef\xbb\xbf' : encoding = 'utf-8' data = data [ 3 : ] elif data [ : 4 ] == b'\x00\x00\xfe\xff' : encoding = 'utf-32be' data = data [ 4 : ] elif data [ : 4 ] == b'\xff\xfe\x00\x00' : encoding = 'utf-32le' data = data [ 4 : ] return data , encoding | If a byte - order mark is present strip it and return the encoding it implies . |
12,637 | def find_declared_encoding ( cls , markup , is_html = False , search_entire_document = False ) : if search_entire_document : xml_endpos = html_endpos = len ( markup ) else : xml_endpos = 1024 html_endpos = max ( 2048 , int ( len ( markup ) * 0.05 ) ) declared_encoding = None declared_encoding_match = xml_encoding_re . search ( markup , endpos = xml_endpos ) if not declared_encoding_match and is_html : declared_encoding_match = html_meta_re . search ( markup , endpos = html_endpos ) if declared_encoding_match is not None : declared_encoding = declared_encoding_match . groups ( ) [ 0 ] . decode ( 'ascii' , 'replace' ) if declared_encoding : return declared_encoding . lower ( ) return None | Given a document tries to find its declared encoding . |
12,638 | def _sub_ms_char ( self , match ) : orig = match . group ( 1 ) if self . smart_quotes_to == 'ascii' : sub = self . MS_CHARS_TO_ASCII . get ( orig ) . encode ( ) else : sub = self . MS_CHARS . get ( orig ) if type ( sub ) == tuple : if self . smart_quotes_to == 'xml' : sub = '&#x' . encode ( ) + sub [ 1 ] . encode ( ) + ';' . encode ( ) else : sub = '&' . encode ( ) + sub [ 0 ] . encode ( ) + ';' . encode ( ) else : sub = sub . encode ( ) return sub | Changes a MS smart quote character to an XML or HTML entity or an ASCII character . |
12,639 | def detwingle ( cls , in_bytes , main_encoding = "utf8" , embedded_encoding = "windows-1252" ) : if embedded_encoding . replace ( '_' , '-' ) . lower ( ) not in ( 'windows-1252' , 'windows_1252' ) : raise NotImplementedError ( "Windows-1252 and ISO-8859-1 are the only currently supported " "embedded encodings." ) if main_encoding . lower ( ) not in ( 'utf8' , 'utf-8' ) : raise NotImplementedError ( "UTF-8 is the only currently supported main encoding." ) byte_chunks = [ ] chunk_start = 0 pos = 0 while pos < len ( in_bytes ) : byte = in_bytes [ pos ] if not isinstance ( byte , int ) : byte = ord ( byte ) if ( byte >= cls . FIRST_MULTIBYTE_MARKER and byte <= cls . LAST_MULTIBYTE_MARKER ) : for start , end , size in cls . MULTIBYTE_MARKERS_AND_SIZES : if byte >= start and byte <= end : pos += size break elif byte >= 0x80 and byte in cls . WINDOWS_1252_TO_UTF8 : byte_chunks . append ( in_bytes [ chunk_start : pos ] ) byte_chunks . append ( cls . WINDOWS_1252_TO_UTF8 [ byte ] ) pos += 1 chunk_start = pos else : pos += 1 if chunk_start == 0 : return in_bytes else : byte_chunks . append ( in_bytes [ chunk_start : ] ) return b'' . join ( byte_chunks ) | Fix characters from one encoding embedded in some other encoding . |
12,640 | def scrape_file ( self , file , encoding = None , base_url = None ) : elements = self . iter_elements ( file , encoding = encoding ) link_contexts = set ( ) link_infos = self . _element_walker . iter_links ( elements ) for link_info in link_infos : element_base_url = base_url if link_info . base_link : clean_base_url = clean_link_soup ( link_info . base_link ) if element_base_url and base_url : element_base_url = urljoin_safe ( base_url , clean_base_url ) or base_url if element_base_url : url = urljoin_safe ( element_base_url , clean_link_soup ( link_info . link ) , allow_fragments = False ) else : url = clean_link_soup ( link_info . link ) if url : link_contexts . add ( LinkContext ( url , inline = link_info . inline , linked = link_info . linked , link_type = link_info . link_type , extra = link_info ) ) scrape_result = ScrapeResult ( link_contexts , encoding ) scrape_result [ 'base_url' ] = base_url return scrape_result | Scrape a file for links . |
12,641 | def _is_accepted ( self , element_tag ) : element_tag = element_tag . lower ( ) if self . _ignored_tags is not None and element_tag in self . _ignored_tags : return False if self . _followed_tags is not None : return element_tag in self . _followed_tags else : return True | Return if the link is accepted by the filters . |
12,642 | def iter_links ( self , elements ) : for element in elements : if not isinstance ( element , Element ) : continue for link_infos in self . iter_links_element ( element ) : yield link_infos | Iterate the document root for links . |
12,643 | def iter_links_element ( self , element ) : attrib = element . attrib tag = element . tag if tag == 'link' : iterable = self . iter_links_link_element ( element ) elif tag == 'meta' : iterable = self . iter_links_meta_element ( element ) elif tag in ( 'object' , 'applet' ) : iterable = self . iter_links_object_element ( element ) elif tag == 'param' : iterable = self . iter_links_param_element ( element ) elif tag == 'style' : iterable = self . iter_links_style_element ( element ) elif tag == 'script' : iterable = self . iter_links_script_element ( element ) else : iterable = self . iter_links_plain_element ( element ) if tag in ( 'link' , 'url' , 'icon' ) : iterable = itertools . chain ( iterable , self . iter_links_element_text ( element ) ) for link_info in iterable : yield link_info if 'style' in attrib and self . css_scraper : for link in self . css_scraper . scrape_links ( attrib [ 'style' ] ) : yield LinkInfo ( element = element , tag = element . tag , attrib = 'style' , link = link , inline = True , linked = False , base_link = None , value_type = 'css' , link_type = LinkType . media , ) | Iterate a HTML element . |
12,644 | def iter_links_element_text ( cls , element ) : if element . text : link_type = identify_link_type ( element . text ) yield LinkInfo ( element = element , tag = element . tag , attrib = None , link = element . text , inline = False , linked = True , base_link = None , value_type = 'plain' , link_type = link_type ) | Get the element text as a link . |
12,645 | def iter_links_link_element ( self , element ) : rel = element . attrib . get ( 'rel' , '' ) stylesheet = 'stylesheet' in rel icon = 'icon' in rel inline = stylesheet or icon if stylesheet : link_type = LinkType . css elif icon : link_type = LinkType . media else : link_type = None for attrib_name , link in self . iter_links_by_attrib ( element ) : yield LinkInfo ( element = element , tag = element . tag , attrib = attrib_name , link = link , inline = inline , linked = not inline , base_link = None , value_type = 'plain' , link_type = link_type ) | Iterate a link for URLs . |
12,646 | def iter_links_meta_element ( cls , element ) : if element . attrib . get ( 'http-equiv' , '' ) . lower ( ) == 'refresh' : content_value = element . attrib . get ( 'content' ) if content_value : link = parse_refresh ( content_value ) if link : yield LinkInfo ( element = element , tag = element . tag , attrib = 'http-equiv' , link = link , inline = False , linked = True , base_link = None , value_type = 'refresh' , link_type = None ) else : for link_info in cls . iter_links_open_graph_meta ( element ) : yield link_info | Iterate the meta element for links . |
12,647 | def iter_links_object_element ( cls , element ) : base_link = element . attrib . get ( 'codebase' , None ) if base_link : link_type = element . attrib . get ( base_link ) yield LinkInfo ( element = element , tag = element . tag , attrib = 'codebase' , link = base_link , inline = True , linked = False , base_link = None , value_type = 'plain' , link_type = link_type ) for attribute in ( 'code' , 'src' , 'classid' , 'data' ) : if attribute in element . attrib : link_type = identify_link_type ( element . attrib . get ( attribute ) ) yield LinkInfo ( element = element , tag = element . tag , attrib = attribute , link = element . attrib . get ( attribute ) , inline = True , linked = False , base_link = base_link , value_type = 'plain' , link_type = link_type ) if 'archive' in element . attrib : for match in re . finditer ( r'[^ ]+' , element . attrib . get ( 'archive' ) ) : value = match . group ( 0 ) link_type = identify_link_type ( value ) yield LinkInfo ( element = element , tag = element . tag , attrib = 'archive' , link = value , inline = True , linked = False , base_link = base_link , value_type = 'list' , link_type = link_type ) | Iterate object and embed elements . |
12,648 | def iter_links_param_element ( cls , element ) : valuetype = element . attrib . get ( 'valuetype' , '' ) if valuetype . lower ( ) == 'ref' and 'value' in element . attrib : link_type = identify_link_type ( element . attrib . get ( 'value' ) ) yield LinkInfo ( element = element , tag = element . tag , attrib = 'value' , link = element . attrib . get ( 'value' ) , inline = True , linked = False , base_link = None , value_type = 'plain' , link_type = link_type ) | Iterate a param element . |
12,649 | def iter_links_style_element ( self , element ) : if self . css_scraper and element . text : link_iter = self . css_scraper . scrape_links ( element . text , context = True ) for link , context in link_iter : if context == 'import' : link_type = LinkType . css else : link_type = LinkType . media yield LinkInfo ( element = element , tag = element . tag , attrib = None , link = link , inline = True , linked = False , base_link = None , value_type = 'css' , link_type = link_type ) | Iterate a style element . |
12,650 | def iter_links_script_element ( self , element ) : if self . javascript_scraper and element . text : link_iter = self . javascript_scraper . scrape_links ( element . text , context = True ) for link , context in link_iter : inline = is_likely_inline ( link ) if context is True : link_type = None else : link_type = context yield LinkInfo ( element = element , tag = element . tag , attrib = None , link = link , inline = inline , linked = not inline , base_link = None , value_type = 'script' , link_type = link_type ) for link in self . iter_links_plain_element ( element ) : yield link | Iterate a script element . |
12,651 | def iter_links_plain_element ( self , element ) : for attrib_name , link in self . iter_links_by_attrib ( element ) : if attrib_name in self . LINK_ATTRIBUTES : inline = self . is_link_inline ( element . tag , attrib_name ) linked = self . is_html_link ( element . tag , attrib_name ) else : inline = is_likely_inline ( link ) linked = not inline link_type = identify_link_type ( link ) yield LinkInfo ( element = element , tag = element . tag , attrib = attrib_name , link = link , inline = inline , linked = linked , base_link = None , value_type = 'plain' , link_type = link_type ) | Iterate any element for links using generic rules . |
12,652 | def iter_links_by_attrib ( self , element ) : for attrib_name in element . attrib . keys ( ) : attrib_value = element . attrib . get ( attrib_name ) if attrib_name in self . LINK_ATTRIBUTES : if self . javascript_scraper and attrib_value . lstrip ( ) . startswith ( 'javascript:' ) : for link in self . iter_links_by_js_attrib ( attrib_name , percent_decode ( attrib_value ) ) : yield link else : yield attrib_name , attrib_value elif self . javascript_scraper and attrib_name [ : 5 ] in self . DYNAMIC_ATTRIBUTES : for link in self . iter_links_by_js_attrib ( attrib_name , attrib_value ) : yield link elif attrib_name . startswith ( 'data-' ) : if is_likely_link ( attrib_value ) and not is_unlikely_link ( attrib_value ) : yield attrib_name , attrib_value elif attrib_name == 'srcset' : items = self . iter_links_by_srcset_attrib ( attrib_name , attrib_value ) for item in items : yield item | Iterate an element by looking at its attributes for links . |
12,653 | def iter_links_by_js_attrib ( self , attrib_name , attrib_value ) : links = self . javascript_scraper . scrape_links ( attrib_value ) for link in links : yield attrib_name , link | Iterate links of a JavaScript pseudo - link attribute . |
12,654 | def is_link_inline ( cls , tag , attribute ) : if tag in cls . TAG_ATTRIBUTES and attribute in cls . TAG_ATTRIBUTES [ tag ] : attr_flags = cls . TAG_ATTRIBUTES [ tag ] [ attribute ] return attr_flags & cls . ATTR_INLINE return attribute != 'href' | Return whether the link is likely to be inline object . |
12,655 | def is_html_link ( cls , tag , attribute ) : if tag in cls . TAG_ATTRIBUTES and attribute in cls . TAG_ATTRIBUTES [ tag ] : attr_flags = cls . TAG_ATTRIBUTES [ tag ] [ attribute ] return attr_flags & cls . ATTR_HTML return attribute == 'href' | Return whether the link is likely to be external object . |
12,656 | def robots_cannot_follow ( cls , element ) : return ( element . tag == 'meta' and element . attrib . get ( 'name' , '' ) . lower ( ) == 'robots' and 'nofollow' in element . attrib . get ( 'value' , '' ) . lower ( ) ) | Return whether we cannot follow links due to robots . txt directives . |
12,657 | def iter_processed_text ( self , file , encoding = None , base_url = None ) : for text , is_link in self . iter_text ( file , encoding ) : if is_link and base_url : new_link = urljoin_safe ( base_url , text , allow_fragments = False ) if new_link : yield ( new_link , is_link ) else : yield ( new_link , False ) else : yield ( text , is_link ) | Return the file text and processed absolute links . |
12,658 | def scrape_links ( self , text , context = False ) : return self . iter_processed_links ( io . StringIO ( text ) , context = context ) | Convenience function for scraping from a text string . |
12,659 | def scrape ( self , request , response , link_type = None ) : for scraper in self . _document_scrapers : scrape_result = scraper . scrape ( request , response , link_type ) if scrape_result is None : continue if scrape_result . link_contexts : return scrape_result | Iterate the scrapers returning the first of the results . |
12,660 | def scrape_info ( self , request , response , link_type = None ) : info = { } for scraper in self . _document_scrapers : scrape_result = scraper . scrape ( request , response , link_type ) info [ scraper ] = scrape_result return info | Iterate the scrapers and return a dict of results . |
12,661 | def feed ( self , data_len , feed_time = None ) : self . _bytes_transferred += data_len self . _collected_bytes_transferred += data_len time_now = feed_time or time . time ( ) time_diff = time_now - self . _last_feed_time if time_diff < self . _sample_min_time : return self . _last_feed_time = time . time ( ) if data_len == 0 and time_diff >= self . _stall_time : self . _stalled = True return self . _samples . append ( ( time_diff , self . _collected_bytes_transferred ) ) self . _collected_bytes_transferred = 0 | Update the bandwidth meter . |
12,662 | def speed ( self ) : if self . _stalled : return 0 time_sum = 0 data_len_sum = 0 for time_diff , data_len in self . _samples : time_sum += time_diff data_len_sum += data_len if time_sum : return data_len_sum / time_sum else : return 0 | Return the current transfer speed . |
12,663 | def get_info ( self ) : if self . _min_disk : for path in self . _resource_paths : usage = psutil . disk_usage ( path ) yield ResourceInfo ( path , usage . free , self . _min_disk ) if self . _min_memory : usage = psutil . virtual_memory ( ) yield ResourceInfo ( None , usage . available , self . _min_memory ) | Return ResourceInfo instances . |
12,664 | def check ( self ) : for info in self . get_info ( ) : if info . free < info . limit : return info | Check resource levels . |
12,665 | def load ( self , response ) : self . _response = response if self . next_location ( raw = True ) : self . _num_redirects += 1 | Load the response and increment the counter . |
12,666 | def next_location ( self , raw = False ) : if self . _response : location = self . _response . fields . get ( 'location' ) if not location or raw : return location return wpull . url . urljoin ( self . _response . request . url_info . url , location ) | Returns the next location . |
12,667 | def is_redirect ( self ) : if self . _response : status_code = self . _response . status_code return status_code in self . _codes or status_code in self . _repeat_codes | Return whether the response contains a redirect code . |
12,668 | def _build_resolver ( cls , session : AppSession ) : args = session . args dns_timeout = args . dns_timeout if args . timeout : dns_timeout = args . timeout if args . inet_family == 'IPv4' : family = IPFamilyPreference . ipv4_only elif args . inet_family == 'IPv6' : family = IPFamilyPreference . ipv6_only elif args . prefer_family == 'IPv6' : family = IPFamilyPreference . prefer_ipv6 elif args . prefer_family == 'IPv4' : family = IPFamilyPreference . prefer_ipv4 else : family = IPFamilyPreference . any return session . factory . new ( 'Resolver' , family = family , timeout = dns_timeout , rotate = args . rotate_dns , cache = session . factory . class_map [ 'Resolver' ] . new_cache ( ) if args . dns_cache else None , ) | Build resolver . |
12,669 | def _build_connection_pool ( cls , session : AppSession ) : args = session . args connect_timeout = args . connect_timeout read_timeout = args . read_timeout if args . timeout : connect_timeout = read_timeout = args . timeout if args . limit_rate : bandwidth_limiter = session . factory . new ( 'BandwidthLimiter' , args . limit_rate ) else : bandwidth_limiter = None connection_factory = functools . partial ( Connection , timeout = read_timeout , connect_timeout = connect_timeout , bind_host = session . args . bind_address , bandwidth_limiter = bandwidth_limiter , ) ssl_connection_factory = functools . partial ( SSLConnection , timeout = read_timeout , connect_timeout = connect_timeout , bind_host = session . args . bind_address , ssl_context = session . ssl_context , ) if not session . args . no_proxy : if session . args . https_proxy : http_proxy = session . args . http_proxy . split ( ':' , 1 ) proxy_ssl = True elif session . args . http_proxy : http_proxy = session . args . http_proxy . split ( ':' , 1 ) proxy_ssl = False else : http_proxy = None proxy_ssl = None if http_proxy : http_proxy [ 1 ] = int ( http_proxy [ 1 ] ) if session . args . proxy_user : authentication = ( session . args . proxy_user , session . args . proxy_password ) else : authentication = None session . factory . class_map [ 'ConnectionPool' ] = HTTPProxyConnectionPool host_filter = session . factory . new ( 'ProxyHostFilter' , accept_domains = session . args . proxy_domains , reject_domains = session . args . proxy_exclude_domains , accept_hostnames = session . args . proxy_hostnames , reject_hostnames = session . args . proxy_exclude_hostnames ) return session . factory . new ( 'ConnectionPool' , http_proxy , proxy_ssl = proxy_ssl , authentication = authentication , resolver = session . factory [ 'Resolver' ] , connection_factory = connection_factory , ssl_connection_factory = ssl_connection_factory , host_filter = host_filter , ) return session . factory . new ( 'ConnectionPool' , resolver = session . factory [ 'Resolver' ] , connection_factory = connection_factory , ssl_connection_factory = ssl_connection_factory ) | Create connection pool . |
12,670 | def convert_all ( self ) : for url_record in self . _url_table . get_all ( ) : if url_record . status != Status . done : continue self . convert_by_record ( url_record ) | Convert all links in URL table . |
12,671 | def read_chunk_header ( self ) : try : chunk_size_hex = yield from self . _connection . readline ( ) except ValueError as error : raise ProtocolError ( 'Invalid chunk size: {0}' . format ( error ) ) from error if not chunk_size_hex . endswith ( b'\n' ) : raise NetworkError ( 'Connection closed.' ) try : chunk_size = int ( chunk_size_hex . split ( b';' , 1 ) [ 0 ] . strip ( ) , 16 ) except ValueError as error : raise ProtocolError ( 'Invalid chunk size: {0}' . format ( error ) ) from error if chunk_size < 0 : raise ProtocolError ( 'Chunk size cannot be negative.' ) self . _chunk_size = self . _bytes_left = chunk_size return chunk_size , chunk_size_hex | Read a single chunk s header . |
12,672 | def read_chunk_body ( self ) : bytes_left = self . _bytes_left if bytes_left > 0 : size = min ( bytes_left , self . _read_size ) data = yield from self . _connection . read ( size ) self . _bytes_left -= len ( data ) return ( data , data ) elif bytes_left < 0 : raise ProtocolError ( 'Chunked-transfer overrun.' ) elif bytes_left : raise NetworkError ( 'Connection closed.' ) newline_data = yield from self . _connection . readline ( ) if len ( newline_data ) > 2 : raise ProtocolError ( 'Error reading newline after chunk.' ) self . _chunk_size = self . _bytes_left = None return ( b'' , newline_data ) | Read a fragment of a single chunk . |
12,673 | def read_trailer ( self ) : _logger . debug ( 'Reading chunked trailer.' ) trailer_data_list = [ ] while True : trailer_data = yield from self . _connection . readline ( ) trailer_data_list . append ( trailer_data ) if not trailer_data . strip ( ) : break return b'' . join ( trailer_data_list ) | Read the HTTP trailer fields . |
12,674 | def get_version_tuple ( string ) : match = re . match ( r'(\d+)\.(\d+)\.?(\d*)([abc]?)(\d*)' , string ) major = int ( match . group ( 1 ) ) minor = int ( match . group ( 2 ) ) patch = int ( match . group ( 3 ) or 0 ) level = RELEASE_LEVEL_MAP . get ( match . group ( 4 ) , 'final' ) serial = int ( match . group ( 5 ) or 0 ) return major , minor , patch , level , serial | Return a version tuple from a string . |
12,675 | def _read_input_urls ( cls , session : AppSession , default_scheme = 'http' ) : url_string_iter = session . args . urls or ( ) url_rewriter = session . factory . get ( 'URLRewriter' ) if session . args . input_file : if session . args . force_html : lines = cls . _input_file_as_html_links ( session ) else : lines = cls . _input_file_as_lines ( session ) url_string_iter = itertools . chain ( url_string_iter , lines ) base_url = session . args . base for url_string in url_string_iter : _logger . debug ( __ ( 'Parsing URL {0}' , url_string ) ) if base_url : url_string = wpull . url . urljoin ( base_url , url_string ) try : url_info = wpull . url . URLInfo . parse ( url_string , default_scheme = default_scheme ) _logger . debug ( __ ( 'Parsed URL {0}' , url_info ) ) if url_rewriter : url_info = url_rewriter . rewrite ( url_info ) _logger . debug ( __ ( 'Rewritten URL {0}' , url_info ) ) yield url_info except ValueError as e : _logger . info ( __ ( 'Invalid URL {0}: {1}' , url_string , e ) ) | Read the URLs provided by the user . |
12,676 | def _input_file_as_lines ( cls , session : AppSession ) : if session . args . input_file == sys . stdin : input_file = session . args . input_file else : reader = codecs . getreader ( session . args . local_encoding or 'utf-8' ) input_file = reader ( session . args . input_file ) return input_file | Read lines from input file and return them . |
12,677 | def _input_file_as_html_links ( cls , session : AppSession ) : scrape_result = session . factory [ 'HTMLScraper' ] . scrape_file ( session . args . input_file , encoding = session . args . local_encoding or 'utf-8' ) for context in scrape_result . link_contexts : yield context . link | Read input file as HTML and return the links . |
12,678 | def _new_initial_request ( self , with_body : bool = True ) : url_record = self . _item_session . url_record url_info = url_record . url_info request = self . _item_session . app_session . factory [ 'WebClient' ] . request_factory ( url_info . url ) self . _populate_common_request ( request ) if with_body : if url_record . post_data or self . _processor . fetch_params . post_data : self . _add_post_data ( request ) if self . _file_writer_session : request = self . _file_writer_session . process_request ( request ) return request | Return a new Request to be passed to the Web Client . |
12,679 | def _populate_common_request ( self , request ) : url_record = self . _item_session . url_record if url_record . parent_url and not request . fields . get ( 'Referer' ) : self . _add_referrer ( request , url_record ) if self . _fetch_rule . http_login : request . username , request . password = self . _fetch_rule . http_login | Populate the Request with common fields . |
12,680 | def _add_referrer ( cls , request : Request , url_record : URLRecord ) : if url_record . parent_url . startswith ( 'https://' ) and url_record . url_info . scheme == 'http' : return request . fields [ 'Referer' ] = url_record . parent_url | Add referrer URL to request . |
12,681 | def _process_robots ( self ) : try : self . _item_session . request = request = self . _new_initial_request ( with_body = False ) verdict , reason = ( yield from self . _should_fetch_reason_with_robots ( request ) ) except REMOTE_ERRORS as error : _logger . error ( _ ( 'Fetching robots.txt for ‘{url}’ ' 'encountered an error: {error}' ) , url = self . _next_url_info . url , error = error ) self . _result_rule . handle_error ( self . _item_session , error ) wait_time = self . _result_rule . get_wait_time ( self . _item_session , error = error ) if wait_time : _logger . debug ( 'Sleeping {0}.' , wait_time ) yield from asyncio . sleep ( wait_time ) return False else : _logger . debug ( 'Robots filter verdict {} reason {}' , verdict , reason ) if not verdict : self . _item_session . skip ( ) return False return True | Process robots . txt . |
12,682 | def _process_loop ( self ) : while not self . _web_client_session . done ( ) : self . _item_session . request = self . _web_client_session . next_request ( ) verdict , reason = self . _should_fetch_reason ( ) _logger . debug ( 'Filter verdict {} reason {}' , verdict , reason ) if not verdict : self . _item_session . skip ( ) break exit_early , wait_time = yield from self . _fetch_one ( cast ( Request , self . _item_session . request ) ) if wait_time : _logger . debug ( 'Sleeping {}' , wait_time ) yield from asyncio . sleep ( wait_time ) if exit_early : break | Fetch URL including redirects . |
12,683 | def _next_url_info ( self ) -> URLInfo : if not self . _web_client_session : return self . _item_session . url_record . url_info return self . _web_client_session . next_request ( ) . url_info | Return the next URLInfo to be processed . |
12,684 | def _should_fetch_reason ( self ) -> Tuple [ bool , str ] : is_redirect = False if self . _strong_redirects : try : is_redirect = self . _web_client_session . redirect_tracker . is_redirect ( ) except AttributeError : pass return self . _fetch_rule . check_subsequent_web_request ( self . _item_session , is_redirect = is_redirect ) | Return info about whether the URL should be fetched . |
12,685 | def _should_fetch_reason_with_robots ( self , request : Request ) -> Tuple [ bool , str ] : result = yield from self . _fetch_rule . check_initial_web_request ( self . _item_session , request ) return result | Return info whether the URL should be fetched including checking robots . txt . |
12,686 | def _add_post_data ( self , request : Request ) : if self . _item_session . url_record . post_data : data = wpull . string . to_bytes ( self . _item_session . url_record . post_data ) else : data = wpull . string . to_bytes ( self . _processor . fetch_params . post_data ) request . method = 'POST' request . fields [ 'Content-Type' ] = 'application/x-www-form-urlencoded' request . fields [ 'Content-Length' ] = str ( len ( data ) ) _logger . debug ( 'Posting with data {0}.' , data ) if not request . body : request . body = Body ( io . BytesIO ( ) ) with wpull . util . reset_file_offset ( request . body ) : request . body . write ( data ) | Add data to the payload . |
12,687 | def int_0_inf ( cls , string ) : if string == 'inf' : return 0 try : value = int ( string ) except ValueError as error : raise argparse . ArgumentTypeError ( error ) if value < 0 : raise argparse . ArgumentTypeError ( _ ( 'Value must not be negative.' ) ) else : return value | Convert string to int . |
12,688 | def int_bytes ( cls , string ) : if string [ - 1 ] in ( 'k' , 'm' ) : value = cls . int_0_inf ( string [ : - 1 ] ) unit = string [ - 1 ] if unit == 'k' : value *= 2 ** 10 else : value *= 2 ** 20 return value else : return cls . int_0_inf ( string ) | Convert string describing size to int . |
12,689 | def comma_list ( cls , string ) : items = string . split ( ',' ) items = list ( [ item . strip ( ) for item in items ] ) return items | Convert a comma separated string to list . |
12,690 | def comma_choice_list ( cls , string ) : items = string . split ( ',' ) items = CommaChoiceListArgs ( [ item . strip ( ) for item in items ] ) return items | Convert a comma separated string to CommaChoiceListArgs . |
12,691 | def read_links ( self , file , encoding = None ) : return [ item [ 0 ] for item in self . iter_text ( file , encoding ) if item [ 1 ] ] | Return an iterator of links found in the document . |
12,692 | def _build_file_writer ( cls , session : AppSession ) : args = session . args if args . delete_after : return session . factory . new ( 'FileWriter' ) elif args . output_document : session . factory . class_map [ 'FileWriter' ] = SingleDocumentWriter return session . factory . new ( 'FileWriter' , args . output_document , headers_included = args . save_headers ) use_dir = ( len ( args . urls ) != 1 or args . page_requisites or args . recursive ) if args . use_directories == 'force' : use_dir = True elif args . use_directories == 'no' : use_dir = False os_type = 'windows' if 'windows' in args . restrict_file_names else 'unix' ascii_only = 'ascii' in args . restrict_file_names no_control = 'nocontrol' not in args . restrict_file_names if 'lower' in args . restrict_file_names : case = 'lower' elif 'upper' in args . restrict_file_names : case = 'upper' else : case = None path_namer = session . factory . new ( 'PathNamer' , args . directory_prefix , index = args . default_page , use_dir = use_dir , cut = args . cut_dirs , protocol = args . protocol_directories , hostname = args . host_directories , os_type = os_type , ascii_only = ascii_only , no_control = no_control , case = case , max_filename_length = args . max_filename_length , ) if args . recursive or args . page_requisites or args . continue_download : if args . clobber_method == 'disable' : file_class = OverwriteFileWriter else : file_class = IgnoreFileWriter elif args . timestamping : file_class = TimestampingFileWriter else : file_class = AntiClobberFileWriter session . factory . class_map [ 'FileWriter' ] = file_class return session . factory . new ( 'FileWriter' , path_namer , file_continuing = args . continue_download , headers_included = args . save_headers , local_timestamping = args . use_server_timestamps , adjust_extension = args . adjust_extension , content_disposition = args . content_disposition , trust_server_names = args . trust_server_names , ) | Create the File Writer . |
12,693 | def setup_signal_handlers ( self ) : if platform . system ( ) == 'Windows' : _logger . warning ( _ ( 'Graceful stopping with Unix signals is not supported ' 'on this OS.' ) ) return event_loop = asyncio . get_event_loop ( ) graceful_called = False def graceful_stop_callback ( ) : nonlocal graceful_called if graceful_called : forceful_stop_callback ( ) return graceful_called = True _logger . info ( _ ( 'Stopping once all requests complete...' ) ) _logger . info ( _ ( 'Interrupt again to force stopping immediately.' ) ) self . stop ( ) def forceful_stop_callback ( ) : _logger . info ( _ ( 'Forcing immediate stop...' ) ) logging . raiseExceptions = False event_loop . stop ( ) event_loop . add_signal_handler ( signal . SIGINT , graceful_stop_callback ) event_loop . add_signal_handler ( signal . SIGTERM , forceful_stop_callback ) | Setup Ctrl + C and SIGTERM handlers . |
12,694 | def _update_exit_code_from_error ( self , error ) : for error_type , exit_code in self . ERROR_CODE_MAP . items ( ) : if isinstance ( error , error_type ) : self . update_exit_code ( exit_code ) break else : self . update_exit_code ( ExitStatus . generic_error ) | Set the exit code based on the error type . |
12,695 | def update_exit_code ( self , code : int ) : if code : if self . _exit_code : self . _exit_code = min ( self . _exit_code , code ) else : self . _exit_code = code | Set the exit code if it is serious than before . |
12,696 | def consult_robots_txt ( self , request : HTTPRequest ) -> bool : if not self . _robots_txt_checker : return True result = yield from self . _robots_txt_checker . can_fetch ( request ) return result | Consult by fetching robots . txt as needed . |
12,697 | def consult_filters ( self , url_info : URLInfo , url_record : URLRecord , is_redirect : bool = False ) -> Tuple [ bool , str , dict ] : if not self . _url_filter : return True , 'nofilters' , None test_info = self . _url_filter . test_info ( url_info , url_record ) verdict = test_info [ 'verdict' ] if verdict : reason = 'filters' elif is_redirect and self . is_only_span_hosts_failed ( test_info ) : verdict = True reason = 'redirect' else : reason = 'filters' return verdict , reason , test_info | Consult the URL filter . |
12,698 | def consult_hook ( self , item_session : ItemSession , verdict : bool , reason : str , test_info : dict ) : try : reasons = { 'filters' : test_info [ 'map' ] , 'reason' : reason , } verdict = self . hook_dispatcher . call ( PluginFunctions . accept_url , item_session , verdict , reasons , ) reason = 'callback_hook' except HookDisconnected : pass return verdict , reason | Consult the scripting hook . |
12,699 | def check_initial_web_request ( self , item_session : ItemSession , request : HTTPRequest ) -> Tuple [ bool , str ] : verdict , reason , test_info = self . consult_filters ( item_session . request . url_info , item_session . url_record ) if verdict and self . _robots_txt_checker : can_fetch = yield from self . consult_robots_txt ( request ) if not can_fetch : verdict = False reason = 'robotstxt' verdict , reason = self . consult_hook ( item_session , verdict , reason , test_info ) return verdict , reason | Check robots . txt URL filters and scripting hook . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.