idx (int64, 0-63k) | question (string, 61-4.03k chars) | target (string, 6-1.23k chars) |
|---|---|---|
12,600 | def get_all ( self ) : for name , values in self . _map . items ( ) : for value in values : yield ( name , value ) | Return an iterator of name - value pairs . |
12,601 | def to_str ( self ) : pairs = [ ] for name , value in self . get_all ( ) : if value and self . _wrap_width : pairs . append ( '{0}:{1}' . format ( name , '\r\n' . join ( textwrap . wrap ( value , width = self . _wrap_width , drop_whitespace = False , initial_indent = ' ' , subsequent_indent = ' ' ) ) ) ) elif value : p... | Convert to string . |
12,602 | def to_bytes ( self , errors = 'strict' ) : return str ( self ) . encode ( self . encoding , errors = errors ) | Convert to bytes . |
12,603 | def recycle ( self ) : for connection in self . _connections : self . _connection_pool . no_wait_release ( connection ) self . _connections . clear ( ) | Clean up and return connections back to the pool . |
12,604 | def session ( self ) -> SessionT : session = self . _session_class ( ) ( connection_pool = self . _connection_pool , ) self . event_dispatcher . notify ( self . ClientEvent . new_session , session ) return session | Return a new session . |
12,605 | def count_cookies ( self , domain ) : cookies = self . cookie_jar . _cookies if domain in cookies : return sum ( [ len ( cookie ) for cookie in cookies [ domain ] . values ( ) ] ) else : return 0 | Return the number of cookies for the given domain . |
12,606 | def cookie_length ( self , domain ) : cookies = self . cookie_jar . _cookies if domain not in cookies : return 0 length = 0 for path in cookies [ domain ] : for name in cookies [ domain ] [ path ] : cookie = cookies [ domain ] [ path ] [ name ] length += len ( path ) + len ( name ) + len ( cookie . value or '' ) return... | Return approximate length of all cookie key - values for a domain . |
12,607 | def guess_listing_type ( lines , threshold = 100 ) : scores = { 'unix' : 0 , 'msdos' : 0 , 'nlst' : 0 , } for line in lines : if not line : continue if re . search ( r'---|r--|rw-|rwx' , line ) : scores [ 'unix' ] += 1 if '<DIR>' in line or re . search ( r'^.{0,4}\d\d' , line ) : scores [ 'msdos' ] += 1 words = line . ... | Guess the style of directory listing . |
12,608 | def parse_unix_perm ( text ) : if len ( text ) != 9 : return 0 perms = 0 for triad_index in range ( 3 ) : string_index = triad_index * 3 perms <<= 3 if text [ string_index ] == 'r' : perms |= 1 << 2 if text [ string_index + 1 ] == 'w' : perms |= 1 << 1 if text [ string_index + 2 ] in 'xs' : perms |= 1 return perms | Parse a Unix permission string and return integer value . |
12,609 | def parse ( self , lines ) : if self . type == 'msdos' : return self . parse_msdos ( lines ) elif self . type == 'unix' : return self . parse_unix ( lines ) elif self . type == 'nlst' : return self . parse_nlst ( lines ) else : raise UnknownListingError ( 'Unsupported listing type.' ) | Parse the lines . |
12,610 | def parse_datetime ( self , text ) : return parse_datetime ( text , date_format = self . date_format , is_day_period = self . is_day_period ) | Parse datetime from line of text . |
12,611 | def parse_msdos ( self , lines ) : for line in lines : fields = line . split ( None , 4 ) date_str = fields [ 0 ] time_str = fields [ 1 ] datetime_str = '{} {}' . format ( date_str , time_str ) file_datetime = self . parse_datetime ( datetime_str ) [ 0 ] if fields [ 2 ] == '<DIR>' : file_size = None file_type = 'dir' e... | Parse lines from a MS - DOS format . |
12,612 | def parse_unix ( self , lines ) : for line in lines : original_line = line fields = line . split ( ' ' ) after_perm_index = 0 for field in fields : after_perm_index += len ( field ) if not field : continue if field [ 0 ] in 'bcdlps-' : if field [ 0 ] == 'd' : file_type = 'dir' elif field [ 0 ] == '-' : file_type = 'fil... | Parse listings from a Unix ls command format . |
12,613 | def parse_input ( self ) : if self . _text : lines = iter ( self . _text . splitlines ( ) ) elif self . _file : lines = self . _file else : lines = ( ) sample_lines = [ ] for line in lines : if len ( sample_lines ) > 100 : break sample_lines . append ( line ) lines = itertools . chain ( sample_lines , lines ) self . gu... | Parse the listings . |
12,614 | def open_file ( cls , filename : str , response : BaseResponse , mode = 'wb+' ) : _logger . debug ( 'Saving file to {0}, mode={1}.' , filename , mode ) dir_path = os . path . dirname ( filename ) if dir_path and not os . path . exists ( dir_path ) : os . makedirs ( dir_path ) response . body = Body ( open ( filename , ... | Open a file object on to the Response Body . |
12,615 | def set_timestamp ( cls , filename : str , response : HTTPResponse ) : last_modified = response . fields . get ( 'Last-Modified' ) if not last_modified : return try : last_modified = email . utils . parsedate ( last_modified ) except ValueError : _logger . exception ( 'Failed to parse date.' ) return last_modified = ti... | Set the Last - Modified timestamp onto the given file . |
12,616 | def save_headers ( cls , filename : str , response : HTTPResponse ) : new_filename = filename + '-new' with open ( 'wb' ) as new_file : new_file . write ( response . header ( ) ) with wpull . util . reset_file_offset ( response . body ) : response . body . seek ( 0 ) shutil . copyfileobj ( response . body , new_file ) ... | Prepend the HTTP response header to the file . |
12,617 | def _compute_filename ( self , request : BaseRequest ) : path = self . _path_namer . get_filename ( request . url_info ) if os . path . isdir ( path ) : path += '.f' else : dir_name , name = os . path . split ( path ) path = os . path . join ( anti_clobber_dir_path ( dir_name ) , name ) return path | Get the appropriate filename from the request . |
12,618 | def _process_file_continue_request ( self , request : BaseRequest ) : if os . path . exists ( self . _filename ) : size = os . path . getsize ( self . _filename ) request . set_continue ( size ) self . _file_continue_requested = True _logger . debug ( 'Continue file from {0}.' , size ) else : _logger . debug ( 'No file... | Modify the request to resume downloading file . |
12,619 | def _process_file_continue_response ( self , response : HTTPResponse ) : code = response . status_code if code == http . client . PARTIAL_CONTENT : self . open_file ( self . _filename , response , mode = 'ab+' ) else : self . _raise_cannot_continue_error ( ) | Process a partial content response . |
12,620 | def _process_file_continue_ftp_response ( self , response : FTPResponse ) : if response . request . restart_value and response . restart_value : self . open_file ( self . _filename , response , mode = 'ab+' ) else : self . _raise_cannot_continue_error ( ) | Process a restarted content response . |
12,621 | def _rename_with_content_disposition ( self , response : HTTPResponse ) : if not self . _filename : return if response . request . url_info . scheme not in ( 'http' , 'https' ) : return header_value = response . fields . get ( 'Content-Disposition' ) if not header_value : return filename = parse_content_disposition ( h... | Rename using the Content - Disposition header . |
12,622 | def session ( self ) -> BaseFileWriterSession : return self . session_class ( self . _path_namer , self . _file_continuing , self . _headers_included , self . _local_timestamping , self . _adjust_extension , self . _content_disposition , self . _trust_server_names , ) | Return the File Writer Session . |
12,623 | def parse_charset ( header_string ) : match = re . search ( r , header_string , re . IGNORECASE ) if match : return match . group ( 1 ) | Parse a Content - Type string for the document encoding . |
12,624 | def should_close ( http_version , connection_field ) : connection_field = ( connection_field or '' ) . lower ( ) if http_version == 'HTTP/1.0' : return connection_field . replace ( '-' , '' ) != 'keepalive' else : return connection_field == 'close' | Return whether the connection should be closed . |
12,625 | def seek_file_end ( file ) : try : file . seek ( 0 , 2 ) except ValueError : while True : data = file . read ( 4096 ) if not data : break | Seek to the end of the file . |
12,626 | def parse_iso8601_str ( string ) : datetime_obj = datetime . datetime . strptime ( string , "%Y-%m-%dT%H:%M:%SZ" ) return int ( calendar . timegm ( datetime_obj . utctimetuple ( ) ) ) | Parse a fixed ISO8601 datetime string . |
12,627 | def python_version ( ) : major , minor , patch = sys . version_info [ 0 : 3 ] return '{0}.{1}.{2}' . format ( major , minor , patch ) | Return the Python version as a string . |
12,628 | def filter_pem ( data ) : assert isinstance ( data , bytes ) , 'Expect bytes. Got {}.' . format ( type ( data ) ) certs = set ( ) new_list = [ ] in_pem_block = False for line in re . split ( br'[\r\n]+' , data ) : if line == b'-----BEGIN CERTIFICATE-----' : assert not in_pem_block in_pem_block = True elif line == b'---... | Processes the bytes for PEM certificates . |
12,629 | def rewrap_bytes ( data ) : return b'\n' . join ( data [ index : index + 70 ] for index in range ( 0 , len ( data ) , 70 ) ) | Rewrap characters to 70 character width . |
12,630 | def get_package_data ( filename , mode = 'rb' ) : if os . path . exists ( filename ) : with open ( filename , mode = mode ) as in_file : return in_file . read ( ) else : parts = os . path . normpath ( filename ) . split ( os . sep ) for part , index in zip ( parts , range ( len ( parts ) ) ) : if part . endswith ( '.zi... | Return the contents of a real file or a zip file . |
12,631 | def get_package_filename ( filename , package_dir = None ) : if getattr ( sys , 'frozen' , False ) : package_dir = os . path . join ( sys . _MEIPASS , os . path . basename ( os . path . dirname ( __file__ ) ) ) elif not package_dir : package_dir = os . path . dirname ( __file__ ) return os . path . join ( package_dir ,... | Return the filename of the data file . |
12,632 | def get_exception_message ( instance ) : args = getattr ( instance , 'args' , None ) if args : return str ( instance ) try : return type ( instance ) . __name__ except AttributeError : return str ( instance ) | Try to get the exception message or the class name . |
12,633 | def dump ( self , obj ) : pickle . dump ( obj , self . _file , protocol = self . _protocol ) | Pickle an object . |
12,634 | def quoted_attribute_value ( self , value ) : quote_with = '"' if '"' in value : if "'" in value : replace_with = "&quot;" value = value . replace ( '"' , replace_with ) else : quote_with = "'" return quote_with + value + quote_with | Make a value into a quoted XML attribute possibly escaping it . |
12,635 | def encodings ( self ) : tried = set ( ) for e in self . override_encodings : if self . _usable ( e , tried ) : yield e if self . _usable ( self . sniffed_encoding , tried ) : yield self . sniffed_encoding if self . declared_encoding is None : self . declared_encoding = self . find_declared_encoding ( self . markup , s... | Yield a number of encodings that might work for this markup . |
12,636 | def strip_byte_order_mark ( cls , data ) : encoding = None if ( len ( data ) >= 4 ) and ( data [ : 2 ] == b'\xfe\xff' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-16be' data = data [ 2 : ] elif ( len ( data ) >= 4 ) and ( data [ : 2 ] == b'\xff\xfe' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-1... | If a byte - order mark is present strip it and return the encoding it implies . |
12,637 | def find_declared_encoding ( cls , markup , is_html = False , search_entire_document = False ) : if search_entire_document : xml_endpos = html_endpos = len ( markup ) else : xml_endpos = 1024 html_endpos = max ( 2048 , int ( len ( markup ) * 0.05 ) ) declared_encoding = None declared_encoding_match = xml_encoding_re . ... | Given a document tries to find its declared encoding . |
12,638 | def _sub_ms_char ( self , match ) : orig = match . group ( 1 ) if self . smart_quotes_to == 'ascii' : sub = self . MS_CHARS_TO_ASCII . get ( orig ) . encode ( ) else : sub = self . MS_CHARS . get ( orig ) if type ( sub ) == tuple : if self . smart_quotes_to == 'xml' : sub = '&#x' . encode ( ) + sub [ 1 ] . encode ( ) +... | Changes a MS smart quote character to an XML or HTML entity or an ASCII character . |
12,639 | def detwingle ( cls , in_bytes , main_encoding = "utf8" , embedded_encoding = "windows-1252" ) : if embedded_encoding . replace ( '_' , '-' ) . lower ( ) not in ( 'windows-1252' , 'windows_1252' ) : raise NotImplementedError ( "Windows-1252 and ISO-8859-1 are the only currently supported " "embedded encodings." ) if ma... | Fix characters from one encoding embedded in some other encoding . |
12,640 | def scrape_file ( self , file , encoding = None , base_url = None ) : elements = self . iter_elements ( file , encoding = encoding ) link_contexts = set ( ) link_infos = self . _element_walker . iter_links ( elements ) for link_info in link_infos : element_base_url = base_url if link_info . base_link : clean_base_url =... | Scrape a file for links . |
12,641 | def _is_accepted ( self , element_tag ) : element_tag = element_tag . lower ( ) if self . _ignored_tags is not None and element_tag in self . _ignored_tags : return False if self . _followed_tags is not None : return element_tag in self . _followed_tags else : return True | Return if the link is accepted by the filters . |
12,642 | def iter_links ( self , elements ) : for element in elements : if not isinstance ( element , Element ) : continue for link_infos in self . iter_links_element ( element ) : yield link_infos | Iterate the document root for links . |
12,643 | def iter_links_element ( self , element ) : attrib = element . attrib tag = element . tag if tag == 'link' : iterable = self . iter_links_link_element ( element ) elif tag == 'meta' : iterable = self . iter_links_meta_element ( element ) elif tag in ( 'object' , 'applet' ) : iterable = self . iter_links_object_element ... | Iterate a HTML element . |
12,644 | def iter_links_element_text ( cls , element ) : if element . text : link_type = identify_link_type ( element . text ) yield LinkInfo ( element = element , tag = element . tag , attrib = None , link = element . text , inline = False , linked = True , base_link = None , value_type = 'plain' , link_type = link_type ) | Get the element text as a link . |
12,645 | def iter_links_link_element ( self , element ) : rel = element . attrib . get ( 'rel' , '' ) stylesheet = 'stylesheet' in rel icon = 'icon' in rel inline = stylesheet or icon if stylesheet : link_type = LinkType . css elif icon : link_type = LinkType . media else : link_type = None for attrib_name , link in self . iter... | Iterate a link for URLs . |
12,646 | def iter_links_meta_element ( cls , element ) : if element . attrib . get ( 'http-equiv' , '' ) . lower ( ) == 'refresh' : content_value = element . attrib . get ( 'content' ) if content_value : link = parse_refresh ( content_value ) if link : yield LinkInfo ( element = element , tag = element . tag , attrib = 'http-eq... | Iterate the meta element for links . |
12,647 | def iter_links_object_element ( cls , element ) : base_link = element . attrib . get ( 'codebase' , None ) if base_link : link_type = element . attrib . get ( base_link ) yield LinkInfo ( element = element , tag = element . tag , attrib = 'codebase' , link = base_link , inline = True , linked = False , base_link = None... | Iterate object and embed elements . |
12,648 | def iter_links_param_element ( cls , element ) : valuetype = element . attrib . get ( 'valuetype' , '' ) if valuetype . lower ( ) == 'ref' and 'value' in element . attrib : link_type = identify_link_type ( element . attrib . get ( 'value' ) ) yield LinkInfo ( element = element , tag = element . tag , attrib = 'value' ,... | Iterate a param element . |
12,649 | def iter_links_style_element ( self , element ) : if self . css_scraper and element . text : link_iter = self . css_scraper . scrape_links ( element . text , context = True ) for link , context in link_iter : if context == 'import' : link_type = LinkType . css else : link_type = LinkType . media yield LinkInfo ( elemen... | Iterate a style element . |
12,650 | def iter_links_script_element ( self , element ) : if self . javascript_scraper and element . text : link_iter = self . javascript_scraper . scrape_links ( element . text , context = True ) for link , context in link_iter : inline = is_likely_inline ( link ) if context is True : link_type = None else : link_type = cont... | Iterate a script element . |
12,651 | def iter_links_plain_element ( self , element ) : for attrib_name , link in self . iter_links_by_attrib ( element ) : if attrib_name in self . LINK_ATTRIBUTES : inline = self . is_link_inline ( element . tag , attrib_name ) linked = self . is_html_link ( element . tag , attrib_name ) else : inline = is_likely_inline ( ... | Iterate any element for links using generic rules . |
12,652 | def iter_links_by_attrib ( self , element ) : for attrib_name in element . attrib . keys ( ) : attrib_value = element . attrib . get ( attrib_name ) if attrib_name in self . LINK_ATTRIBUTES : if self . javascript_scraper and attrib_value . lstrip ( ) . startswith ( 'javascript:' ) : for link in self . iter_links_by_js_... | Iterate an element by looking at its attributes for links . |
12,653 | def iter_links_by_js_attrib ( self , attrib_name , attrib_value ) : links = self . javascript_scraper . scrape_links ( attrib_value ) for link in links : yield attrib_name , link | Iterate links of a JavaScript pseudo - link attribute . |
12,654 | def is_link_inline ( cls , tag , attribute ) : if tag in cls . TAG_ATTRIBUTES and attribute in cls . TAG_ATTRIBUTES [ tag ] : attr_flags = cls . TAG_ATTRIBUTES [ tag ] [ attribute ] return attr_flags & cls . ATTR_INLINE return attribute != 'href' | Return whether the link is likely to be inline object . |
12,655 | def is_html_link ( cls , tag , attribute ) : if tag in cls . TAG_ATTRIBUTES and attribute in cls . TAG_ATTRIBUTES [ tag ] : attr_flags = cls . TAG_ATTRIBUTES [ tag ] [ attribute ] return attr_flags & cls . ATTR_HTML return attribute == 'href' | Return whether the link is likely to be external object . |
12,656 | def robots_cannot_follow ( cls , element ) : return ( element . tag == 'meta' and element . attrib . get ( 'name' , '' ) . lower ( ) == 'robots' and 'nofollow' in element . attrib . get ( 'value' , '' ) . lower ( ) ) | Return whether we cannot follow links due to robots . txt directives . |
12,657 | def iter_processed_text ( self , file , encoding = None , base_url = None ) : for text , is_link in self . iter_text ( file , encoding ) : if is_link and base_url : new_link = urljoin_safe ( base_url , text , allow_fragments = False ) if new_link : yield ( new_link , is_link ) else : yield ( new_link , False ) else : y... | Return the file text and processed absolute links . |
12,658 | def scrape_links ( self , text , context = False ) : return self . iter_processed_links ( io . StringIO ( text ) , context = context ) | Convenience function for scraping from a text string . |
12,659 | def scrape ( self , request , response , link_type = None ) : for scraper in self . _document_scrapers : scrape_result = scraper . scrape ( request , response , link_type ) if scrape_result is None : continue if scrape_result . link_contexts : return scrape_result | Iterate the scrapers returning the first of the results . |
12,660 | def scrape_info ( self , request , response , link_type = None ) : info = { } for scraper in self . _document_scrapers : scrape_result = scraper . scrape ( request , response , link_type ) info [ scraper ] = scrape_result return info | Iterate the scrapers and return a dict of results . |
12,661 | def feed ( self , data_len , feed_time = None ) : self . _bytes_transferred += data_len self . _collected_bytes_transferred += data_len time_now = feed_time or time . time ( ) time_diff = time_now - self . _last_feed_time if time_diff < self . _sample_min_time : return self . _last_feed_time = time . time ( ) if data_l... | Update the bandwidth meter . |
12,662 | def speed ( self ) : if self . _stalled : return 0 time_sum = 0 data_len_sum = 0 for time_diff , data_len in self . _samples : time_sum += time_diff data_len_sum += data_len if time_sum : return data_len_sum / time_sum else : return 0 | Return the current transfer speed . |
12,663 | def get_info ( self ) : if self . _min_disk : for path in self . _resource_paths : usage = psutil . disk_usage ( path ) yield ResourceInfo ( path , usage . free , self . _min_disk ) if self . _min_memory : usage = psutil . virtual_memory ( ) yield ResourceInfo ( None , usage . available , self . _min_memory ) | Return ResourceInfo instances . |
12,664 | def check ( self ) : for info in self . get_info ( ) : if info . free < info . limit : return info | Check resource levels . |
12,665 | def load ( self , response ) : self . _response = response if self . next_location ( raw = True ) : self . _num_redirects += 1 | Load the response and increment the counter . |
12,666 | def next_location ( self , raw = False ) : if self . _response : location = self . _response . fields . get ( 'location' ) if not location or raw : return location return wpull . url . urljoin ( self . _response . request . url_info . url , location ) | Returns the next location . |
12,667 | def is_redirect ( self ) : if self . _response : status_code = self . _response . status_code return status_code in self . _codes or status_code in self . _repeat_codes | Return whether the response contains a redirect code . |
12,668 | def _build_resolver ( cls , session : AppSession ) : args = session . args dns_timeout = args . dns_timeout if args . timeout : dns_timeout = args . timeout if args . inet_family == 'IPv4' : family = IPFamilyPreference . ipv4_only elif args . inet_family == 'IPv6' : family = IPFamilyPreference . ipv6_only elif args . p... | Build resolver . |
12,669 | def _build_connection_pool ( cls , session : AppSession ) : args = session . args connect_timeout = args . connect_timeout read_timeout = args . read_timeout if args . timeout : connect_timeout = read_timeout = args . timeout if args . limit_rate : bandwidth_limiter = session . factory . new ( 'BandwidthLimiter' , args... | Create connection pool . |
12,670 | def convert_all ( self ) : for url_record in self . _url_table . get_all ( ) : if url_record . status != Status . done : continue self . convert_by_record ( url_record ) | Convert all links in URL table . |
12,671 | def read_chunk_header ( self ) : try : chunk_size_hex = yield from self . _connection . readline ( ) except ValueError as error : raise ProtocolError ( 'Invalid chunk size: {0}' . format ( error ) ) from error if not chunk_size_hex . endswith ( b'\n' ) : raise NetworkError ( 'Connection closed.' ) try : chunk_size = in... | Read a single chunk s header . |
12,672 | def read_chunk_body ( self ) : bytes_left = self . _bytes_left if bytes_left > 0 : size = min ( bytes_left , self . _read_size ) data = yield from self . _connection . read ( size ) self . _bytes_left -= len ( data ) return ( data , data ) elif bytes_left < 0 : raise ProtocolError ( 'Chunked-transfer overrun.' ) elif b... | Read a fragment of a single chunk . |
12,673 | def read_trailer ( self ) : _logger . debug ( 'Reading chunked trailer.' ) trailer_data_list = [ ] while True : trailer_data = yield from self . _connection . readline ( ) trailer_data_list . append ( trailer_data ) if not trailer_data . strip ( ) : break return b'' . join ( trailer_data_list ) | Read the HTTP trailer fields . |
12,674 | def get_version_tuple ( string ) : match = re . match ( r'(\d+)\.(\d+)\.?(\d*)([abc]?)(\d*)' , string ) major = int ( match . group ( 1 ) ) minor = int ( match . group ( 2 ) ) patch = int ( match . group ( 3 ) or 0 ) level = RELEASE_LEVEL_MAP . get ( match . group ( 4 ) , 'final' ) serial = int ( match . group ( 5 ) or... | Return a version tuple from a string . |
12,675 | def _read_input_urls ( cls , session : AppSession , default_scheme = 'http' ) : url_string_iter = session . args . urls or ( ) url_rewriter = session . factory . get ( 'URLRewriter' ) if session . args . input_file : if session . args . force_html : lines = cls . _input_file_as_html_links ( session ) else : lines = cls... | Read the URLs provided by the user . |
12,676 | def _input_file_as_lines ( cls , session : AppSession ) : if session . args . input_file == sys . stdin : input_file = session . args . input_file else : reader = codecs . getreader ( session . args . local_encoding or 'utf-8' ) input_file = reader ( session . args . input_file ) return input_file | Read lines from input file and return them . |
12,677 | def _input_file_as_html_links ( cls , session : AppSession ) : scrape_result = session . factory [ 'HTMLScraper' ] . scrape_file ( session . args . input_file , encoding = session . args . local_encoding or 'utf-8' ) for context in scrape_result . link_contexts : yield context . link | Read input file as HTML and return the links . |
12,678 | def _new_initial_request ( self , with_body : bool = True ) : url_record = self . _item_session . url_record url_info = url_record . url_info request = self . _item_session . app_session . factory [ 'WebClient' ] . request_factory ( url_info . url ) self . _populate_common_request ( request ) if with_body : if url_reco... | Return a new Request to be passed to the Web Client . |
12,679 | def _populate_common_request ( self , request ) : url_record = self . _item_session . url_record if url_record . parent_url and not request . fields . get ( 'Referer' ) : self . _add_referrer ( request , url_record ) if self . _fetch_rule . http_login : request . username , request . password = self . _fetch_rule . htt... | Populate the Request with common fields . |
12,680 | def _add_referrer ( cls , request : Request , url_record : URLRecord ) : if url_record . parent_url . startswith ( 'https://' ) and url_record . url_info . scheme == 'http' : return request . fields [ 'Referer' ] = url_record . parent_url | Add referrer URL to request . |
12,681 | def _process_robots ( self ) : try : self . _item_session . request = request = self . _new_initial_request ( with_body = False ) verdict , reason = ( yield from self . _should_fetch_reason_with_robots ( request ) ) except REMOTE_ERRORS as error : _logger . error ( _ ( 'Fetching robots.txt for ‘{url}’ ' 'encountered an... | Process robots . txt . |
12,682 | def _process_loop ( self ) : while not self . _web_client_session . done ( ) : self . _item_session . request = self . _web_client_session . next_request ( ) verdict , reason = self . _should_fetch_reason ( ) _logger . debug ( 'Filter verdict {} reason {}' , verdict , reason ) if not verdict : self . _item_session . sk... | Fetch URL including redirects . |
12,683 | def _next_url_info ( self ) -> URLInfo : if not self . _web_client_session : return self . _item_session . url_record . url_info return self . _web_client_session . next_request ( ) . url_info | Return the next URLInfo to be processed . |
12,684 | def _should_fetch_reason ( self ) -> Tuple [ bool , str ] : is_redirect = False if self . _strong_redirects : try : is_redirect = self . _web_client_session . redirect_tracker . is_redirect ( ) except AttributeError : pass return self . _fetch_rule . check_subsequent_web_request ( self . _item_session , is_redirect = i... | Return info about whether the URL should be fetched . |
12,685 | def _should_fetch_reason_with_robots ( self , request : Request ) -> Tuple [ bool , str ] : result = yield from self . _fetch_rule . check_initial_web_request ( self . _item_session , request ) return result | Return info whether the URL should be fetched including checking robots . txt . |
12,686 | def _add_post_data ( self , request : Request ) : if self . _item_session . url_record . post_data : data = wpull . string . to_bytes ( self . _item_session . url_record . post_data ) else : data = wpull . string . to_bytes ( self . _processor . fetch_params . post_data ) request . method = 'POST' request . fields [ 'C... | Add data to the payload . |
12,687 | def int_0_inf ( cls , string ) : if string == 'inf' : return 0 try : value = int ( string ) except ValueError as error : raise argparse . ArgumentTypeError ( error ) if value < 0 : raise argparse . ArgumentTypeError ( _ ( 'Value must not be negative.' ) ) else : return value | Convert string to int . |
12,688 | def int_bytes ( cls , string ) : if string [ - 1 ] in ( 'k' , 'm' ) : value = cls . int_0_inf ( string [ : - 1 ] ) unit = string [ - 1 ] if unit == 'k' : value *= 2 ** 10 else : value *= 2 ** 20 return value else : return cls . int_0_inf ( string ) | Convert string describing size to int . |
12,689 | def comma_list ( cls , string ) : items = string . split ( ',' ) items = list ( [ item . strip ( ) for item in items ] ) return items | Convert a comma separated string to list . |
12,690 | def comma_choice_list ( cls , string ) : items = string . split ( ',' ) items = CommaChoiceListArgs ( [ item . strip ( ) for item in items ] ) return items | Convert a comma separated string to CommaChoiceListArgs . |
12,691 | def read_links ( self , file , encoding = None ) : return [ item [ 0 ] for item in self . iter_text ( file , encoding ) if item [ 1 ] ] | Return an iterator of links found in the document . |
12,692 | def _build_file_writer ( cls , session : AppSession ) : args = session . args if args . delete_after : return session . factory . new ( 'FileWriter' ) elif args . output_document : session . factory . class_map [ 'FileWriter' ] = SingleDocumentWriter return session . factory . new ( 'FileWriter' , args . output_documen... | Create the File Writer . |
12,693 | def setup_signal_handlers ( self ) : if platform . system ( ) == 'Windows' : _logger . warning ( _ ( 'Graceful stopping with Unix signals is not supported ' 'on this OS.' ) ) return event_loop = asyncio . get_event_loop ( ) graceful_called = False def graceful_stop_callback ( ) : nonlocal graceful_called if graceful_ca... | Setup Ctrl + C and SIGTERM handlers . |
12,694 | def _update_exit_code_from_error ( self , error ) : for error_type , exit_code in self . ERROR_CODE_MAP . items ( ) : if isinstance ( error , error_type ) : self . update_exit_code ( exit_code ) break else : self . update_exit_code ( ExitStatus . generic_error ) | Set the exit code based on the error type . |
12,695 | def update_exit_code ( self , code : int ) : if code : if self . _exit_code : self . _exit_code = min ( self . _exit_code , code ) else : self . _exit_code = code | Set the exit code if it is serious than before . |
12,696 | def consult_robots_txt ( self , request : HTTPRequest ) -> bool : if not self . _robots_txt_checker : return True result = yield from self . _robots_txt_checker . can_fetch ( request ) return result | Consult by fetching robots . txt as needed . |
12,697 | def consult_filters ( self , url_info : URLInfo , url_record : URLRecord , is_redirect : bool = False ) -> Tuple [ bool , str , dict ] : if not self . _url_filter : return True , 'nofilters' , None test_info = self . _url_filter . test_info ( url_info , url_record ) verdict = test_info [ 'verdict' ] if verdict : reason... | Consult the URL filter . |
12,698 | def consult_hook ( self , item_session : ItemSession , verdict : bool , reason : str , test_info : dict ) : try : reasons = { 'filters' : test_info [ 'map' ] , 'reason' : reason , } verdict = self . hook_dispatcher . call ( PluginFunctions . accept_url , item_session , verdict , reasons , ) reason = 'callback_hook' exc... | Consult the scripting hook . |
12,699 | def check_initial_web_request ( self , item_session : ItemSession , request : HTTPRequest ) -> Tuple [ bool , str ] : verdict , reason , test_info = self . consult_filters ( item_session . request . url_info , item_session . url_record ) if verdict and self . _robots_txt_checker : can_fetch = yield from self . consult_... | Check robots . txt URL filters and scripting hook . |
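The `question` column stores each function as a single whitespace-separated token stream (long entries are truncated with `...`), and the `target` column holds the matching tokenized docstring summary. Below is a minimal sketch of how such a token stream might be collapsed back into readable Python surface syntax. The `detokenize` helper and its spacing rules are illustrative assumptions, not part of the dataset; because the dataset flattens each function onto one line, newlines and indentation are unrecoverable, so the output is readable but generally not runnable.

```python
import re

def detokenize(token_stream: str) -> str:
    """Heuristically rejoin a whitespace-separated Python token stream.

    Caveats of this assumed approach: block structure (newlines,
    indentation) is already lost in the data, and these regexes can
    also disturb spacing inside string literals that happen to contain
    spaced punctuation.
    """
    out = re.sub(r'\s+([\)\]\},:;.])', r'\1', token_stream)  # drop space before closers and punctuation
    out = re.sub(r'([\(\[\{.])\s+', r'\1', out)              # drop space after openers and dots
    out = re.sub(r'(\w)\s+\(', r'\1(', out)                  # glue call parentheses to names
    return out

# Applied to the idx 12,627 row above:
row = ("def python_version ( ) : major , minor , patch = "
       "sys . version_info [ 0 : 3 ] return '{0}.{1}.{2}' . format "
       "( major , minor , patch )")
print(detokenize(row))
# def python_version(): major, minor, patch = sys.version_info [0: 3] return '{0}.{1}.{2}'.format(major, minor, patch)
```

Note the `return` fused onto the same line as the assignment: the dropped newline is exactly what makes mechanical reconstruction lossy, so recovered rows are best treated as readable text rather than executable code.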