idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
12,300
def send ( self , destination , body , content_type = None , headers = None , ** keyword_headers ) : assert destination is not None , "'destination' is required" assert body is not None , "'body' is required" headers = utils . merge_headers ( [ headers , keyword_headers ] ) headers [ HDR_DESTINATION ] = destination if ...
Send a message to a destination .
12,301
def subscribe ( self , destination , id = None , ack = 'auto' , headers = None , ** keyword_headers ) : assert destination is not None , "'destination' is required" headers = utils . merge_headers ( [ headers , keyword_headers ] ) headers [ HDR_DESTINATION ] = destination if id : headers [ HDR_ID ] = id headers [ HDR_A...
Subscribe to a destination .
12,302
def unsubscribe ( self , destination = None , id = None , headers = None , ** keyword_headers ) : assert id is not None or destination is not None , "'id' or 'destination' is required" headers = utils . merge_headers ( [ headers , keyword_headers ] ) if id : headers [ HDR_ID ] = id if destination : headers [ HDR_DESTIN...
Unsubscribe from a destination by either id or the destination name .
12,303
def connect ( self , username = None , passcode = None , wait = False , headers = None , ** keyword_headers ) : cmd = CMD_STOMP headers = utils . merge_headers ( [ headers , keyword_headers ] ) headers [ HDR_ACCEPT_VERSION ] = self . version if self . transport . vhost : headers [ HDR_HOST ] = self . transport . vhost ...
Start a connection .
12,304
def get_socket ( host , port , timeout = None ) : for res in getaddrinfo ( host , port , 0 , SOCK_STREAM ) : af , socktype , proto , canonname , sa = res sock = None try : sock = socket ( af , socktype , proto ) if timeout is not None : sock . settimeout ( timeout ) sock . connect ( sa ) return sock except error : if s...
Return a socket .
12,305
def start(self):
    """Begin receiving frames.

    Call this after all listeners have been registered; until it runs,
    no frames are delivered.  Marks the connection as running, attempts
    the initial connection, names and starts the receiver thread, and
    notifies listeners of the 'connecting' state.
    """
    self.running = True
    self.attempt_connection()
    thread = self.create_thread_fc(self.__receiver_loop)
    base_name = getattr(thread, "name", "Thread")
    thread.name = "StompReceiver%s" % base_name
    self.notify('connecting')
Start the connection . This should be called after all listeners have been registered . If this method is not called no frames will be received by the connection .
12,306
def stop(self):
    """Stop the connection with a clean shutdown.

    Blocks until the receiver thread signals that it has exited, or
    until the connection is no longer established.
    """
    with self.__receiver_thread_exit_condition:
        # Condition wake-ups may be spurious; re-check the predicate
        # each time around the loop.
        while not self.__receiver_thread_exited and self.is_connected():
            self.__receiver_thread_exit_condition.wait()
Stop the connection . Performs a clean shutdown by waiting for the receiver thread to exit .
12,307
def notify ( self , frame_type , headers = None , body = None ) : if frame_type == 'receipt' : receipt = headers [ 'receipt-id' ] receipt_value = self . __receipts . get ( receipt ) with self . __send_wait_condition : self . set_receipt ( receipt , None ) self . __send_wait_condition . notify ( ) if receipt_value == CM...
Utility function for notifying listeners of incoming and outgoing messages
12,308
def transmit ( self , frame ) : with self . __listeners_change_condition : listeners = sorted ( self . listeners . items ( ) ) for ( _ , listener ) in listeners : if not listener : continue try : listener . on_send ( frame ) except AttributeError : continue if frame . cmd == CMD_DISCONNECT and HDR_RECEIPT in frame . he...
Convert a frame object to a frame string and transmit to the server .
12,309
def wait_for_connection ( self , timeout = None ) : if timeout is not None : wait_time = timeout / 10.0 else : wait_time = None with self . __connect_wait_condition : while self . running and not self . is_connected ( ) and not self . connection_error : self . __connect_wait_condition . wait ( wait_time ) if not self ....
Wait until we've established a connection with the server.
12,310
def __receiver_loop ( self ) : log . info ( "Starting receiver loop" ) notify_disconnected = True try : while self . running : try : while self . running : frames = self . __read ( ) for frame in frames : f = utils . parse_frame ( frame ) if f is None : continue if self . __auto_decode : f . body = decode ( f . body ) ...
Main loop listening for incoming data .
12,311
def is_connected(self):
    """Return True if the socket managed by this connection is connected.

    A socket whose local port is 0 was never connected; the final word
    goes to BaseTransport.is_connected.  Any socket error is treated as
    not connected.
    """
    try:
        # getsockname()[1] is the bound local port; 0 means unconnected.
        return self.socket is not None and self.socket.getsockname()[1] != 0 and BaseTransport.is_connected(self)
    except socket.error:
        return False
Return true if the socket managed by this connection is connected
12,312
def disconnect_socket ( self ) : self . running = False if self . socket is not None : if self . __need_ssl ( ) : try : self . socket = self . socket . unwrap ( ) except Exception : _ , e , _ = sys . exc_info ( ) log . warning ( e ) elif hasattr ( socket , 'SHUT_RDWR' ) : try : self . socket . shutdown ( socket . SHUT_...
Disconnect the underlying socket connection
12,313
def set_ssl ( self , for_hosts = [ ] , key_file = None , cert_file = None , ca_certs = None , cert_validator = None , ssl_version = DEFAULT_SSL_VERSION , password = None ) : if not ssl : raise Exception ( "SSL connection requested, but SSL library not found" ) for host_port in for_hosts : self . __ssl_params [ host_por...
Sets up SSL configuration for the given hosts. This ensures the socket is wrapped in an SSL connection, raising an exception if the SSL module can't be found.
12,314
def __need_ssl(self, host_and_port=None):
    """Return whether the given host needs SSL.

    :param host_and_port: (host, port) tuple; defaults to the host
        currently connected to.
    """
    if not host_and_port:
        host_and_port = self.current_host_and_port
    return host_and_port in self.__ssl_params
Whether current host needs SSL or not .
12,315
def get_ssl(self, host_and_port=None):
    """Return the SSL parameters registered for a host.

    :param host_and_port: (host, port) tuple; when omitted, the host
        currently connected to is used.
    :return: the stored SSL parameter dict, or None when the host has
        no SSL configuration.
    """
    key = host_and_port or self.current_host_and_port
    return self.__ssl_params.get(key)
Get SSL params for the given host .
12,316
def __print_async ( self , frame_type , headers , body ) : if self . __quit : return if self . verbose : self . __sysout ( frame_type ) for k , v in headers . items ( ) : self . __sysout ( '%s: %s' % ( k , v ) ) else : if 'message-id' in headers : self . __sysout ( 'message-id: %s' % headers [ 'message-id' ] ) if 'subs...
Utility function to print a message and setup the command prompt for the next input
12,317
def simple_tokenize(text, include_punctuation=False):
    """Tokenize text using a straightforward Unicode-aware regex.

    The text is NFC-normalized first and every token is case-folded.
    When punctuation is excluded, surrounding apostrophes are stripped
    from each token as well.
    """
    normalized = unicodedata.normalize('NFC', text)
    if include_punctuation:
        matches = TOKEN_RE_WITH_PUNCTUATION.findall(normalized)
        return [match.casefold() for match in matches]
    matches = TOKEN_RE.findall(normalized)
    return [match.strip("'").casefold() for match in matches]
Tokenize the given text using a straightforward Unicode - aware token expression .
12,318
def tokenize ( text , lang , include_punctuation = False , external_wordlist = False ) : global _mecab_tokenize , _jieba_tokenize language = langcodes . get ( lang ) info = get_language_info ( language ) text = preprocess_text ( text , language ) if info [ 'tokenizer' ] == 'mecab' : from wordfreq . mecab import mecab_t...
Tokenize this text in a way that's relatively simple but appropriate for the language. Strings that are looked up in wordfreq will be run through this function first, so that they can be expected to match the data.
12,319
def lossy_tokenize ( text , lang , include_punctuation = False , external_wordlist = False ) : global _simplify_chinese info = get_language_info ( lang ) tokens = tokenize ( text , lang , include_punctuation , external_wordlist ) if info [ 'lookup_transliteration' ] == 'zh-Hans' : from wordfreq . chinese import simplif...
Get a list of tokens for this text, with largely the same results and options as tokenize, but aggressively normalize some text in a lossy way that's good for counting word frequencies.
12,320
def read_cBpack ( filename ) : with gzip . open ( filename , 'rb' ) as infile : data = msgpack . load ( infile , raw = False ) header = data [ 0 ] if ( not isinstance ( header , dict ) or header . get ( 'format' ) != 'cB' or header . get ( 'version' ) != 1 ) : raise ValueError ( "Unexpected header: %r" % header ) retur...
Read a file from an idiosyncratic format that we use for storing approximate word frequencies called cBpack .
12,321
def available_languages ( wordlist = 'best' ) : if wordlist == 'best' : available = available_languages ( 'small' ) available . update ( available_languages ( 'large' ) ) return available elif wordlist == 'combined' : logger . warning ( "The 'combined' wordlists have been renamed to 'small'." ) wordlist = 'small' avail...
Given a wordlist name return a dictionary of language codes to filenames representing all the languages in which that wordlist is available .
12,322
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
    """Return a mapping of token -> frequency (floating-point probability).

    Bucket N of the packed frequency list corresponds to -N centibels,
    so every word within a bucket shares the same frequency value.
    """
    return {
        word: cB_to_freq(-index)
        for index, bucket in enumerate(
            get_frequency_list(lang, wordlist, match_cutoff))
        for word in bucket
    }
Get a word frequency list as a dictionary mapping tokens to frequencies as floating - point probabilities .
12,323
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """Look up the frequency of `word` in language `lang`, memoized.

    Results are cached in _wf_cache; once the cache grows past
    CACHE_SIZE it is flushed wholesale instead of evicting entries
    one at a time.
    """
    key = (word, lang, wordlist, minimum)
    if key not in _wf_cache:
        if len(_wf_cache) >= CACHE_SIZE:
            # Cheap bounded-cache policy: drop everything.
            _wf_cache.clear()
        _wf_cache[key] = _word_frequency(*key)
    return _wf_cache[key]
Get the frequency of word in the language with code lang from the specified wordlist .
12,324
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
    """Get the frequency of `word` in language `lang` on the Zipf scale.

    `minimum` is expressed on the Zipf scale and is converted to a raw
    frequency floor before the lookup.  The result is rounded to two
    decimal places.
    """
    freq_min = zipf_to_freq(minimum)
    freq = word_frequency(word, lang, wordlist, freq_min)
    return round(freq_to_zipf(freq), 2)
Get the frequency of word in the language with code lang on the Zipf scale .
12,325
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    """Return up to `n` words from the wordlist, most frequent first.

    When ascii_only is set, any word containing a character above '~'
    is skipped.
    """
    top_words = []
    for word in iter_wordlist(lang, wordlist):
        if ascii_only and max(word) > '~':
            continue
        top_words.append(word)
        if len(top_words) >= n:
            break
    return top_words
Return a frequency list of length n in descending order of frequency . This list contains words from wordlist of the given language . If ascii_only then only ascii words are considered .
12,326
def random_words ( lang = 'en' , wordlist = 'best' , nwords = 5 , bits_per_word = 12 , ascii_only = False ) : n_choices = 2 ** bits_per_word choices = top_n_list ( lang , n_choices , wordlist , ascii_only = ascii_only ) if len ( choices ) < n_choices : raise ValueError ( "There aren't enough words in the wordlist to pr...
Returns a string of random space separated words .
12,327
def random_ascii_words(lang='en', wordlist='best', nwords=5, bits_per_word=12):
    """Return a string of random, space-separated ASCII words.

    Thin wrapper around random_words with ascii_only forced on.
    """
    return random_words(
        lang=lang,
        wordlist=wordlist,
        nwords=nwords,
        bits_per_word=bits_per_word,
        ascii_only=True,
    )
Returns a string of random space separated ASCII words .
12,328
def jieba_tokenize ( text , external_wordlist = False ) : global jieba_tokenizer , jieba_orig_tokenizer if external_wordlist : if jieba_orig_tokenizer is None : jieba_orig_tokenizer = jieba . Tokenizer ( dictionary = ORIG_DICT_FILENAME ) return jieba_orig_tokenizer . lcut ( text ) else : if jieba_tokenizer is None : ji...
Tokenize the given text into tokens whose word frequencies can probably be looked up . This uses Jieba a word - frequency - based tokenizer .
12,329
def preprocess_text ( text , language ) : info = get_language_info ( language ) text = unicodedata . normalize ( info [ 'normal_form' ] , text ) if info [ 'transliteration' ] is not None : text = transliterate ( info [ 'transliteration' ] , text ) if info [ 'remove_marks' ] : text = remove_marks ( text ) if info [ 'dot...
This function applies pre - processing steps that convert forms of words considered equivalent into one standardized form .
12,330
def _language_in_list(language, targets, min_score=80):
    """Return whether `language` matches one of the `targets` with a
    match score above `min_score`.

    best_match returns a (language, score) pair; a score of 0 means no
    acceptable match was found.
    """
    matched = best_match(language, targets, min_score=min_score)
    return matched[1] > 0
A helper function to determine whether this language matches one of the target languages with a match score above a certain threshold .
12,331
def mecab_tokenize ( text , lang ) : if lang not in MECAB_DICTIONARY_NAMES : raise ValueError ( "Can't run MeCab on language %r" % lang ) if lang not in MECAB_ANALYZERS : MECAB_ANALYZERS [ lang ] = make_mecab_analyzer ( MECAB_DICTIONARY_NAMES [ lang ] ) analyzer = MECAB_ANALYZERS [ lang ] text = unicodedata . normalize...
Use the mecab - python3 package to tokenize the given text . The lang must be ja for Japanese or ko for Korean .
12,332
def transliterate(table, text):
    """Transliterate `text` using the named translation table.

    Supported tables are 'sr-Latn' and 'az-Latn'; any other name
    raises ValueError.
    """
    tables = {'sr-Latn': SR_LATN_TABLE, 'az-Latn': AZ_LATN_TABLE}
    if table not in tables:
        raise ValueError("Unknown transliteration table: {!r}".format(table))
    return text.translate(tables[table])
Transliterate text according to one of the tables above .
12,333
def _update_exit_code_from_stats(cls, statistics: Statistics, app: Application):
    """Set the application's exit code based on recorded error types.

    Each error type in the statistics is mapped through
    app.ERROR_CODE_MAP; unmapped or falsy codes are ignored.
    """
    for error_type in statistics.errors:
        exit_code = app.ERROR_CODE_MAP.get(error_type)
        # Falsy codes (None or 0) do not change the exit status.
        if exit_code:
            app.update_exit_code(exit_code)
Set the current exit code based on the Statistics .
12,334
def is_response(cls, response):
    """Return True if the response body looks like a Sitemap.

    Returns None (falsy) when the response has no body or the body
    does not look like a Sitemap file.
    """
    body = response.body
    if body and cls.is_file(body):
        return True
Return whether the document is likely to be a Sitemap .
12,335
def is_file ( cls , file ) : peeked_data = wpull . util . peek_file ( file ) if is_gzip ( peeked_data ) : try : peeked_data = wpull . decompression . gzip_uncompress ( peeked_data , truncated = True ) except zlib . error : pass peeked_data = wpull . string . printable_bytes ( peeked_data ) if b'<?xml' in peeked_data an...
Return whether the file is likely a Sitemap .
12,336
def normalize_hostname(hostname):
    """Normalize a hostname to its lowercase ASCII (IDNA) form.

    Raises UnicodeError with a descriptive message when the hostname
    cannot be IDNA-encoded.
    """
    try:
        ascii_name = hostname.encode('idna').decode('ascii').lower()
    except UnicodeError as error:
        message = 'Hostname {} rejected: {}'.format(hostname, error)
        raise UnicodeError(message) from error
    if ascii_name != hostname:
        # Re-validate the transformed name; the result is intentionally
        # discarded — only the exception matters.
        ascii_name.encode('idna')
    return ascii_name
Normalizes a hostname so that it is ASCII and valid domain name .
12,337
def normalize_path(path, encoding='utf-8'):
    """Normalize a URL path string.

    Ensures a leading slash, flattens dot segments and duplicate
    slashes, percent-encodes, and uppercases existing percent escapes.
    """
    rooted = path if path.startswith('/') else '/' + path
    flattened = flatten_path(rooted, flatten_slashes=True)
    encoded = percent_encode(flattened, encoding=encoding)
    return uppercase_percent_encoding(encoded)
Normalize a path string .
12,338
def normalize_query(text, encoding='utf-8'):
    """Normalize a query string.

    Percent-encodes with '+' substituted for spaces, then uppercases
    existing percent escapes.
    """
    encoded = percent_encode_plus(text, encoding=encoding)
    return uppercase_percent_encoding(encoded)
Normalize a query string .
12,339
def normalize_fragment(text, encoding='utf-8'):
    """Normalize a URL fragment.

    Percent-encodes using the fragment encode set, then uppercases
    existing percent escapes.
    """
    encoded = percent_encode(
        text, encoding=encoding, encode_set=FRAGMENT_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
Normalize a fragment .
12,340
def normalize_username(text, encoding='utf-8'):
    """Normalize a URL username.

    Percent-encodes using the username encode set, then uppercases
    existing percent escapes.
    """
    encoded = percent_encode(
        text, encoding=encoding, encode_set=USERNAME_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
Normalize a username
12,341
def normalize_password(text, encoding='utf-8'):
    """Normalize a URL password.

    Percent-encodes using the password encode set, then uppercases
    existing percent escapes.
    """
    encoded = percent_encode(
        text, encoding=encoding, encode_set=PASSWORD_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
Normalize a password
12,342
def percent_encode ( text , encode_set = DEFAULT_ENCODE_SET , encoding = 'utf-8' ) : byte_string = text . encode ( encoding ) try : mapping = _percent_encoder_map_cache [ encode_set ] except KeyError : mapping = _percent_encoder_map_cache [ encode_set ] = PercentEncoderMap ( encode_set ) . __getitem__ return '' . join ...
Percent encode text .
12,343
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET, encoding='utf-8'):
    """Percent-encode text for query strings, using '+' for spaces.

    The space substitution is only applied when the input actually
    contains a space.
    """
    encoded = percent_encode(text, encode_set, encoding)
    if ' ' in text:
        encoded = encoded.replace(' ', '+')
    return encoded
Percent encode text for query strings .
12,344
def schemes_similar(scheme1, scheme2):
    """Return whether two URL schemes are interchangeable.

    Identical schemes are similar, and 'http'/'https' are treated as
    similar to each other.
    """
    if scheme1 == scheme2:
        return True
    web_schemes = ('http', 'https')
    return scheme1 in web_schemes and scheme2 in web_schemes
Return whether URL schemes are similar .
12,345
def is_subdir ( base_path , test_path , trailing_slash = False , wildcards = False ) : if trailing_slash : base_path = base_path . rsplit ( '/' , 1 ) [ 0 ] + '/' test_path = test_path . rsplit ( '/' , 1 ) [ 0 ] + '/' else : if not base_path . endswith ( '/' ) : base_path += '/' if not test_path . endswith ( '/' ) : tes...
Return whether a path is a subpath of another.
12,346
def uppercase_percent_encoding(text):
    """Uppercase the hex digits of percent-encoded sequences.

    URL normalization treats '%3a' and '%3A' as equivalent; this
    canonicalizes both to the uppercase spelling.  BUG FIX: the
    original pattern matched only all-lowercase pairs, so mixed-case
    escapes such as '%aF' were left untouched; the pattern now matches
    any hex pair.  Text without a '%' is returned unchanged.
    """
    if '%' not in text:
        return text
    return re.sub(
        r'%[0-9a-fA-F]{2}',
        lambda match: match.group(0).upper(),
        text)
Uppercases percent - encoded sequences .
12,347
def split_query(qs, keep_blank_values=False):
    """Split a query string into (name, value) pairs.

    Pairs with an empty value are dropped unless keep_blank_values is
    set; a pair with no '=' at all gets a value of None when blanks
    are kept.
    """
    pairs = []
    for chunk in qs.split('&'):
        name, equals, value = chunk.partition('=')
        if not equals and keep_blank_values:
            value = None
        if keep_blank_values or value:
            pairs.append((name, value))
    return pairs
Split the query string .
12,348
def query_to_map(text):
    """Return a key -> list-of-values mapping from a query string.

    Blank values are kept as empty strings, and '+' is decoded to a
    space within values.
    """
    dict_obj = {}
    for key, value in split_query(text, True):
        if key not in dict_obj:
            dict_obj[key] = []
        if value:
            dict_obj[key].append(value.replace('+', ' '))
        else:
            dict_obj[key].append('')
    # BUG FIX: the original ended with `return query_to_map(text)`,
    # which recursed forever; return the accumulated mapping instead.
    return dict_obj
Return a key - values mapping from a query string .
12,349
def urljoin ( base_url , url , allow_fragments = True ) : if url . startswith ( '//' ) and len ( url ) > 2 : scheme = base_url . partition ( ':' ) [ 0 ] if scheme : return urllib . parse . urljoin ( base_url , '{0}:{1}' . format ( scheme , url ) , allow_fragments = allow_fragments ) return urllib . parse . urljoin ( ba...
Join URLs like urllib . parse . urljoin but allow scheme - relative URL .
12,350
def flatten_path ( path , flatten_slashes = False ) : if not path or path == '/' : return '/' if path [ 0 ] == '/' : path = path [ 1 : ] parts = path . split ( '/' ) new_parts = collections . deque ( ) for part in parts : if part == '.' or ( flatten_slashes and not part ) : continue elif part != '..' : new_parts . appe...
Flatten an absolute URL path by removing the dot segments .
12,351
def parse ( cls , url , default_scheme = 'http' , encoding = 'utf-8' ) : if url is None : return None url = url . strip ( ) if frozenset ( url ) & C0_CONTROL_SET : raise ValueError ( 'URL contains control codes: {}' . format ( ascii ( url ) ) ) scheme , sep , remaining = url . partition ( ':' ) if not scheme : raise Va...
Parse a URL and return a URLInfo .
12,352
def parse_authority(cls, authority):
    """Split an authority component into (userinfo, host).

    When there is no '@', the entire string is the host and the
    userinfo is empty.
    """
    userinfo, at_sign, host = authority.partition('@')
    if at_sign:
        return userinfo, host
    return '', userinfo
Parse the authority part and return userinfo and host .
12,353
def parse_userinfo(cls, userinfo):
    """Split userinfo into (username, password).

    The password is '' when no ':' separator is present.
    """
    username, _, password = userinfo.partition(':')
    return username, password
Parse the userinfo and return username and password .
12,354
def parse_host ( cls , host ) : if host . endswith ( ']' ) : return cls . parse_hostname ( host ) , None else : hostname , sep , port = host . rpartition ( ':' ) if sep : port = int ( port ) if port < 0 or port > 65535 : raise ValueError ( 'Port number invalid' ) else : hostname = port port = None return cls . parse_ho...
Parse the host and return hostname and port .
12,355
def parse_hostname ( cls , hostname ) : if hostname . startswith ( '[' ) : return cls . parse_ipv6_hostname ( hostname ) else : try : new_hostname = normalize_ipv4_address ( hostname ) except ValueError : new_hostname = hostname new_hostname = normalize_hostname ( new_hostname ) if any ( char in new_hostname for char i...
Parse the hostname and normalize .
12,356
def parse_ipv6_hostname(cls, hostname):
    """Parse a bracketed IPv6 literal and return its compressed form.

    Raises ValueError when the brackets are missing or the address is
    otherwise invalid.
    """
    is_bracketed = hostname.startswith('[') and hostname.endswith(']')
    if not is_bracketed:
        raise ValueError('Invalid IPv6 address: {}'.format(ascii(hostname)))
    return ipaddress.IPv6Address(hostname[1:-1]).compressed
Parse and normalize an IPv6 address.
12,357
def to_dict ( self ) : return dict ( raw = self . raw , scheme = self . scheme , authority = self . authority , netloc = self . authority , path = self . path , query = self . query , fragment = self . fragment , userinfo = self . userinfo , username = self . username , password = self . password , host = self . host ,...
Return a dict of the attributes .
12,358
def is_port_default(self):
    """Return whether the URL uses its scheme's default port.

    Returns None (falsy) when the scheme has no registered default
    port.
    """
    if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
        return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port
Return whether the URL is using the default port .
12,359
def hostname_with_port ( self ) : default_port = RELATIVE_SCHEME_DEFAULT_PORTS . get ( self . scheme ) if not default_port : return '' assert '[' not in self . hostname assert ']' not in self . hostname if self . is_ipv6 ( ) : hostname = '[{}]' . format ( self . hostname ) else : hostname = self . hostname if default_p...
Return the host portion but omit default port if needed .
12,360
def _new_url_record(cls, request: Request) -> URLRecord:
    """Return a fresh URLRecord for `request`.

    The record is marked in-progress with a zeroed try count and
    level.
    """
    url_record = URLRecord()
    url_record.url = request.url_info.url
    url_record.status = Status.in_progress
    url_record.try_count = 0
    url_record.level = 0
    return url_record
Return new empty URLRecord .
12,361
def _server_begin_response_callback ( self , response : Response ) : self . _item_session . response = response if self . _cookie_jar : self . _cookie_jar . extract_cookies ( response , self . _item_session . request ) action = self . _result_rule . handle_pre_response ( self . _item_session ) self . _file_writer_sessi...
Pre - response callback handler .
12,362
def _server_end_response_callback ( self , respoonse : Response ) : request = self . _item_session . request response = self . _item_session . response _logger . info ( __ ( _ ( 'Fetched ‘{url}’: {status_code} {reason}. ' 'Length: {content_length} [{content_type}].' ) , url = request . url , status_code = response . st...
Response callback handler .
12,363
def _init_stream ( self ) : assert not self . _control_connection self . _control_connection = yield from self . _acquire_request_connection ( self . _request ) self . _control_stream = ControlStream ( self . _control_connection ) self . _commander = Commander ( self . _control_stream ) read_callback = functools . part...
Create streams and commander .
12,364
def _log_in ( self ) : username = self . _request . url_info . username or self . _request . username or 'anonymous' password = self . _request . url_info . password or self . _request . password or '-wpull@' cached_login = self . _login_table . get ( self . _control_connection ) if cached_login and cached_login == ( u...
Connect and login .
12,365
def start ( self , request : Request ) -> Response : if self . _session_state != SessionState . ready : raise RuntimeError ( 'Session not ready' ) response = Response ( ) yield from self . _prepare_fetch ( request , response ) response . file_transfer_size = yield from self . _fetch_size ( request ) if request . restar...
Start a file or directory listing download .
12,366
def start_listing ( self , request : Request ) -> ListingResponse : if self . _session_state != SessionState . ready : raise RuntimeError ( 'Session not ready' ) response = ListingResponse ( ) yield from self . _prepare_fetch ( request , response ) yield from self . _open_data_stream ( ) mlsd_command = Command ( 'MLSD'...
Fetch a file listing .
12,367
def _prepare_fetch ( self , request : Request , response : Response ) : self . _request = request self . _response = response yield from self . _init_stream ( ) connection_closed = self . _control_connection . closed ( ) if connection_closed : self . _login_table . pop ( self . _control_connection , None ) yield from s...
Prepare for a fetch .
12,368
def _begin_stream(self, command: Command):
    """Start the data-stream transfer for `command` (coroutine).

    Stores the server's initial reply on the current response and
    notifies listeners that the transfer has begun.
    """
    begin_reply = yield from self._commander.begin_stream(command)
    self._response.reply = begin_reply
    self.event_dispatcher.notify(self.Event.begin_transfer, self._response)
Start data stream transfer .
12,369
def download_listing ( self , file : Optional [ IO ] , duration_timeout : Optional [ float ] = None ) -> ListingResponse : if self . _session_state != SessionState . directory_request_sent : raise RuntimeError ( 'File request not sent' ) self . _session_state = SessionState . file_request_sent yield from self . downloa...
Read file listings .
12,370
def _open_data_stream ( self ) : @ asyncio . coroutine def connection_factory ( address : Tuple [ int , int ] ) : self . _data_connection = yield from self . _acquire_connection ( address [ 0 ] , address [ 1 ] ) return self . _data_connection self . _data_stream = yield from self . _commander . setup_data_stream ( conn...
Open the data stream connection .
12,371
def _fetch_size(self, request: Request) -> int:
    """Return the size of the remote file (coroutine).

    Returns None when the server rejects the SIZE command.
    """
    try:
        size = yield from self._commander.size(request.file_path)
        return size
    except FTPServerError:
        # Not all servers support SIZE; treat failure as "unknown".
        return
Return size of file .
12,372
def parse_refresh(text):
    """Extract the URL from an HTTP Refresh header value.

    Returns None when no 'url=' component is present; surrounding
    single or double quotes are stripped from the URL.
    """
    match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
    if not match:
        return
    url = match.group(1)
    for quote in ('"', "'"):
        if url.startswith(quote):
            url = url.strip(quote)
            break
    return clean_link_soup(url)
Parses text for HTTP Refresh URL .
12,373
def is_likely_inline(link):
    """Return whether the link probably points at an inline resource
    (image, video, audio, or JavaScript).

    Returns None (falsy) when the MIME type cannot be guessed from the
    filename.
    """
    mime_type = mimetypes.guess_type(link, strict=False)[0]
    if not mime_type:
        return
    top_level_type, subtype = mime_type.split('/', 1)
    media_types = ('image', 'video', 'audio')
    return top_level_type in media_types or subtype == 'javascript'
Return whether the link is likely to be inline .
12,374
def is_likely_link ( text ) : text = text . lower ( ) if ( text . startswith ( 'http://' ) or text . startswith ( 'https://' ) or text . startswith ( 'ftp://' ) or text . startswith ( '/' ) or text . startswith ( '//' ) or text . endswith ( '/' ) or text . startswith ( '../' ) ) : return True dummy , dot , file_extensi...
Return whether the text is likely to be a link .
12,375
def is_unlikely_link ( text ) : if text [ : 1 ] in ',;+:' or text [ - 1 : ] in '.,;+:' : return True if re . search ( r , text ) : return True if text [ : 1 ] == '.' and not text . startswith ( './' ) and not text . startswith ( '../' ) : return True if text in ( '/' , '//' ) : return True if '//' in text and '://' not...
Return whether the text is likely to cause false positives .
12,376
def identify_link_type ( filename ) : mime_type = mimetypes . guess_type ( filename ) [ 0 ] if not mime_type : return if mime_type == 'text/css' : return LinkType . css elif mime_type == 'application/javascript' : return LinkType . javascript elif mime_type == 'text/html' or mime_type . endswith ( 'xml' ) : return Link...
Return link type guessed by filename extension .
12,377
def new_encoded_stream(args, stream):
    """Return `stream`, wrapped in an ASCII-escaping writer when the
    ascii_print option is set on `args`."""
    if not args.ascii_print:
        return stream
    return wpull.util.ASCIIStreamWriter(stream)
Return a stream writer .
12,378
def _schedule(self):
    """Schedule the next timeout check, if the timer is running.

    The returned handle is kept so close() can cancel the pending
    callback.
    """
    if self._running:
        _logger.debug('Schedule check function.')
        self._call_later_handle = self._event_loop.call_later(self._timeout, self._check)
Schedule check function .
12,379
def _check ( self ) : _logger . debug ( 'Check if timeout.' ) self . _call_later_handle = None if self . _touch_time is not None : difference = self . _event_loop . time ( ) - self . _touch_time _logger . debug ( 'Time difference %s' , difference ) if difference > self . _timeout : self . _connection . close ( ) self ....
Check and close connection if needed .
12,380
def close(self):
    """Stop the timer: cancel any pending check callback and mark the
    timer as no longer running."""
    handle = self._call_later_handle
    if handle:
        handle.cancel()
    self._running = False
Stop running timers .
12,381
def closed(self) -> bool:
    """Return whether the connection is closed.

    The connection counts as closed when either stream object is
    missing or the reader has reached EOF.
    """
    if not self.writer or not self.reader:
        return True
    return self.reader.at_eof()
Return whether the connection is closed .
12,382
def connect ( self ) : _logger . debug ( __ ( 'Connecting to {0}.' , self . _address ) ) if self . _state != ConnectionState . ready : raise Exception ( 'Closed connection must be reset before reusing.' ) if self . _sock : connection_future = asyncio . open_connection ( sock = self . _sock , ** self . _connection_kwarg...
Establish a connection .
12,383
def readline ( self ) -> bytes : assert self . _state == ConnectionState . created , 'Expect conn created. Got {}.' . format ( self . _state ) with self . _close_timer . with_timeout ( ) : data = yield from self . run_network_operation ( self . reader . readline ( ) , close_timeout = self . _timeout , name = 'Readline'...
Read a line of data .
12,384
def run_network_operation ( self , task , wait_timeout = None , close_timeout = None , name = 'Network operation' ) : if wait_timeout is not None and close_timeout is not None : raise Exception ( 'Cannot use wait_timeout and close_timeout at the same time' ) try : if close_timeout is not None : with self . _close_timer...
Run the task and raise appropriate exceptions .
12,385
def start_tls ( self , ssl_context : Union [ bool , dict , ssl . SSLContext ] = True ) -> 'SSLConnection' : sock = self . writer . get_extra_info ( 'socket' ) ssl_conn = SSLConnection ( self . _address , ssl_context = ssl_context , hostname = self . _hostname , timeout = self . _timeout , connect_timeout = self . _conn...
Start client TLS on this connection and return SSLConnection .
12,386
def _verify_cert ( self , sock : ssl . SSLSocket ) : verify_mode = self . _ssl_context . verify_mode assert verify_mode in ( ssl . CERT_NONE , ssl . CERT_REQUIRED , ssl . CERT_OPTIONAL ) , 'Unknown verify mode {}' . format ( verify_mode ) if verify_mode == ssl . CERT_NONE : return cert = sock . getpeercert ( ) if not c...
Check if certificate matches hostname .
12,387
def trim ( self ) : now_time = time . time ( ) while self . _seq and self . _seq [ 0 ] . expire_time < now_time : item = self . _seq . popleft ( ) del self . _map [ item . key ] if self . _max_items : while self . _seq and len ( self . _seq ) > self . _max_items : item = self . _seq . popleft ( ) del self . _map [ item...
Remove items that are expired or exceed the max size .
12,388
def strip_path_session_id(path):
    """Remove known session-ID segments from a URL path.

    Each pattern captures the text before and after the session ID in
    groups 1 and 3; matching patterns are applied cumulatively.
    """
    for session_pattern in SESSION_ID_PATH_PATTERNS:
        found = session_pattern.match(path)
        if found:
            path = found.group(1) + found.group(3)
    return path
Strip session ID from URL path .
12,389
def rewrite ( self , url_info : URLInfo ) -> URLInfo : if url_info . scheme not in ( 'http' , 'https' ) : return url_info if self . _session_id_enabled : url = '{scheme}://{authority}{path}?{query}#{fragment}' . format ( scheme = url_info . scheme , authority = url_info . authority , path = strip_path_session_id ( url_...
Rewrite the given URL .
12,390
def parse_address ( text : str ) -> Tuple [ str , int ] : match = re . search ( r'\(' r'(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*' r'\)' , text ) if match : return ( '{0}.{1}.{2}.{3}' . format ( int ( match . group ( 1 ) ) , int ( match . group ( 2 ...
Parse PASV address .
12,391
def reply_code_tuple(code: int) -> Tuple[int, int, int]:
    """Decompose a 3-digit FTP reply code into its digits.

    For example, 230 -> (2, 3, 0).
    """
    hundreds, remainder = divmod(code, 100)
    tens, ones = divmod(remainder, 10)
    return hundreds, tens, ones
Return the reply code as a tuple .
12,392
def parse_machine_listing ( text : str , convert : bool = True , strict : bool = True ) -> List [ dict ] : listing = [ ] for line in text . splitlines ( False ) : facts = line . split ( ';' ) row = { } filename = None for fact in facts : name , sep , value = fact . partition ( '=' ) if sep : name = name . strip ( ) . l...
Parse machine listing .
12,393
def convert_machine_list_value(name: str, value: str) -> Union[datetime.datetime, str, int]:
    """Convert a single machine-listing fact to a typed value.

    'modify' facts become datetimes, 'size' facts become ints, and
    everything else passes through unchanged.
    """
    if name == 'modify':
        return convert_machine_list_time_val(value)
    if name == 'size':
        return int(value)
    return value
Convert sizes and time values .
12,394
def convert_machine_list_time_val ( text : str ) -> datetime . datetime : text = text [ : 14 ] if len ( text ) != 14 : raise ValueError ( 'Time value not 14 chars' ) year = int ( text [ 0 : 4 ] ) month = int ( text [ 4 : 6 ] ) day = int ( text [ 6 : 8 ] ) hour = int ( text [ 8 : 10 ] ) minute = int ( text [ 10 : 12 ] )...
Convert RFC 3659 time - val to datetime objects .
12,395
def machine_listings_to_file_entries(listings: Iterable[dict]) -> Iterable[FileEntry]:
    """Yield a FileEntry for each parsed machine-listing dict.

    'name' is required; 'type', 'size', and 'modify' are optional and
    default to None via dict.get.
    """
    for listing in listings:
        yield FileEntry(
            listing['name'],
            type=listing.get('type'),
            size=listing.get('size'),
            date=listing.get('modify')
        )
Convert results from parsing machine listings to FileEntry list .
12,396
def reply_code(self):
    """Return the numeric FTP reply code stored in args[1].

    Returns None when args has fewer than two items or args[1] is not
    an int.
    """
    args = self.args
    if len(args) < 2:
        return None
    candidate = args[1]
    if isinstance(candidate, int):
        return candidate
Return reply code .
12,397
def _run_producer_wrapper(self):
    """Run the producer coroutine; stop the engine when it completes
    or dies.

    StopIteration is treated as normal completion (no error logging);
    any other exception is logged, the engine is stopped, and the
    exception is re-raised.
    """
    try:
        yield from self._producer.process()
    except Exception as error:
        if not isinstance(error, StopIteration):
            _logger.debug('Producer died.', exc_info=True)
        self.stop()
        raise
    else:
        self.stop()
Run the producer if exception stop engine .
12,398
def read_cdx ( file , encoding = 'utf8' ) : with codecs . getreader ( encoding ) ( file ) as stream : header_line = stream . readline ( ) separator = header_line [ 0 ] field_keys = header_line . strip ( ) . split ( separator ) if field_keys . pop ( 0 ) != 'CDX' : raise ValueError ( 'CDX header not found.' ) for line in...
Iterate CDX file .
12,399
def set_common_fields ( self , warc_type : str , content_type : str ) : self . fields [ self . WARC_TYPE ] = warc_type self . fields [ self . CONTENT_TYPE ] = content_type self . fields [ self . WARC_DATE ] = wpull . util . datetime_str ( ) self . fields [ self . WARC_RECORD_ID ] = '<{0}>' . format ( uuid . uuid4 ( ) ....
Set the required fields for the record .