idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
52,500
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    Validates each path segment, then prefers a real file on disk;
    falls back to pkg_resources (e.g. zipped egg installs) when the
    file is not present.
    """
    parts = name.lstrip('/').split('/')
    for segment in parts:
        # Refuse parent-directory escapes and embedded separators.
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    zoneinfo_path = os.path.join(os.path.dirname(__file__),
                                 'zoneinfo', *parts)
    if not os.path.exists(zoneinfo_path):
        try:
            from pkg_resources import resource_stream
        except ImportError:
            pass
        else:
            return resource_stream(__name__, 'zoneinfo/' + name)
    return open(zoneinfo_path, 'rb')
Open a resource from the zoneinfo subdir for reading .
52,501
def FixedOffset(offset, _tzinfos={}):
    """Return a fixed-offset timezone based off a number of minutes.

    The mutable default ``_tzinfos`` is intentional: it serves as a
    module-wide memoization cache so equal offsets share one instance.
    """
    if offset == 0:
        return UTC
    cached = _tzinfos.get(offset)
    if cached is None:
        # setdefault keeps the cache race-tolerant: first writer wins.
        cached = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return cached
Return a fixed-offset timezone based on a number of minutes.
52,502
def boolean_or_list(config_name, args, configs, alternative_names=None):
    """Get a boolean or list of regexes from args and configs.

    Sets ``args.<config_name>`` to ``['.*']`` (match everything), ``[]``
    (match nothing), or a list of regex patterns from the config option.

    Fix: the original used a mutable default (``alternative_names=[]``)
    and mutated it with ``insert``, leaking ``config_name`` entries
    across calls and into caller-owned lists. The list is now copied.
    """
    names = list(alternative_names) if alternative_names else []

    # Any truthy command-line flag (alias or primary) means "all".
    for key in names + [config_name]:
        if hasattr(args, key) and getattr(args, key):
            setattr(args, config_name, ['.*'])
            return

    setattr(args, config_name, [])

    # Look up the primary name first, then aliases, in the config file.
    option = None
    for key in [config_name] + names:
        if configs.has_option('settings', key):
            option = configs.get('settings', key)
            break

    if option is not None:
        if option.strip().lower() == 'true':
            setattr(args, config_name, ['.*'])
        elif option.strip().lower() != 'false':
            # Treat the option as a newline-separated list of patterns.
            for pattern in option.split("\n"):
                if pattern.strip() != '':
                    getattr(args, config_name).append(pattern)
Get a boolean or list of regexes from args and configs .
52,503
def get_style_defs(self, arg=''):
    """Return the command sequences needed to define the commands used
    to format text in the verbatim environment. ``arg`` is ignored.
    """
    prefix = self.commandprefix
    defs = [
        r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
        (prefix, name, definition)
        for name, definition in iteritems(self.cmd2def)
    ]
    return STYLE_TEMPLATE % {'cp': self.commandprefix,
                             'styles': '\n'.join(defs)}
Return the command sequences needed to define the commands used to format text in the verbatim environment . arg is ignored .
52,504
def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    try:
        pattern = _pattern_cache[glob]
    except KeyError:
        # Compile once per glob and memoize for subsequent lookups.
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return pattern.match(fn)
Return whether the supplied file name fn matches pattern filename .
52,505
def get_all_formatters():
    """Return a generator for all formatter classes."""
    for info in itervalues(FORMATTERS):
        cls_name = info[1]
        if cls_name not in _formatter_cache:
            # Lazily import the module that defines this formatter.
            _load_formatters(info[0])
        yield _formatter_cache[cls_name]
    for _, formatter in find_plugin_formatters():
        yield formatter
Return a generator for all formatter classes .
52,506
def find_formatter_class(alias):
    """Lookup a formatter by alias.

    Returns None if not found.
    """
    for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
        if alias not in aliases:
            continue
        if name not in _formatter_cache:
            _load_formatters(module_name)
        return _formatter_cache[name]
    for _, cls in find_plugin_formatters():
        if alias in cls.aliases:
            return cls
Lookup a formatter by alias .
52,507
def get_formatter_by_name(_alias, **options):
    """Lookup and instantiate a formatter by alias.

    Raises ClassNotFound if not found.
    """
    formatter_cls = find_formatter_class(_alias)
    if formatter_cls is None:
        raise ClassNotFound("no formatter found for name %r" % _alias)
    return formatter_cls(**options)
Lookup and instantiate a formatter by alias .
52,508
def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
    """Load a formatter from a file.

    Executes the file and instantiates the class named ``formattername``
    with ``options``. All failures are re-raised as ClassNotFound.

    Fix: the file handle was previously leaked (``open(...).read()``
    with no close); it is now closed via a context manager.
    """
    try:
        custom_namespace = {}
        with open(filename, 'rb') as fp:
            code = fp.read()
        # NOTE: exec of arbitrary file contents -- only load trusted files.
        exec(code, custom_namespace)
        if formattername not in custom_namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (formattername, filename))
        formatter_class = custom_namespace[formattername]
        return formatter_class(**options)
    except IOError as err:
        raise ClassNotFound('cannot read %s' % filename)
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom formatter: %s' % err)
Load a formatter from a file .
52,509
def get_formatter_for_filename(fn, **options):
    """Lookup and instantiate a formatter by filename pattern.

    Raises ClassNotFound if not found.
    """
    fn = basename(fn)
    for modname, name, _, filenames, _ in itervalues(FORMATTERS):
        for pattern in filenames:
            if _fn_matches(fn, pattern):
                if name not in _formatter_cache:
                    _load_formatters(modname)
                return _formatter_cache[name](**options)
    for cls in find_plugin_formatters():
        for pattern in cls.filenames:
            if _fn_matches(fn, pattern):
                return cls(**options)
    raise ClassNotFound("no formatter found for file name %r" % fn)
Lookup and instantiate a formatter by filename pattern .
52,510
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Reset the content-type state, then delegate to RegexLexer.

    # NOTE(review): content_type is presumably set while lexing a
    # Content-Type header elsewhere in this lexer -- cleared here so
    # each run starts fresh; confirm against the lexer's callbacks.
    """
    self.content_type = None
    return RegexLexer.get_tokens_unprocessed(self, text, stack)
Reset the content - type state .
52,511
def find_lexer_class(name):
    """Lookup a lexer class by name.

    Returns None if not found.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    for module_name, lname, aliases, _, _ in itervalues(LEXERS):
        if name != lname:
            continue
        _load_lexers(module_name)
        return _lexer_cache[name]
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
Lookup a lexer class by name .
52,512
def find_lexer_class_by_name(_alias):
    """Lookup a lexer class by alias.

    Raises ClassNotFound when the alias is empty or unknown.
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    wanted = _alias.lower()
    for module_name, name, aliases, _, _ in itervalues(LEXERS):
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    for cls in find_plugin_lexers():
        if wanted in cls.aliases:
            return cls
    raise ClassNotFound('no lexer for alias %r found' % _alias)
Lookup a lexer class by alias .
52,513
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    Executes the file and instantiates the class named ``lexername``
    with ``options``. All failures are re-raised as ClassNotFound.

    Fix: the file handle was previously leaked (``open(...).read()``
    with no close); it is now closed via a context manager.
    """
    try:
        custom_namespace = {}
        with open(filename, 'rb') as fp:
            code = fp.read()
        # NOTE: exec of arbitrary file contents -- only load trusted files.
        exec(code, custom_namespace)
        if lexername not in custom_namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (lexername, filename))
        lexer_class = custom_namespace[lexername]
        return lexer_class(**options)
    except IOError as err:
        raise ClassNotFound('cannot read %s' % filename)
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom lexer: %s' % err)
Load a lexer from a file .
52,514
def get_lexer_for_mimetype(_mime, **options):
    """Get a lexer for a mimetype.

    Raises ClassNotFound if not found.
    """
    for modname, name, _, _, mimetypes in itervalues(LEXERS):
        if _mime not in mimetypes:
            continue
        if name not in _lexer_cache:
            _load_lexers(modname)
        return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
Get a lexer for a mimetype .
52,515
def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    for lexer_key in sorted(LEXERS):
        modname, lexer_name = LEXERS[lexer_key][:2]
        if lexer_name not in _lexer_cache:
            # Import lazily so unused lexer modules are never loaded.
            _load_lexers(modname)
        yield _lexer_cache[lexer_name]
    if plugins:
        for plugin_lexer in find_plugin_lexers():
            yield plugin_lexer
Return an iterator over all lexer classes .
52,516
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None, plugin=None, language=None, local_file=None):
    """Returns a hash of information about the entity.

    Keys: language, dependencies, lines, lineno, cursorpos. Dependency
    parsing and line counting only happen for entity_type == 'file'.
    """
    # Map an editor-reported language name to its Pygments equivalent.
    language = standardize_language(language, plugin)
    stats = {
        'language': language,
        'dependencies': [],
        'lines': None,
        'lineno': lineno,
        'cursorpos': cursorpos,
    }
    if entity_type == 'file':
        lexer = get_lexer(language)
        # No usable language from the plugin: fall back to guessing
        # from the file itself (may replace both language and lexer).
        if not language:
            language, lexer = guess_language(file_name, local_file)
        parser = DependencyParser(local_file or file_name, lexer)
        stats.update({
            'language': use_root_language(language, lexer),
            'dependencies': parser.parse(),
            'lines': number_lines_in_file(local_file or file_name),
        })
    return stats
Returns a hash of information about the entity .
52,517
def guess_language(file_name, local_file):
    """Guess lexer and language for a file.

    Returns a (language, lexer) tuple; either element may be None.
    """
    language = get_language_from_extension(file_name)
    if language:
        lexer = get_lexer(language)
    else:
        lexer = smart_guess_lexer(file_name, local_file)
    if lexer:
        # Prefer the lexer's own name once we have one.
        language = u(lexer.name)
    return language, lexer
Guess lexer and language for a file .
52,518
def smart_guess_lexer(file_name, local_file):
    """Guess Pygments lexer for a file.

    Prefers the filename-based guess unless the modeline-based guess
    is strictly more accurate.
    """
    text = get_file_head(file_name)
    by_name, name_accuracy = guess_lexer_using_filename(local_file or file_name, text)
    by_modeline, modeline_accuracy = guess_lexer_using_modeline(text)
    chosen = None
    if by_name:
        chosen = by_name
    if by_modeline and modeline_accuracy and (not name_accuracy or modeline_accuracy > name_accuracy):
        chosen = by_modeline
    return chosen
Guess Pygments lexer for a file .
52,519
def guess_lexer_using_filename(file_name, text):
    """Guess lexer for given text, limited to lexers for this file's
    extension. Returns a (lexer, accuracy) tuple; either may be None.
    """
    guessed, score = None, None
    try:
        guessed = custom_pygments_guess_lexer_for_filename(file_name, text)
    except SkipHeartbeat as ex:
        raise SkipHeartbeat(u(ex))
    except:
        # Deliberate best-effort: any other guesser failure is logged.
        log.traceback(logging.DEBUG)
    if guessed is not None:
        try:
            score = guessed.analyse_text(text)
        except:
            log.traceback(logging.DEBUG)
    return guessed, score
Guess lexer for given text, limited to lexers for this file's extension.
52,520
def guess_lexer_using_modeline(text):
    """Guess lexer for given text using Vim modeline.

    Returns a (lexer, accuracy) tuple; either may be None.
    """
    guessed, score = None, None
    file_type = None
    try:
        file_type = get_filetype_from_buffer(text)
    except:
        # Best-effort: modeline parsing failures are only logged.
        log.traceback(logging.DEBUG)
    if file_type is not None:
        try:
            guessed = get_lexer_by_name(file_type)
        except ClassNotFound:
            log.traceback(logging.DEBUG)
    if guessed is not None:
        try:
            score = guessed.analyse_text(text)
        except:
            log.traceback(logging.DEBUG)
    return guessed, score
Guess lexer for given text using Vim modeline .
52,521
def get_language_from_extension(file_name):
    """Returns a matching language for the given file extension.

    Disambiguates the C-family extensions (.h/.c/.m/.mm and friends)
    by probing sibling files on disk; returns None when no special
    handling applies (caller falls back to other guessing).
    """
    filepart, extension = os.path.splitext(file_name)
    pathpart, filename = os.path.split(file_name)
    if filename == 'go.mod':
        return 'Go'
    # Header-like or C-like extension: decide between C, Obj-C and C++
    # by looking for sibling implementation files with the same stem.
    if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
        if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
            return 'C'
        if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
            return 'Objective-C'
        if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
            return 'Objective-C++'
        # No same-stem sibling: fall back to any C++/C extension
        # appearing anywhere in the same folder.
        available_extensions = extensions_in_same_folder(file_name)
        for ext in CppLexer.filenames:
            ext = ext.lstrip('*')
            if ext in available_extensions:
                return 'C++'
        if '.c' in available_extensions:
            return 'C'
    # A lone .m/.mm next to a header is treated as Objective-C(++).
    if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
        return 'Objective-C'
    if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
        return 'Objective-C++'
    return None
Returns a matching language for the given file extension .
52,522
def standardize_language(language, plugin):
    """Maps a string to the equivalent Pygments language.

    Tries the editor-specific mapping first, then the default one.
    """
    if not language:
        return None
    if plugin:
        # Reduce a plugin string like "vim/7.4 vim-wakatime/4.0.8"
        # to the bare editor name ("vim").
        editor = plugin.split(' ')[-1].split('/')[0].split('-')[0]
        mapped = get_language_from_json(language, editor)
        if mapped is not None:
            return mapped
    return get_language_from_json(language, 'default')
Maps a string to the equivalent Pygments language .
52,523
def get_language_from_json(language, key):
    """Finds the given language in a json file.

    Looks up languages/<key>.json next to this module; returns the
    mapped name or None.
    """
    file_name = os.path.join(
        os.path.dirname(__file__),
        'languages',
        '{0}.json').format(key.lower())
    if os.path.exists(file_name):
        try:
            with open(file_name, 'r', encoding='utf-8') as fh:
                languages = json.loads(fh.read())
            mapped = languages.get(language.lower())
            if mapped:
                return languages[language.lower()]
        except:
            log.traceback(logging.DEBUG)
    return None
Finds the given language in a json file .
52,524
def get_file_head(file_name):
    """Returns the first 512000 bytes of the file's contents.

    Tries utf-8 first, then the filesystem encoding; returns None when
    both fail.
    """
    content = None
    try:
        with open(file_name, 'r', encoding='utf-8') as fp:
            content = fp.read(512000)
    except:
        # utf-8 failed (missing file or decode error): retry with the
        # filesystem encoding before giving up.
        try:
            with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fp:
                content = fp.read(512000)
        except:
            log.traceback(logging.DEBUG)
    return content
Returns the first 512000 bytes of the file's contents.
52,525
def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
    """Overwrite pygments.lexers.guess_lexer_for_filename to customize
    the priority of different lexers based on popularity of languages.

    Raises ClassNotFound when no lexer matches the filename, and
    SkipHeartbeat when MATLAB and Objective-C tie in accuracy.
    """
    fn = basename(_fn)
    primary = {}
    matching_lexers = set()
    # Collect every lexer whose (alias_)filenames glob matches; remember
    # whether the match came from the primary filename list.
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    # Several candidates: score each one against the file contents.
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            # A perfect score short-circuits the ranking.
            return lexer(**options)
        result.append(customize_lexer_priority(_fn, rv, lexer))
    # Special case: a MATLAB/Objective-C accuracy tie is ambiguous
    # enough that the heartbeat is skipped entirely.
    matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
    if len(matlab) > 0:
        objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
        if objc and objc[0][0] == matlab[0][0]:
            raise SkipHeartbeat('Skipping because not enough language accuracy.')

    def type_sort(t):
        # Sort by accuracy, then primary-filename match, then priority,
        # then class name (stable, deterministic tie-break).
        return (t[0], primary[t[2]], t[1], t[2].__name__)
    result.sort(key=type_sort)
    return result[-1][2](**options)
Overwrite pygments . lexers . guess_lexer_for_filename to customize the priority of different lexers based on popularity of languages .
52,526
def customize_lexer_priority(file_name, accuracy, lexer):
    """Customize lexer priority based on popularity of languages.

    Returns an (accuracy, priority, lexer) tuple used for ranking.
    """
    priority = lexer.priority
    lexer_name = lexer.name.lower().replace('sharp', '#')
    if lexer_name in LANGUAGES:
        priority = LANGUAGES[lexer_name]
    elif lexer_name == 'matlab':
        # Nudge accuracy using sibling files as evidence.
        siblings = extensions_in_same_folder(file_name)
        if '.mat' in siblings:
            accuracy += 0.01
        if '.h' not in siblings:
            accuracy += 0.01
    elif lexer_name == 'objective-c':
        siblings = extensions_in_same_folder(file_name)
        if '.mat' in siblings:
            accuracy -= 0.01
        else:
            accuracy += 0.01
        if '.h' in siblings:
            accuracy += 0.01
    return (accuracy, priority, lexer)
Customize lexer priority
52,527
def extensions_in_same_folder(file_name):
    """Returns a set of lowercased file extensions from the same folder
    as file_name.

    Fix: the original transposed with ``list(zip(*map(...)))[1]``,
    which raised IndexError for an empty folder; an empty set is now
    returned instead.
    """
    directory = os.path.dirname(file_name)
    extensions = set()
    for entry in os.listdir(directory):
        extensions.add(os.path.splitext(entry)[1].lower())
    return extensions
Returns a list of file extensions from the same folder as file_name .
52,528
def analyse_text(text):
    """Check for initial comment and patterns that distinguish Rexx
    from other C-like languages.

    Returns 1.0 for an explicit "rexx" marker, a weighted score for
    other leading comments, and None (implicitly) otherwise.
    """
    if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
        return 1.0
    if text.startswith('/*'):
        lowered = text.lower()
        score = sum(weight
                    for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
                    if pattern.search(lowered)) + 0.01
        return min(score, 1.0)
Check for initial comment and patterns that distinguish Rexx from other C-like languages.
52,529
def analyse_text(text):
    """Perform a structural analysis for basic Easytrieve constructs.

    Returns a score in [0.0, 1.0].

    Fixes: ``isCommentLine`` now tests its argument (it previously
    ignored it and tested ``lines[0]``), and a text consisting solely
    of blank/comment lines no longer raises IndexError.
    """
    result = 0.0
    lines = text.split('\n')
    hasEndProc = False
    hasHeaderComment = False
    hasFile = False
    hasJob = False
    hasProc = False
    hasParm = False
    hasReport = False

    def isCommentLine(line):
        # Fixed: use the argument instead of lines[0].
        return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None

    def isEmptyLine(line):
        return not bool(line.strip())

    # Skip leading blank/comment lines, remembering whether any header
    # comment was present.
    while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
        if not isEmptyLine(lines[0]):
            hasHeaderComment = True
        del lines[0]

    # Guard: all lines may have been consumed above (previously this
    # indexed lines[0] unconditionally and could raise IndexError).
    if lines and EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
        # Looks like an Easytrieve macro.
        result = 0.4
        if hasHeaderComment:
            result += 0.4
    else:
        # Scan remaining lines for telltale first words.
        for line in lines:
            words = line.split()
            if (len(words) >= 2):
                firstWord = words[0]
                if not hasReport:
                    if not hasJob:
                        if not hasFile:
                            if not hasParm:
                                if firstWord == 'PARM':
                                    hasParm = True
                            if firstWord == 'FILE':
                                hasFile = True
                        if firstWord == 'JOB':
                            hasJob = True
                    elif firstWord == 'PROC':
                        hasProc = True
                    elif firstWord == 'END-PROC':
                        hasEndProc = True
                elif firstWord == 'REPORT':
                    hasReport = True
        # Weight the findings.
        if hasJob and (hasProc == hasEndProc):
            if hasHeaderComment:
                result += 0.1
            if hasParm:
                if hasProc:
                    # PARM, JOB and PROC in combination.
                    result += 0.8
                else:
                    # PARM and JOB in combination.
                    result += 0.5
            else:
                # Only JOB found.
                result += 0.11
        if hasParm:
            result += 0.2
        if hasFile:
            result += 0.01
        if hasReport:
            result += 0.01
    assert 0.0 <= result <= 1.0
    return result
Perform a structural analysis for basic Easytrieve constructs .
52,530
def analyse_text(text):
    """Recognize JCL job by checking the first line for a job header."""
    # str.split always yields at least one element, so lines[0] is safe.
    first_line = text.split('\n')[0]
    score = 1.0 if JclLexer._JOB_HEADER_PATTERN.match(first_line) else 0.0
    assert 0.0 <= score <= 1.0
    return score
Recognize JCL job by header .
52,531
def _objdump_lexer_tokens(asm_lexer):
    """Common objdump lexer tokens to wrap an ASM lexer.

    Returns a Pygments token dict whose code lines delegate the
    disassembled instruction text to ``asm_lexer``.
    """
    hex_re = r'[0-9A-Za-z]'
    return {
        'root': [
            # File name & format: "<file>:     file format <fmt>"
            ('(.*?)(:)( +file format )(.*?)$',
             bygroups(Name.Label, Punctuation, Text, String)),
            # Section header: "Disassembly of section <name>:"
            ('(Disassembly of section )(.*?)(:)$',
             bygroups(Text, Name.Label, Punctuation)),
            # Function label with offset: "<addr> <name+0x..>:"
            ('(' + hex_re + '+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
             bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation, Number.Hex, Punctuation)),
            # Function label: "<addr> <name>:"
            ('(' + hex_re + '+)( )(<)(.*?)(>:)$',
             bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation)),
            # Code line: address, raw bytes, disassembled instruction
            # (instruction text lexed by the wrapped asm_lexer).
            ('( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)( *\t)([a-zA-Z].*?)$',
             bygroups(Text, Name.Label, Text, Number.Hex, Text, using(asm_lexer))),
            # Code line: address, raw bytes, trailing (non-asm) text
            ('( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)( *)(.*?)$',
             bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
            # Continuation line: address and raw bytes only
            ('( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)$',
             bygroups(Text, Name.Label, Text, Number.Hex)),
            # Skipped-bytes marker
            (r'\t\.\.\.$', Text),
            # Relocation line with offset
            (r'(\t\t\t)(' + hex_re + r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex_re + '+)$',
             bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant, Punctuation, Number.Hex)),
            # Relocation line
            (r'(\t\t\t)(' + hex_re + r'+:)( )([^\t]+)(\t)(.*?)$',
             bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant)),
            # Anything else
            (r'[^\n]+\n', Other)
        ]
    }
Common objdump lexer tokens to wrap an ASM lexer .
52,532
def get_key(self, fileobj):
    """Return the key associated with a registered file object.

    Raises RuntimeError if the selector is closed, KeyError if the
    file object is not registered.
    """
    registry = self.get_map()
    if registry is None:
        raise RuntimeError("Selector is closed")
    try:
        return registry[fileobj]
    except KeyError:
        raise KeyError("{0!r} is not registered".format(fileobj))
Return the key associated with a registered file object .
52,533
def get_filetype_from_buffer(buf, max_lines=5):
    """Scan the buffer for modelines and return filetype if one is found."""
    lines = buf.splitlines()
    # Check the last max_lines lines first (tail modelines), newest last.
    for line in lines[-1:-max_lines - 1:-1]:
        found = get_filetype_from_line(line)
        if found:
            return found
    # Then check the first max_lines lines, scanned bottom-up.
    for index in range(max_lines, -1, -1):
        if index < len(lines):
            found = get_filetype_from_line(lines[index])
            if found:
                return found
    return None
Scan the buffer for modelines and return filetype if one is found .
52,534
def get_font(self, bold, oblique):
    """Get the font based on bold and italic flags."""
    if bold:
        variant = 'BOLDITALIC' if oblique else 'BOLD'
    elif oblique:
        variant = 'ITALIC'
    else:
        variant = 'NORMAL'
    return self.fonts[variant]
Get the font based on bold and italic flags .
52,535
def _get_char_x ( self , charno ) : return charno * self . fontw + self . image_pad + self . line_number_width
Get the X coordinate of a character position .
52,536
def _get_text_pos ( self , charno , lineno ) : return self . _get_char_x ( charno ) , self . _get_line_y ( lineno )
Get the actual position for a character and line position .
52,537
def _get_image_size ( self , maxcharno , maxlineno ) : return ( self . _get_char_x ( maxcharno ) + self . image_pad , self . _get_line_y ( maxlineno + 0 ) + self . image_pad )
Get the required image size .
52,538
def _draw_linenumber ( self , posno , lineno ) : self . _draw_text ( self . _get_linenumber_pos ( posno ) , str ( lineno ) . rjust ( self . line_number_chars ) , font = self . fonts . get_font ( self . line_number_bold , self . line_number_italic ) , fill = self . line_number_fg , )
Remember a line number drawable to paint later .
52,539
def _draw_text(self, pos, text, font, **kw):
    """Remember a single drawable tuple to paint later."""
    # Drawing is deferred: queued tuples are consumed when the image
    # is actually painted.
    self.drawables.append((pos, text, font, kw))
Remember a single drawable tuple to paint later .
52,540
def _create_drawables(self, tokensource):
    """Create drawables for the token content.

    Walks the token stream, queuing one drawable per line fragment and
    tracking the maximum column/line reached (stored on self for the
    later image-size computation).
    """
    lineno = charno = maxcharno = 0
    for ttype, value in tokensource:
        # Climb the token hierarchy until a styled ancestor is found.
        while ttype not in self.styles:
            ttype = ttype.parent
        style = self.styles[ttype]
        value = value.expandtabs(4)
        # keepends=True so newline handling below can see the '\n'.
        lines = value.splitlines(True)
        for i, line in enumerate(lines):
            temp = line.rstrip('\n')
            if temp:
                self._draw_text(self._get_text_pos(charno, lineno), temp,
                                font=self._get_style_font(style),
                                fill=self._get_text_color(style))
                charno += len(temp)
                maxcharno = max(maxcharno, charno)
            if line.endswith('\n'):
                # Start a fresh line: reset column, advance row.
                charno = 0
                lineno += 1
    self.maxcharno = maxcharno
    self.maxlineno = lineno
Create drawables for the token content .
52,541
def _draw_line_numbers(self):
    """Create drawables for the line numbers."""
    if not self.line_numbers:
        return
    for offset in xrange(self.maxlineno):
        number = offset + self.line_number_start
        # Only draw every line_number_step-th number.
        if (number % self.line_number_step) == 0:
            self._draw_linenumber(offset, number)
Create drawables for the line numbers .
52,542
def _paint_line_number_bg(self, im):
    """Paint the line number background on the image."""
    if not self.line_numbers:
        return
    if self.line_number_fg is None:
        return
    draw = ImageDraw.Draw(im)
    height = im.size[-1]
    gutter_x = self.image_pad + self.line_number_width - self.line_number_pad
    # Filled gutter plus a separator line at its right edge.
    draw.rectangle([(0, 0), (gutter_x, height)], fill=self.line_number_bg)
    draw.line([(gutter_x, 0), (gutter_x, height)], fill=self.line_number_fg)
    del draw
Paint the line number background on the image .
52,543
def update(self, attrs):
    """Return a copy of the current Heartbeat with updated attributes."""
    merged = self.dict()
    merged.update(attrs)
    return Heartbeat(merged, self.args, self.configs, _clone=True)
Return a copy of the current Heartbeat with updated attributes .
52,544
def sanitize(self):
    """Removes sensitive data including file names and dependencies.

    Returns self for chaining.
    """
    if not self.args.hide_file_names:
        return self
    if self.entity is None or self.type != 'file':
        return self
    if self.should_obfuscate_filename():
        self._sanitize_metadata()
        # Keep only the extension of the obfuscated entity path.
        extension = u(os.path.splitext(self.entity)[1])
        self.entity = u('HIDDEN{0}').format(extension)
    elif self.should_obfuscate_project():
        self._sanitize_metadata()
    return self
Removes sensitive data including file names and dependencies .
52,545
def should_obfuscate_filename(self):
    """Returns True when the entity file path matches one of the
    hide_file_names patterns (case-insensitive)."""
    for pattern in self.args.hide_file_names:
        try:
            if re.compile(pattern, re.IGNORECASE).search(self.entity):
                return True
        except re.error as ex:
            # Invalid user-supplied pattern: warn and keep checking.
            log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
                msg=u(ex),
                pattern=u(pattern),
            ))
    return False
Returns True if hide_file_names is true or the entity file path matches one in the list of obfuscated file paths .
52,546
def _format_local_file(self):
    """When args.local_file is empty on Windows, try to map args.entity
    to a UNC path."""
    if self.type != 'file' or not self.entity:
        return
    if not is_win:
        return
    if self._file_exists():
        # Entity already resolvable as-is: nothing to remap.
        return
    self.args.local_file = self._to_unc_path(self.entity)
When args.local_file is empty on Windows, tries to map args.entity to a UNC path.
52,547
def create_negotiate_message(self, domain_name=None, workstation=None):
    """Create an NTLM NEGOTIATE_MESSAGE, returned base64-encoded."""
    message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)
    self.negotiate_message = message
    return base64.b64encode(message.get_data())
Create an NTLM NEGOTIATE_MESSAGE
52,548
def parse_challenge_message(self, msg2):
    """Parse the NTLM CHALLENGE_MESSAGE from the server and store it
    on the Ntlm context."""
    decoded = base64.b64decode(msg2)
    self.challenge_message = ChallengeMessage(decoded)
Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields
52,549
def create_authenticate_message(self, user_name, password, domain_name=None, workstation=None, server_certificate_hash=None):
    """Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context
    and the previous messages sent and received.

    Returns the message base64-encoded; also initializes session
    security when sealing or signing was negotiated.
    """
    self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation, self.challenge_message, self.ntlm_compatibility, server_certificate_hash)
    # Add the MIC over the full negotiate/challenge exchange.
    self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message)
    # Session security is only needed when SEAL or SIGN was negotiated.
    if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
        # negotiate_flags is a packed little-endian uint32 here.
        self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0], self.authenticate_message.exported_session_key)
    return base64.b64encode(self.authenticate_message.get_data())
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received
52,550
def lex(code, lexer):
    """Lex ``code`` with ``lexer`` and return an iterable of tokens."""
    try:
        return lexer.get_tokens(code)
    except TypeError as exc:
        detail = exc.args[0]
        # A lexer *class* (not instance) produces one of these messages.
        if isinstance(detail, str) and (
                'unbound method get_tokens' in detail or
                'missing 1 required positional argument' in detail):
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
Lex code with lexer and return an iterable of tokens .
52,551
def format(tokens, formatter, outfile=None):
    """Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given, write to it; otherwise return the output
    as a string.
    """
    try:
        if outfile:
            formatter.format(tokens, outfile)
        else:
            # Pick a bytes or text buffer based on the formatter's encoding.
            buf = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
            formatter.format(tokens, buf)
            return buf.getvalue()
    except TypeError as exc:
        detail = exc.args[0]
        # A formatter *class* (not instance) produces one of these messages.
        if isinstance(detail, str) and (
                'unbound method format' in detail or
                'missing 1 required positional argument' in detail):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
Format a tokenlist tokens with the formatter formatter .
52,552
def highlight(code, lexer, formatter, outfile=None):
    """Lex ``code`` with ``lexer`` and format it with the formatter
    ``formatter``.

    If ``outfile`` is given, the result is written to it; otherwise it
    is returned as a string.
    """
    return format(lex(code, lexer), formatter, outfile)
Lex code with lexer and format it with the formatter formatter .
52,553
def getConfigFile():
    """Returns the config file location.

    Honors the WAKATIME_HOME environment variable when set; otherwise
    defaults to the user's home directory.
    """
    fileName = '.wakatime.cfg'
    home = os.environ.get('WAKATIME_HOME')
    base = os.path.expanduser(home) if home else os.path.expanduser('~')
    return os.path.join(base, fileName)
Returns the config file location .
52,554
def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    for plugin_name, cls in find_plugin_filters():
        if plugin_name == filtername:
            return cls
    return None
Lookup a filter by name . Return None if not found .
52,555
def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Raises ClassNotFound if the filter name is unknown.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
Return an instantiated filter .
52,556
def get_tokens_unprocessed(self, text):
    """Since ERB doesn't allow "<%" and other tags inside of ruby
    blocks we have to use a split approach here that fails for
    that too.

    A small state machine: state 0 = plain text, state 1 = expecting
    an opening tag, state 2 = expecting a closing tag.
    """
    tokens = self._block_re.split(text)
    # Reversed so pop() consumes the pieces in document order.
    tokens.reverse()
    state = idx = 0
    try:
        while True:
            if state == 0:
                # Plain text outside any ERB tag.
                val = tokens.pop()
                yield idx, Other, val
                idx += len(val)
                state = 1
            elif state == 1:
                tag = tokens.pop()
                if tag in ('<%%', '%%>'):
                    # Escaped literal tag: emitted as plain text.
                    yield idx, Other, tag
                    idx += 3
                    state = 0
                elif tag == '<%#':
                    # Comment tag: the following chunk is the comment body.
                    yield idx, Comment.Preproc, tag
                    val = tokens.pop()
                    yield idx + 3, Comment, val
                    idx += 3 + len(val)
                    state = 2
                elif tag in ('<%', '<%=', '<%-'):
                    # Ruby code tag: delegate the body to the ruby lexer.
                    yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    data = tokens.pop()
                    r_idx = 0
                    for r_idx, r_token, r_value in self.ruby_lexer.get_tokens_unprocessed(data):
                        yield r_idx + idx, r_token, r_value
                    idx += len(data)
                    state = 2
                elif tag in ('%>', '-%>'):
                    # Closing tag without an opener: an error token.
                    yield idx, Error, tag
                    idx += len(tag)
                    state = 0
                else:
                    # Leading '%' line: first char is the marker, the
                    # rest of the line is ruby.
                    yield idx, Comment.Preproc, tag[0]
                    r_idx = 0
                    for r_idx, r_token, r_value in self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                        yield idx + 1 + r_idx, r_token, r_value
                    idx += len(tag)
                    state = 0
            elif state == 2:
                # Expecting a closing tag after comment/code.
                tag = tokens.pop()
                if tag not in ('%>', '-%>'):
                    yield idx, Other, tag
                else:
                    yield idx, Comment.Preproc, tag
                idx += len(tag)
                state = 0
    except IndexError:
        # tokens exhausted: end of input.
        return
Since ERB doesn't allow <% and other tags inside of ruby blocks, we have to use a split approach here that fails for that too.
52,557
def format_file_path(filepath):
    """Formats a path as absolute and with the correct platform separator."""
    try:
        # Remember whether this was a Windows network mount before
        # realpath normalizes away the leading separator.
        is_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
        filepath = os.path.realpath(os.path.abspath(filepath))
        filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
        if WINDOWS_DRIVE_PATTERN.match(filepath):
            filepath = filepath.capitalize()
        if is_network_mount:
            # Restore the second leading slash for UNC mounts.
            filepath = '/' + filepath
    except:
        pass
    return filepath
Formats a path as absolute and with the correct platform separator .
52,558
def close(self):
    """Close all pooled connections and disable the pool."""
    # Disable the pool first so no new connections can be checked out.
    drained, self.pool = self.pool, None
    try:
        while True:
            connection = drained.get(block=False)
            if connection:
                connection.close()
    except queue.Empty:
        pass  # pool fully drained
Close all pooled connections and disable the pool .
52,559
def is_same_host(self, url):
    """Check if the given url is a member of the same host as this
    connection pool.
    """
    if url.startswith('/'):
        # A relative URL is by definition on the same host.
        return True
    scheme, host, port = get_host(url)
    # NOTE(review): _ipv6_host presumably normalizes bracketed IPv6
    # forms -- confirm against its definition; host compare is
    # case-insensitive.
    host = _ipv6_host(host).lower()
    # Normalize default ports so "http://h" and "http://h:80" compare
    # equal: fill in the scheme default on one side, strip it on the
    # other.
    if self.port and not port:
        port = port_by_scheme.get(scheme)
    elif not self.port and port == port_by_scheme.get(scheme):
        port = None
    return (scheme, host, port) == (self.scheme, self.host, self.port)
Check if the given url is a member of the same host as this connection pool .
52,560
def filename(self, value):
    """Deprecated setter: assigns to ``source`` instead."""
    warnings.warn(
        "The 'filename' attribute will be removed in future versions. "
        "Use 'source' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    self.source = value
Deprecated; use 'source' instead.
52,561
def options(self, section):
    """Return a list of option names for the given section name.

    Raises NoSectionError when the section does not exist.
    """
    try:
        merged = self._sections[section].copy()
    except KeyError:
        raise from_none(NoSectionError(section))
    # Defaults are visible in every section.
    merged.update(self._defaults)
    return list(merged.keys())
Return a list of option names for the given section name .
52,562
def read_string(self, string, source='<string>'):
    """Read configuration from a given string."""
    self.read_file(io.StringIO(string), source)
Read configuration from a given string .
52,563
def read_dict(self, dictionary, source='<dict>'):
    """Read configuration from a dictionary.

    Keys are section names, values are dictionaries of option/value
    pairs. In strict mode, duplicate sections or options raise; all
    section/option/value keys are coerced to strings.
    """
    # Track what this call has added, so strict-mode duplicate checks
    # only fire for duplicates *within* this dictionary.
    elements_added = set()
    for section, keys in dictionary.items():
        section = str(section)
        try:
            self.add_section(section)
        except (DuplicateSectionError, ValueError):
            # Re-adding an existing section is fine unless strict mode
            # sees it twice within this same read.
            if self._strict and section in elements_added:
                raise
        elements_added.add(section)
        for key, value in keys.items():
            key = self.optionxform(str(key))
            if value is not None:
                value = str(value)
            if self._strict and (section, key) in elements_added:
                raise DuplicateOptionError(section, key, source)
            elements_added.add((section, key))
            self.set(section, key, value)
Read configuration from a dictionary .
52,564
def readfp(self, fp, filename=None):
    """Deprecated; use read_file instead."""
    warnings.warn(
        "This method will be removed in future versions. Use 'parser.read_file()' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    self.read_file(fp, source=filename)
Deprecated; use read_file() instead.
52,565
def has_option(self, section, option):
    """Check for the existence of a given option in a given section.

    If the specified section is None or an empty string, DEFAULT is
    assumed. If the specified section does not exist, returns False.
    """
    if not section or section == self.default_section:
        return self.optionxform(option) in self._defaults
    if section not in self._sections:
        return False
    key = self.optionxform(option)
    return key in self._sections[section] or key in self._defaults
Check for the existence of a given option in a given section . If the specified section is None or an empty string DEFAULT is assumed . If the specified section does not exist returns False .
52,566
def _write_section ( self , fp , section_name , section_items , delimiter ) : fp . write ( "[{0}]\n" . format ( section_name ) ) for key , value in section_items : value = self . _interpolation . before_write ( self , section_name , key , value ) if value is not None or not self . _allow_no_value : value = delimiter + str ( value ) . replace ( '\n' , '\n\t' ) else : value = "" fp . write ( "{0}{1}\n" . format ( key , value ) ) fp . write ( "\n" )
Write a single section to the specified fp .
52,567
def _unify_values ( self , section , vars ) : sectiondict = { } try : sectiondict = self . _sections [ section ] except KeyError : if section != self . default_section : raise NoSectionError ( section ) vardict = { } if vars : for key , value in vars . items ( ) : if value is not None : value = str ( value ) vardict [ self . optionxform ( key ) ] = value return _ChainMap ( vardict , sectiondict , self . _defaults )
Create a sequence of lookups with vars taking priority over the section which takes priority over the DEFAULTSECT .
52,568
def _convert_to_boolean ( self , value ) : if value . lower ( ) not in self . BOOLEAN_STATES : raise ValueError ( 'Not a boolean: %s' % value ) return self . BOOLEAN_STATES [ value . lower ( ) ]
Return a boolean value translating from other types if necessary .
52,569
def _validate_value_types ( self , ** kwargs ) : section = kwargs . get ( 'section' , "" ) option = kwargs . get ( 'option' , "" ) value = kwargs . get ( 'value' , "" ) if PY2 and bytes in ( type ( section ) , type ( option ) , type ( value ) ) : warnings . warn ( "You passed a bytestring. Implicitly decoding as UTF-8 string." " This will not work on Python 3. Please switch to using" " Unicode strings across the board." , DeprecationWarning , stacklevel = 2 , ) if isinstance ( section , bytes ) : section = section . decode ( 'utf8' ) if isinstance ( option , bytes ) : option = option . decode ( 'utf8' ) if isinstance ( value , bytes ) : value = value . decode ( 'utf8' ) if not isinstance ( section , str ) : raise TypeError ( "section names must be strings" ) if not isinstance ( option , str ) : raise TypeError ( "option keys must be strings" ) if not self . _allow_no_value or value : if not isinstance ( value , str ) : raise TypeError ( "option values must be strings" ) return section , option , value
Raises a TypeError for non - string values .
52,570
def set ( self , section , option , value = None ) : _ , option , value = self . _validate_value_types ( option = option , value = value ) super ( ConfigParser , self ) . set ( section , option , value )
Set an option . Extends RawConfigParser . set by validating type and interpolation syntax on the value .
52,571
def add_section ( self , section ) : section , _ , _ = self . _validate_value_types ( section = section ) super ( ConfigParser , self ) . add_section ( section )
Create a new section in the configuration . Extends RawConfigParser . add_section by validating if the section name is a string .
52,572
def get ( self , option , fallback = None , ** kwargs ) : kwargs . setdefault ( 'raw' , False ) kwargs . setdefault ( 'vars' , None ) _impl = kwargs . pop ( '_impl' , None ) if not _impl : _impl = self . _parser . get return _impl ( self . _name , option , fallback = fallback , ** kwargs )
Get an option value .
52,573
def analyse_text ( text ) : if re . match ( r'^\s*REBOL\s*\[' , text , re . IGNORECASE ) : return 1.0 elif re . search ( r'\s*REBOL\s*[' , text , re . IGNORECASE ) : return 0.5
Check if code contains REBOL header and so it probably not R code
52,574
def get_sign_key ( exported_session_key , magic_constant ) : sign_key = hashlib . md5 ( exported_session_key + magic_constant ) . digest ( ) return sign_key
3 . 4 . 5 . 2 SIGNKEY
52,575
def _wait_for_io_events ( socks , events , timeout = None ) : if not HAS_SELECT : raise ValueError ( 'Platform does not have a selector' ) if not isinstance ( socks , list ) : if hasattr ( socks , "fileno" ) : socks = [ socks ] else : socks = list ( socks ) with DefaultSelector ( ) as selector : for sock in socks : selector . register ( sock , events ) return [ key [ 0 ] . fileobj for key in selector . select ( timeout ) if key [ 1 ] & events ]
Waits for IO events to be available from a list of sockets or optionally a single socket if passed in . Returns a list of sockets that can be interacted with immediately .
52,576
def make_analysator ( f ) : def text_analyse ( text ) : try : rv = f ( text ) except Exception : return 0.0 if not rv : return 0.0 try : return min ( 1.0 , max ( 0.0 , float ( rv ) ) ) except ( ValueError , TypeError ) : return 0.0 text_analyse . __doc__ = f . __doc__ return staticmethod ( text_analyse )
Return a static text analyser function that returns float values .
52,577
def shebang_matches ( text , regex ) : r index = text . find ( '\n' ) if index >= 0 : first_line = text [ : index ] . lower ( ) else : first_line = text . lower ( ) if first_line . startswith ( '#!' ) : try : found = [ x for x in split_path_re . split ( first_line [ 2 : ] . strip ( ) ) if x and not x . startswith ( '-' ) ] [ - 1 ] except IndexError : return False regex = re . compile ( r'^%s(\.(exe|cmd|bat|bin))?$' % regex , re . IGNORECASE ) if regex . search ( found ) is not None : return True return False
r Check if the given regular expression matches the last part of the shebang if one exists .
52,578
def looks_like_xml ( text ) : if xml_decl_re . match ( text ) : return True key = hash ( text ) try : return _looks_like_xml_cache [ key ] except KeyError : m = doctype_lookup_re . match ( text ) if m is not None : return True rv = tag_re . search ( text [ : 1000 ] ) is not None _looks_like_xml_cache [ key ] = rv return rv
Check if a doctype exists or if we have some tags .
52,579
def unirange ( a , b ) : if b < a : raise ValueError ( "Bad character range" ) if a < 0x10000 or b < 0x10000 : raise ValueError ( "unirange is only defined for non-BMP ranges" ) if sys . maxunicode > 0xffff : return u'[%s-%s]' % ( unichr ( a ) , unichr ( b ) ) else : ah , al = _surrogatepair ( a ) bh , bl = _surrogatepair ( b ) if ah == bh : return u'(?:%s[%s-%s])' % ( unichr ( ah ) , unichr ( al ) , unichr ( bl ) ) else : buf = [ ] buf . append ( u'%s[%s-%s]' % ( unichr ( ah ) , unichr ( al ) , ah == bh and unichr ( bl ) or unichr ( 0xdfff ) ) ) if ah - bh > 1 : buf . append ( u'[%s-%s][%s-%s]' % unichr ( ah + 1 ) , unichr ( bh - 1 ) , unichr ( 0xdc00 ) , unichr ( 0xdfff ) ) if ah != bh : buf . append ( u'%s[%s-%s]' % ( unichr ( bh ) , unichr ( 0xdc00 ) , unichr ( bl ) ) ) return u'(?:' + u'|' . join ( buf ) + u')'
Returns a regular expression string to match the given non - BMP range .
52,580
def format_lines ( var_name , seq , raw = False , indent_level = 0 ) : lines = [ ] base_indent = ' ' * indent_level * 4 inner_indent = ' ' * ( indent_level + 1 ) * 4 lines . append ( base_indent + var_name + ' = (' ) if raw : for i in seq : lines . append ( inner_indent + i + ',' ) else : for i in seq : r = repr ( i + '"' ) lines . append ( inner_indent + r [ : - 2 ] + r [ - 1 ] + ',' ) lines . append ( base_indent + ')' ) return '\n' . join ( lines )
Formats a sequence of strings for output .
52,581
def duplicates_removed ( it , already_seen = ( ) ) : lst = [ ] seen = set ( ) for i in it : if i in seen or i in already_seen : continue lst . append ( i ) seen . add ( i ) return lst
Returns a list with duplicates removed from the iterable it .
52,582
def _lex_fortran ( self , match , ctx = None ) : lexer = FortranLexer ( ) text = match . group ( 0 ) + "\n" for index , token , value in lexer . get_tokens_unprocessed ( text ) : value = value . replace ( '\n' , '' ) if value != '' : yield index , token , value
Lex a line just as free form fortran without line break .
52,583
def get_project_info ( configs , heartbeat , data ) : project_name , branch_name = heartbeat . project , heartbeat . branch if heartbeat . type != 'file' : project_name = project_name or heartbeat . args . project or heartbeat . args . alternate_project return project_name , branch_name if project_name is None or branch_name is None : for plugin_cls in CONFIG_PLUGINS : plugin_name = plugin_cls . __name__ . lower ( ) plugin_configs = get_configs_for_plugin ( plugin_name , configs ) project = plugin_cls ( heartbeat . entity , configs = plugin_configs ) if project . process ( ) : project_name = project_name or project . name ( ) branch_name = project . branch ( ) break if project_name is None : project_name = data . get ( 'project' ) or heartbeat . args . project hide_project = heartbeat . should_obfuscate_project ( ) if hide_project and project_name is not None : return project_name , None if project_name is None or branch_name is None : for plugin_cls in REV_CONTROL_PLUGINS : plugin_name = plugin_cls . __name__ . lower ( ) plugin_configs = get_configs_for_plugin ( plugin_name , configs ) project = plugin_cls ( heartbeat . entity , configs = plugin_configs ) if project . process ( ) : project_name = project_name or project . name ( ) branch_name = branch_name or project . branch ( ) if hide_project : branch_name = None project_name = generate_project_name ( ) project_file = os . path . join ( project . folder ( ) , '.wakatime-project' ) try : with open ( project_file , 'w' ) as fh : fh . write ( project_name ) except IOError : project_name = None break if project_name is None and not hide_project : project_name = data . get ( 'alternate_project' ) or heartbeat . args . alternate_project return project_name , branch_name
Find the current project and branch .
52,584
def generate_project_name ( ) : adjectives = [ 'aged' , 'ancient' , 'autumn' , 'billowing' , 'bitter' , 'black' , 'blue' , 'bold' , 'broad' , 'broken' , 'calm' , 'cold' , 'cool' , 'crimson' , 'curly' , 'damp' , 'dark' , 'dawn' , 'delicate' , 'divine' , 'dry' , 'empty' , 'falling' , 'fancy' , 'flat' , 'floral' , 'fragrant' , 'frosty' , 'gentle' , 'green' , 'hidden' , 'holy' , 'icy' , 'jolly' , 'late' , 'lingering' , 'little' , 'lively' , 'long' , 'lucky' , 'misty' , 'morning' , 'muddy' , 'mute' , 'nameless' , 'noisy' , 'odd' , 'old' , 'orange' , 'patient' , 'plain' , 'polished' , 'proud' , 'purple' , 'quiet' , 'rapid' , 'raspy' , 'red' , 'restless' , 'rough' , 'round' , 'royal' , 'shiny' , 'shrill' , 'shy' , 'silent' , 'small' , 'snowy' , 'soft' , 'solitary' , 'sparkling' , 'spring' , 'square' , 'steep' , 'still' , 'summer' , 'super' , 'sweet' , 'throbbing' , 'tight' , 'tiny' , 'twilight' , 'wandering' , 'weathered' , 'white' , 'wild' , 'winter' , 'wispy' , 'withered' , 'yellow' , 'young' ] nouns = [ 'art' , 'band' , 'bar' , 'base' , 'bird' , 'block' , 'boat' , 'bonus' , 'bread' , 'breeze' , 'brook' , 'bush' , 'butterfly' , 'cake' , 'cell' , 'cherry' , 'cloud' , 'credit' , 'darkness' , 'dawn' , 'dew' , 'disk' , 'dream' , 'dust' , 'feather' , 'field' , 'fire' , 'firefly' , 'flower' , 'fog' , 'forest' , 'frog' , 'frost' , 'glade' , 'glitter' , 'grass' , 'hall' , 'hat' , 'haze' , 'heart' , 'hill' , 'king' , 'lab' , 'lake' , 'leaf' , 'limit' , 'math' , 'meadow' , 'mode' , 'moon' , 'morning' , 'mountain' , 'mouse' , 'mud' , 'night' , 'paper' , 'pine' , 'poetry' , 'pond' , 'queen' , 'rain' , 'recipe' , 'resonance' , 'rice' , 'river' , 'salad' , 'scene' , 'sea' , 'shadow' , 'shape' , 'silence' , 'sky' , 'smoke' , 'snow' , 'snowflake' , 'sound' , 'star' , 'sun' , 'sun' , 'sunset' , 'surf' , 'term' , 'thunder' , 'tooth' , 'tree' , 'truth' , 'union' , 'unit' , 'violet' , 'voice' , 'water' , 'waterfall' , 'wave' , 'wildflower' , 'wind' , 'wood' ] numbers = [ str ( x ) for x in 
range ( 10 ) ] return ' ' . join ( [ random . choice ( adjectives ) . capitalize ( ) , random . choice ( nouns ) . capitalize ( ) , random . choice ( numbers ) + random . choice ( numbers ) , ] )
Generates a random project name .
52,585
def save ( self , session ) : if not HAS_SQL : return try : conn , c = self . connect ( ) c . execute ( 'DELETE FROM {0}' . format ( self . table_name ) ) values = { 'value' : sqlite3 . Binary ( pickle . dumps ( session , protocol = 2 ) ) , } c . execute ( 'INSERT INTO {0} VALUES (:value)' . format ( self . table_name ) , values ) conn . commit ( ) conn . close ( ) except : log . traceback ( logging . DEBUG )
Saves a requests . Session object for the next heartbeat process .
52,586
def get ( self ) : if not HAS_SQL : return requests . session ( ) try : conn , c = self . connect ( ) except : log . traceback ( logging . DEBUG ) return requests . session ( ) session = None try : c . execute ( 'BEGIN IMMEDIATE' ) c . execute ( 'SELECT value FROM {0} LIMIT 1' . format ( self . table_name ) ) row = c . fetchone ( ) if row is not None : session = pickle . loads ( row [ 0 ] ) except : log . traceback ( logging . DEBUG ) try : conn . close ( ) except : log . traceback ( logging . DEBUG ) return session if session is not None else requests . session ( )
Returns a requests . Session object .
52,587
def delete ( self ) : if not HAS_SQL : return try : conn , c = self . connect ( ) c . execute ( 'DELETE FROM {0}' . format ( self . table_name ) ) conn . commit ( ) conn . close ( ) except : log . traceback ( logging . DEBUG )
Clears all cached Session objects .
52,588
def resolve_redirects ( self , resp , req , stream = False , timeout = None , verify = True , cert = None , proxies = None , yield_requests = False , ** adapter_kwargs ) : hist = [ ] url = self . get_redirect_target ( resp ) while url : prepared_request = req . copy ( ) hist . append ( resp ) resp . history = hist [ 1 : ] try : resp . content except ( ChunkedEncodingError , ContentDecodingError , RuntimeError ) : resp . raw . read ( decode_content = False ) if len ( resp . history ) >= self . max_redirects : raise TooManyRedirects ( 'Exceeded %s redirects.' % self . max_redirects , response = resp ) resp . close ( ) if url . startswith ( '//' ) : parsed_rurl = urlparse ( resp . url ) url = '%s:%s' % ( to_native_string ( parsed_rurl . scheme ) , url ) parsed = urlparse ( url ) url = parsed . geturl ( ) if not parsed . netloc : url = urljoin ( resp . url , requote_uri ( url ) ) else : url = requote_uri ( url ) prepared_request . url = to_native_string ( url ) self . rebuild_method ( prepared_request , resp ) if resp . status_code not in ( codes . temporary_redirect , codes . permanent_redirect ) : purged_headers = ( 'Content-Length' , 'Content-Type' , 'Transfer-Encoding' ) for header in purged_headers : prepared_request . headers . pop ( header , None ) prepared_request . body = None headers = prepared_request . headers try : del headers [ 'Cookie' ] except KeyError : pass extract_cookies_to_jar ( prepared_request . _cookies , req , resp . raw ) merge_cookies ( prepared_request . _cookies , self . cookies ) prepared_request . prepare_cookies ( prepared_request . _cookies ) proxies = self . rebuild_proxies ( prepared_request , proxies ) self . rebuild_auth ( prepared_request , resp ) rewindable = ( prepared_request . _body_position is not None and ( 'Content-Length' in headers or 'Transfer-Encoding' in headers ) ) if rewindable : rewind_body ( prepared_request ) req = prepared_request if yield_requests : yield req else : resp = self . 
send ( req , stream = stream , timeout = timeout , verify = verify , cert = cert , proxies = proxies , allow_redirects = False , ** adapter_kwargs ) extract_cookies_to_jar ( self . cookies , prepared_request , resp . raw ) url = self . get_redirect_target ( resp ) yield resp
Receives a Response . Returns a generator of Responses or Requests .
52,589
def rebuild_method ( self , prepared_request , response ) : method = prepared_request . method if response . status_code == codes . see_other and method != 'HEAD' : method = 'GET' if response . status_code == codes . found and method != 'HEAD' : method = 'GET' if response . status_code == codes . moved and method == 'POST' : method = 'GET' prepared_request . method = method
When being redirected we may want to change the method of the request based on certain specs or browser behavior .
52,590
def apply_filters ( stream , filters , lexer = None ) : def _apply ( filter_ , stream ) : for token in filter_ . filter ( lexer , stream ) : yield token for filter_ in filters : stream = _apply ( filter_ , stream ) return stream
Use this method to apply an iterable of filters to a stream . If lexer is given it s forwarded to the filter otherwise the filter receives None .
52,591
def reset_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) context . indent_stack = [ ] context . indent = - 1 context . next_indent = 0 context . block_scalar_indent = None yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Reset the indentation levels .
52,592
def save_indent ( token_class , start = False ) : def callback ( lexer , match , context ) : text = match . group ( ) extra = '' if start : context . next_indent = len ( text ) if context . next_indent < context . indent : while context . next_indent < context . indent : context . indent = context . indent_stack . pop ( ) if context . next_indent > context . indent : extra = text [ context . indent : ] text = text [ : context . indent ] else : context . next_indent += len ( text ) if text : yield match . start ( ) , token_class , text if extra : yield match . start ( ) + len ( text ) , token_class . Error , extra context . pos = match . end ( ) return callback
Save a possible indentation level .
52,593
def set_block_scalar_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) context . block_scalar_indent = None if not text : return increment = match . group ( 1 ) if increment : current_indent = max ( context . indent , 0 ) increment = int ( increment ) context . block_scalar_indent = current_indent + increment if text : yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Set an explicit indentation level for a block scalar .
52,594
def parse_block_scalar_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) if context . block_scalar_indent is None : if len ( text ) <= max ( context . indent , 0 ) : context . stack . pop ( ) context . stack . pop ( ) return context . block_scalar_indent = len ( text ) else : if len ( text ) < context . block_scalar_indent : context . stack . pop ( ) context . stack . pop ( ) return if text : yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Process indentation spaces in a block scalar .
52,595
def content ( self ) : if self . _content is False : if self . _content_consumed : raise RuntimeError ( 'The content for this response was already consumed' ) if self . status_code == 0 or self . raw is None : self . _content = None else : self . _content = bytes ( ) . join ( self . iter_content ( CONTENT_CHUNK_SIZE ) ) or bytes ( ) self . _content_consumed = True return self . _content
Content of the response in bytes .
52,596
def py_scanstring ( s , end , encoding = None , strict = True , _b = BACKSLASH , _m = STRINGCHUNK . match , _join = u ( '' ) . join , _PY3 = PY3 , _maxunicode = sys . maxunicode ) : if encoding is None : encoding = DEFAULT_ENCODING chunks = [ ] _append = chunks . append begin = end - 1 while 1 : chunk = _m ( s , end ) if chunk is None : raise JSONDecodeError ( "Unterminated string starting at" , s , begin ) end = chunk . end ( ) content , terminator = chunk . groups ( ) if content : if not _PY3 and not isinstance ( content , text_type ) : content = text_type ( content , encoding ) _append ( content ) if terminator == '"' : break elif terminator != '\\' : if strict : msg = "Invalid control character %r at" raise JSONDecodeError ( msg , s , end ) else : _append ( terminator ) continue try : esc = s [ end ] except IndexError : raise JSONDecodeError ( "Unterminated string starting at" , s , begin ) if esc != 'u' : try : char = _b [ esc ] except KeyError : msg = "Invalid \\X escape sequence %r" raise JSONDecodeError ( msg , s , end ) end += 1 else : msg = "Invalid \\uXXXX escape sequence" esc = s [ end + 1 : end + 5 ] escX = esc [ 1 : 2 ] if len ( esc ) != 4 or escX == 'x' or escX == 'X' : raise JSONDecodeError ( msg , s , end - 1 ) try : uni = int ( esc , 16 ) except ValueError : raise JSONDecodeError ( msg , s , end - 1 ) end += 5 if ( _maxunicode > 65535 and uni & 0xfc00 == 0xd800 and s [ end : end + 2 ] == '\\u' ) : esc2 = s [ end + 2 : end + 6 ] escX = esc2 [ 1 : 2 ] if len ( esc2 ) == 4 and not ( escX == 'x' or escX == 'X' ) : try : uni2 = int ( esc2 , 16 ) except ValueError : raise JSONDecodeError ( msg , s , end ) if uni2 & 0xfc00 == 0xdc00 : uni = 0x10000 + ( ( ( uni - 0xd800 ) << 10 ) | ( uni2 - 0xdc00 ) ) end += 6 char = unichr ( uni ) _append ( char ) return _join ( chunks ) , end
Scan the string s for a JSON string . End is the index of the character in s after the quote that started the JSON string . Unescapes all valid JSON string escape sequences and raises ValueError on attempt to decode an invalid string . If strict is False then literal control characters are allowed in the string .
52,597
def _get_css_class ( self , ttype ) : ttypeclass = _get_ttype_class ( ttype ) if ttypeclass : return self . classprefix + ttypeclass return ''
Return the css class of this token type prefixed with the classprefix option .
52,598
def _get_css_classes ( self , ttype ) : cls = self . _get_css_class ( ttype ) while ttype not in STANDARD_TYPES : ttype = ttype . parent cls = self . _get_css_class ( ttype ) + ' ' + cls return cls
Return the css classes of this token type prefixed with the classprefix option .
52,599
def get_style_defs ( self , arg = None ) : if arg is None : arg = ( 'cssclass' in self . options and '.' + self . cssclass or '' ) if isinstance ( arg , string_types ) : args = [ arg ] else : args = list ( arg ) def prefix ( cls ) : if cls : cls = '.' + cls tmp = [ ] for arg in args : tmp . append ( ( arg and arg + ' ' or '' ) + cls ) return ', ' . join ( tmp ) styles = [ ( level , ttype , cls , style ) for cls , ( style , ttype , level ) in iteritems ( self . class2style ) if cls and style ] styles . sort ( ) lines = [ '%s { %s } /* %s */' % ( prefix ( cls ) , style , repr ( ttype ) [ 6 : ] ) for ( level , ttype , cls , style ) in styles ] if arg and not self . nobackground and self . style . background_color is not None : text_style = '' if Text in self . ttype2class : text_style = ' ' + self . class2style [ self . ttype2class [ Text ] ] [ 0 ] lines . insert ( 0 , '%s { background: %s;%s }' % ( prefix ( '' ) , self . style . background_color , text_style ) ) if self . style . highlight_color is not None : lines . insert ( 0 , '%s.hll { background-color: %s }' % ( prefix ( '' ) , self . style . highlight_color ) ) return '\n' . join ( lines )
Return CSS style definitions for the classes produced by the current highlighting style . arg can be a string or list of selectors to insert before the token type classes .