idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
55,200 | def render ( self , request , template , context ) : if self . allow_force_html and self . request . GET . get ( 'html' , False ) : html = get_template ( template ) . render ( context ) return HttpResponse ( html ) else : response = HttpResponse ( content_type = 'application/pdf' ) if self . prompt_download : response [ 'Content-Disposition' ] = 'attachment; filename="{}"' . format ( self . get_download_name ( ) ) helpers . render_pdf ( template = template , file_ = response , url_fetcher = self . url_fetcher , context = context , ) return response | Returns a response . By default this will contain the rendered PDF but if both allow_force_html is True and the querystring html = true was set it will return a plain HTML . |
55,201 | def replace ( self , s , data , attrs = None ) : s = s . format ( ** self . rc [ 'labels' ] ) attrs = attrs or data . attrs if hasattr ( getattr ( data , 'psy' , None ) , 'arr_name' ) : attrs = attrs . copy ( ) attrs [ 'arr_name' ] = data . psy . arr_name s = safe_modulo ( s , attrs ) if isinstance ( data , InteractiveList ) : data = data [ 0 ] tname = self . any_decoder . get_tname ( next ( self . plotter . iter_base_variables ) , data . coords ) if tname is not None and tname in data . coords : time = data . coords [ tname ] if not time . values . ndim : try : s = pd . to_datetime ( str ( time . values [ ( ) ] ) ) . strftime ( s ) except ValueError : pass if six . PY2 : return s . decode ( 'utf-8' ) return s | Replace the attributes of the plotter data in a string |
55,202 | def get_fig_data_attrs ( self , delimiter = None ) : if self . project is not None : delimiter = next ( filter ( lambda d : d is not None , [ delimiter , self . delimiter , self . rc [ 'delimiter' ] ] ) ) figs = self . project . figs fig = self . ax . get_figure ( ) if self . plotter . _initialized and fig in figs : ret = figs [ fig ] . joined_attrs ( delimiter = delimiter , plot_data = True ) else : ret = self . get_enhanced_attrs ( self . plotter . plot_data ) self . logger . debug ( 'Can not get the figure attributes because plot has not ' 'yet been initialized!' ) return ret else : return self . get_enhanced_attrs ( self . plotter . plot_data ) | Join the data attributes with other plotters in the project |
55,203 | def get_fmt_widget ( self , parent , project ) : from psy_simple . widgets . texts import LabelWidget return LabelWidget ( parent , self , project ) | Create a combobox with the attributes |
55,204 | def clear_other_texts ( self , remove = False ) : fig = self . ax . get_figure ( ) if len ( fig . texts ) == 1 : return for i , text in enumerate ( fig . texts ) : if text == self . _text : continue if text . get_position ( ) == self . _text . get_position ( ) : if not remove : text . set_text ( '' ) else : del fig [ i ] | Make sure that no other text is a the same position as this one |
55,205 | def transform ( self ) : ax = self . ax return { 'axes' : ax . transAxes , 'fig' : ax . get_figure ( ) . transFigure , 'data' : ax . transData } | Dictionary containing the relevant transformations |
55,206 | def _remove_texttuple ( self , pos ) : for i , ( old_x , old_y , s , old_cs , d ) in enumerate ( self . value ) : if ( old_x , old_y , old_cs ) == pos : self . value . pop ( i ) return raise ValueError ( "{0} not found!" . format ( pos ) ) | Remove a texttuple from the value in the plotter |
55,207 | def _update_texttuple ( self , x , y , s , cs , d ) : pos = ( x , y , cs ) for i , ( old_x , old_y , old_s , old_cs , old_d ) in enumerate ( self . value ) : if ( old_x , old_y , old_cs ) == pos : self . value [ i ] = ( old_x , old_y , s , old_cs , d ) return raise ValueError ( "No text tuple found at {0}!" . format ( pos ) ) | Update the text tuple at x and y with the given s and d |
55,208 | def share ( self , fmto , ** kwargs ) : kwargs . setdefault ( 'texts_to_remove' , self . _texts_to_remove ) super ( Text , self ) . share ( fmto , ** kwargs ) | Share the settings of this formatoption with other data objects |
55,209 | def preprocess_cell ( self , cell : "NotebookNode" , resources : dict , index : int ) -> Tuple [ "NotebookNode" , dict ] : if cell . cell_type == "markdown" : variables = cell [ "metadata" ] . get ( "variables" , { } ) if len ( variables ) > 0 : cell . source = self . replace_variables ( cell . source , variables ) if resources . get ( "delete_pymarkdown" , False ) : del cell . metadata [ "variables" ] return cell , resources | Preprocess cell . |
55,210 | def index_dir ( self , folder ) : folder_path = folder print ( 'Indexing folder: ' + folder_path ) nested_dir = { } folder = folder_path . rstrip ( os . sep ) start = folder . rfind ( os . sep ) + 1 for root , dirs , files in os . walk ( folder ) : folders = root [ start : ] . split ( os . sep ) subdir = { } for f in files : if os . path . splitext ( f ) [ 1 ] == '.md' : with open ( os . path . abspath ( os . path . join ( root , f ) ) , encoding = 'utf-8' ) as fp : try : _ , meta = self . mrk . extract_meta ( fp . read ( ) ) except : print ( "Skipping indexing " + f + "; Could not parse metadata" ) meta = { 'title' : f } pass subdir [ f ] = meta parent = nested_dir for fold in folders [ : - 1 ] : parent = parent . get ( fold ) parent [ folders [ - 1 ] ] = subdir return nested_dir | Creates a nested dictionary that represents the folder structure of folder . Also extracts meta data from all markdown posts and adds to the dictionary . |
55,211 | def cycles_created_by ( callable ) : with restore_gc_state ( ) : gc . disable ( ) gc . collect ( ) gc . set_debug ( gc . DEBUG_SAVEALL ) callable ( ) new_object_count = gc . collect ( ) if new_object_count : objects = gc . garbage [ - new_object_count : ] del gc . garbage [ - new_object_count : ] else : objects = [ ] return ObjectGraph ( objects ) | Return graph of cyclic garbage created by the given callable . |
55,212 | def snapshot ( ) : all_objects = gc . get_objects ( ) this_frame = inspect . currentframe ( ) selected_objects = [ ] for obj in all_objects : if obj is not this_frame : selected_objects . append ( obj ) graph = ObjectGraph ( selected_objects ) del this_frame , all_objects , selected_objects , obj return graph | Return the graph of all currently gc - tracked objects . |
55,213 | def extendMarkdown ( self , md , md_globals ) : md . registerExtension ( self ) for processor in ( self . preprocessors or [ ] ) : md . preprocessors . add ( processor . __name__ . lower ( ) , processor ( md ) , '_end' ) for pattern in ( self . inlinepatterns or [ ] ) : md . inlinePatterns . add ( pattern . __name__ . lower ( ) , pattern ( md ) , '_end' ) for processor in ( self . postprocessors or [ ] ) : md . postprocessors . add ( processor . __name__ . lower ( ) , processor ( md ) , '_end' ) | Every extension requires a extendMarkdown method to tell the markdown renderer how use the extension . |
55,214 | def run ( paths , output = _I_STILL_HATE_EVERYTHING , recurse = core . flat , sort_by = None , ls = core . ls , stdout = stdout , ) : if output is _I_STILL_HATE_EVERYTHING : output = core . columnized if stdout . isatty ( ) else core . one_per_line if sort_by is None : if output == core . as_tree : def sort_by ( thing ) : return ( thing . parent ( ) , thing . basename ( ) . lstrip ( string . punctuation ) . lower ( ) , ) else : def sort_by ( thing ) : return thing def _sort_by ( thing ) : return not getattr ( thing , "_always_sorts_first" , False ) , sort_by ( thing ) contents = [ path_and_children for path in paths or ( project . from_path ( FilePath ( "." ) ) , ) for path_and_children in recurse ( path = path , ls = ls ) ] for line in output ( contents , sort_by = _sort_by ) : stdout . write ( line ) stdout . write ( "\n" ) | Project - oriented directory and file information lister . |
55,215 | def getCustomLogger ( name , logLevel , logFormat = '%(asctime)s %(levelname)-9s:%(name)s:%(module)s:%(funcName)s: %(message)s' ) : assert isinstance ( logFormat , basestring ) , ( "logFormat must be a string but is %r" % logFormat ) assert isinstance ( logLevel , basestring ) , ( "logLevel must be a string but is %r" % logLevel ) assert isinstance ( name , basestring ) , ( "name must be a string but is %r" % name ) validLogLevels = [ 'CRITICAL' , 'DEBUG' , 'ERROR' , 'INFO' , 'WARNING' ] if not logLevel : logLevel = 'DEBUG' if logLevel . upper ( ) not in validLogLevels : logLevel = 'DEBUG' numericLevel = getattr ( logging , logLevel . upper ( ) , None ) if not isinstance ( numericLevel , int ) : raise ValueError ( "Invalid log level: %s" % logLevel ) logging . basicConfig ( level = numericLevel , format = logFormat ) logger = logging . getLogger ( name ) return logger | Set up logging |
55,216 | def mkdir_p ( path ) : assert isinstance ( path , basestring ) , ( "path must be a string but is %r" % path ) try : os . makedirs ( path ) except OSError as exception : if exception . errno != errno . EEXIST : raise | Mimic mkdir - p since os module doesn t provide one . |
55,217 | def setup_exchanges ( app ) : with app . producer_or_acquire ( ) as P : for q in app . amqp . queues . values ( ) : P . maybe_declare ( q ) | Setup result exchange to route all tasks to platform queue . |
55,218 | def setup_app ( app , throw = True ) : success = True try : for func in SETUP_FUNCS : try : func ( app ) except Exception : success = False if throw : raise else : msg = "Failed to run setup function %r(app)" logger . exception ( msg , func . __name__ ) finally : setattr ( app , 'is_set_up' , success ) | Ensure application is set up to expected configuration . This function is typically triggered by the worker_init signal however it must be called manually by codebases that are run only as task producers or from within a Python shell . |
55,219 | def _poplast ( self ) : try : tup = self . data . pop ( ) except IndexError as ex : ex . args = ( 'DEPQ is already empty' , ) raise self_items = self . items try : self_items [ tup [ 0 ] ] -= 1 if self_items [ tup [ 0 ] ] == 0 : del self_items [ tup [ 0 ] ] except TypeError : r = repr ( tup [ 0 ] ) self_items [ r ] -= 1 if self_items [ r ] == 0 : del self_items [ r ] return tup | For avoiding lock during inserting to keep maxlen |
55,220 | def DatabaseEnabled ( cls ) : if not issubclass ( cls , Storable ) : raise ValueError ( "%s is not a subclass of gludb.datab.Storage" % repr ( cls ) ) cls . ensure_table = classmethod ( _ensure_table ) cls . find_one = classmethod ( _find_one ) cls . find_all = classmethod ( _find_all ) cls . find_by_index = classmethod ( _find_by_index ) cls . save = _save cls . delete = _delete return cls | Given persistence methods to classes with this annotation . |
55,221 | def _find_playlist ( self ) : data = None if self . id : data = self . connection . get_item ( 'find_playlist_by_id' , playlist_id = self . id ) elif self . reference_id : data = self . connection . get_item ( 'find_playlist_by_reference_id' , reference_id = self . reference_id ) if data : self . _load ( data ) | Internal method to populate the object given the id or reference_id that has been set in the constructor . |
55,222 | def _to_dict ( self ) : data = { 'name' : self . name , 'referenceId' : self . reference_id , 'shortDescription' : self . short_description , 'playlistType' : self . type , 'id' : self . id } if self . videos : for video in self . videos : if video . id not in self . video_ids : self . video_ids . append ( video . id ) if self . video_ids : data [ 'videoIds' ] = self . video_ids [ data . pop ( key ) for key in data . keys ( ) if data [ key ] == None ] return data | Internal method that serializes object into a dictionary . |
55,223 | def _load ( self , data ) : self . raw_data = data self . id = data [ 'id' ] self . reference_id = data [ 'referenceId' ] self . name = data [ 'name' ] self . short_description = data [ 'shortDescription' ] self . thumbnail_url = data [ 'thumbnailURL' ] self . videos = [ ] self . video_ids = data [ 'videoIds' ] self . type = data [ 'playlistType' ] for video in data . get ( 'videos' , [ ] ) : self . videos . append ( pybrightcove . video . Video ( data = video , connection = self . connection ) ) | Internal method that deserializes a pybrightcove . playlist . Playlist object . |
55,224 | def save ( self ) : d = self . _to_dict ( ) if len ( d . get ( 'videoIds' , [ ] ) ) > 0 : if not self . id : self . id = self . connection . post ( 'create_playlist' , playlist = d ) else : data = self . connection . post ( 'update_playlist' , playlist = d ) if data : self . _load ( data ) | Create or update a playlist . |
55,225 | def delete ( self , cascade = False ) : if self . id : self . connection . post ( 'delete_playlist' , playlist_id = self . id , cascade = cascade ) self . id = None | Deletes this playlist . |
55,226 | def find_all ( connection = None , page_size = 100 , page_number = 0 , sort_by = DEFAULT_SORT_BY , sort_order = DEFAULT_SORT_ORDER ) : return pybrightcove . connection . ItemResultSet ( "find_all_playlists" , Playlist , connection , page_size , page_number , sort_by , sort_order ) | List all playlists . |
55,227 | def find_by_ids ( ids , connection = None , page_size = 100 , page_number = 0 , sort_by = DEFAULT_SORT_BY , sort_order = DEFAULT_SORT_ORDER ) : ids = ',' . join ( [ str ( i ) for i in ids ] ) return pybrightcove . connection . ItemResultSet ( 'find_playlists_by_ids' , Playlist , connection , page_size , page_number , sort_by , sort_order , playlist_ids = ids ) | List playlists by specific IDs . |
55,228 | def find_by_reference_ids ( reference_ids , connection = None , page_size = 100 , page_number = 0 , sort_by = DEFAULT_SORT_BY , sort_order = DEFAULT_SORT_ORDER ) : reference_ids = ',' . join ( [ str ( i ) for i in reference_ids ] ) return pybrightcove . connection . ItemResultSet ( "find_playlists_by_reference_ids" , Playlist , connection , page_size , page_number , sort_by , sort_order , reference_ids = reference_ids ) | List playlists by specific reference_ids . |
55,229 | def find_for_player_id ( player_id , connection = None , page_size = 100 , page_number = 0 , sort_by = DEFAULT_SORT_BY , sort_order = DEFAULT_SORT_ORDER ) : return pybrightcove . connection . ItemResultSet ( "find_playlists_for_player_id" , Playlist , connection , page_size , page_number , sort_by , sort_order , player_id = player_id ) | List playlists for a for given player id . |
55,230 | def get_options_for_id ( options : Dict [ str , Dict [ str , Any ] ] , identifier : str ) : check_var ( options , var_types = dict , var_name = 'options' ) res = options [ identifier ] if identifier in options . keys ( ) else dict ( ) check_var ( res , var_types = dict , var_name = 'options[' + identifier + ']' ) return res | Helper method from the full options dict of dicts to return either the options related to this parser or an empty dictionary . It also performs all the var type checks |
55,231 | def _convert ( self , desired_type : Type [ T ] , source_obj : S , logger : Logger , options : Dict [ str , Dict [ str , Any ] ] ) -> T : pass | Implementing classes should implement this method to perform the conversion itself |
55,232 | def _convert ( self , desired_type : Type [ T ] , source_obj : S , logger : Logger , options : Dict [ str , Dict [ str , Any ] ] ) -> T : try : if self . unpack_options : opts = self . get_applicable_options ( options ) if self . function_args is not None : return self . conversion_method ( desired_type , source_obj , logger , ** self . function_args , ** opts ) else : return self . conversion_method ( desired_type , source_obj , logger , ** opts ) else : if self . function_args is not None : return self . conversion_method ( desired_type , source_obj , logger , options , ** self . function_args ) else : return self . conversion_method ( desired_type , source_obj , logger , options ) except TypeError as e : raise CaughtTypeError . create ( self . conversion_method , e ) | Delegates to the user - provided method . Passes the appropriate part of the options according to the function name . |
55,233 | def remove_first ( self , inplace : bool = False ) : if len ( self . _converters_list ) > 1 : if inplace : self . _converters_list = self . _converters_list [ 1 : ] self . from_type = self . _converters_list [ 0 ] . from_type return else : new = copy ( self ) new . _converters_list = new . _converters_list [ 1 : ] new . from_type = new . _converters_list [ 0 ] . from_type return new else : raise ValueError ( 'cant remove first: would make it empty!' ) | Utility method to remove the first converter of this chain . If inplace is True this object is modified and None is returned . Otherwise a copy is returned |
55,234 | def add_conversion_steps ( self , converters : List [ Converter ] , inplace : bool = False ) : check_var ( converters , var_types = list , min_len = 1 ) if inplace : for converter in converters : self . add_conversion_step ( converter , inplace = True ) else : new = copy ( self ) new . add_conversion_steps ( converters , inplace = True ) return new | Utility method to add converters to this chain . If inplace is True this object is modified and None is returned . Otherwise a copy is returned |
55,235 | def add_conversion_step ( self , converter : Converter [ S , T ] , inplace : bool = False ) : if self . is_generic ( ) and converter . is_generic ( ) : raise ValueError ( 'Cannot chain this generic converter chain to the provided converter : it is generic too!' ) elif converter . can_be_appended_to ( self , self . strict ) : if inplace : self . _converters_list . append ( converter ) self . to_type = converter . to_type return else : new = copy ( self ) new . _converters_list . append ( converter ) new . to_type = converter . to_type return new else : raise TypeError ( 'Cannnot register a converter on this conversion chain : source type \'' + get_pretty_type_str ( converter . from_type ) + '\' is not compliant with current destination type of the chain : \'' + get_pretty_type_str ( self . to_type ) + ' (this chain performs ' + ( '' if self . strict else 'non-' ) + 'strict mode matching)' ) | Utility method to add a converter to this chain . If inplace is True this object is modified and None is returned . Otherwise a copy is returned |
55,236 | def insert_conversion_steps_at_beginning ( self , converters : List [ Converter ] , inplace : bool = False ) : if inplace : for converter in reversed ( converters ) : self . insert_conversion_step_at_beginning ( converter , inplace = True ) return else : new = copy ( self ) for converter in reversed ( converters ) : new . insert_conversion_step_at_beginning ( converter , inplace = True ) return new | Utility method to insert converters at the beginning ofthis chain . If inplace is True this object is modified and None is returned . Otherwise a copy is returned |
55,237 | def _convert ( self , desired_type : Type [ T ] , obj : S , logger : Logger , options : Dict [ str , Dict [ str , Any ] ] ) -> T : for converter in self . _converters_list [ : - 1 ] : obj = converter . convert ( converter . to_type , obj , logger , options ) return self . _converters_list [ - 1 ] . convert ( desired_type , obj , logger , options ) | Apply the converters of the chain in order to produce the desired result . Only the last converter will see the desired type the others will be asked to produce their declared to_type . |
55,238 | def listens_to ( name , sender = None , weak = True ) : def decorator ( f ) : if sender : return signal ( name ) . connect ( f , sender = sender , weak = weak ) return signal ( name ) . connect ( f , weak = weak ) return decorator | Listens to a named signal |
55,239 | def LoadInstallations ( counter ) : process = subprocess . Popen ( [ "pip" , "list" , "--format=json" ] , stdout = subprocess . PIPE ) output , _ = process . communicate ( ) installations = json . loads ( output ) for i in installations : counter . labels ( i [ "name" ] , i [ "version" ] ) . inc ( ) | Load installed packages and export the version map . |
55,240 | def RESTrequest ( * args , ** kwargs ) : verbose = kwargs . get ( 'verbose' , False ) force_download = kwargs . get ( 'force' , False ) save = kwargs . get ( 'force' , True ) args = list ( chain . from_iterable ( a . split ( '/' ) for a in args ) ) args = [ a for a in args if a ] request = 'http://rest.kegg.jp/' + "/" . join ( args ) print_verbose ( verbose , "richiedo la pagina: " + request ) filename = "KEGG_" + "_" . join ( args ) try : if force_download : raise IOError ( ) print_verbose ( verbose , "loading the cached file " + filename ) with open ( filename , 'r' ) as f : data = pickle . load ( f ) except IOError : print_verbose ( verbose , "downloading the library,it may take some time" ) import urllib2 try : req = urllib2 . urlopen ( request ) data = req . read ( ) if save : with open ( filename , 'w' ) as f : print_verbose ( verbose , "saving the file to " + filename ) pickle . dump ( data , f ) except urllib2 . HTTPError as e : raise e return data | return and save the blob of data that is returned from kegg without caring to the format |
55,241 | def command_help_long ( self ) : indent = " " * 2 help = "Command must be one of:\n" for action_name in self . parser . valid_commands : help += "%s%-10s %-70s\n" % ( indent , action_name , self . parser . commands [ action_name ] . desc_short . capitalize ( ) ) help += '\nSee \'%s help COMMAND\' for help and information on a command' % self . parser . prog return help | Return command help for use in global parser usage string |
55,242 | def run ( self ) : self . parser = MultioptOptionParser ( usage = "%prog <command> [options] [args]" , prog = self . clsname , version = self . version , option_list = self . global_options , description = self . desc_short , commands = self . command_set , epilog = self . footer ) try : self . options , self . args = self . parser . parse_args ( self . argv ) except Exception , e : print str ( e ) pass if len ( self . args ) < 1 : self . parser . print_lax_help ( ) return 2 self . command = self . args . pop ( 0 ) showHelp = False if self . command == 'help' : if len ( self . args ) < 1 : self . parser . print_lax_help ( ) return 2 else : self . command = self . args . pop ( ) showHelp = True if self . command not in self . valid_commands : self . parser . print_cmd_error ( self . command ) return 2 self . command_set [ self . command ] . set_cmdname ( self . command ) subcmd_parser = self . command_set [ self . command ] . get_parser ( self . clsname , self . version , self . global_options ) subcmd_options , subcmd_args = subcmd_parser . parse_args ( self . args ) if showHelp : subcmd_parser . print_help_long ( ) return 1 try : self . command_set [ self . command ] . func ( subcmd_options , * subcmd_args ) except ( CommandError , TypeError ) , e : subcmd_parser . print_exec_error ( self . command , str ( e ) ) print return 2 return 1 | Run the multiopt parser |
55,243 | def add ( self , host = None , f_community = None , f_access = None , f_version = None ) : return self . send . snmp_add ( host , f_community , f_access , f_version ) | Add an SNMP community string to a host |
55,244 | def delete_collection ( db_name , collection_name , host = 'localhost' , port = 27017 ) : client = MongoClient ( "mongodb://%s:%d" % ( host , port ) ) client [ db_name ] . drop_collection ( collection_name ) | Almost exclusively for testing . |
55,245 | def _check_1st_line ( line , ** kwargs ) : components = kwargs . get ( "components" , ( ) ) max_first_line = kwargs . get ( "max_first_line" , 50 ) errors = [ ] lineno = 1 if len ( line ) > max_first_line : errors . append ( ( "M190" , lineno , max_first_line , len ( line ) ) ) if line . endswith ( "." ) : errors . append ( ( "M191" , lineno ) ) if ':' not in line : errors . append ( ( "M110" , lineno ) ) else : component , msg = line . split ( ':' , 1 ) if component not in components : errors . append ( ( "M111" , lineno , component ) ) return errors | First line check . |
55,246 | def _check_bullets ( lines , ** kwargs ) : max_length = kwargs . get ( "max_length" , 72 ) labels = { l for l , _ in kwargs . get ( "commit_msg_labels" , tuple ( ) ) } def _strip_ticket_directives ( line ) : return re . sub ( r'( \([^)]*\)){1,}$' , '' , line ) errors = [ ] missed_lines = [ ] skipped = [ ] for ( i , line ) in enumerate ( lines [ 1 : ] ) : if line . startswith ( '*' ) : dot_found = False if len ( missed_lines ) > 0 : errors . append ( ( "M130" , i + 2 ) ) if lines [ i ] . strip ( ) != '' : errors . append ( ( "M120" , i + 2 ) ) if _strip_ticket_directives ( line ) . endswith ( '.' ) : dot_found = True label = _re_bullet_label . search ( line ) if label and label . group ( 'label' ) not in labels : errors . append ( ( "M122" , i + 2 , label . group ( 'label' ) ) ) for ( j , indented ) in enumerate ( lines [ i + 2 : ] ) : if indented . strip ( ) == '' : break if not re . search ( r"^ {2}\S" , indented ) : errors . append ( ( "M121" , i + j + 3 ) ) else : skipped . append ( i + j + 1 ) stripped_line = _strip_ticket_directives ( indented ) if stripped_line . endswith ( '.' ) : dot_found = True elif stripped_line . strip ( ) : dot_found = False if not dot_found : errors . append ( ( "M123" , i + 2 ) ) elif i not in skipped and line . strip ( ) : missed_lines . append ( ( i + 2 , line ) ) if len ( line ) > max_length : errors . append ( ( "M190" , i + 2 , max_length , len ( line ) ) ) return errors , missed_lines | Check that the bullet point list is well formatted . |
55,247 | def _check_signatures ( lines , ** kwargs ) : trusted = kwargs . get ( "trusted" , ( ) ) signatures = tuple ( kwargs . get ( "signatures" , ( ) ) ) alt_signatures = tuple ( kwargs . get ( "alt_signatures" , ( ) ) ) min_reviewers = kwargs . get ( "min_reviewers" , 3 ) matching = [ ] errors = [ ] signatures += alt_signatures test_signatures = re . compile ( "^({0})" . format ( "|" . join ( signatures ) ) ) test_alt_signatures = re . compile ( "^({0})" . format ( "|" . join ( alt_signatures ) ) ) for i , line in lines : if signatures and test_signatures . search ( line ) : if line . endswith ( "." ) : errors . append ( ( "M191" , i ) ) if not alt_signatures or not test_alt_signatures . search ( line ) : matching . append ( line ) else : errors . append ( ( "M102" , i ) ) if not matching : errors . append ( ( "M101" , 1 ) ) errors . append ( ( "M100" , 1 ) ) elif len ( matching ) < min_reviewers : pattern = re . compile ( '|' . join ( map ( lambda x : '<' + re . escape ( x ) + '>' , trusted ) ) ) trusted_matching = list ( filter ( None , map ( pattern . search , matching ) ) ) if len ( trusted_matching ) == 0 : errors . append ( ( "M100" , 1 ) ) return errors | Check that the signatures are valid . |
55,248 | def check_message ( message , ** kwargs ) : if kwargs . pop ( "allow_empty" , False ) : if not message or message . isspace ( ) : return [ ] lines = re . split ( r"\r\n|\r|\n" , message ) errors = _check_1st_line ( lines [ 0 ] , ** kwargs ) err , signature_lines = _check_bullets ( lines , ** kwargs ) errors += err errors += _check_signatures ( signature_lines , ** kwargs ) def _format ( code , lineno , args ) : return "{0}: {1} {2}" . format ( lineno , code , _messages_codes [ code ] . format ( * args ) ) return list ( map ( lambda x : _format ( x [ 0 ] , x [ 1 ] , x [ 2 : ] ) , sorted ( errors , key = lambda x : x [ 0 ] ) ) ) | Check the message format . |
55,249 | def _register_pyflakes_check ( ) : from flake8_isort import Flake8Isort from flake8_blind_except import check_blind_except codes = { "UnusedImport" : "F401" , "ImportShadowedByLoopVar" : "F402" , "ImportStarUsed" : "F403" , "LateFutureImport" : "F404" , "Redefined" : "F801" , "RedefinedInListComp" : "F812" , "UndefinedName" : "F821" , "UndefinedExport" : "F822" , "UndefinedLocal" : "F823" , "DuplicateArgument" : "F831" , "UnusedVariable" : "F841" , } for name , obj in vars ( pyflakes . messages ) . items ( ) : if name [ 0 ] . isupper ( ) and obj . message : obj . tpl = "{0} {1}" . format ( codes . get ( name , "F999" ) , obj . message ) pep8 . register_check ( _PyFlakesChecker , codes = [ 'F' ] ) parser = pep8 . get_parser ( '' , '' ) Flake8Isort . add_options ( parser ) options , args = parser . parse_args ( [ ] ) pep8 . register_check ( Flake8Isort , codes = [ 'I' ] ) pep8 . register_check ( check_blind_except , codes = [ 'B90' ] ) | Register the pyFlakes checker into PEP8 set of checks . |
55,250 | def check_pydocstyle ( filename , ** kwargs ) : ignore = kwargs . get ( "ignore" ) match = kwargs . get ( "match" , None ) match_dir = kwargs . get ( "match_dir" , None ) errors = [ ] if match and not re . match ( match , os . path . basename ( filename ) ) : return errors if match_dir : path = os . path . split ( os . path . abspath ( filename ) ) [ 0 ] while path != "/" : path , dirname = os . path . split ( path ) if not re . match ( match_dir , dirname ) : return errors checker = pydocstyle . PEP257Checker ( ) with open ( filename ) as fp : try : for error in checker . check_source ( fp . read ( ) , filename ) : if ignore is None or error . code not in ignore : message = re . sub ( "(D[0-9]{3}): ?(.*)" , r"\1 \2" , error . message ) errors . append ( "{0}: {1}" . format ( error . line , message ) ) except tokenize . TokenError as e : errors . append ( "{1}:{2} {0}" . format ( e . args [ 0 ] , * e . args [ 1 ] ) ) except pydocstyle . AllError as e : errors . append ( str ( e ) ) return errors | Perform static analysis on the given file docstrings . |
55,251 | def check_license ( filename , ** kwargs ) : year = kwargs . pop ( "year" , datetime . now ( ) . year ) python_style = kwargs . pop ( "python_style" , True ) ignores = kwargs . get ( "ignore" ) template = "{0}: {1} {2}" if python_style : re_comment = re . compile ( r"^#.*|\{#.*|[\r\n]+$" ) starter = "# " else : re_comment = re . compile ( r"^/\*.*| \*.*|[\r\n]+$" ) starter = " *" errors = [ ] lines = [ ] file_is_empty = False license = "" lineno = 0 try : with codecs . open ( filename , "r" , "utf-8" ) as fp : line = fp . readline ( ) blocks = [ ] while re_comment . match ( line ) : if line . startswith ( starter ) : line = line [ len ( starter ) : ] . lstrip ( ) blocks . append ( line ) lines . append ( ( lineno , line . strip ( ) ) ) lineno , line = lineno + 1 , fp . readline ( ) file_is_empty = line == "" license = "" . join ( blocks ) except UnicodeDecodeError : errors . append ( ( lineno + 1 , "L190" , "utf-8" ) ) license = "" if file_is_empty and not license . strip ( ) : return errors match_year = _re_copyright_year . search ( license ) if match_year is None : errors . append ( ( lineno + 1 , "L101" ) ) elif int ( match_year . group ( "year" ) ) != year : theline = match_year . group ( 0 ) lno = lineno for no , l in lines : if theline . strip ( ) == l : lno = no break errors . append ( ( lno + 1 , "L102" , year , match_year . group ( "year" ) ) ) else : program_match = _re_program . search ( license ) program_2_match = _re_program_2 . search ( license ) program_3_match = _re_program_3 . search ( license ) if program_match is None : errors . append ( ( lineno , "L100" ) ) elif ( program_2_match is None or program_3_match is None or ( program_match . group ( "program" ) . upper ( ) != program_2_match . group ( "program" ) . upper ( ) != program_3_match . group ( "program" ) . upper ( ) ) ) : errors . append ( ( lineno , "L103" ) ) def _format_error ( lineno , code , * args ) : return template . format ( lineno , code , _licenses_codes [ code ] . format ( * args ) ) def _filter_codes ( error ) : if not ignores or error [ 1 ] not in ignores : return error return list ( map ( lambda x : _format_error ( * x ) , filter ( _filter_codes , errors ) ) ) | Perform a license check on the given file . |
55,252 | def get_options ( config = None ) : if config is None : from . import config config . get = lambda key , default = None : getattr ( config , key , default ) base = { "components" : config . get ( "COMPONENTS" ) , "signatures" : config . get ( "SIGNATURES" ) , "commit_msg_template" : config . get ( "COMMIT_MSG_TEMPLATE" ) , "commit_msg_labels" : config . get ( "COMMIT_MSG_LABELS" ) , "alt_signatures" : config . get ( "ALT_SIGNATURES" ) , "trusted" : config . get ( "TRUSTED_DEVELOPERS" ) , "pep8" : config . get ( "CHECK_PEP8" , True ) , "pydocstyle" : config . get ( "CHECK_PYDOCSTYLE" , True ) , "license" : config . get ( "CHECK_LICENSE" , True ) , "pyflakes" : config . get ( "CHECK_PYFLAKES" , True ) , "ignore" : config . get ( "IGNORE" ) , "select" : config . get ( "SELECT" ) , "match" : config . get ( "PYDOCSTYLE_MATCH" ) , "match_dir" : config . get ( "PYDOCSTYLE_MATCH_DIR" ) , "min_reviewers" : config . get ( "MIN_REVIEWERS" ) , "colors" : config . get ( "COLORS" , True ) , "excludes" : config . get ( "EXCLUDES" , [ ] ) , "authors" : config . get ( "AUTHORS" ) , "exclude_author_names" : config . get ( "EXCLUDE_AUTHOR_NAMES" ) , } options = { } for k , v in base . items ( ) : if v is not None : options [ k ] = v return options | Build the options from the config object . |
55,253 | def run ( self ) : for msg in self . messages : col = getattr ( msg , 'col' , 0 ) yield msg . lineno , col , ( msg . tpl % msg . message_args ) , msg . __class__ | Yield the error messages . |
55,254 | def error ( self , line_number , offset , text , check ) : code = super ( _Report , self ) . error ( line_number , offset , text , check ) if code : self . errors . append ( ( line_number , offset + 1 , code , text , check ) ) | Run the checks and collect the errors . |
55,255 | def prompt ( prompt_string , default = None , secret = False , boolean = False , bool_type = None ) : if boolean or bool_type in BOOLEAN_DEFAULTS : if bool_type is None : bool_type = 'y_n' default_msg = BOOLEAN_DEFAULTS [ bool_type ] [ is_affirmative ( default ) ] else : default_msg = " (default {val}): " prompt_string += ( default_msg . format ( val = default ) if default else ": " ) if secret : val = getpass ( prompt_string ) else : val = input ( prompt_string ) val = ( val if val else default ) if boolean : val = val . lower ( ) . startswith ( 'y' ) return val | Prompt user for a string with a default value |
55,256 | def jflatten ( j ) : nobs , nf , nargs = j . shape nrows , ncols = nf * nobs , nargs * nobs jflat = np . zeros ( ( nrows , ncols ) ) for n in xrange ( nobs ) : r , c = n * nf , n * nargs jflat [ r : ( r + nf ) , c : ( c + nargs ) ] = j [ n ] return jflat | Flatten 3_D Jacobian into 2 - D . |
55,257 | def jtosparse ( j ) : data = j . flatten ( ) . tolist ( ) nobs , nf , nargs = j . shape indices = zip ( * [ ( r , c ) for n in xrange ( nobs ) for r in xrange ( n * nf , ( n + 1 ) * nf ) for c in xrange ( n * nargs , ( n + 1 ) * nargs ) ] ) return csr_matrix ( ( data , indices ) , shape = ( nobs * nf , nobs * nargs ) ) | Generate sparse matrix coordinates from 3 - D Jacobian . |
55,258 | def upload_file ( self , service_rec = None , host_service = None , filename = None , pw_data = None , f_type = None , add_to_evidence = True ) : return self . send . accounts_upload_file ( service_rec , host_service , filename , pw_data , f_type , add_to_evidence ) | Upload a password file |
55,259 | def parse_datetime ( time_str ) : try : return dateutil . parser . parse ( time_str ) . replace ( microsecond = 0 ) . astimezone ( UTC_TZINFO ) except ValueError : raise ParseError ( "Invalid time string: %s" % time_str ) | Wraps dateutil s parser function to set an explicit UTC timezone and to make sure microseconds are 0 . Unified Uploader format and EMK format bother don t use microseconds at all . |
55,260 | def backup_name ( self , timestamp = None ) : suffix = datetime2string ( coalesce ( timestamp , datetime . now ( ) ) , "%Y%m%d_%H%M%S" ) return File . add_suffix ( self . _filename , suffix ) | RETURN A FILENAME THAT CAN SERVE AS A BACKUP FOR THIS FILE |
55,261 | def append ( self , content , encoding = 'utf8' ) : if not self . parent . exists : self . parent . create ( ) with open ( self . _filename , "ab" ) as output_file : if not is_text ( content ) : Log . error ( u"expecting to write unicode only" ) output_file . write ( content . encode ( encoding ) ) output_file . write ( b"\n" ) | add a line to file |
55,262 | def url_param2value ( param ) : if param == None : return Null if param == None : return Null def _decode ( v ) : output = [ ] i = 0 while i < len ( v ) : c = v [ i ] if c == "%" : d = hex2chr ( v [ i + 1 : i + 3 ] ) output . append ( d ) i += 3 else : output . append ( c ) i += 1 output = text_type ( "" . join ( output ) ) try : return json2value ( output ) except Exception : pass return output query = Data ( ) for p in param . split ( '&' ) : if not p : continue if p . find ( "=" ) == - 1 : k = p v = True else : k , v = p . split ( "=" ) v = _decode ( v ) u = query . get ( k ) if u is None : query [ k ] = v elif is_list ( u ) : u += [ v ] else : query [ k ] = [ u , v ] return query | CONVERT URL QUERY PARAMETERS INTO DICT |
55,263 | def configfile_from_path ( path , strict = True ) : extension = path . split ( '.' ) [ - 1 ] conf_type = FILE_TYPES . get ( extension ) if not conf_type : raise exc . UnrecognizedFileExtension ( "Cannot parse file of type {0}. Choices are {1}." . format ( extension , FILE_TYPES . keys ( ) , ) ) return conf_type ( path = path , strict = strict ) | Get a ConfigFile object based on a file path . |
55,264 | def configuration_from_paths ( paths , strict = True ) : for path in paths : cfg = configfile_from_path ( path , strict = strict ) . config return cfg | Get a Configuration object based on multiple file paths . |
55,265 | def set_environment_var_options ( config , env = None , prefix = 'CONFPY' ) : env = env or os . environ for section_name , section in config : for option_name , _ in section : var_name = '{0}_{1}_{2}' . format ( prefix . upper ( ) , section_name . upper ( ) , option_name . upper ( ) , ) env_var = env . get ( var_name ) if env_var : setattr ( section , option_name , env_var ) return config | Set any configuration options which have an environment var set . |
55,266 | def set_cli_options ( config , arguments = None ) : arguments = arguments or sys . argv [ 1 : ] parser = argparse . ArgumentParser ( ) for section_name , section in config : for option_name , _ in section : var_name = '{0}_{1}' . format ( section_name . lower ( ) , option_name . lower ( ) , ) parser . add_argument ( '--{0}' . format ( var_name ) ) args , _ = parser . parse_known_args ( arguments ) args = vars ( args ) for section_name , section in config : for option_name , _ in section : var_name = '{0}_{1}' . format ( section_name . lower ( ) , option_name . lower ( ) , ) value = args . get ( var_name ) if value : setattr ( section , option_name , value ) return config | Set any configuration options which have a CLI value set . |
55,267 | def check_for_missing_options ( config ) : for section_name , section in config : for option_name , option in section : if option . required and option . value is None : raise exc . MissingRequiredOption ( "Option {0} in namespace {1} is required." . format ( option_name , section_name , ) ) return config | Iter over a config and raise if a required option is still not set . |
55,268 | def parse_options ( files , env_prefix = 'CONFPY' , strict = True ) : return check_for_missing_options ( config = set_cli_options ( config = set_environment_var_options ( config = configuration_from_paths ( paths = files , strict = strict , ) , prefix = env_prefix , ) , ) ) | Parse configuration options and return a configuration object . |
55,269 | def render ( self , sphinx_app : Sphinx , context ) : builder : StandaloneHTMLBuilder = sphinx_app . builder resource = sphinx_app . env . resources [ self . docname ] context [ 'sphinx_app' ] = sphinx_app context [ 'widget' ] = self context [ 'resource' ] = resource self . make_context ( context , sphinx_app ) template = self . template + '.html' html = builder . templates . render ( template , context ) return html | Given a Sphinx builder and context with sphinx_app in it generate HTML |
55,270 | def desc ( t = None , reg = True ) : def decorated_fn ( cls ) : if not inspect . isclass ( cls ) : return NotImplemented ( 'For now we can only describe classes' ) name = t or camel_case_to_underscore ( cls . __name__ ) [ 0 ] if reg : di . injector . register ( name , cls ) else : di . injector . describe ( name , cls ) return cls return decorated_fn | Describe Class Dependency |
55,271 | def label ( self , value ) : return self . _labels . get ( value ) or text . pretty ( self ( value ) ) | Returns a pretty text version of the key for the inputted value . |
55,272 | def setLabel ( self , value , label ) : if label : self . _labels [ value ] = label else : self . _labels . pop ( value , None ) | Sets the label text for the inputted value . This will override the default pretty text label that is used for the key . |
55,273 | def valueByLabel ( self , label ) : keys = self . keys ( ) labels = [ text . pretty ( key ) for key in keys ] if label in labels : return self [ keys [ labels . index ( label ) ] ] return 0 | Determine a given value based on the inputted label . |
55,274 | def load_config_file ( self ) : config_parser = SafeConfigParser ( ) config_parser . read ( self . CONFIG_FILE ) if config_parser . has_section ( 'handlers' ) : self . _config [ 'handlers_package' ] = config_parser . get ( 'handlers' , 'package' ) if config_parser . has_section ( 'auth' ) : self . _config [ 'consumer_key' ] = config_parser . get ( 'auth' , 'consumer_key' ) self . _config [ 'consumer_secret' ] = config_parser . get ( 'auth' , 'consumer_secret' ) self . _config [ 'token_key' ] = config_parser . get ( 'auth' , 'token_key' ) self . _config [ 'token_secret' ] = config_parser . get ( 'auth' , 'token_secret' ) if config_parser . has_section ( 'stream' ) : self . _config [ 'user_stream' ] = config_parser . get ( 'stream' , 'user_stream' ) . lower ( ) == 'true' else : self . _config [ 'user_stream' ] = False if config_parser . has_option ( 'general' , 'min_seconds_between_errors' ) : self . _config [ 'min_seconds_between_errors' ] = config_parser . get ( 'general' , 'min_seconds_between_errors' ) if config_parser . has_option ( 'general' , 'sleep_seconds_on_consecutive_errors' ) : self . _config [ 'sleep_seconds_on_consecutive_errors' ] = config_parser . get ( 'general' , 'sleep_seconds_on_consecutive_errors' ) | Parse configuration file and get config values . |
55,275 | def load_config_from_cli_arguments ( self , * args , ** kwargs ) : self . _load_config_from_cli_argument ( key = 'handlers_package' , ** kwargs ) self . _load_config_from_cli_argument ( key = 'auth' , ** kwargs ) self . _load_config_from_cli_argument ( key = 'user_stream' , ** kwargs ) self . _load_config_from_cli_argument ( key = 'min_seconds_between_errors' , ** kwargs ) self . _load_config_from_cli_argument ( key = 'sleep_seconds_on_consecutive_errors' , ** kwargs ) | Get config values of passed in CLI options . |
55,276 | def get ( self , id ) : data = self . db . get_data ( self . get_path , id = id ) return self . _build_item ( ** data [ 'Data' ] [ self . name ] ) | Gets the dict data and builds the item object . |
55,277 | def save ( self , entity ) : assert isinstance ( entity , Entity ) , " entity must have an instance of Entity" return self . __collection . save ( entity . as_dict ( ) ) | Maps entity to dict and returns future |
55,278 | def find_one ( self , ** kwargs ) : future = TracebackFuture ( ) def handle_response ( result , error ) : if error : future . set_exception ( error ) else : instance = self . __entity ( ) instance . map_dict ( result ) future . set_result ( instance ) self . __collection . find_one ( kwargs , callback = handle_response ) return future | Returns future . |
55,279 | def update ( self , entity ) : assert isinstance ( entity , Entity ) , "Error: entity must have an instance of Entity" return self . __collection . update ( { '_id' : entity . _id } , { '$set' : entity . as_dict ( ) } ) | Executes collection s update method based on keyword args . |
55,280 | def open ( self , results = False ) : webbrowser . open ( self . results_url if results else self . url ) | Open the strawpoll in a browser . Can specify to open the main or results page . |
55,281 | def main ( ) : argv = sys . argv if len ( argv ) < 2 : targetfile = 'target.y' else : targetfile = argv [ 1 ] print 'Parsing ruleset: ' + targetfile , flex_a = Flexparser ( ) mma = flex_a . yyparse ( targetfile ) print 'OK' print 'Perform minimization on initial automaton:' , mma . minimize ( ) print 'OK' print 'Perform Brzozowski on minimal automaton:' , brzozowski_a = Brzozowski ( mma ) mma_regex = brzozowski_a . get_regex ( ) print mma_regex | Testing function for DFA brzozowski algebraic method Operation |
55,282 | def load_mmd ( ) : global _MMD_LIB global _LIB_LOCATION try : lib_file = 'libMultiMarkdown' + SHLIB_EXT [ platform . system ( ) ] _LIB_LOCATION = os . path . abspath ( os . path . join ( DEFAULT_LIBRARY_DIR , lib_file ) ) if not os . path . isfile ( _LIB_LOCATION ) : _LIB_LOCATION = ctypes . util . find_library ( 'MultiMarkdown' ) _MMD_LIB = ctypes . cdll . LoadLibrary ( _LIB_LOCATION ) except : _MMD_LIB = None | Loads libMultiMarkdown for usage |
55,283 | def _expand_source ( source , dname , fmt ) : _MMD_LIB . g_string_new . restype = ctypes . POINTER ( GString ) _MMD_LIB . g_string_new . argtypes = [ ctypes . c_char_p ] src = source . encode ( 'utf-8' ) gstr = _MMD_LIB . g_string_new ( src ) _MMD_LIB . prepend_mmd_header ( gstr ) _MMD_LIB . append_mmd_footer ( gstr ) manif = _MMD_LIB . g_string_new ( b"" ) _MMD_LIB . transclude_source . argtypes = [ ctypes . POINTER ( GString ) , ctypes . c_char_p , ctypes . c_char_p , ctypes . c_int , ctypes . POINTER ( GString ) ] _MMD_LIB . transclude_source ( gstr , dname . encode ( 'utf-8' ) , None , fmt , manif ) manifest_txt = manif . contents . str full_txt = gstr . contents . str _MMD_LIB . g_string_free ( manif , True ) _MMD_LIB . g_string_free ( gstr , True ) manifest_txt = [ ii for ii in manifest_txt . decode ( 'utf-8' ) . split ( '\n' ) if ii ] return full_txt . decode ( 'utf-8' ) , manifest_txt | Expands source text to include headers footers and expands Multimarkdown transclusion directives . |
55,284 | def has_metadata ( source , ext ) : _MMD_LIB . has_metadata . argtypes = [ ctypes . c_char_p , ctypes . c_int ] _MMD_LIB . has_metadata . restype = ctypes . c_bool return _MMD_LIB . has_metadata ( source . encode ( 'utf-8' ) , ext ) | Returns a flag indicating if a given block of MultiMarkdown text contains metadata . |
55,285 | def convert ( source , ext = COMPLETE , fmt = HTML , dname = None ) : if dname and not ext & COMPATIBILITY : if os . path . isfile ( dname ) : dname = os . path . abspath ( os . path . dirname ( dname ) ) source , _ = _expand_source ( source , dname , fmt ) _MMD_LIB . markdown_to_string . argtypes = [ ctypes . c_char_p , ctypes . c_ulong , ctypes . c_int ] _MMD_LIB . markdown_to_string . restype = ctypes . c_char_p src = source . encode ( 'utf-8' ) return _MMD_LIB . markdown_to_string ( src , ext , fmt ) . decode ( 'utf-8' ) | Converts a string of MultiMarkdown text to the requested format . Transclusion is performed if the COMPATIBILITY extension is not set and dname is set to a valid directory |
55,286 | def convert_from ( fname , ext = COMPLETE , fmt = HTML ) : dname = os . path . abspath ( os . path . dirname ( fname ) ) with open ( fname , 'r' ) as fp : src = fp . read ( ) return convert ( src , ext , fmt , dname ) | Reads in a file and performs MultiMarkdown conversion with transclusion ocurring based on the file directory . Returns the converted string . |
55,287 | def manifest ( txt , dname ) : _ , files = _expand_source ( txt , dname , HTML ) return files | Extracts file manifest for a body of text with the given directory . |
55,288 | def keys ( source , ext = COMPLETE ) : _MMD_LIB . extract_metadata_keys . restype = ctypes . c_char_p _MMD_LIB . extract_metadata_keys . argtypes = [ ctypes . c_char_p , ctypes . c_ulong ] src = source . encode ( 'utf-8' ) all_keys = _MMD_LIB . extract_metadata_keys ( src , ext ) all_keys = all_keys . decode ( 'utf-8' ) if all_keys else '' key_list = [ ii for ii in all_keys . split ( '\n' ) if ii ] return key_list | Extracts metadata keys from the provided MultiMarkdown text . |
55,289 | def value ( source , key , ext = COMPLETE ) : _MMD_LIB . extract_metadata_value . restype = ctypes . c_char_p _MMD_LIB . extract_metadata_value . argtypes = [ ctypes . c_char_p , ctypes . c_ulong , ctypes . c_char_p ] src = source . encode ( 'utf-8' ) dkey = key . encode ( 'utf-8' ) value = _MMD_LIB . extract_metadata_value ( src , ext , dkey ) return value . decode ( 'utf-8' ) if value else '' | Extracts value for the specified metadata key from the given extension set . |
55,290 | def tweet ( self , text , in_reply_to = None , filename = None , file = None ) : if filename is None : return Tweet ( self . _client . update_status ( status = text , in_reply_to_status_id = in_reply_to ) . _json ) else : return Tweet ( self . _client . update_with_media ( filename = filename , file = file , status = text , in_reply_to_status_id = in_reply_to ) . _json ) | Post a new tweet . |
55,291 | def retweet ( self , id ) : try : self . _client . retweet ( id = id ) return True except TweepError as e : if e . api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR : return False raise | Retweet a tweet . |
55,292 | def get_tweet ( self , id ) : try : return Tweet ( self . _client . get_status ( id = id ) . _json ) except TweepError as e : if e . api_code == TWITTER_TWEET_NOT_FOUND_ERROR : return None raise | Get an existing tweet . |
55,293 | def get_user ( self , id ) : try : return User ( self . _client . get_user ( user_id = id ) . _json ) except TweepError as e : if e . api_code == TWITTER_USER_NOT_FOUND_ERROR : return None raise | Get a user s info . |
55,294 | def remove_tweet ( self , id ) : try : self . _client . destroy_status ( id = id ) return True except TweepError as e : if e . api_code in [ TWITTER_PAGE_DOES_NOT_EXISTS_ERROR , TWITTER_DELETE_OTHER_USER_TWEET ] : return False raise | Delete a tweet . |
55,295 | def create_list ( self , name , mode = 'public' , description = None ) : return List ( tweepy_list_to_json ( self . _client . create_list ( name = name , mode = mode , description = description ) ) ) | Create a list |
55,296 | def destroy_list ( self , list_id ) : return List ( tweepy_list_to_json ( self . _client . destroy_list ( list_id = list_id ) ) ) | Destroy a list |
55,297 | def update_list ( self , list_id , name = None , mode = None , description = None ) : return List ( tweepy_list_to_json ( self . _client . update_list ( list_id = list_id , name = name , mode = mode , description = description ) ) ) | Update a list |
55,298 | def list_timeline ( self , list_id , since_id = None , max_id = None , count = 20 ) : statuses = self . _client . list_timeline ( list_id = list_id , since_id = since_id , max_id = max_id , count = count ) return [ Tweet ( tweet . _json ) for tweet in statuses ] | List the tweets of specified list . |
55,299 | def get_list ( self , list_id ) : return List ( tweepy_list_to_json ( self . _client . get_list ( list_id = list_id ) ) ) | Get info of specified list |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.