Columns: idx (int64, 0 to 63k); question (string, lengths 61 to 4.03k); target (string, lengths 6 to 1.23k)
54,000
def get_facet_objects_serializer ( self , * args , ** kwargs ) : facet_objects_serializer_class = self . get_facet_objects_serializer_class ( ) kwargs [ "context" ] = self . get_serializer_context ( ) return facet_objects_serializer_class ( * args , ** kwargs )
Return the serializer instance which should be used for serializing faceted objects .
54,001
def bind ( self , field_name , parent ) : assert self . source != field_name , ( "It is redundant to specify `source='%s'` on field '%s' in " "serializer '%s', because it is the same as the field name. " "Remove the `source` keyword argument." % ( field_name , self . __class__ . __name__ , parent . __class__ . __name__...
Initializes the field name and parent for the field instance . Called when a field is added to the parent serializer instance . Taken from DRF and modified to support drf_haystack multiple index functionality .
54,002
def _get_default_field_kwargs ( model , field ) : kwargs = { } try : field_name = field . model_attr or field . index_fieldname model_field = model . _meta . get_field ( field_name ) kwargs . update ( get_field_kwargs ( field_name , model_field ) ) delete_attrs = [ "allow_blank" , "choices" , "model_field" , "allow_uni...
Get the required attributes from the model field in order to instantiate a REST Framework serializer field .
54,003
def _get_index_class_name ( self , index_cls ) : cls_name = index_cls . __name__ aliases = self . Meta . index_aliases return aliases . get ( cls_name , cls_name . split ( '.' ) [ - 1 ] )
Converts an index model class to a name suitable for use as a field name prefix. A user may optionally specify custom aliases via an index_aliases attribute on the Meta class.
54,004
def get_fields ( self ) : fields = self . Meta . fields exclude = self . Meta . exclude ignore_fields = self . Meta . ignore_fields indices = self . Meta . index_classes declared_fields = copy . deepcopy ( self . _declared_fields ) prefix_field_names = len ( indices ) > 1 field_mapping = OrderedDict ( ) for index_cls i...
Get the required fields for serializing the result .
54,005
def to_representation ( self , instance ) : if self . Meta . serializers : ret = self . multi_serializer_representation ( instance ) else : ret = super ( HaystackSerializer , self ) . to_representation ( instance ) prefix_field_names = len ( getattr ( self . Meta , "index_classes" ) ) > 1 current_index = self . _get_in...
If we have a serializer mapping, use that; otherwise use standard serializer behavior. Since we might be dealing with multiple indexes, some fields might not be valid for all results. Do not render the fields which don't belong to the search result.
54,006
def get_narrow_url ( self , instance ) : text = instance [ 0 ] request = self . context [ "request" ] query_params = request . GET . copy ( ) page_query_param = self . get_paginate_by_param ( ) if page_query_param and page_query_param in query_params : del query_params [ page_query_param ] selected_facets = set ( query...
Return a link suitable for narrowing on the current item .
54,007
def to_representation ( self , field , instance ) : self . parent_field = field return super ( FacetFieldSerializer , self ) . to_representation ( instance )
Set the parent_field property equal to the current field on the serializer class so that each field can query it to see what kind of attribute it is processing.
54,008
def get_fields ( self ) : field_mapping = OrderedDict ( ) for field , data in self . instance . items ( ) : field_mapping . update ( { field : self . facet_dict_field_class ( child = self . facet_list_field_class ( child = self . facet_field_serializer_class ( data ) ) , required = False ) } ) if self . serialize_objec...
This returns a dictionary containing the top-most fields: dates, fields, and queries.
54,009
def get_objects ( self , instance ) : view = self . context [ "view" ] queryset = self . context [ "objects" ] page = view . paginate_queryset ( queryset ) if page is not None : serializer = view . get_facet_objects_serializer ( page , many = True ) return OrderedDict ( [ ( "count" , self . get_count ( queryset ) ) , (...
Return a list of objects matching the faceted result .
54,010
def get_document_field ( instance ) : for name , field in instance . searchindex . fields . items ( ) : if field . document is True : return name
Returns which field the search index has marked as its document=True field.
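For context, the document=True field referred to above is declared on a Haystack SearchIndex. The following is a minimal sketch (the NoteIndex class and its fields are hypothetical, and it assumes a configured Django/Haystack project):

from haystack import indexes

class NoteIndex(indexes.SearchIndex):  # Indexable mixin and get_model() omitted for brevity
    text = indexes.CharField(document=True, use_template=True)   # the document=True field
    author = indexes.CharField(model_attr="user")

# get_document_field(instance) would return "text" for results from this index.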
54,011
def apply_filters ( self , queryset , applicable_filters = None , applicable_exclusions = None ) : if applicable_filters : queryset = queryset . filter ( applicable_filters ) if applicable_exclusions : queryset = queryset . exclude ( applicable_exclusions ) return queryset
Apply constructed filters and excludes, and return the queryset.
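As a rough usage sketch, the filters and exclusions applied above are typically Haystack SQ objects produced by the query builder. A minimal, hypothetical example (field names are illustrative; it assumes a configured Haystack backend):

from haystack.query import SearchQuerySet, SQ

applicable_filters = SQ(text="django") | SQ(title="django")   # hypothetical constructed filters
applicable_exclusions = SQ(status="draft")                    # hypothetical exclusions

queryset = SearchQuerySet()
if applicable_filters:
    queryset = queryset.filter(applicable_filters)
if applicable_exclusions:
    queryset = queryset.exclude(applicable_exclusions)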
54,012
def build_filters ( self , view , filters = None ) : query_builder = self . get_query_builder ( backend = self , view = view ) return query_builder . build_query ( ** ( filters if filters else { } ) )
Get the query builder instance and return constructed query filters .
54,013
def filter_queryset ( self , request , queryset , view ) : applicable_filters , applicable_exclusions = self . build_filters ( view , filters = self . get_request_filters ( request ) ) return self . apply_filters ( queryset = queryset , applicable_filters = self . process_filters ( applicable_filters , queryset , view ...
Return the filtered queryset .
54,014
def get_query_builder ( self , * args , ** kwargs ) : query_builder = self . get_query_builder_class ( ) return query_builder ( * args , ** kwargs )
Return the query builder class instance that should be used to build the query which is passed to the search engine backend .
54,015
def apply_filters ( self , queryset , applicable_filters = None , applicable_exclusions = None ) : for field , options in applicable_filters [ "field_facets" ] . items ( ) : queryset = queryset . facet ( field , ** options ) for field , options in applicable_filters [ "date_facets" ] . items ( ) : queryset = queryset ....
Apply faceting to the queryset
54,016
def __convert_to_df ( a , val_col = None , group_col = None , val_id = None , group_id = None ) : if not group_col : group_col = 'groups' if not val_col : val_col = 'vals' if isinstance ( a , DataFrame ) : x = a . copy ( ) if not { group_col , val_col } . issubset ( a . columns ) : raise ValueError ( 'Specify correct c...
Hidden helper method to create a DataFrame with input data for further processing .
54,017
def posthoc_tukey_hsd ( x , g , alpha = 0.05 ) : result = pairwise_tukeyhsd ( x , g , alpha = 0.05 ) groups = np . array ( result . groupsunique , dtype = np . str ) groups_len = len ( groups ) vs = np . zeros ( ( groups_len , groups_len ) , dtype = np . int ) for a in result . summary ( ) [ 1 : ] : a0 = str ( a [ 0 ] ...
Pairwise comparisons with TukeyHSD confidence intervals . This is a convenience function to make statsmodels pairwise_tukeyhsd method more applicable for further use .
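A short usage sketch of the underlying statsmodels call (the data here are made up; only pairwise_tukeyhsd and its summary() output are assumed):

import numpy as np
from statsmodels.stats.multicomp import pairwise_tukeyhsd

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(0.0, 1.0, 30),
                    rng.normal(0.0, 1.0, 30),
                    rng.normal(1.0, 1.0, 30)])
g = np.repeat(["a", "b", "c"], 30)

result = pairwise_tukeyhsd(x, g, alpha=0.05)
print(result.summary())   # pairwise group comparisons with reject/accept decisions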
54,018
def posthoc_mannwhitney ( a , val_col = None , group_col = None , use_continuity = True , alternative = 'two-sided' , p_adjust = None , sort = True ) : x , _val_col , _group_col = __convert_to_df ( a , val_col , group_col ) if not sort : x [ _group_col ] = Categorical ( x [ _group_col ] , categories = x [ _group_col ] ...
Pairwise comparisons with Mann-Whitney rank test.
54,019
def posthoc_wilcoxon ( a , val_col = None , group_col = None , zero_method = 'wilcox' , correction = False , p_adjust = None , sort = False ) : x , _val_col , _group_col = __convert_to_df ( a , val_col , group_col ) if not sort : x [ _group_col ] = Categorical ( x [ _group_col ] , categories = x [ _group_col ] . unique...
Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric version of the paired T-test for use with non-parametric ANOVA.
54,020
def shutdown_waits_for ( coro , loop = None ) : loop = loop or get_event_loop ( ) fut = loop . create_future ( ) async def coro_proxy ( ) : try : result = await coro except ( CancelledError , Exception ) as e : set_fut_done = partial ( fut . set_exception , e ) else : set_fut_done = partial ( fut . set_result , result ...
Prevent coro from being cancelled during the shutdown sequence .
54,021
def run ( coro : 'Optional[Coroutine]' = None , * , loop : Optional [ AbstractEventLoop ] = None , shutdown_handler : Optional [ Callable [ [ AbstractEventLoop ] , None ] ] = None , executor_workers : int = 10 , executor : Optional [ Executor ] = None , use_uvloop : bool = False ) -> None : logger . debug ( 'Entering r...
Start up the event loop and wait for a signal to shut down .
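These two functions appear to come from the aiorun package; based only on the signatures shown above, a hedged usage sketch might look like this:

import asyncio
from aiorun import run, shutdown_waits_for   # assumed import path

async def must_finish():
    # Work that should survive Ctrl-C / SIGTERM during shutdown.
    await asyncio.sleep(1)
    print("cleanup finished")

async def main():
    # shutdown_waits_for() shields the wrapped coroutine from cancellation
    # while the shutdown sequence runs.
    await shutdown_waits_for(must_finish())

run(main())   # starts the loop and waits for a shutdown signal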
54,022
def command ( self , * args , ** kwargs ) : if len ( args ) == 1 and isinstance ( args [ 0 ] , collections . Callable ) : return self . _generate_command ( args [ 0 ] ) else : def _command ( func ) : return self . _generate_command ( func , * args , ** kwargs ) return _command
Convenience decorator that simply creates the corresponding command.
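The command decorator above supports both bare use (@command) and parameterised use (@command(name=...)). A minimal, self-contained sketch of that pattern (not storm's actual implementation; the _command_name attribute is illustrative):

import collections.abc

def command(*args, **kwargs):
    def _register(func, name=None):
        func._command_name = name or func.__name__   # illustrative registration
        return func

    if len(args) == 1 and isinstance(args[0], collections.abc.Callable):
        return _register(args[0])                    # used bare: @command
    return lambda func: _register(func, **kwargs)    # used with args: @command(name=...)

@command
def hello():
    pass

@command(name="greet")
def wave():
    pass

print(hello._command_name, wave._command_name)   # -> hello greet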
54,023
def _generate_command ( self , func , name = None , ** kwargs ) : func_pointer = name or func . __name__ storm_config = get_storm_config ( ) aliases , additional_kwarg = None , None if 'aliases' in storm_config : for command , alias_list in six . iteritems ( storm_config . get ( "aliases" ) ) : if func_pointer == comma...
Generates a command parser for given func .
54,024
def execute ( self , arg_list ) : arg_map = self . parser . parse_args ( arg_list ) . __dict__ command = arg_map . pop ( self . _COMMAND_FLAG ) return command ( ** arg_map )
Main function to parse and dispatch commands by given arg_list
54,025
def add ( name , connection_uri , id_file = "" , o = [ ] , config = None ) : storm_ = get_storm_instance ( config ) try : if '@' in name : raise ValueError ( 'invalid value: "@" cannot be used in name.' ) user , host , port = parse ( connection_uri , user = get_default ( "user" , storm_ . defaults ) , port = get_defaul...
Adds a new entry to sshconfig .
54,026
def clone ( name , clone_name , config = None ) : storm_ = get_storm_instance ( config ) try : if '@' in name : raise ValueError ( 'invalid value: "@" cannot be used in name.' ) storm_ . clone_entry ( name , clone_name ) print ( get_formatted_message ( '{0} added to your ssh config. you can connect ' 'it by typing "ssh...
Clone an entry to the sshconfig .
54,027
def move ( name , entry_name , config = None ) : storm_ = get_storm_instance ( config ) try : if '@' in name : raise ValueError ( 'invalid value: "@" cannot be used in name.' ) storm_ . clone_entry ( name , entry_name , keep_original = False ) print ( get_formatted_message ( '{0} moved in ssh config. you can ' 'connect...
Move an entry to the sshconfig .
54,028
def edit ( name , connection_uri , id_file = "" , o = [ ] , config = None ) : storm_ = get_storm_instance ( config ) try : if ',' in name : name = " " . join ( name . split ( "," ) ) user , host , port = parse ( connection_uri , user = get_default ( "user" , storm_ . defaults ) , port = get_default ( "port" , storm_ . ...
Edits the related entry in ssh config .
54,029
def update ( name , connection_uri = "" , id_file = "" , o = [ ] , config = None ) : storm_ = get_storm_instance ( config ) settings = { } if id_file != "" : settings [ 'identityfile' ] = id_file for option in o : k , v = option . split ( "=" ) settings [ k ] = v try : storm_ . update_entry ( name , ** settings ) print...
Enhanced version of the edit command, featuring multiple edits using regular expressions to match entries.
54,030
def delete ( name , config = None ) : storm_ = get_storm_instance ( config ) try : storm_ . delete_entry ( name ) print ( get_formatted_message ( 'hostname "{0}" deleted successfully.' . format ( name ) , 'success' ) ) except ValueError as error : print ( get_formatted_message ( error , 'error' ) , file = sys . stderr ...
Deletes a single host .
54,031
def list ( config = None ) : storm_ = get_storm_instance ( config ) try : result = colored ( 'Listing entries:' , 'white' , attrs = [ "bold" , ] ) + "\n\n" result_stack = "" for host in storm_ . list_entries ( True ) : if host . get ( "type" ) == 'entry' : if not host . get ( "host" ) == "*" : result += " {0} -> {1}...
Lists all hosts from ssh config .
54,032
def search ( search_text , config = None ) : storm_ = get_storm_instance ( config ) try : results = storm_ . search_host ( search_text ) if len ( results ) == 0 : print ( 'no results found.' ) if len ( results ) > 0 : message = 'Listing results for {0}:\n' . format ( search_text ) message += "" . join ( results ) print...
Searches entries by given search text .
54,033
def delete_all ( config = None ) : storm_ = get_storm_instance ( config ) try : storm_ . delete_all_entries ( ) print ( get_formatted_message ( 'all entries deleted.' , 'success' ) ) except Exception as error : print ( get_formatted_message ( str ( error ) , 'error' ) , file = sys . stderr ) sys . exit ( 1 )
Deletes all hosts from ssh config .
54,034
def backup ( target_file , config = None ) : storm_ = get_storm_instance ( config ) try : storm_ . backup ( target_file ) except Exception as error : print ( get_formatted_message ( str ( error ) , 'error' ) , file = sys . stderr ) sys . exit ( 1 )
Backs up the main ssh configuration into the target file.
54,035
def web ( port , debug = False , theme = "modern" , ssh_config = None ) : from storm import web as _web _web . run ( port , debug , theme , ssh_config )
Starts the web UI .
54,036
def _strip_list_attributes ( graph_ ) : for n_ in graph_ . nodes ( data = True ) : for k , v in n_ [ 1 ] . iteritems ( ) : if type ( v ) is list : graph_ . node [ n_ [ 0 ] ] [ k ] = unicode ( v ) for e_ in graph_ . edges ( data = True ) : for k , v in e_ [ 2 ] . iteritems ( ) : if type ( v ) is list : graph_ . edge [ e...
Converts list attributes to strings for all nodes and edges in G.
54,037
def _safe_type ( value ) : if type ( value ) is str : dtype = 'string' if type ( value ) is unicode : dtype = 'string' if type ( value ) is int : dtype = 'integer' if type ( value ) is float : dtype = 'real' return dtype
Converts Python type names to XGMML-safe type names.
54,038
def read ( path , corpus = True , index_by = 'wosid' , streaming = False , parse_only = None , corpus_class = Corpus , ** kwargs ) : if not os . path . exists ( path ) : raise ValueError ( 'No such file or directory' ) if parse_only : parse_only . append ( index_by ) if streaming : return streaming_read ( path , corpus...
Parse one or more WoS field-tagged data files.
54,039
def parse_author ( self , value ) : tokens = tuple ( [ t . upper ( ) . strip ( ) for t in value . split ( ',' ) ] ) if len ( tokens ) == 1 : tokens = value . split ( ' ' ) if len ( tokens ) > 0 : if len ( tokens ) > 1 : aulast , auinit = tokens [ 0 : 2 ] else : aulast = tokens [ 0 ] auinit = '' else : aulast , auinit =...
Attempts to split an author name into last and first parts .
54,040
def handle_CR ( self , value ) : citation = self . entry_class ( ) value = strip_tags ( value ) ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)' ny_match = re . match ( ptn , value , flags = re . U ) nj_match = re . match ( '([\w\s\W]+),\s([\w\s]+)' , value , flags = re . U ) if ny_match is not None : name_raw , date , jou...
Parses cited references .
54,041
def postprocess_WC ( self , entry ) : if type ( entry . WC ) not in [ str , unicode ] : WC = u' ' . join ( [ unicode ( k ) for k in entry . WC ] ) else : WC = entry . WC entry . WC = [ k . strip ( ) . upper ( ) for k in WC . split ( ';' ) ]
Parse WC keywords .
54,042
def postprocess_subject ( self , entry ) : if type ( entry . subject ) not in [ str , unicode ] : subject = u' ' . join ( [ unicode ( k ) for k in entry . subject ] ) else : subject = entry . subject entry . subject = [ k . strip ( ) . upper ( ) for k in subject . split ( ';' ) ]
Parse subject keywords .
54,043
def postprocess_authorKeywords ( self , entry ) : if type ( entry . authorKeywords ) not in [ str , unicode ] : aK = u' ' . join ( [ unicode ( k ) for k in entry . authorKeywords ] ) else : aK = entry . authorKeywords entry . authorKeywords = [ k . strip ( ) . upper ( ) for k in aK . split ( ';' ) ]
Parse author keywords .
54,044
def postprocess_keywordsPlus ( self , entry ) : if type ( entry . keywordsPlus ) in [ str , unicode ] : entry . keywordsPlus = [ k . strip ( ) . upper ( ) for k in entry . keywordsPlus . split ( ';' ) ]
Parse WoS Keyword Plus keywords .
54,045
def postprocess_funding ( self , entry ) : if type ( entry . funding ) not in [ str , unicode ] : return sources = [ fu . strip ( ) for fu in entry . funding . split ( ';' ) ] sources_processed = [ ] for source in sources : m = re . search ( '(.*)?\s+\[(.+)\]' , source ) if m : agency , grant = m . groups ( ) else : ag...
Separates funding agency from grant numbers .
54,046
def postprocess_authors_full ( self , entry ) : if type ( entry . authors_full ) is not list : entry . authors_full = [ entry . authors_full ]
If only a single author was found, ensure that authors_full is nonetheless a list.
54,047
def postprocess_authors_init ( self , entry ) : if type ( entry . authors_init ) is not list : entry . authors_init = [ entry . authors_init ]
If only a single author was found, ensure that authors_init is nonetheless a list.
54,048
def postprocess_citedReferences ( self , entry ) : if type ( entry . citedReferences ) is not list : entry . citedReferences = [ entry . citedReferences ]
If only a single cited reference was found, ensure that citedReferences is nonetheless a list.
54,049
def plot_burstness ( corpus , B , ** kwargs ) : try : import matplotlib . pyplot as plt import matplotlib . patches as mpatches except ImportError : raise RuntimeError ( 'This method requires the package matplotlib.' ) color = kwargs . get ( 'color' , 'red' ) years = sorted ( corpus . indices [ 'date' ] . keys ( ) ) wi...
Generate a figure depicting burstness profiles for a feature.
54,050
def simplify_multigraph ( multigraph , time = False ) : graph = nx . Graph ( ) for node in multigraph . nodes ( data = True ) : u = node [ 0 ] node_attribs = node [ 1 ] graph . add_node ( u , node_attribs ) for v in multigraph [ u ] : edges = multigraph . get_edge_data ( u , v ) edge_attribs = { 'weight' : len ( edges ...
Simplifies a graph by condensing multiple edges between the same node pair into a single edge with a weight attribute equal to the number of edges .
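A small self-contained sketch of the same edge-condensing idea using networkx (not the function above, just the core step):

import networkx as nx

mg = nx.MultiGraph()
mg.add_edge("a", "b")
mg.add_edge("a", "b")
mg.add_edge("b", "c")

g = nx.Graph()
g.add_nodes_from(mg.nodes(data=True))
for u, v in set(mg.edges()):
    # weight = number of parallel edges between u and v in the multigraph
    g.add_edge(u, v, weight=len(mg.get_edge_data(u, v)))

print(g["a"]["b"]["weight"])   # -> 2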
54,051
def citation_count ( papers , key = 'ayjid' , verbose = False ) : if verbose : print "Generating citation counts for " + unicode ( len ( papers ) ) + " papers..." counts = Counter ( ) for P in papers : if P [ 'citations' ] is not None : for p in P [ 'citations' ] : counts [ p [ key ] ] += 1 return counts
Generates citation counts for all of the papers cited by papers .
54,052
def connected ( G , method_name , ** kwargs ) : warnings . warn ( "To be removed in 0.8. Use GraphCollection.analyze instead." , DeprecationWarning ) return G . analyze ( [ 'connected' , method_name ] , ** kwargs )
Performs analysis methods from networkx.connected on each graph in the collection.
54,053
def attachment_probability ( G ) : warnings . warn ( "Removed in 0.8. Too domain-specific." ) probs = { } G_ = None k_ = None for k , g in G . graphs . iteritems ( ) : new_edges = { } if G_ is not None : for n in g . nodes ( ) : try : old_neighbors = set ( G_ [ n ] . keys ( ) ) if len ( old_neighbors ) > 0 : new_neighb...
Calculates the observed attachment probability for each node at each time-step. Attachment probability is calculated based on the observed new edges in the next time-step. So if a node acquires new edges at time t, this will accrue to the node's attachment probability at time t-1. Thus at a given time one can a...
54,054
def global_closeness_centrality ( g , node = None , normalize = True ) : if not node : C = { } for node in g . nodes ( ) : C [ node ] = global_closeness_centrality ( g , node , normalize = normalize ) return C values = nx . shortest_path_length ( g , node ) . values ( ) c = sum ( [ 1. / pl for pl in values if pl != 0. ...
Calculates global closeness centrality for one or all nodes in the network .
54,055
def ngrams ( path , elem , ignore_hash = True ) : grams = GramGenerator ( path , elem , ignore_hash = ignore_hash ) return FeatureSet ( { k : Feature ( f ) for k , f in grams } )
Yields N-grams from a JSTOR DfR dataset.
54,056
def tokenize ( ngrams , min_tf = 2 , min_df = 2 , min_len = 3 , apply_stoplist = False ) : vocab = { } vocab_ = { } word_tf = Counter ( ) word_df = Counter ( ) token_tf = Counter ( ) token_df = Counter ( ) t_ngrams = { } for grams in ngrams . values ( ) : for g , c in grams : word_tf [ g ] += c word_df [ g ] += 1 if ap...
Builds a vocabulary and replaces words with vocab indices .
54,057
def _handle_pagerange ( pagerange ) : try : pr = re . compile ( "pp\.\s([0-9]+)\-([0-9]+)" ) start , end = re . findall ( pr , pagerange ) [ 0 ] except IndexError : start = end = 0 return unicode ( start ) , unicode ( end )
Yields start and end pages from DfR pagerange field .
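The regular expression above expects DfR pageranges of the form "pp. 12-34"; a quick, self-contained check of that pattern:

import re

pr = re.compile(r"pp\.\s([0-9]+)\-([0-9]+)")
print(re.findall(pr, "pp. 12-34"))   # -> [('12', '34')]
print(re.findall(pr, "n/a"))         # -> [] (the function falls back to start = end = 0)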
54,058
def _handle_authors ( authors ) : aulast = [ ] auinit = [ ] if type ( authors ) is list : for author in authors : if type ( author ) is str : author = unicode ( author ) author = unidecode ( author ) try : l , i = _handle_author ( author ) aulast . append ( l ) auinit . append ( i ) except ValueError : pass elif type (...
Yields aulast and auinit lists from value of authors node .
54,059
def _handle_author ( author ) : lname = author . split ( ' ' ) try : auinit = lname [ 0 ] [ 0 ] final = lname [ - 1 ] . upper ( ) if final in [ 'JR.' , 'III' ] : aulast = lname [ - 2 ] . upper ( ) + " " + final . strip ( "." ) else : aulast = final except IndexError : raise ValueError ( "malformed author name" ) return...
Yields aulast and auinit from an author's full name.
54,060
def _get ( self , i ) : with open ( os . path . join ( self . path , self . elem , self . files [ i ] ) , 'r' ) as f : contents = re . sub ( '(&)(?!amp;)' , lambda match : '&amp;' , f . read ( ) ) root = ET . fromstring ( contents ) doi = root . attrib [ 'id' ] if self . K : return doi grams = [ ] for gram in root . fi...
Retrieve data for the ith file in the dataset .
54,061
def _generate_corpus ( self ) : target = self . temp + 'mallet' paths = write_documents ( self . corpus , target , self . featureset_name , [ 'date' , 'title' ] ) self . corpus_path , self . metapath = paths self . _export_corpus ( )
Writes a corpus to disk amenable to MALLET topic modeling .
54,062
def _export_corpus ( self ) : if not os . path . exists ( self . mallet_bin ) : raise IOError ( "MALLET path invalid or non-existent." ) self . input_path = os . path . join ( self . temp , "input.mallet" ) exit = subprocess . call ( [ self . mallet_bin , 'import-file' , '--input' , self . corpus_path , '--output' , se...
Calls MALLET's import-file method.
54,063
def run ( self , ** kwargs ) : if not os . path . exists ( self . mallet_bin ) : raise IOError ( "MALLET path invalid or non-existent." ) for attr in [ 'Z' , 'max_iter' ] : if not hasattr ( self , attr ) : raise AttributeError ( 'Please set {0}' . format ( attr ) ) self . ll = [ ] self . num_iters = 0 logger . debug ( ...
Calls MALLET's train-topic method.
54,064
def topics_in ( self , d , topn = 5 ) : return self . theta . features [ d ] . top ( topn )
List the top topn topics in document d .
54,065
def list_topic ( self , k , Nwords = 10 ) : return [ ( self . vocabulary [ w ] , p ) for w , p in self . phi . features [ k ] . top ( Nwords ) ]
List the top Nwords words for topic k.
54,066
def list_topics ( self , Nwords = 10 ) : return [ ( k , self . list_topic ( k , Nwords ) ) for k in xrange ( len ( self . phi ) ) ]
List the top Nwords words for each topic .
54,067
def print_topics ( self , Nwords = 10 ) : print ( 'Topic\tTop %i words' % Nwords ) for k , words in self . list_topics ( Nwords ) : print ( unicode ( k ) . ljust ( 3 ) + '\t' + ' ' . join ( list ( zip ( * words ) ) [ 0 ] ) )
Print the top Nwords words for each topic .
54,068
def topic_over_time ( self , k , mode = 'counts' , slice_kwargs = { } ) : return self . corpus . feature_distribution ( 'topics' , k , mode = mode , ** slice_kwargs )
Calculate the representation of topic k in the corpus over time .
54,069
def distribution ( self , ** slice_kwargs ) : values = [ ] keys = [ ] for key , size in self . slice ( count_only = True , ** slice_kwargs ) : values . append ( size ) keys . append ( key ) return keys , values
Calculates the number of papers in each slice as defined by slice_kwargs .
54,070
def feature_distribution ( self , featureset_name , feature , mode = 'counts' , ** slice_kwargs ) : values = [ ] keys = [ ] fset = self . features [ featureset_name ] for key , papers in self . slice ( subcorpus = False , ** slice_kwargs ) : allfeatures = [ v for v in chain ( * [ fset . features [ self . _generate_inde...
Calculates the distribution of a feature across slices of the corpus .
54,071
def top_features ( self , featureset_name , topn = 20 , by = 'counts' , perslice = False , slice_kwargs = { } ) : if perslice : return [ ( k , subcorpus . features [ featureset_name ] . top ( topn , by = by ) ) for k , subcorpus in self . slice ( ** slice_kwargs ) ] return self . features [ featureset_name ] . top ( to...
Retrieves the top topn most numerous features in the corpus .
54,072
def feature_burstness ( corpus , featureset_name , feature , k = 5 , normalize = True , s = 1.1 , gamma = 1. , ** slice_kwargs ) : if featureset_name not in corpus . features : corpus . index_feature ( featureset_name ) if 'date' not in corpus . indices : corpus . index ( 'date' ) dates = [ min ( corpus . indices [ 'da...
Estimate burstness profile for a feature over the date axis .
54,073
def cocitation ( corpus , min_weight = 1 , edge_attrs = [ 'ayjid' , 'date' ] , ** kwargs ) : return cooccurrence ( corpus , 'citations' , min_weight = min_weight , edge_attrs = edge_attrs , ** kwargs )
Generate a cocitation network .
54,074
def context_chunk ( self , context , j ) : N_chunks = len ( self . contexts [ context ] ) start = self . contexts [ context ] [ j ] if j == N_chunks - 1 : end = len ( self ) else : end = self . contexts [ context ] [ j + 1 ] return [ self [ i ] for i in xrange ( start , end ) ]
Retrieve the tokens in the jth chunk of the given context.
54,075
def add_context ( self , name , indices , level = None ) : self . _validate_context ( ( name , indices ) ) if level is None : level = len ( self . contexts_ranked ) self . contexts_ranked . insert ( level , name ) self . contexts [ name ] = indices
Add a new context level to the hierarchy .
54,076
def index ( self , name , graph ) : nodes = graph . nodes ( ) new_nodes = list ( set ( nodes ) - set ( self . node_index . values ( ) ) ) start = max ( len ( self . node_index ) - 1 , max ( self . node_index . keys ( ) ) ) for i in xrange ( start , start + len ( new_nodes ) ) : n = new_nodes . pop ( ) self . node_index...
Index any new nodes in graph and relabel the nodes in graph using the index .
54,077
def terms ( model , threshold = 0.01 , ** kwargs ) : select = lambda f , v , c , dc : v > threshold graph = cooccurrence ( model . phi , filter = select , ** kwargs ) label_map = { k : v for k , v in model . vocabulary . items ( ) if k in graph . nodes ( ) } graph . name = '' return networkx . relabel_nodes ( graph , l...
Two terms are coupled if the posterior probability for both terms is greater than threshold for the same topic.
54,078
def topic_coupling ( model , threshold = None , ** kwargs ) : if not threshold : threshold = 3. / model . Z select = lambda f , v , c , dc : v > threshold graph = coupling ( model . corpus , 'topics' , filter = select , ** kwargs ) graph . name = '' return graph
Two papers are coupled if they both contain a shared topic above a threshold .
54,079
def kl_divergence ( V_a , V_b ) : Ndiff = _shared_features ( V_a , V_b ) aprob = map ( lambda v : float ( v ) / sum ( V_a ) , V_a ) bprob = map ( lambda v : float ( v ) / sum ( V_b ) , V_b ) aprob , bprob = _smooth ( aprob , bprob , Ndiff ) return sum ( map ( lambda a , b : ( a - b ) * log ( a / b ) , aprob , bprob ) )
Calculate Kullback-Leibler distance.
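The expression above, sum((a - b) * log(a / b)), is the symmetrised (Jeffreys) form of the Kullback-Leibler divergence over the normalised feature vectors. A self-contained sketch of that computation (using simple additive smoothing rather than the project's _smooth helper):

from math import log

def symmetric_kl(counts_a, counts_b, eps=1e-9):
    p = [c / sum(counts_a) for c in counts_a]
    q = [c / sum(counts_b) for c in counts_b]
    # eps keeps log(p/q) defined when either probability is zero
    return sum((a - b) * log((a + eps) / (b + eps)) for a, b in zip(p, q))

print(round(symmetric_kl([5, 3, 2], [4, 4, 2]), 4))   # -> 0.0511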
54,080
def _shared_features ( adense , bdense ) : a_indices = set ( nonzero ( adense ) ) b_indices = set ( nonzero ( bdense ) ) shared = list ( a_indices & b_indices ) diff = list ( a_indices - b_indices ) Ndiff = len ( diff ) return Ndiff
Number of features in adense that are also in bdense .
54,081
def cooccurrence ( corpus_or_featureset , featureset_name = None , min_weight = 1 , edge_attrs = [ 'ayjid' , 'date' ] , filter = None ) : if not filter : filter = lambda f , v , c , dc : dc >= min_weight featureset = _get_featureset ( corpus_or_featureset , featureset_name ) if type ( corpus_or_featureset ) in [ Corpus...
A network of feature elements linked by their joint occurrence in papers .
54,082
def coupling ( corpus_or_featureset , featureset_name = None , min_weight = 1 , filter = lambda f , v , c , dc : True , node_attrs = [ ] ) : featureset = _get_featureset ( corpus_or_featureset , featureset_name ) c = lambda f : featureset . count ( f ) dc = lambda f : featureset . documentCount ( f ) f = lambda elem : ...
A network of papers linked by their joint possession of features.
54,083
def multipartite ( corpus , featureset_names , min_weight = 1 , filters = { } ) : pairs = Counter ( ) node_type = { corpus . _generate_index ( p ) : { 'type' : 'paper' } for p in corpus . papers } for featureset_name in featureset_names : ftypes = { } featureset = _get_featureset ( corpus , featureset_name ) for paper ...
A network of papers and one or more featuresets .
54,084
def _strip_punctuation ( s ) : if type ( s ) is str and not PYTHON_3 : return s . translate ( string . maketrans ( "" , "" ) , string . punctuation ) else : translate_table = dict ( ( ord ( char ) , u'' ) for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~' ) return s . translate ( translate_table )
Removes all punctuation characters from a string .
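On Python 3 the function above takes the translate-table branch; a tiny standalone equivalent (using string.punctuation, which differs slightly from the exact character set above):

import string

translate_table = {ord(ch): None for ch in string.punctuation}
print("Hello, world! (test)".translate(translate_table))   # -> Hello world test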
54,085
def overlap ( listA , listB ) : if ( listA is None ) or ( listB is None ) : return [ ] else : return list ( set ( listA ) & set ( listB ) )
Return a list of objects shared by listA and listB.
54,086
def subdict ( super_dict , keys ) : sub_dict = { } valid_keys = super_dict . keys ( ) for key in keys : if key in valid_keys : sub_dict [ key ] = super_dict [ key ] return sub_dict
Returns a subset of the super_dict with the specified keys .
54,087
def concat_list ( listA , listB , delim = ' ' ) : if len ( listA ) != len ( listB ) : raise IndexError ( 'Input lists are not parallel.' ) listC = [ ] for i in xrange ( len ( listA ) ) : app = listA [ i ] + delim + listB [ i ] listC . append ( app ) return listC
Concatenate list elements pair-wise with the delim character. Returns the concatenated list. Raises IndexError if the lists are not parallel.
54,088
def strip_non_ascii ( s ) : stripped = ( c for c in s if 0 < ord ( c ) < 127 ) clean_string = u'' . join ( stripped ) return clean_string
Returns the string without non - ASCII characters .
54,089
def dict_from_node ( node , recursive = False ) : dict = { } for snode in node : if len ( snode ) > 0 : if recursive : value = dict_from_node ( snode , True ) else : value = len ( snode ) elif snode . text is not None : value = snode . text else : value = u'' if snode . tag in dict . keys ( ) : if type ( dict [ snode ....
Converts ElementTree node to a dictionary .
54,090
def feed ( self , data ) : try : self . rawdata = self . rawdata + data except TypeError : data = unicode ( data ) self . rawdata = self . rawdata + data self . goahead ( 0 )
Added this check because sometimes we are getting the data in integer format instead of string.
54,091
def serializePaper ( self ) : pid = tethnedao . getMaxPaperID ( ) papers_details = [ ] for paper in self . corpus : pid = pid + 1 paper_key = getattr ( paper , Serialize . paper_source_map [ self . source ] ) self . paperIdMap [ paper_key ] = pid paper_data = { "model" : "django-tethne.paper" , "pk" : self . paperIdMap...
This method creates a fixture for the django-tethne_paper model.
54,092
def serializeCitation ( self ) : citation_details = [ ] citation_id = tethnedao . getMaxCitationID ( ) for citation in self . corpus . features [ 'citations' ] . index . values ( ) : date_match = re . search ( r'(\d+)' , citation ) if date_match is not None : date = date_match . group ( 1 ) if date_match is None : date...
This method creates a fixture for the django-tethne_citation model.
54,093
def serializeInstitution ( self ) : institution_data = [ ] institution_instance_data = [ ] affiliation_data = [ ] affiliation_id = tethnedao . getMaxAffiliationID ( ) institution_id = tethnedao . getMaxInstitutionID ( ) institution_instance_id = tethnedao . getMaxInstitutionInstanceID ( ) for paper in self . corpus : i...
This method creates a fixture for the django-tethne_citation_institution model.
54,094
def get_details_from_inst_literal ( self , institute_literal , institution_id , institution_instance_id , paper_key ) : institute_details = institute_literal . split ( ',' ) institute_name = institute_details [ 0 ] country = institute_details [ len ( institute_details ) - 1 ] . lstrip ( ) . replace ( '.' , '' ) institu...
This method parses the institute literal to get the following: 1. Department name 2. Country 3. University name 4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard may vary, so parsing these values becomes very difficult. However, the complete address can be found in the column Ad...
54,095
def get_affiliation_details ( self , value , affiliation_id , institute_literal ) : tokens = tuple ( [ t . upper ( ) . strip ( ) for t in value . split ( ',' ) ] ) if len ( tokens ) == 1 : tokens = value . split ( ) if len ( tokens ) > 0 : if len ( tokens ) > 1 : aulast , auinit = tokens [ 0 : 2 ] else : aulast = token...
This method is used to map the Affiliation between an author and Institution .
54,096
def start ( self ) : while not self . is_start ( self . current_tag ) : self . next ( ) self . new_entry ( )
Find the first data entry and prepare to parse .
54,097
def handle ( self , tag , data ) : if self . is_end ( tag ) : self . postprocess_entry ( ) if self . is_start ( tag ) : self . new_entry ( ) if not data or not tag : return if getattr ( self , 'parse_only' , None ) and tag not in self . parse_only : return if isinstance ( data , unicode ) : data = unicodedata . normali...
Process a single line of data and store the result .
54,098
def open ( self ) : if not os . path . exists ( self . path ) : raise IOError ( "No such path: {0}" . format ( self . path ) ) with open ( self . path , "rb" ) as f : msg = f . read ( ) result = chardet . detect ( msg ) self . buffer = codecs . open ( self . path , "rb" , encoding = result [ 'encoding' ] ) self . at_eo...
Open the data file .
54,099
def next ( self ) : line = self . buffer . readline ( ) while line == '\n' : line = self . buffer . readline ( ) if line == '' : self . at_eof = True return None , None match = re . match ( '([A-Z]{2}|[C][1])\W(.*)' , line ) if match is not None : self . current_tag , data = match . groups ( ) else : self . current_tag...
Get the next line of data .