idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
18,400
def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR):
    """Makes a tuple of idVal and a dict of the selected attributes.

    Builds the attribute dict for a single network node identified by
    _idVal_. What goes in the dict depends on _nodeType_ and on whether the
    citation resolves to a Record in _coreCitesDict_ (the "core").
    Returns ``(idVal, d)`` ready for ``Graph.add_node``.
    """
    d = {}
    if nodeInfo:
        if nodeType == 'full':
            if coreValues:
                if citation in coreCitesDict:
                    # Citation hits a core Record: pull attributes from it
                    R = coreCitesDict[citation]
                    d['MK-ID'] = R.id
                    if not detailedValues:
                        # Compact form: one comma-joined 'info' string.
                        # Commas are stripped from the values so the join
                        # stays unambiguous.
                        infoVals = []
                        for tag in coreValues:
                            tagVal = R.get(tag)
                            if isinstance(tagVal, str):
                                infoVals.append(tagVal.replace(',', ''))
                            elif isinstance(tagVal, list):
                                # only the first element of list values is used
                                infoVals.append(tagVal[0].replace(',', ''))
                            else:
                                pass
                        d['info'] = ', '.join(infoVals)
                    else:
                        # Detailed form: one attribute per requested tag,
                        # lists flattened to a sorted '|'-joined string
                        for tag in coreValues:
                            v = R.get(tag, None)
                            if isinstance(v, list):
                                d[tag] = '|'.join(sorted(v))
                            else:
                                d[tag] = v
                    d['inCore'] = True
                    if addCR:
                        d['citations'] = '|'.join((str(c) for c in R.get('citations', [])))
                else:
                    # Citation is outside the core collection
                    d['MK-ID'] = 'None'
                    d['info'] = citation.allButDOI()
                    d['inCore'] = False
                    if addCR:
                        d['citations'] = ''
            else:
                d['info'] = citation.allButDOI()
        elif nodeType == 'journal':
            if citation.isJournal():
                d['info'] = str(citation.FullJournalName())
            else:
                d['info'] = "None"
        elif nodeType == 'original':
            d['info'] = str(citation)
        else:
            d['info'] = idVal
    if fullInfo:
        d['fullCite'] = str(citation)
    if count:
        # start every node at 1; callers increment on repeat sightings
        d['count'] = 1
    return (idVal, d)
Makes a tuple of idVal and a dict of the selected attributes
18,401
def expandRecs(G, RecCollect, nodeType, weighted):
    """Expand all the citations from _RecCollect_ into _G_.

    For each Record whose multiCite citation list yields more than one node
    ID, any IDs missing from _G_ are added with the same attributes as the
    ID already present, linked to it, and given copies of its edges.
    """
    for Rec in RecCollect:
        fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite=True)]
        if len(fullCiteList) > 1:
            for i, citeID1 in enumerate(fullCiteList):
                if citeID1 in G:
                    for citeID2 in fullCiteList[i + 1:]:
                        if citeID2 not in G:
                            # Fix: Graph.node was removed in networkx 2.4;
                            # Graph.nodes is the supported accessor (this file
                            # already uses the 2.x `G.edges[...]` API below).
                            G.add_node(citeID2, **G.nodes[citeID1])
                            if weighted:
                                G.add_edge(citeID1, citeID2, weight=1)
                            else:
                                G.add_edge(citeID1, citeID2)
                        elif weighted:
                            try:
                                G.edges[citeID1, citeID2]['weight'] += 1
                            except KeyError:
                                G.add_edge(citeID1, citeID2, weight=1)
                        # mirror citeID1's edges onto citeID2
                        for e1, e2, data in G.edges(citeID1, data=True):
                            G.add_edge(citeID2, e2, **data)
Expand all the citations from _RecCollect_
18,402
def dropNonJournals(self, ptVal='J', dropBad=True, invert=False):
    """Drops the non-journal-type Records from the collection.

    A Record is kept when its 'pubType' equals _ptVal_ (upper-cased);
    with invert=True the matching Records are dropped instead. When
    _dropBad_ is True the bad entries are removed first.
    """
    if dropBad:
        self.dropBadEntries()
    wanted = ptVal.upper()
    if invert:
        kept = {r for r in self._collection if r['pubType'] != wanted}
    else:
        kept = {r for r in self._collection if r['pubType'] == wanted}
    self._collection = kept
Drops the non-journal-type Records from the collection; this is done by checking _ptVal_ against the PT tag
18,403
def writeFile(self, fname=None):
    """Writes the RecordCollection to a file.

    The written format matches the source files (WOS-style header/footer for
    pure WOSRecord collections, Scopus CSV header for ScopusRecord ones).
    The order of Records written is random. When _fname_ is None the
    collection's name (truncated to 200 chars) plus '.txt' is used.
    """
    # single-type collections keep their native encoding; mixed ones use utf-8
    if len(self._collectedTypes) < 2:
        recEncoding = self.peek().encoding()
    else:
        recEncoding = 'utf-8'
    if fname:
        outName = fname
    else:
        outName = self.name[:200] + '.txt'
    # Fix: use a context manager so the handle is closed even if a
    # Record raises while being written (previously leaked on error).
    with open(outName, mode='w', encoding=recEncoding) as f:
        if self._collectedTypes == {'WOSRecord'}:
            f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n")
            f.write("VR 1.0\n")
        elif self._collectedTypes == {'MedlineRecord'}:
            f.write('\n')
        elif self._collectedTypes == {'ScopusRecord'}:
            f.write("\ufeff{}\n".format(','.join(scopusHeader)))
        for R in self._collection:
            R.writeRecord(f)
            f.write('\n')
        if self._collectedTypes == {'WOSRecord'}:
            f.write('EF')
Writes the RecordCollection to a file; the written file's format is identical to those downloaded from WOS . The order of Records written is random .
18,404
def writeBib(self, fname=None, maxStringLength=1000, wosMode=False, reducedOutput=False, niceIDs=True):
    """Writes a bibTeX entry to _fname_ for each Record in the collection.

    Records raising BadWOSRecord are skipped silently; a Record without
    bibString support raises RecordsNotCompatible. When _fname_ is None the
    collection's name (truncated to 200 chars) plus '.bib' is used.
    """
    if fname:
        outName = fname
    else:
        outName = self.name[:200] + '.bib'
    # Fix: context manager closes the file even when RecordsNotCompatible
    # is raised mid-loop (previously the handle leaked on that path).
    with open(outName, mode='w', encoding='utf-8') as f:
        f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n")
        for R in self:
            try:
                f.write('\n\n')
                f.write(R.bibString(maxLength=maxStringLength, WOSMode=wosMode, restrictedOutput=reducedOutput, niceID=niceIDs))
            except BadWOSRecord:
                pass
            except AttributeError:
                raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id))
Writes a bibTex entry to _fname_ for each Record in the collection .
18,405
def makeDict(self, onlyTheseTags=None, longNames=False, raw=False, numAuthors=True, genderCounts=True):
    """Returns a dict mapping each tag to the list of its values across all
    Records, None where a Record lacks the tag; lists are index-aligned
    across tags. Optionally adds 'num-Authors' and gender-count columns.
    """
    if onlyTheseTags:
        # normalize long names to their short tag form, in place
        for i in range(len(onlyTheseTags)):
            if onlyTheseTags[i] in fullToTagDict:
                onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]]
        retrievedFields = onlyTheseTags
    else:
        # collect the union of all tags present in the collection
        retrievedFields = []
        for R in self:
            tagsLst = [t for t in R.keys() if t not in retrievedFields]
            retrievedFields += tagsLst
    if longNames:
        try:
            retrievedFields = [tagToFullDict[t] for t in retrievedFields]
        except KeyError:
            raise KeyError("One of the tags could not be converted to a long name.")
    retDict = {k: [] for k in retrievedFields}
    if numAuthors:
        retDict["num-Authors"] = []
    if genderCounts:
        retDict.update({'num-Male': [], 'num-Female': [], 'num-Unknown': []})
    for R in self:
        if numAuthors:
            retDict["num-Authors"].append(len(R.get('authorsShort', [])))
        if genderCounts:
            m, f, u = R.authGenders(_countsTuple=True)
            retDict['num-Male'].append(m)
            retDict['num-Female'].append(f)
            retDict['num-Unknown'].append(u)
        # subDict returns None for missing tags, keeping columns aligned
        for k, v in R.subDict(retrievedFields, raw=raw).items():
            retDict[k].append(v)
    return retDict
Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection None is given when there is no value and they are in the same order across each tag .
18,406
def getCitations(self, field=None, values=None, pandasFriendly=True, counts=True):
    """Creates a pandas-ready dict with one row per citation found in the
    collection's Records; with pandasFriendly=False a deduplicated list of
    Citation objects is returned instead.
    """
    # a bare scalar (or any non-container) is wrapped so membership
    # tests in the per-Record filtering work uniformly
    if values is not None:
        scalarLike = isinstance(values, (str, int, float))
        if scalarLike or not isinstance(values, collections.abc.Container):
            values = [values]
    retCites = []
    for R in self:
        retCites.extend(R.getCitations(field=field, values=values, pandasFriendly=False))
    if pandasFriendly:
        return _pandasPrep(retCites, counts)
    return list(set(retCites))
Creates a pandas-ready dict with each row a different citation, the contained Records, and columns containing the original string, year, journal, author's name, and the number of times it occurred .
18,407
def networkCoCitation(self, dropAnon=True, nodeType="full", nodeInfo=True, fullInfo=False, weighted=True, dropNonJournals=False, count=True, keyWords=None, detailedCore=True, detailedCoreAttributes=False, coreOnly=False, expandedCore=False, addCR=False):
    """Creates a co-citation network for the RecordCollection.

    Nodes are citations (keyed per _nodeType_); edges join citations that
    appear together in a Record's reference list. Returns a networkx Graph.
    Raises RCValueError for an unknown _nodeType_.
    """
    allowedTypes = ["full", "original", "author", "journal", "year"]
    if nodeType not in allowedTypes:
        raise RCValueError("{} is not an allowed nodeType.".format(nodeType))
    coreValues = []
    if bool(detailedCore):
        try:
            # detailedCore may be an iterable of tag names
            for tag in detailedCore:
                coreValues.append(normalizeToTag(tag))
        except TypeError:
            # non-iterable truthy value (e.g. True): use the default tag set
            coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage']
    tmpgrph = nx.Graph()
    pcount = 0
    progArgs = (0, "Starting to make a co-citation network")
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        if coreOnly or coreValues or expandedCore:
            # map each collection Record's citation back to the Record itself
            coreCitesDict = {R.createCitation(): R for R in self}
            if coreOnly:
                coreCites = coreCitesDict.keys()
            else:
                coreCites = None
        else:
            coreCitesDict = None
            coreCites = None
        for R in self:
            if PBar:
                pcount += 1
                PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R))
            Cites = R.get('citations')
            if Cites:
                filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites)
                addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo, fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd=None)
        if expandedCore:
            if PBar:
                PBar.updateVal(.98, "Expanding core Records")
            expandRecs(tmpgrph, self, nodeType, weighted)
        if PBar:
            PBar.finish("Done making a co-citation network from {}".format(self))
    return tmpgrph
Creates a co - citation network for the RecordCollection .
18,408
def networkBibCoupling(self, weighted=True, fullInfo=False, addCR=False):
    """Creates a bibliographic coupling network based on citations for the
    RecordCollection: core Records that cite a common work are linked,
    with edge weight counting the number of shared cited works.
    """
    progArgs = (0, "Make a citation network for coupling")
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        # directed citation graph: an in-edge means "is cited by"
        citeGrph = self.networkCitation(weighted=False, directed=True, detailedCore=True, fullInfo=fullInfo, count=False, nodeInfo=True, addCR=addCR, _quiet=True)
        pcount = 0
        pmax = len(citeGrph)
        PBar.updateVal(.2, "Starting to classify nodes")
        workingGrph = nx.Graph()
        couplingSet = set()
        for n, d in citeGrph.nodes(data=True):
            pcount += 1
            PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n))
            if d['inCore']:
                # only core Records become nodes of the coupling graph
                workingGrph.add_node(n, **d)
            if citeGrph.in_degree(n) > 0:
                # cited at least once: its citers can be coupled through it
                couplingSet.add(n)
        pcount = 0
        pmax = len(couplingSet)
        for n in couplingSet:
            # NOTE(review): pcount is never incremented in this loop, so the
            # progress bar stays at .6 until finish — looks unintended; confirm
            PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n))
            citesLst = list(citeGrph.in_edges(n))
            # every pair of citers of n gets an edge (or +1 weight)
            for i, edgeOuter in enumerate(citesLst):
                outerNode = edgeOuter[0]
                for edgeInner in citesLst[i + 1:]:
                    innerNode = edgeInner[0]
                    if weighted and workingGrph.has_edge(outerNode, innerNode):
                        workingGrph.edges[outerNode, innerNode]['weight'] += 1
                    elif weighted:
                        workingGrph.add_edge(outerNode, innerNode, weight=1)
                    else:
                        workingGrph.add_edge(outerNode, innerNode)
        PBar.finish("Done making a bib-coupling network from {}".format(self))
    return workingGrph
Creates a bibliographic coupling network based on citations for the RecordCollection .
18,409
def yearSplit(self, startYear, endYear, dropMissingYears=True):
    """Creates a RecordCollection of the Records from the years between
    _startYear_ and _endYear_ inclusive.

    Records whose 'year' is missing/non-comparable raise TypeError in the
    comparison; those are silently skipped when _dropMissingYears_ is True,
    otherwise the TypeError propagates.
    """
    recordsInRange = set()
    for R in self:
        try:
            if R.get('year') >= startYear and R.get('year') <= endYear:
                recordsInRange.add(R)
        except TypeError:
            # 'year' was None (or otherwise unorderable)
            if dropMissingYears:
                pass
            else:
                raise
    RCret = RecordCollection(recordsInRange, name="{}({}-{})".format(self.name, startYear, endYear), quietStart=True)
    # preserve the type bookkeeping of the source collection
    RCret._collectedTypes = self._collectedTypes.copy()
    return RCret
Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive .
18,410
def localCiteStats(self, pandasFriendly=False, keyType="citation"):
    """Returns a dict with all the citations in the CR field as keys and the
    number of times they occur as the values.

    _keyType_ selects what is counted: whole citations, or their 'journal',
    'year' or 'author' attribute. With pandasFriendly=True the result is a
    two-column dict ('Citations', 'Counts').
    """
    count = 0
    recCount = len(self)
    progArgs = (0, "Starting to get the local stats on {}s.".format(keyType))
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        keyTypesLst = ["citation", "journal", "year", "author"]
        citesDict = {}
        if keyType not in keyTypesLst:
            raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1]))
        for R in self:
            rCites = R.get('citations')
            if PBar:
                count += 1
                PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT))
            if rCites:
                for c in rCites:
                    if keyType == keyTypesLst[0]:
                        # counting whole citations
                        cVal = c
                    else:
                        # counting one attribute of each citation
                        cVal = getattr(c, keyType)
                    if cVal is None:
                        # citations missing the attribute are skipped
                        continue
                    if cVal in citesDict:
                        citesDict[cVal] += 1
                    else:
                        citesDict[cVal] = 1
        if PBar:
            PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType))
    if pandasFriendly:
        citeLst = []
        countLst = []
        for cite, occ in citesDict.items():
            citeLst.append(cite)
            countLst.append(occ)
        return {"Citations": citeLst, "Counts": countLst}
    else:
        return citesDict
Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values
18,411
def localCitesOf(self, rec):
    """Takes in a Record, WOS string, citation string or Citation and returns
    a RecordCollection of all Records in this collection that cite it.

    Raises ValueError when _rec_ is none of the accepted types or is an
    unparsable string.
    """
    localCites = []
    # Fix: this was a separate `if` followed by an if/elif/else chain, so a
    # Record input fell through to the final `else` and raised ValueError.
    if isinstance(rec, Record):
        recCite = rec.createCitation()
    elif isinstance(rec, str):
        try:
            recCite = self.getID(rec)
        except ValueError:
            try:
                recCite = Citation(rec)
            except AttributeError:
                # Fix: the message formatted `recCite`, which is unbound on
                # this path; the offending input is `rec`.
                raise ValueError("{} is not a valid WOS string or a valid citation string".format(rec))
        else:
            if recCite is None:
                # ID not present in the collection: nothing can cite it
                return RecordCollection(inCollection=localCites, name="Records_citing_{}".format(rec), quietStart=True)
            else:
                recCite = recCite.createCitation()
    elif isinstance(rec, Citation):
        recCite = rec
    else:
        raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec))
    for R in self:
        rCites = R.get('citations')
        if rCites:
            for cite in rCites:
                if recCite == cite:
                    localCites.append(R)
                    break
    return RecordCollection(inCollection=localCites, name="Records_citing_'{}'".format(rec), quietStart=True)
Takes in a Record WOS string citation string or Citation and returns a RecordCollection of all records that cite it .
18,412
def citeFilter(self, keyString='', field='all', reverse=False, caseSensitive=False):
    """Filters Records by the string _keyString_ in their citations; returns
    a RecordCollection of all Records with at least one citation whose
    _field_ contains _keyString_.

    _caseSensitive_ only applies to field='all'; the author/journal/V/P/misc
    fields are always matched case-insensitively. reverse=True returns the
    complement instead.
    """
    retRecs = []
    keyString = str(keyString)
    for R in self:
        try:
            if field == 'all':
                for cite in R.get('citations'):
                    if caseSensitive:
                        if keyString in cite.original:
                            retRecs.append(R)
                            break
                    else:
                        if keyString.upper() in cite.original.upper():
                            retRecs.append(R)
                            break
            elif field == 'author':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.author.upper():
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'journal':
                for cite in R.get('citations'):
                    try:
                        # Fix: keyString was upper-cased but cite.journal was
                        # not, so mixed-case journal names never matched;
                        # now consistent with the author branch.
                        if keyString.upper() in cite.journal.upper():
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'year':
                for cite in R.get('citations'):
                    try:
                        if int(keyString) == cite.year:
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'V':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.V:
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'P':
                for cite in R.get('citations'):
                    try:
                        if keyString.upper() in cite.P:
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'misc':
                for cite in R.get('citations'):
                    try:
                        # Fix: same one-sided upper-casing as the journal branch
                        if keyString.upper() in cite.misc.upper():
                            retRecs.append(R)
                            break
                    except AttributeError:
                        pass
            elif field == 'anonymous':
                for cite in R.get('citations'):
                    if cite.isAnonymous():
                        retRecs.append(R)
                        break
            elif field == 'bad':
                for cite in R.get('citations'):
                    if cite.bad:
                        retRecs.append(R)
                        break
        except TypeError:
            # Record has no citations (R.get returned None)
            pass
    if reverse:
        excluded = [R for R in self if R not in retRecs]
        return RecordCollection(inCollection=excluded, name=self.name, quietStart=True)
    else:
        return RecordCollection(inCollection=retRecs, name=self.name, quietStart=True)
Filters Records by some string _keyString_ in their citations and returns all Records with at least one citation possessing _keyString_ in the field given by _field_ .
18,413
def filterNonJournals(citesLst, invert=False):
    """Removes the Citations from _citesLst_ that are not journals.

    With invert=True the journal Citations are removed instead and only the
    non-journal ones are returned.
    """
    return [c for c in citesLst if bool(c.isJournal()) != invert]
Removes the Citations from _citesLst_ that are not journals
18,414
def add(self, elem):
    """Adds _elem_ to the collection.

    Raises CollectionTypeError when _elem_ is not one of the collection's
    allowed types; otherwise records both the element and its type name.
    """
    if not isinstance(elem, self._allowedTypes):
        raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem))
    self._collection.add(elem)
    self._collectedTypes.add(type(elem).__name__)
Adds _elem_ to the collection .
18,415
def remove(self, elem):
    """Removes _elem_ from the collection; raises a KeyError with a
    descriptive message if _elem_ is missing."""
    try:
        return self._collection.remove(elem)
    except KeyError:
        # re-raise with context suppressed so only the friendly message shows
        message = "'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)
        raise KeyError(message) from None
Removes _elem_ from the collection; will raise a KeyError if _elem_ is missing
18,416
def clear(self):
    """Removes all elements from the collection and resets the error
    handling state (bad flag and errors mapping)."""
    self.bad = False
    self.errors = {}
    self._collection.clear()
Removes all elements from the collection and resets the error handling
18,417
def pop(self):
    """Removes an arbitrary element from the collection and returns it;
    raises a KeyError with a descriptive message when empty."""
    try:
        return self._collection.pop()
    except KeyError:
        message = "Nothing left in the {}: '{}'.".format(type(self).__name__, self)
        raise KeyError(message) from None
Removes a random element from the collection and returns it
18,418
def copy(self):
    """Creates a shallow copy of the collection.

    The copy gets its own _collection, _collectedTypes, _allowedTypes and
    errors containers so mutating one collection does not affect the other.
    """
    collectedCopy = copy.copy(self)
    collectedCopy._collection = copy.copy(collectedCopy._collection)
    # Fix: these two assignments previously targeted `self`, mutating the
    # source collection and leaving the copy aliasing the originals.
    collectedCopy._collectedTypes = copy.copy(self._collectedTypes)
    collectedCopy._allowedTypes = copy.copy(self._allowedTypes)
    collectedCopy.errors = copy.copy(collectedCopy.errors)
    return collectedCopy
Creates a shallow copy of the collection
18,419
def chunk(self, maxSize):
    """Splits the Collection into a list of Collections each holding at most
    _maxSize_ elements; the source Collection is left untouched."""
    pieces = []
    # start oversized so the first element always opens a new chunk
    filled = maxSize + 1
    for item in self:
        if filled >= maxSize:
            filled = 0
            newChunk = type(self)({item}, name='Chunk-{}-of-{}'.format(len(pieces), self.name), quietStart=True)
            pieces.append(newChunk)
        else:
            pieces[-1].add(item)
        filled += 1
    return pieces
Splits the Collection into _maxSize_ size or smaller Collections
18,420
def split(self, maxSize):
    """Destructively splits the Collection into _maxSize_-or-smaller
    Collections; the source Collection is emptied and renamed."""
    pieces = []
    # start oversized so the first popped element opens a new chunk
    filled = maxSize + 1
    try:
        while True:
            if filled >= maxSize:
                filled = 0
                newChunk = type(self)({self.pop()}, name='Chunk-{}-of-{}'.format(len(pieces), self.name), quietStart=True)
                pieces.append(newChunk)
            else:
                pieces[-1].add(self.pop())
            filled += 1
    except KeyError:
        # pop() raised: the source is exhausted
        self.clear()
        self.name = 'Emptied-{}'.format(self.name)
    return pieces
Destructively splits the Collection into _maxSize_ size or smaller Collections . The source Collection will be empty after this operation
18,421
def containsID(self, idVal):
    """Checks if the collected items contain the given _idVal_; returns a
    bool."""
    return any(item.id == idVal for item in self)
Checks if the collected items contain the given _idVal_
18,422
def discardID ( self , idVal ) : for i in self : if i . id == idVal : self . _collection . discard ( i ) return
Checks if the collected items contain the given _idVal_ and discards it if it is found; will not raise an exception if the item is not found
18,423
def removeID ( self , idVal ) : for i in self : if i . id == idVal : self . _collection . remove ( i ) return raise KeyError ( "A Record with the ID '{}' was not found in the RecordCollection: '{}'." . format ( idVal , self ) )
Checks if the collected items contain the given _idVal_ and removes it if it is found; will raise a KeyError if the item is not found
18,424
def badEntries ( self ) : badEntries = set ( ) for i in self : if i . bad : badEntries . add ( i ) return type ( self ) ( badEntries , quietStart = True )
Creates a new collection of the same type with only the bad entries
18,425
def dropBadEntries ( self ) : self . _collection = set ( ( i for i in self if not i . bad ) ) self . bad = False self . errors = { }
Removes all the bad entries from the collection
18,426
def tags(self):
    """Creates a set of all the tags of the contained items."""
    collected = set()
    for item in self:
        collected.update(item.keys())
    return collected
Creates a list of all the tags of the contained items
18,427
def rankedSeries(self, tag, outputFile=None, giveCounts=True, giveRanks=False, greatestFirst=True, pandasMode=True, limitTo=None):
    """Creates a pandas-ready dict of all the values of _tag_ ranked by
    their number of occurrences; can instead return a plain list (with
    counts or ranks) or write a CSV to _outputFile_.

    giveCounts and giveRanks are mutually exclusive.
    """
    if giveRanks and giveCounts:
        raise mkException("rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.")
    seriesDict = {}
    # tally occurrences of every value of `tag` across the collection
    for R in self:
        try:
            val = R[tag]
        except KeyError:
            continue
        if not isinstance(val, list):
            val = [val]
        for entry in val:
            if limitTo and entry not in limitTo:
                continue
            if entry in seriesDict:
                seriesDict[entry] += 1
            else:
                seriesDict[entry] = 1
    seriesList = sorted(seriesDict.items(), key=lambda x: x[1], reverse=greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerow((str(tag), 'count'))
            writer.writerows(seriesList)
    if giveCounts and not pandasMode:
        return seriesList
    elif giveRanks or pandasMode:
        # ranking walks from highest count down, so force that order here
        if not greatestFirst:
            seriesList.reverse()
        currentRank = 1
        retList = []
        panDict = {'entry': [], 'count': [], 'rank': []}
        try:
            currentCount = seriesList[0][1]
        except IndexError:
            # empty series: fall through and return the empty container
            pass
        else:
            for valString, count in seriesList:
                # ties share a rank; the rank only advances when the count drops
                if currentCount > count:
                    currentRank += 1
                    currentCount = count
                if pandasMode:
                    panDict['entry'].append(valString)
                    panDict['count'].append(count)
                    panDict['rank'].append(currentRank)
                else:
                    retList.append((valString, currentRank))
            if not greatestFirst:
                retList.reverse()
        if pandasMode:
            return panDict
        else:
            return retList
    else:
        return [e for e, c in seriesList]
Creates a pandas dict of the ordered list of all the values of _tag_, ranked by their number of occurrences . A list can also be returned with the counts or ranks added, or it can be written to a file .
18,428
def timeSeries(self, tag=None, outputFile=None, giveYears=True, greatestFirst=True, limitTo=False, pandasMode=True):
    """Creates a pandas-ready dict of all the values of _tag_ ordered by the
    year they occurred in; a value seen in multiple years produces multiple
    entries. Can instead return a list (with or without years) or write a
    CSV to _outputFile_. With tag=None the Records themselves are the
    entries.
    """
    seriesDict = {}
    for R in self:
        try:
            year = R['year']
        except KeyError:
            # Records without a year cannot be placed on the axis
            continue
        if tag is None:
            # each Record counts once, in its own year
            seriesDict[R] = {year: 1}
        else:
            try:
                val = R[tag]
            except KeyError:
                continue
            if not isinstance(val, list):
                val = [val]
            for entry in val:
                if limitTo and entry not in limitTo:
                    continue
                if entry in seriesDict:
                    try:
                        seriesDict[entry][year] += 1
                    except KeyError:
                        seriesDict[entry][year] = 1
                else:
                    seriesDict[entry] = {year: 1}
    # flatten to (entry, year) pairs, one per year the entry occurred in
    seriesList = []
    for e, yd in seriesDict.items():
        seriesList += [(e, y) for y in yd.keys()]
    seriesList = sorted(seriesList, key=lambda x: x[1], reverse=greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerow((str(tag), 'years'))
            writer.writerows(((k, '|'.join((str(y) for y in v))) for k, v in seriesDict.items()))
    if pandasMode:
        panDict = {'entry': [], 'count': [], 'year': []}
        for entry, year in seriesList:
            panDict['entry'].append(entry)
            panDict['year'].append(year)
            panDict['count'].append(seriesDict[entry][year])
        return panDict
    elif giveYears:
        return seriesList
    else:
        return [e for e, c in seriesList]
Creates a pandas dict of the ordered list of all the values of _tag_, ranked by the year they occurred in; multiple-year occurrences will create multiple entries . A list can also be returned with the counts or years added, or it can be written to a file .
18,429
def cooccurrenceCounts(self, keyTag, *countedTags):
    """Counts the number of times values from any of the _countedTags_ occur
    with _keyTag_.

    Returns a dict mapping each value of _keyTag_ to a dict mapping each
    co-occurring _countedTags_ value to its count. Raises TagError for
    non-string tags or when no counted tags are given.
    """
    if not isinstance(keyTag, str):
        raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
    if len(countedTags) < 1:
        # Fix: the exception was constructed here but never raised,
        # so the empty-tags error silently did nothing
        raise TagError("You need to provide atleast one tag")
    for tag in countedTags:
        if not isinstance(tag, str):
            raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
    occurenceDict = {}
    progArgs = (0, "Starting to count the co-occurrences of '{}' and' {}'".format(keyTag, "','".join(countedTags)))
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        for i, R in enumerate(self):
            PBar.updateVal(i / len(self), "Analyzing {}".format(R))
            keyVal = R.get(keyTag)
            if keyVal is None:
                # Record lacks the key tag entirely
                continue
            if not isinstance(keyVal, list):
                keyVal = [keyVal]
            # ensure every key value has an inner dict even with no co-occurrences
            for key in keyVal:
                if key not in occurenceDict:
                    occurenceDict[key] = {}
            for tag in countedTags:
                tagval = R.get(tag)
                if tagval is None:
                    continue
                if not isinstance(tagval, list):
                    tagval = [tagval]
                for val in tagval:
                    for key in keyVal:
                        try:
                            occurenceDict[key][val] += 1
                        except KeyError:
                            occurenceDict[key][val] = 1
        PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    return occurenceDict
Counts the number of times values from any of the _countedTags_ occur with _keyTag_ . The counts are returned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to their counts .
18,430
def makeNodeID(Rec, ndType, extras=None):
    """Helper to make a node ID and attribute dict from _Rec_.

    ndType='raw' uses the Record itself as the ID; otherwise the value of
    that tag is used (lists become tuples so IDs stay hashable; a missing
    tag yields None). _extras_ names tags whose values are returned in the
    attribute dict, keyed by tag ('raw' maps to the Record itself).
    """
    if ndType == 'raw':
        recID = Rec
    else:
        recID = Rec.get(ndType)
    if isinstance(recID, list):
        # lists are unhashable; tuples preserve order and can be node IDs
        recID = tuple(recID)
    extraDict = {}
    if extras:
        for tag in extras:
            # Fix: every extra was written to the single key 'Tag', so
            # multiple extras overwrote each other; key by the tag itself.
            if tag == "raw":
                extraDict[tag] = Rec
            else:
                extraDict[tag] = Rec.get(tag)
    return recID, extraDict
Helper to make a node ID extras is currently not used
18,431
def pandoc_process(app, what, name, obj, options, lines):
    """Convert docstrings in Markdown into reStructuredText using pandoc.

    Sphinx autodoc-process-docstring hook: _lines_ is mutated in place,
    since autodoc ignores the return value.
    """
    if not lines:
        return None
    # source format comes from the Sphinx config value
    input_format = app.config.mkdsupport_use_parser
    output_format = 'rst'
    # join with the sentinel SEP so the converted text can be re-split
    # into the same number of logical segments
    text = SEP.join(lines)
    text = pypandoc.convert_text(text, output_format, format=input_format)
    # replace the list contents in place
    del lines[:]
    lines.extend(text.split(SEP))
Convert docstrings in Markdown into reStructureText using pandoc
18,432
def beginningPage(R):
    """Extracts the first page number from the Record's 'PG' field.

    Pages may not be given as plain numbers, so this strips a leading
    'suppl ' marker, takes the first space-separated token, drops any
    range suffix after '-', and removes semicolons.
    """
    pages = R['PG']
    if pages.startswith('suppl '):
        pages = pages[len('suppl '):]
    firstToken = pages.split(' ')[0]
    return firstToken.split('-')[0].replace(';', '')
As pages may not be given as numbers this is the most accurate this function can be
18,433
def copy(self):
    """Correctly copies the Record: a shallow copy plus a fresh _fieldDict
    so the copy's fields can be changed independently."""
    duplicate = copy.copy(self)
    duplicate._fieldDict = duplicate._fieldDict.copy()
    return duplicate
Correctly copies the Record
18,434
def values(self, raw=False):
    """Like dict.values but with a raw option: raw=True exposes the
    underlying _fieldDict values, otherwise the Mapping protocol's
    (processed) view is returned."""
    if raw:
        return self._fieldDict.values()
    return collections.abc.Mapping.values(self)
Like values for dicts but with a raw option
18,435
def items(self, raw=False):
    """Like dict.items but with a raw option: raw=True exposes the
    underlying _fieldDict items, otherwise the Mapping protocol's
    (processed) view is returned."""
    if raw:
        return self._fieldDict.items()
    return collections.abc.Mapping.items(self)
Like items for dicts but with a raw option
18,436
def getCitations(self, field=None, values=None, pandasFriendly=True):
    """Creates a pandas-ready dict of this Record's citations; with
    pandasFriendly=False the matching Citation objects are returned as a
    list. _field_/_values_ filter citations by an attribute's value."""
    # wrap a bare scalar (or non-container) so membership tests work
    if values is not None:
        scalarLike = isinstance(values, (str, int, float))
        if scalarLike or not isinstance(values, collections.abc.Container):
            values = [values]
    if field is not None:
        retCites = []
        for cite in self.get('citations', []):
            try:
                targetVal = getattr(cite, field)
            except AttributeError:
                # citation lacks the attribute: it cannot match
                continue
            if values is None or targetVal in values:
                retCites.append(cite)
    else:
        retCites = self.get('citations', [])
    if pandasFriendly:
        return _pandasPrep(retCites, False)
    return retCites
Creates a pandas ready dict with each row a different citation and columns containing the original string year journal and author s name .
18,437
def subDict(self, tags, raw=False):
    """Creates a dict of the values of _tags_ from the Record; missing tags
    map to None (via self.get)."""
    return {tag: self.get(tag, raw=raw) for tag in tags}
Creates a dict of values of _tags_ from the Record . The tags are the keys and the values are the values . If the tag is missing the value will be None .
18,438
def authGenders(self, countsOnly=False, fractionsMode=False, _countsTuple=False):
    """Creates a dict mapping 'Male', 'Female' and 'Unknown' to the authors'
    inferred genders.

    With countsOnly/fractionsMode/_countsTuple the per-gender counts (or
    fractions of the total) are returned instead, as a dict or — for
    _countsTuple — a (male, female, unknown) tuple.
    """
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = [rawList.count(k) for k in ('Male', 'Female', 'Unknown')]
        if fractionsMode:
            tot = sum(countsList)
            # Fix: a Record with no authors previously raised
            # ZeroDivisionError here; report 0.0 fractions instead.
            countsList = [c / tot if tot else 0.0 for c in countsList]
        if _countsTuple:
            return tuple(countsList)
        return {'Male': countsList[0], 'Female': countsList[1], 'Unknown': countsList[2]}
    return authDict
Creates a dict mapping Male Female and Unknown to lists of the names of all the authors .
18,439
def proQuestParser(proFile):
    """Parses a ProQuest file _proFile_ to extract the individual entries.

    Returns ``(recSet, error)``: the set of parsed ProQuestRecords and
    either None or a BadProQuestFile describing the first problem found.
    Parsing is driven by StopIteration — errors set `error` and raise it
    to bail out of the with-block.
    """
    nameDict = {}
    recSet = set()
    error = None
    lineNum = 0
    try:
        with open(proFile, 'r', encoding='utf-8') as openfile:
            f = enumerate(openfile, start=1)
            # skip the fixed 12-line preamble
            for i in range(12):
                lineNum, line = next(f)
            # read the numbered table of contents (title index) until the
            # 'Bibliography' marker; entries are consumed two lines at a time
            # (presumably a blank separator line precedes each entry —
            # NOTE(review): confirm against a sample file)
            while True:
                lineNum, line = next(f)
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    for i in range(3):
                        lineNum, line = next(f)
                    break
                else:
                    # "<number>. <title>" — trailing newline stripped with [:-1]
                    s = line.split('. ')
                    nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
            # read the records themselves until the closing 'Bibliography'
            while True:
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    break
                elif line.startswith('Document '):
                    # "Document N of M" — N keys into nameDict
                    n = int(line[9:].split(' of ')[0])
                    R = ProQuestRecord(f, sFile=proFile, sLine=lineNum)
                    if R.get('Title') != nameDict[n]:
                        error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
                        raise StopIteration
                    recSet.add(R)
                    lineNum, line = next(f)
                else:
                    error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the seperators between the records are incorrect".format(proFile, lineNum))
                    raise StopIteration
    except (UnicodeDecodeError, StopIteration, ValueError) as e:
        # StopIteration also fires on ordinary end-of-file; only wrap it
        # in a generic error when no specific error was recorded above
        if error is None:
            error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
    return recSet, error
Parses a ProQuest file _proFile_ to extract the individual entries .
18,440
def getInvestigators(self, tags=None, seperator=";", _getTag=False):
    """Returns a list of the names of investigators.

    Delegates to the parent implementation with the 'Investigator' tag
    ensured to be in the tag list.
    """
    if tags is None:
        tags = ['Investigator']
    elif isinstance(tags, str):
        tags = ['Investigator', tags]
    else:
        # Fix: previously tags.append(...) mutated the caller's list,
        # growing it by one 'Investigator' per call; copy instead.
        tags = list(tags) + ['Investigator']
    return super().getInvestigators(tags=tags, seperator=seperator, _getTag=_getTag)
Returns a list of the names of investigators . The optional arguments are ignored .
18,441
def nameStringGender(s, noExcept=False):
    """Looks up the gender of the first name in _s_.

    Expects names in "last, first" format; the first name is the token
    after the comma. Returns 'Male'/'Female'/'Unknown'. When the string
    has no "last, first" pair a GenderException is raised, unless
    _noExcept_ is True in which case 'Unknown' is returned.
    """
    global mappingDict
    try:
        first = s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        if noExcept:
            return 'Unknown'
        # Fix: the exception was previously `return`ed instead of raised,
        # handing callers an exception object as if it were a gender
        raise GenderException("The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.".format(s))
    if mappingDict is None:
        # lazily load the name-to-gender table on first use
        mappingDict = getMapping()
    return mappingDict.get(first, 'Unknown')
Expects names in "last, first" format (the first name is taken from after the comma)
18,442
def j9urlGenerator(nameDict=False):
    """Builds the URLs for the WOS Journal Title Abbreviations pages.

    There is one page per section: '0-9' plus each uppercase letter.
    Returns a list of URLs, or with nameDict=True a dict mapping each
    section name to its URL. These are the currently-used URLs; they may
    change.
    """
    base = "https://images.webofknowledge.com/images/help/WOS/"
    suffix = "_abrvjt.html"
    sections = ["0-9"] + list(string.ascii_uppercase)
    if nameDict:
        return {sec: base + sec + suffix for sec in sections}
    return [base + sec + suffix for sec in sections]
How to get all the urls for the WOS Journal Title Abbreviations . Each varies by only a few characters . These are the currently in-use urls; they may change .
18,443
def _j9SaveCurrent(sDir='.'):
    """Downloads and saves all the WOS abbreviation webpages into a dated
    directory under _sDir_.

    Note: changes the process working directory to that directory.
    """
    dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
    if not os.path.isdir(dname):
        os.mkdir(dname)
    # chdir was duplicated in both branches of the original; do it once
    os.chdir(dname)
    for urlID, urlString in j9urlGenerator(nameDict=True).items():
        fname = "{}_abrvjt.html".format(urlID)
        # Fix: file handles were opened and never closed (one leak per page)
        with open(fname, 'wb') as f:
            f.write(urllib.request.urlopen(urlString).read())
Downloads and saves all the webpages
18,444
def _getDict(j9Page):
    """Parse one WOS abbreviation HTML page into {abbreviation: [names]}.

    Parsing is positional: each entry is assumed to be a journal-name line
    followed by an abbreviation line inside a <DL> list, so any change to
    the page layout will break this.
    """
    slines = j9Page.read().decode('utf-8').split('\n')
    # Skip everything up to the opening <DL> of the definition list.
    while slines.pop(0) != "<DL>":
        pass
    currentName = slines.pop(0).split('"></A><DT>')[1]
    currentTag = slines.pop(0).split("<B><DD>\t")[1]
    j9Dict = {}
    while True:
        # Several journal names may map to the same abbreviation.
        try:
            j9Dict[currentTag].append(currentName)
        except KeyError:
            j9Dict[currentTag] = [currentName]
        # A failed split (IndexError) marks the end of the list.
        try:
            currentName = slines.pop(0).split('</B><DT>')[1]
            currentTag = slines.pop(0).split("<B><DD>\t")[1]
        except IndexError:
            break
    return j9Dict
Parses a Journal Title Abbreviations page
18,445
def _getCurrentj9Dict():
    """Download and parse every WOS abbreviation page into one merged dict."""
    urls = j9urlGenerator()
    j9Dict = {}
    for url in urls:
        d = _getDict(urllib.request.urlopen(url))
        # An empty parse almost certainly means the page layout changed.
        if len(d) == 0:
            raise RuntimeError("Parsing failed, this is could require an update of the parser.")
        j9Dict.update(d)
    return j9Dict
Downloads and parses all the webpages
18,446
def updatej9DB(dbname=abrevDBname, saveRawHTML=False):
    """Refresh the local journal-abbreviation dbm database from the WOS site.

    Requires an internet connection. The database lives next to this source
    file, not in the working directory. With *saveRawHTML* the raw pages are
    also saved under a 'j9Raws' directory.
    """
    if saveRawHTML:
        rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
        if not os.path.isdir(rawDir):
            os.mkdir(rawDir)
        _j9SaveCurrent(sDir=rawDir)
    dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
    try:
        with dbm.dumb.open(dbLoc, flag='c') as db:
            try:
                j9Dict = _getCurrentj9Dict()
            except urllib.error.URLError:
                raise urllib.error.URLError("Unable to access server, check your connection")
            for k, v in j9Dict.items():
                if k in db:
                    for jName in v:
                        # NOTE(review): this checks and extends j9Dict[k] (a
                        # list) rather than the stored db entry, and += with a
                        # str extends the list character-by-character -- looks
                        # suspicious; confirm intent before relying on it.
                        if jName not in j9Dict[k]:
                            j9Dict[k] += '|' + jName
                else:
                    db[k] = '|'.join(v)
    except dbm.dumb.error as e:
        raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e))
Updates the database of Journal Title Abbreviations. Requires an internet connection. The database is saved relative to the source file, not the working directory.
18,447
def getj9dict(dbname=abrevDBname, manualDB=manualDBname, returnDict='both'):
    """Load the {abbreviation: [journal names]} mapping from the local dbm files.

    *returnDict* selects the WOS-scraped database ('WOS'), the manually
    maintained one ('manual'), or 'both' (manual entries overwrite WOS ones).
    A missing or empty database is regenerated once via updatej9DB() and the
    lookup retried.
    """
    dbLoc = os.path.normpath(os.path.dirname(__file__))
    retDict = {}
    try:
        if returnDict == 'both' or returnDict == 'WOS':
            with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db:
                if len(db) == 0:
                    raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().")
                for k, v in db.items():
                    # dbm stores bytes; values are '|'-joined name lists.
                    retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
    except JournalDataBaseError:
        updatej9DB()
        return getj9dict(dbname=dbname, manualDB=manualDB, returnDict=returnDict)
    try:
        if returnDict == 'both' or returnDict == 'manual':
            if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)):
                with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db:
                    for k, v in db.items():
                        retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
            else:
                # Only an explicit request for the manual DB makes its absence an error.
                if returnDict == 'manual':
                    raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB))
    except JournalDataBaseError:
        updatej9DB(dbname=manualDB)
        return getj9dict(dbname=dbname, manualDB=manualDB, returnDict=returnDict)
    return retDict
Returns the dictionary of journal abbreviations mapping to a list of the associated journal names . By default the local database is used . The database is in the file _dbname_ in the same directory as this source file
18,448
def normalizeToTag(val):
    """Convert a WOS tag or full field name to its 2-character tag.

    Matching is case-insensitive; unknown or non-string input raises
    KeyError.
    """
    try:
        upperVal = val.upper()
    except AttributeError:
        # Non-strings cannot be tags.
        raise KeyError("{} is not a tag or name string".format(val))
    if upperVal not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(upperVal))
    # Full names map through the dict; values already in tag form pass through.
    return fullToTagDictUpper.get(upperVal, upperVal)
Converts tags or full names to 2 character tags case insensitive
18,449
def normalizeToName(val):
    """Convert a tag or full name to the full field name (case-sensitive).

    Unknown input raises KeyError.
    """
    if val not in tagsAndNameSet:
        raise KeyError("{} is not a tag or name string".format(val))
    # Tags map through the dict; values already in full-name form pass through.
    return tagToFullDict.get(val, val)
Converts tags or full names to full names case sensitive
18,450
def update(self, other):
    """Merge the tag/value pairs of *other* into this object; *other* wins conflicts.

    Returns NotImplemented when *other* is of a different type. A bad
    *other* marks this object bad and copies its error over.
    """
    if type(self) != type(other):
        return NotImplemented
    # Badness is contagious: merging a bad record taints this one.
    if other.bad:
        self.error = other.error
        self.bad = True
    self._fieldDict.update(other._fieldDict)
Adds all the tag - entry pairs from _other_ to the Grant . If there is a conflict _other_ takes precedence .
18,451
def relay_events_from(self, originator, event_type, *more_event_types):
    """Configure this handler to re-dispatch events from another handler.

    Registers one relay handler on *originator* for each given event type;
    each relay re-dispatches the event (with its arguments) on ``self``.
    """
    all_types = (event_type,) + more_event_types
    # BUG FIX: bind each event type as a lambda default argument. A plain
    # closure over the comprehension variable late-binds, so every relay
    # would previously dispatch only the *last* event type.
    handlers = {
        each_type: (lambda *args, _et=each_type, **kwargs:
                    self.dispatch_event(_et, *args, **kwargs))
        for each_type in all_types
    }
    originator.set_handlers(**handlers)
Configure this handler to re - dispatch events from another handler .
18,452
def start_event(self, event_type, *args, dt=1 / 60):
    """Begin dispatching *event_type* (with *args*) every *dt* seconds.

    Schedules a callback on the pyglet clock and remembers it in
    self.__timers so stop_event() can cancel it later.
    """
    # Skip scheduling entirely if nobody is listening for this event.
    if not any(self.__yield_handlers(event_type)):
        return

    def on_time_interval(dt):
        self.dispatch_event(event_type, *args, dt)

    pyglet.clock.schedule_interval(on_time_interval, dt)
    self.__timers[event_type] = on_time_interval
Begin dispatching the given event at the given frequency .
18,453
def stop_event(self, event_type):
    """Cancel the periodic dispatch started by start_event(), if any."""
    try:
        callback = self.__timers[event_type]
    except KeyError:
        # Nothing scheduled for this event; nothing to do.
        return
    pyglet.clock.unschedule(callback)
Stop dispatching the given event .
18,454
def __yield_handlers(self, event_type):
    """Yield every handler registered for *event_type*.

    Handlers pushed onto the event stack come first, then a method on the
    instance named after the event (if any). Raises ValueError for an
    unregistered event type.
    """
    if event_type not in self.event_types:
        raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))
    # Handlers installed via the pyglet-style event stack.
    for frame in list(self._event_stack):
        if event_type in frame:
            yield frame[event_type]
    # A method named after the event also counts as a handler.
    if hasattr(self, event_type):
        yield getattr(self, event_type)
Yield all the handlers registered for the given event type .
18,455
def _filter_pending_updates(self):
    """Yield the queued updates with duplicates dropped, keeping only the
    *last* occurrence of each while preserving overall order.

    Meant to be overridden by subclasses that customize how held updates are
    applied.
    """
    from more_itertools import unique_everseen as unique
    # unique_everseen keeps first occurrences; running it over the reversed
    # list and reversing the result back therefore keeps last occurrences.
    yield from reversed(list(unique(reversed(self._pending_updates))))
Return all the updates that need to be applied from a list of all the updates that were called while the hold was active . This method is meant to be overridden by subclasses that want to customize how held updates are applied .
18,456
def get_html(self):
    """Render the suggested-repositories search-results page to an HTML string.

    Loads the 'suggest.htm.j2' Jinja2 template from the package's res/
    directory and fills it with the user login and repository list.
    """
    here = path.abspath(path.dirname(__file__))
    env = Environment(loader=FileSystemLoader(path.join(here, "res/")))
    suggest = env.get_template("suggest.htm.j2")
    return suggest.render(
        logo=path.join(here, "res/logo.png"),
        user_login=self.user,
        repos=self.repos,
    )
Method to convert the repository list to a search results page .
18,457
def to_html(self, write_to):
    """Render the search-results page and write it to *write_to* as UTF-8."""
    rendered = self.get_html().encode("utf-8")
    with open(write_to, "wb") as out_file:
        out_file.write(rendered)
Method to convert the repository list to a search results page and write it to a HTML file .
18,458
def get_unique_repositories(repo_list):
    """Deduplicate *repo_list* by full_name, keeping first occurrences in order."""
    seen_names = set()
    unique_repos = []
    for repository in repo_list:
        if repository.full_name not in seen_names:
            seen_names.add(repository.full_name)
            unique_repos.append(repository)
    return unique_repos
Method to create unique list of repositories from the list of repositories given .
18,459
def minus(repo_list_a, repo_list_b):
    """Return repositories (by full_name) that are in *repo_list_a* but not
    *repo_list_b*, deduplicated and in a's order."""
    excluded = {repo.full_name for repo in repo_list_b}
    difference = []
    for repo in repo_list_a:
        if repo.full_name not in excluded:
            # Mark as excluded so later duplicates within a are dropped too.
            excluded.add(repo.full_name)
            difference.append(repo)
    return difference
Method to create a list of repositories such that the repository belongs to repo list a but not repo list b .
18,460
def __populate_repositories_of_interest(self, username):
    """Collect the starred repositories used as the basis for suggestions.

    Always includes the user's own starred repositories; with deep_dive
    enabled it also pulls the stars of everyone the user follows.
    """
    user = self.github.get_user(username)
    self.user_starred_repositories.extend(user.get_starred())
    if self.deep_dive:
        for following_user in user.get_following():
            self.user_following_starred_repositories.extend(following_user.get_starred())
Method to populate repositories which will be used to suggest repositories for the user . For this purpose we use two kinds of repositories .
18,461
def __get_interests(self):
    """Return the deduplicated descriptions of all repositories of interest."""
    interesting = itertools.chain(
        self.user_starred_repositories,
        self.user_following_starred_repositories,
    )
    # A set drops duplicate descriptions before converting back to a list.
    unique_descriptions = {repo.description for repo in interesting}
    return list(unique_descriptions)
Method to procure description of repositories the authenticated user is interested in .
18,462
def __get_words_to_ignore(self):
    """Compile the set of words to ignore: English stopwords, GitHub language
    names, and a hand-maintained 'others' list shipped with the package."""
    english_stopwords = stopwords.words("english")
    here = path.abspath(path.dirname(__file__))
    git_languages = []
    with open(path.join(here, "gitlang/languages.txt"), "r") as langauges:
        git_languages = [line.strip() for line in langauges]
    words_to_avoid = []
    with open(path.join(here, "gitlang/others.txt"), "r") as languages:
        words_to_avoid = [line.strip() for line in languages]
    return set(itertools.chain(english_stopwords, git_languages, words_to_avoid))
Compiles list of all words to ignore .
18,463
def __clean_and_tokenize(self, doc_list):
    """Lower-case, tokenize and filter the given documents.

    Drops None and overlong descriptions, keeps only alphabetic tokens that
    are dictionary words and not stopwords. Returns a list of token lists.
    """
    doc_list = filter(
        lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
        doc_list,
    )
    cleaned_doc_list = list()
    # Alphabetic-only tokenizer: numbers and punctuation are discarded.
    tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
    stopwords = self.__get_words_to_ignore()
    dict_words = self.__get_words_to_consider()
    for doc in doc_list:
        lower = doc.lower()
        tokens = tokenizer.tokenize(lower)
        tokens = [tok for tok in tokens if tok in dict_words]
        tokens = [tok for tok in tokens if tok not in stopwords]
        tokens = [tok for tok in tokens if tok is not None]
        cleaned_doc_list.append(tokens)
    return cleaned_doc_list
Method to clean and tokenize the document list .
18,464
def __construct_lda_model(self):
    """Build a single-topic LDA model over the user's repository descriptions
    and store it on self.lda_model."""
    repos_of_interest = self.__get_interests()
    cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
    # Guard against an empty corpus by feeding a nonsense placeholder token
    # -- presumably because gensim cannot train on nothing; confirm.
    if not cleaned_tokens:
        cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
    dictionary = corpora.Dictionary(cleaned_tokens)
    corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
    self.lda_model = models.ldamodel.LdaModel(corpus, num_topics=1, id2word=dictionary, passes=10)
Method to create LDA model to procure list of topics from .
18,465
def __get_query_for_repos(self, term_count=5):
    """Build a space-separated search query from the top *term_count* LDA topic terms."""
    topic_terms = self.lda_model.get_topic_terms(0, topn=term_count)
    # Each term is a (word_id, weight) pair; map ids back to words.
    words = [self.lda_model.id2word[term_id] for term_id, _ in topic_terms]
    return " ".join(words)
Method to procure query based on topics authenticated user is interested in .
18,466
def get_suggested_repositories(self):
    """Yield suggested repositories, most-starred first.

    Computed lazily once: queries GitHub with progressively shorter
    LDA-derived term queries, removes repositories the user already starred,
    drops entries without a usable description, sorts by stargazer count and
    deduplicates. Subsequent calls reuse the cached list.
    """
    if self.suggested_repositories is None:
        repository_set = list()
        # Query with 5, then 4, then 3 of the top topic terms.
        for term_count in range(5, 2, -1):
            query = self.__get_query_for_repos(term_count=term_count)
            repository_set.extend(self.__get_repos_for_query(query))
        # Never suggest something the user has already starred.
        catchy_repos = GitSuggest.minus(repository_set, self.user_starred_repositories)
        filtered_repos = []
        if len(catchy_repos) > 0:
            for repo in catchy_repos:
                if (repo is not None and repo.description is not None and len(repo.description) <= GitSuggest.MAX_DESC_LEN):
                    filtered_repos.append(repo)
        # Best (most starred) suggestions first.
        filtered_repos = sorted(
            filtered_repos,
            key=attrgetter("stargazers_count"),
            reverse=True,
        )
        self.suggested_repositories = GitSuggest.get_unique_repositories(filtered_repos)
    for repository in self.suggested_repositories:
        yield repository
Method to procure suggested repositories for the user .
18,467
def guess_type(s):
    """Best-effort conversion of *s* to int or float (commas stripped first);
    the original string is returned unchanged when neither works."""
    stripped = s.replace(',', '')
    for converter in (int, float):
        try:
            return converter(stripped)
        except ValueError:
            pass
    return s
attempt to convert string value into numeric type
18,468
def parse(self, node):
    """Yield Field objects extracted from *node*.

    Nested parts that are already Fields are passed through (with the
    collected attrs attached); loose text fragments are joined,
    whitespace-trimmed and type-guessed into a single Field. A node that
    produces nothing yields one empty Field so every node is represented.
    """
    self._attrs = {}
    vals = []
    yielded = False
    for x in self._read_parts(node):
        if isinstance(x, Field):
            yielded = True
            x.attrs = self._attrs
            yield x
        else:
            vals.append(ustr(x).strip(' \n\t'))
    # Collapse the leftover text fragments into one value, if any remain.
    joined = ' '.join([x for x in vals if x])
    if joined:
        yielded = True
        yield Field(node, guess_type(joined), self._attrs)
    if not yielded:
        yield Field(node, "", self._attrs)
Return generator yielding Field objects for a given node
18,469
def parse(self, *nodes):
    """Parse one or more <tr> nodes, yielding the non-null Row objects."""
    for node in nodes:
        # Skip structurally empty rows outright.
        if not node.contents:
            continue
        parsed_row = self._parse(node)
        if parsed_row.is_null:
            continue
        yield parsed_row
Parse one or more tr nodes yielding wikitables . Row objects
18,470
def _find_header_row(self):
    """Locate the header row: the <tr> with the most <th> cells.

    Removes that row from the working list and returns its <th> nodes, or
    returns None when the table has no <th> cells at all (caller then falls
    back to a generated header).
    """
    th_max = 0
    header_idx = 0
    for idx, tr in enumerate(self._tr_nodes):
        th_count = len(tr.contents.filter_tags(matches=ftag('th')))
        if th_count > th_max:
            th_max = th_count
            header_idx = idx
    # No <th> anywhere: no header row exists.
    if not th_max:
        return
    self._log('found header at row %d (%d <th> elements)' % (header_idx, th_max))
    header_row = self._tr_nodes.pop(header_idx)
    return header_row.contents.filter_tags(matches=ftag('th'))
Evaluate all rows and determine header position based on greatest number of th tagged elements
18,471
def _make_default_header(self):
    """Generate placeholder column names sized to the table's widest row."""
    widest = 0
    for row in self._tr_nodes:
        cell_count = len(row.contents.filter_tags(matches=ftag('td')))
        if cell_count > widest:
            widest = cell_count
    self._log('creating default header (%d columns)' % widest)
    return ['column%d' % n for n in range(0, widest)]
Return a generic placeholder header based on the tables column count
18,472
def fetch_page(self, title, method='GET'):
    """Query the MediaWiki API for a page by *title*.

    Returns the page's revision data; raises ArticleNotFound when the API
    reports no match.
    """
    params = {
        'prop': 'revisions',
        'format': 'json',
        'action': 'query',
        'explaintext': '',
        'titles': title,
        'rvprop': 'content',
    }
    response = self.request(method, self.base_url, params=params)
    response.raise_for_status()
    pages = response.json()["query"]["pages"]
    pageid = list(pages.keys())[0]
    # The API signals "no such article" with a pseudo page id of -1.
    if pageid == '-1':
        raise ArticleNotFound('no matching articles returned')
    return pages[pageid]
Query for page by title
18,473
def print_stack(pid, include_greenlet=False, debugger=None, verbose=False):
    """Attach a debugger to running Python process *pid* and print its thread
    (and optionally greenlet) stacks to stdout.

    Injects Python code into the target via gdb or lldb; the injected code
    writes the traceback into a temp file which is then echoed here. lldb is
    chosen automatically on macOS unless *debugger* says otherwise.
    """
    # Write raw bytes: the debuggee's output encoding is not ours to decode.
    sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout)
    sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr)
    make_args = make_gdb_args
    environ = dict(os.environ)
    if (debugger == 'lldb' or (debugger is None and platform.system().lower() == 'darwin')):
        make_args = make_lldb_args
        # Make sure the system lldb is found first on PATH.
        environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '')
    # NOTE(review): environ is built but never passed to Popen below --
    # possibly a latent bug; confirm intended behaviour.
    # Temp file shared with the debuggee for the injected code's output.
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.chmod(tmp_path, 0o777)
    commands = []
    commands.append(FILE_OPEN_COMMAND)
    commands.extend(UTILITY_COMMANDS)
    commands.extend(THREAD_STACK_COMMANDS)
    if include_greenlet:
        commands.extend(GREENLET_STACK_COMMANDS)
    commands.append(FILE_CLOSE_COMMAND)
    command = r';'.join(commands)
    args = make_args(pid, command % tmp_path)
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if verbose:
        sys_stderr.write(b'Standard Output:\n%s\n' % out)
        sys_stderr.write(b'Standard Error:\n%s\n' % err)
        sys_stderr.flush()
    # Relay whatever the injected code wrote into the temp file.
    for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''):
        sys_stdout.write(chunk)
    sys_stdout.write(b'\n')
    sys_stdout.flush()
Executes a file in a running Python process .
18,474
def cli_main(pid, include_greenlet, debugger, verbose):
    """CLI entry point: print the stack of Python process *pid*.

    Converts DebuggerNotFound into a clean error message and exit code 1.
    """
    try:
        print_stack(pid, include_greenlet, debugger, verbose)
    except DebuggerNotFound as e:
        click.echo('DebuggerNotFound: %s' % e.args[0], err=True)
        click.get_current_context().exit(1)
Print stack of python process .
18,475
def forward_algo(self, observations):
    """Probability of *observations* under the model (forward algorithm, unscaled)."""
    # Initialise alpha: start distribution weighted by the first emission.
    first_ob = self.obs_map[observations[0]]
    alpha = np.multiply(np.transpose(self.em_prob[:, first_ob]), self.start_prob)
    # Inductive step: propagate through transitions, fold in each emission.
    for stage in range(1, len(observations)):
        ob = self.obs_map[observations[stage]]
        alpha = np.multiply(np.dot(alpha, self.trans_prob), np.transpose(self.em_prob[:, ob]))
    # Total probability is the sum over all final states.
    return alpha.sum()
Finds the probability of an observation sequence for given model parameters
18,476
def viterbi(self, observations):
    """Return the most likely hidden-state sequence for *observations*
    (Viterbi algorithm), as a list of state names.

    delta holds the (re-normalised) probability of the best path ending in
    each state; old_path/new_path store back-pointers expanded into full
    paths at every stage.
    """
    total_stages = len(observations)
    num_states = len(self.states)
    old_path = np.zeros((total_stages, num_states))
    new_path = np.zeros((total_stages, num_states))
    # Initialisation from the start distribution and first emission.
    ob_ind = self.obs_map[observations[0]]
    delta = np.multiply(np.transpose(self.em_prob[:, ob_ind]), self.start_prob)
    # Re-normalise each step to avoid underflow on long sequences.
    delta = delta / np.sum(delta)
    old_path[0, :] = [i for i in range(num_states)]
    for curr_t in range(1, total_stages):
        ob_ind = self.obs_map[observations[curr_t]]
        # temp combines previous best probabilities, transitions and emission.
        temp = np.multiply(np.multiply(delta, self.trans_prob.transpose()), self.em_prob[:, ob_ind])
        delta = temp.max(axis=1).transpose()
        delta = delta / np.sum(delta)
        # Best predecessor state for each current state.
        max_temp = temp.argmax(axis=1).transpose()
        max_temp = np.ravel(max_temp).tolist()
        # Extend each stored path with its best predecessor's history.
        for s in range(num_states):
            new_path[:curr_t, s] = old_path[0:curr_t, max_temp[s]]
        new_path[curr_t, :] = [i for i in range(num_states)]
        old_path = new_path.copy()
    # Pick the best final state and map indices back to state names.
    final_max = np.argmax(np.ravel(delta))
    best_path = old_path[:, final_max].tolist()
    best_path_map = [self.state_map[i] for i in best_path]
    return best_path_map
Finds the most likely sequence of hidden states for the given observation sequence (Viterbi algorithm).
18,477
def train_hmm(self, observation_list, iterations, quantities):
    """Baum-Welch training: re-estimate emission, transition and start
    probabilities from weighted observation sequences.

    Runs at most *iterations* passes, stopping early once the weighted
    log-likelihood improves by less than 1e-7. Returns the updated
    (em_prob, trans_prob, start_prob).
    """
    obs_size = len(observation_list)
    prob = float('inf')
    q = quantities
    for i in range(iterations):
        emProbNew = np.asmatrix(np.zeros((self.em_prob.shape)))
        transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape)))
        startProbNew = np.asmatrix(np.zeros((self.start_prob.shape)))
        # Accumulate expected counts over every sequence, weighted by q.
        for j in range(obs_size):
            emProbNew = emProbNew + q[j] * self._train_emission(observation_list[j])
            transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j])
            startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j])
        # Normalise so each distribution's rows sum to one.
        em_norm = emProbNew.sum(axis=1)
        trans_norm = transProbNew.sum(axis=1)
        start_norm = startProbNew.sum(axis=1)
        emProbNew = emProbNew / em_norm.transpose()
        startProbNew = startProbNew / start_norm.transpose()
        transProbNew = transProbNew / trans_norm.transpose()
        self.em_prob, self.trans_prob = emProbNew, transProbNew
        self.start_prob = startProbNew
        # Convergence check on the weighted log-likelihood.
        if prob - self.log_prob(observation_list, quantities) > 0.0000001:
            prob = self.log_prob(observation_list, quantities)
        else:
            return self.em_prob, self.trans_prob, self.start_prob
    return self.em_prob, self.trans_prob, self.start_prob
Runs the Baum Welch Algorithm and finds the new model parameters
18,478
def log_prob(self, observations_list, quantities):
    """Weighted log-likelihood of several observation sequences, computed from
    the scaling factors returned by the forward pass (_alpha_cal)."""
    total = 0
    for seq_idx, seq in enumerate(observations_list):
        _, scaling = self._alpha_cal(seq)
        # Negated sum of log scale factors -- presumably the scaling encodes
        # 1/P(seq); confirm against _alpha_cal.
        total += -1 * quantities[seq_idx] * np.sum(np.log(scaling))
    return total
Finds the weighted log probability of a list of observation sequences.
18,479
def __fetch_data(self, url):
    """Fetch *url* (with the API key appended) and return the parsed XML root.

    FRED reports errors as an XML body on an HTTP error status; that
    message is surfaced as a ValueError.
    """
    url += '&api_key=' + self.api_key
    try:
        response = urlopen(url)
        root = ET.fromstring(response.read())
    except HTTPError as exc:
        # The error response body carries the API's error message.
        root = ET.fromstring(exc.read())
        raise ValueError(root.get('message'))
    return root
helper function for fetching data given a request URL
18,480
def _parse(self, date_str, format='%Y-%m-%d'):
    """Parse a FRED date string into a plain datetime (via pandas)."""
    parsed = pd.to_datetime(date_str, format=format)
    # pandas Timestamps expose to_pydatetime(); other results pass through.
    if hasattr(parsed, 'to_pydatetime'):
        parsed = parsed.to_pydatetime()
    return parsed
helper function for parsing FRED date string into datetime
18,481
def get_series_first_release(self, series_id):
    """First-release values for a FRED series, ignoring all later revisions.

    E.g. US GDP Q1 2014 was first released as 17149.6 and later revised to
    17101.3 and 17016.0; only 17149.6 is kept. Returns a Series indexed by
    date.
    """
    all_releases = self.get_series_all_releases(series_id)
    # The first row within each date group is the initial release.
    first_rows = all_releases.groupby('date').head(1)
    return first_rows.set_index('date')['value']
Get first - release data for a Fred series id . This ignores any revision to the data series . For instance The US GDP for Q1 2014 was first released to be 17149 . 6 and then later revised to 17101 . 3 and 17016 . 0 . This will ignore revisions after the first release .
18,482
def get_series_as_of_date(self, series_id, as_of_date):
    """Data for a FRED series as it was known on *as_of_date*.

    Includes revisions published on or before that date and excludes any
    later ones.
    """
    cutoff = pd.to_datetime(as_of_date)
    all_releases = self.get_series_all_releases(series_id)
    return all_releases[all_releases['realtime_start'] <= cutoff]
Get latest data for a Fred series id as known on a particular date . This includes any revision to the data series before or on as_of_date but ignores any revision on dates after as_of_date .
18,483
def get_series_vintage_dates(self, series_id):
    """Dates on which the series' data values were revised or newly released.

    Raises ValueError when no vintage dates exist for *series_id*.
    """
    url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
    root = self.__fetch_data(url)
    if root is None:
        raise ValueError('No vintage date exists for series id: ' + series_id)
    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly is the supported equivalent.
    return [self._parse(child.text) for child in root]
Get a list of vintage dates for a series . Vintage dates are the dates in history when a series data values were revised or new data values were released .
18,484
def __do_series_search(self, url):
    """Issue one search request and parse the results into a DataFrame.

    Returns (data, num_results_total): *data* is None when nothing matched;
    *num_results_total* is FRED's total match count, which may exceed the
    rows returned in this single response.
    """
    root = self.__fetch_data(url)
    series_ids = []
    data = {}
    num_results_returned = 0
    num_results_total = int(root.get('count'))
    # Attributes copied verbatim from each series element.
    fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end", "frequency", "frequency_short", "units", "units_short", "seasonal_adjustment", "seasonal_adjustment_short", "last_updated", "popularity", "notes"]
    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterate the
    # element directly. Also hoisted the invariant `fields` out of the loop.
    for child in root:
        num_results_returned += 1
        series_id = child.get('id')
        series_ids.append(series_id)
        data[series_id] = {"id": series_id}
        for field in fields:
            data[series_id][field] = child.get(field)
    if num_results_returned > 0:
        data = pd.DataFrame(data, columns=series_ids).T
        # Convert date-like columns to datetimes (format inferred).
        for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
            data[field] = data[field].apply(self._parse, format=None)
        data.index.name = 'series id'
    else:
        data = None
    return data, num_results_total
helper function for making one HTTP request for data and parsing the returned results into a DataFrame
18,485
def __get_search_results(self, url, limit, order_by, sort_order, filter):
    """Collect search results up to *limit* rows (0 means all available).

    Validates and appends order_by, filter and sort_order to the query
    string. FRED truncates each response to max_results_per_request rows, so
    additional requests with increasing offsets are issued and concatenated.
    Returns a DataFrame, or None when nothing matched.
    """
    order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity']
    if order_by is not None:
        if order_by in order_by_options:
            url = url + '&order_by=' + order_by
        else:
            raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
    if filter is not None:
        if len(filter) == 2:
            url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
        else:
            raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')
    sort_order_options = ['asc', 'desc']
    if sort_order is not None:
        if sort_order in sort_order_options:
            url = url + '&sort_order=' + sort_order
        else:
            raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
    data, num_results_total = self.__do_series_search(url)
    if data is None:
        return data
    if limit == 0:
        max_results_needed = num_results_total
    else:
        max_results_needed = limit
    if max_results_needed > self.max_results_per_request:
        for i in range(1, max_results_needed // self.max_results_per_request + 1):
            offset = i * self.max_results_per_request
            next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
            # BUG FIX: DataFrame.append() was removed in pandas 2.0; use
            # pd.concat() instead.
            data = pd.concat([data, next_data])
    return data.head(max_results_needed)
helper function for getting search results up to specified limit on the number of results . The Fred HTTP API truncates to 1000 results per request so this may issue multiple HTTP requests to obtain more available data .
18,486
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
    """Full-text search over the FRED series catalogue.

    Returns a DataFrame describing the matching series (or None when there
    are no matches).
    """
    search_url = "%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text))
    return self.__get_search_results(search_url, limit, order_by, sort_order, filter)
Do a fulltext search for series in the Fred dataset . Returns information about matching series in a DataFrame .
18,487
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
    """Series belonging to the given release id, as a DataFrame.

    Raises ValueError when the release has no series.
    """
    release_url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
    results = self.__get_search_results(release_url, limit, order_by, sort_order, filter)
    if results is None:
        raise ValueError('No series exists for release id: ' + str(release_id))
    return results
Search for series that belongs to a release id . Returns information about matching series in a DataFrame .
18,488
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
    """Series belonging to the given category id, as a DataFrame.

    Raises ValueError when the category has no series.
    """
    category_url = "%s/category/series?category_id=%d&" % (self.root_url, category_id)
    results = self.__get_search_results(category_url, limit, order_by, sort_order, filter)
    if results is None:
        raise ValueError('No series exists for category id: ' + str(category_id))
    return results
Search for series that belongs to a category id . Returns information about matching series in a DataFrame .
18,489
def init(self, ca, csr, **kwargs):
    """Create a signed certificate from a CSR and persist it to the database.

    Extra keyword arguments are forwarded to sign_cert(). Returns the saved
    certificate model instance.
    """
    c = self.model(ca=ca)
    c.x509, csr = self.sign_cert(ca, csr, **kwargs)
    # Store the CSR alongside the certificate, PEM-encoded.
    c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8')
    c.save()
    # Let listeners react to the issued certificate.
    post_issue_cert.send(sender=self.model, cert=c)
    return c
Create a signed certificate from a CSR and store it to the database .
18,490
def download_bundle_view(self, request, pk):
    """View that lets the user download a certificate bundle in PEM format."""
    # Delegate to the shared download helper, requesting the full bundle.
    response = self._download_response(request, pk, bundle=True)
    return response
A view that allows the user to download a certificate bundle in PEM format .
18,491
def get_actions(self, request):
    """Return the admin actions with the bulk 'delete_selected' action removed."""
    actions = super(CertificateMixin, self).get_actions(request)
    # pop with a default so a missing action is not an error.
    actions.pop('delete_selected', '')
    return actions
Disable the delete selected admin action .
18,492
def get_cert_profile_kwargs(name=None):
    """Build keyword arguments for certificate signing from the named profile.

    Falls back to CA_DEFAULT_PROFILE when *name* is None. Extension kwargs
    (keyUsage, extendedKeyUsage, TLSFeature, ocsp_no_check) are only
    included when the profile actually defines a value for them.
    """
    if name is None:
        name = ca_settings.CA_DEFAULT_PROFILE
    # Deep copy so callers may mutate the result without touching settings.
    profile = deepcopy(ca_settings.CA_PROFILES[name])
    kwargs = {
        'cn_in_san': profile['cn_in_san'],
        'subject': get_default_subject(name=name),
    }
    key_usage = profile.get('keyUsage')
    if key_usage and key_usage.get('value'):
        kwargs['key_usage'] = KeyUsage(key_usage)
    ext_key_usage = profile.get('extendedKeyUsage')
    if ext_key_usage and ext_key_usage.get('value'):
        kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage)
    tls_feature = profile.get('TLSFeature')
    if tls_feature and tls_feature.get('value'):
        kwargs['tls_feature'] = TLSFeature(tls_feature)
    if profile.get('ocsp_no_check'):
        kwargs['ocsp_no_check'] = profile['ocsp_no_check']
    return kwargs
Get kwargs suitable for get_cert X509 keyword arguments from the given profile .
18,493
def format_name(subject):
    """Return *subject* as a canonical '/key=value/...' distinguished-name string."""
    # Accept either an x509.Name or an iterable of (key, value) pairs.
    if isinstance(subject, x509.Name):
        subject = [(OID_NAME_MAPPINGS[attr.oid], attr.value) for attr in subject]
    pieces = ['%s=%s' % (force_text(key), force_text(value)) for key, value in subject]
    return '/%s' % '/'.join(pieces)
Convert a subject into the canonical form for distinguished names .
18,494
def format_general_name(name):
    """Format a single x509 general name as 'type:value'."""
    if isinstance(name, x509.DirectoryName):
        # Directory names hold a full subject that needs DN formatting.
        formatted = format_name(name.value)
    else:
        formatted = name.value
    return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], formatted)
Format a single general name .
18,495
def add_colons(s):
    """Return *s* with a colon inserted after every second character."""
    pairs = (s[pos:pos + 2] for pos in range(0, len(s), 2))
    return ':'.join(pairs)
Add colons after every second digit .
18,496
def int_to_hex(i):
    """Hex representation of serial *i*: uppercase, colon-separated per byte pair."""
    hexed = hex(i)[2:].upper()
    # Python 2 longs render with a trailing 'L' that must be stripped; the
    # `long` name is only evaluated when six.PY2 short-circuits to True.
    if six.PY2 is True and isinstance(i, long):
        hexed = hexed[:-1]
    return add_colons(hexed)
Create a hex - representation of the given serial .
18,497
def parse_name(name):
    """Parse an OpenSSL-style subject string (e.g. '/CN=example.com') into a
    sorted list of (field, value) tuples.

    Raises ValueError on unknown fields or on duplicates of fields that may
    only appear once.
    """
    name = name.strip()
    if not name:
        return []
    try:
        items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]
    except KeyError as e:
        raise ValueError('Unknown x509 name field: %s' % e.args[0])
    # Most fields may appear only once; MULTIPLE_OIDS whitelists the exceptions.
    for key, oid in NAME_OID_MAPPINGS.items():
        if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:
            raise ValueError('Subject contains multiple "%s" fields' % key)
    return sort_name(items)
Parses a subject string as used in OpenSSLs command line utilities .
18,498
def parse_general_name(name):
    """Parse a general name from user input.

    Accepts an optional 'type:' prefix (uri, email, ip, rid, othername,
    dirname, or anything else for DNS); without a prefix the type is guessed
    heuristically. Returns a cryptography x509 general-name instance; raises
    ValueError on unparsable input.
    """
    name = force_text(name)
    typ = None
    match = GENERAL_NAME_RE.match(name)
    if match is not None:
        typ, name = match.groups()
        typ = typ.lower()
    if typ is None:
        # No explicit prefix: try URI, e-mail, dirname, IP, then DNS.
        if re.match('[a-z0-9]{2,}://', name):
            try:
                return x509.UniformResourceIdentifier(name)
            except Exception:
                pass
        if '@' in name:
            try:
                return x509.RFC822Name(validate_email(name))
            except Exception:
                pass
        if name.strip().startswith('/'):
            return x509.DirectoryName(x509_name(name))
        try:
            return x509.IPAddress(ip_address(name))
        except ValueError:
            pass
        try:
            return x509.IPAddress(ip_network(name))
        except ValueError:
            pass
        # Fall back to a DNS name; idna.encode() validates the label, with
        # wildcard ('*.') and leading-dot prefixes stripped before encoding.
        if name.startswith('*.'):
            idna.encode(name[2:])
        elif name.startswith('.'):
            idna.encode(name[1:])
        else:
            idna.encode(name)
        return x509.DNSName(name)
    if typ == 'uri':
        return x509.UniformResourceIdentifier(name)
    elif typ == 'email':
        return x509.RFC822Name(validate_email(name))
    elif typ == 'ip':
        try:
            return x509.IPAddress(ip_address(name))
        except ValueError:
            pass
        try:
            return x509.IPAddress(ip_network(name))
        except ValueError:
            pass
        raise ValueError('Could not parse IP address.')
    elif typ == 'rid':
        return x509.RegisteredID(x509.ObjectIdentifier(name))
    elif typ == 'othername':
        # otherName entries look like 'oid;ASN_TYPE:value'.
        regex = "(.*);(.*):(.*)"
        if re.match(regex, name) is not None:
            oid, asn_typ, val = re.match(regex, name).groups()
            oid = x509.ObjectIdentifier(oid)
            if asn_typ == 'UTF8':
                val = val.encode('utf-8')
            elif asn_typ == 'OctetString':
                val = bytes(bytearray.fromhex(val))
                val = OctetString(val).dump()
            else:
                raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ)
            val = force_bytes(val)
            return x509.OtherName(oid, val)
        else:
            raise ValueError('Incorrect otherName format: %s' % name)
    elif typ == 'dirname':
        return x509.DirectoryName(x509_name(name))
    else:
        # Any other prefix is treated as a DNS name, validated as above.
        if name.startswith('*.'):
            idna.encode(name[2:])
        elif name.startswith('.'):
            idna.encode(name[1:])
        else:
            idna.encode(name)
        return x509.DNSName(name)
Parse a general name from user input .
18,499
def parse_hash_algorithm(value=None):
    """Resolve *value* to a hash-algorithm instance.

    None yields the configured default; a HashAlgorithm class is
    instantiated; an instance is returned as-is; a string is looked up on
    the hashes module. Anything else raises ValueError.
    """
    if value is None:
        return ca_settings.CA_DIGEST_ALGORITHM
    if isinstance(value, type) and issubclass(value, hashes.HashAlgorithm):
        return value()
    if isinstance(value, hashes.HashAlgorithm):
        return value
    if isinstance(value, six.string_types):
        try:
            return getattr(hashes, value.strip())()
        except AttributeError:
            raise ValueError('Unknown hash algorithm: %s' % value)
    raise ValueError('Unknown type passed: %s' % type(value).__name__)
Parse a hash algorithm value .