idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
50,400
def scrape(cls, selector, root, xpath=False):
    """Return an EntityList of entities built from every element matching *root*."""
    log.debug('Called scrape classmethod with root: %s' % root)
    if xpath:
        matches = selector.xpath(root)
    else:
        matches = selector.css(root)
    entities = [cls(match) for match in matches]
    return EntityList(*entities)
Return EntityList for the given selector .
50,401
def serialize(self):
    """Convert Entity to a python dictionary."""
    data = {}
    for field_name in self:
        field = self.fields.get(field_name)
        value = self._values.get(field_name)
        if value is not None:
            if field.all:
                value = [field.serialize(item) for item in value]
            else:
                value = field.serialize(value)
        # Non-nullable fields with empty values are omitted entirely.
        is_empty = (field.all and value == []) or (not field.all and value in {None, ''})
        if not field.null and is_empty:
            continue
        data[field.name] = value
    return data
Convert Entity to python dictionary .
50,402
def to_json(self, *args, **kwargs):
    """Convert Entity to JSON; extra arguments are passed through to json.dumps."""
    serialized = self.serialize()
    return json.dumps(serialized, *args, **kwargs)
Convert Entity to JSON .
50,403
def from_file(cls, f, fname=None, readers=None):
    """Create a Document from a file.

    :param f: A file-like object, or a path to a file.
    :param fname: Optional filename hint used for reader detection.
    :param readers: Optional list of readers to try.
    """
    opened = False
    if isinstance(f, six.string_types):
        f = io.open(f, 'rb')
        opened = True  # we created the handle, so we are responsible for closing it
    try:
        if not fname and hasattr(f, 'name'):
            fname = f.name
        return cls.from_string(f.read(), fname=fname, readers=readers)
    finally:
        # Fix: the original leaked the file handle when given a path.
        if opened:
            f.close()
Create a Document from a file .
50,404
def from_string(cls, fstring, fname=None, readers=None):
    """Create a Document from a byte string containing the contents of a file."""
    if readers is None:
        from ..reader import DEFAULT_READERS
        readers = DEFAULT_READERS
    if isinstance(fstring, six.text_type):
        raise ReaderError('from_string expects a byte string, not a unicode string')
    # Try each reader in turn; the first that detects and parses wins.
    for reader in readers:
        if not reader.detect(fstring, fname=fname):
            continue
        try:
            document = reader.readstring(fstring)
        except ReaderError:
            continue
        log.debug('Parsed document with %s' % reader.__class__.__name__)
        return document
    raise ReaderError('Unable to read document')
Create a Document from a byte string containing the contents of a file .
50,405
def get_element_with_id(self, id):
    """Return the first element with the specified ID, or None if absent."""
    for el in self.elements:
        if el.id == id:
            return el
    return None
Return the element with the specified ID .
50,406
def serialize(self):
    """Convert Document to a python dictionary."""
    return {
        'type': 'document',
        'elements': [element.serialize() for element in self.elements],
    }
Convert Document to python dictionary .
50,407
def parse(self, xmp):
    """Run parser and return a dictionary of all the parsed metadata.

    :param xmp: XMP metadata as an XML string/bytes.
    :returns: dict mapping namespace -> {tag: value}.
    """
    tree = etree.fromstring(xmp)
    rdf_tree = tree.find(RDF_NS + 'RDF')
    meta = defaultdict(dict)
    for desc in rdf_tree.findall(RDF_NS + 'Description'):
        # Iterate the element directly: getchildren() is deprecated and
        # removed in recent ElementTree/lxml releases.
        for el in desc:
            ns, tag = self._parse_tag(el)
            value = self._parse_value(el)
            meta[ns][tag] = value
    return dict(meta)
Run parser and return a dictionary of all the parsed metadata .
50,408
def _parse_tag ( self , el ) : ns = None tag = el . tag if tag [ 0 ] == '{' : ns , tag = tag [ 1 : ] . split ( '}' , 1 ) if self . ns_map and ns in self . ns_map : ns = self . ns_map [ ns ] return ns , tag
Extract the namespace and tag from an element .
50,409
def _parse_value(self, el):
    """Extract the metadata value from an element.

    Bag/Seq containers become lists, Alt becomes a lang->text dict,
    anything else falls back to the element's own text.
    """
    for container in ('Bag', 'Seq'):
        if el.find(RDF_NS + container) is not None:
            items = el.findall(RDF_NS + container + '/' + RDF_NS + 'li')
            return [li.text for li in items]
    if el.find(RDF_NS + 'Alt') is not None:
        items = el.findall(RDF_NS + 'Alt/' + RDF_NS + 'li')
        return {li.get(XML_NS + 'lang'): li.text for li in items}
    return el.text
Extract the metadata value from an element .
50,410
def parse_rsc_html(htmlstring):
    """Messy RSC HTML needs this special parser to fix problems before creating a selector.

    :param htmlstring: Raw HTML (bytes or str) from an RSC page.
    :returns: lxml root element with stray tail text wrapped in <p class="otherpara">.
    """
    converted = UnicodeDammit(htmlstring)
    if not converted.unicode_markup:
        # Fix: UnicodeDecodeError requires the five-argument form; the original
        # one-argument call raised a TypeError instead of the intended error.
        raw = htmlstring if isinstance(htmlstring, bytes) else htmlstring.encode('utf-8', 'replace')
        raise UnicodeDecodeError('unknown', raw, 0, len(raw), 'Failed to detect encoding')
    root = fromstring(htmlstring, parser=HTMLParser(recover=True, encoding=converted.original_encoding))
    newp = None
    for child in root.get_element_by_id('wrapper'):
        if newp is not None:
            # Flush the synthetic paragraph before block elements / new sections.
            if child.tag in BLOCK_ELEMENTS or child.get('id', '').startswith('sect') or child.getnext() is None:
                child.addprevious(newp)
                newp = None
            else:
                newp.append(child)
        if newp is None and child.tag in BLOCK_ELEMENTS and child.tail and child.tail.strip():
            # Tail text after a block element would otherwise be lost; wrap it.
            newp = Element('p', **{'class': 'otherpara'})
            newp.text = child.tail
            child.tail = ''
    return root
Messy RSC HTML needs this special parser to fix problems before creating selector .
50,411
def replace_rsc_img_chars(document):
    """Replace image characters with unicode equivalents."""
    image_re = re.compile('http://www.rsc.org/images/entities/(?:h[23]+_)?(?:[ib]+_)?char_([0-9a-f]{4})(?:_([0-9a-f]{4}))?\.gif')
    for img in document.xpath('.//img[starts-with(@src, "http://www.rsc.org/images/entities/")]'):
        m = image_re.match(img.get('src'))
        if m:
            u1, u2 = m.group(1), m.group(2)
            # Prefer the explicit lookup table for single-codepoint entities.
            if not u2 and u1 in RSC_IMG_CHARS:
                rep = RSC_IMG_CHARS[u1]
            else:
                # Build the unicode character(s) from the hex codepoint(s) in the URL.
                rep = ('\\u%s' % u1).encode('ascii').decode('unicode-escape')
                if u2:
                    rep += ('\\u%s' % u2).encode('ascii').decode('unicode-escape')
            if img.tail is not None:
                rep += img.tail
            parent = img.getparent()
            if parent is not None:
                previous = img.getprevious()
                # Splice the replacement text into the preceding sibling's tail,
                # or into the parent's text when the img is the first child.
                if previous is not None:
                    previous.tail = (previous.tail or '') + rep
                else:
                    parent.text = (parent.text or '') + rep
                parent.remove(img)
    return document
Replace image characters with unicode equivalents .
50,412
def space_references(document):
    """Ensure a space before reference links so removing them leaves a gap."""
    for ref in document.xpath('.//a/sup/span[@class="sup_ref"]'):
        a = ref.getparent().getparent()
        if a is None:
            continue
        atail = a.tail or ''
        # Only insert a space if the tail doesn't already start with one
        # (or with punctuation that should stay attached).
        if not atail.startswith((')', ',', ' ')):
            a.tail = ' ' + atail
    return document
Ensure a space around reference links so there's a gap when they are removed .
50,413
def load(ctx, input, output):
    """Read word clusters from a file and pickle them to a model file."""
    log.debug('chemdataextractor.cluster.load')
    import pickle
    click.echo('Reading %s' % input.name)
    # Each line is: cluster word frequency (frequency is unused).
    entries = (line.split() for line in input.readlines())
    clusters = {word: cluster for cluster, word, _freq in entries}
    pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL)
Read clusters from file and save to model file .
50,414
def space_labels(document):
    """Ensure space around bold compound labels."""
    label_re = re.compile('^\(L?\d\d?[a-z]?\):?$', re.I)
    for label in document.xpath('.//bold'):
        if not label.text or not label_re.match(label.text):
            continue
        parent = label.getparent()
        previous = label.getprevious()
        # Space before the label: on the parent's text when the label is
        # first, otherwise on the previous sibling's tail.
        if previous is None:
            before = parent.text or ''
            if not before.endswith(' '):
                parent.text = before + ' '
        else:
            before = previous.tail or ''
            if not before.endswith(' '):
                previous.tail = before + ' '
        # Space after the label, on its own tail.
        after = label.tail or ''
        if not after.endswith(' '):
            label.tail = after + ' '
    return document
Ensure space around bold compound labels .
50,415
def tidy_nlm_references(document):
    """Remove punctuation around bibliography references (brackets, commas, hyphen-like dashes)."""
    def strip_preceding(text):
        # Drop an opening bracket immediately before a reference.
        stext = text.rstrip()
        if stext.endswith('[') or stext.endswith('('):
            return stext[:-1]
        return text
    def strip_between(text):
        # Drop a lone separator (comma or dash) between adjacent references.
        stext = text.strip()
        if stext in {',', '-', '\u2013', '\u2212'}:
            return ''
        return text
    def strip_following(text):
        # Drop a closing bracket immediately after a reference.
        stext = text.lstrip()
        if stext.startswith(']') or stext.startswith(')'):
            return stext[1:]
        return text
    for ref in document.xpath('.//xref[@ref-type="bibr"]'):
        parent = ref.getparent()
        previous = ref.getprevious()
        next = ref.getnext()
        # Text before the reference lives on the parent (first child) or on
        # the previous sibling's tail.
        if previous is None:
            parent.text = strip_preceding(parent.text or '')
        else:
            previous.tail = strip_preceding(previous.tail or '')
        # Only strip separators when the next node is another bibliography ref.
        if next is not None and next.tag == 'xref' and next.get('ref-type') == 'bibr':
            ref.tail = strip_between(ref.tail or '')
        ref.tail = strip_following(ref.tail or '')
    return document
Remove punctuation around references like brackets commas hyphens .
50,416
def regex_span_tokenize(s, regex):
    """Yield (start, end) spans identifying tokens in *s*, split using *regex*."""
    left = 0
    for match in re.finditer(regex, s, re.U):
        start, end = match.span()
        # A separator at position 0 would produce an empty leading span; skip it.
        if start != 0:
            yield left, start
        left = end
    yield left, len(s)
Return spans that identify tokens in s split using regex .
50,417
def tokenize(self, s):
    """Return a list of token strings from the given sentence."""
    spans = self.span_tokenize(s)
    return [s[start:end] for start, end in spans]
Return a list of token strings from the given sentence .
50,418
def span_tokenize(self, s):
    """Return a list of integer offsets that identify sentences in the given text."""
    tokenizer = self._tokenizer
    if tokenizer is None:
        # Lazily load and cache the underlying model on first use.
        tokenizer = self._tokenizer = load_model(self.model)
    return tokenizer.span_tokenize(s)
Return a list of integer offsets that identify sentences in the given text .
50,419
def _split_span ( self , span , index , length = 0 ) : offset = span [ 1 ] + index if index < 0 else span [ 0 ] + index return [ ( span [ 0 ] , offset ) , ( offset , offset + length ) , ( offset + length , span [ 1 ] ) ]
Split a span into two or three separate spans at certain indices .
50,420
def _closing_bracket_index ( self , text , bpair = ( '(' , ')' ) ) : level = 1 for i , char in enumerate ( text [ 1 : ] ) : if char == bpair [ 0 ] : level += 1 elif char == bpair [ 1 ] : level -= 1 if level == 0 : return i + 1
Return the index of the closing bracket that matches the opening bracket at the start of the text .
50,421
def _opening_bracket_index ( self , text , bpair = ( '(' , ')' ) ) : level = 1 for i , char in enumerate ( reversed ( text [ : - 1 ] ) ) : if char == bpair [ 1 ] : level += 1 elif char == bpair [ 0 ] : level -= 1 if level == 0 : return len ( text ) - i - 2
Return the index of the opening bracket that matches the closing bracket at the end of the text .
50,422
def _is_saccharide_arrow ( self , before , after ) : if ( before and after and before [ - 1 ] . isdigit ( ) and after [ 0 ] . isdigit ( ) and before . rstrip ( '0123456789' ) . endswith ( '(' ) and after . lstrip ( '0123456789' ) . startswith ( ')-' ) ) : return True else : return False
Return True if the arrow is in a chemical name .
50,423
def get_names(cs):
    """Return a flat list of every name across the given records."""
    return [name for c in cs for name in c.get('names', [])]
Return list of every name .
50,424
def get_labels(cs):
    """Return a flat list of every label across the given records."""
    return [label for c in cs for label in c.get('labels', [])]
Return list of every label .
50,425
def get_ids(cs):
    """Return chemical identifier records, keeping only 'names' and 'labels' keys."""
    keep = {'names', 'labels'}
    return [{k: v for k, v in c.items() if k in keep} for c in cs]
Return chemical identifier records .
50,426
def memoized_property(fget):
    """Decorator to create a read-only property computed once and cached per instance."""
    attr_name = '_{}'.format(fget.__name__)

    @functools.wraps(fget)
    def fget_memoized(self):
        # EAFP: the cached attribute only exists after the first access.
        try:
            return getattr(self, attr_name)
        except AttributeError:
            value = fget(self)
            setattr(self, attr_name, value)
            return value

    return property(fget_memoized)
Decorator to create memoized properties .
50,427
def memoize(obj):
    """Decorator to memoize a callable on its positional arguments.

    The cache is exposed on the wrapped callable as ``obj.cache``.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        if args in cache:
            return cache[args]
        result = obj(*args, **kwargs)
        cache[args] = result
        return result

    return memoizer
Decorator to create memoized functions methods or classes .
50,428
def evaluate(self, gold):
    """Return the token-level accuracy of this tagger against a gold corpus."""
    untagged = ([w for (w, t) in sent] for sent in gold)
    tagged_sents = self.tag_sents(untagged)
    # Flatten both corpora to token lists and compare pairwise.
    gold_tokens = sum(gold, [])
    test_tokens = sum(tagged_sents, [])
    correct = sum(x == y for x, y in six.moves.zip(gold_tokens, test_tokens))
    return float(correct) / len(test_tokens)
Evaluate the accuracy of this tagger using a gold standard corpus .
50,429
def predict(self, features):
    """Dot-product the features with the current weights and return the best label."""
    scores = defaultdict(float)
    for feat in features:
        # Unknown features contribute nothing.
        for label, weight in self.weights.get(feat, {}).items():
            scores[label] += weight
    # Ties are broken by the label itself so results are deterministic.
    return max(self.classes, key=lambda label: (scores[label], label))
Dot - product the features and current weights and return the best label .
50,430
def update(self, truth, guess, features):
    """Update the feature weights after one prediction."""
    def bump(label, feat, current, delta):
        # Accumulate averaged-perceptron totals before changing a weight.
        param = (feat, label)
        self._totals[param] += (self.i - self._tstamps[param]) * current
        self._tstamps[param] = self.i
        self.weights[feat][label] = current + delta

    self.i += 1
    if truth == guess:
        return None
    for feat in features:
        weights = self.weights.setdefault(feat, {})
        bump(truth, feat, weights.get(truth, 0.0), 1.0)
        bump(guess, feat, weights.get(guess, 0.0), -1.0)
    return None
Update the feature weights .
50,431
def average_weights(self):
    """Replace each weight with its average over all training iterations."""
    for feat, weights in self.weights.items():
        averaged_weights = {}
        for label, weight in weights.items():
            param = (feat, label)
            # Include the span since the last update in the running total.
            total = self._totals[param] + (self.i - self._tstamps[param]) * weight
            averaged = round(total / float(self.i), 3)
            # Weights that average to zero are dropped entirely.
            if averaged:
                averaged_weights[label] = averaged
        self.weights[feat] = averaged_weights
    return None
Average weights from all iterations .
50,432
def save(self, path):
    """Save the pickled model weights to *path*."""
    weights = dict(self.weights)
    with io.open(path, 'wb') as fout:
        return pickle.dump(weights, fout)
Save the pickled model weights .
50,433
def load(self, path):
    """Load the pickled model weights from *path*."""
    with io.open(path, 'rb') as handle:
        weights = pickle.load(handle)
    self.weights = weights
Load the pickled model weights .
50,434
def train(self, sentences, nr_iter=5):
    """Train a model from sentences over ``nr_iter`` averaged-perceptron iterations."""
    self._make_tagdict(sentences)
    self.perceptron.classes = self.classes
    for iter_ in range(nr_iter):
        c = 0  # correct guesses this iteration
        n = 0  # total tokens this iteration
        for sentence in sentences:
            prev, prev2 = self.START
            context = [t[0] for t in sentence]
            for i, (token, tag) in enumerate(sentence):
                # Tokens present in the tag dictionary are taken as-is; only
                # the rest go through (and update) the perceptron.
                guess = self.tagdict.get(token)
                if not guess:
                    feats = self._get_features(i, context, prev, prev2)
                    guess = self.perceptron.predict(feats)
                    self.perceptron.update(tag, guess, feats)
                prev2 = prev
                prev = guess
                c += guess == tag
                n += 1
        # Reshuffle between iterations so example order doesn't bias weights.
        random.shuffle(sentences)
        log.debug('Iter %s: %s/%s=%s' % (iter_, c, n, (float(c) / n) * 100))
    self.perceptron.average_weights()
Train a model from sentences .
50,435
def save(self, f):
    """Pickle the model state (weights, tagdict, classes, clusters) to file object *f*."""
    state = (self.perceptron.weights, self.tagdict, self.classes, self.clusters)
    return pickle.dump(state, f, protocol=pickle.HIGHEST_PROTOCOL)
Save pickled model to file .
50,436
def load(self, model):
    """Load a pickled tagger model and sync the perceptron's class set."""
    state = load_model(model)
    self.perceptron.weights, self.tagdict, self.classes, self.clusters = state
    self.perceptron.classes = self.classes
Load pickled model .
50,437
def train(self, sentences, model):
    """Train the CRF tagger with CRFSuite, then load the trained model."""
    trainer = pycrfsuite.Trainer(verbose=True)
    trainer.set_params(self.params)
    for sent in sentences:
        tokens, labels = zip(*sent)
        feats = [self._get_features(tokens, i) for i, _ in enumerate(tokens)]
        trainer.append(feats, labels)
    trainer.train(model)
    self.load(model)
Train the CRF tagger using CRFSuite .
50,438
def load(self, model):
    """Load the pickled DAWG for *model* from the data directory."""
    path = find_data(model)
    self._dawg.load(path)
    self._loaded_model = True
Load pickled DAWG from disk .
50,439
def build(self, words):
    """Construct the dictionary DAWG from tokenized words."""
    normalized = [self._normalize(tokens) for tokens in words]
    self._dawg = dawg.CompletionDAWG(normalized)
    self._loaded_model = True
Construct dictionary DAWG from tokenized words .
50,440
def _normalize ( self , tokens ) : if self . case_sensitive : return ' ' . join ( self . lexicon [ t ] . normalized for t in tokens ) else : return ' ' . join ( self . lexicon [ t ] . lower for t in tokens )
Normalization transform to apply to both dictionary words and input tokens .
50,441
def standardize_role(role):
    """Convert role text into a standardized form (any product cue -> 'product')."""
    role = role.lower()
    product_cues = {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}
    if any(cue in role for cue in product_cues):
        return 'product'
    return role
Convert role text into standardized form .
50,442
def list(ctx):
    """List data packages and whether each is downloaded locally."""
    log.debug('chemdataextractor.data.list')
    click.echo('Downloaded\tPackage')
    for package in PACKAGES:
        line = '%s\t%s' % (package.local_exists(), package.path)
        click.echo(line)
List active data packages .
50,443
def download(ctx):
    """Download data packages, reporting how many were newly fetched."""
    log.debug('chemdataextractor.data.download')
    # Count successful downloads while preserving the original order.
    count = sum(1 for package in PACKAGES if package.download())
    click.echo('Successfully downloaded %s new data packages (%s existing)' % (count, len(PACKAGES) - count))
Download data .
50,444
def find_data(path, warn=True):
    """Return the absolute path to a data file within the data directory.

    :param path: Relative path of the data file.
    :param warn: If True, log a warning when a known package file is missing.
    """
    full_path = os.path.join(get_data_dir(), path)
    if warn and not os.path.isfile(full_path):
        for package in PACKAGES:
            if path == package.path:
                # Fix: logger.warn() is a deprecated alias for warning().
                log.warning('%s doesn\'t exist. Run `cde data download` to get it.' % path)
                break
    return full_path
Return the absolute path to a data file within the data directory .
50,445
def load_model(path):
    """Load a model from a pickle file in the data directory.

    Results are cached per absolute path so each model is only unpickled once.
    """
    abspath = find_data(path)
    model = _model_cache.get(abspath)
    if model is not None:
        log.debug('Using cached copy of %s' % path)
        return model
    log.debug('Loading model %s' % path)
    try:
        with io.open(abspath, 'rb') as fin:
            model = six.moves.cPickle.load(fin)
    except IOError:
        raise ModelNotFoundError('Could not load %s. Have you run `cde data download`?' % path)
    _model_cache[abspath] = model
    return model
Load a model from a pickle file in the data directory . Cached so model is only loaded once .
50,446
def normalize(self, text):
    """Normalize unicode, hyphens, whitespace and some chemistry spellings."""
    text = super(ChemNormalizer, self).normalize(text)
    if self.chem_spell:
        # Standardize British/IUPAC spellings, case-insensitively.
        for pattern, repl in (('sulph', 'sulf'), ('aluminum', 'aluminium'), ('cesium', 'caesium')):
            text = re.sub(pattern, repl, text, flags=re.I)
    return text
Normalize unicode hyphens whitespace and some chemistry terms and formatting .
50,447
def add(self, text):
    """Add text to the lexicon, computing and caching all lexical features."""
    if text not in self.lexemes:
        normalized = self.normalized(text)
        # All derived features are computed from the normalized form once,
        # then stored on a Lexeme keyed by the raw text.
        self.lexemes[text] = Lexeme(
            text=text,
            normalized=normalized,
            lower=self.lower(normalized),
            first=self.first(normalized),
            suffix=self.suffix(normalized),
            shape=self.shape(normalized),
            length=self.length(normalized),
            upper_count=self.upper_count(normalized),
            lower_count=self.lower_count(normalized),
            digit_count=self.digit_count(normalized),
            is_alpha=self.is_alpha(normalized),
            is_ascii=self.is_ascii(normalized),
            is_digit=self.is_digit(normalized),
            is_lower=self.is_lower(normalized),
            is_upper=self.is_upper(normalized),
            is_title=self.is_title(normalized),
            is_punct=self.is_punct(normalized),
            is_hyphenated=self.is_hyphenated(normalized),
            like_url=self.like_url(normalized),
            like_number=self.like_number(normalized),
            cluster=self.cluster(normalized),
        )
Add text to the lexicon .
50,448
def serialize(self):
    """Convert Table element to a python dictionary."""
    def _serialize_rows(rows):
        # Serialize every cell in each row.
        return [[cell.serialize() for cell in row] for row in rows]

    return {
        'type': self.__class__.__name__,
        'caption': self.caption.serialize(),
        'headings': _serialize_rows(self.headings),
        'rows': _serialize_rows(self.rows),
    }
Convert Table element to python dictionary .
50,449
def merge(self, other):
    """Merge data from another Compound into this Compound (no duplicates)."""
    log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))
    for key in self.keys():
        for new_item in other[key]:
            if new_item not in self[key]:
                self[key].append(new_item)
    log.debug('Result: %s' % self.serialize())
    return self
Merge data from another Compound into this Compound .
50,450
def merge_contextual(self, other):
    """Merge in contextual info from a template Compound into this Compound.

    Only empty values are filled in; existing data is never overwritten.
    """
    for k in self.keys():
        for item in self[k]:
            for other_item in other.get(k, []):
                # Plain strings carry no nested fields to merge.
                if isinstance(other_item, six.text_type):
                    continue
                for otherk in other_item.keys():
                    if isinstance(other_item[otherk], list):
                        if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:
                            # Fill blanks in each of our nested records from the
                            # first nested record of the template.
                            other_nested_item = other_item[otherk][0]
                            for othernestedk in other_nested_item.keys():
                                for nested_item in item[otherk]:
                                    if not nested_item[othernestedk]:
                                        nested_item[othernestedk] = other_nested_item[othernestedk]
                    elif not item[otherk]:
                        item[otherk] = other_item[otherk]
    log.debug('Result: %s' % self.serialize())
    return self
Merge in contextual info from a template Compound .
50,451
def is_id_only(self):
    """Return True if this record contains identifier information only."""
    identifier_keys = {'names', 'labels', 'roles'}
    for key, value in self.items():
        # Any populated non-identifier field disqualifies the record.
        if value and key not in identifier_keys:
            return False
    return bool(self.names or self.labels)
Return True if identifier information only .
50,452
def join(tokens, start, result):
    """Join the text of all children in *result* into a single space-separated element.

    :returns: A one-element list wrapping the joined text, or [] when *result* is empty.
    """
    if not result:
        # Fix: the original unconditionally indexed result[0] below,
        # raising IndexError for an empty result list.
        return []
    texts = []
    for e in result:
        for child in e.iter():
            if child.text is not None:
                texts.append(child.text)
    return [E(result[0].tag, ' '.join(texts))]
Join tokens into a single string with spaces between .
50,453
def strip_stop(tokens, start, result):
    """Remove a trailing full stop from the text of each element in *result*."""
    for e in result:
        for child in e.iter():
            # Fix: child.text can be None for empty elements; the original
            # crashed with AttributeError on .endswith.
            if child.text and child.text.endswith('.'):
                child.text = child.text[:-1]
    return result
Remove trailing full stop from tokens .
50,454
def fix_whitespace(tokens, start, result):
    """Fix whitespace around hyphens and commas, removing tokenization artefacts."""
    for e in result:
        for child in e.iter():
            # Fix: child.text can be None for empty elements; the original
            # crashed with AttributeError on .replace.
            if child.text is None:
                continue
            child.text = child.text.replace(' , ', ', ')
            for hyphen in HYPHENS:
                child.text = child.text.replace(' %s ' % hyphen, '%s' % hyphen)
            child.text = re.sub(r'- (.) -', r'-\1-', child.text)
    return result
Fix whitespace around hyphens and commas . Can be used to remove whitespace tokenization artefacts .
50,455
def detect(self, fstring, fname=None):
    """Have a stab at most files, declining known pdf/html/xml extensions."""
    if fname is None or '.' not in fname:
        return True
    extension = fname.rsplit('.', 1)[1]
    return extension not in {'pdf', 'html', 'xml'}
Have a stab at most files .
50,456
def _process_layout(self, layout):
    """Process an LTPage layout and return a list of elements."""
    elements = []
    for obj in layout:
        if isinstance(obj, (LTTextBox, LTTextLine)):
            elements.append(Paragraph(obj.get_text().strip()))
        elif isinstance(obj, LTFigure):
            # Figures can contain nested text objects; recurse into them.
            elements.extend(self._process_layout(obj))
    return elements
Process an LTPage layout and return a list of elements .
50,457
def get_encoding(input_string, guesses=None, is_html=False):
    """Return the encoding of a byte string, detected via bs4 UnicodeDammit."""
    overrides = [guesses] if guesses else []
    dammit = UnicodeDammit(input_string, override_encodings=overrides, is_html=is_html)
    return dammit.original_encoding
Return the encoding of a byte string . Uses bs4 UnicodeDammit .
50,458
def levenshtein(s1, s2, allow_substring=False):
    """Return the Levenshtein distance between two strings.

    With ``allow_substring=True``, skipping a prefix and/or suffix of *s2*
    is free, giving the minimum distance from *s1* to any substring of *s2*.
    """
    len1, len2 = len(s1), len(s2)
    lev = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    for i in range(len1 + 1):
        lev[i][0] = i
    for j in range(len2 + 1):
        lev[0][j] = 0 if allow_substring else j
    for i in range(len1):
        for j in range(len2):
            substitution = lev[i][j] + (s1[i] != s2[j])
            deletion = lev[i][j + 1] + 1
            insertion = lev[i + 1][j] + 1
            lev[i + 1][j + 1] = min(deletion, insertion, substitution)
    return min(lev[len1]) if allow_substring else lev[len1][len2]
Return the Levenshtein distance between two strings .
50,459
def bracket_level(text, open={'(', '[', '{'}, close={')', ']', '}'}):
    """Return the net bracket nesting level of *text* (0 when balanced or bracket-free)."""
    opens = sum(1 for c in text if c in open)
    closes = sum(1 for c in text if c in close)
    return opens - closes
Return 0 if string contains balanced brackets or no brackets .
50,460
def list(ctx):
    """List all config values."""
    log.debug('chemdataextractor.config.list')
    for key in config:
        click.echo('%s : %s' % (key, config[key]))
List all config values .
50,461
def train_crf(ctx, input, output, clusters):
    """Train the CRF CEM recognizer from a token/tag/IOB corpus file."""
    click.echo('chemdataextractor.crf.train')
    sentences = []
    for line in input:
        sentence = []
        for chunk in line.split():
            # Each chunk is token/tag/iob; rsplit tolerates '/' in the token.
            token, tag, iob = chunk.rsplit('/', 2)
            sentence.append(((token, tag), iob))
        if sentence:
            sentences.append(sentence)
    tagger = CrfCemTagger(clusters=clusters)
    tagger.train(sentences, output)
Train CRF CEM recognizer .
50,462
def sentences(self):
    """Return a list of Sentences that make up this text passage."""
    sents = []
    spans = self.sentence_tokenizer.span_tokenize(self.text)
    for span in spans:
        # Each sentence inherits this passage's tokenizers, taggers and
        # parsers, plus its character offsets within the passage text.
        sent = Sentence(
            text=self.text[span[0]:span[1]],
            start=span[0],
            end=span[1],
            word_tokenizer=self.word_tokenizer,
            lexicon=self.lexicon,
            abbreviation_detector=self.abbreviation_detector,
            pos_tagger=self.pos_tagger,
            ner_tagger=self.ner_tagger,
            parsers=self.parsers,
            document=self.document,
        )
        sents.append(sent)
    return sents
Return a list of Sentences that make up this text passage .
50,463
def records(self):
    """Return a list of records for this text passage."""
    collected = [r for sent in self.sentences for r in sent.records]
    return ModelList(*collected)
Return a list of records for this text passage .
50,464
def tokens(self):
    """Return a list of token Spans for this sentence."""
    spans = self.word_tokenizer.span_tokenize(self.text)
    # Token offsets are shifted to be absolute within the document text.
    return [
        Token(
            text=self.text[start:end],
            start=start + self.start,
            end=end + self.start,
            lexicon=self.lexicon,
        )
        for start, end in spans
    ]
Return a list of token Spans for this sentence .
50,465
def tags(self):
    """Return combined POS and NER tags (NER tags override POS where present)."""
    # Fix: copy first — the original assigned into the self.pos_tags list
    # in place, corrupting it with NER tags on every call.
    tags = list(self.pos_tags)
    for i, tag in enumerate(self.ner_tags):
        if tag is not None:
            tags[i] = tag
    return tags
Return combined POS and NER tags .
50,466
def records(self):
    """Return a list of records for this sentence."""
    compounds = ModelList()
    seen_labels = set()
    # Strip control characters from tokens before parsing.
    tagged_tokens = [(CONTROL_RE.sub('', token), tag) for token, tag in self.tagged_tokens]
    for parser in self.parsers:
        for record in parser.parse(tagged_tokens):
            p = record.serialize()
            # Skip records that serialize to nothing.
            if not p:
                continue
            # Skip duplicates of records already collected.
            if record in compounds:
                continue
            # Skip label/role-only records whose labels were all seen before.
            if all(k in {'labels', 'roles'} for k in p.keys()) and set(record.labels).issubset(seen_labels):
                continue
            seen_labels.update(record.labels)
            compounds.append(record)
    return compounds
Return a list of records for this sentence .
50,467
def prepare_gold(ctx, annotations, gout):
    """Prepare a bc-evaluate gold file from CHEMDNER annotations."""
    click.echo('chemdataextractor.chemdner.prepare_gold')
    for line in annotations:
        # Fields: pmid, title/abstract flag, start, end, text, category.
        pmid, ta, start, end, _text, _category = line.strip().split('\t')
        gout.write('%s\t%s:%s:%s\n' % (pmid, ta, start, end))
Prepare bc - evaluate gold file from annotations supplied by CHEMDNER .
50,468
def prepare_tokens(ctx, input, annotations, tout, lout):
    """Prepare tokenized and tagged corpus files from CHEMDNER source files."""
    click.echo('chemdataextractor.chemdner.prepare_tokens')
    # Index annotations by (pmid, title/abstract flag).
    anndict = defaultdict(list)
    for line in annotations:
        pmid, ta, start, end, text, category = line.strip().split('\t')
        anndict[(pmid, ta)].append((int(start), int(end), text))
    # Each input line carries one PMID with its title and abstract.
    for line in input:
        pmid, title, abstract = line.strip().split(u'\t')
        for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
            tagged = _prep_tags(t, anns)
            for i, sentence in enumerate(tagged):
                # tout gets token/POS/IOB triples, lout token/IOB pairs.
                tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
                lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
                tout.write(u'\n')
                lout.write(u'\n')
            tout.write(u'\n')
            lout.write(u'\n')
Prepare tokenized and tagged corpus file from those supplied by CHEMDNER .
50,469
def _prep_tags(t, annotations):
    """Apply IOB chemical-entity tags and POS tags to the text element *t*."""
    # Start with every token outside an entity.
    tags = [['O' for _ in sent.tokens] for sent in t.sentences]
    for start, end, text in annotations:
        done_first = False
        for i, sent in enumerate(t.sentences):
            for j, token in enumerate(sent.tokens):
                # Any token overlapping the annotation span belongs to the
                # entity; the first gets B-CM, subsequent ones I-CM.
                if start <= token.start < end or start < token.end <= end:
                    tags[i][j] = 'I-CM' if done_first else 'B-CM'
                    done_first = True
    tagged = [[(token[0], token[1], tags[i][j]) for j, token in enumerate(sentence.pos_tagged_tokens)] for i, sentence in enumerate(t.sentences)]
    return tagged
Apply IOB chemical entity tags and POS tags to text .
50,470
def train_all(ctx, output):
    """Train POS tagger on WSJ, GENIA and both, with and without cluster features."""
    click.echo('chemdataextractor.pos.train_all')
    click.echo('Output: %s' % output)
    jobs = [
        ('%s_wsj_nocluster.pickle', 'wsj', False),
        ('%s_wsj.pickle', 'wsj', True),
        ('%s_genia_nocluster.pickle', 'genia', False),
        ('%s_genia.pickle', 'genia', True),
        ('%s_wsj_genia_nocluster.pickle', 'wsj+genia', False),
        ('%s_wsj_genia.pickle', 'wsj+genia', True),
    ]
    for fmt, corpus, clusters in jobs:
        ctx.invoke(train, output=fmt % output, corpus=corpus, clusters=clusters)
Train POS tagger on WSJ GENIA and both . With and without cluster features .
50,471
def evaluate_all(ctx, model):
    """Evaluate each trained POS tagger on both the WSJ and GENIA corpora."""
    click.echo('chemdataextractor.pos.evaluate_all')
    click.echo('Model: %s' % model)
    variants = [
        ('_wsj_nocluster.pickle', False),
        ('_wsj.pickle', True),
        ('_genia_nocluster.pickle', False),
        ('_genia.pickle', True),
        ('_wsj_genia_nocluster.pickle', False),
        ('_wsj_genia.pickle', True),
    ]
    for suffix, clusters in variants:
        for corpus in ('wsj', 'genia'):
            ctx.invoke(evaluate, model=model + suffix, corpus=corpus, clusters=clusters)
Evaluate POS taggers on WSJ and GENIA .
50,472
def train(ctx, output, corpus, clusters):
    """Train POS Tagger on the chosen corpus ('wsj', 'genia' or 'wsj+genia')."""
    click.echo('chemdataextractor.pos.train')
    click.echo('Output: %s' % output)
    click.echo('Corpus: %s' % corpus)
    click.echo('Clusters: %s' % clusters)
    wsj_sents = []
    genia_sents = []
    if corpus == 'wsj' or corpus == 'wsj+genia':
        wsj_sents = list(wsj_training.tagged_sents())
        # Remove all tokens tagged -NONE-.
        for i, wsj_sent in enumerate(wsj_sents):
            wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
    if corpus == 'genia' or corpus == 'wsj+genia':
        genia_sents = list(genia_training.tagged_sents())
        # Remap GENIA-specific tags onto the WSJ-style tagset.
        for i, genia_sent in enumerate(genia_sents):
            for j, (token, tag) in enumerate(genia_sent):
                if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')
                elif tag == ')':
                    genia_sents[i][j] = (token, '-RRB-')
                elif tag == 'CT':
                    genia_sents[i][j] = (token, 'DT')
                elif tag == 'XT':
                    genia_sents[i][j] = (token, 'DT')
                elif tag == '-':
                    genia_sents[i][j] = (token, ':')
                elif tag == 'N':
                    genia_sents[i][j] = (token, 'NN')
                elif tag == 'PP':
                    genia_sents[i][j] = (token, 'PRP')
                elif tag == '' and token == ')':
                    genia_sents[i][j] = (token, '-RRB-')
                elif tag == '' and token == 'IFN-gamma':
                    genia_sents[i][j] = (token, 'NN')
                elif '|' in tag:
                    # Ambiguous tag: keep the first alternative.
                    genia_sents[i][j] = (token, tag.split('|')[0])
            # Discard tokens whose tag is still outside the known tagset.
            genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
    if corpus == 'wsj':
        training_corpus = wsj_sents
    elif corpus == 'genia':
        training_corpus = genia_sents
    elif corpus == 'wsj+genia':
        training_corpus = wsj_sents + genia_sents
    else:
        raise click.ClickException('Invalid corpus')
    tagger = ChemCrfPosTagger(clusters=clusters)
    tagger.train(training_corpus, output)
Train POS Tagger .
50,473
def evaluate(ctx, model, corpus, clusters):
    """Evaluate performance of POS Tagger.

    :param ctx: click context.
    :param model: path to the pickled tagger model.
    :param corpus: 'wsj' or 'genia'.
    :param clusters: whether the model uses word-cluster features.
    """
    click.echo('chemdataextractor.pos.evaluate')
    if corpus == 'wsj':
        evaluation = wsj_evaluation
        sents = list(evaluation.tagged_sents())
        # Drop empty "trace" tokens tagged '-NONE-'.
        for i, sent in enumerate(sents):
            sents[i] = [t for t in sent if t[1] != '-NONE-']
    elif corpus == 'genia':
        evaluation = genia_evaluation
        sents = list(evaluation.tagged_sents())
        # Normalize GENIA bracket tags to Penn Treebank style.
        bracket_map = {'(': '-LRB-', ')': '-RRB-'}
        for sent in sents:
            for j, (token, tag) in enumerate(sent):
                if tag in bracket_map:
                    sent[j] = (token, bracket_map[tag])
    else:
        raise click.ClickException('Invalid corpus')
    tagger = ChemCrfPosTagger(model=model, clusters=clusters)
    accuracy = tagger.evaluate(sents)
    click.echo('%s on %s: %s' % (model, evaluation, accuracy))
Evaluate performance of POS Tagger .
50,474
def evaluate_perceptron(ctx, model, corpus):
    """Evaluate performance of Averaged Perceptron POS Tagger.

    :param ctx: click context.
    :param model: path to the pickled tagger model.
    :param corpus: 'wsj' or 'genia'.
    """
    click.echo('chemdataextractor.pos.evaluate')
    if corpus == 'wsj':
        evaluation = wsj_evaluation
        sents = list(evaluation.tagged_sents())
        # Drop empty "trace" tokens tagged '-NONE-'.
        for i, sent in enumerate(sents):
            sents[i] = [t for t in sent if t[1] != u'-NONE-']
    elif corpus == 'genia':
        evaluation = genia_evaluation
        sents = list(evaluation.tagged_sents())
        # Normalize GENIA bracket tags to Penn Treebank style.
        for sent in sents:
            for j, (token, tag) in enumerate(sent):
                if tag == u'(':
                    sent[j] = (token, u'-LRB-')
                elif tag == u')':
                    sent[j] = (token, u'-RRB-')
    else:
        raise click.ClickException('Invalid corpus')
    tagger = ChemApPosTagger(model=model)
    accuracy = tagger.evaluate(sents)
    click.echo('%s on %s: %s' % (model, evaluation, accuracy))
Evaluate performance of Averaged Perceptron POS Tagger .
50,475
def tag(ctx, input, output):
    """Output POS-tagged tokens.

    Reads a document from ``input``, POS-tags every sentence of every Text
    element, and writes token/tag pairs (space-separated, one sentence per
    line) to ``output``.
    """
    log.info('chemdataextractor.pos.tag')
    log.info('Reading %s' % input.name)
    doc = Document.from_file(input)
    for element in doc.elements:
        if not isinstance(element, Text):
            continue
        for sentence in element.sentences:
            pairs = [u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens]
            output.write(u' '.join(pairs))
            output.write(u'\n')
Output POS - tagged tokens .
50,476
def make_request(self, session, url, **kwargs):
    """Make a HTTP GET request.

    :param session: requests session to issue the request with.
    :param url: URL to fetch.
    :returns: the response object.
    """
    log.debug('Making request: GET %s %s' % (url, kwargs))
    response = session.get(url, **kwargs)
    return response
Make a HTTP GET request .
50,477
def make_request(self, session, url, **kwargs):
    """Make a HTTP POST request.

    :param session: requests session to issue the request with.
    :param url: URL to post to.
    :returns: the response object.
    """
    log.debug('Making request: POST %s %s' % (url, kwargs))
    response = session.post(url, **kwargs)
    return response
Make a HTTP POST request .
50,478
def run(self, url):
    """Request URL, scrape response and return an EntityList.

    Returns None if process_url rejects the URL; otherwise fetches the page,
    builds an entity from each root node, and keeps the entities that survive
    process_entity.
    """
    url = self.process_url(url)
    if not url:
        return
    response = self.make_request(self.http, url)
    selector = self.process_response(response)
    processed = (self.process_entity(self.entity(root)) for root in self.get_roots(selector))
    return EntityList(*[entity for entity in processed if entity])
Request URL scrape response and return an EntityList .
50,479
def clean_html(self, html):
    """Apply Cleaner to HTML string or document and return a cleaned string or document.

    The return type mirrors the input type: bytes in, bytes out; text in,
    text out; document in, cleaned document out.
    """
    result_type = type(html)
    # Parse strings; deep-copy documents so the caller's tree is untouched.
    doc = html_fromstring(html) if isinstance(html, six.string_types) else copy.deepcopy(html)
    self(doc)
    if issubclass(result_type, six.binary_type):
        return tostring(doc, encoding='utf-8')
    if issubclass(result_type, six.text_type):
        return tostring(doc, encoding='unicode')
    return doc
Apply Cleaner to HTML string or document and return a cleaned string or document .
50,480
def clean_markup(self, markup, parser=None):
    """Apply Cleaner to markup string or document and return a cleaned string or document.

    The return type mirrors the input type: bytes in, bytes out; text in,
    text out; document in, cleaned document out.

    :param parser: optional lxml parser used when markup is a string.
    """
    result_type = type(markup)
    # Parse strings; deep-copy documents so the caller's tree is untouched.
    doc = fromstring(markup, parser=parser) if isinstance(markup, six.string_types) else copy.deepcopy(markup)
    self(doc)
    if issubclass(result_type, six.binary_type):
        return tostring(doc, encoding='utf-8')
    if issubclass(result_type, six.text_type):
        return tostring(doc, encoding='unicode')
    return doc
Apply Cleaner to markup string or document and return a cleaned string or document .
50,481
def floats(s):
    """Convert string to float. Handles more string formats than the standard python conversion.

    Strips uncertainties, surrounding punctuation, thousands separators and
    whitespace, and converts scientific notation like '2 × 10^3' to '2e3'.

    :param s: the string to convert.
    :returns: float value.
    :raises ValueError: if the string still cannot be parsed as a float.
    """
    try:
        return float(s)
    except ValueError:
        # Remove bracketed uncertainty following a digit, e.g. '1.5(2)' -> '1.5'.
        s = re.sub(r'(\d)\s*\(\d+(\.\d+)?\)', r'\1', s)
        # Remove '±' uncertainty, e.g. '3.5 ± 0.2' -> '3.5'.
        # Fixed: original was missing the string argument and used a non-raw
        # '\1' replacement (a control character, not a backreference).
        s = re.sub(r'(\d)\s*±\s*\d+(\.\d+)?', r'\1', s)
        # Strip trailing and leading punctuation/symbols.
        s = s.rstrip('\'"+-=<>/,.:;!?)]}…∼~≈×*_≥≤')
        s = s.lstrip('\'"+=<>/([{∼~≈×*_≥≤£$€#§')
        # Remove thousands separators and all whitespace.
        s = s.replace(',', '')
        s = ''.join(s.split())
        # Convert '×10^n' / 'x10n' scientific notation to 'e' notation.
        # Fixed: same missing-argument / non-raw replacement defect as above.
        s = re.sub(r'(\d)\s*[×x]\s*10\^?(-?\d)', r'\1e\2', s)
        return float(s)
Convert string to float . Handles more string formats than the standard python conversion .
50,482
def strip_querystring(url):
    """Remove the querystring from the end of a URL.

    :param url: the URL to strip.
    :returns: scheme://netloc/path with query and fragment discarded.
    """
    parts = six.moves.urllib.parse.urlparse(url)
    return '%s://%s%s' % (parts.scheme, parts.netloc, parts.path)
Remove the querystring from the end of a URL .
50,483
def extract_emails(text):
    """Return a list of email addresses extracted from the string."""
    # Normalize 'one dot leader' (U+2024), sometimes used to obfuscate emails.
    normalized = text.replace(u'\u2024', '.')
    return [match[0] for match in EMAIL_RE.findall(normalized)]
Return a list of email addresses extracted from the string .
50,484
def unapostrophe(text):
    """Strip apostrophe (and optional trailing 's') from the end of a string."""
    pattern = r'[%s]s?$' % ''.join(APOSTROPHES)
    return re.sub(pattern, '', text)
Strip apostrophe and s from the end of a string .
50,485
def getLocalTime(date, time, *args, **kwargs):
    """Get the time in the local timezone from date and time.

    Returns None when no time is given.
    """
    if time is None:
        return None
    return getLocalDateAndTime(date, time, *args, **kwargs)[1]
Get the time in the local timezone from date and time
50,486
def getLocalDateAndTime(date, time, *args, **kwargs):
    """Get the date and time in the local timezone from date and optionally time.

    Returns a (date, time-with-tz) pair; the time component is None when no
    time was given.
    """
    localDt = getLocalDatetime(date, time, *args, **kwargs)
    localTime = localDt.timetz() if time is not None else None
    return (localDt.date(), localTime)
Get the date and time in the local timezone from date and optionally time
50,487
def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
    """Get a datetime in the local timezone from date and optionally time.

    :param tz: timezone the date/time are given in; defaults to the local one.
    :param timeDefault: time to assume when ``time`` is None.
    """
    localTZ = timezone.get_current_timezone()
    if tz is None or tz == localTZ:
        # Already local: just make an aware datetime.
        localDt = getAwareDatetime(date, time, tz, timeDefault)
    else:
        # Build in the event's timezone, then convert to local.
        eventDt = getAwareDatetime(date, time, tz, timeDefault)
        localDt = eventDt.astimezone(localTZ)
        if time is None:
            # No explicit time: re-apply the default time on the local date,
            # rather than keeping the converted clock time.
            localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
    return localDt
Get a datetime in the local timezone from date and optionally time
50,488
def getAwareDatetime(date, time, tz, timeDefault=dt.time.max):
    """Get a datetime in the given timezone from date and optionally time.

    If time is not given, it defaults to ``timeDefault`` (the end of the day
    unless overridden).
    """
    if time is None:
        time = timeDefault
    naive = dt.datetime.combine(date, time)
    return timezone.make_aware(naive, tz, is_dst=False)
Get a datetime in the given timezone from date and optionally time . If time is not given it will default to timeDefault if that is given or if not then to the end of the day .
50,489
def _iso_num_weeks(iso_year):
    """Get the number of ISO-weeks in this year."""
    span = _iso_year_start(iso_year + 1) - _iso_year_start(iso_year)
    return span.days // 7
Get the number of ISO - weeks in this year
50,490
def _iso_info(iso_year, iso_week):
    """Give all the iso info we need from one calculation.

    Returns (first_day, last_day, prev_year_num_weeks, year_num_weeks) for
    the given ISO year and week.
    """
    prev_start = _iso_year_start(iso_year - 1)
    start = _iso_year_start(iso_year)
    next_start = _iso_year_start(iso_year + 1)
    first_day = start + dt.timedelta(weeks=iso_week - 1)
    last_day = first_day + dt.timedelta(days=6)
    prev_year_num_weeks = (start - prev_start).days // 7
    year_num_weeks = (next_start - start).days // 7
    return (first_day, last_day, prev_year_num_weeks, year_num_weeks)
Give all the iso info we need from one calculation
50,491
def _iso_week_of_month(date_value):
    """0-starting index of which ISO-week in the month this date is."""
    first_weekday = date_value.replace(day=1).weekday()
    return (date_value.day + first_weekday - 1) // 7
0 - starting index of which ISO - week in the month this date is
50,492
def _ssweek_year_start(ssweek_year):
    """The gregorian calendar date of the first day of the given Sundaystarting-week year.

    Always a Sunday, chosen so that January 1st falls in week 1.
    """
    jan5 = dt.date(ssweek_year, 1, 5)
    # weekday()+1 days back from Jan 5 lands on the preceding Sunday.
    return jan5 - dt.timedelta(days=jan5.weekday() + 1)
The gregorian calendar date of the first day of the given Sundaystarting - week year
50,493
def _ssweek_to_gregorian(ssweek_year, ssweek_week, ssweek_day):
    """Gregorian calendar date for the given Sundaystarting-week year, week and day.

    Day 1 is the Sunday that starts the week.
    """
    offset = dt.timedelta(weeks=ssweek_week - 1, days=ssweek_day - 1)
    return _ssweek_year_start(ssweek_year) + offset
Gregorian calendar date for the given Sundaystarting - week year week and day
50,494
def _ssweek_num_weeks(ssweek_year):
    """Get the number of Sundaystarting-weeks in this year."""
    span = _ssweek_year_start(ssweek_year + 1) - _ssweek_year_start(ssweek_year)
    return span.days // 7
Get the number of Sundaystarting - weeks in this year
50,495
def _ssweek_info(ssweek_year, ssweek_week):
    """Give all the ssweek info we need from one calculation.

    Returns (first_day, last_day, prev_year_num_weeks, year_num_weeks) for
    the given Sunday-starting-week year and week.
    """
    prev_start = _ssweek_year_start(ssweek_year - 1)
    start = _ssweek_year_start(ssweek_year)
    next_start = _ssweek_year_start(ssweek_year + 1)
    first_day = start + dt.timedelta(weeks=ssweek_week - 1)
    last_day = first_day + dt.timedelta(days=6)
    prev_year_num_weeks = (start - prev_start).days // 7
    year_num_weeks = (next_start - start).days // 7
    return (first_day, last_day, prev_year_num_weeks, year_num_weeks)
Give all the ssweek info we need from one calculation
50,496
def _gregorian_to_ssweek(date_value):
    """Sundaystarting-week year, week and day for the given Gregorian calendar date.

    Day 1 is Sunday, matching _ssweek_to_gregorian (whose day 1 is the
    Sunday year start).

    Fixes two defects in the original:
    - No year-boundary adjustment: dates before the calendar year's
      ssweek-year start produced week 0, and late-December dates on or after
      the next ssweek-year start produced an out-of-range week number.
    - Day-of-week used ``weekday() + 1`` (Monday=1), which broke the round
      trip with _ssweek_to_gregorian where day 1 is Sunday.
    """
    ssweek_year = date_value.year
    year_start = _ssweek_year_start(ssweek_year)
    if date_value < year_start:
        # Early January date belonging to the previous ssweek year.
        ssweek_year -= 1
        year_start = _ssweek_year_start(ssweek_year)
    else:
        next_year_start = _ssweek_year_start(ssweek_year + 1)
        if date_value >= next_year_start:
            # Late December date belonging to the next ssweek year.
            ssweek_year += 1
            year_start = next_year_start
    week_num = (date_value - year_start).days // 7 + 1
    # Map Mon..Sun weekday() 0..6 onto Sunday-starting 1..7 (Sunday=1).
    day_of_week = (date_value.weekday() + 1) % 7 + 1
    return (ssweek_year, week_num, day_of_week)
Sundaystarting - week year week and day for the given Gregorian calendar date
50,497
def _ssweek_of_month(date_value):
    """0-starting index of which Sundaystarting-week in the month this date is."""
    first_of_month = date_value.replace(day=1)
    # Sunday-starting weekday index of the 1st (Sunday=0 .. Saturday=6).
    sunday_index_of_first = (first_of_month.weekday() + 1) % 7
    return (date_value.day + sunday_index_of_first - 1) // 7
0 - starting index of which Sundaystarting - week in the month this date is
50,498
def byweekday(self):
    """The weekdays where the recurrence will be applied.

    In RFC 5545 this is called BYDAY, but it is renamed by dateutil to
    avoid ambiguity.
    """
    days = []
    if self.rule._byweekday:
        days.extend(Weekday(day) for day in self.rule._byweekday)
    if self.rule._bynweekday:
        days.extend(Weekday(day, n) for day, n in self.rule._bynweekday)
    return days
The weekdays where the recurrence will be applied . In RFC5545 this is called BYDAY but is renamed by dateutil to avoid ambiguity .
50,499
def bymonthday(self):
    """The month days where the recurrence will be applied.

    Combines the rule's positive and negative month-day settings, in that
    order, into a single list.
    """
    days = []
    for setting in (self.rule._bymonthday, self.rule._bynmonthday):
        if setting:
            days.extend(setting)
    return days
The month days where the recurrence will be applied .