_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33700
ParserUDF._valid_pdf
train
def _valid_pdf(self, path, filename): """Verify that the file exists and has a PDF extension.""" # If path is file, but not PDF. if os.path.isfile(path) and path.lower().endswith(".pdf"): return True else: full_path = os.path.join(path, filename) if os.path.isfile(full_path) and full_path.lower().endswith(".pdf"): return True elif os.path.isfile(os.path.join(path, filename + ".pdf")): return True elif os.path.isfile(os.path.join(path, filename + ".PDF")): return True return False
python
{ "resource": "" }
q33701
ParserUDF._parse_figure
train
def _parse_figure(self, node, state):
    """Parse the figure node.

    Creates a Figure context for ``img``/``figure`` nodes and records it in
    ``state["context"]``; all other tags pass through untouched.

    :param node: The lxml img node to parse
    :param state: The global state necessary to place the node in context
        of the document as a whole.
    :return: the (mutated) state dict.
    """
    if node.tag not in ["img", "figure"]:
        return state
    # Process the Figure
    stable_id = (
        f"{state['document'].name}"
        f"::"
        f"{'figure'}"
        f":"
        f"{state['figure']['idx']}"
    )
    # Set name for Figure
    name = node.attrib["name"] if "name" in node.attrib else None
    # An img within a Figure gets processed in the parent Figure
    if node.tag == "img" and isinstance(state["parent"][node], Figure):
        return state
    # NOTE: We currently do NOT support nested figures.
    parts = {}
    parent = state["parent"][node]
    if isinstance(parent, Section):
        parts["section"] = parent
    elif isinstance(parent, Cell):
        # Figures inside a table cell inherit the cell's section.
        parts["section"] = parent.table.section
        parts["cell"] = parent
    else:
        logger.warning(f"Figure is nested within {state['parent'][node]}")
        return state
    parts["document"] = state["document"]
    parts["stable_id"] = stable_id
    parts["name"] = name
    parts["position"] = state["figure"]["idx"]
    # If processing a raw img
    if node.tag == "img":
        # Create the Figure entry in the DB
        parts["url"] = node.get("src")
        state["context"][node] = Figure(**parts)
    elif node.tag == "figure":
        # Pull the image from a child img node, if one exists
        imgs = [child for child in node if child.tag == "img"]
        if len(imgs) > 1:
            logger.warning("Figure contains multiple images.")
            # Right now we don't support multiple URLs in the Figure context
            # As a workaround, just ignore the outer Figure and allow processing
            # of the individual images. We ignore the accompanying figcaption
            # by marking it as visited.
            captions = [child for child in node if child.tag == "figcaption"]
            state["visited"].update(captions)
            return state
        # NOTE(review): assumes at least one child <img>; imgs[0] raises
        # IndexError for an img-less <figure> — confirm upstream guarantees.
        img = imgs[0]
        state["visited"].add(img)
        # Create the Figure entry in the DB
        parts["url"] = img.get("src")
        state["context"][node] = Figure(**parts)
    state["figure"]["idx"] += 1
    return state
python
{ "resource": "" }
q33702
ParserUDF._parse_paragraph
train
def _parse_paragraph(self, node, state): """Parse a Paragraph of the node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ # Both Paragraphs will share the same parent parent = ( state["context"][node] if node in state["context"] else state["parent"][node] ) # Set name for Paragraph name = node.attrib["name"] if "name" in node.attrib else None for field in ["text", "tail"]: text = getattr(node, field) text = text.strip() if text and self.strip else text # Skip if "" or None if not text: continue # Run RegEx replacements for (rgx, replace) in self.replacements: text = rgx.sub(replace, text) # Process the Paragraph stable_id = ( f"{state['document'].name}" f"::" f"{'paragraph'}" f":" f"{state['paragraph']['idx']}" ) parts = {} parts["stable_id"] = stable_id parts["name"] = name parts["document"] = state["document"] parts["position"] = state["paragraph"]["idx"] if isinstance(parent, Caption): if parent.table: parts["section"] = parent.table.section elif parent.figure: parts["section"] = parent.figure.section parts["caption"] = parent elif isinstance(parent, Cell): parts["section"] = parent.table.section parts["cell"] = parent elif isinstance(parent, Section): parts["section"] = parent elif isinstance(parent, Figure): # occurs with text in the tail of an img parts["section"] = parent.section elif isinstance(parent, Table): # occurs with text in the tail of a table parts["section"] = parent.section else: raise NotImplementedError( f"Para '{text}' parent must be Section, Caption, or Cell, " f"not {parent}" ) # Create the entry in the DB paragraph = Paragraph(**parts) state["paragraph"]["idx"] += 1 state["paragraph"]["text"] = text state["paragraph"]["field"] = field yield from self._parse_sentence(paragraph, node, state)
python
{ "resource": "" }
q33703
ParserUDF._parse_section
train
def _parse_section(self, node, state):
    """Create a Section context for ``html``/``section`` nodes.

    Note that this implementation currently creates a Section at the
    beginning of the document and creates Section based on tag of node.
    Other node types pass through unchanged.

    :param node: The lxml node to parse
    :param state: The global parsing state; mutated in place.
    :return: the (mutated) state dict.
    """
    if node.tag not in ("html", "section"):
        return state

    document = state["document"]
    position = state["section"]["idx"]
    # Stable id uniquely identifies this Section within the document.
    stable_id = f"{document.name}::section:{position}"
    # A node may carry an explicit name attribute.
    name = node.attrib.get("name")
    state["context"][node] = Section(
        document=document,
        name=name,
        stable_id=stable_id,
        position=position,
    )
    state["section"]["idx"] += 1
    return state
python
{ "resource": "" }
q33704
ParserUDF._parse_caption
train
def _parse_caption(self, node, state):
    """Create a Caption context for ``caption``/``figcaption`` nodes.

    :param node: The lxml node to parse
    :param state: The global parsing state; mutated in place.
    :return: the (mutated) state dict.
    :raises NotImplementedError: if the parent is neither Table nor Figure.
    """
    if node.tag not in ("caption", "figcaption"):  # captions used in Tables
        return state

    parent = state["parent"][node]
    position = state["caption"]["idx"]
    stable_id = f"{state['document'].name}::caption:{position}"
    # A node may carry an explicit name attribute.
    name = node.attrib.get("name")

    # A Caption is owned by exactly one of a Table or a Figure.
    if isinstance(parent, Table):
        table, figure = parent, None
    elif isinstance(parent, Figure):
        table, figure = None, parent
    else:
        raise NotImplementedError("Caption must be a child of Table or Figure.")

    state["context"][node] = Caption(
        document=state["document"],
        table=table,
        figure=figure,
        stable_id=stable_id,
        name=name,
        position=position,
    )
    state["caption"]["idx"] += 1
    return state
python
{ "resource": "" }
q33705
ParserUDF._parse_node
train
def _parse_node(self, node, state): """Entry point for parsing all node types. :param node: The lxml HTML node to parse :param state: The global state necessary to place the node in context of the document as a whole. :rtype: a *generator* of Sentences """ # Processing on entry of node state = self._parse_section(node, state) state = self._parse_figure(node, state) if self.tabular: state = self._parse_table(node, state) state = self._parse_caption(node, state) yield from self._parse_paragraph(node, state)
python
{ "resource": "" }
q33706
ParserUDF.parse
train
def parse(self, document, text):
    """Depth-first search over the provided tree.

    Implemented as an iterative procedure. The structure of the state needed
    to parse each node is also defined in this function.

    :param document: the Document context
    :param text: the structured text of the document (e.g. HTML)
    :rtype: a *generator* of Sentences.
    """
    stack = []

    root = lxml.html.fromstring(text)

    # flattens children of node that are in the 'flatten' list
    if self.flatten:
        lxml.etree.strip_tags(root, self.flatten)

    # Assign the text, which was stripped of the 'flatten'-tags, to the document
    document.text = lxml.etree.tostring(root, encoding="unicode")

    # This dictionary contain the global state necessary to parse a
    # document and each context element. This reflects the relationships
    # defined in parser/models. This contains the state necessary to create
    # the respective Contexts within the document.
    state = {
        "visited": set(),
        "parent": {},  # map of parent[child] = node used to discover child
        "context": {},  # track the Context of each node (context['td'] = Cell)
        "root": root,
        "document": document,
        "section": {"idx": 0},
        "paragraph": {"idx": 0},
        "figure": {"idx": 0},
        "caption": {"idx": 0},
        "table": {"idx": 0},
        "sentence": {"idx": 0, "abs_offset": 0},
    }
    # NOTE: Currently the helper functions directly manipulate the state
    # rather than returning a modified copy.

    # Iterative Depth-First Search
    stack.append(root)
    state["parent"][root] = document
    state["context"][root] = document
    tokenized_sentences = []
    while stack:
        node = stack.pop()
        if node not in state["visited"]:
            state["visited"].add(node)  # mark as visited
            # Process
            if self.lingual:
                # Lingual mode defers NLP enrichment: collect tokenized
                # sentences now, enrich once for the whole document below.
                tokenized_sentences += [y for y in self._parse_node(node, state)]
            else:
                yield from self._parse_node(node, state)

            # NOTE: This reversed() order is to ensure that the iterative
            # DFS matches the order that would be produced by a recursive
            # DFS implementation.
            for child in reversed(node):
                # Skip nodes that are comments or blacklisted
                if child.tag is lxml.etree.Comment or (
                    self.blacklist and child.tag in self.blacklist
                ):
                    continue
                stack.append(child)

                # store the parent of the node, which is either the parent
                # Context, or if the parent did not create a Context, then
                # use the node's parent Context.
                state["parent"][child] = (
                    state["context"][node]
                    if node in state["context"]
                    else state["parent"][node]
                )

    if self.lingual:
        yield from self.enrich_tokenized_sentences_with_nlp(tokenized_sentences)
python
{ "resource": "" }
q33707
init_logging
train
def init_logging(
    log_dir=tempfile.gettempdir(),
    format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s",
    level=logging.INFO,
):
    """Configures logging to output to the provided log_dir.

    Will use a nested directory whose name is the current timestamp.
    No-op if logging was already initialized (``Meta.log_path`` is set).

    :param log_dir: The directory to store logs in.
    :type log_dir: str
    :param format: The logging format string to use.
    :type format: str
    :param level: The logging level to use, e.g., logging.INFO.
    """
    # NOTE: the default for log_dir is evaluated once at import time.
    if not Meta.log_path:
        # Generate a new directory using the log_dir, if it doesn't exist
        dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        log_path = os.path.join(log_dir, dt)
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        # Configure the logger using the provided path; log to both a file
        # in the new directory and to the console.
        logging.basicConfig(
            format=format,
            level=level,
            handlers=[
                logging.FileHandler(os.path.join(log_path, "fonduer.log")),
                logging.StreamHandler(),
            ],
        )

        # Notify user of log location
        logger.info(f"Setting logging directory to: {log_path}")
        Meta.log_path = log_path
    else:
        logger.info(
            f"Logging was already initialized to use {Meta.log_path}. "
            "To configure logging manually, call fonduer.init_logging before "
            "initialiting Meta."
        )
python
{ "resource": "" }
q33708
_update_meta
train
def _update_meta(conn_string):
    """Update Meta class.

    Parses ``conn_string`` (an SQLAlchemy-style database URL, e.g.
    ``postgresql://user:pw@host:5432/dbname``) and caches its components
    as class attributes on ``Meta``.

    :param conn_string: the database connection string.
    """
    url = urlparse(conn_string)
    Meta.conn_string = conn_string
    # url.path carries a leading "/"; strip it to get the database name.
    Meta.DBNAME = url.path[1:]
    Meta.DBUSER = url.username
    Meta.DBPWD = url.password
    Meta.DBHOST = url.hostname
    Meta.DBPORT = url.port
    # Flag used elsewhere to enable Postgres-specific behavior.
    Meta.postgres = url.scheme.startswith("postgresql")
python
{ "resource": "" }
q33709
Meta.init
train
def init(cls, conn_string=None):
    """Return the unique Meta class.

    When a connection string is provided, refreshes the cached connection
    attributes, (re)creates the session/engine, and initializes the
    storage schema.

    :param conn_string: optional database connection string.
    :return: the Meta class itself.
    """
    if conn_string:
        _update_meta(conn_string)
        # We initialize the engine within the models module because models'
        # schema can depend on which data types are supported by the engine
        Meta.Session = new_sessionmaker()
        Meta.engine = Meta.Session.kw["bind"]
        logger.info(
            f"Connecting user:{Meta.DBUSER} "
            f"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}"
        )
        Meta._init_db()

    # Ensure logging is configured even if the caller never did so.
    if not Meta.log_path:
        init_logging()

    return cls
python
{ "resource": "" }
q33710
Meta._init_db
train
def _init_db(cls): """ Initialize the storage schema. This call must be performed after all classes that extend Base are declared to ensure the storage schema is initialized. """ # This list of import defines which SQLAlchemy classes will be # initialized when Meta.init() is called. If a sqlalchemy class is not # imported before the call to create_all(), it will not be created. import fonduer.candidates.models # noqa import fonduer.features.models # noqa import fonduer.learning.models # noqa import fonduer.parser.models # noqa import fonduer.supervision.models # noqa import fonduer.utils.models # noqa logger.info("Initializing the storage schema") Meta.Base.metadata.create_all(Meta.engine)
python
{ "resource": "" }
q33711
Spacy.model_installed
train
def model_installed(name):
    """Check if spaCy language model is installed.

    Adapted from
    https://github.com/explosion/spaCy/blob/master/spacy/util.py

    :param name: model name, package name, or path to a model directory.
    :return: True if the model is available, else False.
    :raises IOError: if the spaCy data path cannot be located.
    """
    data_path = util.get_data_path()
    if not data_path or not data_path.exists():
        raise IOError(f"Can't find spaCy data path: {data_path}")
    # Names of model directories already present under the data path.
    installed = {model_dir.name for model_dir in data_path.iterdir()}
    return (
        name in installed
        or Spacy.is_package(name)  # installed as package
        or Path(name).exists()  # path to model data directory
    )
python
{ "resource": "" }
q33712
Spacy.load_lang_model
train
def load_lang_model(self):
    """
    Load spaCy language model or download if model is available and not
    installed.

    Currently supported spaCy languages:

        en English (50MB)
        de German (645MB)
        fr French (1.33GB)
        es Spanish (377MB)

    Languages listed in ``self.alpha_languages`` are loaded tokenizer-only
    via their ``spacy.lang`` module.

    :raises ValueError: if ``self.lang`` is neither a supported full model
        nor an alpha (tokenizer-only) language.
    """
    if self.lang in self.languages:
        # Full pipeline model; fetch it on first use.
        if not Spacy.model_installed(self.lang):
            download(self.lang)
        model = spacy.load(self.lang)
    elif self.lang in self.alpha_languages:
        # Tokenizer-only support: instantiate the Language subclass from
        # the spacy.lang submodule (no statistical model involved).
        language_module = importlib.import_module(f"spacy.lang.{self.lang}")
        language_method = getattr(language_module, self.alpha_languages[self.lang])
        model = language_method()
    else:
        # Previously an unsupported language fell through and crashed with
        # an opaque UnboundLocalError on `model`; fail with a clear message.
        raise ValueError(f"Unsupported language: {self.lang}")
    self.model = model
python
{ "resource": "" }
q33713
Spacy.enrich_sentences_with_NLP
train
def enrich_sentences_with_NLP(self, all_sentences):
    """
    Enrich a list of fonduer Sentence objects with NLP features.

    We merge and process the text of all Sentences for higher efficiency.
    Sentences are updated in place with lemmas, POS tags, NER tags, and
    dependency-parse information, then yielded back.

    :param all_sentences: List of fonduer Sentence objects for one document
    :return: a *generator* of the enriched Sentence objects.
    """
    if not self.has_NLP_support():
        raise NotImplementedError(
            f"Language {self.lang} not available in spacy beyond tokenization"
        )
    if len(all_sentences) == 0:
        return  # Nothing to parse

    # Swap the rule-based sentencizer for the custom boundary detector so
    # spaCy respects the sentence splits fonduer already decided on.
    if self.model.has_pipe("sentencizer"):
        self.model.remove_pipe("sentencizer")
        self.logger.debug(
            f"Removed sentencizer ('sentencizer') from model. "
            f"Now in pipeline: {self.model.pipe_names}"
        )
    if self.model.has_pipe("sentence_boundary_detector"):
        self.model.remove_pipe(name="sentence_boundary_detector")
    self.model.add_pipe(
        set_custom_boundary, before="parser", name="sentence_boundary_detector"
    )

    # Batch sentences so each merged text stays under spaCy's char limit.
    sentence_batches = self._split_sentences_by_char_limit(
        all_sentences, self.model.max_length
    )

    # TODO: We could do this in parallel. Test speedup in the future
    for sentence_batch in sentence_batches:
        custom_tokenizer = TokenPreservingTokenizer(self.model.vocab)
        # we circumvent redundant tokenization by using a custom
        # tokenizer that directly uses the already separated words
        # of each sentence as tokens
        doc = custom_tokenizer(sentence_batch)
        doc.user_data = sentence_batch
        for name, proc in self.model.pipeline:  # iterate over components in order
            doc = proc(doc)
        try:
            assert doc.is_parsed
        except Exception:
            # Log-and-continue: a failed parse leaves the batch unenriched
            # rather than aborting the whole document.
            self.logger.exception(f"{doc} was not parsed")

        # Copy per-token features back onto the original Sentence objects.
        for sent, current_sentence_obj in zip(doc.sents, sentence_batch):
            parts = defaultdict(list)
            for i, token in enumerate(sent):
                parts["lemmas"].append(token.lemma_)
                parts["pos_tags"].append(token.tag_)
                parts["ner_tags"].append(
                    token.ent_type_ if token.ent_type_ else "O"
                )
                # Dependency heads are 1-indexed within the sentence;
                # 0 marks the root token (its own head).
                head_idx = (
                    0 if token.head is token else token.head.i - sent[0].i + 1
                )
                parts["dep_parents"].append(head_idx)
                parts["dep_labels"].append(token.dep_)
            current_sentence_obj.pos_tags = parts["pos_tags"]
            current_sentence_obj.lemmas = parts["lemmas"]
            current_sentence_obj.ner_tags = parts["ner_tags"]
            current_sentence_obj.dep_parents = parts["dep_parents"]
            current_sentence_obj.dep_labels = parts["dep_labels"]
            yield current_sentence_obj
python
{ "resource": "" }
q33714
Spacy.split_sentences
train
def split_sentences(self, text):
    """
    Split input text into sentences that match CoreNLP's default format,
    but are not yet processed.

    :param text: The text of the parent paragraph of the sentences
    :return: a *generator* of dicts, one per sentence, with tokenized
        fields and placeholders for later NLP parsing.
    """
    # The custom boundary detector is only used for full NLP parsing;
    # remove it so the rule-based sentencizer drives splitting here.
    if self.model.has_pipe("sentence_boundary_detector"):
        self.model.remove_pipe(name="sentence_boundary_detector")
    if not self.model.has_pipe("sentencizer"):
        sentencizer = self.model.create_pipe("sentencizer")  # add sentencizer
        self.model.add_pipe(sentencizer)
    try:
        doc = self.model(text, disable=["parser", "tagger", "ner"])
    except ValueError:
        # temporary increase character limit of spacy
        # 'Probably save' according to spacy, as no parser or NER is used
        previous_max_length = self.model.max_length
        self.model.max_length = 100_000_000
        self.logger.warning(
            f"Temporarily increased spacy maximum "
            f"character limit to {self.model.max_length} to split sentences."
        )
        doc = self.model(text, disable=["parser", "tagger", "ner"])
        self.model.max_length = previous_max_length
        self.logger.warning(
            f"Spacy maximum "
            f"character limit set back to {self.model.max_length}."
        )
    # NOTE(review): forcing is_parsed appears intended to make doc.sents
    # accessible without a dependency parse — confirm against the spaCy
    # version in use (newer versions make this attribute read-only).
    doc.is_parsed = True
    position = 0
    for sent in doc.sents:
        parts = defaultdict(list)
        text = sent.text
        for i, token in enumerate(sent):
            parts["words"].append(str(token))
            parts["lemmas"].append(token.lemma_)
            parts["pos_tags"].append(token.pos_)
            parts["ner_tags"].append("")  # placeholder for later NLP parsing
            parts["char_offsets"].append(token.idx)
            parts["abs_char_offsets"].append(token.idx)
            parts["dep_parents"].append(0)  # placeholder for later NLP parsing
            parts["dep_labels"].append("")  # placeholder for later NLP parsing
        # make char_offsets relative to start of sentence
        parts["char_offsets"] = [
            p - parts["char_offsets"][0] for p in parts["char_offsets"]
        ]
        parts["position"] = position
        parts["text"] = text
        position += 1
        yield parts
python
{ "resource": "" }
q33715
Classifier._setup_model_loss
train
def _setup_model_loss(self, lr):
    """
    Setup loss and optimizer for PyTorch model.

    Idempotent: each component is created only on the first call.

    :param lr: learning rate passed to ``optim.Adam``.
    """
    # Setup optimizer on first call only.
    if not hasattr(self, "optimizer"):
        self.optimizer = optim.Adam(self.parameters(), lr=lr)

    # Setup loss criterion on first call only.
    if not hasattr(self, "loss"):
        self.loss = SoftCrossEntropyLoss()
python
{ "resource": "" }
q33716
Classifier.save_marginals
train
def save_marginals(self, session, X, training=False):
    """Save the predicted marginal probabilities for the Candidates X.

    :param session: The database session to use.
    :param X: Input data.
    :param training: If True, these are training marginals / labels; else
        they are saved as end model predictions.
    :type training: bool
    """
    # Delegates to the module-level save_marginals helper (which this
    # method intentionally shadows), computing marginals with this model.
    save_marginals(session, X, self.marginals(X), training=training)
python
{ "resource": "" }
q33717
Classifier.predict
train
def predict(self, X, b=0.5, pos_label=1, return_probs=False):
    """Return numpy array of class predictions for X
    based on predicted marginal probabilities.

    :param X: Input data.
    :param b: Decision boundary *for binary setting only*.
    :type b: float
    :param pos_label: Positive class index *for binary setting only*. Default: 1
    :type pos_label: int
    :param return_probs: If True, return predict probability. Default: False
    :type return_probs: bool
    """
    if self._check_input(X):
        X = self._preprocess_data(X)

    Y_prob = self.marginals(X)

    if self.cardinality > 2:
        # Multi-class: pick the highest-probability class (1-indexed).
        Y_pred = Y_prob.argmax(axis=1) + 1
    else:
        # Binary: threshold the positive class probability at b.
        if pos_label not in [1, 2]:
            raise ValueError("pos_label must have values in {1,2}.")
        self.logger.info(
            f"Using positive label class {pos_label} with threshold {b}"
        )
        neg_label = 3 - pos_label
        Y_pred = np.array(
            [pos_label if p[pos_label - 1] > b else neg_label for p in Y_prob]
        )

    return (Y_pred, Y_prob) if return_probs else Y_pred
python
{ "resource": "" }
q33718
Classifier.save
train
def save(self, model_file, save_dir, verbose=True):
    """Save current model.

    :param model_file: Saved model file name.
    :type model_file: str
    :param save_dir: Saved model directory.
    :type save_dir: str
    :param verbose: Print log or not
    :type verbose: bool
    """
    # Make sure the target directory exists before writing.
    os.makedirs(save_dir, exist_ok=True)

    # Bundle the weights with the metadata needed to rebuild the model.
    params = {
        "model": self.state_dict(),
        "cardinality": self.cardinality,
        "name": self.name,
        "config": self.settings,
    }

    try:
        torch.save(params, f"{save_dir}/{model_file}")
    except BaseException:
        self.logger.warning("Saving failed... continuing anyway.")

    if verbose:
        self.logger.info(f"[{self.name}] Model saved as {model_file} in {save_dir}")
python
{ "resource": "" }
q33719
Classifier.load
train
def load(self, model_file, save_dir, verbose=True):
    """Load model from file and rebuild the model.

    :param model_file: Saved model file name.
    :type model_file: str
    :param save_dir: Saved model directory.
    :type save_dir: str
    :param verbose: Print log or not
    :type verbose: bool
    :raises FileNotFoundError: if ``save_dir`` does not exist.
    """
    if not os.path.exists(save_dir):
        self.logger.error("Loading failed... Directory does not exist.")
        # Previously execution fell through and crashed later on an
        # undefined `checkpoint`; fail fast with a meaningful exception.
        raise FileNotFoundError(f"Directory does not exist: {save_dir}")

    try:
        checkpoint = torch.load(f"{save_dir}/{model_file}")
    except BaseException:
        self.logger.error(
            f"Loading failed... Cannot load model from {save_dir}/{model_file}"
        )
        # Re-raise: without a checkpoint the model cannot be rebuilt
        # (the old code continued and hit an UnboundLocalError).
        raise

    self.load_state_dict(checkpoint["model"])
    self.settings = checkpoint["config"]
    self.cardinality = checkpoint["cardinality"]
    self.name = checkpoint["name"]

    if verbose:
        self.logger.info(
            f"[{self.name}] Model loaded as {model_file} in {save_dir}"
        )
python
{ "resource": "" }
q33720
get_parent_tag
train
def get_parent_tag(mention):
    """Return the HTML tag of the Mention's parent.

    These may be tags such as 'p', 'h2', 'table', 'div', etc.
    If a candidate is passed in, only the tag of its first Mention is
    returned.

    :param mention: The Mention to evaluate
    :rtype: string
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    parent = node.getparent()
    # The document root has no parent; report None in that case.
    return str(parent.tag) if parent is not None else None
python
{ "resource": "" }
q33721
get_prev_sibling_tags
train
def get_prev_sibling_tags(mention):
    """Return the HTML tag of the Mention's previous siblings.

    Previous siblings are Mentions which are at the same level in the HTML
    tree as the given mention, but are declared before the given mention.
    If a candidate is passed in, only the previous siblings of its first
    Mention are considered in the calculation.

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    tags = []
    # Walk backwards through preceding siblings, then restore document
    # order with a single reverse at the end.
    sibling = node.getprevious()
    while sibling is not None:
        tags.append(str(sibling.tag))
        sibling = sibling.getprevious()
    tags.reverse()
    return tags
python
{ "resource": "" }
q33722
get_next_sibling_tags
train
def get_next_sibling_tags(mention):
    """Return the HTML tag of the Mention's next siblings.

    Next siblings are Mentions which are at the same level in the HTML tree
    as the given mention, but are declared after the given mention.
    If a candidate is passed in, only the next siblings of its last Mention
    are considered in the calculation.

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    tags = []
    # Walk forward through following siblings in document order.
    sibling = node.getnext()
    while sibling is not None:
        tags.append(str(sibling.tag))
        sibling = sibling.getnext()
    return tags
python
{ "resource": "" }
q33723
get_ancestor_class_names
train
def get_ancestor_class_names(mention):
    """Return the HTML classes of the Mention's ancestors.

    If a candidate is passed in, only the ancestors of its first Mention
    are returned.

    :param mention: The Mention to evaluate
    :rtype: list of strings (root-first order)
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    class_names = []
    # Climb to the root, then reverse so the root's class comes first.
    while node is not None:
        class_names.append(str(node.get("class")))
        node = node.getparent()
    class_names.reverse()
    return class_names
python
{ "resource": "" }
q33724
get_ancestor_tag_names
train
def get_ancestor_tag_names(mention):
    """Return the HTML tag of the Mention's ancestors.

    For example, ['html', 'body', 'p'].
    If a candidate is passed in, only the ancestors of its first Mention
    are returned.

    :param mention: The Mention to evaluate
    :rtype: list of strings (root-first order)
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    tag_names = []
    # Climb to the root, then reverse so the root's tag comes first.
    while node is not None:
        tag_names.append(str(node.tag))
        node = node.getparent()
    tag_names.reverse()
    return tag_names
python
{ "resource": "" }
q33725
get_ancestor_id_names
train
def get_ancestor_id_names(mention):
    """Return the HTML id's of the Mention's ancestors.

    If a candidate is passed in, only the ancestors of its first Mention
    are returned.

    :param mention: The Mention to evaluate
    :rtype: list of strings (root-first order)
    """
    span = _to_span(mention)
    node = _get_node(span.sentence)
    id_names = []
    # Climb to the root, then reverse so the root's id comes first.
    while node is not None:
        id_names.append(str(node.get("id")))
        node = node.getparent()
    id_names.reverse()
    return id_names
python
{ "resource": "" }
q33726
common_ancestor
train
def common_ancestor(c):
    """Return the path to the root that is shared between a binary-Mention
    Candidate.

    In particular, this is the common prefix of the two Mentions' HTML
    xpaths.

    :param c: The binary-Mention Candidate to evaluate
    :rtype: list of strings
    """
    ancestor1 = _to_span(c[0]).sentence.xpath.split("/")
    ancestor2 = _to_span(c[1]).sentence.xpath.split("/")
    min_len = min(len(ancestor1), len(ancestor2))
    # Find the first index at which the two paths diverge.
    # NOTE: the previous np.argmin-based version returned [] whenever one
    # path was a prefix of the other (argmin of an all-True array is 0),
    # dropping the genuinely shared prefix.
    shared = min_len
    for i in range(min_len):
        if ancestor1[i] != ancestor2[i]:
            shared = i
            break
    return ancestor1[:shared]
python
{ "resource": "" }
q33727
RNN.init_hidden
train
def init_hidden(self, batch_size):
    """Initiate the initial state.

    :param batch_size: batch size.
    :type batch_size: int
    :return: Initial state of LSTM
    :rtype: pair of torch.Tensors of shape
        (num_layers * num_directions, batch_size, hidden_size)
    """
    # Bidirectional LSTMs need one state per direction.
    num_directions = 2 if self.bidirectional else 1
    shape = (self.num_layers * num_directions, batch_size, self.lstm_hidden)
    h0 = torch.zeros(*shape)
    c0 = torch.zeros(*shape)
    if self.use_cuda:
        return h0.cuda(), c0.cuda()
    return h0, c0
python
{ "resource": "" }
q33728
TensorBoardLogger.add_scalar
train
def add_scalar(self, name, value, step):
    """Log a scalar variable.

    :param name: tag under which the value is recorded.
    :param value: the scalar value to log.
    :param step: global step (x-axis position) for this data point.
    """
    # Thin delegate to the underlying SummaryWriter.
    self.writer.add_scalar(name, value, step)
python
{ "resource": "" }
q33729
mention_to_tokens
train
def mention_to_tokens(mention, token_type="words", lowercase=False):
    """
    Extract tokens from the mention

    :param mention: mention object.
    :param token_type: token type that wants to extract.
    :type token_type: str
    :param lowercase: use lowercase or not.
    :type lowercase: bool
    :return: The token list.
    :rtype: list
    """
    # Look the requested token field up dynamically on the sentence.
    tokens = mention.context.sentence.__dict__[token_type]
    if lowercase:
        return [token.lower() for token in tokens]
    return list(tokens)
python
{ "resource": "" }
q33730
mark_sentence
train
def mark_sentence(s, args):
    """Insert markers around relation arguments in word sequence

    :param s: list of tokens in sentence.
    :type s: list
    :param args: list of triples (l, h, idx) as per @_mark(...) corresponding
        to relation arguments
    :type args: list
    :return: The marked sentence.
    :rtype: list

    Example: Then Barack married Michelle.
         -> Then ~~[[1 Barack 1]]~~ married ~~[[2 Michelle 2]]~~.
    """
    marked = list(s)
    # Gather every (position, marker) pair, then insert from the highest
    # position down so earlier insertion points remain valid.
    insertions = sorted((pair for arg in args for pair in mark(*arg)), reverse=True)
    for position, marker in insertions:
        marked.insert(position, marker)
    return marked
python
{ "resource": "" }
q33731
pad_batch
train
def pad_batch(batch, max_len=0, type="int"):
    """Pad the batch into matrix

    :param batch: The data for padding.
    :type batch: list of word index sequences
    :param max_len: Max length of sequence of padding (0 = no limit).
    :type max_len: int
    :param type: mask value type ("int" or "float").
    :type type: str
    :return: The padded matrix and corresponding mask matrix; the mask is
        True at padding positions (where the index is 0).
    :rtype: pair of torch.Tensors with shape (batch_size, max_sent_len)
    """
    batch_size = len(batch)
    max_sent_len = int(np.max([len(x) for x in batch]))
    # Optionally truncate to the caller-provided maximum length.
    if 0 < max_len < max_sent_len:
        max_sent_len = max_len
    # NOTE: the np.int alias was removed in NumPy 1.24; use an explicit
    # fixed-width dtype instead.
    dtype = np.float32 if type == "float" else np.int64
    idx_matrix = np.zeros((batch_size, max_sent_len), dtype=dtype)
    for row, seq in enumerate(batch):
        for col, token_idx in enumerate(seq):
            if col >= max_sent_len:
                break
            idx_matrix[row, col] = token_idx
    idx_matrix = torch.tensor(idx_matrix)
    # torch.eq already returns a bool tensor; no extra copy needed.
    mask_matrix = torch.eq(idx_matrix.data, 0)
    return idx_matrix, mask_matrix
python
{ "resource": "" }
q33732
DocPreprocessor._generate
train
def _generate(self): """Parses a file or directory of files into a set of ``Document`` objects.""" doc_count = 0 for fp in self.all_files: for doc in self._get_docs_for_path(fp): yield doc doc_count += 1 if doc_count >= self.max_docs: return
python
{ "resource": "" }
q33733
is_horz_aligned
train
def is_horz_aligned(c):
    """Return True if all the components of c are horizontally aligned.

    Horizontal alignment means that the bounding boxes of each Mention of c
    shares a similar y-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # All mentions are compared against the first mention's bounding box.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_horz_aligned(bbox_from_span(_to_span(c[i])), bbox_from_span(anchor))
        for i in range(len(c))
    )
python
{ "resource": "" }
q33734
is_vert_aligned
train
def is_vert_aligned(c):
    """Return true if all the components of c are vertically aligned.

    Vertical alignment means that the bounding boxes of each Mention of c
    shares a similar x-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # All mentions are compared against the first mention's bounding box.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned(bbox_from_span(_to_span(c[i])), bbox_from_span(anchor))
        for i in range(len(c))
    )
python
{ "resource": "" }
q33735
is_vert_aligned_left
train
def is_vert_aligned_left(c):
    """Return true if all components are vertically aligned on their left border.

    Vertical alignment means that the bounding boxes of each Mention of c
    shares a similar x-axis value in the visual rendering of the document.
    In this function the similarity of the x-axis value is based on the
    left border of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # All mentions are compared against the first mention's bounding box.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned_left(
            bbox_from_span(_to_span(c[i])), bbox_from_span(anchor)
        )
        for i in range(len(c))
    )
python
{ "resource": "" }
q33736
is_vert_aligned_right
train
def is_vert_aligned_right(c):
    """Return true if all components vertically aligned on their right border.

    Vertical alignment means that the bounding boxes of each Mention of c
    shares a similar x-axis value in the visual rendering of the document.
    In this function the similarity of the x-axis value is based on the
    right border of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # All mentions are compared against the first mention's bounding box.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned_right(
            bbox_from_span(_to_span(c[i])), bbox_from_span(anchor)
        )
        for i in range(len(c))
    )
python
{ "resource": "" }
q33737
is_vert_aligned_center
train
def is_vert_aligned_center(c):
    """Return true if all the components are vertically aligned on their center.

    Vertical alignment means that the bounding boxes of each Mention of c
    shares a similar x-axis value in the visual rendering of the document.
    In this function the similarity of the x-axis value is based on the
    center of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # All mentions are compared against the first mention's bounding box.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned_center(
            bbox_from_span(_to_span(c[i])), bbox_from_span(anchor)
        )
        for i in range(len(c))
    )
python
{ "resource": "" }
q33738
same_page
train
def same_page(c):
    """Return true if all the components of c are on the same page of the document.

    Page numbers are based on the PDF rendering of the document. If a PDF
    file is provided, it is used. Otherwise, if only a HTML/XML document is
    provided, a PDF is created and then used to determine the page number
    of a Mention.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    # Every mention's page must match the first mention's page.
    anchor = _to_span(c[0])
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_from_span(_to_span(c[i])).page == bbox_from_span(anchor).page
        for i in range(len(c))
    )
python
{ "resource": "" }
q33739
get_horz_ngrams
train
def get_horz_ngrams(
    mention, attrib="words", n_min=1, n_max=1, lower=True, from_sentence=True
):
    """Return all ngrams which are visually horizontally aligned with the Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention to evaluate
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :param from_sentence: If True, returns ngrams from any horizontally aligned
        Sentences, rather than just horizontally aligned ngrams themselves.
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        # Delegate the actual alignment search to the shared direction helper.
        yield from _get_direction_ngrams(
            "horz", span, attrib, n_min, n_max, lower, from_sentence
        )
python
{ "resource": "" }
q33740
get_page_vert_percentile
train
def get_page_vert_percentile(
    mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT
):
    """Return which percentile from the TOP in the page the Mention is located in.

    Percentile is calculated where the top of the page is 0.0, and the bottom
    of the page is 1.0. For example, a Mention in at the top 1/4 of the page
    will have a percentile of 0.25.

    Page width and height are expressed in pt and should match the source
    documents (e.g. Letter 612x792, A4 595x842). Letter size is used by
    default.

    Note that if a candidate is passed in, only the vertical percentile of
    its first Mention is returned.

    :param mention: The Mention to evaluate
    :param page_width: The width of the page. Default to Letter paper width.
    :param page_height: The heigh of the page. Default to Letter paper height.
    :rtype: float in [0.0, 1.0]
    """
    # Only the top coordinate matters for the vertical percentile; the
    # page_width parameter is kept for interface symmetry with the
    # horizontal variant.
    return bbox_from_span(_to_span(mention)).top / page_height
python
{ "resource": "" }
q33741
get_page_horz_percentile
train
def get_page_horz_percentile(
    mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT
):
    """Return which percentile from the LEFT in the page the Mention is located in.

    Percentile is calculated where the left of the page is 0.0, and the right
    of the page is 1.0.

    Page width and height are expressed in pt and should match the source
    documents (e.g. Letter 612x792, A4 595x842). Letter size is used by
    default.

    Note that if a candidate is passed in, only the horizontal percentile of
    its first Mention is returned.

    :param mention: The Mention to evaluate
    :param page_width: The width of the page. Default to Letter paper width.
    :param page_height: The heigh of the page. Default to Letter paper height.
    :rtype: float in [0.0, 1.0]
    """
    # Only the left coordinate matters for the horizontal percentile; the
    # page_height parameter is kept for interface symmetry with the
    # vertical variant.
    return bbox_from_span(_to_span(mention)).left / page_width
python
{ "resource": "" }
q33742
get_visual_aligned_lemmas
train
def get_visual_aligned_lemmas(mention):
    """Return a generator of the lemmas aligned visually with the Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention to evaluate.
    :rtype: a *generator* of lemmas
    """
    for span in _to_spans(mention):
        sentence = span.sentence
        # Compute (and cache) visual features for the entire document once;
        # subsequent spans from the same document reuse the cache.
        _preprocess_visual_features(sentence.document)
        yield from sentence._aligned_lemmas
python
{ "resource": "" }
q33743
camel_to_under
train
def camel_to_under(name):
    """
    Converts camel-case string to lowercase string separated by underscores.

    Written by epost (http://stackoverflow.com/questions/1175208).

    :param name: String to be converted
    :return: new String with camel-case converted to lowercase, underscored
    """
    # First pass: break "XxxYyy" at an uppercase letter followed by
    # lowercase letters (handles acronym boundaries like "HTTPResponse").
    partial = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    # Second pass: break a lowercase/digit followed by an uppercase letter,
    # then normalize everything to lowercase.
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", partial).lower()
python
{ "resource": "" }
q33744
get_as_dict
train
def get_as_dict(x):
    """Return an object as a dictionary of its attributes."""
    # Dicts pass through unchanged.
    if isinstance(x, dict):
        return x
    # namedtuples expose _asdict(); everything else falls back to __dict__.
    try:
        return x._asdict()
    except AttributeError:
        return x.__dict__
python
{ "resource": "" }
q33745
UDFRunner._apply_st
train
def _apply_st(self, doc_loader, **kwargs):
    """Run the UDF single-threaded, optionally with progress bar"""
    udf = self.udf_class(**self.udf_init_kwargs)

    # Process each document sequentially in this process.
    for document in doc_loader:
        if self.pb is not None:
            self.pb.update(1)
        udf.session.add_all(out for out in udf.apply(document, **kwargs))

    # Persist everything produced by the UDF.
    udf.session.commit()
python
{ "resource": "" }
q33746
UDFRunner._apply_mt
train
def _apply_mt(self, doc_loader, parallelism, **kwargs):
    """Run the UDF multi-threaded using python multiprocessing"""
    # Each worker process needs its own DB connection, which is only
    # supported for the PostgreSQL backend.
    if not Meta.postgres:
        raise ValueError("Fonduer must use PostgreSQL as a database backend.")

    def fill_input_queue(in_queue, doc_loader, terminal_signal):
        # Producer: enqueue every document, then a single sentinel so the
        # workers know the stream is finished.
        for doc in doc_loader:
            in_queue.put(doc)
        in_queue.put(terminal_signal)

    # Create an input queue to feed documents to UDF workers
    manager = Manager()
    in_queue = manager.Queue()
    # Use an output queue to track multiprocess progress
    out_queue = JoinableQueue()

    total_count = len(doc_loader)

    # Start UDF Processes
    for i in range(parallelism):
        udf = self.udf_class(
            in_queue=in_queue,
            out_queue=out_queue,
            worker_id=i,
            **self.udf_init_kwargs,
        )
        udf.apply_kwargs = kwargs
        self.udfs.append(udf)

    # Start the UDF processes, and then join on their completion
    for udf in self.udfs:
        udf.start()

    # Fill input queue with documents
    terminal_signal = UDF.QUEUE_CLOSED
    in_queue_filler = Process(
        target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal)
    )
    in_queue_filler.start()

    # Drain the output queue; each worker posts one TASK_DONE per document,
    # so this loop doubles as the progress tracker.
    count_parsed = 0
    while count_parsed < total_count:
        y = out_queue.get()

        # Update progress bar whenever an item has been processed
        if y == UDF.TASK_DONE:
            count_parsed += 1
            if self.pb is not None:
                self.pb.update(1)
        else:
            raise ValueError("Got non-sentinal output.")

    in_queue_filler.join()
    # NOTE(review): an extra QUEUE_CLOSED is enqueued here, presumably so a
    # worker still blocked on get() can observe the shutdown sentinel — the
    # worker loop is not visible in this file, confirm against UDF.run().
    in_queue.put(UDF.QUEUE_CLOSED)

    for udf in self.udfs:
        udf.join()

    # Terminate and flush the processes
    for udf in self.udfs:
        udf.terminate()
    self.udfs = []
python
{ "resource": "" }
q33747
AnnotationMixin.candidate
train
def candidate(cls):
    """The ``Candidate``."""
    # Many-to-one relationship back to the generic Candidate row.  The
    # backref name is derived from the annotation class name by converting
    # camel case to underscores and pluralizing (e.g. "GoldLabel" ->
    # Candidate.gold_labels).
    return relationship(
        "Candidate",
        backref=backref(
            camel_to_under(cls.__name__) + "s",
            # Annotations are deleted together with their candidate.
            cascade="all, delete-orphan",
            cascade_backrefs=False,
        ),
        cascade_backrefs=False,
    )
python
{ "resource": "" }
q33748
same_document
train
def same_document(c):
    """Return True if all Mentions in the given candidate are from the same Document.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    reference = _to_span(c[0]).sentence.document
    for idx in range(len(c)):
        doc = _to_span(c[idx]).sentence.document
        # A missing document, or a different one, breaks the invariant.
        if doc is None or doc != reference:
            return False
    return True
python
{ "resource": "" }
q33749
same_table
train
def same_table(c):
    """Return True if all Mentions in the given candidate are from the same Table.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    reference = _to_span(c[0]).sentence.table
    for idx in range(len(c)):
        span = _to_span(c[idx])
        # Every mention must be tabular and share the reference table.
        if not span.sentence.is_tabular():
            return False
        if span.sentence.table != reference:
            return False
    return True
python
{ "resource": "" }
q33750
same_row
train
def same_row(c):
    """Return True if all Mentions in the given candidate are from the same Row.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    # Being in the same table is a precondition for row alignment.
    if not same_table(c):
        return False
    reference = _to_span(c[0]).sentence
    return all(
        is_row_aligned(_to_span(c[idx]).sentence, reference)
        for idx in range(len(c))
    )
python
{ "resource": "" }
q33751
same_col
train
def same_col(c):
    """Return True if all Mentions in the given candidate are from the same Col.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    # Being in the same table is a precondition for column alignment.
    if not same_table(c):
        return False
    reference = _to_span(c[0]).sentence
    return all(
        is_col_aligned(_to_span(c[idx]).sentence, reference)
        for idx in range(len(c))
    )
python
{ "resource": "" }
q33752
is_tabular_aligned
train
def is_tabular_aligned(c):
    """Return True if all Mentions in the given candidate are from the same Row or Col.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    # BUG FIX: the original returned `same_table(c) and (<generator>)`.
    # A generator object is always truthy, so the per-mention alignment
    # condition was never actually evaluated and any same-table candidate
    # passed.  Wrapping the generator in all() restores the intended check:
    # every mention must be row- or col-aligned with the first mention.
    return same_table(c) and all(
        is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
        or is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
        for i in range(len(c))
    )
python
{ "resource": "" }
q33753
same_cell
train
def same_cell(c):
    """Return True if all Mentions in the given candidate are from the same Cell.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    reference = _to_span(c[0]).sentence.cell
    for idx in range(len(c)):
        cell = _to_span(c[idx]).sentence.cell
        # A missing cell, or a different one, breaks the invariant.
        if cell is None or cell != reference:
            return False
    return True
python
{ "resource": "" }
q33754
same_sentence
train
def same_sentence(c):
    """Return True if all Mentions in the given candidate are from the same Sentence.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    reference = _to_span(c[0]).sentence
    for idx in range(len(c)):
        sentence = _to_span(c[idx]).sentence
        # A missing sentence, or a different one, breaks the invariant.
        if sentence is None or sentence != reference:
            return False
    return True
python
{ "resource": "" }
q33755
get_max_col_num
train
def get_max_col_num(mention):
    """Return the largest column number that a Mention occupies.

    :param mention: The Mention to evaluate. If a candidate is given, default
        to its last Mention.
    :rtype: integer or None
    """
    # Use the last mention of a candidate (idx=-1).
    span = _to_span(mention, idx=-1)
    if not span.sentence.is_tabular():
        return None
    return span.sentence.cell.col_end
python
{ "resource": "" }
q33756
get_min_col_num
train
def get_min_col_num(mention):
    """Return the lowest column number that a Mention occupies.

    :param mention: The Mention to evaluate. If a candidate is given, default
        to its first Mention.
    :rtype: integer or None
    """
    span = _to_span(mention)
    if not span.sentence.is_tabular():
        return None
    return span.sentence.cell.col_start
python
{ "resource": "" }
q33757
get_min_row_num
train
def get_min_row_num(mention):
    """Return the lowest row number that a Mention occupies.

    :param mention: The Mention to evaluate. If a candidate is given, default
        to its first Mention.
    :rtype: integer or None
    """
    span = _to_span(mention)
    if not span.sentence.is_tabular():
        return None
    return span.sentence.cell.row_start
python
{ "resource": "" }
q33758
get_sentence_ngrams
train
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
    """Get the ngrams that are in the Sentence of the given Mention, not including
    itself.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose Sentence is being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        # A window of 100 tokens on each side effectively covers the whole
        # sentence while excluding the mention itself.
        yield from get_left_ngrams(
            span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
        )
        yield from get_right_ngrams(
            span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
        )
python
{ "resource": "" }
q33759
get_neighbor_sentence_ngrams
train
def get_neighbor_sentence_ngrams(
    mention, d=1, attrib="words", n_min=1, n_max=1, lower=True
):
    """Get the ngrams that are in the neighoring Sentences of the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose neighbor Sentences are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        anchor = span.sentence
        for sentence in anchor.document.sentences:
            # Skip the mention's own sentence and anything farther than d
            # sentence positions away.
            if sentence == anchor:
                continue
            if abs(sentence.position - anchor.position) > d:
                continue
            yield from tokens_to_ngrams(
                getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
            )
python
{ "resource": "" }
q33760
get_cell_ngrams
train
def get_cell_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
    """Get the ngrams that are in the Cell of the given mention, not including
    itself.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose Cell is being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        # Always include the rest of the mention's own sentence.
        yield from get_sentence_ngrams(
            span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
        )
        if not span.sentence.is_tabular():
            continue
        # Then include every other sentence that shares the mention's cell.
        for sentence in _get_table_cells(span.sentence.table)[span.sentence.cell]:
            if sentence == span.sentence:
                continue
            yield from tokens_to_ngrams(
                getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
            )
python
{ "resource": "" }
q33761
get_neighbor_cell_ngrams
train
def get_neighbor_cell_ngrams(
    mention, dist=1, directions=False, attrib="words", n_min=1, n_max=1, lower=True
):
    """
    Get the ngrams from all Cells that are within a given Cell distance in one
    direction from the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched. If `directions=True``, each ngram will be returned with a
    direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}.

    :param mention: The Mention whose neighbor Cells are being searched
    :param dist: The Cell distance within which a neighbor Cell must be to be
        considered
    :param directions: A Boolean expressing whether or not to return the
        direction of each ngram
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams (or (ngram, direction) tuples if
        directions=True)
    """
    # TODO: Fix this to be more efficient (optimize with SQL query)
    spans = _to_spans(mention)
    for span in spans:
        # Ngrams from the mention's own sentence are always included first.
        for ngram in get_sentence_ngrams(
            span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
        ):
            yield ngram
        if span.sentence.is_tabular():
            root_cell = span.sentence.cell
            # Candidate neighbors: all sentences row- or col-aligned with
            # the root cell.
            for sentence in chain.from_iterable(
                [
                    _get_aligned_sentences(root_cell, "row"),
                    _get_aligned_sentences(root_cell, "col"),
                ]
            ):
                # Signed offsets from the root cell (absolute=False keeps
                # the sign so a direction can be derived below).
                row_diff = min_row_diff(sentence, root_cell, absolute=False)
                col_diff = min_col_diff(sentence, root_cell, absolute=False)
                # Keep only cells offset along exactly one axis (logical
                # XOR of row_diff/col_diff) and within `dist` of the root.
                if (
                    (row_diff or col_diff)
                    and not (row_diff and col_diff)
                    and abs(row_diff) + abs(col_diff) <= dist
                ):
                    if directions:
                        direction = ""
                        if col_diff == 0:
                            # NOTE(review): labels assume a positive
                            # row_diff means the neighbor is above the root
                            # cell and a positive col_diff means it is to
                            # the right — confirm against the sign
                            # convention of min_row_diff/min_col_diff.
                            if 0 < row_diff and row_diff <= dist:
                                direction = "UP"
                            elif 0 > row_diff and row_diff >= -dist:
                                direction = "DOWN"
                        elif row_diff == 0:
                            if 0 < col_diff and col_diff <= dist:
                                direction = "RIGHT"
                            elif 0 > col_diff and col_diff >= -dist:
                                direction = "LEFT"
                        for ngram in tokens_to_ngrams(
                            getattr(sentence, attrib),
                            n_min=n_min,
                            n_max=n_max,
                            lower=lower,
                        ):
                            yield (ngram, direction)
                    else:
                        for ngram in tokens_to_ngrams(
                            getattr(sentence, attrib),
                            n_min=n_min,
                            n_max=n_max,
                            lower=lower,
                        ):
                            yield ngram
python
{ "resource": "" }
q33762
get_col_ngrams
train
def get_col_ngrams(
    mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True
):
    """Get the ngrams from all Cells that are in the same column as the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose column Cells are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param spread: The number of cols left and right to also consider "aligned".
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        # Delegate to the shared axis helper with the column axis.
        yield from _get_axis_ngrams(
            span,
            axis="col",
            attrib=attrib,
            n_min=n_min,
            n_max=n_max,
            spread=spread,
            lower=lower,
        )
python
{ "resource": "" }
q33763
get_aligned_ngrams
train
def get_aligned_ngrams(
    mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True
):
    """Get the ngrams from all Cells in the same row or column as the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose row and column Cells are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param spread: The number of rows/cols above/below/left/right to also
        consider "aligned".
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        # Row-aligned ngrams first, then column-aligned ngrams.
        yield from get_row_ngrams(
            span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
        )
        yield from get_col_ngrams(
            span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
        )
python
{ "resource": "" }
q33764
get_head_ngrams
train
def get_head_ngrams(mention, axis=None, attrib="words", n_min=1, n_max=1, lower=True):
    """Get the ngrams from the cell in the head of the row or column.

    More specifically, this returns the ngrams in the leftmost cell in a row
    and/or the ngrams in the topmost cell in the column, depending on the axis
    parameter.

    Note that if a candidate is passed in, all of its Mentions will be
    searched.

    :param mention: The Mention whose head Cells are being returned
    :param axis: Which axis {'row', 'col'} to search. If None, then both row
        and col are searched.
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    spans = _to_spans(mention)
    # Search both axes unless a specific one was requested.
    axes = (axis,) if axis else ("row", "col")
    for span in spans:
        if span.sentence.is_tabular():
            for axis in axes:
                # If the mention already sits in the head cell of this axis
                # (its start index on the other axis is 0), there is no
                # separate head cell to report.  NOTE(review): this `return`
                # terminates the entire generator, also skipping any
                # remaining axes and spans — confirm `continue` was not
                # intended here.
                if getattr(span.sentence, _other_axis(axis) + "_start") == 0:
                    return
                for sentence in getattr(
                    _get_head_cell(span.sentence.cell, axis), "sentences", []
                ):
                    for ngram in tokens_to_ngrams(
                        getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
                    ):
                        yield ngram
python
{ "resource": "" }
q33765
_get_table_cells
train
def _get_table_cells(table):
    """Helper function with caching for table cells and the cells' sentences.

    This function significantly improves the speed of `get_row_ngrams`
    primarily by reducing the number of queries that are made (which were
    previously the bottleneck.  Rather than taking a single mention, then its
    sentence, then its table, then all the cells in the table, then all the
    sentences in each cell, and performing operations on that series of
    queries, this performs a single query for all the sentences in a table
    and returns all of the cells and the cells sentences directly.

    :param table: the Table object to cache.
    :return: an iterator of (Cell, [Sentence._asdict(), ...]) tuples.
    """
    # Group every tabular sentence of the table under its cell.
    cell_to_sentences = defaultdict(list)
    for sentence in table.sentences:
        if not sentence.is_tabular():
            continue
        cell_to_sentences[sentence.cell].append(sentence)
    return cell_to_sentences
python
{ "resource": "" }
q33766
SoftCrossEntropyLoss.forward
train
def forward(self, input, target):
    """
    Calculate the loss

    :param input: prediction logits
    :param target: target probabilities
    :return: loss
    """
    n, k = input.shape
    losses = input.new_zeros(n)

    # Per-sample soft cross entropy: weight the hard CE loss against each
    # class by the target probability of that class, then sum over classes.
    for cls in range(k):
        cls_idx = input.new_full((n,), cls, dtype=torch.long)
        cls_loss = F.cross_entropy(input, cls_idx, reduction="none")
        if self.weight is not None:
            cls_loss = cls_loss * self.weight[cls]
        losses = losses + target[:, cls].float() * cls_loss

    # Apply the configured reduction.
    if self.reduction == "mean":
        return losses.mean()
    if self.reduction == "sum":
        return losses.sum()
    if self.reduction == "none":
        return losses
    raise ValueError(f"Unrecognized reduction: {self.reduction}")
python
{ "resource": "" }
q33767
bbox_horz_aligned
train
def bbox_horz_aligned(box1, box2):
    """
    Returns true if the vertical center point of either span is within the
    vertical range of the other
    """
    if not (box1 and box2):
        return False
    # Shrink each box by 1.5pt at the top and bottom before testing overlap
    # so that boxes that merely touch at the border do not count as aligned
    # (the commented-out original used plain overlap).
    # return box1.top <= box2.bottom and box2.top <= box1.bottom
    top1 = box1.top + 1.5
    top2 = box2.top + 1.5
    bottom1 = box1.bottom - 1.5
    bottom2 = box2.bottom - 1.5
    # De Morgan of `not (top1 > bottom2 or top2 > bottom1)`.
    return top1 <= bottom2 and top2 <= bottom1
python
{ "resource": "" }
q33768
bbox_vert_aligned
train
def bbox_vert_aligned(box1, box2):
    """
    Returns true if the horizontal center point of either span is within the
    horizontal range of the other
    """
    if not (box1 and box2):
        return False
    # Shrink each box by 1.5pt on the left and right before testing overlap
    # so that boxes that merely touch at the border do not count as aligned
    # (the commented-out original used plain overlap).
    # return box1.left <= box2.right and box2.left <= box1.right
    left1 = box1.left + 1.5
    left2 = box2.left + 1.5
    right1 = box1.right - 1.5
    right2 = box2.right - 1.5
    # De Morgan of `not (left1 > right2 or left2 > right1)`.
    return left1 <= right2 and left2 <= right1
python
{ "resource": "" }
q33769
bbox_vert_aligned_left
train
def bbox_vert_aligned_left(box1, box2):
    """
    Returns true if the left boundary of both boxes is within 2 pts
    """
    if not (box1 and box2):
        return False
    delta = box1.left - box2.left
    # Equivalent to abs(delta) <= 2.
    return -2 <= delta <= 2
python
{ "resource": "" }
q33770
bbox_vert_aligned_right
train
def bbox_vert_aligned_right(box1, box2):
    """
    Returns true if the right boundary of both boxes is within 2 pts
    """
    if not (box1 and box2):
        return False
    delta = box1.right - box2.right
    # Equivalent to abs(delta) <= 2.
    return -2 <= delta <= 2
python
{ "resource": "" }
q33771
bbox_vert_aligned_center
train
def bbox_vert_aligned_center(box1, box2):
    """
    Returns true if the center of both boxes is within 5 pts
    """
    if not (box1 and box2):
        return False
    # Compare horizontal midpoints with a 5pt tolerance.
    center1 = (box1.right + box1.left) / 2.0
    center2 = (box2.right + box2.left) / 2.0
    return abs(center1 - center2) <= 5
python
{ "resource": "" }
q33772
_NgramMatcher._is_subspan
train
def _is_subspan(self, m, span): """ Tests if mention m is subspan of span, where span is defined specific to mention type. """ return ( m.sentence.id == span[0] and m.char_start >= span[1] and m.char_end <= span[2] )
python
{ "resource": "" }
q33773
_NgramMatcher._get_span
train
def _get_span(self, m): """ Gets a tuple that identifies a span for the specific mention class that m belongs to. """ return (m.sentence.id, m.char_start, m.char_end)
python
{ "resource": "" }
q33774
_FigureMatcher._is_subspan
train
def _is_subspan(self, m, span): """Tests if mention m does exist""" return m.figure.document.id == span[0] and m.figure.position == span[1]
python
{ "resource": "" }
q33775
_FigureMatcher._get_span
train
def _get_span(self, m): """ Gets a tuple that identifies a figure for the specific mention class that m belongs to. """ return (m.figure.document.id, m.figure.position)
python
{ "resource": "" }
q33776
candidate_subclass
train
def candidate_subclass(
    class_name, args, table_name=None, cardinality=None, values=None
):
    """
    Creates and returns a Candidate subclass with provided argument names,
    which are Context type. Creates the table in DB if does not exist yet.

    Import using:

    .. code-block:: python

        from fonduer.candidates.models import candidate_subclass

    :param class_name: The name of the class, should be "camel case" e.g.
        NewCandidate
    :param args: A list of names of constituent arguments, which refer to the
        Contexts--representing mentions--that comprise the candidate
    :param table_name: The name of the corresponding table in DB; if not
        provided, is converted from camel case by default, e.g. new_candidate
    :param cardinality: The cardinality of the variable corresponding to the
        Candidate. By default is 2 i.e. is a binary value, e.g. is or is not
        a true mention.
    :param values: The list of possible label values for the Candidate.
        Mutually constrained with ``cardinality`` (their lengths must match
        when both are given); integers (other than bools) are rejected.
        Defaults to [True, False].
    """
    if table_name is None:
        table_name = camel_to_under(class_name)

    # If cardinality and values are None, default to binary classification
    if cardinality is None and values is None:
        values = [True, False]
        cardinality = 2
    # Else use values if present, and validate proper input
    elif values is not None:
        if cardinality is not None and len(values) != cardinality:
            raise ValueError("Number of values must match cardinality.")
        if None in values:
            raise ValueError("`None` is a protected value.")
        # Note that bools are instances of ints in Python...
        if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
            raise ValueError(
                (
                    "Default usage of values is consecutive integers."
                    "Leave values unset if trying to define values as integers."
                )
            )
        cardinality = len(values)
    # If cardinality is specified but not values, fill in with ints
    elif cardinality is not None:
        values = list(range(cardinality))

    # Memoization: re-declaring an identical subclass returns the cached
    # class; re-declaring the same name with a different spec is an error.
    class_spec = (args, table_name, cardinality, values)
    if class_name in candidate_subclasses:
        if class_spec == candidate_subclasses[class_name][1]:
            return candidate_subclasses[class_name][0]
        else:
            raise ValueError(
                f"Candidate subclass {class_name} "
                f"already exists in memory with incompatible "
                f"specification: {candidate_subclasses[class_name][1]}"
            )
    else:
        # Set the class attributes == the columns in the database
        class_attribs = {
            # Declares name for storage table
            "__tablename__": table_name,
            # Connects candidate_subclass records to generic Candidate records
            "id": Column(
                Integer,
                ForeignKey("candidate.id", ondelete="CASCADE"),
                primary_key=True,
            ),
            # Store values & cardinality information in the class only
            "values": values,
            "cardinality": cardinality,
            # Polymorphism information for SQLAlchemy
            "__mapper_args__": {"polymorphic_identity": table_name},
            # Helper method to get argument names
            "__argnames__": [_.__tablename__ for _ in args],
            "mentions": args,
        }

        # Every candidate row is linked back to its source document so that
        # deleting a document cascades to its candidates.
        class_attribs["document_id"] = Column(
            Integer, ForeignKey("document.id", ondelete="CASCADE")
        )
        class_attribs["document"] = relationship(
            "Document",
            backref=backref(table_name + "s", cascade="all, delete-orphan"),
            foreign_keys=class_attribs["document_id"],
        )

        # Create named arguments, i.e. the entity mentions comprising the
        # relation mention.
        unique_args = []
        for arg in args:
            # Primary arguments are constituent Contexts, and their ids
            class_attribs[arg.__tablename__ + "_id"] = Column(
                Integer, ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE")
            )
            class_attribs[arg.__tablename__] = relationship(
                arg.__name__,
                backref=backref(
                    table_name + "_" + arg.__tablename__ + "s",
                    cascade_backrefs=False,
                    cascade="all, delete-orphan",
                ),
                cascade_backrefs=False,
                foreign_keys=class_attribs[arg.__tablename__ + "_id"],
            )
            unique_args.append(class_attribs[arg.__tablename__ + "_id"])

        # Add unique constraints to the arguments
        class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)

        # Create class
        C = type(class_name, (Candidate,), class_attribs)

        # Create table in DB
        if not Meta.engine.dialect.has_table(Meta.engine, table_name):
            C.__table__.create(bind=Meta.engine)

        candidate_subclasses[class_name] = C, class_spec

        return C
python
{ "resource": "" }
q33777
CandidateExtractor.apply
train
def apply(self, docs, split=0, clear=True, parallelism=None, progress_bar=True):
    """Run the CandidateExtractor.

    :Example: To extract candidates from a set of training documents using
        4 cores::

            candidate_extractor.apply(train_docs, split=0, parallelism=4)

    :param docs: Set of documents to extract from.
    :param split: Which split to assign the extracted Candidates to.
    :type split: int
    :param clear: Whether or not to clear the existing Candidates beforehand.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the
        CandidateExtractor if it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # Delegate to the generic UDFRunner machinery.
    super(CandidateExtractor, self).apply(
        docs,
        split=split,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
python
{ "resource": "" }
q33778
CandidateExtractor.clear
train
def clear(self, split):
    """Delete Candidates of each class initialized with the CandidateExtractor
    from given split the database.

    :param split: Which split to clear.
    :type split: int
    """
    for candidate_class in self.candidate_classes:
        table = candidate_class.__tablename__
        logger.info(f"Clearing table {table} (split {split})")
        # Delete only the rows of this candidate type within the split.
        query = (
            self.session.query(Candidate)
            .filter(Candidate.type == table)
            .filter(Candidate.split == split)
        )
        query.delete(synchronize_session="fetch")
python
{ "resource": "" }
q33779
CandidateExtractor.clear_all
train
def clear_all(self, split):
    """Delete ALL Candidates from given split the database.

    :param split: Which split to clear.
    :type split: int
    """
    logger.info("Clearing ALL Candidates.")
    # Unlike clear(), this removes every candidate type in the split.
    query = self.session.query(Candidate).filter(Candidate.split == split)
    query.delete(synchronize_session="fetch")
python
{ "resource": "" }
q33780
CandidateExtractor.get_candidates
train
def get_candidates(self, docs=None, split=0, sort=False):
    """Return a list of lists of the candidates associated with this extractor.

    Each list of the return will contain the candidates for one of the
    candidate classes associated with the CandidateExtractor.

    :param docs: If provided, return candidates from these documents from all
        splits.
    :type docs: list, tuple of ``Documents``.
    :param split: If docs is None, then return all the candidates from this
        split.
    :type split: int
    :param sort: If sort is True, then return all candidates sorted by
        stable_id.
    :type sort: bool
    :return: Candidates for each candidate_class.
    :rtype: List of lists of ``Candidates``.
    """
    result = []
    if docs:
        # Accept a single Document as well as a list/tuple of them.
        docs = docs if isinstance(docs, (list, tuple)) else [docs]
        # Get cands from all splits
        for candidate_class in self.candidate_classes:
            cands = (
                self.session.query(candidate_class)
                .filter(candidate_class.document_id.in_([doc.id for doc in docs]))
                .order_by(candidate_class.id)
                .all()
            )
            if sort:
                # Sort by the concatenated stable ids of each candidate's
                # constituent mentions for a deterministic ordering.
                cands = sorted(
                    cands,
                    key=lambda x: " ".join(
                        [x[i][0].get_stable_id() for i in range(len(x))]
                    ),
                )
            result.append(cands)
    else:
        for candidate_class in self.candidate_classes:
            # Filter by candidate_ids in a particular split
            sub_query = (
                self.session.query(Candidate.id)
                .filter(Candidate.split == split)
                .subquery()
            )
            cands = (
                self.session.query(candidate_class)
                .filter(candidate_class.id.in_(sub_query))
                .order_by(candidate_class.id)
                .all()
            )
            if sort:
                cands = sorted(
                    cands,
                    key=lambda x: " ".join(
                        [x[i][0].get_stable_id() for i in range(len(x))]
                    ),
                )
            result.append(cands)
    return result
python
{ "resource": "" }
q33781
SparseLinear.reset_parameters
train
def reset_parameters(self):
    """Reinitiate the weight parameters.
    """
    # Uniform init bound scaled by fan-in, like torch.nn.Linear.
    bound = 1.0 / math.sqrt(self.num_features)
    self.weight.weight.data.uniform_(-bound, bound)
    if self.bias is not None:
        self.bias.data.uniform_(-bound, bound)
    if self.padding_idx is not None:
        # Keep the padding embedding pinned at zero.
        self.weight.weight.data[self.padding_idx].fill_(0)
python
{ "resource": "" }
q33782
save_marginals
train
def save_marginals(session, X, marginals, training=True):
    """Save marginal probabilities for a set of Candidates to db.

    :param X: A list of arbitrary objects with candidate ids accessible via a
        .id attrib
    :param marginals: A dense M x K matrix of marginal probabilities, where
        K is the cardinality of the candidates, OR a M-dim list/array if K=2.
    :param training: If True, these are training marginals / labels; else they
        are saved as end model predictions.

    Note: The marginals for k=0 are not stored, only for k = 1,...,K
    """
    logger = logging.getLogger(__name__)
    # Make sure that we are working with a numpy array
    try:
        shape = marginals.shape
    except Exception:
        marginals = np.array(marginals)
        shape = marginals.shape

    # Handle binary input as M x 1-dim array; assume elements represent
    # positive (k=1) class values
    if len(shape) == 1:
        marginals = np.vstack([1 - marginals, marginals]).T

    # Only add values for classes k=1,...,K; zero-probability entries are
    # sparse and never stored.
    marginal_tuples = []
    for i in range(shape[0]):
        for k in range(1, shape[1] if len(shape) > 1 else 2):
            if marginals[i, k] > 0:
                marginal_tuples.append((i, k, marginals[i, k]))

    # NOTE: This will delete all existing marginals of type `training`
    session.query(Marginal).filter(Marginal.training == training).delete(
        synchronize_session="fetch"
    )

    # Prepare bulk INSERT query
    q = Marginal.__table__.insert()

    # Prepare values
    insert_vals = []
    for i, k, p in marginal_tuples:
        cid = X[i].id
        insert_vals.append(
            {
                "candidate_id": cid,
                "training": training,
                "value": k,
                # We cast p in case its a numpy type, which psycopg2 does not handle
                "probability": float(p),
            }
        )

    # Execute update. Skip the INSERT entirely when there is nothing to add:
    # a bulk insert with an empty value list raises an error.
    if insert_vals:
        session.execute(q, insert_vals)
    session.commit()
    # Report the number of rows actually written, not the number of input rows.
    logger.info(f"Saved {len(insert_vals)} marginals")
python
{ "resource": "" }
q33783
compile_entity_feature_generator
train
def compile_entity_feature_generator():
    """
    Given optional arguments, returns a generator function which accepts an xml
    root and a list of indexes for a mention, and will generate relation
    features for this entity.
    """
    attribs = ["lemma", "dep_label"]
    mention = Mention(0)

    # Basic relation feature templates
    templates = [
        [Indicator(mention, attrib) for attrib in attribs],
        Indicator(mention, "dep_label,lemma"),
        # The *first element on the* path to the root: ngram lemmas along it
        Ngrams(Parents(mention, 3), "lemma", (1, 3)),
        Ngrams(Children(mention), "lemma", (1, 3)),
        # The siblings of the mention
        [LeftNgrams(LeftSiblings(mention), attrib) for attrib in attribs],
        [RightNgrams(RightSiblings(mention), attrib) for attrib in attribs],
    ]

    # return generator function
    return Compile(templates).apply_mention
python
{ "resource": "" }
q33784
get_ddlib_feats
train
def get_ddlib_feats(span, context, idxs):
    """
    Minimalist port of generic mention features from ddlib.

    Results are memoized per span in the module-level ``unary_ddlib_feats``
    cache, keyed by the span's stable_id.
    """
    if span.stable_id not in unary_ddlib_feats:
        feats = set()
        feats.update(_get_seq_features(context, idxs))
        feats.update(_get_window_features(context, idxs))
        unary_ddlib_feats[span.stable_id] = feats

    yield from unary_ddlib_feats[span.stable_id]
python
{ "resource": "" }
q33785
_get_cand_values
train
def _get_cand_values(candidate, key_table):
    """Get the corresponding values for the key_table."""
    # NOTE: Import just before checking to avoid circular imports.
    from fonduer.features.models import FeatureKey
    from fonduer.supervision.models import GoldLabelKey, LabelKey

    if key_table is FeatureKey:
        return candidate.features
    if key_table is LabelKey:
        return candidate.labels
    if key_table is GoldLabelKey:
        return candidate.gold_labels
    raise ValueError(f"{key_table} is not a valid key table.")
python
{ "resource": "" }
q33786
_batch_postgres_query
train
def _batch_postgres_query(table, records): """Break the list into chunks that can be processed as a single statement. Postgres query cannot be too long or it will fail. See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum- length-constraint-for-a-postgres-query :param records: The full list of records to batch. :type records: iterable :param table: The sqlalchemy table. :return: A generator of lists of records. """ if not records: return POSTGRESQL_MAX = 0x3FFFFFFF # Create preamble and measure its length preamble = ( "INSERT INTO " + table.__tablename__ + " (" + ", ".join(records[0].keys()) + ") VALUES (" + ", ".join(["?"] * len(records[0].keys())) + ")\n" ) start = 0 end = 0 total_len = len(preamble) while end < len(records): record_len = sum([len(str(v)) for v in records[end].values()]) # Pre-increment to include the end element in the slice end += 1 if total_len + record_len >= POSTGRESQL_MAX: logger.debug(f"Splitting query due to length ({total_len} chars).") yield records[start:end] start = end # Reset the total query length total_len = len(preamble) else: total_len += record_len yield records[start:end]
python
{ "resource": "" }
q33787
get_sparse_matrix_keys
train
def get_sparse_matrix_keys(session, key_table):
    """Return a list of keys for the sparse matrix."""
    query = session.query(key_table).order_by(key_table.name)
    return query.all()
python
{ "resource": "" }
q33788
batch_upsert_records
train
def batch_upsert_records(session, table, records):
    """Batch upsert records into postgresql database."""
    if not records:
        return
    for batch in _batch_postgres_query(table, records):
        # INSERT ... ON CONFLICT DO UPDATE keyed on the table's primary key,
        # overwriting the stored keys/values arrays with the incoming ones.
        stmt = insert(table.__table__)
        stmt = stmt.on_conflict_do_update(
            constraint=table.__table__.primary_key,
            set_={
                "keys": stmt.excluded.get("keys"),
                "values": stmt.excluded.get("values"),
            },
        )
        session.execute(stmt, batch)
        session.commit()
python
{ "resource": "" }
q33789
get_docs_from_split
train
def get_docs_from_split(session, candidate_classes, split):
    """Return a list of documents that contain the candidates in the split."""
    # Only grab the docs containing candidates from the given split.
    sub_query = session.query(Candidate.id).filter(Candidate.split == split).subquery()
    split_docs = set()
    for candidate_class in candidate_classes:
        cands_in_split = (
            session.query(candidate_class)
            .filter(candidate_class.id.in_(sub_query))
            .all()
        )
        split_docs.update(cand.document for cand in cands_in_split)
    return split_docs
python
{ "resource": "" }
q33790
get_mapping
train
def get_mapping(session, table, candidates, generator, key_map):
    """Generate map of keys and values for the candidate from the generator.

    :param session: The database session.
    :param table: The table we will be inserting into (i.e. Feature or Label).
    :param candidates: The candidates to get mappings for.
    :param generator: A generator yielding (candidate_id, key, value) tuples.
    :param key_map: A mutable dict which values will be added to as
        {key: [relations]}.
    :type key_map: Dict
    :return: Generator of dictionaries of {"candidate_id": _, "keys": _,
        "values": _}
    :rtype: generator of dict
    """
    for cand in candidates:
        # Start from the values currently stored in the DB for this candidate,
        # so this acts as an update rather than a replacement.
        try:
            existing = session.query(table).filter(table.candidate_id == cand.id).one()
            cand_map = dict(zip(existing.keys, existing.values))
        except NoResultFound:
            cand_map = {}

        for _, key, value in generator(cand):
            # Zero-valued entries are sparse and never stored.
            if value == 0:
                continue
            cand_map[key] = value

        # Record which candidate class each key was seen on.
        tablename = cand.__class__.__tablename__
        for key in cand_map:
            key_map.setdefault(key, set()).add(tablename)

        yield {
            "candidate_id": cand.id,
            "keys": [*cand_map.keys()],
            "values": [*cand_map.values()],
        }
python
{ "resource": "" }
q33791
get_cands_list_from_split
train
def get_cands_list_from_split(session, candidate_classes, doc, split):
    """Return the list of list of candidates from this document based on the split."""
    cands = []
    for candidate_class in candidate_classes:
        query = session.query(candidate_class).filter(
            candidate_class.document_id == doc.id
        )
        # ALL_SPLITS means no split restriction; otherwise narrow to the split.
        if split != ALL_SPLITS:
            query = query.filter(candidate_class.split == split)
        cands.append(query.all())
    return cands
python
{ "resource": "" }
q33792
drop_all_keys
train
def drop_all_keys(session, key_table, candidate_classes):
    """Bulk drop annotation keys for all the candidate_classes in the table.

    Rather than directly dropping the keys, this removes the candidate_classes
    specified for the given keys only. If all candidate_classes are removed for
    a key, the key is dropped.

    :param key_table: The sqlalchemy class to insert into.
    :param candidate_classes: A list of candidate classes to drop.
    """
    if not candidate_classes:
        return

    # Work with table-name strings, since key rows store candidate classes
    # as an array of strings.
    candidate_classes = set([c.__tablename__ for c in candidate_classes])

    # Select all rows that contain ANY of the candidate_classes
    all_rows = (
        session.query(key_table)
        .filter(
            key_table.candidate_classes.overlap(cast(candidate_classes, ARRAY(String)))
        )
        .all()
    )
    to_delete = set()
    to_update = []

    # Strip the target classes from each matching key row. Keys left with no
    # classes are deleted outright; the rest are updated in place.
    for row in all_rows:
        # Remove the selected candidate_classes. If empty, mark for deletion.
        row.candidate_classes = list(
            set(row.candidate_classes) - set(candidate_classes)
        )
        if len(row.candidate_classes) == 0:
            to_delete.add(row.name)
        else:
            to_update.append(
                {"name": row.name, "candidate_classes": row.candidate_classes}
            )

    # Perform all deletes
    if to_delete:
        query = session.query(key_table).filter(key_table.name.in_(to_delete))
        query.delete(synchronize_session="fetch")

    # Perform all updates via batched INSERT ... ON CONFLICT DO UPDATE,
    # keyed on the table's primary key.
    if to_update:
        for batch in _batch_postgres_query(key_table, to_update):
            stmt = insert(key_table.__table__)
            stmt = stmt.on_conflict_do_update(
                constraint=key_table.__table__.primary_key,
                set_={
                    "name": stmt.excluded.get("name"),
                    "candidate_classes": stmt.excluded.get("candidate_classes"),
                },
            )
            session.execute(stmt, batch)

    session.commit()
python
{ "resource": "" }
q33793
drop_keys
train
def drop_keys(session, key_table, keys): """Bulk drop annotation keys to the specified table. Rather than directly dropping the keys, this removes the candidate_classes specified for the given keys only. If all candidate_classes are removed for a key, the key is dropped. :param key_table: The sqlalchemy class to insert into. :param keys: A map of {name: [candidate_classes]}. """ # Do nothing if empty if not keys: return for key_batch in _batch_postgres_query( key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()] ): all_rows = ( session.query(key_table) .filter(key_table.name.in_([key["name"] for key in key_batch])) .all() ) to_delete = set() to_update = [] # All candidate classes will be the same for all keys, so just look at one candidate_classes = key_batch[0]["candidate_classes"] for row in all_rows: # Remove the selected candidate_classes. If empty, mark for deletion. row.candidate_classes = list( set(row.candidate_classes) - set(candidate_classes) ) if len(row.candidate_classes) == 0: to_delete.add(row.name) else: to_update.append( {"name": row.name, "candidate_classes": row.candidate_classes} ) # Perform all deletes if to_delete: query = session.query(key_table).filter(key_table.name.in_(to_delete)) query.delete(synchronize_session="fetch") # Perform all updates if to_update: stmt = insert(key_table.__table__) stmt = stmt.on_conflict_do_update( constraint=key_table.__table__.primary_key, set_={ "name": stmt.excluded.get("name"), "candidate_classes": stmt.excluded.get("candidate_classes"), }, ) session.execute(stmt, to_update) session.commit()
python
{ "resource": "" }
q33794
upsert_keys
train
def upsert_keys(session, key_table, keys):
    """Bulk add annotation keys to the specified table.

    :param key_table: The sqlalchemy class to insert into.
    :param keys: A map of {name: [candidate_classes]}.
    :raises Exception: Re-raises the last database error once retries are
        exhausted (previously the failure was swallowed and retried forever).
    """
    # Do nothing if empty
    if not keys:
        return

    # Bound the retries: the old `while True` loop never terminated on a
    # persistent error.
    max_attempts = 3

    for key_batch in _batch_postgres_query(
        key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()]
    ):
        # INSERT ... ON CONFLICT DO UPDATE keyed on the table's primary key.
        stmt = insert(key_table.__table__)
        stmt = stmt.on_conflict_do_update(
            constraint=key_table.__table__.primary_key,
            set_={
                "name": stmt.excluded.get("name"),
                "candidate_classes": stmt.excluded.get("candidate_classes"),
            },
        )
        for attempt in range(1, max_attempts + 1):
            try:
                session.execute(stmt, key_batch)
                session.commit()
                break
            except Exception as e:
                logger.debug(e)
                # Roll back so the session leaves its failed-transaction
                # state; without this every retry fails the same way.
                session.rollback()
                if attempt == max_attempts:
                    raise
python
{ "resource": "" }
q33795
Labeler.update
train
def update(self, docs=None, split=0, lfs=None, parallelism=None, progress_bar=True):
    """Update the labels of the specified candidates based on the provided LFs.

    :param docs: If provided, apply the updated LFs to all the candidates in
        these documents.
    :param split: If docs is None, apply the updated LFs to the candidates in
        this particular split.
    :param lfs: A list of lists of labeling functions to update. Each list
        should correspond with the candidate_classes used to initialize the
        Labeler.
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Labeler if it
        is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # Validate the LFs before delegating.
    if lfs is None:
        raise ValueError("Please provide a list of lists of labeling functions.")
    if len(lfs) != len(self.candidate_classes):
        raise ValueError("Please provide LFs for each candidate class.")

    # An update is just an apply() in training mode that keeps existing labels.
    apply_args = dict(
        docs=docs,
        split=split,
        lfs=lfs,
        train=True,
        clear=False,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
    self.apply(**apply_args)
python
{ "resource": "" }
q33796
Labeler.apply
train
def apply(
    self,
    docs=None,
    split=0,
    train=False,
    lfs=None,
    clear=True,
    parallelism=None,
    progress_bar=True,
):
    """Apply the labels of the specified candidates based on the provided LFs.

    :param docs: If provided, apply the LFs to all the candidates in these
        documents.
    :param split: If docs is None, apply the LFs to the candidates in this
        particular split.
    :type split: int
    :param train: Whether or not to update the global key set of labels and
        the labels of candidates.
    :type train: bool
    :param lfs: A list of lists of labeling functions to apply. Each list
        should correspond with the candidate_classes used to initialize the
        Labeler.
    :type lfs: list of lists
    :param clear: Whether or not to clear the labels table before applying
        these LFs.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Labeler if it
        is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    :raises ValueError: If labeling functions are not provided for each
        candidate class.
    """
    if lfs is None:
        raise ValueError("Please provide a list of labeling functions.")
    if len(lfs) != len(self.candidate_classes):
        raise ValueError("Please provide LFs for each candidate class.")
    self.lfs = lfs

    if docs:
        # Explicitly provided docs are labeled across all splits.
        split = ALL_SPLITS
        target_docs = docs
    else:
        # Only grab the docs containing candidates from the given split.
        target_docs = get_docs_from_split(self.session, self.candidate_classes, split)

    super(Labeler, self).apply(
        target_docs,
        split=split,
        train=train,
        lfs=self.lfs,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
    # Needed to sync the bulk operations
    self.session.commit()
python
{ "resource": "" }
q33797
Labeler.clear
train
def clear(self, train, split, lfs=None):
    """Delete Labels of each class from the database.

    :param train: Whether or not to clear the LabelKeys.
    :type train: bool
    :param split: Which split of candidates to clear labels from.
    :type split: int
    :param lfs: This parameter is ignored.
    """
    # Clear Labels for the candidates in the split passed in.
    logger.info(f"Clearing Labels (split {split})")
    candidate_ids = (
        self.session.query(Candidate.id).filter(Candidate.split == split).subquery()
    )
    (
        self.session.query(Label)
        .filter(Label.candidate_id.in_(candidate_ids))
        .delete(synchronize_session="fetch")
    )

    # Delete all old annotation keys
    if train:
        logger.debug(f"Clearing all LabelKeys from {self.candidate_classes}...")
        drop_all_keys(self.session, LabelKey, self.candidate_classes)
python
{ "resource": "" }
q33798
Labeler.clear_all
train
def clear_all(self):
    """Delete all Labels."""
    logger.info("Clearing ALL Labels and LabelKeys.")
    # Labels first, then their keys, matching the original order.
    for table in (Label, LabelKey):
        self.session.query(table).delete(synchronize_session="fetch")
python
{ "resource": "" }
q33799
LabelerUDF._f_gen
train
def _f_gen(self, c): """Convert lfs into a generator of id, name, and labels. In particular, catch verbose values and convert to integer ones. """ lf_idx = self.candidate_classes.index(c.__class__) labels = lambda c: [(c.id, lf.__name__, lf(c)) for lf in self.lfs[lf_idx]] for cid, lf_key, label in labels(c): # Note: We assume if the LF output is an int, it is already # mapped correctly if isinstance(label, int): yield cid, lf_key, label # None is a protected LF output value corresponding to 0, # representing LF abstaining elif label is None: yield cid, lf_key, 0 elif label in c.values: if c.cardinality > 2: yield cid, lf_key, c.values.index(label) + 1 # Note: Would be nice to not special-case here, but for # consistency we leave binary LF range as {-1,0,1} else: val = 1 if c.values.index(label) == 0 else -1 yield cid, lf_key, val else: raise ValueError( f"Can't parse label value {label} for candidate values {c.values}" )
python
{ "resource": "" }