import json
import re
from string import Template
from typing import List, Tuple

import torch
|
|
|
|
def remove_citations(sent):
    """Strip inline citation markers (e.g. " [3]") and stray " |" separators from a sentence."""
    # Remove citations preceded by a space first, then any remaining bare ones.
    cleaned = re.sub(r" \[\d+", "", sent)
    cleaned = re.sub(r"\[\d+", "", cleaned)
    # Drop leftover closing brackets and pipe separators.
    return cleaned.replace(" |", "").replace("]", "")
|
|
|
|
class BaseGenerator:
    """Base class for prompt-driven generators.

    Loads an instruction string and a ``string.Template`` prompt format from
    a JSON config file and provides shared document formatting for subclasses.
    """

    def __init__(self, prompt_path: str):
        self.prompt_path = prompt_path
        self.instruction, self.prompt_format = self._load_prompt(prompt_path)

    def _load_prompt(self, path: str):
        """Read the JSON prompt config at *path*.

        Returns:
            Tuple of (instruction string, ``Template`` built from the
            config's ``prompt_format`` field).
        """
        # Explicit encoding so the JSON config parses identically on every
        # platform (the default encoding is platform-dependent).
        with open(path, "r", encoding="utf-8") as f:
            prompt_config = json.load(f)

        instruction = prompt_config["instruction"]
        prompt_format = Template(prompt_config["prompt_format"])
        return instruction, prompt_format

    def _format_documents(self, doc_list: list) -> str:
        """Render docs as 1-based 'Document [i](Title: ...): ...' lines.

        Each element of *doc_list* must be a mapping with ``title`` and
        ``text`` keys (a missing key raises ``KeyError``, as before).
        """
        # "".join avoids quadratic += string concatenation for long doc lists.
        return "".join(
            f"Document [{idx}](Title: {doc['title']}): {doc['text']}\n"
            for idx, doc in enumerate(doc_list, start=1)
        )
|
|
|
|
class SentenceGenerator(BaseGenerator):
    """Generates a single answer sentence from a question and retrieved docs."""

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, question: str, doc_list: list) -> str:
        """Fill the prompt template with the instruction, question and documents."""
        return self.prompt_format.substitute(
            instruction=self.instruction,
            question=question,
            documents=self._format_documents(doc_list),
        )

    @torch.inference_mode()
    def generate(self, prompt: str) -> Tuple[str, torch.Tensor]:
        """Generate one sentence for *prompt*.

        Returns:
            Tuple of (cleaned sentence with citations stripped, full output
            token-id tensor — prompt tokens followed by the continuation).
            Note: the original annotation ``(str, List[int])`` was a tuple
            literal, not a typing annotation; ``outputs[0]`` is a tensor.
        """
        input_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"].to(
            self.model.device
        )
        prompt_len = input_ids.shape[1]

        # 29889 is presumably the '.' token id for this tokenizer (e.g. Llama),
        # so generation stops at the end of the first sentence — TODO confirm.
        outputs = self.model.generate(
            input_ids,
            max_new_tokens=128,
            eos_token_id=[self.tokenizer.eos_token_id, 29889],
        ).to("cpu")

        sentence = self.tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()

        # Truncate anything after the first citation-terminated sentence "...].".
        if "]." in sentence:
            sentence = sentence[: sentence.index("].") + 2]

        # Strip the citation markers so the returned sentence reads cleanly.
        sentence = remove_citations(sentence)

        return sentence, outputs[0]
|
|
|
|
class CitationGenerator(BaseGenerator):
    """Adds citations to a sentence, given its supporting documents."""

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, doc_list: list, sentence: str) -> str:
        """Build the citation prompt from instruction, documents and the sentence."""
        return self.prompt_format.substitute(
            instruction=self.instruction,
            documents=self._format_documents(doc_list),
            sentence=sentence,
        )

    @torch.inference_mode()
    def generate(self, prompt: str) -> str:
        """Run the model on *prompt* and return the decoded cited sentence."""
        encoded = self.tokenizer(prompt, return_tensors="pt")
        input_ids = encoded["input_ids"].to(self.model.device)
        n_prompt_tokens = input_ids.shape[1]

        generated = self.model.generate(input_ids, max_new_tokens=128).to("cpu")

        # Decode only the continuation, skipping the prompt tokens.
        decoded = self.tokenizer.decode(
            generated[0][n_prompt_tokens:], skip_special_tokens=True
        )
        return decoded.strip()
|
|
class QueryGenerator(BaseGenerator):
    """Produces retrieval queries for a claim within a question's context."""

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, question: str, context: str, claim: str, query_num: int) -> str:
        """Substitute question, context, claim and query_num into the template."""
        return self.prompt_format.substitute(
            question=question,
            context=context,
            claim=claim,
            query_num=query_num,
        )

    @torch.inference_mode()
    def generate(self, prompt: str) -> str:
        """Generate and return the decoded query text for *prompt*."""
        tokenized = self.tokenizer(prompt, return_tensors="pt")
        input_ids = tokenized["input_ids"].to(self.model.device)
        prompt_length = input_ids.shape[1]

        output_ids = self.model.generate(input_ids, max_new_tokens=256).to("cpu")

        # Decode only the newly generated tokens after the prompt.
        return self.tokenizer.decode(
            output_ids[0][prompt_length:], skip_special_tokens=True
        ).strip()
|
|