# Citation / src/generator.py
# Uploaded with huggingface_hub (commit 07eb921, verified) by rainym00d.
import json
import re
from string import Template
from typing import List, Tuple

import torch
def remove_citations(sent):
    """Strip inline citation markers such as ' [3]' from *sent*.

    Citation openings are removed first with a leading space, then without
    one; finally stray ' |' separators and closing brackets are dropped.
    """
    for pattern in (r" \[\d+", r"\[\d+"):
        sent = re.sub(pattern, "", sent)
    return sent.replace(" |", "").replace("]", "")
class BaseGenerator:
    """Shared base for prompt-driven generators.

    Loads a JSON prompt configuration from disk and provides a helper for
    rendering retrieved documents into a prompt-ready string.
    """

    def __init__(self, prompt_path: str):
        # Keep the original path around for reference/debugging.
        self.prompt_path = prompt_path
        self.instruction, self.prompt_format = self._load_prompt(prompt_path)

    def _load_prompt(self, path: str):
        """Read a prompt config file; return (instruction, Template)."""
        with open(path, "r") as fp:
            config = json.load(fp)
        return config["instruction"], Template(config["prompt_format"])

    def _format_documents(self, doc_list: list) -> str:
        """Render docs as numbered lines: 'Document [i](Title: t): text'."""
        return "".join(
            f"Document [{i}](Title: {doc['title']}): {doc['text']}\n"
            for i, doc in enumerate(doc_list, start=1)
        )
class SentenceGenerator(BaseGenerator):
    """Generates a single, citation-free answer sentence for a question.

    Uses the prompt template loaded by ``BaseGenerator`` and stops decoding
    at the end of the first sentence.
    """

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, question: str, doc_list: list) -> str:
        """Fill the prompt template with instruction, question and documents."""
        prompt = self.prompt_format.substitute(
            instruction=self.instruction,
            question=question,
            documents=self._format_documents(doc_list),
        )
        return prompt

    @torch.inference_mode()
    def generate(self, prompt: str) -> Tuple[str, torch.Tensor]:
        """Generate one sentence for *prompt*.

        Returns:
            Tuple of (sentence with citation markers removed, the full
            generated token-id sequence — prompt included — as a 1-D CPU
            tensor).  The original annotation ``(str, List[int])`` was a
            tuple literal rather than a type hint and mislabeled the tensor.
        """
        # * tokenization
        input_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"].to(
            self.model.device
        )
        prompt_len = input_ids.shape[1]
        # * generate until the end of the sentence
        # NOTE(review): 29889 appears to be a tokenizer-specific "." token id
        # (LLaMA-style) used as an extra stop token — confirm it matches the
        # tokenizer actually deployed with this generator.
        outputs = self.model.generate(
            input_ids,
            max_new_tokens=128,
            eos_token_id=[self.tokenizer.eos_token_id, 29889],
        ).to("cpu")
        # * decode only the newly generated tokens (drop the prompt prefix)
        sentence = self.tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()
        # * make sure that only one sentence is kept: truncate right after the
        #   first closing citation followed by a period ("].")
        if "]." in sentence:
            sentence = sentence[: sentence.index("].") + 2]
        # * strip citation markers so downstream consumers see plain text
        sentence = remove_citations(sentence)
        return sentence, outputs[0]
class CitationGenerator(BaseGenerator):
    """Rewrites a plain sentence so that it carries document citations."""

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, doc_list: list, sentence: str) -> str:
        """Fill the template with the instruction, documents and sentence."""
        return self.prompt_format.substitute(
            instruction=self.instruction,
            documents=self._format_documents(doc_list),
            sentence=sentence,
        )

    @torch.inference_mode()
    def generate(self, prompt: str) -> str:
        """Run the model on *prompt* and return the decoded cited sentence."""
        encoded = self.tokenizer(prompt, return_tensors="pt")
        input_ids = encoded["input_ids"].to(self.model.device)
        n_prompt_tokens = input_ids.shape[1]
        # Generate, then move the result off the accelerator.
        generated = self.model.generate(input_ids, max_new_tokens=128).to("cpu")
        # Decode only the newly generated tokens (drop the prompt prefix).
        return self.tokenizer.decode(
            generated[0][n_prompt_tokens:], skip_special_tokens=True
        ).strip()
class QueryGenerator(BaseGenerator):
    """Produces retrieval queries for a claim given a question and context."""

    def __init__(self, model, tokenizer, prompt_path: str):
        super().__init__(prompt_path)
        self.model = model
        self.tokenizer = tokenizer

    def format_prompt(self, question: str, context: str, claim: str, query_num: int) -> str:
        """Substitute question/context/claim/query_num into the template."""
        return self.prompt_format.substitute(
            question=question,
            context=context,
            claim=claim,
            query_num=query_num,
        )

    @torch.inference_mode()
    def generate(self, prompt: str) -> str:
        """Run the model on *prompt* and return the decoded query string."""
        tokenized = self.tokenizer(prompt, return_tensors="pt")
        input_ids = tokenized["input_ids"].to(self.model.device)
        prompt_token_count = input_ids.shape[1]
        # Generate the query, then move the output tensor to the CPU.
        outputs = self.model.generate(input_ids, max_new_tokens=256).to("cpu")
        # Strip the echoed prompt before decoding.
        return self.tokenizer.decode(
            outputs[0][prompt_token_count:], skip_special_tokens=True
        ).strip()