import os

import google.generativeai as genai
import requests
import numpy as np
import faiss
from sentence_transformers import SentenceTransformer
from bs4 import BeautifulSoup
import gradio as gr


# Configure the generative AI client. Read the key from the environment
# instead of hardcoding it: the original version embedded a literal API key,
# which should be treated as leaked and revoked.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)
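# A key can be created in Google AI Studio; export it before launching, e.g.:
#   export GOOGLE_API_KEY="your-key-here"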


def fetch_lecture_notes():
    """Download the CS324 lecture pages and return (text, url) pairs."""
    lecture_urls = [
        "https://stanford-cs324.github.io/winter2022/lectures/introduction/",
        "https://stanford-cs324.github.io/winter2022/lectures/capabilities/",
        "https://stanford-cs324.github.io/winter2022/lectures/data/",
        "https://stanford-cs324.github.io/winter2022/lectures/modeling/"
    ]
    lecture_texts = []
    for url in lecture_urls:
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            print(f"Fetched content from {url}")
            lecture_texts.append((extract_text_from_html(response.text), url))
        else:
            print(f"Failed to fetch content from {url}, status code: {response.status_code}")
    return lecture_texts


def fetch_model_architectures():
    """Fetch the Awesome-LLM milestone-papers page as a (text, url) pair."""
    url = "https://github.com/Hannibal046/Awesome-LLM#milestone-papers"
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        print(f"Fetched model architectures, status code: {response.status_code}")
        return extract_text_from_html(response.text), url
    print(f"Failed to fetch model architectures, status code: {response.status_code}")
    return "", url


def extract_text_from_html(html_content):
    soup = BeautifulSoup(html_content, 'html.parser')
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text(separator="\n", strip=True)
    return text
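# Sanity check of the extraction, e.g.:
#   extract_text_from_html("<p>Hello <b>world</b></p>")  # -> "Hello\nworld"
# Tags, scripts, and styles are dropped; text nodes are joined with newlines.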


def create_embeddings(texts, model):
    texts_only = [text for text, _ in texts]
    embeddings = model.encode(texts_only)
    return embeddings
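# With the paraphrase-MiniLM-L6-v2 encoder used below, model.encode returns
# 384-dimensional vectors, i.e. an array of shape (len(texts), 384).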


def initialize_faiss_index(embeddings):
    dimension = embeddings.shape[1]
    index = faiss.IndexFlatL2(dimension)
    index.add(embeddings.astype('float32'))
    return index
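# Minimal usage sketch with made-up vectors (IndexFlatL2 performs exact
# search and returns squared-L2 distances plus row indices into the matrix
# passed to add):
#   idx = initialize_faiss_index(np.random.rand(10, 384).astype('float32'))
#   distances, ids = idx.search(np.random.rand(1, 384).astype('float32'), 3)
#   # distances.shape == (1, 3); ids.shape == (1, 3)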


# Running log of (query, answer) pairs, appended to on every query.
conversation_history = []


def handle_query(query, faiss_index, embeddings_texts, model):
    global conversation_history

    # Embed the query and retrieve the 3 nearest documents from the index.
    query_embedding = model.encode([query]).astype('float32')
    _, indices = faiss_index.search(query_embedding, 3)
    relevant_texts = [embeddings_texts[idx] for idx in indices[0]]

    # Concatenate the retrieved passages and truncate to keep the prompt small.
    combined_text = "\n".join(text for text, _ in relevant_texts)
    max_length = 500
    if len(combined_text) > max_length:
        combined_text = combined_text[:max_length] + "..."

    # Generate an answer grounded in the retrieved context.
    try:
        response = genai.generate_text(
            model="models/text-bison-001",
            prompt=f"Based on the following context:\n\n{combined_text}\n\nAnswer the following question: {query}",
            max_output_tokens=200
        )
        generated_text = response.result if response and response.result else "No response generated."
    except Exception as e:
        print(f"Error generating text: {e}")
        generated_text = "An error occurred while generating the response."

    conversation_history.append((query, generated_text))

    # Cite the URLs the retrieved passages came from.
    sources = [url for _, url in relevant_texts]
    return generated_text, sources
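# Note: genai.generate_text with "models/text-bison-001" is the legacy PaLM
# API and is rejected by newer google-generativeai releases. A rough modern
# equivalent (model name is only an example) would be:
#   model = genai.GenerativeModel("gemini-1.5-flash")
#   generated_text = model.generate_content(prompt).text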


def generate_concise_response(prompt, context):
    """One-off generation used to summarize the conversation so far."""
    try:
        response = genai.generate_text(
            model="models/text-bison-001",
            prompt=f"{prompt}\n\nContext: {context}\n\nAnswer:",
            max_output_tokens=200
        )
        return response.result if response and response.result else "No response generated."
    except Exception as e:
        print(f"Error generating concise response: {e}")
        return "An error occurred while generating the concise response."


# Build the corpus, embedding model, and FAISS index once and cache them,
# instead of re-fetching and re-embedding everything on every chat message.
_resources = None


def _get_resources():
    global _resources
    if _resources is None:
        lecture_notes = fetch_lecture_notes()
        model_architectures = fetch_model_architectures()
        all_texts = lecture_notes + [model_architectures]
        embedding_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
        embeddings = create_embeddings(all_texts, embedding_model)
        faiss_index = initialize_faiss_index(np.array(embeddings))
        _resources = (all_texts, embedding_model, faiss_index)
    return _resources


def chatbot(message, history):
    all_texts, embedding_model, faiss_index = _get_resources()

    response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
    print("Query:", message)
    print("Response:", response)

    total_text = response
    if sources:
        print("Sources:", sources)
        relevant_source = "\n".join(sources)
        total_text += f"\n\nSources:\n{relevant_source}"
    else:
        print("Sources: None of the provided sources were used.")
    print("----")

    # Summarize every user query so far; history holds Gradio's
    # [user_message, bot_message] pairs.
    prompt = "Summarize the user queries so far"
    user_queries_summary = " ".join([msg[0] for msg in history] + [message])
    concise_response = generate_concise_response(prompt, user_queries_summary)
    print("Concise Response:")
    print(concise_response)

    return total_text


# Wire the chatbot into a Gradio chat UI. The retry/undo/clear button options
# below are Gradio 4.x parameters; they were removed in Gradio 5, so drop
# them if your installed version rejects them.
iface = gr.ChatInterface(
    chatbot,
    title="LLM Research Assistant",
    description="Ask questions about LLM architectures, datasets, and training techniques.",
    examples=[
        "What are some milestone model architectures in LLMs?",
        "Explain the transformer architecture.",
        "Tell me about datasets used to train LLMs.",
        "How are LLM training datasets cleaned and preprocessed?",
        "Summarize the user queries so far"
    ],
    retry_btn="Regenerate",
    undo_btn="Undo",
    clear_btn="Clear",
)


if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
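# To run locally (script name is illustrative):
#   export GOOGLE_API_KEY="your-key-here"
#   python app.py
# then open http://localhost:7860 in a browser.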