Update app.py
app.py
CHANGED
@@ -2,57 +2,55 @@ import os
 import gradio as gr
 from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
-from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
-from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain.chains import RetrievalQA
 
-# 1.
 llm = HuggingFaceEndpoint(
     repo_id="Qwen/Qwen2.5-7B-Instruct",
-    huggingfacehub_api_token=os.getenv("HF_TOKEN")
 )
 
-# 2.
 embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")
 
-def process_files(files):
-    all_docs = []
-    for file in files:
-        # Pick a loader based on the file extension
-        if file.name.endswith('.pdf'):
-            loader = PyPDFLoader(file.name)
-        elif file.name.endswith('.docx'):
-            loader = Docx2txtLoader(file.name)
-        else:
-            loader = TextLoader(file.name)
-        all_docs.extend(loader.load())
-
-    # 3. Smart chunking (so the document isn't too long for the AI to remember)
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=100)
-    split_docs = text_splitter.split_documents(all_docs)
-
-    # 4. Store in a temporary vector store
-    vectorstore = FAISS.from_documents(split_docs, embeddings)
-    return vectorstore
-
-# 5. QA function
-def predict(message, history, file_output):
-    if not file_output:
-        return "Please upload a PDF/Word/TXT document first so I can switch on private-brain mode."
-
-    # Process the files and build the retrieval chain
-    vectorstore = process_files(file_output)
-    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever())
-
-    response = qa_chain.invoke(message)
-    return response["result"]
-
-# 6. Interface design
 demo = gr.ChatInterface(
-    predict,
-    additional_inputs=[gr.File(file_count="multiple", label="Upload private documents (.pdf, .docx, .txt)")],
     title="My All-Purpose Private Brain",
-    description="
 )
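In short: the deleted version above re-indexed user-uploaded PDF/Word/TXT files on every question via process_files and predict. The new version below drops uploads entirely and instead loads, splits, and indexes a single local knowledge.txt once at startup. The updated file follows.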
 import gradio as gr
 from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
+from langchain_community.document_loaders import TextLoader
+from langchain_text_splitters import CharacterTextSplitter
 from langchain.chains import RetrievalQA
 
+# 1. Configure the LLM engine (using Qwen 2.5)
+# Make sure you have set HF_TOKEN under Settings -> Secrets
 llm = HuggingFaceEndpoint(
     repo_id="Qwen/Qwen2.5-7B-Instruct",
+    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
+    timeout=300
 )
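The endpoint can be smoke-tested on its own before the rest of the pipeline is wired up. A minimal sketch, assuming HF_TOKEN is set in the environment; the test prompt is made up:

import os
from langchain_huggingface import HuggingFaceEndpoint

# Same configuration as in the diff; invoke() returns the completion as a plain string
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-7B-Instruct",
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
    timeout=300,
)
print(llm.invoke("Reply with one short sentence."))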
 
+# 2. Initialize the private knowledge base
+# If knowledge.txt does not exist, create a simple one first to avoid errors
+if not os.path.exists("knowledge.txt"):
+    with open("knowledge.txt", "w", encoding="utf-8") as f:
+        f.write("Welcome to the all-purpose AI brain! Enter your private knowledge in knowledge.txt.")
+
+# Load and split the document
+loader = TextLoader("knowledge.txt", encoding="utf-8")
+documents = loader.load()
+text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
+docs = text_splitter.split_documents(documents)
+
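CharacterTextSplitter cuts on a separator (blank lines by default), then packs the pieces into chunks of at most chunk_size characters, repeating chunk_overlap characters between neighbouring chunks. A quick sketch of that behaviour on made-up text:

from langchain_text_splitters import CharacterTextSplitter

# Hypothetical sample: 40 short paragraphs separated by blank lines
sample = "\n\n".join(f"Fact {i}: something worth retrieving later." for i in range(40))
splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_text(sample)
print(len(chunks), max(len(c) for c in chunks))  # a handful of chunks, each roughly <= 500 chars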
+# 3. Create the vector retrieval system (using a Chinese-optimized embedding model)
 embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")
+vectorstore = FAISS.from_documents(docs, embeddings)
+
+# 4. Build the QA chain (the RAG core)
+qa_chain = RetrievalQA.from_chain_type(
+    llm=llm,
+    retriever=vectorstore.as_retriever(search_kwargs={"k": 3})
+)
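Retrieval quality can be judged separately from generation by querying the vector store directly. A small sketch; the query text is made up:

# The top-3 chunks the retriever would hand to the LLM for this query
hits = vectorstore.similarity_search("What does my knowledge base cover?", k=3)
for i, doc in enumerate(hits, 1):
    print(i, doc.page_content[:80])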
+
+# 5. Define the chat interaction
+def chat_fn(message, history):
+    try:
+        # Run retrieval and generate the answer
+        response = qa_chain.invoke({"query": message})
+        return response["result"]
+    except Exception as e:
+        return f"The brain timed out or hit an error. Please check your token settings. Details: {str(e)}"
 
+# Launch the Gradio interface
 demo = gr.ChatInterface(
+    chat_fn,
     title="My All-Purpose Private Brain",
+    description="Built on Qwen 2.5 + RAG: it consults your knowledge.txt before answering."
 )
 
+if __name__ == "__main__":
+    demo.launch()
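The diff does not show the Space's dependency file. A plausible requirements.txt for these imports, with package names inferred from the import paths (treat them as assumptions):

gradio
langchain
langchain-community
langchain-huggingface
langchain-text-splitters
faiss-cpu
sentence-transformers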