Update app.py
app.py CHANGED
@@ -4,50 +4,56 @@ from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
 from langchain_community.document_loaders import TextLoader
 from langchain_text_splitters import CharacterTextSplitter
-from langchain.chains …
+from langchain.chains import RetrievalQA
 
-# 1. …
+# 1. Configure the LLM - switch to a more robust connection method
+# We explicitly specify a transport protocol shared by async and sync calls
 llm = HuggingFaceEndpoint(
     repo_id="Qwen/Qwen2.5-7B-Instruct",
     huggingfacehub_api_token=os.getenv("HF_TOKEN"),
-    …
-    # …
-    client_kwargs={"headers": {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}}
+    timeout=300,
+    task="text-generation"  # state the task type explicitly
 )
 
-# 2. Knowledge base …
-…
-…
-…
+# 2. Knowledge-base loading logic
+def load_kb():
+    if not os.path.exists("knowledge.txt"):
+        with open("knowledge.txt", "w", encoding="utf-8") as f:
+            f.write("The private-brain knowledge base is ready.")
+
+    loader = TextLoader("knowledge.txt", encoding="utf-8")
+    docs = CharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(loader.load())
+
+    # Use an embedding model optimized for Chinese
+    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")
+    vectorstore = FAISS.from_documents(docs, embeddings)
+    return vectorstore
 
-…
-…
-…
-vectorstore = FAISS.from_documents(docs, embeddings)
-…
-# 3. Build the QA chain
+# Initialize the QA chain
+vs = load_kb()
+# Note: we use the latest invoke interface here
 qa_chain = RetrievalQA.from_chain_type(
     llm=llm,
-    retriever=…
+    retriever=vs.as_retriever(search_kwargs={"k": 3})
 )
 
-# …
-def …
+# 3. Chat handler
+def chat_fn(message, history):
     try:
-        # use invoke …
-        …
-        return …
+        # Use invoke instead of the old direct call to fix InferenceClient compatibility
+        result = qa_chain.invoke({"query": message})
+        return result["result"]
     except Exception as e:
-        # …
-        if "…
-        return "…
-        return f"Brain …
+        # If the token permissions are wrong, give a clear message
+        if "401" in str(e):
+            return "Error: the token is invalid or lacks permissions; check HF_TOKEN in Settings."
+        return f"Brain response error: {str(e)}"
 
-# …
+# 4. Build the front-end interface
 demo = gr.ChatInterface(
-    …
-    title="All-Purpose Private Brain v2.…
-    description="…
+    chat_fn,
+    title="All-Purpose Private Brain v2.5",
+    description="Deeply optimized for the latest API; the knowledge base can now be queried correctly."
 )
 
 if __name__ == "__main__":
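A quick way to confirm the new endpoint settings is to call the model directly, outside the chain. This is only a sketch, not part of the commit; it assumes the same HF_TOKEN environment variable the Space already uses:

import os
from langchain_huggingface import HuggingFaceEndpoint

# Same settings as the updated app.py; a plain string prompt goes
# straight to the text-generation task.
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-7B-Instruct",
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
    timeout=300,
    task="text-generation",
)
print(llm.invoke("Introduce yourself in one sentence."))

A 401 here points at the token itself rather than at the RetrievalQA wiring, which is the distinction the new error handler in chat_fn relies on.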
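Retrieval problems are easier to spot before the LLM rephrases them. A minimal check of the FAISS side alone, reusing load_kb() from the updated file (the query string is only an example):

# Retrievers are Runnables in recent LangChain, so invoke() takes the
# query string and returns a list of Documents.
retriever = load_kb().as_retriever(search_kwargs={"k": 3})
for doc in retriever.invoke("knowledge base"):
    print(doc.page_content[:100])

If this prints the seeded knowledge.txt content, the embeddings and index are fine, and any remaining failure sits in the LLM call.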
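The chat handler itself can be exercised without starting Gradio. chat_fn ignores history, but gr.ChatInterface requires the two-argument callback signature, so pass an empty list:

# Should print either an answer from the knowledge base or one of the
# string error messages from the except branch.
print(chat_fn("What is in the knowledge base?", history=[]))

Returning plain strings from both branches keeps the UI from ever seeing an exception, which is presumably why the except path formats errors instead of re-raising.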