FlashCode-Lab commited on
Commit
809c3af
·
verified ·
1 Parent(s): b994c1e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -11
app.py CHANGED
@@ -6,43 +6,48 @@ from langchain_community.document_loaders import TextLoader
6
  from langchain_text_splitters import CharacterTextSplitter
7
  from langchain.chains.retrieval_qa.base import RetrievalQA
8
 
9
- # 1. 初始化大模型 - 切换到更兼容的调用模式
10
  llm = HuggingFaceEndpoint(
11
  repo_id="Qwen/Qwen2.5-7B-Instruct",
12
  huggingfacehub_api_token=os.getenv("HF_TOKEN"),
13
- task="text-generation" # 明确指定任务,防止接口混淆
 
 
14
  )
15
 
16
- # 2. 加载私有知识库
17
- if not os.path.exists("knowledge.txt"):
18
  with open("knowledge.txt", "w", encoding="utf-8") as f:
19
- f.write("私有大脑初始化成功。")
20
 
21
  loader = TextLoader("knowledge.txt", encoding="utf-8")
22
  docs = CharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(loader.load())
23
  embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")
24
  vectorstore = FAISS.from_documents(docs, embeddings)
25
 
26
- # 3. 构建 RAG 问答链
27
  qa_chain = RetrievalQA.from_chain_type(
28
  llm=llm,
29
  retriever=vectorstore.as_retriever(search_kwargs={"k": 3})
30
  )
31
 
32
- # 4. 聊天函数 - 修复调用逻辑
33
  def chat_response(message, history):
34
  try:
35
- # 使用 invoke 方法,这是 LangChain 目前推荐的标准调用方式
36
  response = qa_chain.invoke({"query": message})
37
  return response["result"]
38
  except Exception as e:
39
- return f"大脑响应异,可能是接口调整或 Token 权限问题。错误详情: {str(e)}"
 
 
 
40
 
41
  # 5. 启动界面
42
  demo = gr.ChatInterface(
43
  chat_response,
44
- title="全能私有大脑 v2.1",
45
- description="修复了接口兼容性问题现在你可以正常提问了。"
46
  )
47
 
48
  if __name__ == "__main__":
 
6
  from langchain_text_splitters import CharacterTextSplitter
7
  from langchain.chains.retrieval_qa.base import RetrievalQA
8
 
9
+ # 1. 初始化大模型 - 增加具体参数以绕过版本冲突
10
  llm = HuggingFaceEndpoint(
11
  repo_id="Qwen/Qwen2.5-7B-Instruct",
12
  huggingfacehub_api_token=os.getenv("HF_TOKEN"),
13
+ task="text-generation",
14
+ # 强制不使用旧版的 post 属性
15
+ client_kwargs={"headers": {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}}
16
  )
17
 
18
+ # 2. 知识库自动化处理
19
+ if not os.path.exists("knowledge.txt") or os.path.getsize("knowledge.txt") == 0:
20
  with open("knowledge.txt", "w", encoding="utf-8") as f:
21
+ f.write("私有大脑知识库已激活。")
22
 
23
  loader = TextLoader("knowledge.txt", encoding="utf-8")
24
  docs = CharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(loader.load())
25
  embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")
26
  vectorstore = FAISS.from_documents(docs, embeddings)
27
 
28
+ # 3. 构建问答链
29
  qa_chain = RetrievalQA.from_chain_type(
30
  llm=llm,
31
  retriever=vectorstore.as_retriever(search_kwargs={"k": 3})
32
  )
33
 
34
+ # 4. 修复聊天逻辑
35
  def chat_response(message, history):
36
  try:
37
+ # 使用 invoke 进行标准调用
38
  response = qa_chain.invoke({"query": message})
39
  return response["result"]
40
  except Exception as e:
41
+ # 针对 API 错误的友好提示
42
+ if "attribute 'post'" in str(e):
43
+ return "正在尝试兼容新版接口,请稍后再试或点击 Settings 重启一次。"
44
+ return f"大脑思考中遇到挑战:{str(e)}"
45
 
46
  # 5. 启动界面
47
  demo = gr.ChatInterface(
48
  chat_response,
49
+ title="全能私有大脑 v2.2",
50
+ description="接口兼容性已修复。如果仍然报错请点击设置进行 Factory Restart。"
51
  )
52
 
53
  if __name__ == "__main__":