FlashCode-Lab committed on
Commit
a479b20
·
verified ·
1 Parent(s): b040963

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -41
app.py CHANGED
@@ -1,72 +1,69 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # 模型矩阵
5
  MODELS = {
6
- "Qwen 2.5 Coder (专业审计)": "Qwen/Qwen2.5-Coder-32B-Instruct",
7
- "Llama 3.1 (逻辑推理)": "meta-llama/Llama-3.1-70B-Instruct",
8
- "Mistral (快速响应)": "mistralai/Mistral-7B-Instruct-v0.3"
9
  }
10
 
11
- def ghost_chat(message, history, system_message, model_name, file_obj):
12
- repo_id = MODELS.get(model_name, "Qwen/Qwen2.5-Coder-32B-Instruct")
13
- client = InferenceClient(repo_id)
14
 
15
- file_context = ""
16
- if file_obj is not None:
17
- try:
18
- with open(file_obj.name, "r", encoding="utf-8") as f:
19
- file_context = f"\n[文件内容]:\n{f.read()[:800]}"
20
- except: pass
21
-
22
- messages = [{"role": "system", "content": f"{system_message}\n{file_context}"}]
23
- for user_msg, assistant_msg in history:
24
- messages.append({"role": "user", "content": user_msg})
25
- messages.append({"role": "assistant", "content": assistant_msg})
26
  messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
- yield history + [[message, "📡 链路建立中..."]]
 
30
 
31
  try:
 
 
 
 
 
32
  for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
33
  token = msg_chunk.choices[0].delta.content
34
  if token:
35
- response += token
36
- yield history + [[message, response]]
 
37
  except Exception as e:
38
- yield history + [[message, f"❌ 链路中断: {str(e)}"]]
39
 
40
  # 极客样式
41
  style = """
42
- .gradio-container { background: #000 !important; color: #0f0 !important; }
43
- .message.user { border-left: 3px solid #0f0 !important; }
44
- #custom-title { text-align: center; font-size: 24px; color: #0f0; margin-bottom: 20px; }
45
  """
46
 
47
- # --- UI 架构 ---
48
  with gr.Blocks(fill_height=True) as demo:
49
- # 修复点 1: 移除 textAlign,改用 HTML/Markdown 组合
50
- gr.HTML("<div id='custom-title'>🌌 INTERSTELLAR CORE v8.5</div>")
51
 
52
  with gr.Row():
53
  with gr.Column(scale=1):
54
- model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder (专业审计)", label="选择引擎")
55
- sys_msg = gr.Textbox(value="你是一个顶尖 AI 安全助手。", label="核心协议")
56
-
57
- with gr.Accordion("🛠️ 工具箱", open=False):
58
- file_up = gr.File(label="注入文档")
59
- scan_btn = gr.Button("执行系统诊断")
60
 
61
  with gr.Column(scale=3):
62
- chatbot = gr.Chatbot(height=600)
 
63
  with gr.Row():
64
- msg = gr.Textbox(placeholder="输入指令...", scale=9, container=False)
65
  send = gr.Button("EXE", scale=1)
66
 
67
- msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
68
- send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
 
69
 
70
- # 修复点 2: 所有的 CSS 参数必须在这里!
71
  if __name__ == "__main__":
72
  demo.launch(css=style)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
# Maps each engine's UI display name to its Hugging Face Hub repo id.
# NOTE: insertion order matters — the Dropdown is built from list(MODELS.keys()).
MODELS = {
    "Qwen 2.5 Coder": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Llama 3.1": "meta-llama/Llama-3.1-70B-Instruct",
}
9
 
10
def ghost_chat(message, history, system_message, model_name):
    """Stream a chat completion for *message* into the Chatbot.

    Generator used as a Gradio event handler: each ``yield`` is a full
    Chatbot state (a list of ``{"role", "content"}`` dicts), so the
    assistant reply appears token by token.

    Args:
        message: current user input (str).
        history: prior turns from ``gr.Chatbot(type="messages")`` — a
            list of ``{"role", "content"}`` dicts.
        system_message: system-prompt text from the UI textbox.
        model_name: key into ``MODELS``; an unknown key yields ``None``
            and InferenceClient falls back to its default model.
    """
    client = InferenceClient(MODELS.get(model_name))

    # Prompt for the LLM: the fresh system message first, then prior
    # turns. Skip any system entries already present in history so the
    # system prompt is never duplicated turn after turn (bug fix: the
    # old code appended history verbatim after a new system message).
    messages = [{"role": "system", "content": system_message}]
    for turn in history:
        if turn.get("role") != "system":
            messages.append(turn)

    # Add the current user input.
    messages.append({"role": "user", "content": message})

    # Bug fix: gr.Chatbot(type="messages") only accepts user/assistant
    # roles — never leak the system entry into the displayed history
    # (the old code yielded messages.copy(), system entry included).
    display = [m for m in messages if m["role"] != "system"]

    # First yield: show the user's message immediately, before streaming.
    yield display

    try:
        response_content = ""
        # Placeholder for a live web-search hook; a real search API
        # could be called here instead of the canned banner.
        if "查一下" in message or "search" in message.lower():
            response_content = "🔍 [联网协议激活] 正在检索 2026 最新漏洞数据库...\n\n"

        for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
            token = msg_chunk.choices[0].delta.content
            if token:
                response_content += token
                # Re-yield the whole display list plus the growing reply.
                yield display + [{"role": "assistant", "content": response_content}]
    except Exception as e:
        # Surface transport/model errors in the chat instead of crashing the UI.
        yield display + [{"role": "assistant", "content": f"❌ 链路异常: {str(e)}"}]
40
 
41
# Hacker-terminal theme: near-black background, matrix-green text,
# monospace font for the chat message rows.
style = """
.gradio-container { background: #050505 !important; color: #00ff41 !important; }
.message-row { font-family: 'Courier New', monospace !important; }
"""
46
 
 
47
# UI layout: sidebar (engine picker + system prompt) on the left,
# chat area on the right.
with gr.Blocks(fill_height=True) as demo:
    gr.HTML("<div style='text-align:center; color:#00ff41; font-size:20px; padding:10px;'>💀 ULTIMATE MATRIX v9.0</div>")

    with gr.Row():
        with gr.Column(scale=1):
            # Engine selector — choices come from the MODELS mapping.
            model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder", label="内核单元")
            # Editable system prompt passed to ghost_chat on every turn.
            sys_msg = gr.Textbox(value="你是一个具备实时联网能力的红队专家。", label="预置协议")
            gr.Markdown("---")
            gr.Markdown(" **方案 B 联网模式**:对话中包含'查一下'自动触发。")

        with gr.Column(scale=3):
            # type="messages" makes the Chatbot consume/produce
            # OpenAI-style {"role", "content"} dicts, matching the
            # message lists ghost_chat yields.
            chatbot = gr.Chatbot(type="messages", height=650)
            with gr.Row():
                msg = gr.Textbox(placeholder="输入指令或搜索请求...", scale=9, container=False)
                send = gr.Button("EXE", scale=1)

    # Wire both Enter-submit and the EXE button to the same streaming
    # handler; the generator's yields update the chatbot incrementally.
    msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd], [chatbot])
    send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd], [chatbot])
67
 
 
68
# Entry point: serve the UI with the custom CSS theme.
# NOTE(review): newer Gradio releases accept css= on gr.Blocks(...)
# rather than on launch() — confirm this kwarg is supported by the
# pinned gradio version.
if __name__ == "__main__":
    demo.launch(css=style)