FlashCode-Lab committed on
Commit
b040963
·
verified ·
1 Parent(s): bd8dde2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -28
app.py CHANGED
@@ -1,8 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import re
4
 
5
- # 备选模型列表,增加稳定性
6
  MODELS = {
7
  "Qwen 2.5 Coder (专业审计)": "Qwen/Qwen2.5-Coder-32B-Instruct",
8
  "Llama 3.1 (逻辑推理)": "meta-llama/Llama-3.1-70B-Instruct",
@@ -10,16 +9,14 @@ MODELS = {
10
  }
11
 
12
  def ghost_chat(message, history, system_message, model_name, file_obj):
13
- # 自动选择模型
14
- repo_id = MODELS.get(model_name, MODELS["Qwen 2.5 Coder (专业审计)"])
15
  client = InferenceClient(repo_id)
16
 
17
- # 文件处理
18
  file_context = ""
19
  if file_obj is not None:
20
  try:
21
  with open(file_obj.name, "r", encoding="utf-8") as f:
22
- file_context = f"\n[File Context]:\n{f.read()[:1000]}"
23
  except: pass
24
 
25
  messages = [{"role": "system", "content": f"{system_message}\n{file_context}"}]
@@ -29,7 +26,7 @@ def ghost_chat(message, history, system_message, model_name, file_obj):
29
  messages.append({"role": "user", "content": message})
30
 
31
  response = ""
32
- yield history + [[message, "🛰️ 正在接卫星链路..."]]
33
 
34
  try:
35
  for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
@@ -38,41 +35,38 @@ def ghost_chat(message, history, system_message, model_name, file_obj):
38
  response += token
39
  yield history + [[message, response]]
40
  except Exception as e:
41
- yield history + [[message, f"❌ 系统崩溃: {str(e)}"]]
42
 
43
- # --- 简黑盒样式 ---
44
  style = """
45
  .gradio-container { background: #000 !important; color: #0f0 !important; }
46
- .message.user { border: 1px solid #0f0 !important; }
47
- .tabs { border: none !important; }
48
  """
49
 
50
- with gr.Blocks(fill_height=True, css=style) as demo:
51
- gr.Markdown("## 🌌 INTERSTELLAR CORE v8.0", textAlign="center")
 
 
52
 
53
  with gr.Row():
54
- # 左侧控制面板
55
  with gr.Column(scale=1):
56
- with gr.Group():
57
- model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder (专业审计)", label="选择核心")
58
- sys_msg = gr.Textbox(value="你是一个顶尖的 AI 助手。", label="系统架构")
59
 
60
- with gr.Accordion("🛠️ 战术附件", open=False):
61
- file_up = gr.File(label="注入知识库")
62
- scan_input = gr.Textbox(label="扫描器输入")
63
- scan_btn = gr.Button("RUN ANALYSIS")
64
- scan_res = gr.Textbox(label="分析结果")
65
 
66
- # 右侧主控制台
67
  with gr.Column(scale=3):
68
- chatbot = gr.Chatbot(height=650)
69
  with gr.Row():
70
- msg = gr.Textbox(placeholder="输入指令...", scale=8, container=False)
71
- send = gr.Button("EXE", scale=2)
72
 
73
- # 绑定逻辑
74
  msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
75
  send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
76
 
 
77
  if __name__ == "__main__":
78
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
+ # 模型矩阵
5
  MODELS = {
6
  "Qwen 2.5 Coder (专业审计)": "Qwen/Qwen2.5-Coder-32B-Instruct",
7
  "Llama 3.1 (逻辑推理)": "meta-llama/Llama-3.1-70B-Instruct",
 
9
  }
10
 
11
  def ghost_chat(message, history, system_message, model_name, file_obj):
12
+ repo_id = MODELS.get(model_name, "Qwen/Qwen2.5-Coder-32B-Instruct")
 
13
  client = InferenceClient(repo_id)
14
 
 
15
  file_context = ""
16
  if file_obj is not None:
17
  try:
18
  with open(file_obj.name, "r", encoding="utf-8") as f:
19
+ file_context = f"\n[文件内容]:\n{f.read()[:800]}"
20
  except: pass
21
 
22
  messages = [{"role": "system", "content": f"{system_message}\n{file_context}"}]
 
26
  messages.append({"role": "user", "content": message})
27
 
28
  response = ""
29
+ yield history + [[message, "📡 链路建立中..."]]
30
 
31
  try:
32
  for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
 
35
  response += token
36
  yield history + [[message, response]]
37
  except Exception as e:
38
+ yield history + [[message, f"❌ 链路中断: {str(e)}"]]
39
 
40
# Minimal terminal-style CSS: black background, green accents.
# Applied to the Gradio app; `#custom-title` styles the gr.HTML header below.
style = """
.gradio-container { background: #000 !important; color: #0f0 !important; }
.message.user { border-left: 3px solid #0f0 !important; }
#custom-title { text-align: center; font-size: 24px; color: #0f0; margin-bottom: 20px; }
"""
46
 
47
# --- UI layout ---
# Fix: `css` is a `gr.Blocks` constructor argument. Gradio's `launch()` has no
# `css` parameter, so the previous `demo.launch(css=style)` raised a TypeError
# before the server ever started.
with gr.Blocks(fill_height=True, css=style) as demo:
    # HTML title (gr.Markdown has no `textAlign` kwarg; the centering comes
    # from the `#custom-title` rule in `style`).
    gr.HTML("<div id='custom-title'>🌌 INTERSTELLAR CORE v8.5</div>")

    with gr.Row():
        # Left-hand control panel: model picker, system prompt, tool drawer.
        with gr.Column(scale=1):
            model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder (专业审计)", label="选择引擎")
            sys_msg = gr.Textbox(value="你是一个顶尖 AI 安全助手。", label="核心协议")

            with gr.Accordion("🛠️ 工具箱", open=False):
                file_up = gr.File(label="注入文档")
                # TODO(review): this button has no click handler wired up —
                # confirm whether the diagnostic feature was dropped on purpose.
                scan_btn = gr.Button("执行系统诊断")

        # Right-hand main console: chat window plus the input row.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=600)
            with gr.Row():
                msg = gr.Textbox(placeholder="输入指令...", scale=9, container=False)
                send = gr.Button("EXE", scale=1)

    # Both Enter-in-textbox and the EXE button stream ghost_chat's generator
    # output into the chatbot.
    msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
    send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])

if __name__ == "__main__":
    demo.launch()