File size: 2,628 Bytes
c19023b
ddd0536
c19023b
f1b2ae3
 
c19023b
f1b2ae3
 
a479b20
f1b2ae3
 
 
a479b20
6d3c4ea
a479b20
f1b2ae3
 
 
 
 
 
 
7be642e
bd8dde2
 
 
 
a479b20
f1b2ae3
bd8dde2
f1b2ae3
33c6d7b
f1b2ae3
bd8dde2
f1b2ae3
 
 
33c6d7b
 
b040963
f1b2ae3
bd8dde2
6307063
bd8dde2
f1b2ae3
 
 
 
a479b20
f1b2ae3
7be642e
65f92ba
f1b2ae3
 
33c6d7b
f1b2ae3
b040963
c19023b
f1b2ae3
 
 
b994c1e
f1b2ae3
b994c1e
b040963
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import gradio as gr
from huggingface_hub import InferenceClient

# Pin the chat backend to a well-supported serverless model on the HF Inference API.
MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
client = InferenceClient(MODEL_ID)

def ghost_chat(message, history, system_message):
    """Stream a chat reply for the Gradio Chatbot (tuple-format history).

    Args:
        message: The new user prompt (str).
        history: List of [user, assistant] pairs from the default gr.Chatbot.
        system_message: System prompt injected as the first API message.

    Yields:
        The updated tuple-format history, re-emitted as tokens stream in.
    """
    # Rebuild the OpenAI-style message list from the tuple-format history.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        # Guard against None/empty assistant turns (e.g. an interrupted
        # stream leaves [user, None]); sending content=None would be
        # rejected by the chat-completion API.
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response_content = ""
    # Keyword trigger: prefix a "searching" banner for lookup-style prompts.
    if any(keyword in message for keyword in ["查", "搜索", "search", "find"]):
        response_content = "🔍 [SYSTEM] 正在接入实时情报库...\n\n"

    # First yield: echo the user's message immediately with a placeholder
    # so the UI feels responsive before the first token arrives.
    yield history + [[message, response_content + "📡 信号同步中..."]]

    try:
        for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
            token = msg_chunk.choices[0].delta.content
            if token:
                response_content += token
                yield history + [[message, response_content]]
    except Exception as e:
        # Surface the failure inside the chat window instead of crashing the UI.
        yield history + [[message, f"❌ 链路中断: {str(e)}"]]

# Hacker-green theme: black background, green text, glowing centered title.
style = """
.gradio-container { background: #000 !important; color: #0f0 !important; }
.message.user { border-left: 3px solid #0f0 !important; }
#title { text-align: center; font-size: 22px; padding: 10px; color: #0f0; text-shadow: 0 0 10px #0f0; }
"""

# Build the UI. NOTE: custom CSS must be passed to gr.Blocks(), not to
# launch() — Blocks.launch() has no `css` parameter, so the original
# `demo.launch(css=style)` would fail at startup.
with gr.Blocks(fill_height=True, css=style) as demo:
    gr.HTML("<div id='title'>💀 ULTIMATE MATRIX v9.5</div>")

    with gr.Row():
        with gr.Column(scale=1):
            sys_msg = gr.Textbox(
                value="你是一个拥有实时搜索能力的 AI 安全专家。",
                label="核心协议"
            )
            gr.Markdown("---")
            gr.Markdown("✅ **已激活:** 极简兼容模式\n🌐 **支持:** 实时关键词检索")

        with gr.Column(scale=3):
            # Default (tuple-format) Chatbot mode; ghost_chat yields
            # [[user, bot], ...] lists, which match this mode.
            chatbot = gr.Chatbot(height=600)
            with gr.Row():
                msg = gr.Textbox(placeholder="输入指令...", scale=9, container=False)
                send = gr.Button("EXE", scale=1)

    # Wire both Enter-submit and the button click to the same streaming handler.
    msg.submit(ghost_chat, [msg, chatbot, sys_msg], [chatbot])
    send.click(ghost_chat, [msg, chatbot, sys_msg], [chatbot])

if __name__ == "__main__":
    demo.launch()