File size: 2,917 Bytes
c19023b
ddd0536
c19023b
b040963
bd8dde2
 
 
 
 
c19023b
bd8dde2
b040963
bd8dde2
 
 
 
 
 
b040963
bd8dde2
d7872e6
65f92ba
6d3c4ea
 
 
 
bd8dde2
33c6d7b
b040963
7be642e
bd8dde2
 
 
 
 
 
 
b040963
33c6d7b
b040963
bd8dde2
 
b040963
 
33c6d7b
 
b040963
 
 
 
bd8dde2
6307063
bd8dde2
b040963
 
33c6d7b
b040963
 
 
7be642e
65f92ba
b040963
33c6d7b
b040963
 
c19023b
bd8dde2
 
b994c1e
b040963
b994c1e
b040963
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import gradio as gr
from huggingface_hub import InferenceClient

# Model matrix: UI display label -> Hugging Face model repo id.
# ghost_chat() resolves the selected label through this dict.
MODELS = {
    "Qwen 2.5 Coder (专业审计)": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Llama 3.1 (逻辑推理)": "meta-llama/Llama-3.1-70B-Instruct",
    "Mistral (快速响应)": "mistralai/Mistral-7B-Instruct-v0.3"
}

def ghost_chat(message, history, system_message, model_name, file_obj):
    """Stream a chat completion from the selected HF Inference model.

    Args:
        message: Latest user input string.
        history: Prior turns as ``[[user, assistant], ...]`` pairs
            (Gradio Chatbot tuple format).
        system_message: System prompt text.
        model_name: Display label key into MODELS; unknown labels fall
            back to the Qwen coder model.
        file_obj: Optional Gradio upload; the first 800 characters of the
            file are appended to the system prompt as context.

    Yields:
        The full chat history including the in-progress assistant reply,
        so the Chatbot component updates as tokens stream in.
    """
    repo_id = MODELS.get(model_name, "Qwen/Qwen2.5-Coder-32B-Instruct")
    client = InferenceClient(repo_id)

    file_context = ""
    if file_obj is not None:
        # Best-effort read: if the upload is unreadable or not valid
        # UTF-8, skip the file context rather than crash the chat.
        # (Was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit — narrowed to the real failures.)
        try:
            with open(file_obj.name, "r", encoding="utf-8") as f:
                file_context = f"\n[文件内容]:\n{f.read()[:800]}"
        except (OSError, UnicodeDecodeError):
            pass

    messages = [{"role": "system", "content": f"{system_message}\n{file_context}"}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Show a "connecting" placeholder immediately; it is replaced by the
    # streamed tokens (or the error message) on subsequent yields.
    yield history + [[message, "📡 链路建立中..."]]

    try:
        for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
            token = msg_chunk.choices[0].delta.content
            if token:
                response += token
                yield history + [[message, response]]
    except Exception as e:
        # Surface stream failures in the chat window instead of raising
        # out of the generator, which would only show a generic UI error.
        yield history + [[message, f"❌ 链路中断: {str(e)}"]]

# Hacker-terminal styling: black background, green text, a green accent
# bar on user messages, and the centered #custom-title header targeted
# by the gr.HTML title element in the UI below.
style = """
.gradio-container { background: #000 !important; color: #0f0 !important; }
.message.user { border-left: 3px solid #0f0 !important; }
#custom-title { text-align: center; font-size: 24px; color: #0f0; margin-bottom: 20px; }
"""

# --- UI layout ---
# Fix: `css` is an argument of the gr.Blocks constructor, not of
# demo.launch() — launch() has no css parameter, so passing it there
# raises a TypeError before the app starts.
with gr.Blocks(css=style, fill_height=True) as demo:
    # Centered title via raw HTML; styled by the #custom-title CSS rule
    # (gr.Markdown offers no inline text-align control).
    gr.HTML("<div id='custom-title'>🌌 INTERSTELLAR CORE v8.5</div>")

    with gr.Row():
        # Left column: engine selection, system prompt, and tools.
        with gr.Column(scale=1):
            model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder (专业审计)", label="选择引擎")
            sys_msg = gr.Textbox(value="你是一个顶尖 AI 安全助手。", label="核心协议")

            with gr.Accordion("🛠️ 工具箱", open=False):
                file_up = gr.File(label="注入文档")
                # NOTE(review): scan_btn has no click handler wired up —
                # the button currently does nothing; confirm intent.
                scan_btn = gr.Button("执行系统诊断")

        # Right column: chat window and input row.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=600)
            with gr.Row():
                msg = gr.Textbox(placeholder="输入指令...", scale=9, container=False)
                send = gr.Button("EXE", scale=1)

    # Both Enter-to-submit and the EXE button stream through ghost_chat.
    msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
    send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])

if __name__ == "__main__":
    demo.launch()