import os

import gradio as gr
from huggingface_hub import InferenceClient

MODEL_ID = "HauhauCS/GLM-4.7-Flash-Uncensored-HauhauCS-Balanced"


def get_token(hf_token: gr.OAuthToken | None) -> str | None:
    """
    Priority:
    1) Gradio OAuth token (when user clicks LoginButton)
    2) Space secret HF_TOKEN (Settings -> Secrets)
    """
    if hf_token is not None and getattr(hf_token, "token", None):
        return hf_token.token
    return os.getenv("HF_TOKEN")


def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    token = get_token(hf_token)
    # If the repo is gated/private, you need a token.
    # If it's public, token can be None and it may still work depending on the endpoint limits.
    client = InferenceClient(model=MODEL_ID, token=token)

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream output token-by-token
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token_text = ""
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            token_text = chunk.choices[0].delta.content
        response += token_text
        yield response


chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a helpful coding assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.Markdown(f"### Model\n`{MODEL_ID}`")
        gr.LoginButton()
        gr.Markdown(
            "If this model requires authentication, click **Login** "
            "or set a Space secret named `HF_TOKEN`."
        )
    chatbot.render()

if __name__ == "__main__":
    demo.launch()