import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "DeepHat/DeepHat-V1-7B"

# Load the tokenizer and model; device_map="auto" places weights on GPU when available
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    low_cpu_mem_usage=True,
)

def chat(input_text):
    messages = [
        {"role": "system", "content": "You are a cybersecurity expert."},
        {"role": "user", "content": input_text},
    ]
    # Render the conversation with the model's chat template
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Move the input tensors to the same device as the model weights
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=120)
    # Decode only the newly generated tokens, dropping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

gr.Interface(fn=chat, inputs="text", outputs="text").launch()