from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

MODEL_NAME = "mjpsm/progress-generation-model"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model once at startup and reuse them across requests.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
tokenizer.pad_token = tokenizer.eos_token  # model has no dedicated pad token

class Request(BaseModel):
    text: str

def generate_response(user_input: str) -> str:
    # Build a chat-style prompt using the tags the model expects.
    # The template lines stay flush-left so no stray indentation leaks into the prompt.
    prompt = f"""<|system|>
You describe what progress was achieved in one sentence.
<|user|>
{user_input}
<|assistant|>
"""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=True,  # required for temperature/top_p to take effect
            temperature=0.6,
            top_p=0.9,
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text generated after the assistant tag, dropping the echoed prompt.
    return decoded.split("<|assistant|>")[-1].strip()


@app.get("/")
def root():
    return {"message": "Progress Model API running"}


@app.post("/predict")
def predict(req: Request):
    result = generate_response(req.text)
    return {"output": result}