LH-Tech-AI committed
Commit 53f92c7 · verified · 1 Parent(s): cbf2398

Create app.py

Files changed (1):
app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ from fastapi import FastAPI, Request
+ from fastapi.responses import StreamingResponse, FileResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.staticfiles import StaticFiles
+ import onnxruntime as ort
+ import numpy as np
+ import tiktoken
+ import json
+ import os
+
+ app = FastAPI()
+
+ # Allow CORS
+ app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
+
+ # Load model & tokenizer
+ tokenizer = tiktoken.get_encoding("gpt2")
+ MODEL_PATH = "SmaLLMPro_350M_int8.onnx"
+ # Uses optimized CPU settings
+ session = ort.InferenceSession(MODEL_PATH, providers=['CPUExecutionProvider'])
+
+ def top_k_sample(logits, k=50, temp=0.7):
+     logits = logits / temp
+     # NumPy is MUCH faster than JS loops
+     top_k_indices = np.argpartition(logits, -k)[-k:]
+     top_k_logits = logits[top_k_indices]
+     exp_logits = np.exp(top_k_logits - np.max(top_k_logits))  # numerically stable softmax
+     probs = exp_logits / np.sum(exp_logits)
+     return int(np.random.choice(top_k_indices, p=probs))
+
+ @app.post("/chat")
+ async def chat(request: Request):
+     data = await request.json()
+     prompt = f"Instruction:\n{data['prompt']}\n\nResponse:\n"
+     tokens = tokenizer.encode(prompt)
+
+     async def generate():
+         nonlocal tokens
+         for _ in range(data.get('maxLen', 100)):
+             ctx = tokens[-1024:]  # keep at most the last 1024 tokens of context
+             padded = np.zeros((1, 1024), dtype=np.int64)
+             padded[0, -len(ctx):] = ctx  # left-pad the fixed-length window with zeros
+
+             outputs = session.run(None, {'input': padded})
+             logits = outputs[0][0, -1, :50304]  # logits at the last position
+
+             next_token = top_k_sample(logits, k=data.get('topK', 25), temp=data.get('temp', 0.5))
+             if next_token == 50256: break  # GPT-2 end-of-text token
+
+             tokens.append(next_token)
+             yield f"data: {json.dumps({'token': tokenizer.decode([next_token])})}\n\n"
+
+     return StreamingResponse(generate(), media_type="text/event-stream")
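
As a quick sanity check of the top-k sampler, a sketch like the following can be run against app.py directly. Note that importing app.py also constructs the ONNX session, so the model file must be present; the fake logits below are made up for the test.

import numpy as np
from app import top_k_sample  # importing app.py also loads the ONNX model

# Fake logits over the same 50304-entry vocabulary, with token 3 made
# overwhelmingly likely.
logits = np.zeros(50304, dtype=np.float32)
logits[3] = 10.0

samples = [top_k_sample(logits, k=3, temp=0.7) for _ in range(100)]
print(sum(s == 3 for s in samples), "/ 100 draws picked token 3")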
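
To try the endpoint locally, start the server with uvicorn app:app and stream tokens from /chat. A minimal client sketch, assuming the server listens on uvicorn's default port 8000 and the requests package is installed; the prompt text is only an example:

import json
import requests

resp = requests.post(
    "http://localhost:8000/chat",
    json={"prompt": "What is ONNX?", "maxLen": 100, "topK": 25, "temp": 0.5},
    stream=True,
)

# The server emits server-sent events, one line per token: data: {"token": "..."}
for line in resp.iter_lines(decode_unicode=True):
    if line.startswith("data: "):
        print(json.loads(line[len("data: "):])["token"], end="", flush=True)
print()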