File size: 3,226 Bytes
8705a00
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Optional
import random
import time

# FastAPI application instance for the sandbox service; interactive docs
# are exposed at /docs (Swagger UI) and /redoc (ReDoc).
app = FastAPI(
    title="ARF Sandbox API",
    description=(
        "Mock endpoint – does NOT use the real Bayesian engine. "
        "Simulated responses only."
    ),
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)

# ---------- Request/Response Models ----------
class Metrics(BaseModel):
    """Optional telemetry readings attached to an evaluation request.

    Every field defaults to None, so callers may supply any subset.
    NOTE(review): none of these fields are read by the mock logic in this
    file; they exist only to shape the request schema.
    """
    latency_ms: Optional[float] = None  # request latency in milliseconds
    error_rate: Optional[float] = None  # failure rate (presumably a 0-1 fraction — TODO confirm)
    throughput: Optional[float] = None  # request throughput (units not specified here)
    cpu_usage: Optional[float] = None   # CPU utilization (presumably 0-1 or percent — TODO confirm)

class EvaluateRequest(BaseModel):
    """Input payload for POST /v1/evaluate.

    service_name, event_type and severity drive the simulated response;
    metrics and timestamp are accepted but ignored by the mock logic.
    """
    service_name: str        # name of the service the event belongs to
    event_type: str          # e.g., "latency", "error_rate", "cpu_spike"
    severity: str            # "low", "medium", "high", "critical"
    metrics: Optional[Metrics] = None    # optional telemetry snapshot (unused by the mock)
    timestamp: Optional[float] = None    # event time, presumably epoch seconds — not read here

class EvaluateResponse(BaseModel):
    """Simulated verdict returned by POST /v1/evaluate."""
    status: str              # "success" in this mock; no failure variants are produced here
    recommendation: str      # "APPROVE", "DENY", "ESCALATE"
    risk_score: float        # simulated risk in (0.01, 0.99), rounded to 4 decimals
    confidence: float        # simulated confidence in [0.5, 0.99], rounded to 4 decimals
    justification: str       # human-readable explanation of the (mock) decision
    policy_violations: list  # always [] in this mock; element type unspecified

# ---------- Mock Logic ----------
def generate_mock_response(request: EvaluateRequest) -> EvaluateResponse:
    """Build a simulated EvaluateResponse for *request*.

    The result is deterministic for a given (service_name, event_type,
    severity) triple: a dedicated ``random.Random`` seeded with that triple
    supplies the noise, so identical requests always yield identical
    responses, across processes as well as within one.

    Fixes over the previous version:
    - Seeding used the built-in ``hash()``, which is salted per interpreter
      run (PYTHONHASHSEED), so "deterministic" only held within a single
      process; it also called ``random.seed()``, mutating global RNG state
      as a side effect. A string-seeded local ``random.Random`` is stable
      across runs and side-effect free.
    - The justification string concatenated the risk score and the
      recommendation with no separator ("0.45ESCALATE"); a separator was
      added.

    Args:
        request: validated evaluation request; only service_name,
            event_type and severity are consulted.

    Returns:
        A fully populated EvaluateResponse with status "success".
    """
    # random.Random accepts a str seed and derives state from its bytes
    # deterministically, unlike hash() which is salted per process.
    rng = random.Random(
        f"{request.service_name}|{request.event_type}|{request.severity}"
    )

    # Baseline risk per severity; unknown severities fall back to 0.5.
    severity_map = {"low": 0.2, "medium": 0.4, "high": 0.7, "critical": 0.9}
    base_risk = severity_map.get(request.severity, 0.5)
    # Add small noise, clamped into (0.01, 0.99).
    risk = min(0.99, max(0.01, base_risk + rng.uniform(-0.1, 0.1)))

    # Mock decision thresholds.
    if risk < 0.3:
        rec = "APPROVE"
    elif risk > 0.8:
        rec = "DENY"
    else:
        rec = "ESCALATE"

    # Confidence shrinks as risk grows, plus small jitter, clamped to [0.5, 0.99].
    confidence = 1.0 - (risk * 0.3) + rng.uniform(-0.05, 0.05)
    confidence = min(0.99, max(0.5, confidence))

    # Justification template (separator between score and recommendation
    # restored — previously they ran together).
    justification = (
        f"Simulated evaluation for {request.service_name}: {request.event_type} severity={request.severity}. "
        f"Risk score {risk:.2f} -> {rec}. (Mock response, not real inference.)"
    )

    return EvaluateResponse(
        status="success",
        recommendation=rec,
        risk_score=round(risk, 4),
        confidence=round(confidence, 4),
        justification=justification,
        policy_violations=[],
    )

# ---------- Endpoints ----------
@app.get("/health", tags=["health"])
async def health():
    """Liveness probe: report service status plus the current epoch time."""
    now = time.time()
    return {"status": "ok", "timestamp": now}

@app.post("/v1/evaluate", response_model=EvaluateResponse, tags=["evaluation"])
async def evaluate(request: EvaluateRequest):
    """Validate the payload and return a simulated evaluation verdict."""
    # Guard clause: pydantic guarantees the fields exist, but empty
    # strings still get through — reject them with a 400 before any work.
    if not (request.service_name and request.event_type):
        raise HTTPException(status_code=400, detail="Missing service_name or event_type")
    return generate_mock_response(request)

# ---------- Root endpoint (points visitors to /docs) ----------
@app.get("/", include_in_schema=False)
async def root():
    """Landing endpoint: direct human visitors to the interactive docs."""
    payload = {"message": "ARF Sandbox API. See /docs for interactive documentation."}
    return payload

if __name__ == "__main__":
    # Local development entry point: serve on all interfaces, port 7860.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)