# api/schemas.py
# Pydantic request and response models for all 7 endpoints
# Validation happens here — the model is never called on invalid input
from pydantic import BaseModel, Field, validator
from typing import Optional, List, Any
from enum import Enum
# Category names the API accepts. These are the 15 MVTec AD object/texture
# classes — NOTE(review): confirm this matches the deployed model's training set.
VALID_CATEGORIES = [
    'bottle', 'cable', 'capsule', 'carpet', 'grid', 'hazelnut',
    'leather', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush',
    'transistor', 'wood', 'zipper'
]
# ── /inspect ─────────────────────────────────────────────────
class InspectResponse(BaseModel):
    """Full result payload for a single /inspect call.

    Bundles the core anomaly decision with optional visual artifacts,
    retrieval/graph/XAI context, and request metadata. The LLM report is
    produced asynchronously and fetched later via /report/{report_id}.
    """
    # Core result
    is_anomalous: bool
    anomaly_score: float = Field(..., ge=0.0)             # raw score, constrained non-negative
    calibrated_score: float = Field(..., ge=0.0, le=1.0)  # probability-like score in [0, 1]
    score_std: float
    category: str
    version: str
    # Visuals (base64 PNG strings); None when the artifact was not produced
    heatmap_b64: Optional[str] = None
    defect_crop_b64: Optional[str] = None
    depth_map_b64: Optional[str] = None
    # Retrieval — nearest historical cases (item schema set by the producer; verify against caller)
    similar_cases: List[dict] = []
    # Graph context
    graph_context: dict = {}
    # XAI — SHAP feature attributions
    shap_features: dict = {}
    # LLM report (polled separately via /report/{report_id})
    report_id: Optional[str] = None
    # Meta
    latency_ms: float
    image_hash: str
    low_confidence: bool = False  # set when calibrated_score < 0.3
# ── /report/{report_id} ──────────────────────────────────────
class ReportResponse(BaseModel):
    """Polling response for an asynchronously generated LLM report."""
    status: str  # one of "pending" | "ready" | "not_found"
    report: Optional[str] = None  # report text; presumably populated only when status == "ready" — TODO confirm
# ── /forensics/{case_id} ─────────────────────────────────────
class ForensicsResponse(BaseModel):
    """Deep-dive diagnostics for a previously inspected case.

    Unlike InspectResponse, the score fields carry no Field constraints here —
    they echo already-validated stored values.
    """
    case_id: str
    category: str
    anomaly_score: float
    calibrated_score: float
    patch_scores_grid: List[List[float]]  # per-patch scores; expected 28x28 per the original note — TODO confirm
    gradcampp_b64: Optional[str] = None   # Grad-CAM++ overlay as base64 PNG, if generated
    shap_features: dict = {}
    similar_cases: List[dict] = []
    graph_context: dict = {}
    retrieval_trace: List[dict] = []      # step-by-step retrieval provenance — schema set by producer; verify
# ── /knowledge/search ────────────────────────────────────────
class KnowledgeSearchResponse(BaseModel):
    """Results of a query against the knowledge base."""
    results: List[dict]  # matching entries (item schema set by the search backend)
    total_found: int     # total match count — may differ from len(results) if truncated; TODO confirm
    query: str           # the query string, echoed back
# ── /arena/next_case ─────────────────────────────────────────
class ArenaCase(BaseModel):
    """A case served to the arena for the user to judge."""
    case_id: str
    image_b64: str               # base64-encoded image shown to the user
    expert_mode: bool = False    # True when the AI score is borderline, i.e. in [0.45, 0.55]
# ── /arena/submit/{case_id} ──────────────────────────────────
class ArenaSubmitRequest(BaseModel):
    """User's verdict on an arena case."""
    user_rating: int = Field(..., ge=0, le=1)    # binary verdict; presumably 0 = normal, 1 = anomalous — TODO confirm mapping
    user_severity: int = Field(..., ge=1, le=5)  # perceived severity on a 1-5 scale
    session_id: Optional[str] = None             # client session id — presumably used for streak tracking; verify
class ArenaSubmitResponse(BaseModel):
    """Reveal payload returned after the user submits an arena verdict."""
    correct_label: int            # ground-truth label for the case
    ai_score: float               # raw AI anomaly score
    calibrated_score: float
    user_score: float             # user's score — running total or per-case; semantics not visible here, TODO confirm
    streak: int                   # presumably consecutive correct answers — verify against producer
    top_shap_features: List[dict] # top 2 features for post-submission display
    heatmap_b64: Optional[str] = None
    is_expert_case: bool = False
# ── /correct/{case_id} ───────────────────────────────────────
class CorrectionType(str, Enum):
    """Closed set of correction kinds a reviewer can file against a case.

    Subclasses str so values serialize/validate as plain strings.
    """
    false_positive = "false_positive"  # flagged anomalous, actually normal
    false_negative = "false_negative"  # flagged normal, actually anomalous
    wrong_category = "wrong_category"  # the assigned category was incorrect
class CorrectionRequest(BaseModel):
    """Reviewer-submitted correction for a finished case."""
    correction_type: CorrectionType
    note: Optional[str] = Field(None, max_length=500)  # optional free-text explanation, capped at 500 chars
class CorrectionResponse(BaseModel):
    """Acknowledgement that a correction was recorded."""
    status: str = "correction_logged"  # fixed acknowledgement value
    case_id: str
# ── /health ──────────────────────────────────────────────────
class HealthResponse(BaseModel):
    """Service health/readiness snapshot."""
    status: str
    version: str
    uptime_seconds: float
    index_sizes: dict              # presumably per-index entry counts — verify producer
    coreset_size: int
    threshold_config_version: str
    cache_stats: dict
# ── /metrics ─────────────────────────────────────────────────
class MetricsResponse(BaseModel):
    """Operational metrics for monitoring dashboards."""
    request_count: int
    latency_p50_ms: float       # median request latency
    latency_p95_ms: float       # 95th-percentile request latency
    cache_hit_rate: float       # presumably a fraction in [0, 1] — TODO confirm units
    hf_push_failure_count: int  # count of failed "hf" pushes — presumably Hugging Face Hub; verify
    memory_usage_mb: float