// focus_guard_test/src/components/FocusPageLocal.jsx
// k22056537
// feat: UI nav, onboarding, L2CS weights path + torch.load; trim dev files
// 37a8ba6
import React, { useState, useEffect, useRef } from 'react';
import CalibrationOverlay from './CalibrationOverlay';
// Onboarding flow steps shown before the live session UI appears.
const FLOW_STEPS = {
  intro: 'intro',
  permission: 'permission',
  ready: 'ready'
};

// Tri-state focus indicator driven by backend status updates.
// Values double as CSS class suffixes on the state pill.
const FOCUS_STATES = {
  pending: 'pending',
  focused: 'focused',
  notFocused: 'not-focused'
};
// Decorative greeting icon for the intro card (purely visual, aria-hidden).
function HelloIcon() {
  return (
    <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
      <circle cx="48" cy="48" r="40" fill="#007BFF" />
      <path d="M30 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
      <path d="M54 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
      <path d="M30 52c3 11 10 17 18 17s15-6 18-17" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
    </svg>
  );
}
// Decorative camera icon for the permission step (purely visual, aria-hidden).
function CameraIcon() {
  return (
    <svg width="110" height="110" viewBox="0 0 110 110" aria-hidden="true">
      <rect x="30" y="36" width="50" height="34" rx="5" fill="none" stroke="#007BFF" strokeWidth="6" />
      <path d="M24 72h62c0 9-7 16-16 16H40c-9 0-16-7-16-16Z" fill="none" stroke="#007BFF" strokeWidth="6" />
      <path d="M55 28v8" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
      <circle cx="55" cy="36" r="14" fill="none" stroke="#007BFF" strokeWidth="6" />
      <circle cx="55" cy="36" r="4" fill="#007BFF" />
      <path d="M46 83h18" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
    </svg>
  );
}
// Display order for the model selector strip; filtered against the
// backend's list of actually-available models at render time.
const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric'];
// Static copy and metrics for the model info card. Figures are from
// offline evaluation; they are display-only and never sent to the API.
const MODEL_INFO = {
  hybrid: {
    label: 'Hybrid',
    tagline: 'Best overall — combines ML with geometric scoring',
    how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). A logistic regression meta-classifier combines both signals for the final decision.',
    accuracy: '84.3%',
    f1: '0.864',
    auc: '0.880',
    threshold: '0.46',
    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Most robust across different people. Geometric scoring generalises well; ML catches subtle patterns.',
    badge: 'Recommended',
  },
  xgboost: {
    label: 'XGBoost',
    tagline: 'Highest raw accuracy — gradient-boosted decision trees',
    how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.',
    accuracy: '84.3%',
    f1: '0.859',
    auc: '0.880',
    threshold: '0.38',
    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Strong pattern recognition. Handles non-linear feature interactions. 95.9% accuracy on random split (but LOPO is the fairer test).',
    badge: null,
  },
  mlp: {
    label: 'MLP',
    tagline: 'Lightweight neural network — fast and efficient',
    how: 'Two-layer neural network (64→32 neurons). Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.',
    accuracy: '82.7%',
    f1: '0.858',
    auc: '0.862',
    threshold: '0.23',
    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Fastest inference. Smallest model size. Good baseline. 92.9% accuracy on random split.',
    badge: null,
  },
  geometric: {
    label: 'Geometric',
    tagline: 'Baseline only — hardcoded thresholds, no learning',
    how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.',
    accuracy: '~77%',
    f1: '0.772',
    auc: 'N/A',
    threshold: '0.55',
    evaluation: 'Leave-One-Person-Out geometric sweep',
    features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)',
    strengths: 'No model files needed. Useful as a fallback. This is the baseline that motivated building the ML models — its fixed thresholds struggle with different face shapes, lighting, and camera angles.',
    badge: 'Baseline',
  },
};
/**
 * Local-camera focus session page: onboarding flow, live preview,
 * model selection, timeline, and Picture-in-Picture support.
 *
 * @param {object} props
 * @param {object} props.videoManager - owns camera capture, streaming,
 *   calibration, and stats; its callbacks are patched in an effect below.
 * @param {object|null} props.sessionResult - summary shown in the result overlay.
 * @param {Function} props.setSessionResult - setter for that summary.
 * @param {boolean} props.isActive - whether this page is currently visible.
 */
function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive }) {
  const [currentFrame, setCurrentFrame] = useState(15); // target FPS (slider range 10-30)
  const [timelineEvents, setTimelineEvents] = useState([]); // rolling window of focus samples
  const [stats, setStats] = useState(null); // polled once a second from videoManager
  const [systemStats, setSystemStats] = useState(null); // server CPU/RAM, polled every 3s
  const [availableModels, setAvailableModels] = useState([]);
  const [currentModel, setCurrentModel] = useState('mlp');
  const [flowStep, setFlowStep] = useState(FLOW_STEPS.intro);
  const [cameraReady, setCameraReady] = useState(false);
  const [isStarting, setIsStarting] = useState(false);
  const [focusState, setFocusState] = useState(FOCUS_STATES.pending);
  const [cameraError, setCameraError] = useState('');
  const [calibration, setCalibration] = useState(null); // active calibration payload or null
  const [l2csBoost, setL2csBoost] = useState(false); // eye-gaze boost toggle (mirrors backend)
  const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
  const localVideoRef = useRef(null); // hidden <video> fed by the raw camera stream
  const displayCanvasRef = useRef(null); // visible canvas showing processed frames
  const pipVideoRef = useRef(null); // hidden <video> used for Picture-in-Picture
  const pipStreamRef = useRef(null); // MediaStream captured for PiP (reused across toggles)
  const previewFrameRef = useRef(null); // requestAnimationFrame id of the idle preview loop
// Render a seconds count as a compact "Xm Ys" label; exactly zero
// seconds collapses to the shorter '0s' form.
const formatDuration = (seconds) => {
  if (seconds === 0) return '0s';
  const wholeMinutes = Math.floor(seconds / 60);
  const leftoverSeconds = Math.floor(seconds % 60);
  return `${wholeMinutes}m ${leftoverSeconds}s`;
};
// Cancel any scheduled preview-render frame and clear its handle.
const stopPreviewLoop = () => {
  const frameId = previewFrameRef.current;
  if (!frameId) return;
  cancelAnimationFrame(frameId);
  previewFrameRef.current = null;
};
// Start drawing raw camera frames onto the display canvas while idle
// (camera enabled but no processing session streaming). The loop
// self-terminates when streaming starts or the camera is released.
const startPreviewLoop = () => {
  stopPreviewLoop();
  const renderPreview = () => {
    const canvas = displayCanvasRef.current;
    const video = localVideoRef.current;
    // NOTE: cameraReady is captured when this closure is created; the
    // effect below recreates the loop whenever it changes.
    if (!canvas || !video || !cameraReady || videoManager?.isStreaming) {
      previewFrameRef.current = null;
      return;
    }
    // readyState >= 2 (HAVE_CURRENT_DATA) means a frame is decodable.
    if (video.readyState >= 2) {
      const ctx = canvas.getContext('2d');
      ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    }
    previewFrameRef.current = requestAnimationFrame(renderPreview);
  };
  previewFrameRef.current = requestAnimationFrame(renderPreview);
};
// Map camera / network failures to a user-readable message.
// getUserMedia errors are matched by name first; a failed WebSocket
// event (which carries target.url) gets a backend hint; anything else
// falls back to the error's own message.
const getErrorMessage = (err) => {
  switch (err?.name) {
    case 'NotAllowedError':
      return 'Camera permission denied. Please allow camera access.';
    case 'NotFoundError':
      return 'No camera found. Please connect a camera.';
    case 'NotReadableError':
      return 'Camera is already in use by another application.';
    default:
      break;
  }
  if (err?.target?.url) {
    return `WebSocket connection failed: ${err.target.url}. Check that the backend server is running.`;
  }
  return err?.message || 'Failed to start focus session.';
};
// Patch videoManager callbacks while mounted and poll its stats once a
// second. The original callbacks are saved and restored on cleanup so
// other consumers of the shared manager keep working.
useEffect(() => {
  if (!videoManager) return;
  const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate;
  const originalOnSessionEnd = videoManager.callbacks.onSessionEnd;
  videoManager.callbacks.onStatusUpdate = (isFocused) => {
    setTimelineEvents((prev) => {
      // Keep a rolling window of the most recent 60 samples.
      const newEvents = [...prev, { isFocused, timestamp: Date.now() }];
      if (newEvents.length > 60) newEvents.shift();
      return newEvents;
    });
    setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused);
    // Chain to whatever handler was installed before us.
    if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused);
  };
  videoManager.callbacks.onSessionEnd = (summary) => {
    setFocusState(FOCUS_STATES.pending);
    setCameraReady(false);
    if (originalOnSessionEnd) originalOnSessionEnd(summary);
  };
  videoManager.callbacks.onCalibrationUpdate = (cal) => {
    // Only keep calibration state while a calibration run is active.
    setCalibration(cal && cal.active ? { ...cal } : null);
  };
  const statsInterval = setInterval(() => {
    if (videoManager && videoManager.getStats) {
      setStats(videoManager.getStats());
    }
  }, 1000);
  return () => {
    if (videoManager) {
      videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
      videoManager.callbacks.onSessionEnd = originalOnSessionEnd;
      videoManager.callbacks.onCalibrationUpdate = null;
    }
    clearInterval(statsInterval);
  };
}, [videoManager]);
// Fetch available models on mount and sync the active selection.
useEffect(() => {
  fetch('/api/models')
    .then((res) => {
      // fetch() resolves on HTTP errors too — treat non-2xx as failure
      // so an error page is never parsed as a model list.
      if (!res.ok) throw new Error(`HTTP ${res.status}`);
      return res.json();
    })
    .then((data) => {
      // Guard: data.available may be absent or malformed; the original
      // code crashed on `.find` below in that case.
      const available = Array.isArray(data.available) ? data.available : [];
      if (available.length > 0) setAvailableModels(available);
      if (data.current) {
        // If L2CS was the active model, switch to a base model + enable boost
        if (data.current === 'l2cs') {
          const fallback = available.find((m) => m !== 'l2cs') || 'mlp';
          setCurrentModel(fallback);
          handleModelChange(fallback);
        } else {
          setCurrentModel(data.current);
        }
      }
      if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost);
      if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available);
    })
    .catch((err) => console.error('Failed to fetch models:', err));
}, []);
// Run the idle preview loop only on the ready step, with the camera on
// and no processing session streaming; otherwise ensure it is stopped.
useEffect(() => {
  if (flowStep === FLOW_STEPS.ready && cameraReady && !videoManager?.isStreaming) {
    startPreviewLoop();
    return;
  }
  stopPreviewLoop();
}, [cameraReady, flowStep, videoManager?.isStreaming]);
// Pause the idle preview loop whenever this page is hidden.
useEffect(() => {
  if (!isActive) {
    stopPreviewLoop();
  }
}, [isActive]);
// Unmount cleanup: stop the preview loop, detach the PiP video, and
// release any captured media tracks.
useEffect(() => {
  return () => {
    stopPreviewLoop();
    if (pipVideoRef.current) {
      pipVideoRef.current.pause();
      pipVideoRef.current.srcObject = null;
    }
    if (pipStreamRef.current) {
      pipStreamRef.current.getTracks().forEach((t) => t.stop());
      pipStreamRef.current = null;
    }
  };
}, []);
// Poll server CPU/memory every 3s for the stats strip in the UI.
useEffect(() => {
  const pollSystemStats = async () => {
    try {
      const res = await fetch('/api/stats/system');
      setSystemStats(await res.json());
    } catch (_err) {
      // Backend unreachable — hide the stats strip.
      setSystemStats(null);
    }
  };
  pollSystemStats();
  const timer = setInterval(pollSystemStats, 3000);
  return () => clearInterval(timer);
}, []);
// Ask the backend to switch the active focus model; local state is only
// updated once the server confirms the change.
const handleModelChange = async (modelName) => {
  try {
    const response = await fetch('/api/settings', {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model_name: modelName })
    });
    const payload = await response.json();
    if (payload.updated) setCurrentModel(modelName);
  } catch (err) {
    console.error('Failed to switch model:', err);
  }
};
// Request camera access and, on success, advance to the ready step.
// Errors are translated to a user message shown in the permission card.
const handleEnableCamera = async () => {
  if (!videoManager) return;
  try {
    setCameraError('');
    await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
    setCameraReady(true);
    setFlowStep(FLOW_STEPS.ready);
    setFocusState(FOCUS_STATES.pending);
  } catch (err) {
    const errorMessage = getErrorMessage(err);
    setCameraError(errorMessage);
    console.error('Camera init error:', err);
  }
};
// Toggle the L2CS eye-gaze boost on the backend, then mirror the new
// state locally. Enabling mid-session kicks off calibration; disabling
// cancels any calibration in progress.
const handleEyeGazeToggle = async () => {
  const next = !l2csBoost;
  try {
    const res = await fetch('/api/settings', {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ l2cs_boost: next })
    });
    // NOTE(review): a non-OK response is ignored silently — the toggle
    // stays in its previous state with no user feedback.
    if (!res.ok) return;
    setL2csBoost(next);
    if (next && videoManager && videoManager.isStreaming) {
      // Turning ON → auto-start calibration
      videoManager.startCalibration();
    } else if (!next && videoManager) {
      // Turning OFF → cancel any active calibration
      videoManager.cancelCalibration();
    }
  } catch (err) {
    console.error('Failed to toggle eye gaze:', err);
  }
};
// Start a focus session: reset per-session state, lazily initialise the
// camera if it is not ready yet, then begin streaming to the backend.
const handleStart = async () => {
  try {
    setIsStarting(true);
    setSessionResult(null);
    setTimelineEvents([]);
    setFocusState(FOCUS_STATES.pending);
    setCameraError('');
    if (!cameraReady) {
      await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
      setCameraReady(true);
      setFlowStep(FLOW_STEPS.ready);
    }
    await videoManager.startStreaming();
  } catch (err) {
    const errorMessage = getErrorMessage(err);
    setCameraError(errorMessage);
    setFocusState(FOCUS_STATES.pending);
    console.error('Start error:', err);
    alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`);
  } finally {
    // Always re-enable the Start button, success or failure.
    setIsStarting(false);
  }
};
// Stop the session and tear down everything it may have spawned:
// streaming, Picture-in-Picture, captured streams, and the preview loop.
const handleStop = async () => {
  if (videoManager) {
    await videoManager.stopStreaming();
  }
  try {
    // Leave PiP only if our hidden video is the one currently docked.
    if (document.pictureInPictureElement === pipVideoRef.current) {
      await document.exitPictureInPicture();
    }
  } catch (_) {}
  if (pipVideoRef.current) {
    pipVideoRef.current.pause();
    pipVideoRef.current.srcObject = null;
  }
  if (pipStreamRef.current) {
    // Release the captured tracks so the camera light can turn off.
    pipStreamRef.current.getTracks().forEach((t) => t.stop());
    pipStreamRef.current = null;
  }
  stopPreviewLoop();
  setFocusState(FOCUS_STATES.pending);
  setCameraReady(false);
};
// Toggle Picture-in-Picture for the processed-video canvas. Falls back
// to the raw camera stream, and to Safari's webkit presentation-mode
// API, where the standard path is unavailable.
const handlePiP = async () => {
  try {
    // PiP only makes sense while frames are being processed.
    if (!videoManager || !videoManager.isStreaming) {
      alert('Please start the video first.');
      return;
    }
    if (!displayCanvasRef.current) {
      alert('Video not ready.');
      return;
    }
    // Already docked → this call acts as a toggle and exits PiP.
    if (document.pictureInPictureElement === pipVideoRef.current) {
      await document.exitPictureInPicture();
      console.log('PiP exited');
      return;
    }
    // Standard API support check (Safari is handled separately below).
    if (!document.pictureInPictureEnabled) {
      alert('Picture-in-Picture is not supported in this browser.');
      return;
    }
    // The hidden <video> element that actually gets docked into PiP.
    const pipVideo = pipVideoRef.current;
    if (!pipVideo) {
      alert('PiP video element not ready.');
      return;
    }
    const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function';
    // Reuse a previously captured stream, else capture one now.
    let stream = pipStreamRef.current;
    if (!stream) {
      const capture = displayCanvasRef.current.captureStream;
      if (typeof capture === 'function') {
        stream = capture.call(displayCanvasRef.current, 30);
      }
      if (!stream || stream.getTracks().length === 0) {
        // Canvas capture unavailable — fall back to the raw camera stream.
        const cameraStream = localVideoRef.current?.srcObject;
        if (!cameraStream) {
          alert('Camera stream not ready.');
          return;
        }
        stream = cameraStream;
      }
      pipStreamRef.current = stream;
    }
    // A stream with no tracks cannot be displayed.
    if (!stream || stream.getTracks().length === 0) {
      alert('Failed to capture video stream from canvas.');
      return;
    }
    pipVideo.srcObject = stream;
    // Wait until the video has decodable data before requesting PiP.
    if (pipVideo.readyState < 2) {
      await new Promise((resolve) => {
        const onReady = () => {
          pipVideo.removeEventListener('loadeddata', onReady);
          pipVideo.removeEventListener('canplay', onReady);
          resolve();
        };
        pipVideo.addEventListener('loadeddata', onReady);
        pipVideo.addEventListener('canplay', onReady);
        // Safety timeout so a stalled stream never blocks the click.
        setTimeout(resolve, 600);
      });
    }
    try {
      await pipVideo.play();
    } catch (_) {}
    // Safari path: webkitSetPresentationMode instead of the standard API.
    if (isSafariPiP) {
      try {
        pipVideo.webkitSetPresentationMode('picture-in-picture');
        console.log('PiP activated (Safari)');
        return;
      } catch (e) {
        // Safari may refuse canvas-capture streams; retry with the camera.
        const cameraStream = localVideoRef.current?.srcObject;
        if (cameraStream && cameraStream !== pipVideo.srcObject) {
          pipVideo.srcObject = cameraStream;
          try {
            await pipVideo.play();
          } catch (_) {}
          pipVideo.webkitSetPresentationMode('picture-in-picture');
          console.log('PiP activated (Safari fallback)');
          return;
        }
        throw e;
      }
    }
    // Standard path.
    if (typeof pipVideo.requestPictureInPicture === 'function') {
      await pipVideo.requestPictureInPicture();
      console.log('PiP activated');
    } else {
      alert('Picture-in-Picture is not supported in this browser.');
    }
  } catch (err) {
    console.error('PiP error:', err);
    alert(`Failed to enter Picture-in-Picture: ${err.message}`);
  }
};
// Alias: the "floating window" feature is implemented via PiP.
// NOTE(review): currently unreferenced in this file (the button binds
// handlePiP directly) — confirm no external usage before removing.
const handleFloatingWindow = () => {
  handlePiP();
};
// Apply a new target frame rate from either the slider or number input.
const handleFrameChange = (val) => {
  const rate = Number.parseInt(val, 10);
  // Ignore transient invalid input (e.g. a cleared number field) so we
  // never push NaN into state or down to videoManager.setFrameRate.
  if (Number.isNaN(rate)) return;
  setCurrentFrame(rate);
  if (videoManager) {
    videoManager.setFrameRate(rate);
  }
};
// Show a snapshot of the running session in the result overlay without
// stopping the session.
const handlePreview = () => {
  if (!videoManager || !videoManager.isStreaming) {
    alert('Please start a session first.');
    return;
  }
  // Need an active backend session to report on.
  const currentStats = videoManager.getStats();
  if (!currentStats.sessionId) {
    alert('No active session.');
    return;
  }
  // Wall-clock seconds since the session began (0 if start time missing).
  const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000);
  // NOTE(review): this expression reduces to currentStatus ? 1 : 0 — it
  // reflects only the instantaneous focus state, not a session average.
  // A true score would need a focused-frame count from the stats.
  const focusScore = currentStats.framesProcessed > 0
    ? (currentStats.framesProcessed * (currentStats.currentStatus ? 1 : 0)) / currentStats.framesProcessed
    : 0;
  // Shape matches the fields rendered by the result overlay JSX.
  setSessionResult({
    duration_seconds: sessionDuration,
    focus_score: focusScore,
    total_frames: currentStats.framesProcessed,
    focused_frames: Math.floor(currentStats.framesProcessed * focusScore)
  });
};
// Dismiss the session-result overlay.
const handleCloseOverlay = () => {
  setSessionResult(null);
};
// When inactive, keep the page mounted (so the camera/session survives
// tab switches) but visually hidden and non-interactive.
const pageStyle = isActive
  ? undefined
  : {
      position: 'absolute',
      width: '1px',
      height: '1px',
      overflow: 'hidden',
      opacity: 0,
      pointerEvents: 'none'
    };
// Human-readable label for the focus-state pill.
const focusStateLabel = {
  [FOCUS_STATES.pending]: 'Pending',
  [FOCUS_STATES.focused]: 'Focused',
  [FOCUS_STATES.notFocused]: 'Not Focused'
}[focusState];
// Copy for the intro card panels. (Recreated each render — cheap, but
// could be hoisted to module scope.)
const introHighlights = [
  {
    title: 'Live focus tracking',
    text: 'Head pose, gaze, and eye openness are read continuously during the session.'
  },
  {
    title: 'Quick setup',
    text: 'Front-facing light and a stable camera angle give the cleanest preview.'
  },
  {
    title: 'Private by default',
    text: 'Only session metadata is stored, not the raw camera footage.'
  }
];
// Copy for the numbered camera-permission step list.
const permissionSteps = [
  {
    title: 'Allow browser access',
    text: 'Approve the camera prompt so the preview can appear immediately.'
  },
  {
    title: 'Check your framing',
    text: 'Keep your face visible and centered for more stable landmark detection.'
  },
  {
    title: 'Start when ready',
    text: 'After the preview appears, use the page controls to begin or stop.'
  }
];
// Render the onboarding overlay: the intro card on the first step, the
// camera-permission card on the second (until the camera is ready), and
// nothing once the flow reaches the ready step.
const renderIntroCard = () => {
  if (flowStep === FLOW_STEPS.intro) {
    return (
      <div className="focus-flow-overlay">
        <div className="focus-flow-card">
          <div className="focus-flow-header">
            <div>
              <div className="focus-flow-eyebrow">Focus Session</div>
              <h2>Before you begin</h2>
            </div>
            <div className="focus-flow-icon">
              <HelloIcon />
            </div>
          </div>
          <p className="focus-flow-lead">
            The focus page uses your live camera preview to estimate attention in real time.
            Review the setup notes below, then continue to camera access.
          </p>
          <div className="focus-flow-grid">
            {introHighlights.map((item) => (
              <article key={item.title} className="focus-flow-panel">
                <h3>{item.title}</h3>
                <p>{item.text}</p>
              </article>
            ))}
          </div>
          <div className="focus-flow-footer">
            <div className="focus-flow-note">
              You can still change frame rate and available model options after the preview loads.
            </div>
            <button className="focus-flow-button" onClick={() => setFlowStep(FLOW_STEPS.permission)}>
              Continue
            </button>
          </div>
        </div>
      </div>
    );
  }
  if (flowStep === FLOW_STEPS.permission && !cameraReady) {
    return (
      <div className="focus-flow-overlay">
        <div className="focus-flow-card">
          <div className="focus-flow-header">
            <div>
              <div className="focus-flow-eyebrow">Camera Setup</div>
              <h2>Enable camera access</h2>
            </div>
            <div className="focus-flow-icon">
              <CameraIcon />
            </div>
          </div>
          <p className="focus-flow-lead">
            Once access is granted, your preview appears here and the rest of the Focus page
            behaves like the other dashboard screens.
          </p>
          <div className="focus-flow-steps">
            {permissionSteps.map((item, index) => (
              <div key={item.title} className="focus-flow-step">
                <div className="focus-flow-step-number">{index + 1}</div>
                <div className="focus-flow-step-copy">
                  <h3>{item.title}</h3>
                  <p>{item.text}</p>
                </div>
              </div>
            ))}
          </div>
          {cameraError ? <div className="focus-inline-error">{cameraError}</div> : null}
          <div className="focus-flow-footer">
            <button
              type="button"
              className="focus-flow-secondary"
              onClick={() => setFlowStep(FLOW_STEPS.intro)}
            >
              Back
            </button>
            <button className="focus-flow-button" onClick={handleEnableCamera}>
              Enable Camera
            </button>
          </div>
        </div>
      </div>
    );
  }
  return null;
};
return (
<main id="page-b" className="page" style={pageStyle}>
{renderIntroCard()}
<section id="display-area" className="focus-display-shell">
<video
ref={pipVideoRef}
muted
playsInline
autoPlay
style={{
position: 'absolute',
width: '1px',
height: '1px',
opacity: 0,
pointerEvents: 'none'
}}
/>
{/* local video (hidden, for capture) */}
<video
ref={localVideoRef}
muted
playsInline
autoPlay
style={{ display: 'none' }}
/>
{/* processed video (canvas) */}
<canvas
ref={displayCanvasRef}
width={640}
height={480}
style={{
width: '100%',
height: '100%',
objectFit: 'contain',
backgroundColor: '#101010'
}}
/>
{flowStep === FLOW_STEPS.ready ? (
<>
<div className={`focus-state-pill ${focusState}`}>
<span className="focus-state-dot" />
{focusStateLabel}
</div>
{!cameraReady && !videoManager?.isStreaming ? (
<div className="focus-idle-overlay">
<p>Camera is paused.</p>
<span>Use Start to enable the camera and begin detection.</span>
</div>
) : null}
</>
) : null}
{sessionResult && (
<div className="session-result-overlay">
<h3>Session Complete!</h3>
<div className="result-item">
<span className="label">Duration:</span>
<span className="value">{formatDuration(sessionResult.duration_seconds)}</span>
</div>
<div className="result-item">
<span className="label">Focus Score:</span>
<span className="value">{(sessionResult.focus_score * 100).toFixed(1)}%</span>
</div>
<button
onClick={handleCloseOverlay}
style={{
marginTop: '20px',
padding: '8px 20px',
background: 'transparent',
border: '1px solid white',
color: 'white',
borderRadius: '20px',
cursor: 'pointer'
}}
>
Close
</button>
</div>
)}
</section>
{flowStep === FLOW_STEPS.ready ? (
<>
{/* Model selector */}
{availableModels.length > 0 ? (
<section className="focus-model-strip">
<span className="focus-model-label">Model:</span>
{MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => (
<button
key={name}
onClick={() => handleModelChange(name)}
className={`focus-model-button ${currentModel === name ? 'active' : ''}`}
>
{MODEL_INFO[name]?.label || name}
</button>
))}
{l2csBoostAvailable && (
<>
<span className="focus-model-sep" />
<button
onClick={handleEyeGazeToggle}
className={`eye-gaze-toggle ${l2csBoost ? 'on' : 'off'}`}
title={l2csBoost ? 'Eye gaze tracking activeclick to disable' : 'Enable eye gaze tracking (requires calibration)'}
>
<svg width="16" height="16" viewBox="0 0 16 16" className="eye-gaze-icon" aria-hidden="true">
<ellipse cx="8" cy="8" rx="7" ry="4.5" fill="none" stroke="currentColor" strokeWidth="1.4" />
<circle cx="8" cy="8" r="2.2" fill="currentColor" />
</svg>
{l2csBoost ? 'Eye Gaze On' : 'Eye Gaze'}
</button>
{l2csBoost && stats && stats.isStreaming && (
<button
onClick={() => videoManager && videoManager.startCalibration()}
className="focus-model-button recalibrate"
title="Re-run gaze calibration"
>
Recalibrate
</button>
)}
</>
)}
</section>
) : null}
{/* Server stats */}
{systemStats && systemStats.cpu_percent != null && (
<section className="focus-system-stats">
<span>CPU: <strong>{systemStats.cpu_percent}%</strong></span>
<span className="focus-system-stats-sep" />
<span>RAM: <strong>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
</section>
)}
<section id="timeline-area">
<div className="timeline-label">Timeline</div>
<div id="timeline-visuals">
{timelineEvents.map((event, index) => (
<div
key={index}
className="timeline-block"
style={{
backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
width: '10px',
height: '20px',
display: 'inline-block',
marginRight: '2px',
borderRadius: '2px'
}}
title={event.isFocused ? 'Focused' : 'Distracted'}
/>
))}
</div>
<div id="timeline-line" />
</section>
<section id="control-panel">
<button id="btn-cam-start" className="action-btn green" onClick={handleStart} disabled={isStarting}>
{isStarting ? 'Starting...' : 'Start'}
</button>
<button id="btn-floating" className="action-btn blue" onClick={handlePiP}>
Floating Window
</button>
<button id="btn-preview" className="action-btn orange" onClick={handlePreview}>
Preview Result
</button>
<button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
Stop
</button>
</section>
{cameraError ? (
<div className="focus-inline-error focus-inline-error-standalone">{cameraError}</div>
) : null}
{/* Model info card — below action buttons */}
{MODEL_INFO[currentModel] && (
<section className="model-card">
<div className="model-card-header">
<h3 className="model-card-title">{MODEL_INFO[currentModel].label}</h3>
{MODEL_INFO[currentModel].badge && (
<span className={MODEL_INFO[currentModel].badge === 'Baseline' ? 'model-card-badge-baseline' : 'model-card-badge'}>
{MODEL_INFO[currentModel].badge}
</span>
)}
</div>
<p className="model-card-tagline">{MODEL_INFO[currentModel].tagline}</p>
<div className="model-card-metrics">
<div className="model-card-metric">
<span className="model-card-metric-value">{MODEL_INFO[currentModel].accuracy}</span>
<span className="model-card-metric-label">Accuracy</span>
</div>
<div className="model-card-metric">
<span className="model-card-metric-value">{MODEL_INFO[currentModel].f1}</span>
<span className="model-card-metric-label">F1 Score</span>
</div>
<div className="model-card-metric">
<span className="model-card-metric-value">{MODEL_INFO[currentModel].auc}</span>
<span className="model-card-metric-label">ROC-AUC</span>
</div>
<div className="model-card-metric">
<span className="model-card-metric-value">{MODEL_INFO[currentModel].threshold}</span>
<span className="model-card-metric-label">Threshold</span>
</div>
</div>
<div className="model-card-details">
<div className="model-card-section">
<h4>How it works</h4>
<p>{MODEL_INFO[currentModel].how}</p>
</div>
<div className="model-card-section">
<h4>Features used</h4>
<p>{MODEL_INFO[currentModel].features}</p>
</div>
<div className="model-card-section">
<h4>Strengths</h4>
<p>{MODEL_INFO[currentModel].strengths}</p>
</div>
</div>
<div className="model-card-eval">
Evaluated with {MODEL_INFO[currentModel].evaluation}
</div>
</section>
)}
<section id="frame-control">
<label htmlFor="frame-slider">Frame Rate (FPS)</label>
<input
type="range"
id="frame-slider"
min="10"
max="30"
value={currentFrame}
onChange={(e) => handleFrameChange(e.target.value)}
/>
<input
type="number"
id="frame-input"
min="10"
max="30"
value={currentFrame}
onChange={(e) => handleFrameChange(e.target.value)}
/>
</section>
</>
) : null}
{/* Calibration overlay (fixed fullscreen, must be outside overflow:hidden containers) */}
<CalibrationOverlay calibration={calibration} videoManager={videoManager} />
</main>
);
}
export default FocusPageLocal;