// FocusPageLocal — React page component for local-camera focus tracking.
// Provides: a guided flow (intro → camera permission → ready), a live canvas
// preview driven by requestAnimationFrame, ML model selection (hybrid /
// xgboost / mlp / geometric) with per-model info cards, an "L2CS boost"
// eye-gaze toggle with calibration, Picture-in-Picture (including a Safari
// webkitSetPresentationMode path), a rolling 60-event focus timeline, and
// server CPU/RAM polling every 3s via /api/stats/system.
//
// NOTE(review): the JSX in the render section below appears to have had its
// element tags stripped during extraction (empty `return ( );` bodies in
// HelloIcon/CameraIcon, bare text children, empty `.map(... => ( ))`
// callbacks). The markup as shown is not syntactically complete — recover the
// original from version control before making behavioral changes here.
import React, { useState, useEffect, useRef } from 'react'; import CalibrationOverlay from './CalibrationOverlay'; const FLOW_STEPS = { intro: 'intro', permission: 'permission', ready: 'ready' }; const FOCUS_STATES = { pending: 'pending', focused: 'focused', notFocused: 'not-focused' }; function HelloIcon() { return ( ); } function CameraIcon() { return ( ); } const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric']; const MODEL_INFO = { hybrid: { label: 'Hybrid', tagline: 'Best overall — combines ML with geometric scoring', how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). A logistic regression meta-classifier combines both signals for the final decision.', accuracy: '84.3%', f1: '0.864', auc: '0.880', threshold: '0.46', evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Most robust across different people. Geometric scoring generalises well; ML catches subtle patterns.', badge: 'Recommended', }, xgboost: { label: 'XGBoost', tagline: 'Highest raw accuracy — gradient-boosted decision trees', how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.', accuracy: '84.3%', f1: '0.859', auc: '0.880', threshold: '0.38', evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Strong pattern recognition. Handles non-linear feature interactions. 95.9% accuracy on random split (but LOPO is the fairer test).', badge: null, }, mlp: { label: 'MLP', tagline: 'Lightweight neural network — fast and efficient', how: 'Two-layer neural network (64→32 neurons). 
Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.', accuracy: '82.7%', f1: '0.858', auc: '0.862', threshold: '0.23', evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Fastest inference. Smallest model size. Good baseline. 92.9% accuracy on random split.', badge: null, }, geometric: { label: 'Geometric', tagline: 'Baseline only — hardcoded thresholds, no learning', how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.', accuracy: '~77%', f1: '0.772', auc: 'N/A', threshold: '0.55', evaluation: 'Leave-One-Person-Out geometric sweep', features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)', strengths: 'No model files needed. Useful as a fallback. 
This is the baseline that motivated building the ML models — its fixed thresholds struggle with different face shapes, lighting, and camera angles.', badge: 'Baseline', }, }; function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive }) { const [currentFrame, setCurrentFrame] = useState(15); const [timelineEvents, setTimelineEvents] = useState([]); const [stats, setStats] = useState(null); const [systemStats, setSystemStats] = useState(null); const [availableModels, setAvailableModels] = useState([]); const [currentModel, setCurrentModel] = useState('mlp'); const [flowStep, setFlowStep] = useState(FLOW_STEPS.intro); const [cameraReady, setCameraReady] = useState(false); const [isStarting, setIsStarting] = useState(false); const [focusState, setFocusState] = useState(FOCUS_STATES.pending); const [cameraError, setCameraError] = useState(''); const [calibration, setCalibration] = useState(null); const [l2csBoost, setL2csBoost] = useState(false); const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false); const localVideoRef = useRef(null); const displayCanvasRef = useRef(null); const pipVideoRef = useRef(null); const pipStreamRef = useRef(null); const previewFrameRef = useRef(null); const formatDuration = (seconds) => { if (seconds === 0) return '0s'; const mins = Math.floor(seconds / 60); const secs = Math.floor(seconds % 60); return `${mins}m ${secs}s`; }; const stopPreviewLoop = () => { if (previewFrameRef.current) { cancelAnimationFrame(previewFrameRef.current); previewFrameRef.current = null; } }; const startPreviewLoop = () => { stopPreviewLoop(); const renderPreview = () => { const canvas = displayCanvasRef.current; const video = localVideoRef.current; if (!canvas || !video || !cameraReady || videoManager?.isStreaming) { previewFrameRef.current = null; return; } if (video.readyState >= 2) { const ctx = canvas.getContext('2d'); ctx.drawImage(video, 0, 0, canvas.width, canvas.height); } previewFrameRef.current = 
// Schedule the next preview frame; the loop self-terminates (above) once the
// camera stops being ready or the manager starts real streaming.
requestAnimationFrame(renderPreview); }; previewFrameRef.current = requestAnimationFrame(renderPreview); }; const getErrorMessage = (err) => { if (err?.name === 'NotAllowedError') { return 'Camera permission denied. Please allow camera access.'; } if (err?.name === 'NotFoundError') { return 'No camera found. Please connect a camera.'; } if (err?.name === 'NotReadableError') { return 'Camera is already in use by another application.'; } if (err?.target?.url) { return `WebSocket connection failed: ${err.target.url}. Check that the backend server is running.`; } return err?.message || 'Failed to start focus session.'; }; useEffect(() => { if (!videoManager) return; const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate; const originalOnSessionEnd = videoManager.callbacks.onSessionEnd; videoManager.callbacks.onStatusUpdate = (isFocused) => { setTimelineEvents((prev) => { const newEvents = [...prev, { isFocused, timestamp: Date.now() }]; if (newEvents.length > 60) newEvents.shift(); return newEvents; }); setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused); if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused); }; videoManager.callbacks.onSessionEnd = (summary) => { setFocusState(FOCUS_STATES.pending); setCameraReady(false); if (originalOnSessionEnd) originalOnSessionEnd(summary); }; videoManager.callbacks.onCalibrationUpdate = (cal) => { setCalibration(cal && cal.active ? 
// Spread into a fresh object so React state sees a new reference on each update.
{ ...cal } : null); }; const statsInterval = setInterval(() => { if (videoManager && videoManager.getStats) { setStats(videoManager.getStats()); } }, 1000); return () => { if (videoManager) { videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate; videoManager.callbacks.onSessionEnd = originalOnSessionEnd; videoManager.callbacks.onCalibrationUpdate = null; } clearInterval(statsInterval); }; }, [videoManager]); // Fetch available models on mount useEffect(() => { fetch('/api/models') .then((res) => res.json()) .then((data) => { if (data.available) setAvailableModels(data.available); if (data.current) { // If L2CS was the active model, switch to a base model + enable boost if (data.current === 'l2cs') { const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp'; setCurrentModel(fallback); handleModelChange(fallback); } else { setCurrentModel(data.current); } } if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost); if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available); }) .catch((err) => console.error('Failed to fetch models:', err)); }, []); useEffect(() => { if (flowStep === FLOW_STEPS.ready && cameraReady && !videoManager?.isStreaming) { startPreviewLoop(); return; } stopPreviewLoop(); }, [cameraReady, flowStep, videoManager?.isStreaming]); useEffect(() => { if (!isActive) { stopPreviewLoop(); } }, [isActive]); useEffect(() => { return () => { stopPreviewLoop(); if (pipVideoRef.current) { pipVideoRef.current.pause(); pipVideoRef.current.srcObject = null; } if (pipStreamRef.current) { pipStreamRef.current.getTracks().forEach((t) => t.stop()); pipStreamRef.current = null; } }; }, []); // Poll server CPU/memory for UI useEffect(() => { const fetchSystem = () => { fetch('/api/stats/system') .then(res => res.json()) .then(data => setSystemStats(data)) .catch(() => setSystemStats(null)); }; fetchSystem(); const interval = setInterval(fetchSystem, 3000); return () => clearInterval(interval); }, []); const 
// PUT the chosen model to backend settings; only commit to local state once
// the server confirms the update (result.updated).
handleModelChange = async (modelName) => { try { const res = await fetch('/api/settings', { method: 'PUT', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ model_name: modelName }) }); const result = await res.json(); if (result.updated) { setCurrentModel(modelName); } } catch (err) { console.error('Failed to switch model:', err); } }; const handleEnableCamera = async () => { if (!videoManager) return; try { setCameraError(''); await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current); setCameraReady(true); setFlowStep(FLOW_STEPS.ready); setFocusState(FOCUS_STATES.pending); } catch (err) { const errorMessage = getErrorMessage(err); setCameraError(errorMessage); console.error('Camera init error:', err); } }; const handleEyeGazeToggle = async () => { const next = !l2csBoost; try { const res = await fetch('/api/settings', { method: 'PUT', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ l2cs_boost: next }) }); if (!res.ok) return; setL2csBoost(next); if (next && videoManager && videoManager.isStreaming) { // Turning ON → auto-start calibration videoManager.startCalibration(); } else if (!next && videoManager) { // Turning OFF → cancel any active calibration videoManager.cancelCalibration(); } } catch (err) { console.error('Failed to toggle eye gaze:', err); } }; const handleStart = async () => { try { setIsStarting(true); setSessionResult(null); setTimelineEvents([]); setFocusState(FOCUS_STATES.pending); setCameraError(''); if (!cameraReady) { await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current); setCameraReady(true); setFlowStep(FLOW_STEPS.ready); } await videoManager.startStreaming(); } catch (err) { const errorMessage = getErrorMessage(err); setCameraError(errorMessage); setFocusState(FOCUS_STATES.pending); console.error('Start error:', err); alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`); } finally { setIsStarting(false); } }; const 
// Stop streaming, exit Picture-in-Picture if this page's video owns it,
// release the captured PiP stream tracks, halt the preview loop, and reset UI state.
handleStop = async () => { if (videoManager) { await videoManager.stopStreaming(); } try { if (document.pictureInPictureElement === pipVideoRef.current) { await document.exitPictureInPicture(); } } catch (_) {} if (pipVideoRef.current) { pipVideoRef.current.pause(); pipVideoRef.current.srcObject = null; } if (pipStreamRef.current) { pipStreamRef.current.getTracks().forEach((t) => t.stop()); pipStreamRef.current = null; } stopPreviewLoop(); setFocusState(FOCUS_STATES.pending); setCameraReady(false); }; const handlePiP = async () => { try { // if (!videoManager || !videoManager.isStreaming) { alert('Please start the video first.'); return; } if (!displayCanvasRef.current) { alert('Video not ready.'); return; } // if (document.pictureInPictureElement === pipVideoRef.current) { await document.exitPictureInPicture(); console.log('PiP exited'); return; } // if (!document.pictureInPictureEnabled) { alert('Picture-in-Picture is not supported in this browser.'); return; } // const pipVideo = pipVideoRef.current; if (!pipVideo) { alert('PiP video element not ready.'); return; } const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function'; // let stream = pipStreamRef.current; if (!stream) { const capture = displayCanvasRef.current.captureStream; if (typeof capture === 'function') { stream = capture.call(displayCanvasRef.current, 30); } if (!stream || stream.getTracks().length === 0) { const cameraStream = localVideoRef.current?.srcObject; if (!cameraStream) { alert('Camera stream not ready.'); return; } stream = cameraStream; } pipStreamRef.current = stream; } // if (!stream || stream.getTracks().length === 0) { alert('Failed to capture video stream from canvas.'); return; } pipVideo.srcObject = stream; // if (pipVideo.readyState < 2) { await new Promise((resolve) => { const onReady = () => { pipVideo.removeEventListener('loadeddata', onReady); pipVideo.removeEventListener('canplay', onReady); resolve(); }; pipVideo.addEventListener('loadeddata', onReady); 
// Also resolve on 'canplay', with a 600ms timeout fallback below, so PiP
// activation is never blocked waiting for media readiness events.
pipVideo.addEventListener('canplay', onReady); // setTimeout(resolve, 600); }); } try { await pipVideo.play(); } catch (_) {} // if (isSafariPiP) { try { pipVideo.webkitSetPresentationMode('picture-in-picture'); console.log('PiP activated (Safari)'); return; } catch (e) { // const cameraStream = localVideoRef.current?.srcObject; if (cameraStream && cameraStream !== pipVideo.srcObject) { pipVideo.srcObject = cameraStream; try { await pipVideo.play(); } catch (_) {} pipVideo.webkitSetPresentationMode('picture-in-picture'); console.log('PiP activated (Safari fallback)'); return; } throw e; } } // if (typeof pipVideo.requestPictureInPicture === 'function') { await pipVideo.requestPictureInPicture(); console.log('PiP activated'); } else { alert('Picture-in-Picture is not supported in this browser.'); } } catch (err) { console.error('PiP error:', err); alert(`Failed to enter Picture-in-Picture: ${err.message}`); } }; const handleFloatingWindow = () => { handlePiP(); }; const handleFrameChange = (val) => { const rate = parseInt(val, 10); setCurrentFrame(rate); if (videoManager) { videoManager.setFrameRate(rate); } }; const handlePreview = () => { if (!videoManager || !videoManager.isStreaming) { alert('Please start a session first.'); return; } // const currentStats = videoManager.getStats(); if (!currentStats.sessionId) { alert('No active session.'); return; } // const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000); // const focusScore = currentStats.framesProcessed > 0 ? (currentStats.framesProcessed * (currentStats.currentStatus ? 1 : 0)) / currentStats.framesProcessed : 0; // setSessionResult({ duration_seconds: sessionDuration, focus_score: focusScore, total_frames: currentStats.framesProcessed, focused_frames: Math.floor(currentStats.framesProcessed * focusScore) }); }; const handleCloseOverlay = () => { setSessionResult(null); }; const pageStyle = isActive ? 
// When the page is inactive, keep it mounted but visually hidden and inert
// (1px, zero opacity, no pointer events) so camera/session state survives.
undefined : { position: 'absolute', width: '1px', height: '1px', overflow: 'hidden', opacity: 0, pointerEvents: 'none' }; const focusStateLabel = { [FOCUS_STATES.pending]: 'Pending', [FOCUS_STATES.focused]: 'Focused', [FOCUS_STATES.notFocused]: 'Not Focused' }[focusState]; const introHighlights = [ { title: 'Live focus tracking', text: 'Head pose, gaze, and eye openness are read continuously during the session.' }, { title: 'Quick setup', text: 'Front-facing light and a stable camera angle give the cleanest preview.' }, { title: 'Private by default', text: 'Only session metadata is stored, not the raw camera footage.' } ]; const permissionSteps = [ { title: 'Allow browser access', text: 'Approve the camera prompt so the preview can appear immediately.' }, { title: 'Check your framing', text: 'Keep your face visible and centered for more stable landmark detection.' }, { title: 'Start when ready', text: 'After the preview appears, use the page controls to begin or stop.' } ]; const renderIntroCard = () => { if (flowStep === FLOW_STEPS.intro) { return (
Focus Session

Before you begin

The focus page uses your live camera preview to estimate attention in real time. Review the setup notes below, then continue to camera access.

{introHighlights.map((item) => (

{item.title}

{item.text}

))}
You can still change frame rate and available model options after the preview loads.
); } if (flowStep === FLOW_STEPS.permission && !cameraReady) { return (
Camera Setup

Enable camera access

Once access is granted, your preview appears here and the rest of the Focus page behaves like the other dashboard screens.

{permissionSteps.map((item, index) => (
{index + 1}

{item.title}

{item.text}

))}
{cameraError ?
{cameraError}
: null}
); } return null; }; return (
{renderIntroCard()}
{flowStep === FLOW_STEPS.ready ? ( <> {/* Model selector */} {availableModels.length > 0 ? (
Model: {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => ( ))} {l2csBoostAvailable && ( <> {l2csBoost && stats && stats.isStreaming && ( )} )}
) : null} {/* Server stats */} {systemStats && systemStats.cpu_percent != null && (
CPU: {systemStats.cpu_percent}% RAM: {systemStats.memory_percent}% ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)
)}
Timeline
{timelineEvents.map((event, index) => (
))}
{cameraError ? (
{cameraError}
) : null} {/* Model info card — below action buttons */} {MODEL_INFO[currentModel] && (

{MODEL_INFO[currentModel].label}

{MODEL_INFO[currentModel].badge && ( {MODEL_INFO[currentModel].badge} )}

{MODEL_INFO[currentModel].tagline}

{MODEL_INFO[currentModel].accuracy} Accuracy
{MODEL_INFO[currentModel].f1} F1 Score
{MODEL_INFO[currentModel].auc} ROC-AUC
{MODEL_INFO[currentModel].threshold} Threshold

How it works

{MODEL_INFO[currentModel].how}

Features used

{MODEL_INFO[currentModel].features}

Strengths

{MODEL_INFO[currentModel].strengths}

Evaluated with {MODEL_INFO[currentModel].evaluation}
)}
handleFrameChange(e.target.value)} /> handleFrameChange(e.target.value)} />
) : null} {/* Calibration overlay (fixed fullscreen, must be outside overflow:hidden containers) */}
); } export default FocusPageLocal;