// NOTE(review): the three lines below are stray version-control metadata
// (commit hashes + commit message) that were accidentally pasted above the
// import; converted to a comment so the file parses. Safe to delete.
// k22056537
// feat: UI nav, onboarding, L2CS weights path + torch.load; trim dev files
// 37a8ba6
import React, { useState } from 'react';
function Help() {
const [clearMsg, setClearMsg] = useState('');
const clearAllHistory = async () => {
if (!window.confirm('Delete all saved sessions? My Records and My Achievement will reset.')) return;
setClearMsg('');
try {
const res = await fetch('/api/history', { method: 'DELETE' });
const data = await res.json().catch(() => ({}));
if (res.ok && data.status === 'success') {
setClearMsg('Session history cleared.');
} else {
setClearMsg(data.message || 'Could not clear history.');
}
} catch (e) {
setClearMsg('Request failed.');
}
};
return (
<main id="page-f" className="page">
<h1 className="page-title">Help</h1>
<div className="help-container">
<section className="help-section">
<h2>How to Use Focus Guard</h2>
<ol>
<li>Navigate to the Focus page from the menu</li>
<li>Allow camera access when prompted</li>
<li>Click the green "Start" button to begin monitoring</li>
<li>Position yourself in front of the camera</li>
<li>The system will track your focus in real-time using face mesh analysis</li>
<li>Use the model selector to switch between detection models (MLP, XGBoost, Geometric, Hybrid)</li>
<li>Click "Stop" when you're done to save the session</li>
</ol>
</section>
<section className="help-section">
<h2>What is "Focused"?</h2>
<p>The system considers you focused when:</p>
<ul>
<li>Your face is detected and visible in the camera frame</li>
<li>Your head is oriented toward the screen (low yaw/pitch deviation)</li>
<li>Your eyes are open and gaze is directed forward</li>
<li>You are not yawning</li>
</ul>
<p>The system uses MediaPipe Face Mesh to extract 478 facial landmarks, then computes features like head pose, eye aspect ratio (EAR), gaze offset, PERCLOS, and blink rate to determine focus.</p>
</section>
<section className="help-section">
<h2>Available Models</h2>
<p><strong>MLP:</strong> Neural network trained on extracted facial features. Good balance of speed and accuracy.</p>
<p><strong>XGBoost:</strong> Gradient-boosted tree model using 10 selected features. Strong on tabular data with fast inference.</p>
<p><strong>Geometric:</strong> Rule-based scoring using head pose and eye openness. No ML model needed, lightweight.</p>
<p><strong>Hybrid:</strong> Combines MLP predictions with geometric scoring for robust results.</p>
</section>
<section className="help-section">
<h2>Adjusting Settings</h2>
<p><strong>Frame Rate:</strong> Controls how many frames per second are sent for analysis. Recommended: 15-30 FPS. Minimum is 10 FPS to ensure temporal features (blink rate, PERCLOS) remain accurate.</p>
<p><strong>Model Selection:</strong> Switch models in real-time using the pill buttons above the timeline. Different models may perform better depending on your lighting and setup.</p>
</section>
<section className="help-section">
<h2>Privacy & Data</h2>
<p>Video frames are processed in real-time on the server and are never stored. Only focus status metadata (timestamps, confidence scores) is saved to the session database. View past runs under <strong>My Records</strong>; stats and badges live under <strong>My Achievement</strong>.</p>
<p style={{ marginTop: '12px' }}>
<button
type="button"
onClick={clearAllHistory}
style={{
padding: '8px 16px',
borderRadius: '8px',
border: '1px solid #c44',
background: 'transparent',
color: '#e88',
cursor: 'pointer',
fontSize: '14px'
}}
>
Clear all session history
</button>
{clearMsg && (
<span style={{ marginLeft: '12px', color: '#aaa', fontSize: '14px' }}>{clearMsg}</span>
)}
</p>
</section>
<section className="help-section">
<h2>FAQ</h2>
<details>
<summary>Why is my focus score low?</summary>
<p>Ensure good lighting so the face mesh can detect your landmarks clearly. Face the camera directly and avoid large head movements. Try switching to a different model if one isn't working well for your setup.</p>
</details>
<details>
<summary>Can I use this without a camera?</summary>
<p>No, camera access is required. The system relies on real-time face landmark detection to determine focus.</p>
</details>
<details>
<summary>Does this work on mobile?</summary>
<p>Yes, it works on mobile browsers that support camera access and WebSocket connections. Performance depends on your device and network speed.</p>
</details>
<details>
<summary>Is my data private?</summary>
<p>Yes. No video frames are stored. Processing happens in real-time and only metadata (focus/unfocused status, confidence, timestamps) is saved.</p>
</details>
<details>
<summary>Why does the face mesh lag behind my movements?</summary>
<p>The face mesh overlay updates each time the server returns a detection result. The camera feed itself renders at 60fps locally. Any visible lag depends on network latency and server processing time.</p>
</details>
</section>
<section className="help-section">
<h2>Technical Info</h2>
<p><strong>Face Detection:</strong> MediaPipe Face Mesh (478 landmarks)</p>
<p><strong>Feature Extraction:</strong> Head pose (yaw/pitch/roll), EAR, MAR, gaze offset, PERCLOS, blink rate</p>
<p><strong>ML Models:</strong> MLP (scikit-learn), XGBoost, Geometric, Hybrid</p>
<p><strong>Storage:</strong> SQLite database</p>
<p><strong>Framework:</strong> FastAPI + React (Vite) + WebSocket</p>
</section>
</div>
</main>
);
}
export default Help;