// FocusPageLocal — React page component for a local (in-browser) focus-tracking session.
// Wires a VideoManager instance (camera capture, WebSocket streaming, focus-status
// callbacks) to page state: ML model selection (/api/models, /api/settings), optional
// L2CS eye-gaze boost with a 9-point calibration flow, a requestAnimationFrame camera
// preview loop, Picture-in-Picture (standard + Safari webkit fallback), a rolling
// 60-event focus timeline, and periodic polling of session stats (1s) and system
// stats (/api/stats/system, 3s).
//
// NOTE(review): this file appears to have been mangled by text extraction:
//  - JSX element tags were stripped — HelloIcon/CameraIcon contain an empty
//    `return ( );`, and the render section near the bottom is bare text content
//    with no tags. TODO: recover the original JSX from version control before
//    editing anything the component renders.
//  - Long lines were hard-wrapped; at least one single-quoted string literal
//    (MODEL_INFO.mlp.how) now contains a raw line break, which is a syntax error
//    as written. The template-literal wraps (WebSocket error message, PiP error
//    alert) are valid multi-line strings and may be intentional.
//  - Metric values in MODEL_INFO (accuracy/F1/AUC/thresholds) come from offline
//    evaluation runs — presumably LOPO = leave-one-participant-out; verify
//    against the training pipeline before updating them.
import React, { useState, useEffect, useRef } from 'react'; import CalibrationOverlay from './CalibrationOverlay'; const FLOW_STEPS = { intro: 'intro', permission: 'permission', ready: 'ready' }; const FOCUS_STATES = { pending: 'pending', focused: 'focused', notFocused: 'not-focused' }; function HelloIcon() { return ( ); } function CameraIcon() { return ( ); } const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric']; const MODEL_INFO = { hybrid: { label: 'Hybrid', tagline: 'Best overall — combines ML with geometric scoring', how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). Uses a weighted blend tuned with LOPO evaluation.', accuracy: 'N/A', f1: '0.8409', auc: 'N/A', threshold: '0.46', evaluation: 'LOPO tuning (9 participants, 144K frames)', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Most robust across different people. Latest LOPO mean F1 is 0.8409 at w_mlp=0.3.', badge: 'Recommended', }, xgboost: { label: 'XGBoost', tagline: 'Highest raw accuracy — gradient-boosted decision trees', how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.', accuracy: '95.87%', f1: '0.9585', auc: '0.9908', threshold: '0.38', evaluation: 'Random split test (15%) + LOPO thresholds', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Strong pattern recognition and fast inference. LOPO: AUC 0.8695, optimal threshold 0.280, F1 0.8549.', badge: null, }, mlp: { label: 'MLP', tagline: 'Lightweight neural network — fast and efficient', how: 'Two-layer neural network (64→32 neurons). 
Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.', accuracy: '92.92%', f1: '0.9287', auc: '0.9714', threshold: '0.23', evaluation: 'Random split test (15%) + LOPO thresholds', features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS', strengths: 'Fastest inference and smallest model size. LOPO: AUC 0.8624, optimal threshold 0.228, F1 0.8578.', badge: null, }, geometric: { label: 'Geometric', tagline: 'Baseline only — hardcoded thresholds, no learning', how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.', accuracy: 'N/A', f1: '0.8195', auc: 'N/A', threshold: '0.55', evaluation: 'LOPO geometric sweep', features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)', strengths: 'No model files needed. Useful fallback when model checkpoints are unavailable.', badge: 'Baseline', }, }; function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive, isTutorialActive, setIsTutorialActive, setHasSeenTutorial }) { const [currentFrame, setCurrentFrame] = useState(15); const [timelineEvents, setTimelineEvents] = useState([]); const [stats, setStats] = useState(null); const [systemStats, setSystemStats] = useState(null); const [availableModels, setAvailableModels] = useState([]); const [currentModel, setCurrentModel] = useState('mlp'); const [flowStep, setFlowStep] = useState(FLOW_STEPS.ready); const [cameraReady, setCameraReady] = useState(false); const [isStarting, setIsStarting] = useState(false); const [focusState, setFocusState] = useState(FOCUS_STATES.pending); const [cameraError, setCameraError] = useState(''); const [calibration, setCalibration] = useState(null); const [l2csBoost, setL2csBoost] = useState(false); const [l2csBoostAvailable, setL2csBoostAvailable] = 
useState(false); const [showEyeGazeModal, setShowEyeGazeModal] = useState(false); const [eyeGazeDontShow, setEyeGazeDontShow] = useState(false); const localVideoRef = useRef(null); const displayCanvasRef = useRef(null); const pipVideoRef = useRef(null); const pipStreamRef = useRef(null); const previewFrameRef = useRef(null); useEffect(() => { if (isTutorialActive) { setFlowStep(FLOW_STEPS.intro); } else { setFlowStep(FLOW_STEPS.ready); } }, [isTutorialActive]); const formatDuration = (seconds) => { if (seconds === 0) return '0s'; const mins = Math.floor(seconds / 60); const secs = Math.floor(seconds % 60); return `${mins}m ${secs}s`; }; const stopPreviewLoop = () => { if (previewFrameRef.current) { cancelAnimationFrame(previewFrameRef.current); previewFrameRef.current = null; } }; const startPreviewLoop = () => { stopPreviewLoop(); const renderPreview = () => { const canvas = displayCanvasRef.current; const video = localVideoRef.current; if (!canvas || !video || !cameraReady || videoManager?.isStreaming) { previewFrameRef.current = null; return; } if (video.readyState >= 2) { const ctx = canvas.getContext('2d'); ctx.drawImage(video, 0, 0, canvas.width, canvas.height); } previewFrameRef.current = requestAnimationFrame(renderPreview); }; previewFrameRef.current = requestAnimationFrame(renderPreview); }; const getErrorMessage = (err) => { if (err?.name === 'NotAllowedError') { return 'Camera permission denied. Please allow camera access.'; } if (err?.name === 'NotFoundError') { return 'No camera found. Please connect a camera.'; } if (err?.name === 'NotReadableError') { return 'Camera is already in use by another application.'; } if (err?.target?.url) { return `WebSocket connection failed: ${err.target.url}. 
Check that the backend server is running.`; } return err?.message || 'Failed to start focus session.'; }; useEffect(() => { if (!videoManager) return; const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate; const originalOnSessionEnd = videoManager.callbacks.onSessionEnd; videoManager.callbacks.onStatusUpdate = (isFocused) => { setTimelineEvents((prev) => { const newEvents = [...prev, { isFocused, timestamp: Date.now() }]; if (newEvents.length > 60) newEvents.shift(); return newEvents; }); setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused); if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused); }; videoManager.callbacks.onSessionEnd = (summary) => { setFocusState(FOCUS_STATES.pending); setCameraReady(false); if (originalOnSessionEnd) originalOnSessionEnd(summary); }; videoManager.callbacks.onCalibrationUpdate = (cal) => { setCalibration(cal && cal.active ? { ...cal } : null); }; const statsInterval = setInterval(() => { if (videoManager && videoManager.getStats) { setStats(videoManager.getStats()); } }, 1000); return () => { if (videoManager) { videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate; videoManager.callbacks.onSessionEnd = originalOnSessionEnd; videoManager.callbacks.onCalibrationUpdate = null; } clearInterval(statsInterval); }; }, [videoManager]); useEffect(() => { fetch('/api/models') .then((res) => res.json()) .then((data) => { if (data.available) setAvailableModels(data.available); if (data.current) { if (data.current === 'l2cs') { const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp'; setCurrentModel(fallback); handleModelChange(fallback); } else { setCurrentModel(data.current); } } if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost); if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available); }) .catch((err) => console.error('Failed to fetch models:', err)); }, []); useEffect(() => { if (flowStep === FLOW_STEPS.ready && cameraReady 
&& !videoManager?.isStreaming) { startPreviewLoop(); return; } stopPreviewLoop(); }, [cameraReady, flowStep, videoManager?.isStreaming]); useEffect(() => { if (!isActive) { stopPreviewLoop(); } }, [isActive]); useEffect(() => { return () => { stopPreviewLoop(); if (pipVideoRef.current) { pipVideoRef.current.pause(); pipVideoRef.current.srcObject = null; } if (pipStreamRef.current) { pipStreamRef.current.getTracks().forEach((t) => t.stop()); pipStreamRef.current = null; } }; }, []); useEffect(() => { const fetchSystem = () => { fetch('/api/stats/system') .then(res => res.json()) .then(data => setSystemStats(data)) .catch(() => setSystemStats(null)); }; fetchSystem(); const interval = setInterval(fetchSystem, 3000); return () => clearInterval(interval); }, []); const handleModelChange = async (modelName) => { try { const res = await fetch('/api/settings', { method: 'PUT', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ model_name: modelName }) }); const result = await res.json(); if (result.updated) { setCurrentModel(modelName); } } catch (err) { console.error('Failed to switch model:', err); } }; const closeTutorial = () => { setFlowStep(FLOW_STEPS.ready); setIsTutorialActive(false); setHasSeenTutorial(true); }; const handleEnableCamera = async () => { if (!videoManager) return; try { setCameraError(''); await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current); setCameraReady(true); closeTutorial(); setFocusState(FOCUS_STATES.pending); } catch (err) { const errorMessage = getErrorMessage(err); setCameraError(errorMessage); console.error('Camera init error:', err); } }; const applyEyeGazeChange = async (enable, withCalibration = true) => { try { const res = await fetch('/api/settings', { method: 'PUT', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ l2cs_boost: enable }) }); if (!res.ok) return; setL2csBoost(enable); if (enable && withCalibration && videoManager && videoManager.isStreaming) { 
// Eye-gaze boost enabled mid-session with calibration requested: start the
// on-screen calibration flow; disabling instead cancels any pending calibration.
videoManager.startCalibration(); } else if (!enable && videoManager) { videoManager.cancelCalibration(); } } catch (err) { console.error('Failed to toggle eye gaze:', err); } }; const handleEyeGazeToggle = async () => { const next = !l2csBoost; if (next && !eyeGazeDontShow) { setShowEyeGazeModal(true); return; } await applyEyeGazeChange(next, false); }; const handleEyeGazeModalAction = async (withCalibration) => { setShowEyeGazeModal(false); await applyEyeGazeChange(true, withCalibration); }; const handleStart = async () => { try { setIsStarting(true); setSessionResult(null); setTimelineEvents([]); setFocusState(FOCUS_STATES.pending); setCameraError(''); if (!cameraReady) { await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current); setCameraReady(true); setFlowStep(FLOW_STEPS.ready); } await videoManager.startStreaming(); } catch (err) { const errorMessage = getErrorMessage(err); setCameraError(errorMessage); setFocusState(FOCUS_STATES.pending); console.error('Start error:', err); alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`); } finally { setIsStarting(false); } }; const handleStop = async () => { if (videoManager) { await videoManager.stopStreaming(); } try { if (document.pictureInPictureElement === pipVideoRef.current) { await document.exitPictureInPicture(); } } catch (_) {} if (pipVideoRef.current) { pipVideoRef.current.pause(); pipVideoRef.current.srcObject = null; } if (pipStreamRef.current) { pipStreamRef.current.getTracks().forEach((t) => t.stop()); pipStreamRef.current = null; } stopPreviewLoop(); setFocusState(FOCUS_STATES.pending); setCameraReady(false); }; const handlePiP = async () => { try { if (!videoManager || !videoManager.isStreaming) { alert('Please start the video first.'); return; } if (!displayCanvasRef.current) { alert('Video not ready.'); return; } if (document.pictureInPictureElement === pipVideoRef.current) { await document.exitPictureInPicture(); console.log('PiP exited'); return; } 
// Feature-detect standard PiP first; Safari is handled separately below via
// webkitSetPresentationMode, with a camera-stream fallback when the canvas
// captureStream yields no tracks.
if (!document.pictureInPictureEnabled) { alert('Picture-in-Picture is not supported in this browser.'); return; } const pipVideo = pipVideoRef.current; if (!pipVideo) { alert('PiP video element not ready.'); return; } const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function'; let stream = pipStreamRef.current; if (!stream) { const capture = displayCanvasRef.current.captureStream; if (typeof capture === 'function') { stream = capture.call(displayCanvasRef.current, 30); } if (!stream || stream.getTracks().length === 0) { const cameraStream = localVideoRef.current?.srcObject; if (!cameraStream) { alert('Camera stream not ready.'); return; } stream = cameraStream; } pipStreamRef.current = stream; } if (!stream || stream.getTracks().length === 0) { alert('Failed to capture video stream from canvas.'); return; } pipVideo.srcObject = stream; if (pipVideo.readyState < 2) { await new Promise((resolve) => { const onReady = () => { pipVideo.removeEventListener('loadeddata', onReady); pipVideo.removeEventListener('canplay', onReady); resolve(); }; pipVideo.addEventListener('loadeddata', onReady); pipVideo.addEventListener('canplay', onReady); setTimeout(resolve, 600); }); } try { await pipVideo.play(); } catch (_) {} if (isSafariPiP) { try { pipVideo.webkitSetPresentationMode('picture-in-picture'); console.log('PiP activated (Safari)'); return; } catch (e) { const cameraStream = localVideoRef.current?.srcObject; if (cameraStream && cameraStream !== pipVideo.srcObject) { pipVideo.srcObject = cameraStream; try { await pipVideo.play(); } catch (_) {} pipVideo.webkitSetPresentationMode('picture-in-picture'); console.log('PiP activated (Safari fallback)'); return; } throw e; } } if (typeof pipVideo.requestPictureInPicture === 'function') { await pipVideo.requestPictureInPicture(); console.log('PiP activated'); } else { alert('Picture-in-Picture is not supported in this browser.'); } } catch (err) { console.error('PiP error:', err); alert(`Failed to enter 
Picture-in-Picture: ${err.message}`); } }; const handleFloatingWindow = () => { handlePiP(); }; const handleFrameChange = (val) => { const rate = parseInt(val, 10); setCurrentFrame(rate); if (videoManager) { videoManager.setFrameRate(rate); } }; const handlePreview = () => { if (!videoManager || !videoManager.isStreaming) { alert('Please start a session first.'); return; } const currentStats = videoManager.getStats(); if (!currentStats.sessionId) { alert('No active session.'); return; } const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000); const totalFrames = currentStats.framesProcessed || 0; const focusedFrames = currentStats.focusedFrames ?? 0; const focusScore = totalFrames > 0 ? focusedFrames / totalFrames : 0; setSessionResult({ duration_seconds: sessionDuration, focus_score: focusScore, total_frames: totalFrames, focused_frames: focusedFrames }); }; const handleCloseOverlay = () => { setSessionResult(null); }; const pageStyle = isActive ? undefined : { position: 'absolute', width: '1px', height: '1px', overflow: 'hidden', opacity: 0, pointerEvents: 'none' }; const focusStateLabel = { [FOCUS_STATES.pending]: 'Pending', [FOCUS_STATES.focused]: 'Focused', [FOCUS_STATES.notFocused]: 'Not Focused' }[focusState]; const introHighlights = [ { title: 'Live focus tracking', text: 'Head pose, gaze, and eye openness are read continuously during the session.' }, { title: 'Quick setup', text: 'Front-facing light and a stable camera angle give the cleanest preview.' }, { title: 'Private by default', text: 'Only session metadata is stored, not the raw camera footage.' }, { title: 'Sync across devices', text: 'Your history auto-saves to this browser. To switch devices, use the Data Management tools at the bottom of the My Records tab to export or import your data.' } ]; const permissionSteps = [ { title: 'Allow browser access', text: 'Approve the camera prompt so the preview can appear immediately.' 
}, { title: 'Check your framing', text: 'Keep your face visible and centered for more stable landmark detection.' }, { title: 'Start when ready', text: 'After the preview appears, use the page controls to begin or stop.' } ]; const renderIntroCard = () => { if (flowStep === FLOW_STEPS.intro) { return (
Focus Session

Before you begin

The focus page uses your live camera preview to estimate attention in real time. Review the setup notes below, then continue to camera access.

{introHighlights.map((item) => (

{item.title}

{item.text}

))}
Wearing glasses? Glasses may reduce detection accuracy on some models. If results seem inaccurate, try switching to a different model (e.g. Geometric or MLP).
You can still change frame rate and available model options after the preview loads.
); } if (flowStep === FLOW_STEPS.permission && !cameraReady) { return (
Camera Setup

Enable camera access

Once access is granted, your preview appears here and the rest of the Focus page behaves like the other dashboard screens.

{permissionSteps.map((item, index) => (
{index + 1}

{item.title}

{item.text}

))}
{cameraError ?
{cameraError}
: null}
); } return null; }; const renderEyeGazeModal = () => { if (!showEyeGazeModal) return null; return (
Eye Gaze Tracking

Before you enable

Eye gaze tracking runs an additional deep neural network (L2CS-Net) alongside your current model. Please read the notes below before proceeding.

Performance impact

Enabling eye gaze tracking increases CPU usage and may reduce frame rate. If the system feels sluggish, consider disabling it.

Calibration (recommended)

For best accuracy, calibrate by looking at 9 screen positions one at a time, followed by 1 validation point. The whole process takes about 30 seconds.

1

Click "Start Calibration"

A dot will appear on screen. Look directly at it and keep your gaze steady. It will cycle through 9 positions then show a final validation dot.

2

Or skip for now

Click "Skip" to enable eye gaze tracking without calibrating. You can recalibrate at any time using the "Recalibrate" button during a session.

); }; return (
{renderIntroCard()} {renderEyeGazeModal()}
{flowStep === FLOW_STEPS.ready ? ( <> {availableModels.length > 0 ? (
Model: {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => ( ))} {l2csBoostAvailable && ( <> {l2csBoost && stats && stats.isStreaming && ( )} )}
) : null} {systemStats && systemStats.cpu_percent != null && (
CPU: {systemStats.cpu_percent}% RAM: {systemStats.memory_percent}% ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)
)}
Timeline
{timelineEvents.map((event, index) => (
))}
{cameraError ? (
{cameraError}
) : null} {MODEL_INFO[currentModel] && (

{MODEL_INFO[currentModel].label}

{MODEL_INFO[currentModel].badge && ( {MODEL_INFO[currentModel].badge} )}

{MODEL_INFO[currentModel].tagline}

{MODEL_INFO[currentModel].accuracy} Accuracy
{MODEL_INFO[currentModel].f1} F1 Score
{MODEL_INFO[currentModel].auc} ROC-AUC
{MODEL_INFO[currentModel].threshold} Threshold

How it works

{MODEL_INFO[currentModel].how}

Features used

{MODEL_INFO[currentModel].features}

Strengths

{MODEL_INFO[currentModel].strengths}

Evaluated with {MODEL_INFO[currentModel].evaluation}
)}
handleFrameChange(e.target.value)} /> handleFrameChange(e.target.value)} />
) : null}
); } export default FocusPageLocal;