import os
import re
import json
import csv
import pickle
import base64
import mimetypes
from datetime import datetime

import numpy as np
import gradio as gr
from openai import OpenAI
from rank_bm25 import BM25Okapi
from sentence_transformers import SentenceTransformer

# =====================================================
# CONFIG
# =====================================================
BUILD_DIR = "brainchat_build"
CHUNKS_PATH = os.path.join(BUILD_DIR, "chunks.pkl")
TOKENS_PATH = os.path.join(BUILD_DIR, "tokenized_chunks.pkl")
EMBED_PATH = os.path.join(BUILD_DIR, "embeddings.npy")
CONFIG_PATH = os.path.join(BUILD_DIR, "config.json")
LOGO_FILE = "logo.png"
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
ANALYTICS_FILE = "brainchat_analytics.json"
STUDENT_FILE = "brainchat_student_profiles.json"

# Retrieval / model state, populated lazily by ensure_loaded().
BM25 = None
CHUNKS = None
EMBEDDINGS = None
EMBED_MODEL = None
CLIENT = None

# In-memory mirrors of the two persisted JSON files above.
ANALYTICS_LOG = []
STUDENT_PROFILES = {}


# =====================================================
# PERSISTENCE
# =====================================================
def _load_json_file(path, default):
    """Best-effort load of one JSON file; return `default` on any failure."""
    try:
        if os.path.exists(path):
            with open(path, "r", encoding="utf-8") as f:
                return json.load(f)
    except Exception:
        # Corrupt/unreadable file: fall through to the default.
        pass
    return default


def load_persistent_data():
    """Load analytics and student profiles from disk into module globals.

    FIX: each file is loaded independently, so a corrupt analytics file no
    longer wipes valid student profiles (and vice versa). The previous
    version reset BOTH globals whenever EITHER file failed to parse.
    """
    global ANALYTICS_LOG, STUDENT_PROFILES
    ANALYTICS_LOG = _load_json_file(ANALYTICS_FILE, [])
    STUDENT_PROFILES = _load_json_file(STUDENT_FILE, {})


def save_persistent_data():
    """Best-effort persistence of analytics and profiles; errors are ignored.

    Persistence is deliberately non-critical (e.g. a read-only filesystem
    on Hugging Face Spaces must not crash the app).
    """
    try:
        with open(ANALYTICS_FILE, "w", encoding="utf-8") as f:
            json.dump(ANALYTICS_LOG, f, ensure_ascii=False, indent=2)
        with open(STUDENT_FILE, "w", encoding="utf-8") as f:
            json.dump(STUDENT_PROFILES, f, ensure_ascii=False, indent=2)
    except Exception:
        pass


def normalize_student_id(student_id: str) -> str:
    """Map empty / whitespace-only ids to the shared 'Guest' identity."""
    sid = (student_id or "").strip()
    return sid if sid else "Guest"


def get_profile(student_id: str):
    """Return (creating if needed) the mutable profile dict for a student."""
    sid = normalize_student_id(student_id)
    if sid not in STUDENT_PROFILES:
        STUDENT_PROFILES[sid] = {"topic_stats": {}, "badges": []}
    return STUDENT_PROFILES[sid]


# =====================================================
# LOADING / RETRIEVAL
# =====================================================
def tokenize(text: str):
    """Lowercase word tokenizer used for BM25 queries."""
    return re.findall(r"\w+", text.lower(), flags=re.UNICODE)


def expand_short_query(query: str):
    """Expand common neurology abbreviations (EN/ES) to improve retrieval recall."""
    q = query.strip()
    q_lower = q.lower()
    expansions = {
        "mri": "MRI magnetic resonance imaging resonancia magnetica resonancia magnética neuroimaging brain scan RM",
        "rm": "RM MRI magnetic resonance imaging resonancia magnetica resonancia magnética neuroimaging brain scan",
        "ct": "CT computed tomography tomografia computarizada brain scan TC",
        "tc": "TC CT computed tomography tomografia computarizada brain scan",
        "csf": "CSF cerebrospinal fluid liquor cerebrospinal LCR liquido cefalorraquideo líquido cefalorraquídeo production circulation",
        "lcr": "LCR CSF liquido cefalorraquideo líquido cefalorraquídeo cerebrospinal fluid production circulation",
        "eeg": "EEG electroencephalography electroencefalograma epilepsy seizure",
    }
    if q_lower in expansions:
        return expansions[q_lower]
    if "mri" in q_lower:
        return q + " MRI magnetic resonance imaging resonancia magnetica resonancia magnética neuroimaging brain scan"
    if "csf" in q_lower:
        return q + " CSF cerebrospinal fluid liquor cerebrospinal LCR liquido cefalorraquideo líquido cefalorraquídeo"
    return q


def ensure_loaded():
    """Lazily initialize the retrieval index, embeddings and OpenAI client.

    Raises:
        FileNotFoundError: when any pre-built artifact is missing.
        ValueError: when OPENAI_API_KEY is not configured.
    """
    global BM25, CHUNKS, EMBEDDINGS, EMBED_MODEL, CLIENT
    if CHUNKS is None:
        missing = [p for p in [CHUNKS_PATH, TOKENS_PATH, EMBED_PATH, CONFIG_PATH] if not os.path.exists(p)]
        if missing:
            raise FileNotFoundError("Missing build files:\n" + "\n".join(missing))
        with open(CHUNKS_PATH, "rb") as f:
            CHUNKS = pickle.load(f)
        with open(TOKENS_PATH, "rb") as f:
            tokenized_chunks = pickle.load(f)
        EMBEDDINGS = np.load(EMBED_PATH)
        with open(CONFIG_PATH, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        BM25 = BM25Okapi(tokenized_chunks)
        EMBED_MODEL = SentenceTransformer(cfg["embedding_model"])
    if CLIENT is None:
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY is missing in Hugging Face Space Secrets.")
        CLIENT = OpenAI(api_key=api_key)
# =====================================================
# SOURCE HANDLING
# =====================================================
def clean_source_name(book_name: str) -> str:
    """Normalize a raw book/file name for display.

    Converted or merged professor material (ilovepdf exports, merged PDFs,
    lecture/handout files, ...) collapses to the single label
    'Professor Handouts'; any other name simply loses a trailing '.pdf'.
    """
    raw = (book_name or "").strip()
    comparable = raw.lower().replace("_", " ").replace("-", " ")
    professor_markers = (
        "ilovepdf", "i love pdf", "i-love-pdf", "merged",
        "professor handout", "professor handouts", "professor notes",
        "teacher handout", "teacher handouts",
        "lecture notes", "class notes", "handout", "handouts",
    )
    for marker in professor_markers:
        if marker in comparable:
            return "Professor Handouts"
    if raw.lower().endswith(".pdf"):
        raw = raw[:-4]
    return raw or "Professor Handouts"


def source_priority_label(book_name: str) -> str:
    """Professor material is the 'Primary source'; everything else supports it."""
    if clean_source_name(book_name) == "Professor Handouts":
        return "Primary source"
    return "Supporting textbook"


def prioritize_professor_handouts(records):
    """Stable sort: professor chunks first, then by descending final score."""
    def sort_key(rec):
        is_professor = clean_source_name(rec.get("book", "")) == "Professor Handouts"
        score = float(rec.get("final_score", rec.get("similarity_score", 0)))
        return (0 if is_professor else 1, -score)

    return sorted(records, key=sort_key)


def search_hybrid(query: str, shortlist_k: int = 40, final_k: int = 6):
    """Hybrid retrieval: BM25 shortlist re-ranked with dense similarity.

    Final score = dense cosine + capped BM25 contribution + a fixed boost
    for Professor Handouts chunks.
    """
    ensure_loaded()
    expanded_query = expand_short_query(query)
    lexical_scores = BM25.get_scores(tokenize(expanded_query))
    shortlist_idx = np.argsort(lexical_scores)[::-1][:shortlist_k]
    query_vec = EMBED_MODEL.encode([expanded_query], normalize_embeddings=True).astype("float32")[0]
    dense_scores = EMBEDDINGS[shortlist_idx] @ query_vec

    scored = []
    for idx, dense in zip(shortlist_idx, dense_scores):
        rec = CHUNKS[int(idx)].copy()
        book_label = clean_source_name(rec.get("book", ""))
        lexical = float(lexical_scores[idx])
        lexical_norm = min(lexical / 10.0, 0.20)  # cap BM25's influence
        boost = 0.35 if book_label == "Professor Handouts" else 0.0
        rec["similarity_score"] = float(dense)
        rec["bm25_score"] = lexical
        rec["final_score"] = float(dense) + lexical_norm + boost
        scored.append(rec)

    scored.sort(key=lambda r: r["final_score"], reverse=True)
    return prioritize_professor_handouts(scored)[:final_k]


def build_context(records):
    """Format retrieved chunks as numbered [Source i] blocks for the prompt."""
    blocks = []
    for i, r in enumerate(prioritize_professor_handouts(records), start=1):
        clean_book = clean_source_name(r.get("book", ""))
        blocks.append(
            f"""[Source {i}]
Book: {clean_book}
Source priority: {source_priority_label(clean_book)}
Section: {r.get('section_title','')}
Pages: {r.get('page_start','')}-{r.get('page_end','')}
Similarity Score: {r.get('similarity_score', 0):.3f}
BM25 Score: {r.get('bm25_score', 0):.3f}
Final Score: {r.get('final_score', r.get('similarity_score', 0)):.3f}
Text: {r.get('text','')}"""
        )
    return "\n\n".join(blocks)


def make_sources(records):
    """Build a deduplicated, human-readable reference list for display."""
    seen = set()
    lines = []
    for r in prioritize_professor_handouts(records):
        clean_book = clean_source_name(r.get("book", ""))
        key = (
            clean_book,
            r.get("section_title"),
            r.get("page_start"),
            r.get("page_end"),
        )
        if key in seen:
            continue
        seen.add(key)
        section = r.get("section_title", "Course Material")
        page_start = r.get("page_start", "")
        page_end = r.get("page_end", "")
        score = r.get("final_score", r.get("similarity_score", 0))
        if page_start and page_end and page_start != page_end:
            page_text = f"pages {page_start}-{page_end}"
        elif page_start:
            page_text = f"page {page_start}"
        else:
            page_text = "page not specified"
        lines.append(
            f"• {clean_book} ({source_priority_label(clean_book)}) | {section} | {page_text} | relevance: {score:.2f}"
        )
    return "\n".join(lines)
# =====================================================
# TOPIC / DIFFICULTY / BADGES
# =====================================================
def detect_topic(text: str) -> str:
    """Keyword-match the message (EN/ES) onto a coarse neurology topic."""
    t = text.lower()
    padded = f" {t} "  # lets word-boundary keys like " em " match at the edges
    topics = {
        "Stroke / Cerebrovascular": [
            "stroke", "ictus", "cerebrovascular", "aphasia", "afasia",
            "mca", "acm", "hemiparesis", "hemiparesia", "broca"
        ],
        "Epilepsy": ["epilepsy", "epilepsia", "seizure", "convulsion", "crisis"],
        "Headache": ["headache", "cefalea", "migraine", "migraña"],
        "Multiple Sclerosis / Demyelination": [
            "multiple sclerosis", "esclerosis múltiple", " em ", "nmosd",
            "desmielinizante", "demyelinating"
        ],
        "Parkinson / Movement Disorders": [
            "parkinson", "movement disorder", "trastornos del movimiento",
            "tremor", "temblor"
        ],
        "Dementia": ["dementia", "demencia", "alzheimer", "cognitive", "cognitivo"],
        "Neuropathy / Neuromuscular": [
            "neuropathy", "neuropatía", "myasthenia", "miastenia",
            "neuromuscular", "myopathy", "miopatía"
        ],
        "Neuroanatomy / Topography": [
            "topography", "topografía", "lesion", "lesión", "localization",
            "localización", "mri", "rm", "ct", "tc", "csf", "lcr",
            "resonancia", "magnetic resonance", "cerebrospinal"
        ],
    }
    # First topic (in declaration order) with any matching keyword wins.
    for topic, keywords in topics.items():
        for kw in keywords:
            if kw in padded or kw in t:
                return topic
    return "General Neurology"


def estimate_difficulty(text: str, mode: str, topic: str) -> dict:
    """Bucket the request into one of four Bloom-style difficulty levels."""
    t = text.lower()
    case_markers = ("case", "caso", "diagnosis", "diagnóstico", "differential", "diferencial")
    analysis_markers = ("complex", "complicación", "why", "por qué", "mechanism", "mecanismo", "reasoning", "razonamiento")
    understanding_markers = ("explain", "explica", "understand", "comprender", "difference", "diferencia")
    if mode == "Case-Based" or any(m in t for m in case_markers):
        level = 3
    elif any(m in t for m in analysis_markers):
        level = 4
    elif any(m in t for m in understanding_markers):
        level = 2
    else:
        level = 1
    names = {
        1: "Level 1 – Recall",
        2: "Level 2 – Understanding",
        3: "Level 3 – Application",
        4: "Level 4 – Clinical Analysis",
    }
    return {"level": level, "name": names[level], "topic": topic}


def update_topic_stats(student_id: str, topic: str, confidence: str):
    """Record one interaction outcome for a topic and persist it."""
    stats = get_profile(student_id)["topic_stats"]
    bucket = stats.setdefault(topic, {"total": 0, "green": 0, "orange": 0, "red": 0, "weak": 0})
    if confidence not in ["green", "orange", "red"]:
        confidence = "orange"  # unknown levels count as medium
    bucket["total"] += 1
    bucket[confidence] += 1
    if confidence in ["orange", "red"]:
        bucket["weak"] += 1
    save_persistent_data()


def award_badges(student_id: str, topic: str):
    """Grant per-topic and global badges; return the newly earned names."""
    profile = get_profile(student_id)
    topic_stats = profile["topic_stats"].get(topic, {})
    earned = set(profile.get("badges", []))
    freshly_earned = []
    badge_map = {
        "Stroke / Cerebrovascular": "Stroke Explorer",
        "Epilepsy": "Epilepsy Learner",
        "Headache": "Headache Reviewer",
        "Multiple Sclerosis / Demyelination": "Demyelination Specialist",
        "Parkinson / Movement Disorders": "Movement Disorders Explorer",
        "Dementia": "Cognitive Disorders Reviewer",
        "Neuropathy / Neuromuscular": "Neuromuscular Learner",
        "Neuroanatomy / Topography": "Topography Trainee",
        "General Neurology": "General Neurology Starter",
    }
    # Per-topic badge after 3 high-confidence interactions in that topic.
    if topic in badge_map and topic_stats.get("green", 0) >= 3:
        candidate = badge_map[topic]
        if candidate not in earned:
            earned.add(candidate)
            freshly_earned.append(candidate)
    # Global badge after 10 high-confidence interactions overall.
    total_green = sum(v.get("green", 0) for v in profile["topic_stats"].values())
    if total_green >= 10 and "Clinical Reasoning Builder" not in earned:
        earned.add("Clinical Reasoning Builder")
        freshly_earned.append("Clinical Reasoning Builder")
    if freshly_earned:
        profile["badges"] = sorted(earned)
        save_persistent_data()
    return freshly_earned


def spaced_repetition_tip(student_id: str, topic: str) -> str:
    """Suggest revision once a topic has accumulated two or more weak attempts."""
    stats = get_profile(student_id)["topic_stats"].get(topic)
    if not stats or stats.get("weak", 0) < 2:
        return ""
    return (
        f"\n\n**Spaced revision suggestion:** You may need more practice in **{topic}**. "
        f"Try asking: *Give me 3 revision questions on {topic}.*"
    )


# =====================================================
# GENERAL CHAT / CONFIDENCE
# =====================================================
def is_general_chat(text: str) -> bool:
    """True for greetings/small talk that should bypass retrieval."""
    small_talk = {
        "hi", "hello", "hola", "hey", "good morning", "good afternoon",
        "good evening", "thanks", "thank you", "gracias", "ok", "okay",
        "who are you", "what can you do", "help",
    }
    return text.lower().strip() in small_talk


def general_chat_reply(text: str, language_mode: str) -> str:
    """Canned self-introduction, in Spanish when that mode is selected."""
    if language_mode == "Spanish":
        return (
            "¡Hola! Soy BrainChat, tu tutor de IA para Neurología y PMQSN. "
            "Primero uso los apuntes del profesor y después libros de apoyo si es necesario. "
            "Puedes pedirme explicaciones, casos clínicos, flashcards o quizzes."
        )
    return (
        "Hello! I am BrainChat, your AI tutor for Neurology and PMQSN. "
        "I first use Professor Handouts and then supporting textbooks if needed. "
        "You can ask for explanations, clinical cases, flashcards, or quizzes."
    )


def is_not_found_answer(answer: str) -> bool:
    """Detect the model's 'not in course material' sentinel (EN/ES)."""
    a = (answer or "").lower().strip()
    if a == "no encontrado":
        return True
    markers = (
        "not found in the course material",
        "no encontrado en el material del curso",
        "no se encuentra en el material del curso",
        "no se encontró información",
    )
    return any(m in a for m in markers)


def compute_confidence(records, answer: str):
    """Traffic-light confidence from retrieval scores and source provenance."""
    if is_not_found_answer(answer) or not records:
        return {"level": "red", "label": "Not found", "score": 0.0}
    final_scores = [float(r.get("final_score", r.get("similarity_score", 0))) for r in records]
    raw_scores = [float(r.get("similarity_score", 0)) for r in records]
    best_final = max(final_scores)
    best_raw = max(raw_scores)
    professor_in_top3 = any(
        clean_source_name(r.get("book", "")) == "Professor Handouts"
        for r in records[:3]
    )
    # Professor-backed answers get a lower bar for "high confidence".
    if (professor_in_top3 and best_final >= 0.42) or best_final >= 0.48:
        return {"level": "green", "label": "High confidence", "score": best_raw}
    if best_final >= 0.28:
        return {"level": "orange", "label": "Medium confidence", "score": best_raw}
    return {"level": "red", "label": "Low confidence", "score": best_raw}
def confidence_html(conf):
    """Render the confidence badge for the chat pane.

    NOTE(review): the surrounding HTML markup (and any use of `color_map`)
    appears to have been stripped from this literal by a paste/sanitize
    step — `color_map` is currently unused. Confirm against the deployed
    version before relying on the exact markup.
    """
    color_map = {"green": "#16a34a", "orange": "#f97316", "red": "#dc2626"}
    return f"""
{conf['label']} — similarity score: {conf['score']:.2f}
"""


# =====================================================
# ANALYTICS / REPORT
# =====================================================
def log_event(student_id, event_type, mode, language, confidence_level,
              similarity, query, topic, difficulty, answer=""):
    """Append one interaction record to the analytics log and persist it."""
    ANALYTICS_LOG.append({
        "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "student_id": normalize_student_id(student_id),
        "event": event_type,
        "mode": mode,
        "language": language,
        "confidence": confidence_level,
        "similarity": round(float(similarity), 3),
        "topic": topic,
        "difficulty": difficulty,
        "query": query,
        # Truncate long answers to keep the JSON log bounded.
        "answer": answer[:3000],
    })
    save_persistent_data()


def filter_logs(student_id: str):
    """Logs for one student, or all logs when id is 'All Students'."""
    sid = normalize_student_id(student_id)
    return [x for x in ANALYTICS_LOG if sid == "All Students" or x.get("student_id") == sid]


def aggregate_topic_stats(student_id):
    """Per-topic stats for one student, or summed across all profiles."""
    sid = normalize_student_id(student_id)
    if sid != "All Students":
        return STUDENT_PROFILES.get(sid, {}).get("topic_stats", {})
    combined = {}
    for profile in STUDENT_PROFILES.values():
        for topic, s in profile.get("topic_stats", {}).items():
            if topic not in combined:
                combined[topic] = {"total": 0, "green": 0, "orange": 0, "red": 0, "weak": 0}
            for k in combined[topic]:
                combined[topic][k] += int(s.get(k, 0))
    return combined


def export_interactions(student_id):
    """Export a styled XLSX report (openpyxl), falling back to CSV.

    Returns a gr.update pointing the download component at the new file.
    Any failure in the openpyxl path (missing package, styling error)
    falls through to the plain-CSV branch.
    """
    sid = normalize_student_id(student_id)
    logs = filter_logs(sid)
    safe_sid = re.sub(r"[^A-Za-z0-9_-]+", "_", sid)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    try:
        # openpyxl is imported lazily: it is optional for the CSV fallback.
        from openpyxl import Workbook
        from openpyxl.styles import Font, PatternFill, Border, Side, Alignment
        from openpyxl.utils import get_column_letter
        file_path = f"brainchat_student_report_{safe_sid}_{timestamp}.xlsx"
        wb = Workbook()
        # Brand palette (ARGB hex without alpha).
        purple = "5A2D77"
        gold = "C7A008"
        light_purple = "F3E9F8"
        light_gold = "FFF2A8"
        light_green = "BDF3CF"
        light_orange = "FFD99B"
        light_red = "FFB9B9"
        white = "FFFFFF"
        black = "000000"
        thin = Side(style="thin", color="CCCCCC")
        border = Border(left=thin, right=thin, top=thin, bottom=thin)
        title_font = Font(bold=True, size=16, color=purple)
        heading_font = Font(bold=True, color=white)
        normal_bold = Font(bold=True, color=black)
        normal_font = Font(color=black)
        header_fill = PatternFill("solid", fgColor=purple)
        summary_fill = PatternFill("solid", fgColor=light_purple)
        gold_fill = PatternFill("solid", fgColor=light_gold)

        def style_header_row(ws, row_num):
            # Purple header band with centered wrapped text.
            for cell in ws[row_num]:
                cell.fill = header_fill
                cell.font = heading_font
                cell.border = border
                cell.alignment = Alignment(horizontal="center", vertical="center", wrap_text=True)

        def autofit(ws, max_width=55):
            # Approximate column auto-width from the longest cell value.
            for col in ws.columns:
                max_len = 0
                col_letter = get_column_letter(col[0].column)
                for cell in col:
                    value = "" if cell.value is None else str(cell.value)
                    max_len = max(max_len, min(len(value), max_width))
                    cell.alignment = Alignment(vertical="top", wrap_text=True)
                ws.column_dimensions[col_letter].width = max(12, min(max_len + 2, max_width))

        # --- Summary sheet -------------------------------------------------
        ws = wb.active
        ws.title = "Summary"
        total = len(logs)
        green = sum(1 for x in logs if x.get("confidence") == "green")
        orange = sum(1 for x in logs if x.get("confidence") == "orange")
        red = sum(1 for x in logs if x.get("confidence") == "red")
        quizzes = sum(1 for x in logs if x.get("event") in ["quiz_generated", "quiz_evaluated"])
        avg_sim = round(sum(float(x.get("similarity", 0)) for x in logs) / total, 3) if total else 0
        ws["A1"] = "BrainChat Student Interaction Report"
        ws["A1"].font = title_font
        ws.merge_cells("A1:D1")
        summary_rows = [
            ("Generated At", datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
            ("Student", sid),
            ("Total Interactions", total),
            ("High Confidence", green),
            ("Medium Confidence", orange),
            ("Low / Not Found", red),
            ("Quiz Actions", quizzes),
            ("Average Similarity", avg_sim),
        ]
        row = 3
        for label, value in summary_rows:
            ws.cell(row=row, column=1, value=label)
            ws.cell(row=row, column=2, value=value)
            ws.cell(row=row, column=1).font = normal_bold
            ws.cell(row=row, column=1).fill = summary_fill
            ws.cell(row=row, column=2).fill = gold_fill
            ws.cell(row=row, column=1).border = border
            ws.cell(row=row, column=2).border = border
            row += 1
        autofit(ws)

        # --- Interactions sheet -------------------------------------------
        ws = wb.create_sheet("Interactions")
        headers = [
            "Time", "Student ID", "Event", "Mode", "Language", "Confidence",
            "Similarity", "Topic", "Difficulty", "Question / Query", "Answer / Feedback"
        ]
        ws.append(headers)
        style_header_row(ws, 1)
        for log in logs:
            ws.append([
                log.get("time", ""),
                log.get("student_id", sid),
                log.get("event", ""),
                log.get("mode", ""),
                log.get("language", ""),
                log.get("confidence", ""),
                log.get("similarity", ""),
                log.get("topic", ""),
                log.get("difficulty", ""),
                log.get("query", ""),
                log.get("answer", ""),
            ])
        for row_cells in ws.iter_rows(min_row=2):
            # Color only the Confidence column (index 5 / column 6).
            conf = str(row_cells[5].value or "").lower()
            fill = None
            if conf == "green":
                fill = PatternFill("solid", fgColor=light_green)
            elif conf == "orange":
                fill = PatternFill("solid", fgColor=light_orange)
            elif conf == "red":
                fill = PatternFill("solid", fgColor=light_red)
            for cell in row_cells:
                cell.border = border
                cell.font = normal_font
                cell.alignment = Alignment(vertical="top", wrap_text=True)
                if fill and cell.column == 6:
                    cell.fill = fill
        ws.freeze_panes = "A2"
        ws.auto_filter.ref = ws.dimensions
        autofit(ws, max_width=60)

        # --- Topic Analytics sheet ----------------------------------------
        ws = wb.create_sheet("Topic Analytics")
        topic_stats = aggregate_topic_stats(sid)
        ws.append(["Topic", "Total", "High", "Medium", "Low", "Weak Attempts"])
        style_header_row(ws, 1)
        # Weakest topics first.
        for topic, s in sorted(topic_stats.items(), key=lambda x: x[1].get("weak", 0), reverse=True):
            ws.append([
                topic,
                s.get("total", 0),
                s.get("green", 0),
                s.get("orange", 0),
                s.get("red", 0),
                s.get("weak", 0),
            ])
        for row_cells in ws.iter_rows(min_row=2):
            for cell in row_cells:
                cell.border = border
                cell.alignment = Alignment(vertical="top", wrap_text=True)
        ws.freeze_panes = "A2"
        ws.auto_filter.ref = ws.dimensions
        autofit(ws)

        # --- Badges sheet --------------------------------------------------
        ws = wb.create_sheet("Badges")
        if sid == "All Students":
            badges = sorted({b for p in STUDENT_PROFILES.values() for b in p.get("badges", [])})
        else:
            badges = STUDENT_PROFILES.get(sid, {}).get("badges", [])
        ws.append(["Badge", "Meaning"])
        style_header_row(ws, 1)
        badge_meanings = {
            "Stroke Explorer": "Repeated strong performance in stroke/cerebrovascular topics.",
            "Epilepsy Learner": "Repeated strong performance in epilepsy topics.",
            "Headache Reviewer": "Repeated strong performance in headache topics.",
            "Demyelination Specialist": "Repeated strong performance in MS/demyelination topics.",
            "Movement Disorders Explorer": "Repeated strong performance in Parkinson/movement disorders topics.",
            "Cognitive Disorders Reviewer": "Repeated strong performance in dementia/cognitive topics.",
            "Neuromuscular Learner": "Repeated strong performance in neuropathy/neuromuscular topics.",
            "Topography Trainee": "Repeated strong performance in neuroanatomy/topography topics.",
            "General Neurology Starter": "Repeated strong performance in general neurology topics.",
            "Clinical Reasoning Builder": "At least 10 high-confidence interactions across topics.",
        }
        if badges:
            for badge in badges:
                ws.append([badge, badge_meanings.get(badge, "Badge earned through repeated strong learning activity.")])
        else:
            ws.append(["No badges earned yet", "Badges appear after repeated high-confidence activity in a topic."])
        for row_cells in ws.iter_rows(min_row=2):
            for cell in row_cells:
                cell.border = border
                cell.alignment = Alignment(vertical="top", wrap_text=True)
        autofit(ws)
        wb.save(file_path)
        return gr.update(value=file_path, visible=True)
    except Exception:
        # Fallback: plain CSV (utf-8-sig so Excel opens accents correctly).
        file_path = f"brainchat_student_report_{safe_sid}_{timestamp}.csv"
        with open(file_path, "w", newline="", encoding="utf-8-sig") as f:
            writer = csv.writer(f)
            writer.writerow(["BrainChat Student Interaction Report"])
            writer.writerow(["Generated At", datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
            writer.writerow(["Student", sid])
            writer.writerow([])
            writer.writerow([
                "Time", "Student ID", "Event", "Mode", "Language", "Confidence",
                "Similarity", "Topic", "Difficulty", "Question / Query", "Answer / Feedback"
            ])
            for log in logs:
                writer.writerow([
                    log.get("time", ""),
                    log.get("student_id", sid),
                    log.get("event", ""),
                    log.get("mode", ""),
                    log.get("language", ""),
                    log.get("confidence", ""),
                    log.get("similarity", ""),
                    log.get("topic", ""),
                    log.get("difficulty", ""),
                    log.get("query", ""),
                    log.get("answer", ""),
                ])
        return gr.update(value=file_path, visible=True)
def render_badges(student_id):
    """Render earned badges for one student (or the union for 'All Students').

    NOTE(review): the HTML wrapper markup of these literals appears to have
    been stripped by a paste/sanitize step; the text content below is what
    survived. Confirm against the deployed version before relying on it.
    """
    sid = normalize_student_id(student_id)
    if sid == "All Students":
        badges = sorted({b for p in STUDENT_PROFILES.values() for b in p.get("badges", [])})
    else:
        badges = STUDENT_PROFILES.get(sid, {}).get("badges", [])
    if not badges:
        return """
No badges earned yet.
"""
    return "".join([f'🏅 {b}' for b in sorted(badges)])


def render_topic_table(student_id):
    """Render the per-topic stats table, weakest topics first.

    NOTE(review): markup stripped as above; the header line currently
    appears AFTER {rows} in the template — likely a mangling artifact,
    verify against the original layout.
    """
    topic_stats = aggregate_topic_stats(student_id)
    if not topic_stats:
        return """
No topic data yet.
"""
    rows = ""
    for topic, s in sorted(topic_stats.items(), key=lambda x: x[1].get("weak", 0), reverse=True):
        rows += f"""
{topic} {s.get('total', 0)} {s.get('green', 0)} {s.get('orange', 0)} {s.get('red', 0)} {s.get('weak', 0)}
"""
    return f"""
{rows}
Topic Total High Medium Low Weak Attempts
"""
def bar(label, value, total, css_class):
    """Render one horizontal chart bar.

    NOTE(review): `pct` and `css_class` are currently unused — the HTML
    (style/width markup) of this literal appears to have been stripped by
    a sanitize step. Confirm against the deployed version.
    """
    pct = 0 if total == 0 else int((value / total) * 100)
    return f"""
{label}: {value}
"""


def topic_bars(student_id):
    """Render up to six 'weak attempts' bars, weakest topics first.

    NOTE(review): same stripped-markup caveat as bar(); `pct` is computed
    but not referenced in the surviving template.
    """
    topic_stats = aggregate_topic_stats(student_id)
    if not topic_stats:
        return """
No topic data yet.
"""
    # Scale bars against the weakest topic (never divide by zero).
    max_weak = max([s.get("weak", 0) for s in topic_stats.values()] + [1])
    html = ""
    for topic, s in sorted(topic_stats.items(), key=lambda x: x[1].get("weak", 0), reverse=True)[:6]:
        pct = int((s.get("weak", 0) / max_weak) * 100)
        html += f"""
{topic}: {s.get('weak',0)} weak
"""
    return html
def render_dashboard(student_id="Guest"):
    """Render the full analytics dashboard for one student (or all).

    NOTE(review): the HTML wrapper markup of the two large templates below
    appears to have been stripped by a paste/sanitize step; only the text
    content and interpolations survived. Confirm the exact markup against
    the deployed version before editing further.
    """
    sid = normalize_student_id(student_id)
    logs = filter_logs(sid)
    total = len(logs)
    if total == 0:
        # Empty-state view shown before any interaction is recorded.
        return f"""
Progress Analytics Dashboard
Showing: {sid}
No interactions recorded yet.
What this dashboard shows
Topic tracking: identifies areas where students struggle.
Difficulty level: recall, understanding, application, or clinical analysis.
Badges: awarded after repeated strong performance.
"""
    # Headline counters.
    green = sum(1 for x in logs if x["confidence"] == "green")
    orange = sum(1 for x in logs if x["confidence"] == "orange")
    red = sum(1 for x in logs if x["confidence"] == "red")
    quizzes = sum(1 for x in logs if x["event"] in ["quiz_generated", "quiz_evaluated"])
    avg_sim = sum(x["similarity"] for x in logs) / total
    # Last 8 interactions, newest first.
    recent_rows = ""
    for item in logs[-8:][::-1]:
        recent_rows += f"""
{item['time']} {item.get('student_id','Guest')} {item['event']} {item['topic']} {item['difficulty']} {item['confidence']} {item['similarity']} {item['query'][:100]}
"""
    return f"""
Progress Analytics Dashboard
Showing: {sid}
{total}
Total interactions
{green}
High confidence
{orange}
Medium confidence
{red}
Low / Not found
{quizzes}
Quiz actions
{avg_sim:.2f}
Avg similarity
Confidence Chart
{bar('High confidence', green, total, 'green')} {bar('Medium confidence', orange, total, 'orange')} {bar('Low / Not found', red, total, 'red')}
Badges Earned
{render_badges(sid)}
Teacher View
Charts: show confidence and weak topics.
Badges: awarded per student after repeated strong answers.
Download report: exports Q&A, confidence, topic, difficulty and badges.
Weak Topic Chart
{topic_bars(sid)}
Topic Weakness Tracking
{render_topic_table(sid)}
Recent Activity
{recent_rows}
Time Student Event Topic Difficulty Confidence Similarity Query
"""
def refresh_dashboard(student_id):
    """Re-render the analytics dashboard for the given student."""
    return render_dashboard(student_id)


def clear_analytics():
    """Wipe all analytics and profiles, persist the empty state, reset the view."""
    ANALYTICS_LOG.clear()
    STUDENT_PROFILES.clear()
    save_persistent_data()
    return render_dashboard("Guest")


# =====================================================
# PROMPTS
# =====================================================
def language_instruction(language_mode: str) -> str:
    """Return the language directive injected into every prompt."""
    fixed = {
        "English": "Answer only in English.",
        "Spanish": "Answer only in Spanish.",
        "Bilingual": "Answer first in English, then provide a Spanish version under the heading 'Español:'.",
    }
    # Any other mode (e.g. "Auto") mirrors the user's language.
    return fixed.get(
        language_mode,
        "If the user's message is in Spanish, answer in Spanish; otherwise answer in English.",
    )


def choose_quiz_count(user_text: str, selector: str) -> int:
    """Question count from the explicit selector, else inferred from the text."""
    if selector in {"3", "5", "7"}:
        return int(selector)
    lowered = user_text.lower()
    exam_markers = ("mock test", "final exam", "exam practice", "full test")
    study_markers = ("detailed", "revision", "comprehensive", "study")
    if any(m in lowered for m in exam_markers):
        return 7
    if any(m in lowered for m in study_markers):
        return 5
    return 3
def build_tutor_prompt(mode, language_mode, question, context, difficulty):
    """Assemble the grounded tutoring prompt for a normal Q&A turn."""
    teaching_styles = {
        "Explain": "Explain clearly like a friendly clinical tutor.",
        "Detailed": "Give a detailed structured explanation with clinical relevance.",
        "Short Notes": "Write concise exam-focused revision notes.",
        "Flashcards": "Create 6 flashcards in Q/A format.",
        "Case-Based": "Create a clinical case and guide the student using Socratic reasoning.",
    }
    style_line = teaching_styles.get(mode, "Explain clearly like a friendly clinical tutor.")
    return f"""
You are BrainChat, an interactive neurology tutor for PMQSN.

Core rules:
- Use ONLY the provided context.
- Always prioritize Professor Handouts first.
- Use supporting textbooks only when Professor Handouts are insufficient.
- If the answer is not supported by the context, say exactly: Not found in the course material.
- Do not invent facts.
- Do not invent references.
- {language_instruction(language_mode)}

Adaptive difficulty:
- Current topic: {difficulty['topic']}
- Current level: {difficulty['name']}
- If the user asks a clinical question, guide step-by-step.
- If the student seems unsure, use Socratic hints before giving the final answer.

Teaching style: {style_line}

Context:
{context}

Student question: {question}
""".strip()


def build_quiz_generation_prompt(language_mode, topic, context, n_questions, difficulty):
    """Assemble the JSON-only quiz-generation prompt."""
    return f"""
You are BrainChat, an interactive neurology tutor.
Use ONLY the provided context.
Prioritize Professor Handouts first.
Create exactly {n_questions} quiz questions.
Topic: {difficulty['topic']}.
Difficulty: {difficulty['name']}.
Return VALID JSON only.
{language_instruction(language_mode)}

Return JSON:
{{"title":"short quiz title","questions":[{{"q":"question","answer_key":"expected answer"}}]}}

Context:
{context}

Topic: {topic}
""".strip()


def build_quiz_eval_prompt(language_mode, quiz_data, user_answers):
    """Assemble the JSON-only prompt that grades a student's quiz answers."""
    quiz_json = json.dumps(quiz_data, ensure_ascii=False)
    return f"""
You are BrainChat, an interactive neurology tutor.
Evaluate the student's answers fairly.
If wrong, first explain the reasoning gap.
Give constructive feedback.
Return VALID JSON only:
{{"score_obtained":0,"score_total":0,"summary":"overall feedback","results":[{{"question":"question text","answer_key":"expected answer","student_answer":"student answer","result":"Correct / Partially Correct / Incorrect","feedback":"Socratic feedback and correction"}}],"improvement_tip":"short study suggestion"}}

Quiz:
{quiz_json}

Student answers:
{user_answers}

Language: {language_instruction(language_mode)}
""".strip()


# =====================================================
# OPENAI
# =====================================================
def oai_text(prompt):
    """One low-temperature completion; returns the stripped text reply."""
    ensure_loaded()
    response = CLIENT.chat.completions.create(
        model=OPENAI_MODEL,
        temperature=0.2,
        messages=[
            {"role": "system", "content": "You are BrainChat, a careful educational assistant for neurology students."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content.strip()


def oai_json(prompt):
    """One JSON-mode completion; returns the parsed object."""
    ensure_loaded()
    response = CLIENT.chat.completions.create(
        model=OPENAI_MODEL,
        temperature=0.2,
        response_format={"type": "json_object"},
        messages=[
            {"role": "system", "content": "Return only valid JSON."},
            {"role": "user", "content": prompt},
        ],
    )
    return json.loads(response.choices[0].message.content)


# =====================================================
# UI HELPERS
# =====================================================
def get_logo_data_uri():
    """Inline the logo file as a base64 data URI, or None when absent."""
    if not os.path.exists(LOGO_FILE):
        return None
    mime_type, _ = mimetypes.guess_type(LOGO_FILE)
    mime_type = mime_type or "image/png"
    with open(LOGO_FILE, "rb") as fh:
        payload = base64.b64encode(fh.read()).decode("utf-8")
    return f"data:{mime_type};base64,{payload}"
def render_logo():
    """Logo markup when logo.png exists, else a text fallback.

    NOTE(review): the <img …>/wrapper markup of these literals appears to
    have been stripped by a sanitize step — confirm against the deployed
    version before relying on the exact output.
    """
    data_uri = get_logo_data_uri()
    if data_uri:
        return f'BrainChat logo'
    return '''
BRAIN
CHAT
'''


def format_text(text):
    """Convert assistant/user text to minimal HTML for the chat pane.

    FIX: the HTML-escaping here had been corrupted into no-op replacements
    (e.g. replacing "&" with "&"). Restored real entity escaping, **bold**
    markers rendered as <b>…</b>, and newlines rendered as <br>.
    """
    safe = (
        text.replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
    )
    # Markdown-style bold → HTML bold (non-greedy, so pairs don't merge).
    safe = re.sub(r"\*\*(.+?)\*\*", r"<b>\1</b>", safe)
    return safe.replace("\n", "<br>")


def render_chat(history, thinking=False):
    """Render the chat history (list of {'role','content',...} dicts).

    NOTE(review): the bubble/container markup of these templates appears
    stripped by the same sanitize step — only text content survived.
    """
    if not history:
        # Welcome panel shown before the first message.
        return """
Welcome to BrainChat
I am your AI tutor for Neurology and PMQSN. Ask a question, practise a clinical case, or generate a quiz.
I first use Professor Handouts, then supporting textbooks if needed.
"""
    rows = []
    for item in history:
        role = item["role"]
        content = format_text(item["content"])
        # Assistant turns may carry a pre-rendered confidence badge.
        confidence_block = item.get("confidence_html", "")
        if role == "user":
            rows.append(f'''
{content}
''')
        else:
            rows.append(f'''
{confidence_block}{content}
''')
    if thinking:
        rows.append('''
🔵 BrainChat is thinking...
''')
    return f"""
{''.join(rows)}
"""
""" # ===================================================== # MAIN CHAT LOGIC # ===================================================== def respond(user_msg, history, student_id, mode, language_mode, quiz_count_mode, show_sources, quiz_state): history = history or [] quiz_state = quiz_state or {"active": False, "quiz_data": None, "language_mode": "Auto", "topic": "General Neurology"} text = (user_msg or "").strip() sid = normalize_student_id(student_id) if not text: yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) return history = history + [{"role": "user", "content": text}] yield "", history, render_chat(history, thinking=True), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) try: topic = detect_topic(text) difficulty = estimate_difficulty(text, mode, topic) if is_general_chat(text): conf = {"level": "green", "label": "Ready", "score": 1.0} reply = general_chat_reply(text, language_mode) log_event(sid, "general_chat", mode, language_mode, "green", 1.0, text, topic, difficulty["name"], reply) history.append({"role": "assistant", "content": reply, "confidence_html": confidence_html(conf)}) yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) return if quiz_state.get("active", False): evaluation = oai_json( build_quiz_eval_prompt( quiz_state.get("language_mode", language_mode), quiz_state.get("quiz_data", {}), text ) ) lines = [f"**Score:** {evaluation.get('score_obtained', 0)}/{evaluation.get('score_total', 0)}"] if evaluation.get("summary"): lines.append(f"\n**Overall feedback:** {evaluation['summary']}") if evaluation.get("improvement_tip"): lines.append(f"\n**Study tip:** {evaluation['improvement_tip']}") results = evaluation.get("results", []) if results: lines.append("\n**Question-wise feedback:**") for item in results: lines += [ "", f"**Q:** {item.get('question','')}", f"**Your answer:** 
{item.get('student_answer','')}", f"**Expected answer:** {item.get('answer_key','')}", f"**Result:** {item.get('result','')}", f"**Feedback:** {item.get('feedback','')}", ] quiz_topic = quiz_state.get("topic", topic) conf = {"level": "green", "label": "Quiz evaluated", "score": 1.0} update_topic_stats(sid, quiz_topic, "green") new_badges = award_badges(sid, quiz_topic) if new_badges: lines.append("\n**New badge earned:** " + ", ".join([f"🏅 {b}" for b in new_badges])) final_feedback = "\n".join(lines).strip() log_event(sid, "quiz_evaluated", mode, language_mode, "green", 1.0, text, quiz_topic, difficulty["name"], final_feedback) history.append({"role": "assistant", "content": final_feedback, "confidence_html": confidence_html(conf)}) quiz_state = {"active": False, "quiz_data": None, "language_mode": language_mode, "topic": quiz_topic} yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) return records = search_hybrid(text, shortlist_k=40, final_k=6) context = build_context(records) if mode == "Quiz Me": n_questions = choose_quiz_count(text, quiz_count_mode) quiz_data = oai_json(build_quiz_generation_prompt(language_mode, text, context, n_questions, difficulty)) conf = compute_confidence(records, "quiz generated") lines = [ f"**{quiz_data.get('title', 'Quiz')}**", f"\n**Topic:** {topic}", f"**Difficulty:** {difficulty['name']}", f"\n**Total questions:** {len(quiz_data.get('questions', []))}\n", "Reply in one message using numbered answers.", "Example: 1. ... 2. 
...\n", ] for i, q in enumerate(quiz_data.get("questions", []), start=1): lines.append(f"**Q{i}.** {q.get('q','')}") if show_sources and conf["level"] != "red": lines.append("\n\n**References used to create this quiz:**") lines.append(make_sources(records)) update_topic_stats(sid, topic, conf["level"]) new_badges = award_badges(sid, topic) if new_badges: lines.append("\n**New badge earned:** " + ", ".join([f"🏅 {b}" for b in new_badges])) quiz_output = "\n".join(lines).strip() log_event(sid, "quiz_generated", mode, language_mode, conf["level"], conf["score"], text, topic, difficulty["name"], quiz_output) history.append({"role": "assistant", "content": quiz_output, "confidence_html": confidence_html(conf)}) quiz_state = {"active": True, "quiz_data": quiz_data, "language_mode": language_mode, "topic": topic} yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) return answer = oai_text(build_tutor_prompt(mode, language_mode, text, context, difficulty)) conf = compute_confidence(records, answer) if conf["level"] == "red": final_answer = "Not found in the course material." if language_mode == "English" else "No encontrado en el material del curso." 
else: final_answer = answer.strip() + f"\n\n**Topic:** {topic}\n**Difficulty:** {difficulty['name']}" if show_sources: final_answer += "\n\n**References used:**\n" + make_sources(records) update_topic_stats(sid, topic, conf["level"]) new_badges = award_badges(sid, topic) revision_tip = spaced_repetition_tip(sid, topic) if new_badges: final_answer += "\n\n**New badge earned:** " + ", ".join([f"🏅 {b}" for b in new_badges]) if revision_tip: final_answer += revision_tip log_event(sid, "answer", mode, language_mode, conf["level"], conf["score"], text, topic, difficulty["name"], final_answer) history.append({"role": "assistant", "content": final_answer.strip(), "confidence_html": confidence_html(conf)}) yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) except Exception as e: error_msg = f"Error: {str(e)}" history.append({"role": "assistant", "content": error_msg}) quiz_state = {"active": False, "quiz_data": None, "language_mode": language_mode, "topic": "General Neurology"} yield "", history, render_chat(history), quiz_state, render_dashboard(sid), gr.update(value=None, visible=False) def clear_all(student_id): empty_history = [] empty_quiz = {"active": False, "quiz_data": None, "language_mode": "Auto", "topic": "General Neurology"} return "", empty_history, render_chat(empty_history), empty_quiz, render_dashboard(student_id), gr.update(value=None, visible=False) # ===================================================== # CSS — CLEAN FINAL VERSION # ===================================================== CSS = """ :root{ --page-bg:#d9d9dd; --uva-purple:#5a2d77; --uva-gold:#c7a008; --uva-gold-soft:#fff2a8; --uva-soft:#f3e9f8; } html, body, .gradio-container{ background:var(--page-bg)!important; font-family:Arial, Helvetica, sans-serif!important; color:#000000!important; } footer{display:none!important;} #bc_app{ max-width:1100px; margin:18px auto; } #bc_app, #bc_app *{ box-sizing:border-box; } /* Header */ 
.bc-top-header{ background:#ffffff!important; border-radius:22px; padding:22px 24px; margin-bottom:18px; display:flex; align-items:center; gap:18px; box-shadow:0 12px 28px rgba(0,0,0,0.18); border-top:8px solid var(--uva-purple)!important; } .bc-header-logo{ width:78px; height:78px; min-width:78px; border-radius:999px; background:var(--uva-gold); display:flex; align-items:center; justify-content:center; } .bc-logo-img{ width:62px; height:62px; object-fit:contain; } .bc-logo-fallback{ width:62px; height:62px; display:flex; align-items:center; justify-content:center; font-weight:900; } .bc-top-header h1{ margin:0!important; color:var(--uva-purple)!important; font-size:32px!important; font-weight:900!important; } .bc-top-header p{ margin:4px 0 0!important; color:#000000!important; font-size:15px!important; font-weight:800!important; } /* Settings */ .bc-settings{ background:#252528!important; color:#ffffff!important; border-radius:18px!important; padding:20px!important; box-shadow:0 6px 18px rgba(0,0,0,0.12)!important; border-left:7px solid var(--uva-purple)!important; margin-bottom:16px!important; } .bc-settings label, .bc-settings label *, .bc-settings span, .bc-settings p{ color:#ffffff!important; font-weight:900!important; } .bc-settings input, .bc-settings textarea, .bc-settings select{ background:#2f2f34!important; color:#ffffff!important; border:1px solid #777777!important; border-radius:10px!important; font-weight:800!important; } .bc-settings input::placeholder, .bc-settings textarea::placeholder{ color:#dddddd!important; } .bc-settings input[type="checkbox"]{ accent-color:#ff7a1a!important; } /* Gradio dropdown popover */ .gradio-container [role="listbox"], .gradio-container [role="menu"]{ background:#ffffff!important; color:#000000!important; border:2px solid var(--uva-purple)!important; } .gradio-container [role="option"], .gradio-container [role="option"] *, .gradio-container [role="menuitem"], .gradio-container [role="menuitem"] *{ 
background:#ffffff!important; color:#000000!important; font-weight:800!important; } .gradio-container [role="option"]:hover, .gradio-container [role="option"][aria-selected="true"]{ background:var(--uva-soft)!important; color:#000000!important; } /* Instruction card */ .bc-howto{ background:var(--uva-soft)!important; border-left:7px solid var(--uva-gold)!important; border-radius:16px; padding:16px; color:#000000!important; font-size:15px!important; line-height:1.6!important; font-weight:800!important; margin-top:12px; } .bc-howto *{ color:#000000!important; font-weight:800!important; } .bc-howto strong{ color:var(--uva-purple)!important; font-weight:900!important; } /* Chat */ .bc-phone{ background:#ffffff!important; border-radius:22px; padding:16px; box-shadow:0 12px 28px rgba(0,0,0,0.18); border-top:6px solid var(--uva-purple)!important; margin-bottom:16px; } .bc-chat-shell{ background:#ffffff!important; border-radius:18px; border:2px solid #e0d5ea; padding:18px; min-height:480px; } .bc-chat-wrap{ display:flex; flex-direction:column; gap:14px; max-height:480px; overflow-y:auto; } .bc-row{display:flex;width:100%;} .bc-user-row{justify-content:flex-start;} .bc-bot-row{justify-content:flex-end;} .bc-bubble{ max-width:82%; padding:16px 18px; border-radius:22px; line-height:1.65; font-size:15px; box-shadow:0 8px 18px rgba(0,0,0,0.10); word-wrap:break-word; font-weight:800!important; color:#000000!important; } .bc-bubble *{ color:#000000!important; font-weight:800!important; } .bc-user-bubble{ background:#eadcf3!important; border:2px solid #9b73b5!important; border-bottom-left-radius:8px; } .bc-bot-bubble{ background:var(--uva-gold-soft)!important; border:3px solid var(--uva-gold)!important; border-bottom-right-radius:8px; } .bc-thinking{ background:#eef4ff!important; border:2px solid var(--uva-purple)!important; } .dots{animation:blink 1.2s infinite;} @keyframes blink{0%,100%{opacity:.2}50%{opacity:1}} .bc-confidence{ display:flex; align-items:center; gap:8px; 
margin-bottom:10px; padding:8px 12px; background:#ffffff!important; border-radius:999px; font-size:13px; border:2px solid #d0b8df; font-weight:900!important; } .bc-dot{ width:15px; height:15px; border-radius:999px; display:inline-block; } /* Welcome card */ .bc-empty{ display:flex; justify-content:center; align-items:center; min-height:430px; } .bc-welcome-card{ max-width:560px; background:#ffffff!important; border-radius:22px; padding:28px; text-align:center; box-shadow:0 10px 24px rgba(0,0,0,0.18); border:2px solid #9b73b5; } .bc-welcome-card h2{ color:var(--uva-purple)!important; font-size:28px!important; font-weight:900!important; } .bc-welcome-card p{ color:#000000!important; font-size:16px!important; line-height:1.7!important; font-weight:800!important; } .bc-source-note{ background:var(--uva-gold-soft)!important; border-left:7px solid var(--uva-gold); padding:12px; border-radius:12px; } /* Input bar */ .bc-input-bar{ margin-top:12px; background:#ffffff!important; border:2px solid var(--uva-purple); border-radius:999px; padding:6px; display:flex; align-items:center; gap:10px; } .bc-plus{ width:38px; height:38px; min-width:38px; border-radius:999px; background:var(--uva-gold)!important; display:flex; align-items:center; justify-content:center; font-size:30px; font-weight:900; color:#ffffff!important; } #bc_msg textarea{ background:#ffffff!important; border:none!important; border-radius:999px!important; color:#000000!important; font-weight:800!important; } #bc_msg textarea::placeholder{ color:#555555!important; } #bc_send button{ min-width:48px!important; height:42px!important; border-radius:999px!important; background:var(--uva-purple)!important; color:#ffffff!important; font-size:20px!important; font-weight:900!important; } /* Buttons */ #bc_app button{ font-weight:900!important; } /* File */ #bc_app .gr-file{ background:#ffffff!important; border-radius:16px!important; border:2px solid var(--uva-purple)!important; color:#000000!important; 
padding:12px!important; margin-bottom:16px!important; } #bc_app .gr-file *{ color:#000000!important; font-weight:800!important; } /* Dashboard */ .bc-dashboard{ background:#ffffff!important; border-radius:22px; padding:22px; box-shadow:0 12px 28px rgba(0,0,0,0.18); border-top:8px solid var(--uva-purple)!important; color:#000000!important; } .bc-dashboard h3, .bc-dashboard h4{ color:var(--uva-purple)!important; font-weight:900!important; } .bc-dashboard p, .bc-dashboard td, .bc-dashboard span, .bc-dashboard div{ color:#000000!important; font-weight:800!important; } .bc-dashboard-grid{ display:grid; grid-template-columns:2fr 1fr; gap:20px; align-items:start; } .bc-dashboard-help{ background:var(--uva-soft)!important; border-left:7px solid var(--uva-gold)!important; border-radius:16px; padding:16px; } .bc-metrics{ display:grid; grid-template-columns:repeat(3,1fr); gap:14px; margin:16px 0; } .bc-card{ border-radius:16px; padding:16px; text-align:center; font-weight:900!important; border:2px solid #9b73b5; } .bc-card strong{ display:block; font-size:30px!important; color:var(--uva-purple)!important; font-weight:900!important; } .bc-card.total{background:#e8d8f0!important;} .bc-card.green{background:#bdf3cf!important;border-color:#16a34a!important;} .bc-card.orange{background:#ffd99b!important;border-color:#f97316!important;} .bc-card.red{background:#ffb9b9!important;border-color:#dc2626!important;} .bc-card.quiz{background:#e4d5ff!important;border-color:#7c3aed!important;} .bc-card.avg{background:var(--uva-gold-soft)!important;border-color:var(--uva-gold)!important;} .bc-chart-row{margin:10px 0;} .bc-chart-label{font-size:13px;margin-bottom:4px;} .bc-bar-bg{height:16px;background:#eeeeee;border-radius:999px;overflow:hidden;border:1px solid #bbbbbb;} .bc-bar{height:100%;border-radius:999px;} .bc-bar.green{background:#16a34a;} .bc-bar.orange{background:#f97316;} .bc-bar.red{background:#dc2626;} .bc-bar.weak{background:var(--uva-purple);} .bc-table{ width:100%; 
  border-collapse:collapse;
  font-size:13px;
  margin-top:12px;
  background:#ffffff!important;
}
.bc-table th{
  background:var(--uva-purple)!important;
  color:#ffffff!important;
  padding:10px;
  border:1px solid #dddddd;
  font-weight:900!important;
}
.bc-table td{
  border:1px solid #999999;
  padding:9px;
  color:#000000!important;
  font-weight:800!important;
  background:#ffffff!important;
}
.bc-table tr:nth-child(even) td{
  background:#f8f0fb!important;
}
.bc-pill{
  padding:5px 10px;
  border-radius:999px;
  font-weight:900!important;
  color:#000000!important;
}
.bc-green{background:#86efac!important;}
.bc-orange{background:#fdba74!important;}
.bc-red{background:#fca5a5!important;}
.bc-badge{
  display:inline-block;
  background:var(--uva-gold-soft)!important;
  border:2px solid var(--uva-gold);
  padding:7px 12px;
  border-radius:999px;
  margin:4px;
  font-weight:900!important;
  color:#000000!important;
}

/* Final dashboard help text fix */
.bc-dashboard-help,
.bc-dashboard-help p,
.bc-dashboard-help div{
  color:#000000!important;
  font-weight:800!important;
}
.bc-dashboard-help strong{
  color:#5a2d77!important;
  font-weight:900!important;
}

/* Mobile */
@media(max-width:768px){
  #bc_app{margin:8px auto;max-width:98vw;}
  .bc-top-header{flex-direction:column;text-align:center;padding:16px;}
  .bc-settings{padding:14px!important;}
  .bc-chat-shell{min-height:420px;}
  .bc-chat-wrap{max-height:420px;}
  .bc-bubble{max-width:94%;font-size:14px;}
  .bc-input-bar{border-radius:18px;}
  .bc-metrics{grid-template-columns:1fr;}
  .bc-dashboard-grid{grid-template-columns:1fr;}
  .bc-table{font-size:11px;}
  .bc-table th,.bc-table td{padding:6px;}
}
"""

# =====================================================
# GRADIO UI
# =====================================================

# Load saved analytics and student profiles before building the UI so the
# initial dashboard render reflects persisted data.
load_persistent_data()

with gr.Blocks(css=CSS) as demo:
    # Per-session state: chat transcript and the quiz state machine.
    history_state = gr.State([])
    quiz_state = gr.State({"active": False, "quiz_data": None, "language_mode": "Auto", "topic": "General Neurology"})

    with gr.Column(elem_id="bc_app"):
        # App header card.
        # NOTE(review): the HTML markup inside these gr.HTML literals appears to
        # have been stripped when this source was extracted (only the text
        # content survived) — restore the original tags from the source file.
        gr.HTML(f"""

BrainChat

AI-powered Neurology Tutor for PMQSN

""")

        # Settings panel: student identity, tutor mode, language, quiz options.
        with gr.Group(elem_classes="bc-settings"):
            student_id = gr.Textbox(label="Student ID / Name", value="Guest", placeholder="Enter student name or ID")
            with gr.Row():
                mode = gr.Dropdown(
                    choices=["Explain", "Detailed", "Short Notes", "Flashcards", "Case-Based", "Quiz Me"],
                    value="Explain",
                    label="Tutor Mode"
                )
                language_mode = gr.Dropdown(
                    choices=["Auto", "Spanish", "English", "Bilingual"],
                    value="Spanish",
                    label="Answer Language"
                )
            with gr.Row():
                quiz_count_mode = gr.Dropdown(
                    choices=["Auto", "3", "5", "7"],
                    value="Auto",
                    label="Quiz Questions"
                )
                show_sources = gr.Checkbox(value=True, label="Show References")

        # Instruction / welcome card (markup stripped — see note above).
        gr.HTML("""
Welcome to BrainChat
BrainChat first searches Professor Handouts, then supporting textbooks if needed.

Learning features: typing animation, adaptive difficulty, Socratic feedback, weak-topic tracking, per-student badges, spaced revision, downloadable Excel report, responsive UI, and chart-based analytics dashboard.
""")

        # Chat "phone" panel: rendered transcript plus the input bar.
        with gr.Group(elem_classes="bc-phone"):
            # Initial empty-chat render (wrapper markup stripped — see note above).
            chat_html = gr.HTML(f'{render_chat([])}')
            with gr.Row(elem_classes="bc-input-bar"):
                # Decorative "+" button (markup stripped — see note above).
                gr.HTML('+')
                msg = gr.Textbox(
                    placeholder="Type a message...",
                    show_label=False,
                    container=False,
                    scale=8,
                    elem_id="bc_msg"
                )
                send_btn = gr.Button("➤", elem_id="bc_send", scale=1)

        # Action buttons row.
        with gr.Row():
            clear_btn = gr.Button("Clear Chat")
            refresh_btn = gr.Button("Refresh Dashboard")
            clear_analytics_btn = gr.Button("Clear Analytics")
            download_btn = gr.Button("Download Excel Report")

        # Hidden until an export is produced by the download button.
        download_file = gr.File(label="Download Excel Report", visible=False)
        dashboard_html = gr.HTML(render_dashboard("Guest"))

    # --- Event wiring ---
    # Enter key and send button share the same generator handler; its yielded
    # 6-tuples map onto these outputs in order.
    msg.submit(
        respond,
        inputs=[msg, history_state, student_id, mode, language_mode, quiz_count_mode, show_sources, quiz_state],
        outputs=[msg, history_state, chat_html, quiz_state, dashboard_html, download_file]
    )
    send_btn.click(
        respond,
        inputs=[msg, history_state, student_id, mode, language_mode, quiz_count_mode, show_sources, quiz_state],
        outputs=[msg, history_state, chat_html, quiz_state, dashboard_html, download_file]
    )
    # queue=False: these are fast, local-only callbacks.
    clear_btn.click(
        clear_all,
        inputs=[student_id],
        outputs=[msg, history_state, chat_html, quiz_state, dashboard_html, download_file],
        queue=False
    )
    refresh_btn.click(
        refresh_dashboard,
        inputs=[student_id],
        outputs=[dashboard_html],
        queue=False
    )
    clear_analytics_btn.click(
        clear_analytics,
        inputs=[],
        outputs=[dashboard_html],
        queue=False
    )
    download_btn.click(
        export_interactions,
        inputs=[student_id],
        outputs=[download_file],
        queue=False
    )

if __name__ == "__main__":
    # queue() enables streaming of the generator handler's intermediate yields.
    demo.queue()
    demo.launch()