<!-- rosalinda.html — Rosalinda, espace codage (chat UI with browser speech APIs) -->
<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Rosalinda — Espace Codage</title>
<link rel="stylesheet" href="style.css">
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://unpkg.com/feather-icons"></script>
</head>
<body class="bg-gray-900 text-gray-100 min-h-screen">
<div class="container mx-auto p-4 max-w-4xl">
<div class="flex justify-between items-center mb-4">
<div class="px-4 py-2 bg-gray-800 rounded-full border border-gray-700">
<span class="font-bold">Rosalinda</span> — Espace Codage
</div>
<!-- Mic state updated asynchronously by JS: announce changes politely to AT -->
<div class="px-4 py-2 bg-gray-800 rounded-full border border-gray-700" id="status" role="status" aria-live="polite">
Micro: prêt
</div>
</div>
<div class="bg-gray-800 rounded-xl border border-gray-700 overflow-hidden">
<!-- Chat transcript: role="log" so newly appended messages are announced -->
<div class="h-[60vh] overflow-y-auto p-4 flex flex-col gap-3" id="msgs" role="log" aria-live="polite"></div>
<div class="p-3 border-t border-gray-700 bg-gray-900/50 flex items-center gap-2">
<!-- Icon-only buttons: explicit type="button" (never submit) + aria-label for AT;
     feather.replace() copies attributes onto the generated <svg>, so the
     aria-hidden on the placeholder keeps the icon itself decorative -->
<button id="micBtn" type="button" aria-label="Activer le micro" class="p-2 rounded-lg hover:bg-gray-700">
<i data-feather="mic" aria-hidden="true"></i>
</button>
<input id="inp" type="text" placeholder="Écris à Rosalinda…" aria-label="Message pour Rosalinda"
class="flex-1 bg-gray-700 rounded-lg px-4 py-2 focus:outline-none focus:ring-2 focus:ring-blue-500">
<button id="sendBtn" type="button" aria-label="Envoyer le message" class="p-2 rounded-lg bg-blue-600 hover:bg-blue-500 text-white">
<i data-feather="send" aria-hidden="true"></i>
</button>
<button id="speakBtn" type="button" title="Lire la dernière réponse" aria-label="Lire la dernière réponse" class="p-2 rounded-lg hover:bg-gray-700">
<i data-feather="volume-2" aria-hidden="true"></i>
</button>
</div>
</div>
<div class="text-sm text-gray-400 mt-4">
✅ Micro &amp; voix = API du navigateur (Chrome/Edge).<br>
⚠️ Cette version répond avec une "IA locale simple". Prochaine étape : brancher un vrai modèle IA.
</div>
</div>
<script>
// Turn the <i data-feather="…"> placeholders into inline SVG icons.
feather.replace();
// Cached DOM references used by every handler below.
const msgs = document.getElementById("msgs");
const inp = document.getElementById("inp");
const sendBtn = document.getElementById("sendBtn");
const micBtn = document.getElementById("micBtn");
const speakBtn = document.getElementById("speakBtn");
const statusEl = document.getElementById("status");
// Last reply produced by rosalindaBrain(); replayed by the speak button.
let lastAIText = "";
// Append a chat bubble to the transcript and keep it scrolled to the bottom.
// `who === "me"` aligns the bubble right (user); anything else aligns left (AI).
function addMsg(text, who) {
  const bubble = document.createElement("div");
  const sideClasses = who === "me" ? "ml-auto bg-blue-900/30" : "mr-auto bg-gray-700";
  bubble.className = "p-3 rounded-lg max-w-[80%] " + sideClasses;
  bubble.textContent = text; // textContent: user text is never parsed as HTML
  msgs.appendChild(bubble);
  msgs.scrollTop = msgs.scrollHeight;
}
// Tiny keyword-matching "brain": returns the first canned reply whose
// keyword list matches the lowercased user text, else a generic fallback.
function rosalindaBrain(userText) {
  const lowered = userText.toLowerCase();
  const rules = [
    [["bonjour", "salut"], "Bonjour 😄 Je suis Rosalinda. Dis-moi ce que tu veux créer : site, thème, image, vidéo, plugin…"],
    [["theme", "thème"], "Ok ✅ Dis-moi : (1) style (moderne, luxe, minimal, flashy), (2) couleurs, (3) 3 colonnes ou non, (4) Shopify/WooCommerce/autre."],
    [["image"], "Je peux préparer une demande d'image. Dis-moi : sujet + style + format (1:1, 16:9, 9:16) + texte à afficher."],
    [["vidéo", "video"], "Je peux préparer une demande vidéo. Dis-moi : durée, style (réaliste/3D), texte à l'écran, musique oui/non."],
    [["micro"], "Pour le micro : clique 🎤, autorise le micro dans ton navigateur, puis parle. Je transcris et je réponds."],
  ];
  // Rules are checked in declaration order, matching the original if-chain.
  for (const [keywords, reply] of rules) {
    if (keywords.some((kw) => lowered.includes(kw))) return reply;
  }
  return "Compris ✅ Donne-moi plus de détails (objectif + plateforme + style), et je te génère une réponse claire.";
}
// Read `text` aloud with the browser's speech-synthesis API (fr-FR voice).
// Any utterance already in progress is cancelled first so replies never queue up.
function speak(text) {
  if ("speechSynthesis" in window) {
    const utt = new SpeechSynthesisUtterance(text);
    utt.lang = "fr-FR";
    window.speechSynthesis.cancel();
    window.speechSynthesis.speak(utt);
  } else {
    alert("TTS non supporté sur ce navigateur.");
  }
}
// Send one message: echo it as a user bubble, clear the input, then
// generate, display, remember and speak the AI reply.
// `text` overrides the input field when provided (used by the mic flow).
function handleSend(text) {
  const message = (text ?? inp.value).trim();
  if (!message) return; // ignore empty / whitespace-only sends
  addMsg(message, "me");
  inp.value = "";
  const answer = rosalindaBrain(message);
  lastAIText = answer; // kept so the speak button can replay it
  addMsg(answer, "ai");
  speak(answer);
}
// Wire the send button, Enter key, and "replay last answer" button.
sendBtn.addEventListener("click", () => {
  handleSend();
});
inp.addEventListener("keydown", (e) => {
  if (e.key === "Enter") {
    handleSend();
  }
});
speakBtn.addEventListener("click", () => {
  if (lastAIText) speak(lastAIText);
});
// Speech-recognition state shared with the mic handler below.
let recognition = null;
let isListening = false;
// Build a SpeechRecognition instance (standard or webkit-prefixed),
// configured for one-shot French dictation with interim results.
// Returns null when the browser exposes neither constructor.
function setupSpeechRecognition() {
  const Recognizer = window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!Recognizer) return null;
  const instance = new Recognizer();
  instance.lang = "fr-FR";
  instance.interimResults = true; // live transcript while the user speaks
  instance.continuous = false;    // stop automatically after one utterance
  return instance;
}
recognition = setupSpeechRecognition();
// Mic button: run one recognition session, mirror the transcript into the
// input field live, and auto-send whatever text remains when the session ends.
// Handlers are (re)bound on every click so `finalText` starts fresh each time.
micBtn.addEventListener("click", async () => {
if (!recognition) {
alert("Reconnaissance vocale non disponible ici. Essaie Chrome/Edge.");
return;
}
// Re-entrancy guard: ignore clicks while a session is already running.
if (isListening) return;
isListening = true;
statusEl.textContent = "Micro: écoute…";
let finalText = "";
recognition.onresult = (e) => {
let transcript = "";
// Rebuild the transcript from resultIndex onward; finalized segments are
// accumulated separately so they survive later interim updates.
for (let i = e.resultIndex; i < e.results.length; i++) {
transcript += e.results[i][0].transcript;
if (e.results[i].isFinal) finalText += e.results[i][0].transcript + " ";
}
// Prefer finalized text; fall back to the interim transcript while speaking.
inp.value = (finalText || transcript).trim();
};
recognition.onerror = () => {
isListening = false;
statusEl.textContent = "Micro: erreur";
};
// NOTE(review): onend also fires after an error, so a partial transcript left
// in the input is still sent — presumably intentional best-effort behavior.
recognition.onend = () => {
isListening = false;
statusEl.textContent = "Micro: prêt";
if (inp.value.trim()) handleSend(inp.value);
};
try {
// start() throws if the recognizer is already active; reset state quietly.
recognition.start();
} catch (e) {
isListening = false;
statusEl.textContent = "Micro: prêt";
}
});
// Greet the user on load so the transcript never starts empty.
addMsg("Bonjour 👋 Je suis Rosalinda. Clique sur le micro pour parler ou écris-moi.", "ai");
</script>
</body>
</html>