"""Chat endpoint with streaming support."""
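
# A hypothetical client sketch (the host/port and field values are assumptions; the
# route, method, and ChatRequest fields come from this module):
#
#   curl -N -X POST http://localhost:8000/api/v1/chat/stream \
#        -H "Content-Type: application/json" \
#        -d '{"user_id": "u-1", "room_id": "r-1", "message": "hello"}'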

import asyncio
import json
import uuid
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, Depends, HTTPException
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import BaseModel
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sse_starlette.sse import EventSourceResponse

from src.agents.chatbot import chatbot
from src.agents.orchestration import orchestrator
from src.config.settings import settings
from src.db.postgres.connection import get_db
from src.db.postgres.models import ChatMessage, MessageSource
from src.db.redis.connection import get_redis
from src.middlewares.logging import get_logger, log_execution
from src.rag.retriever import retriever

_GREETINGS = frozenset(["hi", "hello", "hey", "halo", "hai", "hei"])
_GOODBYES = frozenset(["bye", "goodbye", "thanks", "thank you", "terima kasih", "sampai jumpa"])


def _fast_intent(message: str) -> Optional[dict]:
    """Bypass the LLM orchestrator for obvious greetings and farewells."""
    lower = message.lower().strip().rstrip("!.,? ")
    if lower in _GREETINGS:
        return {"intent": "greeting", "needs_search": False,
                "direct_response": "Hello! How can I assist you today?", "search_query": ""}
    if lower in _GOODBYES:
        return {"intent": "goodbye", "needs_search": False,
                "direct_response": "Goodbye! Have a great day!", "search_query": ""}
    return None
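
# A minimal sketch of the fast path, assuming the return shapes above (the inputs
# here are illustrative, not taken from calling code):
#
#   _fast_intent("Hello!")              -> {"intent": "greeting", "needs_search": False, ...}
#   _fast_intent("thanks")              -> {"intent": "goodbye",  "needs_search": False, ...}
#   _fast_intent("What is clause 4.2?") -> None  # falls through to orchestrator.analyze_message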

logger = get_logger("chat_api")
router = APIRouter(prefix="/api/v1", tags=["Chat"])


class ChatRequest(BaseModel):
    user_id: str
    room_id: str
    message: str

def _format_context(results: List[Dict[str, Any]]) -> str:
    """Format retrieval results as a context string for the LLM."""
    lines = []
    for result in results:
        filename = result["metadata"].get("filename", "Unknown")
        page = result["metadata"].get("page_label")
        source_label = f"{filename}, p.{page}" if page else filename
        lines.append(f"[Source: {source_label}]\n{result['content']}\n")
    return "\n".join(lines)

def _extract_sources(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Extract deduplicated source references from retrieval results."""
    seen = set()
    sources = []
    for result in results:
        meta = result["metadata"]
        key = (meta.get("document_id"), meta.get("page_label"))
        if key not in seen:
            seen.add(key)
            # Read the same flat metadata fields used for the dedup key (and by
            # _format_context above); the values must come from the same place.
            sources.append({
                "document_id": meta.get("document_id"),
                "filename": meta.get("filename", "Unknown"),
                "page_label": meta.get("page_label", "Unknown"),
            })
    logger.debug(f"Extracted sources: {sources}")
    return sources
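
# Example output, assuming retriever results carry flat metadata as read above
# (values are illustrative):
#
#   [{"document_id": "doc-123", "filename": "contract.pdf", "page_label": "4"}]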

async def get_cached_response(redis, cache_key: str) -> Optional[str]:
    """Return a previously cached answer for this key, or None on a cache miss."""
    cached = await redis.get(cache_key)
    if cached:
        return json.loads(cached)
    return None


async def cache_response(redis, cache_key: str, response: str):
    """Cache the final answer for 24 hours (86400 seconds)."""
    await redis.setex(cache_key, 86400, json.dumps(response))
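
# Cache keys are scoped per room and verbatim message (built in chat_stream below):
#   f"{settings.redis_prefix}chat:{request.room_id}:{request.message}"
# so the same question asked in another room is cached separately.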

async def load_history(db: AsyncSession, room_id: str, limit: int = 10) -> list:
    """Load the most recent chat messages for a room as LangChain message objects (oldest-first)."""
    result = await db.execute(
        select(ChatMessage)
        .where(ChatMessage.room_id == room_id)
        .order_by(ChatMessage.created_at.desc())
        .limit(limit)
    )
    # Take the newest `limit` rows, then reverse so the conversation reads oldest-first.
    rows = list(reversed(result.scalars().all()))
    return [
        HumanMessage(content=row.content) if row.role == "user" else AIMessage(content=row.content)
        for row in rows
    ]

async def save_messages(
    db: AsyncSession,
    room_id: str,
    user_content: str,
    assistant_content: str,
    sources: Optional[List[Dict[str, Any]]] = None,
):
    """Persist user and assistant messages, and attach sources to the assistant message."""
    db.add(ChatMessage(id=str(uuid.uuid4()), room_id=room_id, role="user", content=user_content))
    assistant_id = str(uuid.uuid4())
    db.add(ChatMessage(id=assistant_id, room_id=room_id, role="assistant", content=assistant_content))
    for src in (sources or []):
        page = src.get("page_label")
        db.add(MessageSource(
            id=str(uuid.uuid4()),
            message_id=assistant_id,
            document_id=src.get("document_id"),
            filename=src.get("filename"),
            page_label=str(page) if page is not None else None,
        ))
    await db.commit()

@router.post("/chat/stream")
@log_execution(logger)
async def chat_stream(request: ChatRequest, db: AsyncSession = Depends(get_db)):
    """Chat endpoint with a streaming response.

    SSE event sequence:
      1. sources — JSON array of {document_id, filename, page_label}
      2. chunk — text fragments of the answer
      3. done — signals end of stream
    """
    redis = await get_redis()
    cache_key = f"{settings.redis_prefix}chat:{request.room_id}:{request.message}"
    cached = await get_cached_response(redis, cache_key)
    if cached:
        logger.info("Returning cached response")

        async def stream_cached():
            # Replay the cached answer in 50-character chunks so the client's
            # streaming code path behaves the same as for a live response.
            yield {"event": "sources", "data": json.dumps([])}
            for i in range(0, len(cached), 50):
                yield {"event": "chunk", "data": cached[i:i + 50]}
            yield {"event": "done", "data": ""}

        return EventSourceResponse(stream_cached())

    try:
        # Step 1: Fast local intent check (skips the LLM for greetings/farewells).
        intent_result = _fast_intent(request.message)
        context = ""
        sources: List[Dict[str, Any]] = []
        if intent_result is None:
            # Step 2: Launch retrieval and history loading in parallel, then run the orchestrator.
            retrieval_task = asyncio.create_task(
                retriever.retrieve(request.message, request.user_id, db)
            )
            history_task = asyncio.create_task(
                load_history(db, request.room_id, limit=6)  # 6 msgs (3 pairs) for the orchestrator
            )
            history = await history_task  # fast DB query (<100 ms), done before the orchestrator finishes
            intent_result = await orchestrator.analyze_message(request.message, history)
            if not intent_result.get("needs_search"):
                retrieval_task.cancel()
                raw_results = []
            else:
                search_query = intent_result.get("search_query", request.message)
                logger.info(f"Searching for: {search_query}")
                if search_query != request.message:
                    # The orchestrator rewrote the query, so the speculative retrieval is stale.
                    retrieval_task.cancel()
                    raw_results = await retriever.retrieve(
                        query=search_query,
                        user_id=request.user_id,
                        db=db,
                    )
                else:
                    raw_results = await retrieval_task
            context = _format_context(raw_results)
            sources = _extract_sources(raw_results)

        # Step 3: Direct response for greetings / non-document intents.
        if intent_result.get("direct_response"):
            response = intent_result["direct_response"]
            await cache_response(redis, cache_key, response)
            await save_messages(db, request.room_id, request.message, response, sources=[])

            async def stream_direct():
                # Emit the documented sources/chunk/done sequence so clients
                # waiting on the "done" event never hang on a direct response.
                yield {"event": "sources", "data": json.dumps([])}
                yield {"event": "chunk", "data": response}
                yield {"event": "done", "data": ""}

            return EventSourceResponse(stream_direct())

        # Step 4: Stream the answer token-by-token as the LLM generates it.
        # Load full history (10 msgs) for the chatbot — richer context than the 6 used by the orchestrator.
        full_history = await load_history(db, request.room_id, limit=10)
        messages = full_history + [HumanMessage(content=request.message)]

        async def stream_response():
            full_response = ""
            yield {"event": "sources", "data": json.dumps(sources)}
            async for token in chatbot.astream_response(messages, context):
                full_response += token
                yield {"event": "chunk", "data": token}
            yield {"event": "done", "data": ""}
            await cache_response(redis, cache_key, full_response)
            await save_messages(db, request.room_id, request.message, full_response, sources=sources)

        return EventSourceResponse(stream_response())
    except Exception as e:
        logger.error("Chat failed", error=str(e))
        raise HTTPException(status_code=500, detail=f"Chat failed: {str(e)}") from e