ZoniaChatbot commited on
Commit
160bc7e
·
verified ·
1 Parent(s): f05f4e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +369 -50
app.py CHANGED
@@ -1,64 +1,383 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
8
 
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
 
 
 
 
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- response = ""
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
 
43
  """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
- if __name__ == "__main__":
64
- demo.launch()
 
1
  import gradio as gr
2
+ import time
3
+ import os
4
+ import glob
5
+ import torch
6
+ import gc
7
+ import re
8
+ import json
9
+ from pathlib import Path
10
+ from omegaconf import OmegaConf
11
 
12
+ from rag.constants.default_prompt import PROMT_USER_INTENT_TREE
13
+ from rag.reader.directory_reader import DirectoryReader
14
+ from rag.pipeline.milvus_bm25_retriever import HybridSearchRetrieverPipeline
15
+ from rag.pipeline.contextual_generator_qa import ContextualQuestionGeneratorPipeline
16
+ from rag.synthesizer.types import StreamingResponse
17
+ from rag.config.configuration import ConfigurationManager
18
+
19
# Free any cached GPU memory / dangling objects left over from a previous run.
torch.cuda.empty_cache()
gc.collect()

# === Load source PDFs
# Collect every PDF under ./data (relative to the current working directory).
# os.path.join builds the pattern portably instead of the original hand-made
# string concatenation `os.path.abspath(os.curdir) + "/data/*.pdf"`.
pdf_files = glob.glob(os.path.join(os.getcwd(), "data", "*.pdf"))
reader = DirectoryReader(input_files=pdf_files)
documents = reader.load_data()

# === Configuration
config = OmegaConf.load(Path("configs/config.yaml"))
manager = ConfigurationManager(config)

# === Initialize pipelines
# Hybrid retrieval (Milvus dense + BM25 sparse, per the module name) with
# reranking and response synthesis over the loaded documents.
pipeline = HybridSearchRetrieverPipeline(
    documents=documents,
    splitter_config=manager.get_splitter_config(),
    index_retriver_config=manager.get_index_retriever_config(),
    embed_config=manager.get_embed_config(),
    milvus_config=manager.get_db_config(),
    rerank_config=manager.get_rerank_config(),
    response_config=manager.get_response_config(),
    llm_config=manager.get_llm_config()
)

# Generates contextual follow-up questions from retrieved nodes.
qgen_pipeline = ContextualQuestionGeneratorPipeline(
    llm_config=manager.get_llm_config(),
    embed_config=manager.get_embed_config(),
    splitter_config=manager.get_splitter_config(),
    qgen_config=manager.get_question_gen_config(),
)
49
 
 
 
 
 
 
50
 
51
# === Basic intent handling
def handle_intent(etiqueta: str, motivo: str) -> str:
    """Map a detected intent label/reason onto a canned chat reply.

    Returns an empty string when the message should instead be answered
    by the RAG pipeline (i.e. no canned reply applies).
    """
    label = etiqueta.lower()
    reason = motivo.lower()

    if label == "negativo":
        return "⚠️ No puedo responder a eso. Por favor, intenta con algo relacionado al estatuto."
    if label != "neutro":
        return ""

    # First keyword found in the reason wins, mirroring the original order.
    canned_replies = (
        ("saludo", "👋 ¡Hola! ¿En qué puedo ayudarte hoy?"),
        ("despedida", "👋 ¡Hasta luego!"),
        ("agradecimiento", "😊 ¡Con gusto!"),
    )
    for keyword, reply in canned_replies:
        if keyword in reason:
            return reply
    return "🙂 Gracias por tu mensaje."
65
 
 
66
 
67
DEV_MODE = False  # Set to True to include file paths (debug info) in the sources panel


def extract_sources(nodes):
    """Render retrieved nodes as a numbered Markdown source list.

    Each node contributes an entry built from its metadata (file name,
    page, author, creation date; plus the file path when DEV_MODE is on).
    Returns a fallback warning string when *nodes* is empty.
    """
    entries = []
    for idx, node in enumerate(nodes, start=1):
        meta = node.metadata or {}
        doc_name = meta.get("file_name", "Desconocido")
        page = meta.get("num_page", "¿?")
        author = meta.get("Author", "Autor desconocido")
        created = meta.get("creation_date", "Sin fecha")

        lines = [
            f"**[{idx}] {doc_name}**",
            f"> 📄 Página: `{page}`",
            f"> ✍️ Autor: *{author}*",
            f"> 🗓️ Fecha de creación: {created}",
        ]
        if DEV_MODE:
            path = meta.get("file_path", "No disponible")
            lines.append(f"> 🧪 Ruta (debug): `{path}`")

        entries.append("\n".join(lines))

    return "\n\n".join(entries) if entries else "⚠️ No se encontraron fuentes relevantes."
95
+
96
+
97
def detectar_intencion(query: str) -> str:
    """Classify the user's query intent with the LLM and return a canned reply.

    The LLM is prompted (PROMT_USER_INTENT_TREE) to emit JSON with
    "etiqueta" and "motivo" keys; any ```/```json code fences are stripped
    before parsing. Returns "" — meaning "let the RAG pipeline answer" —
    when no canned reply applies or on any error.
    """
    try:
        raw = pipeline.service_context.llm.predict(
            prompt=PROMT_USER_INTENT_TREE,
            query_str=query
        )
        payload = json.loads(re.sub(r"```(?:json)?|```", "", raw).strip())
        return handle_intent(
            payload.get("etiqueta", "").lower(),
            payload.get("motivo", "").lower(),
        )
    except Exception as e:
        # Best effort by design: intent detection must never break the chat flow.
        print("⚠️ Error al detectar intención:", e)
        return ""
111
+
112
+
113
def bot(history, use_intent, use_subq):
    """Generator event handler that streams the assistant's answer.

    Yields tuples of (history, sources-markdown update, *10 sub-question
    button updates) so Gradio refreshes the UI as content arrives.

    Parameters:
        history: list of {"role", "content"} dicts (Gradio "messages" format).
        use_intent: when True, short-circuit with a canned reply if the
            intent detector matches (greeting, farewell, thanks, ...).
        use_subq: when True, generate follow-up sub-questions from the
            retrieved nodes.
    """
    user_msg = next(
        (m["content"] for m in reversed(history) if m["role"] == "user" and isinstance(m["content"], str)),
        None
    )
    if not user_msg:
        # BUG FIX: this function is a generator, so the original
        # `return history, ...` value was discarded by Gradio and the UI
        # never received the no-op update. Yield it first, then stop.
        yield history, gr.update(visible=False), *[gr.update(visible=False)] * 10
        return

    try:
        if use_intent:
            intent_reply = detectar_intencion(user_msg)
            if intent_reply:
                history.append({"role": "assistant", "content": ""})
                # Typewriter effect: reveal the canned reply one char at a time.
                for char in intent_reply:
                    history[-1]["content"] += char
                    time.sleep(0.05)
                    yield history, gr.update(visible=False), *[gr.update(visible=False)] * 10
                return
    except Exception as e:
        # Best effort: on any intent error, fall through to the RAG pipeline.
        print("⚠️ Error interpretando intención:", e)

    response, nodes = pipeline.main(user_msg)
    history.append({"role": "assistant", "content": ""})

    # NOTE(review): when `response` is not a StreamingResponse the assistant
    # message stays empty — confirm pipeline.main always streams.
    if isinstance(response, StreamingResponse):
        for token in response.response_gen:
            history[-1]["content"] += token
            time.sleep(0.02)
            yield history, gr.update(visible=False), *[gr.update(visible=False)] * 10

    subquestions = qgen_pipeline.run(user_msg, nodes) if use_subq else []

    fuentes_md = extract_sources(nodes)
    history[-1]["sources"] = fuentes_md

    # Show one button per generated sub-question; hide the remainder of the 10.
    subq_updates = [
        gr.update(value=q, visible=True) if i < len(subquestions) else gr.update(visible=False)
        for i, q in enumerate(subquestions + [""] * (10 - len(subquestions)))
    ]

    yield history, gr.update(value=f"📚 **Fuentes:**\n{fuentes_md}", visible=True), *subq_updates
154
+
155
+
156
def add_message(history, msg_text):
    """Append *msg_text* as a user turn (in place) and clear the textbox."""
    user_turn = {"role": "user", "content": msg_text}
    history.append(user_turn)
    # Second element resets the Textbox value to empty.
    return history, ""
159
+
160
+
161
# === Toggle panel visibility
def toggle_config(is_visible):
    """Flip the config-panel visibility flag and mirror it onto the panel."""
    now_visible = not is_visible
    return now_visible, gr.update(visible=now_visible)
164
+
165
+
166
# === CSS styling for the Gradio UI (passed to gr.Blocks(css=...) below).
# NOTE(review): the `# pointer-events: none;` line inside
# #floating-config-wrapper uses Python-style `#` comment syntax, which is
# invalid in CSS — browsers drop that declaration. Use /* ... */ to comment
# CSS, or delete the line.
css = """
body {
background-color: #f8fafc;
font-family: 'Segoe UI', sans-serif;
}

.gr-chatbot {
background-color: #ffffff;
border: 1px solid #d1fae5;
border-radius: 12px;
padding: 10px;
box-shadow: 0 4px 12px rgba(0,0,0,0.03);
}

.gr-textbox {
border-radius: 10px !important;
border: 1px solid #a7f3d0;
padding: 10px;
flex-grow: 1;
}

.gr-button.enviar {
background-color: #2ecc71;
color: white;
font-weight: 600;
border-radius: 10px !important;
padding: 10px 18px;
margin-left: 8px;
transition: background-color 0.3s ease;
}
.gr-button.enviar:hover {
background-color: #27ae60;
}

.gr-markdown {
background-color: #ecfdf5;
border-left: 4px solid #10b981;
padding: 10px;
margin-top: 10px;
border-radius: 8px;
color: #064e3b;
font-size: 15px;
}

.gr-button.subq {
background-color: #d1fae5;
border: none;
padding: 8px 14px;
border-radius: 9999px;
cursor: pointer;
font-size: 14px;
margin: 4px 6px 0 0;
transition: background-color 0.3s, transform 0.1s;
box-shadow: 0 1px 2px rgba(0,0,0,0.05);
}
.gr-button.subq:hover {
background-color: #a7f3d0;
transform: scale(1.05);
}

#floating-config-wrapper {
position: fixed;
bottom: 20px;
right: 20px;
z-index: 9999;
display: inline-flex;
flex-direction: column;
align-items: flex-end;
width: auto; /* 👈 no ocupar ancho completo */
max-width: 250px; /* 👈 ancho controlado */
# pointer-events: none; /* 👈 deja pasar clics fuera del botón y panel */
}

#config-btn {
all: unset; /* 🚫 Quita todos los estilos por defecto */
display: flex;
justify-content: center;
align-items: center;
width: 40px;
height: 40px;
font-size: 22px;
background-color: transparent; /* 🟢 Sin fondo */
color: #2ecc71; /* 🟢 Verde icono */
border-radius: 50%;
cursor: pointer;
transition: background-color 0.2s ease;
}

#config-btn:hover {
background-color: #d1fae5; /* 🟢 Suave fondo al pasar el mouse */
}


#config-panel {
background-color: #ecfdf5;
border: 1px solid #a7f3d0;
border-radius: 10px;
padding: 12px;
min-width: 220px;
box-shadow: 0 6px 16px rgba(0,0,0,0.1);
font-size: 14px;
color: #064e3b;
}

#zonia-header h1 {
font-family: 'Segoe UI', sans-serif;
font-weight: bold;
}

#zonia-header p {
font-family: 'Segoe UI', sans-serif;
}

.gr-chatbot {
background: white;
border-radius: 16px;
border: 1px solid #d1fae5;
box-shadow: 0 4px 14px rgba(0,0,0,0.05);
padding: 12px;
margin-bottom: 12px;
}

.gr-chatbot .message.user {
background-color: #e0f2f1;
border-radius: 12px;
padding: 10px;
margin: 6px 0;
font-weight: 500;
color: #065f46;
}

.gr-chatbot .message.assistant {
background-color: #ecfdf5;
border-radius: 12px;
padding: 10px;
margin: 6px 0;
color: #065f46;
border-left: 4px solid #10b981;
}

.gr-chatbot .message {
font-family: 'Segoe UI', sans-serif;
font-size: 15px;
line-height: 1.6;
}

.gr-chatbot .avatar {
display: none; /* Oculta los avatares si los hay */
}


"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
 
320
# === Interface: Gradio Blocks layout and event wiring.
with gr.Blocks(css=css) as app:
    # Header banner (raw HTML rendered through Markdown).
    gr.Markdown(
        """
<div style='padding: 1rem 1.5rem; border-radius: 12px; background: #ecfdf5; border-left: 5px solid #10b981; margin-bottom: 1.5rem'>
<h1 style='margin: 0; font-size: 26px; color: #065f46;'>🤖 ZONIA</h1>
<p style='margin: 0.3rem 0 0; font-size: 15px; color: #065f46;'>Un asistente inteligente para consultar estatutos de posgrado y reglamentos estudiantiles. Esta interfaz es una prueba para evaluación de usuarios.</p>
</div>
""",
        elem_id="zonia-header"
    )

    # Per-session flags. The hidden checkboxes mirror the visible ones in the
    # config panel so `bot` can read them as plain inputs.
    config_visible = gr.State(False)
    toggle_intent = gr.Checkbox(visible=False, value=True, label="intent_flag")
    toggle_subq = gr.Checkbox(visible=False, value=True, label="subq_flag")

    chatbot = gr.Chatbot(label="Conversación", type="messages", show_copy_button=True)
    # `state` holds the message history list that add_message/bot mutate.
    state = gr.State([])

    # Sources panel and up to 10 clickable sub-question buttons, hidden until
    # `bot` yields updates for them.
    sources_md = gr.Markdown(visible=False)
    subq_buttons = [gr.Button(visible=False, elem_classes="subq") for _ in range(10)]

    # Input row: question textbox + send button.
    with gr.Row():
        msg = gr.Textbox(placeholder="Haz tu pregunta...")
        enviar = gr.Button("Enviar", elem_classes="enviar")

    # Floating settings button and its collapsible panel.
    with gr.Group(elem_id="floating-config-wrapper"):
        show_config_btn = gr.Button("⚙️", elem_id="config-btn")
        with gr.Column(visible=False, elem_id="config-panel") as config_panel:
            gr.Markdown("### ⚙️ Configuración")
            intent_checkbox = gr.Checkbox(label="Activar intención", value=True)
            subq_checkbox = gr.Checkbox(label="Activar subpreguntas", value=True)

    # Toggle and sync helpers.
    # NOTE(review): toggle_panel duplicates the module-level toggle_config —
    # one of the two is dead code and could be removed.
    def toggle_panel(is_visible): return not is_visible, gr.update(visible=not is_visible)

    # Copies the visible checkbox values onto the hidden flag checkboxes.
    def sync_flags(i, s): return i, s

    show_config_btn.click(toggle_panel, inputs=[config_visible], outputs=[config_visible, config_panel])
    intent_checkbox.change(sync_flags, [intent_checkbox, subq_checkbox], [toggle_intent, toggle_subq])
    subq_checkbox.change(sync_flags, [intent_checkbox, subq_checkbox], [toggle_intent, toggle_subq])

    # Submit via Enter key: record the user turn, then stream the answer.
    msg.submit(add_message, [state, msg], [state, msg]).then(
        bot, [state, toggle_intent, toggle_subq], [chatbot, sources_md] + subq_buttons
    )

    # Submit via the send button (same chain as Enter).
    enviar.click(add_message, [state, msg], [state, msg]).then(
        bot, [state, toggle_intent, toggle_subq], [chatbot, sources_md] + subq_buttons
    )

    # Clicking a sub-question button re-submits its label as the next user
    # message (a Button used as an input passes its label text).
    for btn in subq_buttons:
        btn.click(lambda txt, h: add_message(h, txt), [btn, state], [state, msg]).then(
            bot, [state, toggle_intent, toggle_subq], [chatbot, sources_md] + subq_buttons
        )

# NOTE(review): `True` is passed positionally, so which launch() parameter it
# sets depends on the installed Gradio version's signature — presumably
# share/inline was intended; use an explicit keyword (e.g. share=True).
app.launch(True)