chuckfinca Claude Opus 4.6 (1M context) committed on
Commit
c02b92b
·
1 Parent(s): cfc5e63

Add HF trace storage, admin controls, and Spaces deployment

Browse files

- Upload traces as JSON to a HF dataset repo after each question
- Add Traces tab with session-scoped view (admin URL param for full history)
- Replace Gradio login with inline token field
- Decouple chat history from agent loop (display only)
- Add huggingface-hub dependency and requirements.txt for HF Spaces
- Add .claudeignore to exclude .env files

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

Files changed (7) hide show
  1. .claudeignore +3 -0
  2. .env.example +5 -0
  3. README.md +1 -0
  4. app.py +212 -87
  5. pyproject.toml +1 -0
  6. requirements.txt +269 -0
  7. uv.lock +2 -0
.claudeignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .env
2
+ .env.local
3
+ .env.*.local
.env.example CHANGED
@@ -11,4 +11,9 @@ E2B_API_KEY=your-e2b-api-key
11
 
12
  # Web app authentication
13
  LH_ACCESS_TOKEN=your-secret-token
 
14
  # LH_MAX_SESSION_COST=0.50
 
 
 
 
 
11
 
12
  # Web app authentication
13
  LH_ACCESS_TOKEN=your-secret-token
14
+ # LH_ADMIN_TOKEN=your-admin-token
15
  # LH_MAX_SESSION_COST=0.50
16
+
17
+ # Hugging Face trace storage (optional)
18
+ # HF_TOKEN=your-hf-write-token
19
+ # HF_TRACES_REPO=your-username/document-explorer-traces
README.md CHANGED
@@ -7,6 +7,7 @@ sdk: gradio
7
  sdk_version: "6.9.0"
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  # Document Explorer
 
7
  sdk_version: "6.9.0"
8
  app_file: app.py
9
  pinned: false
10
+ python_version: "3.12"
11
  ---
12
 
13
  # Document Explorer
app.py CHANGED
@@ -9,15 +9,18 @@ Uses E2B sandboxes for code execution (no Docker required).
9
 
10
  from __future__ import annotations
11
 
 
12
  import os
13
  import tempfile
14
  import time
15
  from dataclasses import asdict
 
16
  from pathlib import Path
17
 
18
  import gradio as gr
19
  import litellm
20
  from dotenv import load_dotenv
 
21
 
22
  from llm_harness.agent import run_agent_loop
23
  from llm_harness.prompt import build_system_prompt
@@ -32,11 +35,64 @@ litellm.suppress_debug_info = True
32
 
33
  MODEL = os.environ.get("LH_MODEL", "")
34
  ACCESS_TOKEN = os.environ.get("LH_ACCESS_TOKEN", "")
 
35
  MAX_SESSION_COST = float(os.environ.get("LH_MAX_SESSION_COST", "0.50"))
 
 
36
 
 
37
 
38
- def authenticate(username: str, password: str) -> bool:
39
- return password == ACCESS_TOKEN
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
 
42
  def save_uploaded_files(files: list[str]) -> Path:
@@ -67,30 +123,27 @@ def format_stats(trace: object) -> str:
67
 
68
  def chat(
69
  message: str,
70
- history: list[dict],
71
  files: list[str] | None,
72
  workspace_path: str,
73
  scratch_path: str,
74
  session_cost: float,
 
75
  ):
 
 
 
 
 
 
76
  if not MODEL:
77
- yield (
78
- "Error: LH_MODEL not set.",
79
- "",
80
- workspace_path,
81
- scratch_path,
82
- session_cost,
83
- )
84
  return
85
 
86
  if session_cost >= MAX_SESSION_COST:
87
  yield (
88
  f"Session cost limit reached (${session_cost:.2f} / "
89
  f"${MAX_SESSION_COST:.2f}). Start a new session.",
90
- "",
91
- workspace_path,
92
- scratch_path,
93
- session_cost,
94
  )
95
  return
96
 
@@ -105,11 +158,11 @@ def chat(
105
  scratch_path = tempfile.mkdtemp(prefix="lh-scratch-")
106
  scratch_dir = Path(scratch_path)
107
 
108
- # Build messages from Gradio history
109
  system_prompt = build_system_prompt(base_prompt="", workspace=workspace)
110
- messages: list[Message] = [{"role": "system", "content": system_prompt}]
111
- messages.extend({"role": e["role"], "content": e["content"]} for e in history)
112
- messages.append({"role": "user", "content": message})
 
113
 
114
  # Run agent loop with E2B sandbox
115
  start = time.monotonic()
@@ -129,7 +182,7 @@ def chat(
129
  if isinstance(event, ToolCallEvent):
130
  tool_call_count += 1
131
  status = f"*Exploring documents ({tool_call_count} tool calls)...*"
132
- yield status, "", workspace_path, scratch_path, session_cost
133
  elif isinstance(event, ToolResultEvent):
134
  continue
135
  else:
@@ -142,6 +195,7 @@ def chat(
142
  workspace_path,
143
  scratch_path,
144
  session_cost,
 
145
  )
146
  return
147
 
@@ -149,10 +203,9 @@ def chat(
149
  trace.wall_time_s = round(time.monotonic() - start, 2)
150
  answer = trace.answer or "(no answer)"
151
  stats = format_stats(trace)
152
- trace_html = render_trace(
153
- {"question": message, "passed": True, "assertions": {}, "trace": asdict(trace)},
154
- max_chars=2000,
155
- )
156
 
157
  yield (
158
  f"{answer}\n\n---\n{stats}",
@@ -160,74 +213,146 @@ def chat(
160
  workspace_path,
161
  scratch_path,
162
  session_cost,
 
163
  )
164
 
165
 
166
  def build_app() -> gr.Blocks:
167
  with gr.Blocks(title="Document Explorer", theme=gr.themes.Soft()) as demo:
168
- gr.Markdown(
169
- "# Document Explorer\n"
170
- "Upload text or CSV files, then ask questions. "
171
- "The model explores your documents by writing and running Python code."
172
- )
173
-
174
- workspace_state = gr.State("")
175
- scratch_state = gr.State("")
176
- cost_state = gr.State(0.0)
177
-
178
- with gr.Accordion("Upload documents", open=True):
179
- file_upload = gr.File(
180
- label="Text, CSV, Markdown, or JSON files",
181
- file_count="multiple",
182
- file_types=[".txt", ".csv", ".md", ".json"],
183
- )
184
-
185
- chatbot = gr.Chatbot(height=500)
186
- msg = gr.Textbox(
187
- placeholder="Ask a question about your documents...",
188
- label="",
189
- show_label=False,
190
- )
191
-
192
- with gr.Accordion("Trace", open=False, visible=False) as trace_accordion:
193
- trace_display = gr.HTML("")
194
-
195
- def respond(
196
- message, history, files, workspace_path, scratch_path, session_cost
197
- ):
198
- history = history or []
199
- history.append({"role": "user", "content": message})
200
-
201
- for response, trace_html, wp, sp, sc in chat(
202
- message, history[:-1], files, workspace_path, scratch_path, session_cost
203
- ):
204
- history_with_response = [
205
- *history,
206
- {"role": "assistant", "content": response},
207
- ]
208
- visible = gr.update(visible=bool(trace_html))
209
- yield history_with_response, "", trace_html, visible, wp, sp, sc
210
-
211
- msg.submit(
212
- respond,
213
- inputs=[
214
- msg,
215
- chatbot,
216
- file_upload,
217
- workspace_state,
218
- scratch_state,
219
- cost_state,
220
- ],
221
- outputs=[
222
- chatbot,
223
- msg,
224
- trace_display,
225
- trace_accordion,
226
- workspace_state,
227
- scratch_state,
228
- cost_state,
229
- ],
230
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
  return demo
233
 
@@ -237,4 +362,4 @@ if __name__ == "__main__":
237
  print("WARNING: LH_ACCESS_TOKEN not set — app is unprotected")
238
 
239
  app = build_app()
240
- app.launch(auth=authenticate if ACCESS_TOKEN else None)
 
9
 
10
  from __future__ import annotations
11
 
12
+ import json
13
  import os
14
  import tempfile
15
  import time
16
  from dataclasses import asdict
17
+ from datetime import datetime, timezone
18
  from pathlib import Path
19
 
20
  import gradio as gr
21
  import litellm
22
  from dotenv import load_dotenv
23
+ from huggingface_hub import HfApi
24
 
25
  from llm_harness.agent import run_agent_loop
26
  from llm_harness.prompt import build_system_prompt
 
35
 
36
  MODEL = os.environ.get("LH_MODEL", "")
37
  ACCESS_TOKEN = os.environ.get("LH_ACCESS_TOKEN", "")
38
+ ADMIN_TOKEN = os.environ.get("LH_ADMIN_TOKEN", "")
39
  MAX_SESSION_COST = float(os.environ.get("LH_MAX_SESSION_COST", "0.50"))
40
+ HF_TRACES_REPO = os.environ.get("HF_TRACES_REPO", "")
41
+ HF_TOKEN = os.environ.get("HF_TOKEN", "")
42
 
43
+ hf_api = HfApi(token=HF_TOKEN) if HF_TOKEN else None
44
 
45
+
46
+ def _slugify(text: str, max_len: int = 50) -> str:
47
+ slug = text.lower().replace(" ", "-")
48
+ slug = "".join(c for c in slug if c.isalnum() or c == "-")
49
+ return slug[:max_len].rstrip("-")
50
+
51
+
52
def upload_trace(result: dict) -> str | None:
    """Persist *result* as a JSON file in the HF traces dataset repo.

    The filename is a UTC timestamp, optionally suffixed with a slug of
    the question text. Returns the uploaded filename, or None when trace
    storage is not configured or the upload fails — storage is strictly
    best-effort and must never break the chat flow.
    """
    if hf_api is None or not HF_TRACES_REPO:
        return None
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S-%f")
    slug = _slugify(result.get("question", ""))
    name = f"{stamp}_{slug}.json" if slug else f"{stamp}.json"
    payload = json.dumps(result, indent=2, default=str).encode()
    try:
        hf_api.upload_file(
            path_or_fileobj=payload,
            path_in_repo=name,
            repo_id=HF_TRACES_REPO,
            repo_type="dataset",
        )
    except Exception as exc:  # best-effort: log and carry on without a trace
        print(f"WARNING: trace upload failed: {exc}")
        return None
    return name
70
+
71
+
72
def list_traces() -> list[str]:
    """Return all stored trace filenames, newest first.

    Filenames are timestamp-prefixed, so a reverse lexicographic sort
    yields newest-first order. Returns an empty list when trace storage
    is not configured or the repo listing fails.
    """
    if not (hf_api and HF_TRACES_REPO):
        return []
    try:
        repo_files = hf_api.list_repo_files(HF_TRACES_REPO, repo_type="dataset")
    except Exception:  # best-effort: treat any hub error as "no traces"
        return []
    json_files = (name for name in repo_files if name.endswith(".json"))
    return sorted(json_files, reverse=True)
84
+
85
+
86
def fetch_trace(filename: str) -> dict | None:
    """Download and parse a single stored trace JSON file.

    Returns the decoded dict, or None when trace storage is not
    configured, *filename* is empty, or the download/parse fails.
    """
    if not (hf_api and HF_TRACES_REPO and filename):
        return None
    try:
        local_path = hf_api.hf_hub_download(
            HF_TRACES_REPO, filename, repo_type="dataset"
        )
        return json.loads(Path(local_path).read_text())
    except Exception:  # best-effort: a missing/corrupt trace renders as empty
        return None
96
 
97
 
98
  def save_uploaded_files(files: list[str]) -> Path:
 
123
 
124
  def chat(
125
  message: str,
 
126
  files: list[str] | None,
127
  workspace_path: str,
128
  scratch_path: str,
129
  session_cost: float,
130
+ token: str = "",
131
  ):
132
+ empty = ("", "", workspace_path, scratch_path, session_cost, None)
133
+
134
+ if ACCESS_TOKEN and token != ACCESS_TOKEN:
135
+ yield ("Invalid access token.", *empty[1:])
136
+ return
137
+
138
  if not MODEL:
139
+ yield ("Error: LH_MODEL not set.", *empty[1:])
 
 
 
 
 
 
140
  return
141
 
142
  if session_cost >= MAX_SESSION_COST:
143
  yield (
144
  f"Session cost limit reached (${session_cost:.2f} / "
145
  f"${MAX_SESSION_COST:.2f}). Start a new session.",
146
+ *empty[1:],
 
 
 
147
  )
148
  return
149
 
 
158
  scratch_path = tempfile.mkdtemp(prefix="lh-scratch-")
159
  scratch_dir = Path(scratch_path)
160
 
 
161
  system_prompt = build_system_prompt(base_prompt="", workspace=workspace)
162
+ messages: list[Message] = [
163
+ {"role": "system", "content": system_prompt},
164
+ {"role": "user", "content": message},
165
+ ]
166
 
167
  # Run agent loop with E2B sandbox
168
  start = time.monotonic()
 
182
  if isinstance(event, ToolCallEvent):
183
  tool_call_count += 1
184
  status = f"*Exploring documents ({tool_call_count} tool calls)...*"
185
+ yield status, "", workspace_path, scratch_path, session_cost, None
186
  elif isinstance(event, ToolResultEvent):
187
  continue
188
  else:
 
195
  workspace_path,
196
  scratch_path,
197
  session_cost,
198
+ None,
199
  )
200
  return
201
 
 
203
  trace.wall_time_s = round(time.monotonic() - start, 2)
204
  answer = trace.answer or "(no answer)"
205
  stats = format_stats(trace)
206
+ result = {"question": message, "passed": True, "assertions": {}, "trace": asdict(trace)}
207
+ trace_filename = upload_trace(result)
208
+ trace_html = render_trace(result, max_chars=2000)
 
209
 
210
  yield (
211
  f"{answer}\n\n---\n{stats}",
 
213
  workspace_path,
214
  scratch_path,
215
  session_cost,
216
+ trace_filename,
217
  )
218
 
219
 
220
  def build_app() -> gr.Blocks:
221
  with gr.Blocks(title="Document Explorer", theme=gr.themes.Soft()) as demo:
222
+ gr.Markdown("# Document Explorer")
223
+
224
+ with gr.Tabs():
225
+ with gr.Tab("Chat"):
226
+ gr.Markdown(
227
+ "Upload text or CSV files, then ask questions. "
228
+ "The model explores your documents by writing and running Python code."
229
+ )
230
+
231
+ workspace_state = gr.State("")
232
+ scratch_state = gr.State("")
233
+ cost_state = gr.State(0.0)
234
+ session_traces_state = gr.State([]) # filenames uploaded this session
235
+
236
+ with gr.Row():
237
+ token_input = gr.Textbox(
238
+ placeholder="Access token",
239
+ label="Access Token",
240
+ type="password",
241
+ scale=1,
242
+ )
243
+
244
+ with gr.Accordion("Upload documents", open=True):
245
+ file_upload = gr.File(
246
+ label="Text, CSV, Markdown, or JSON files",
247
+ file_count="multiple",
248
+ file_types=[".txt", ".csv", ".md", ".json"],
249
+ )
250
+
251
+ chatbot = gr.Chatbot(height=500)
252
+ msg = gr.Textbox(
253
+ placeholder="Ask a question about your documents...",
254
+ label="",
255
+ show_label=False,
256
+ )
257
+
258
+ with gr.Accordion("Trace", open=False, visible=False) as trace_accordion:
259
+ trace_display = gr.HTML("")
260
+
261
+ def respond(
262
+ message, history, files, workspace_path, scratch_path,
263
+ session_cost, session_traces, token,
264
+ ):
265
+ history = history or []
266
+ history.append({"role": "user", "content": message})
267
+
268
+ for response, trace_html, wp, sp, sc, trace_file in chat(
269
+ message, files, workspace_path, scratch_path, session_cost, token
270
+ ):
271
+ if trace_file:
272
+ session_traces = [*session_traces, trace_file]
273
+ history_with_response = [
274
+ *history,
275
+ {"role": "assistant", "content": response},
276
+ ]
277
+ accordion = gr.Accordion(visible=bool(trace_html))
278
+ yield (
279
+ history_with_response, "", trace_html, accordion,
280
+ wp, sp, sc, session_traces,
281
+ )
282
+
283
+ msg.submit(
284
+ respond,
285
+ inputs=[
286
+ msg,
287
+ chatbot,
288
+ file_upload,
289
+ workspace_state,
290
+ scratch_state,
291
+ cost_state,
292
+ session_traces_state,
293
+ token_input,
294
+ ],
295
+ outputs=[
296
+ chatbot,
297
+ msg,
298
+ trace_display,
299
+ trace_accordion,
300
+ workspace_state,
301
+ scratch_state,
302
+ cost_state,
303
+ session_traces_state,
304
+ ],
305
+ )
306
+
307
+ with gr.Tab("Traces") as traces_tab:
308
+ admin_state = gr.State(False)
309
+ trace_dropdown = gr.Dropdown(
310
+ choices=[],
311
+ label="Select trace",
312
+ )
313
+ refresh_btn = gr.Button("Refresh")
314
+ trace_viewer = gr.HTML("")
315
+
316
+ def check_admin(request: gr.Request):
317
+ token = request.query_params.get("admin", "")
318
+ return ADMIN_TOKEN and token == ADMIN_TOKEN
319
+
320
+ def show_trace(filename):
321
+ result = fetch_trace(filename)
322
+ if not result:
323
+ return ""
324
+ return render_trace(result, max_chars=5000)
325
+
326
+ def refresh_traces(session_traces, is_admin):
327
+ if is_admin:
328
+ filenames = list_traces()
329
+ else:
330
+ filenames = sorted(session_traces, reverse=True)
331
+ return gr.Dropdown(
332
+ choices=filenames,
333
+ value=filenames[0] if filenames else None,
334
+ )
335
+
336
+ demo.load(
337
+ check_admin,
338
+ outputs=[admin_state],
339
+ )
340
+
341
+ trace_dropdown.change(
342
+ show_trace,
343
+ inputs=[trace_dropdown],
344
+ outputs=[trace_viewer],
345
+ )
346
+ refresh_btn.click(
347
+ refresh_traces,
348
+ inputs=[session_traces_state, admin_state],
349
+ outputs=[trace_dropdown],
350
+ )
351
+ traces_tab.select(
352
+ refresh_traces,
353
+ inputs=[session_traces_state, admin_state],
354
+ outputs=[trace_dropdown],
355
+ )
356
 
357
  return demo
358
 
 
362
  print("WARNING: LH_ACCESS_TOKEN not set — app is unprotected")
363
 
364
  app = build_app()
365
+ app.launch()
pyproject.toml CHANGED
@@ -11,6 +11,7 @@ dependencies = [
11
  "a-simple-llm-harness",
12
  "e2b-code-interpreter>=2.5",
13
  "gradio>=5.0",
 
14
  "python-dotenv",
15
  ]
16
 
 
11
  "a-simple-llm-harness",
12
  "e2b-code-interpreter>=2.5",
13
  "gradio>=5.0",
14
+ "huggingface-hub",
15
  "python-dotenv",
16
  ]
17
 
requirements.txt ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv export --format requirements-txt --no-hashes
3
+ a-simple-llm-harness @ git+https://github.com/chuckfinca/a-simple-llm-harness.git@a67e2cc8dcf320d5df765c44fbee282ba1aa5331
4
+ # via document-explorer
5
+ aiofiles==24.1.0
6
+ # via gradio
7
+ aiohappyeyeballs==2.6.1
8
+ # via aiohttp
9
+ aiohttp==3.13.3
10
+ # via litellm
11
+ aiosignal==1.4.0
12
+ # via aiohttp
13
+ annotated-doc==0.0.4
14
+ # via
15
+ # fastapi
16
+ # typer
17
+ annotated-types==0.7.0
18
+ # via pydantic
19
+ anyio==4.12.1
20
+ # via
21
+ # gradio
22
+ # httpx
23
+ # openai
24
+ # starlette
25
+ attrs==26.1.0
26
+ # via
27
+ # aiohttp
28
+ # e2b
29
+ # e2b-code-interpreter
30
+ # jsonschema
31
+ # referencing
32
+ audioop-lts==0.2.2 ; python_full_version >= '3.13'
33
+ # via gradio
34
+ bracex==2.6
35
+ # via wcmatch
36
+ brotli==1.2.0
37
+ # via gradio
38
+ certifi==2026.2.25
39
+ # via
40
+ # httpcore
41
+ # httpx
42
+ # requests
43
+ charset-normalizer==3.4.6
44
+ # via requests
45
+ click==8.3.1
46
+ # via
47
+ # litellm
48
+ # typer
49
+ # uvicorn
50
+ colorama==0.4.6 ; sys_platform == 'win32'
51
+ # via
52
+ # click
53
+ # tqdm
54
+ distro==1.9.0
55
+ # via openai
56
+ dockerfile-parse==2.0.1
57
+ # via e2b
58
+ e2b==2.15.3
59
+ # via e2b-code-interpreter
60
+ e2b-code-interpreter==2.5.0
61
+ # via document-explorer
62
+ fastapi==0.135.2
63
+ # via gradio
64
+ fastuuid==0.14.0
65
+ # via litellm
66
+ ffmpy==1.0.0
67
+ # via gradio
68
+ filelock==3.25.2
69
+ # via huggingface-hub
70
+ frozenlist==1.8.0
71
+ # via
72
+ # aiohttp
73
+ # aiosignal
74
+ fsspec==2026.2.0
75
+ # via
76
+ # gradio-client
77
+ # huggingface-hub
78
+ gradio==6.9.0
79
+ # via document-explorer
80
+ gradio-client==2.3.0
81
+ # via gradio
82
+ groovy==0.1.2
83
+ # via gradio
84
+ h11==0.16.0
85
+ # via
86
+ # httpcore
87
+ # uvicorn
88
+ hf-xet==1.4.2 ; platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
89
+ # via huggingface-hub
90
+ httpcore==1.0.9
91
+ # via
92
+ # e2b
93
+ # httpx
94
+ httpx==0.28.1
95
+ # via
96
+ # e2b
97
+ # e2b-code-interpreter
98
+ # gradio
99
+ # gradio-client
100
+ # huggingface-hub
101
+ # litellm
102
+ # openai
103
+ # safehttpx
104
+ huggingface-hub==1.7.2
105
+ # via
106
+ # document-explorer
107
+ # gradio
108
+ # gradio-client
109
+ # tokenizers
110
+ idna==3.11
111
+ # via
112
+ # anyio
113
+ # httpx
114
+ # requests
115
+ # yarl
116
+ importlib-metadata==9.0.0
117
+ # via litellm
118
+ jinja2==3.1.6
119
+ # via
120
+ # gradio
121
+ # litellm
122
+ jiter==0.13.0
123
+ # via openai
124
+ jsonschema==4.26.0
125
+ # via litellm
126
+ jsonschema-specifications==2025.9.1
127
+ # via jsonschema
128
+ litellm==1.82.6
129
+ # via a-simple-llm-harness
130
+ markdown-it-py==4.0.0
131
+ # via rich
132
+ markupsafe==3.0.3
133
+ # via
134
+ # gradio
135
+ # jinja2
136
+ mdurl==0.1.2
137
+ # via markdown-it-py
138
+ multidict==6.7.1
139
+ # via
140
+ # aiohttp
141
+ # yarl
142
+ numpy==2.4.3
143
+ # via
144
+ # gradio
145
+ # pandas
146
+ openai==2.29.0
147
+ # via litellm
148
+ orjson==3.11.7
149
+ # via gradio
150
+ packaging==26.0
151
+ # via
152
+ # e2b
153
+ # gradio
154
+ # gradio-client
155
+ # huggingface-hub
156
+ pandas==3.0.1
157
+ # via gradio
158
+ pillow==12.1.1
159
+ # via gradio
160
+ propcache==0.4.1
161
+ # via
162
+ # aiohttp
163
+ # yarl
164
+ protobuf==7.34.1
165
+ # via e2b
166
+ pydantic==2.12.5
167
+ # via
168
+ # fastapi
169
+ # gradio
170
+ # litellm
171
+ # openai
172
+ pydantic-core==2.41.5
173
+ # via pydantic
174
+ pydub==0.25.1
175
+ # via gradio
176
+ pygments==2.19.2
177
+ # via rich
178
+ python-dateutil==2.9.0.post0
179
+ # via
180
+ # e2b
181
+ # pandas
182
+ python-dotenv==1.2.2
183
+ # via
184
+ # document-explorer
185
+ # litellm
186
+ python-multipart==0.0.22
187
+ # via gradio
188
+ pytz==2026.1.post1
189
+ # via gradio
190
+ pyyaml==6.0.3
191
+ # via
192
+ # gradio
193
+ # huggingface-hub
194
+ referencing==0.37.0
195
+ # via
196
+ # jsonschema
197
+ # jsonschema-specifications
198
+ regex==2026.2.28
199
+ # via tiktoken
200
+ requests==2.32.5
201
+ # via tiktoken
202
+ rich==14.3.3
203
+ # via
204
+ # a-simple-llm-harness
205
+ # e2b
206
+ # typer
207
+ rpds-py==0.30.0
208
+ # via
209
+ # jsonschema
210
+ # referencing
211
+ safehttpx==0.1.7
212
+ # via gradio
213
+ semantic-version==2.10.0
214
+ # via gradio
215
+ shellingham==1.5.4
216
+ # via typer
217
+ six==1.17.0
218
+ # via python-dateutil
219
+ sniffio==1.3.1
220
+ # via openai
221
+ starlette==0.52.1
222
+ # via
223
+ # fastapi
224
+ # gradio
225
+ tiktoken==0.12.0
226
+ # via litellm
227
+ tokenizers==0.22.2
228
+ # via litellm
229
+ tomlkit==0.13.3
230
+ # via gradio
231
+ tqdm==4.67.3
232
+ # via
233
+ # huggingface-hub
234
+ # openai
235
+ typer==0.24.1
236
+ # via
237
+ # gradio
238
+ # huggingface-hub
239
+ typing-extensions==4.15.0
240
+ # via
241
+ # aiosignal
242
+ # anyio
243
+ # e2b
244
+ # fastapi
245
+ # gradio
246
+ # gradio-client
247
+ # huggingface-hub
248
+ # openai
249
+ # pydantic
250
+ # pydantic-core
251
+ # referencing
252
+ # starlette
253
+ # typing-inspection
254
+ typing-inspection==0.4.2
255
+ # via
256
+ # fastapi
257
+ # pydantic
258
+ tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
259
+ # via pandas
260
+ urllib3==2.6.3
261
+ # via requests
262
+ uvicorn==0.42.0
263
+ # via gradio
264
+ wcmatch==10.1
265
+ # via e2b
266
+ yarl==1.23.0
267
+ # via aiohttp
268
+ zipp==3.23.0
269
+ # via importlib-metadata
uv.lock CHANGED
@@ -410,6 +410,7 @@ dependencies = [
410
  { name = "a-simple-llm-harness" },
411
  { name = "e2b-code-interpreter" },
412
  { name = "gradio" },
 
413
  { name = "python-dotenv" },
414
  ]
415
 
@@ -418,6 +419,7 @@ requires-dist = [
418
  { name = "a-simple-llm-harness", git = "https://github.com/chuckfinca/a-simple-llm-harness.git" },
419
  { name = "e2b-code-interpreter", specifier = ">=2.5" },
420
  { name = "gradio", specifier = ">=5.0" },
 
421
  { name = "python-dotenv" },
422
  ]
423
 
 
410
  { name = "a-simple-llm-harness" },
411
  { name = "e2b-code-interpreter" },
412
  { name = "gradio" },
413
+ { name = "huggingface-hub" },
414
  { name = "python-dotenv" },
415
  ]
416
 
 
419
  { name = "a-simple-llm-harness", git = "https://github.com/chuckfinca/a-simple-llm-harness.git" },
420
  { name = "e2b-code-interpreter", specifier = ">=2.5" },
421
  { name = "gradio", specifier = ">=5.0" },
422
+ { name = "huggingface-hub" },
423
  { name = "python-dotenv" },
424
  ]
425