# ===========================================
# Medini Autonomous Agent - app.py
# Compatible with LangChain 0.3.x + langchain-community
# ===========================================
import os
import json
import threading
import re
from typing import Dict, Any
import gradio as gr
from fastapi import FastAPI, Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
import uvicorn
from transformers import pipeline
# ===== LangChain Community Imports =====
from langchain_community.llms import HuggingFacePipeline
from langchain_community.utilities import SerpAPIWrapper
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, Tool
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
# Python REPL tool moved out of core langchain into langchain_experimental
from langchain_experimental.tools.python.tool import PythonAstREPLTool
# ===========================================
# ENVIRONMENT VARIABLES
# ===========================================
HF_TOKEN = os.getenv("HF_TOKEN")
SERPAPI_KEY = os.getenv("SERPAPI_API_KEY")
JWT_SECRET = os.getenv("JWT_SECRET", "changeme123")
# ===========================================
# AUTH
# ===========================================
security = HTTPBearer()
def verify_jwt(credentials: HTTPAuthorizationCredentials = Depends(security)):
    # NOTE: despite the name, this is a plain shared-secret bearer check,
    # not cryptographic JWT verification.
    token = credentials.credentials
    if token != JWT_SECRET:
        raise HTTPException(status_code=403, detail="Invalid token")
    return True
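# Clients authenticate by sending the shared secret as a bearer token:
#   Authorization: Bearer <JWT_SECRET>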
# ===========================================
# MODEL LOADER
# ===========================================
MODEL_ID = "PuruAI/Medini_Intelligence"
FALLBACK_MODEL = "gpt2"
def load_llm():
    pipeline_kwargs = {"max_new_tokens": 512, "temperature": 0.7}
    try:
        # `token` replaces the deprecated `use_auth_token` argument in recent transformers
        model_pipeline = pipeline("text-generation", model=MODEL_ID, token=HF_TOKEN, **pipeline_kwargs)
    except Exception:
        print(f"Warning: Failed to load {MODEL_ID}. Falling back to {FALLBACK_MODEL}.")
        model_pipeline = pipeline("text-generation", model=FALLBACK_MODEL, **pipeline_kwargs)
    return HuggingFacePipeline(pipeline=model_pipeline)
llm = load_llm()
# ===========================================
# VECTOR MEMORY
# ===========================================
embeddings = HuggingFaceEmbeddings()
chroma_db = Chroma(persist_directory="./medini_memory", embedding_function=embeddings)
retriever = chroma_db.as_retriever()
qa_prompt_template = """
You are a question-answering system. Use the following context, which contains information retrieved from memory, to answer the user's question.
If the context is empty or does not contain the answer, state clearly that the information is not in memory.
Context:
{context}
Question: {question}
Answer:
"""
QA_PROMPT = PromptTemplate(template=qa_prompt_template, input_variables=["context", "question"])
qa_chain = LLMChain(llm=llm, prompt=QA_PROMPT)
def retrieve_and_answer(question: str) -> str:
    # `invoke` supersedes the deprecated `get_relevant_documents` in LangChain 0.3.x
    docs = retriever.invoke(question)
    context = "\n---\n".join([d.page_content for d in docs])
    return qa_chain.run(context=context, question=question)
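# Illustrative call (hypothetical question): retrieve_and_answer("What did step 1 produce?")
# returns an answer grounded in the step results that execute_step() persists to Chroma.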
# ===========================================
# TOOLS
# ===========================================
search = SerpAPIWrapper(serpapi_api_key=SERPAPI_KEY)
python_tool = PythonAstREPLTool()
tools = [
    Tool(name="Knowledge Recall", func=retrieve_and_answer, description="Retrieve info from Medini memory (Chroma DB)."),
    Tool(name="Web Search", func=search.run, description="Search the web for up-to-date information."),
    Tool(name="Python REPL", func=python_tool.run, description="Execute Python code for math/data manipulation."),
]
TOOL_MAP = {tool.name.lower().replace(" ", ""): tool.func for tool in tools}
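# Resulting keys after lower().replace(" ", ""): "knowledgerecall", "websearch",
# and "pythonrepl"; execute_step() matches tool_hint substrings against these.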
# ===========================================
# AGENT
# ===========================================
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent="conversational-react-description",
    memory=memory,
    verbose=True,
)
# ===========================================
# PLANNER
# ===========================================
plan_prompt = PromptTemplate(
    input_variables=["goal"],
    template="""
You are Medini Planner. Decompose the high-level goal into a JSON object containing a 'steps' array (max 6 steps). Each step must have: id (integer), name (short string), description (detailed instruction), and tool_hint (either 'recall', 'search', 'python', or 'agent').
Return JSON only.
Goal: {goal}
""",
)
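# Illustrative plan matching the schema the prompt asks for (hypothetical goal,
# not real model output):
# {
#   "steps": [
#     {"id": 1, "name": "Gather sources", "description": "Search the web for recent articles on the topic.", "tool_hint": "search"},
#     {"id": 2, "name": "Summarize", "description": "Condense the findings into three bullet points.", "tool_hint": "agent"}
#   ]
# }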
planner_chain = LLMChain(llm=llm, prompt=plan_prompt)
def create_plan(goal: str) -> Dict[str, Any]:
    raw = planner_chain.run(goal=goal)
    # Grab the first {...} block in case the model wraps the JSON in prose or code fences
    m = re.search(r"\{.*\}", raw, flags=re.DOTALL)
    json_str = m.group(0) if m else raw
    json_str = json_str.replace("```json", "").replace("```", "").strip()
    try:
        plan = json.loads(json_str)
        if "steps" not in plan:
            raise ValueError("Parsed JSON missing 'steps'.")
        return plan
    except json.JSONDecodeError as e:
        print(f"JSON Parsing Error: {e} in string: {json_str[:200]}...")
        raise ValueError("Planner returned malformed JSON.") from e
def execute_step(step: Dict[str, Any]) -> Dict[str, Any]:
    hint = (step.get("tool_hint") or "").lower()
    input_text = step.get("description")
    output, status = "Execution skipped.", "error"
    try:
        tool_func = None
        if "recall" in hint:
            tool_func = TOOL_MAP.get("knowledgerecall")
        elif "search" in hint:
            tool_func = TOOL_MAP.get("websearch")
        elif "python" in hint:
            tool_func = TOOL_MAP.get("pythonrepl")
        if tool_func:
            output = tool_func(input_text)
        else:
            # 'agent' hint or no match: fall back to the full conversational agent
            output = agent.run(input_text)
        status = "ok"
    except Exception as e:
        output = f"Execution Error: {str(e)}"
    # Persist every step result to Chroma so later steps and sessions can recall it
    chroma_db.add_documents(
        [Document(page_content=f"Step {step['id']} - {step['name']} Result: {output}")]
    )
    return {"id": step["id"], "name": step["name"], "status": status, "output": output}
def execute_plan(goal: str) -> Dict[str, Any]:
    try:
        plan = create_plan(goal)
    except ValueError as e:
        return {"goal": goal, "error": str(e)}
    results = [execute_step(step) for step in plan.get("steps", [])]
    return {"goal": goal, "plan": plan, "results": results}
# ===========================================
# FASTAPI BACKEND
# ===========================================
app = FastAPI(title="Medini Agent API")
@app.post("/chat")
def chat_endpoint(message: str, auth: bool = Depends(verify_jwt)):
response = agent.run(message)
return {"response": response}
@app.post("/goal")
def goal_endpoint(goal: str, auth: bool = Depends(verify_jwt)):
return execute_plan(goal)
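# Illustrative calls, assuming the defaults above (port 8000, JWT_SECRET="changeme123").
# Bare `str` parameters on these POST routes are query parameters in FastAPI:
#   curl -X POST "http://localhost:8000/chat?message=hello" -H "Authorization: Bearer changeme123"
#   curl -X POST "http://localhost:8000/goal?goal=plan%20my%20week" -H "Authorization: Bearer changeme123"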
# ===========================================
# GRADIO FRONTEND
# ===========================================
def gradio_chat(message, history):
    try:
        response = agent.run(message)
        history.append((message, response))
    except Exception as e:
        history.append((message, f"Error: {str(e)}"))
    return history, ""

def gradio_execute_plan(goal):
    try:
        return execute_plan(goal)
    except Exception as e:
        return {"error": f"Failed to execute plan: {str(e)}"}
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Medini Autonomous Agent")
    gr.Markdown("Chat or submit high-level goals. Agentic AI handles reasoning, memory, and tool use.")
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("## Conversational Chat")
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(placeholder="Type your message...", label="Chat Input")
            clear_btn = gr.Button("Clear Chat")
            msg.submit(gradio_chat, [msg, chatbot], [chatbot, msg])
            clear_btn.click(lambda: [], None, chatbot, queue=False)
        with gr.Column(scale=1):
            gr.Markdown("## Autonomous Goal Planner")
            goal_input = gr.Textbox(placeholder="Enter high-level goal...", label="Goal")
            run_goal_btn = gr.Button("Run Goal", variant="primary")
            gr.Markdown("---")
            gr.Markdown("### Execution Report")
            goal_output = gr.JSON(label="Plan and Results")
            run_goal_btn.click(gradio_execute_plan, [goal_input], goal_output)
# ===========================================
# LAUNCH
# ===========================================
if __name__ == "__main__":
    # Serve the FastAPI backend on a daemon thread; Gradio keeps the main thread
    def start_api():
        uvicorn.run(app, host="0.0.0.0", port=8000, log_level="critical")

    threading.Thread(target=start_api, daemon=True).start()
    demo.launch(share=False)