| """LangGraph Agent""" |
| import os |
| from dotenv import load_dotenv |
| from langgraph.graph import START, StateGraph, MessagesState |
| from langgraph.prebuilt import tools_condition |
| from langgraph.prebuilt import ToolNode |
| from langchain_google_genai import ChatGoogleGenerativeAI |
| from langchain_groq import ChatGroq |
| from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings |
| from langchain_community.tools.tavily_search import TavilySearchResults |
| from langchain_community.document_loaders import WikipediaLoader |
| from langchain_community.document_loaders import ArxivLoader |
| from langchain_community.vectorstores import SupabaseVectorStore |
| from langchain_core.messages import SystemMessage, HumanMessage |
| from langchain_core.tools import tool |
|
|
# Configure the rwkv runtime before importing it: enable the v7 architecture
# and the JIT kernels, and disable the custom CUDA kernel (CPU inference).
os.environ["RWKV_V7_ON"] = "1"
os.environ["RWKV_JIT_ON"] = "1"
os.environ["RWKV_CUDA_ON"] = "0"
|
|
from huggingface_hub import hf_hub_download
from rwkv.model import RWKV
from rwkv.utils import PIPELINE, PIPELINE_ARGS
|
|
load_dotenv()
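
# The providers below read credentials from the environment populated by
# load_dotenv(). As an assumption based on each integration's documented
# defaults: ChatGoogleGenerativeAI looks for GOOGLE_API_KEY, ChatGroq for
# GROQ_API_KEY, HuggingFaceEndpoint for HUGGINGFACEHUB_API_TOKEN, and
# TavilySearchResults for TAVILY_API_KEY.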
|
|
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        # The system prompt asks the model to reply as "FINAL ANSWER: <answer>";
        # strip that exact prefix rather than slicing a magic 14 characters.
        return answer.removeprefix("FINAL ANSWER: ")
|
|
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b
|
|
@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b
|
|
@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b
|
|
@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
|
|
@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b
|
|
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        query: The search query."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return formatted_search_docs
|
|
@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        query: The search query."""
    # TavilySearchResults returns a list of {"url": ..., "content": ...} dicts
    # rather than Document objects, so index into the dicts directly.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{result["url"]}">\n{result["content"]}\n</Document>'
            for result in search_results
        ])
    return formatted_search_docs
|
|
@tool
def arxiv_search(query: str) -> str:
    """Search Arxiv for a query and return a maximum of 3 results.

    Args:
        query: The search query."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # ArxivLoader documents carry metadata keys such as "Title" and
    # "Published" rather than "source", so build the header from those.
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document title="{doc.metadata.get("Title", "")}" published="{doc.metadata.get("Published", "")}">\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return formatted_search_docs
|
|
# Load the shared system prompt used by every provider.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()


tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
]
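

# The RWKV branch of build_graph() below instructs the model to emit tool
# calls as plain text of the form `tool_name(arg1=..., arg2=...)`, but the
# shim never parses such replies. The helper below is a minimal, hypothetical
# sketch (not part of the original agent) of how that convention could be
# matched against the registered tools; it assumes simple Python-literal
# keyword arguments and accepts nothing else.
import ast
import re


def parse_tool_call(reply: str, available_tools: list):
    """Return (tool, kwargs) if `reply` looks like `name(k=v, ...)`, else None."""
    match = re.fullmatch(r"\s*`?(\w+)\((.*)\)`?\s*", reply, flags=re.DOTALL)
    if match is None:
        return None
    name, arg_str = match.group(1), match.group(2)
    by_name = {t.name: t for t in available_tools}
    if name not in by_name:
        return None
    kwargs = {}
    if arg_str.strip():
        for part in arg_str.split(","):
            key, _, value = part.partition("=")
            kwargs[key.strip()] = ast.literal_eval(value.strip())
    return by_name[name], kwargs


# Example: parse_tool_call("multiply(a=3, b=4)", tools) would return the
# multiply tool and {"a": 3, "b": 4}, ready for tool.invoke(kwargs).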
|
|
def build_graph(provider: str = "rwkv"):
    """Build the agent graph for the chosen model provider."""
    if provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
    elif provider == "huggingface":
        # HuggingFaceEndpoint takes the endpoint via `endpoint_url`, not `url`.
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    elif provider == "rwkv":
        # Download the checkpoint; the rwkv runtime expects the model path
        # without the .pth extension.
        title = "rwkv7-g1-1.5b-20250429-ctx4096"
        pth = hf_hub_download(repo_id="BlinkDL/rwkv7-g1", filename=f"{title}.pth")
        model_path = pth.replace(".pth", "")

        # fp16 is not recommended for CPU inference in the rwkv runtime, so use fp32.
        raw_llm = RWKV(model=model_path, strategy="cpu fp32")
        pipeline = PIPELINE(raw_llm, "rwkv_vocab_v20230424")
|
|
        class RWKVWithTools:
            """Minimal chat-model shim so the RWKV pipeline can sit in the graph."""

            def __init__(self, pipeline, system_prompt: str):
                self.pipeline = pipeline
                self.system_prompt = system_prompt
                self.tools = []

            def bind_tools(self, tools):
                self.tools = tools
                return self

            def invoke(self, messages):
                # Describe each tool (name, argument schema, description) so the
                # model can see what is callable.
                specs = []
                for t in self.tools:
                    specs.append(f"- {t.name}({t.args}): {t.description}")

                header = (
                    f"{self.system_prompt}\n\n"
                    "TOOLS AVAILABLE:\n"
                    + "\n".join(specs)
                    + "\n\n"
                    "To call a tool, respond exactly with:\n"
                    "`<tool_name>(arg1=…,arg2=…)` and nothing else.\n\n"
                )

                # Flatten the message history into a plain-text transcript.
                convo = "\n".join(
                    f"{'User:' if isinstance(m, HumanMessage) else 'Assistant:'} {m.content}"
                    for m in messages
                )

                prompt = header + convo
                print(f"Prompt: {prompt}")

                out_str = self.pipeline.generate(prompt, token_count=300)
                print(f"Response: {out_str}")

                # Wrap the raw string in an AIMessage so MessagesState and
                # tools_condition can treat it like any other chat reply.
                return AIMessage(content=out_str)

        llm = RWKVWithTools(pipeline, system_prompt=system_prompt)
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq', 'huggingface' or 'rwkv'.")

    # Register the tools with the model (for RWKV this only stores them on the shim).
    llm_with_tools = llm.bind_tools(tools)
|
|
    def assistant(state: MessagesState):
        """Assistant node: run the tool-aware model on the current messages."""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "assistant")
    # Route to the tools node when the last message requests a tool call,
    # otherwise finish the run.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
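

# A minimal smoke test, assuming system_prompt.txt exists next to this file
# and the default provider's model can be loaded; the question is an
# illustrative placeholder, not part of the original agent.
if __name__ == "__main__":
    agent = BasicAgent()
    print(agent("What is 12 multiplied by 7?"))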