| import cmath |
| import os |
| import tempfile |
| from urllib.parse import urlparse |
| import uuid |
| from langchain_core.tools import tool |
| from langchain_community.tools.tavily_search import TavilySearchResults |
| from langchain_community.document_loaders import WikipediaLoader |
| from langchain_community.document_loaders import ArxivLoader |
| import requests |
|
|
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    product = a * b
    return product
|
|
|
|
@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    # Bug fix: this previously returned a - b (a copy-paste of subtract),
    # so the "add" tool silently subtracted its arguments.
    return a + b
|
|
|
|
@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    difference = a - b
    return difference
|
|
|
|
@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int

    Raises:
        ValueError: if b is zero.
    """
    # Fix: the return annotation was `int`, but `/` is true division and
    # always yields a float in Python 3.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
|
|
|
|
@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    remainder = a % b
    return remainder
|
|
|
|
@tool
def power(a: float, b: float) -> float:
    """
    Get the power of two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    # pow() is equivalent to the ** operator for numeric operands.
    return pow(a, b)
|
|
|
|
@tool
def square_root(a: float) -> float | complex:
    """
    Get the square root of a number.
    Args:
        a (float): the number to get the square root of
    """
    # Negative inputs fall back to cmath so a complex root is returned
    # instead of raising.
    return a**0.5 if a >= 0 else cmath.sqrt(a)
|
|
|
|
@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query.

    Returns:
        A dict with a single "web_results" key containing the formatted
        results as one string.
    """
    # Fixes:
    # 1. BaseTool.invoke takes the tool input positionally; the previous
    #    .invoke(query=query) raised a TypeError.
    # 2. TavilySearchResults returns a list of plain dicts with "url" and
    #    "content" keys, not Document objects, so doc.metadata / doc.page_content
    #    raised AttributeError.
    # 3. The return annotation now matches the actual dict return value.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
        for doc in search_docs
    )
    return {"web_results": formatted_search_docs}
|
|
|
|
@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.

    Returns:
        A dict with a single "wiki_results" key containing the formatted
        documents as one string.
    """
    # Fix: the return annotation said `str`, but the function returns a dict.
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
        for doc in search_docs
    )
    return {"wiki_results": formatted_search_docs}
|
|
|
|
@tool
def arxiv_search(query: str) -> dict:
    """Search Arxiv for a query and return maximum 3 results.

    Args:
        query: The search query.

    Returns:
        A dict with a single "arxiv_results" key containing the formatted
        documents (content truncated to 1000 chars each) as one string.
    """
    # Fix: the return annotation said `str`, but the function returns a dict.
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        # Truncate page content so three full papers don't blow up the prompt.
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
    return {"arxiv_results": formatted_search_docs}
|
|
|
|
@tool
def download_file(url: str) -> str:
    """Download a file from a web URL and return the local save path.

    Args:
        url: the file web url

    Returns:
        A human-readable message with the local path on success, or an
        error message on failure (best-effort: exceptions are not raised).
    """
    try:
        # Bug fix: `filename` was read before it was ever assigned, so this
        # tool always raised NameError. Derive the name from the URL path
        # and fall back to a random name when the path has no basename.
        filename = os.path.basename(urlparse(url).path)
        if not filename:
            filename = f"downloaded_{uuid.uuid4().hex[:8]}"

        filepath = os.path.join(tempfile.gettempdir(), filename)

        # Stream the response so large files are not buffered in memory;
        # a timeout prevents the agent from hanging on a dead server.
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()

        with open(filepath, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        return f"File {url} downloaded to {filepath}. You can read this file to process its contents."
    except Exception as e:
        return f"Error downloading file: {str(e)}"
|
|