helloparthshah commited on
Commit
21611df
·
1 Parent(s): 8a7487d

pushing everything I have so far

Browse files
CEO/CEO.py CHANGED
@@ -32,7 +32,7 @@ class Specialization(Enum):
32
  # Enum for Model Parameters (Temperature, num_ctx, etc.)
33
  class ModelParameters(Enum):
34
  NUM_CTX = 4096
35
- TEMPERATURE = 0.7 # A typical temperature value for model responses
36
  TOP_K = 50 # Number of top tokens to consider during generation
37
 
38
  class Subtask(BaseModel):
@@ -56,11 +56,17 @@ class APIUtilization(BaseModel):
56
  class AgentManagement(BaseModel):
57
  hired: List[Agent] = Field(default=[], description="List of hired agents")
58
 
 
 
 
 
59
  class CEOResponse(BaseModel):
60
- decision: str = Field(..., description="Decision made by the CEO: Hire or Assign_API")
61
- task_breakdown: List[Subtask] = Field(..., description="List of decomposed subtasks")
62
- agent_management: AgentManagement = Field(..., description="Details of agent hiring")
63
- api_utilization: Optional[List[APIUtilization]] = Field(default=None, description="List of utilized APIs, if any")
 
 
64
 
65
  class OllamaModelManager:
66
  def __init__(self, toolsLoader: ToolLoader, model_name="HASHIRU-CEO", system_prompt_file="./models/system.prompt"):
@@ -70,23 +76,24 @@ class OllamaModelManager:
70
  self.system_prompt_file = system_prompt_file
71
  self.toolsLoader = toolsLoader
72
  self.toolsLoader.load_tools()
73
- self.create_model(model_name)
74
 
75
  def is_model_loaded(self, model):
76
  loaded_models = [m.model for m in ollama.list().models]
77
  return model in loaded_models or f'{model}:latest' in loaded_models
78
 
79
- def create_model(self, base_model='llama3.2'):
80
  with open(self.system_prompt_file, 'r', encoding="utf8") as f:
81
  system = f.read()
 
82
 
83
  if not self.is_model_loaded(self.model_name):
84
  print(f"Creating model {self.model_name}")
85
  ollama.create(
86
  model=self.model_name,
87
- from_='llama3.1',
88
  system=system,
89
- parameters={"num_ctx": ModelParameters.NUM_CTX.value, "temperature": ModelParameters.TEMPERATURE.value}
90
  )
91
 
92
  def request(self, messages):
@@ -97,6 +104,7 @@ class OllamaModelManager:
97
  # format=CEOResponse.model_json_schema(),
98
  tools=self.toolsLoader.getTools(),
99
  )
 
100
  # response = CEOResponse.model_validate_json(response['message']['content'])
101
  if "EOF" in response.message.content:
102
  return messages
@@ -109,10 +117,21 @@ class OllamaModelManager:
109
  if "role" in toolResponse:
110
  role = toolResponse["role"]
111
  messages.append({"role": role, "content": str(toolResponse)})
 
112
  self.request(messages)
 
 
 
 
 
 
 
 
 
 
113
  else:
114
  print("No tool calls found in the response.")
115
- messages.append({"role": "assistant", "content": response.message.content})
116
  print(f"Messages: {messages}")
117
  # ask_user_tool = AskUser()
118
  # ask_user_response = ask_user_tool.run(prompt=response.message.content)
 
32
  # Enum for Model Parameters (Temperature, num_ctx, etc.)
33
  class ModelParameters(Enum):
34
  NUM_CTX = 4096
35
+ TEMPERATURE = 0.2 # A typical temperature value for model responses
36
  TOP_K = 50 # Number of top tokens to consider during generation
37
 
38
  class Subtask(BaseModel):
 
56
  class AgentManagement(BaseModel):
57
  hired: List[Agent] = Field(default=[], description="List of hired agents")
58
 
59
+ class ToolCall(BaseModel):
60
+ function: str = Field(..., description="Name of the function to be called")
61
+ arguments: Dict[str, str] = Field(..., description="Arguments for the function call")
62
+
63
  class CEOResponse(BaseModel):
64
+ # decision: str = Field(..., description="Decision made by the CEO: Hire or Assign_API")
65
+ # task_breakdown: List[Subtask] = Field(..., description="List of decomposed subtasks")
66
+ # agent_management: AgentManagement = Field(..., description="Details of agent hiring")
67
+ # api_utilization: Optional[List[APIUtilization]] = Field(default=None, description="List of utilized APIs, if any")
68
+ tools: List[ToolCall] = Field(default=None, description="List of tool or agent calls made by the model")
69
+ message: str = Field(default=None, description="Message content from the model")
70
 
71
  class OllamaModelManager:
72
  def __init__(self, toolsLoader: ToolLoader, model_name="HASHIRU-CEO", system_prompt_file="./models/system.prompt"):
 
76
  self.system_prompt_file = system_prompt_file
77
  self.toolsLoader = toolsLoader
78
  self.toolsLoader.load_tools()
79
+ self.create_model()
80
 
81
  def is_model_loaded(self, model):
82
  loaded_models = [m.model for m in ollama.list().models]
83
  return model in loaded_models or f'{model}:latest' in loaded_models
84
 
85
+ def create_model(self):
86
  with open(self.system_prompt_file, 'r', encoding="utf8") as f:
87
  system = f.read()
88
+ # system += "Tools\n"+str(self.toolsLoader.getTools())
89
 
90
  if not self.is_model_loaded(self.model_name):
91
  print(f"Creating model {self.model_name}")
92
  ollama.create(
93
  model=self.model_name,
94
+ from_='mistral-nemo',
95
  system=system,
96
+ parameters={"temperature": ModelParameters.TEMPERATURE.value}
97
  )
98
 
99
  def request(self, messages):
 
104
  # format=CEOResponse.model_json_schema(),
105
  tools=self.toolsLoader.getTools(),
106
  )
107
+ print(f"Response: {response}")
108
  # response = CEOResponse.model_validate_json(response['message']['content'])
109
  if "EOF" in response.message.content:
110
  return messages
 
117
  if "role" in toolResponse:
118
  role = toolResponse["role"]
119
  messages.append({"role": role, "content": str(toolResponse)})
120
+ self.toolsLoader.load_tools()
121
  self.request(messages)
122
+ # if response.tools:
123
+ # for tool_call in response.tools:
124
+ # print(f"Tool Name: {tool_call.function}, Arguments: {tool_call.arguments}")
125
+ # toolResponse = self.toolsLoader.runTool(tool_call.function, tool_call.arguments)
126
+ # print(f"Tool Response: {toolResponse}")
127
+ # role = "tool"
128
+ # if "role" in toolResponse:
129
+ # role = toolResponse["role"]
130
+ # messages.append({"role": role, "content": str(toolResponse)})
131
+ # self.request(messages)
132
  else:
133
  print("No tool calls found in the response.")
134
+ messages.append({"role": "assistant", "content": response.message})
135
  print(f"Messages: {messages}")
136
  # ask_user_tool = AskUser()
137
  # ask_user_response = ask_user_tool.run(prompt=response.message.content)
CEO/tool_loader.py CHANGED
@@ -48,7 +48,11 @@ class ToolLoader:
48
  for tool in self.toolsImported:
49
  if tool.name == toolName:
50
  return tool.run(query)
51
- return None
 
 
 
 
52
 
53
  def getTools(self):
54
  toolsList = []
 
48
  for tool in self.toolsImported:
49
  if tool.name == toolName:
50
  return tool.run(query)
51
+ return {
52
+ "status": "error",
53
+ "message": f"Tool {toolName} not found",
54
+ "output": None
55
+ }
56
 
57
  def getTools(self):
58
  toolsList = []
main.py CHANGED
@@ -49,7 +49,7 @@ if __name__ == "__main__":
49
  # The prompt explicitly mentions that it can use the web_search tool if needed,
50
  # and that it is allowed to choose the website for the search.
51
  task_prompt = (
52
- "Your task is to create a marketing strategy for Ashton Hall, a morning routine creator with 10M followers."
53
  )
54
 
55
  # Request a CEO response with the prompt.
 
49
  # The prompt explicitly mentions that it can use the web_search tool if needed,
50
  # and that it is allowed to choose the website for the search.
51
  task_prompt = (
52
+ "Create a tool to get the current system time and invoke it to get the current time."
53
  )
54
 
55
  # Request a CEO response with the prompt.
models/system copy.prompt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are HASHIRU. You are designed to assist users with their queries and provide information. You are not allowed to provide any personal opinions or engage in discussions that are not related to the user's query. Your responses should be concise and informative, focusing on the user's needs. Always prioritize user privacy and security, and avoid sharing any sensitive information. If you encounter a question that is outside your expertise, politely inform the user and suggest they seek assistance from a qualified professional.
2
+
3
+ <Info>
4
+ Tools are external programs used to perform specific tasks. You can create, invoke, and manage these tools to assist users with their queries. Each tool has a specific purpose and input schema that must be followed strictly. You can also create agents with specific capabilities to handle more complex tasks or questions. Agents can be created, invoked, and managed similarly to tools. Always ensure that the tools and agents you create are relevant to the user's query and follow the required schema.
5
+ </Info>
6
+
7
+ <Info>
8
+ Agents are invoked through tools as well by using the AskAgent tool. Agents can be created with specific capabilities to handle more complex tasks or questions. Always ensure that the agents you create are relevant to the user's query and follow the required schema.
9
+ </Info>
10
+
11
+ Here's a set of rules you must follow:
12
+ <Rule>
13
+ You will never answer any questions directly but rather break down the question into smaller parts and invoke tools to get the answer.
14
+ </Rule>
15
+
16
+ <Rule>
17
+ Your output should always consist of tool invocations. The only exception to this rule is when you are providing the final answer to the user.
18
+ </Rule>
19
+
20
+ <Rule>
21
+ If you need more information to answer the question, ask the user for clarification or additional details by invoking the AskUser tool.
22
+ </Rule>
23
+
24
+ <Rule>
25
+ Always invoke GetAgents tool to get the list of available agents and their capabilities before invoking any other tools.
26
+ </Rule>
27
+
28
+ <Rule>
29
+ If an agent isn't already available, invoke the CreateAgent tool to create a new agent with the required capabilities. You're an expert in prompt engineering and can create agents with specific skills.
30
+ </Rule>
31
+
32
+ <Rule>
33
+ Once an Agent is created, use the AskAgent tool to ask the agent the question or request the information needed.
34
+ </Rule>
35
+
36
+ <Rule>
37
+ If the agent is not able to answer the question, invoke the AskUser tool to get more information or clarify the question.
38
+ </Rule>
39
+
40
+ <Rule>
41
+ In order to execute tasks on real-time data, math calculations, or any other operations, invoke the CreateTool tool to create a new tool with the required capabilities. The tools are created in Python and must follow this strict schema:
42
+ import importlib
43
+
44
+ __all__ = ['WeatherApi']
45
+
46
+
47
+ class WeatherApi():
48
+ dependencies = ["requests==2.32.3"]
49
+
50
+ inputSchema = {
51
+ "name": "WeatherApi",
52
+ "description": "Returns weather information for a given location",
53
+ "parameters": {
54
+ "type": "object",
55
+ "properties": {
56
+ "location": {
57
+ "type": "string",
58
+ "description": "The location for which to get the weather information",
59
+ },
60
+ },
61
+ "required": ["location"],
62
+ }
63
+ }
64
+
65
+ def __init__(self):
66
+ pass
67
+
68
+ def run(self, **kwargs):
69
+ print("Running Weather API test tool")
70
+ location = kwargs.get("location")
71
+ print(f"Location: {location}")
72
+
73
+ requests = importlib.import_module("requests")
74
+
75
+ response = requests.get(
76
+ f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid=ea50e63a3bea67adaf50fbecbe5b3c1e")
77
+ if response.status_code == 200:
78
+ return {
79
+ "status": "success",
80
+ "message": "Weather API test tool executed successfully",
81
+ "error": None,
82
+ "output": response.json()
83
+ }
84
+ else:
85
+ return {
86
+ "status": "error",
87
+ "message": "Weather API test tool failed",
88
+ "error": response.text,
89
+ "output": None
90
+ }
91
+
92
+ </Rule>
93
+
94
+ <Rule>
95
+ Strictly follow the schema required for invoking the tools and agents. Do not deviate from it.
96
+ </Rule>
97
+
98
+ <Rule>
99
+ Once you have the answer, provide it to the user in a clear and concise manner, ending with an "EOF" message.
100
+ </Rule>
models/system.prompt CHANGED
@@ -11,6 +11,10 @@ You are HASHIRU, a CEO-level AI responsible for managing a team of AI agents (em
11
 
12
  Condense context intelligently to maximize reasoning capabilities across different model context windows.
13
 
 
 
 
 
14
  ⚙️ Core Functionalities
15
 
16
  ✅ 1. Agent Hiring and Firing
@@ -113,4 +117,12 @@ You are HASHIRU, a CEO-level AI responsible for managing a team of AI agents (em
113
 
114
  No Model Overload: Avoid excessive model hiring. If a task can be solved by fewer agents, do not over-provision.
115
 
116
- Clarification Over Guessing: If task requirements are ambiguous, ask the user for clarification instead of guessing.
 
 
 
 
 
 
 
 
 
11
 
12
  Condense context intelligently to maximize reasoning capabilities across different model context windows.
13
 
14
+ Tools are defined in the tools/ directory, and you can create new tools as needed using the CreateTool tool.
15
+
16
+ Read the existing tools using the ListFiles and ReadFile tools to understand how they work and create new ones following the same schema.
17
+
18
  ⚙️ Core Functionalities
19
 
20
  ✅ 1. Agent Hiring and Firing
 
117
 
118
  No Model Overload: Avoid excessive model hiring. If a task can be solved by fewer agents, do not over-provision.
119
 
120
+ Clarification Over Guessing: If task requirements are ambiguous, ask the user for clarification instead of guessing.
121
+
122
+ If invoking an agent or API fails, retry the invocation with a different approach or ask the user for more information.
123
+
124
+ Avoid Redundant Tasks: If a task has already been completed, do not reassign it unless necessary.
125
+
126
+ Never respond directly to user queries. Always break down the question into smaller parts and invoke tools to get the answer.
127
+
128
+ Tools are present in the tools/ directory, use the ListFiles and ReadFile tools to look at how existing tools are implemented to create new ones.
tools/agent_creater_tool.py CHANGED
@@ -9,7 +9,7 @@ class AgentCreator():
9
 
10
  inputSchema = {
11
  "name": "AgentCreator",
12
- "description": "Creates an AI agent using the ollama library. Before creating an Agent, please get the list of available models using the GetAgents tool. Once the model is created, you can use the AskAgent tool to ask the agent a question.",
13
  "parameters": {
14
  "type": "object",
15
  "properties":{
@@ -79,11 +79,9 @@ class AgentCreator():
79
  return {
80
  "status": "success",
81
  "message": "Agent successfully created",
82
- "output": models
83
  }
84
  else:
85
  return {
86
  "status": "error",
87
  "message": "Agent creation failed",
88
- "output": models
89
  }
 
9
 
10
  inputSchema = {
11
  "name": "AgentCreator",
12
+ "description": "Creates an AI agent for you. Please make sure to invoke the created agent using the AskAgent tool.",
13
  "parameters": {
14
  "type": "object",
15
  "properties":{
 
79
  return {
80
  "status": "success",
81
  "message": "Agent successfully created",
 
82
  }
83
  else:
84
  return {
85
  "status": "error",
86
  "message": "Agent creation failed",
 
87
  }
tools/get_agents_tool.py CHANGED
@@ -25,5 +25,5 @@ class GetAgents():
25
  return {
26
  "status": "success",
27
  "message": "Agents list retrieved successfully",
28
- "output": models,
29
  }
 
25
  return {
26
  "status": "success",
27
  "message": "Agents list retrieved successfully",
28
+ "agents": models,
29
  }
tools/list_files.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ __all__ = ['ListFiles']
3
+
4
+
5
+ class ListFiles():
6
+ dependencies = []
7
+
8
+ inputSchema = {
9
+ "name": "ListFiles",
10
+ "description": "Lists all files in a directory",
11
+ "parameters": {
12
+ "type": "object",
13
+ "properties": {
14
+ "directory": {
15
+ "type": "string",
16
+ "description": "The directory to list files from",
17
+ },
18
+ },
19
+ "required": ["directory"],
20
+ }
21
+ }
22
+
23
+ def __init__(self):
24
+ pass
25
+
26
+ def run(self, **kwargs):
27
+ print("Running List Files tool")
28
+ directory = kwargs.get("directory")
29
+ print(f"Directory: {directory}")
30
+ try:
31
+ import os
32
+ files = os.listdir(directory)
33
+ return {
34
+ "status": "success",
35
+ "message": "Files listed successfully",
36
+ "error": None,
37
+ "output": files
38
+ }
39
+ except Exception as e:
40
+ return {
41
+ "status": "error",
42
+ "message": "Failed to list files",
43
+ "error": str(e),
44
+ "output": None
45
+ }
tools/read_file.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+
3
+ __all__ = ['ReadFile']
4
+
5
+
6
+ class ReadFile():
7
+ dependencies = []
8
+
9
+ inputSchema = {
10
+ "name": "ReadFile",
11
+ "description": "Reads a file and returns its content",
12
+ "parameters": {
13
+ "type": "object",
14
+ "properties": {
15
+ "file_path": {
16
+ "type": "string",
17
+ "description": "The path to the file to read",
18
+ },
19
+ },
20
+ "required": ["file_path"],
21
+ }
22
+ }
23
+
24
+ def __init__(self):
25
+ pass
26
+
27
+ def run(self, **kwargs):
28
+ print("Running Read File tool")
29
+ file_path = kwargs.get("file_path")
30
+ print(f"File Path: {file_path}")
31
+ try:
32
+ with open(file_path, "r", encoding="utf8") as f:
33
+ content = f.read()
34
+ return {
35
+ "status": "success",
36
+ "message": "File read successfully",
37
+ "error": None,
38
+ "output": content
39
+ }
40
+ except Exception as e:
41
+ return {
42
+ "status": "error",
43
+ "message": "Failed to read file",
44
+ "error": str(e),
45
+ "output": None
46
+ }
tools/tool_creator.py CHANGED
@@ -16,61 +16,9 @@ class ToolCreator():
16
  "type": "string",
17
  "description": "The name of the tool to create",
18
  },
19
- "content": {
20
  "type": "string",
21
- "description": "The content of the tool to create",
22
- "examples": ["""
23
- import importlib
24
-
25
- __all__ = ['WeatherApi']
26
-
27
-
28
- class WeatherApi():
29
- dependencies = ["requests==2.32.3"]
30
-
31
- inputSchema = {
32
- "name": "WeatherApi",
33
- "description": "Returns weather information for a given location",
34
- "parameters": {
35
- "type": "object",
36
- "properties": {
37
- "location": {
38
- "type": "string",
39
- "description": "The location for which to get the weather information",
40
- },
41
- },
42
- "required": ["location"],
43
- }
44
- }
45
-
46
- def __init__(self):
47
- pass
48
-
49
- def run(self, **kwargs):
50
- print("Running Weather API test tool")
51
- location = kwargs.get("location")
52
- print(f"Location: {location}")
53
-
54
- requests = importlib.import_module("requests")
55
-
56
- response = requests.get(
57
- f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid=ea50e63a3bea67adaf50fbecbe5b3c1e")
58
- if response.status_code == 200:
59
- return {
60
- "status": "success",
61
- "message": "Weather API test tool executed successfully",
62
- "error": None,
63
- "output": response.json()
64
- }
65
- else:
66
- return {
67
- "status": "error",
68
- "message": "Weather API test tool failed",
69
- "error": response.text,
70
- "output": None
71
- }
72
-
73
- """]
74
  },
75
  },
76
  "required": ["name", "content"],
@@ -83,7 +31,7 @@ def run(self, **kwargs):
83
  def run(self, **kwargs):
84
  print("Running Tool Creator")
85
  name = kwargs.get("name")
86
- content = kwargs.get("content")
87
  print(f"Tool Name: {name}")
88
  print(f"Tool Content: {content}")
89
  # Create the tool file
 
16
  "type": "string",
17
  "description": "The name of the tool to create",
18
  },
19
+ "tool_code": {
20
  "type": "string",
21
+ "description": "The code of the tool to create",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  },
23
  },
24
  "required": ["name", "content"],
 
31
  def run(self, **kwargs):
32
  print("Running Tool Creator")
33
  name = kwargs.get("name")
34
+ content = kwargs.get("tool_code")
35
  print(f"Tool Name: {name}")
36
  print(f"Tool Content: {content}")
37
  # Create the tool file