# agenticAi/agents/reasoning_agent.py
from typing import Any, Dict, List, Optional

from loguru import logger

from utils.llm_orchestrator import LLMOrchestrator


class ReasoningAgent:
    def __init__(self, llm_api_key: str):
        """Initialize the Reasoning Agent."""
        logger.info("Initializing ReasoningAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        self.capabilities = [
            "step_by_step_reasoning",
            "context_management",
            "agent_coordination",
            "result_aggregation",
        ]
        self.setup_logger()
    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/reasoning_agent.log", rotation="500 MB")

    async def perform_reasoning(
        self, goal: str, available_agents: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Perform step-by-step reasoning to achieve a goal."""
        logger.info(f"Performing reasoning for goal: {goal}")
        try:
            context = {
                "goal": goal,
                "available_agents": available_agents,
                "steps": [],
            }
            # Simple two-step reasoning loop; the step count can be raised or
            # replaced with a termination condition as needs grow.
            for step in range(2):
                prompt = self.generate_reasoning_prompt(context)
                response = await self.llm_orchestrator.generate_completion(prompt)
                logger.info(f"Reasoning step {step + 1}: {response}")
                action = self.extract_action(response)
                step_record = {
                    "step": step + 1,
                    "prompt": prompt,
                    "response": response,
                    "action": action,
                }
                if action and action["agent"] != "reasoning_agent":
                    # Simulate delegating the action to another agent; in a
                    # real scenario this would call the appropriate agent.
                    step_record["action_result"] = await self.execute_agent_action(
                        action, context
                    )
                context["steps"].append(step_record)
            return {
                "status": "success",
                "reasoning_process": context["steps"],
                # Placeholder for the final aggregated result.
                "result": "Reasoning process completed.",
            }
        except Exception as e:
            logger.error(f"Error during reasoning: {str(e)}")
            return {
                "status": "error",
                "message": str(e),
            }

    def generate_reasoning_prompt(self, context: Dict[str, Any]) -> str:
        """Generate a prompt for the LLM to guide the reasoning process."""
        agent_names = ", ".join(agent["name"] for agent in context["available_agents"])
        prompt = (
            f"Goal: {context['goal']}\n"
            f"Available Agents: {agent_names}\n"
            "Reasoning Steps:\n"
        )
        for step in context["steps"]:
            prompt += f"- Step {step['step']}: {step['response']}\n"
            if step.get("action"):
                prompt += f"  Action: {step['action']}\n"
            if step.get("action_result"):
                prompt += f"  Result: {step['action_result']}\n"
        prompt += "What is the next logical step to achieve the goal? Explain your reasoning."
        return prompt

    def extract_action(self, response: str) -> Optional[Dict[str, Any]]:
        """Extract the next action to take from the LLM's response.

        Basic heuristic: assume the last line of the response names the
        action as "agent: parameters", e.g. a response ending in
        "search_agent: find recent papers" yields
        {"agent": "search_agent", "parameters": "find recent papers"}.
        """
        last_line = response.strip().split("\n")[-1]
        if ":" not in last_line:
            return None
        # partition (rather than split) keeps any colons inside the
        # parameters intact.
        agent, _, parameters = last_line.partition(":")
        return {
            "agent": agent.strip(),
            "parameters": parameters.strip(),
        }

    async def execute_agent_action(
        self, action: Dict[str, Any], context: Dict[str, Any]
    ) -> str:
        """Simulate executing an action with another agent."""
        # Placeholder for actual agent execution: in a real scenario this
        # method would dispatch to the agent named in action["agent"] and
        # pass along action["parameters"].
        agent_info = next(
            (
                agent
                for agent in context["available_agents"]
                if agent["name"] == action["agent"]
            ),
            None,
        )
        if agent_info:
            logger.info(
                f"Executing action with agent: {action['agent']} "
                f"with parameters: {action['parameters']}"
            )
            # Simulate an action result.
            return f"Result of action with {action['agent']}: Success"
        logger.error(f"Agent {action['agent']} not found.")
        return "Error: Agent not found"