|
import os |
|
import logging |
|
from config import MODEL_NAME |
|
from dotenv import load_dotenv |
|
from langchain_groq import ChatGroq |
|
from langchain.agents import AgentExecutor |
|
from langchain.memory import ConversationBufferWindowMemory |
|
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder |
|
from langchain.agents import create_tool_calling_agent |
|
from langchain_core.utils.function_calling import convert_to_openai_function |
|
from utils import book_slot, check_slots, reschedule_event, delete_event |
|
|
|
# Load variables from a local .env file so API_KEY can come from there.
load_dotenv()

# Root-logger config at import time. NOTE(review): DEBUG is very verbose
# (includes library internals) — confirm INFO isn't the intended level.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

# Groq API key; deliberately raises KeyError at import time if unset,
# failing fast instead of erroring on the first request.
API_KEY = os.environ["API_KEY"]
|
|
|
def create_agent(PROMPT):
    """Build an AgentExecutor wired with the calendar tools, a windowed
    conversation memory, and a Groq-hosted chat model.

    Args:
        PROMPT: System-prompt string placed at the top of the chat template.

    Returns:
        AgentExecutor: executor accepting ``{"input": ...}`` via ``.invoke()``.
    """
    # Keep only the last 5 exchanges so the prompt context stays bounded.
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        return_messages=True,
        k=5,
    )

    prompt_template = ChatPromptTemplate.from_messages([
        ("system", PROMPT),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        # Scratchpad carries the agent's intermediate tool calls/observations.
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    tools = [book_slot, delete_event, check_slots, reschedule_event]

    # BUGFIX: do NOT pre-bind tools here. The previous
    # `.bind_functions(functions=[convert_to_openai_function(f) ...])` call
    # used an API ChatGroq does not expose (it provides `bind_tools`), and
    # `create_tool_calling_agent` binds the tools to the model itself —
    # so the pre-binding was both broken and redundant.
    llm = ChatGroq(
        model=MODEL_NAME,
        temperature=0.7,
        max_tokens=None,   # provider default output cap
        timeout=60,        # seconds before a request is abandoned
        max_retries=2,
        api_key=API_KEY,
    )

    # create_tool_calling_agent attaches `tools` to `llm` internally.
    agent = create_tool_calling_agent(llm, tools, prompt_template)

    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=True,
    )

    return agent_executor
|
|
|
|
|
def process_query(query: str):
    """Run a single user query through a freshly built agent.

    Args:
        query: Natural-language request forwarded as the agent's "input".

    Returns:
        The response mapping produced by ``AgentExecutor.invoke``.

    Raises:
        Exception: anything raised by the agent pipeline is logged with its
            traceback and re-raised for the caller to handle.
    """
    # NOTE(review): a new agent — and therefore a fresh memory window — is
    # built on every call, so chat history never carries across queries.
    # Confirm whether the agent should be created once and reused instead.
    try:
        agent = create_agent("Your system prompt here")
        response = agent.invoke({"input": query})
        return response
    except Exception:
        # BUGFIX: logging.exception records the full traceback from inside an
        # except block; the old f-string error() call kept only str(e).
        logging.exception("Error during query processing")
        raise