# Source: Hugging Face repo file "model.py" by simran0608
# (revision f320a1e, 2.18 kB) — provenance header preserved as comments so
# the module remains valid Python.
import os
import logging
from config import MODEL_NAME
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain.agents import AgentExecutor
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import create_tool_calling_agent
from langchain_core.utils.function_calling import convert_to_openai_function
from utils import book_slot, check_slots, reschedule_event, delete_event
load_dotenv()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
API_KEY = os.environ["API_KEY"]
def create_agent(PROMPT):
    """Build an AgentExecutor wired with the calendar tools and chat memory.

    Args:
        PROMPT: System-prompt string injected into the agent's prompt template.

    Returns:
        AgentExecutor: executor combining the Groq chat model, the four
        calendar tools, and a 5-turn windowed conversation memory.
    """
    # Keep only the last 5 exchanges so the prompt stays inside context limits.
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        return_messages=True,
        k=5,
    )

    # "agent_scratchpad" is where the agent framework inserts intermediate
    # tool-call steps; "chat_history" is filled from the memory above.
    prompt_template = ChatPromptTemplate.from_messages([
        ("system", PROMPT),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    tools = [book_slot, delete_event, check_slots, reschedule_event]

    llm = ChatGroq(
        model=MODEL_NAME,
        temperature=0.7,
        max_tokens=None,
        timeout=60,
        max_retries=2,
        api_key=API_KEY,
    )
    # BUGFIX: the original also called
    #   llm.bind_functions(functions=[convert_to_openai_function(f) for f in tools])
    # before handing the model to create_tool_calling_agent. That agent
    # constructor binds the tools itself (via bind_tools), so the tools were
    # being attached twice, in two conflicting formats (legacy OpenAI
    # "functions" vs. native tool-calling). The raw model must be passed in.

    agent = create_tool_calling_agent(llm, tools, prompt_template)

    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=True,
    )
    return agent_executor
# Example usage.
# Cached agent instance — created lazily on first query. BUGFIX: the original
# built a brand-new agent (and a fresh ConversationBufferWindowMemory) on
# every call, so the conversation memory never retained anything between
# queries. Reusing one agent makes the k=5 memory window actually work.
_agent = None


def process_query(query: str):
    """Run a single user query through the (lazily created) shared agent.

    Args:
        query: The user's natural-language request.

    Returns:
        The agent's response dict as returned by ``AgentExecutor.invoke``.

    Raises:
        Exception: re-raised after logging if agent creation or invocation
        fails (top-level boundary: log, then propagate).
    """
    global _agent
    try:
        if _agent is None:
            # TODO(review): replace the placeholder with the real system prompt.
            _agent = create_agent("Your system prompt here")
        return _agent.invoke({"input": query})
    except Exception as e:
        logging.error(f"Error during query processing: {str(e)}")
        raise