File size: 2,184 Bytes
49e4e3b
 
 
 
 
 
c48c29b
49e4e3b
 
 
 
 
 
 
 
 
c48c29b
49e4e3b
c48c29b
 
 
 
 
 
 
 
49e4e3b
 
c48c29b
49e4e3b
 
 
 
c48c29b
49e4e3b
 
 
c48c29b
49e4e3b
 
 
 
c48c29b
49e4e3b
 
 
 
c48c29b
49e4e3b
c48c29b
 
 
 
 
 
 
 
 
49e4e3b
c48c29b
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import os
import logging
from config import MODEL_NAME
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain.agents import AgentExecutor
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import create_tool_calling_agent
from langchain_core.utils.function_calling import convert_to_openai_function
from utils import book_slot, check_slots, reschedule_event, delete_event

# Load environment variables from a local .env file (supplies API_KEY, etc.).
load_dotenv()
# DEBUG-level logging with timestamps; AgentExecutor(verbose=True) below also
# prints its own trace, so expect chatty output in development.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

# Fail fast at import time if the key is missing (raises KeyError).
API_KEY = os.environ["API_KEY"]

def create_agent(PROMPT):
    """Build a tool-calling agent executor for the calendar tools.

    Args:
        PROMPT: System prompt string injected as the agent's system message.

    Returns:
        AgentExecutor wired with a Groq chat model, the four calendar tools
        (book_slot, delete_event, check_slots, reschedule_event), and a
        sliding-window conversation memory of the last 5 exchanges.
    """
    # Windowed memory: keeps only the 5 most recent message pairs under the
    # "chat_history" key consumed by the prompt's MessagesPlaceholder.
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        return_messages=True,
        k=5,
    )

    # Prompt layout: system instructions, prior turns, current user input,
    # then the agent's intermediate tool-call scratchpad.
    prompt_template = ChatPromptTemplate.from_messages([
        ("system", PROMPT),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    tools = [book_slot, delete_event, check_slots, reschedule_event]

    # NOTE: do NOT pre-bind OpenAI-style function schemas here.
    # create_tool_calling_agent binds the tools itself (via bind_tools),
    # and ChatGroq does not expose the OpenAI-specific bind_functions API —
    # the previous .bind_functions(...) call failed/duplicated the binding.
    llm = ChatGroq(
        model=MODEL_NAME,
        temperature=0.7,
        max_tokens=None,  # let the provider default cap the completion length
        timeout=60,
        max_retries=2,
        api_key=API_KEY,
    )

    # Agent = LLM + tools + prompt; executor adds the run loop and memory.
    agent = create_tool_calling_agent(llm, tools, prompt_template)

    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=True,
    )

    return agent_executor

# Example usage
# Cached executor: building a new agent per call would also create a fresh
# ConversationBufferWindowMemory each time, discarding all chat history.
_agent_executor = None


def process_query(query: str):
    """Run one user query through the (lazily created, shared) agent.

    Args:
        query: The user's natural-language request.

    Returns:
        The raw dict returned by AgentExecutor.invoke (includes "output").

    Raises:
        Re-raises any exception from agent construction or invocation
        after logging it with a traceback.
    """
    global _agent_executor
    try:
        # Create the agent once and reuse it so the windowed memory
        # actually persists across successive queries.
        if _agent_executor is None:
            _agent_executor = create_agent("Your system prompt here")
        response = _agent_executor.invoke({"input": query})
        return response
    except Exception as e:
        # logging.exception records the full traceback, not just the message.
        logging.exception("Error during query processing: %s", e)
        raise