import json
from typing import List

from dotenv import load_dotenv
from flask import Flask, request
from pydantic import BaseModel, Field

from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.agents import AgentActionMessageLog, AgentFinish
from langchain_openai import ChatOpenAI

# Initialize the Flask app
app = Flask(__name__)
load_dotenv()
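# Note: ChatOpenAI authenticates via the OPENAI_API_KEY environment variable,
# which load_dotenv() is expected to populate from a local .env file.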


@tool
def FAQ(question: str):
    """Answers the question 1+1"""
    # Placeholder tool: the docstring above is the description the model sees
    # when deciding whether to call this function; the return value is a stub.
    return 23


tools = [FAQ]


@app.route('/', methods=['GET', 'POST'])
def index():

    class Response(BaseModel):
        """Final response to the question being asked. This is consumed by a frontend chatbot engine that has the ability to execute suggested actions"""

        message: str = Field(description="The final answer to be displayed to the user")
        tokens: int = Field(description="Count the number of tokens used to produce the response")
        actions: List[int] = Field(
            description="List of actions to be executed. Only include an action if it contains relevant information"
        )
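
    # Illustrative example of the structured payload a frontend would receive
    # when the model "calls" Response (values are made up):
    #   {"message": "1 + 1 = 2", "tokens": 12, "actions": []}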


    def parse(output):
        """Custom output parser for the OpenAI-functions agent.

        - No function call: the model answered directly, so finish with its text.
        - A call to "Response": the model produced the final structured answer,
          so finish with the parsed arguments.
        - Any other call: route it to the named tool as an agent action.
        """
        if "function_call" not in output.additional_kwargs:
            return AgentFinish(return_values={"output": output.content}, log=output.content)

        function_call = output.additional_kwargs["function_call"]
        name = function_call["name"]
        inputs = json.loads(function_call["arguments"])

        if name == "Response":
            return AgentFinish(return_values=inputs, log=str(function_call))
        return AgentActionMessageLog(
            tool=name, tool_input=inputs, log="", message_log=[output]
        )
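
    # Illustrative shape of the function-call payload handled above (values made up):
    #   output.additional_kwargs["function_call"] == {
    #       "name": "FAQ", "arguments": '{"question": "what is 1+1?"}'
    #   }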

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant"),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )

    llm = ChatOpenAI(temperature=0)  # low temperature keeps function-call arguments well-formed
    
    # Expose both the FAQ tool and the Response schema to the model as OpenAI
    # functions; "calling" Response is how the model returns its final answer.
    llm_with_tools = llm.bind(
        functions=[
            format_tool_to_openai_function(FAQ),
            convert_pydantic_to_openai_function(Response),  # response schema
        ]
    )

    # LCEL pipeline: pass the user input through, render the intermediate
    # (action, observation) steps as scratchpad messages, then run
    # prompt -> model (with bound functions) -> custom parser.
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            ),
        }
        | prompt
        | llm_with_tools
        | parse
    )

    agent_executor = AgentExecutor(tools=tools, agent=agent, verbose=True, handle_parsing_errors="Check your output and make sure it conforms, use the Action/Action Input syntax")

    # Take the question from the request, falling back to a sample query.
    question = request.values.get("input", "what did the president say about Ketanji Brown Jackson")
    return agent_executor.invoke(
        {"input": question},
        return_only_outputs=True,
    )
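

# A minimal way to run this locally (assumes the Flask dev server is acceptable;
# in production a WSGI server such as gunicorn would serve the app instead).
# Example request once running on the default port 5000:
#   curl -X POST http://localhost:5000/ -d "input=what is 1+1?"
if __name__ == "__main__":
    app.run(debug=True)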