# Test langchain pulling from Turbo
import os

import gradio as gr
import openai
from openai import OpenAI

from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

openai.api_key = os.environ['OPENAI_API_KEY']
client = OpenAI()

# Assistants
# my_assistant = client.beta.assistants.retrieve("asst_9msH6Qr6Sb6JCQRwW3aqFDUi")
# print(my_assistant)

# Chat model backing the Gradio chat interface
llm = ChatOpenAI(temperature=1.0, model='gpt-4-1106-preview')

def predict(message, history):
    # Rebuild the Gradio chat history as LangChain messages,
    # then append the newest user message.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)  # returns an AIMessage
    return gpt_response.content

# Launch the chat UI with a public share link
gr.ChatInterface(predict).launch(share=True)