"""Gradio chat UI backed by a LangChain ChatOpenAI model (gpt-4-1106-preview)."""
import os

import gradio as gr
import langchain  # noqa: F401 -- imported by the original script; not used directly here
import openai
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage
from openai import OpenAI

# Fail fast with a KeyError if the API key is not configured.
openai.api_key = os.environ['OPENAI_API_KEY']

# NOTE(review): `client` is unused in this file — presumably kept for the
# Assistants API experiments that were previously commented out; confirm
# before removing.
client = OpenAI()

llm = ChatOpenAI(temperature=1.0, model='gpt-4-1106-preview')


def predict(message, history):
    """Return the model's reply to *message*, given the prior chat *history*.

    Parameters
    ----------
    message : str
        The new user message.
    history : list of (str, str)
        Prior (user, assistant) turn pairs, as supplied by ``gr.ChatInterface``.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # Convert Gradio's (human, ai) tuple history into LangChain message objects.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    # .invoke() replaces the deprecated bare-call form `llm(messages)`.
    gpt_response = llm.invoke(history_langchain_format)
    return gpt_response.content


if __name__ == "__main__":
    # share=True exposes a public Gradio link in addition to the local server.
    gr.ChatInterface(predict).launch(share=True)