Spaces:
Runtime error
Runtime error
updated function calling code
Browse files
app.py
CHANGED
@@ -22,10 +22,11 @@ from langchain.schema import (
|
|
22 |
HumanMessage,
|
23 |
SystemMessage
|
24 |
)
|
|
|
|
|
25 |
|
26 |
|
27 |
# Get the value of the openai_api_key from environment variable
|
28 |
-
#openai_api_key = os.getenv("OPENAI_API_KEY")
|
29 |
openai.api_key = os.getenv("OPENAI_API_KEY")
|
30 |
|
31 |
chat = ChatOpenAI(
|
@@ -35,16 +36,12 @@ chat = ChatOpenAI(
|
|
35 |
model='gpt-3.5-turbo-0613'
|
36 |
)
|
37 |
|
38 |
-
# import all defined functions, their definitions and a dictionary
|
39 |
-
from gpt_function_definitions import generate_image, generate_caption, get_news
|
40 |
-
|
41 |
-
|
42 |
#Streaming endpoint
|
43 |
API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
|
44 |
|
45 |
|
46 |
|
47 |
-
# TOOLS
|
48 |
# Load the tool configs that are needed.
|
49 |
# 'Tool' dataclass wraps functions that accept a single string input and returns a string output.
|
50 |
tools = [
|
@@ -75,32 +72,49 @@ tools = [
|
|
75 |
|
76 |
# Creating OpenAI functions
|
77 |
# use LangChain tools as OpenAI functions.
|
78 |
-
functions = [format_tool_to_openai_function(t) for t in tools]
|
79 |
-
functions
|
80 |
|
81 |
# defining agents using tools and openai functions
|
82 |
-
agent = initialize_agent(tools, chat, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
|
83 |
|
84 |
|
85 |
# function calling
|
86 |
-
def run_conversation(user_input):
|
87 |
-
|
88 |
-
|
89 |
-
print(f"
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
104 |
|
105 |
print(f"chatbot - {chatbot}")
|
106 |
print(f"user_input - {user_input}")
|
@@ -115,13 +129,14 @@ def predict(user_input, temperature, stable_diff, image_cap, top_news, file_outp
|
|
115 |
|
116 |
|
117 |
chat = ChatOpenAI(
|
118 |
-
|
119 |
temperature=temperature, #1.0
|
120 |
streaming=True,
|
121 |
model='gpt-3.5-turbo-0613')
|
122 |
messages = [system]
|
123 |
-
|
124 |
-
|
|
|
125 |
|
126 |
if len(chatbot) != 0:
|
127 |
for conv in chatbot:
|
@@ -130,28 +145,30 @@ def predict(user_input, temperature, stable_diff, image_cap, top_news, file_outp
|
|
130 |
messages.append(human)
|
131 |
messages.append(ai)
|
132 |
messages.append(HumanMessage(content=user_input))
|
|
|
|
|
133 |
if function_call_decision:
|
134 |
# getting openAI function agent response
|
135 |
-
function_response, image_file = run_conversation(user_input)
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
|
|
141 |
else: # for first user message
|
142 |
-
#human = HumanMessage(content=user_input)
|
143 |
-
#messages.append(human)
|
144 |
messages.append(HumanMessage(content=user_input))
|
|
|
|
|
145 |
if function_call_decision:
|
146 |
# getting openAI function agent response
|
147 |
-
function_response, image_file = run_conversation(user_input)
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
print(f"messages - {messages}")
|
155 |
|
156 |
# getting gpt3.5's response
|
157 |
gpt_response = chat(messages)
|
@@ -161,8 +178,7 @@ def predict(user_input, temperature, stable_diff, image_cap, top_news, file_outp
|
|
161 |
|
162 |
chatbot.append((user_input, bot_message))
|
163 |
|
164 |
-
return "", chatbot, None
|
165 |
-
|
166 |
|
167 |
|
168 |
def add_image(file_to_save, file_output):
|
@@ -177,6 +193,7 @@ def add_image(file_to_save, file_output):
|
|
177 |
print(f"Logging: Updated file directory - {file_output}")
|
178 |
return file_output #gr.update(value="dog1.jpg")
|
179 |
|
|
|
180 |
def add_audio(file_to_save, file_output):
|
181 |
print(f"audio file_to_save is - {file_to_save}")
|
182 |
print(f"files available in directory are -{file_output}")
|
@@ -189,6 +206,7 @@ def add_audio(file_to_save, file_output):
|
|
189 |
print(f"Logging: Updated file directory - {file_output}")
|
190 |
return file_output #gr.update(value="dog1.jpg")
|
191 |
|
|
|
192 |
def upload_file(file, file_output):
|
193 |
print(f"Logging: all files available - {file_output}")
|
194 |
print(f"Logging: file uploaded is - {file}")
|
@@ -279,54 +297,54 @@ That's it! You have added your own brand new ChatGPT Plugin for yourself. Go
|
|
279 |
"""
|
280 |
|
281 |
|
282 |
-
|
283 |
# GRADIO BLOCK
|
284 |
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
|
285 |
#chatbot {height: 520px; overflow: auto;}""") as demo: # #width: 1000px;
|
286 |
-
gr.HTML('<h1 align="center">🚀ChatGPT🧩Plugin
|
287 |
with gr.Accordion("What is happening?", open=False):
|
288 |
gr.HTML(messaging)
|
289 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-UI-with-Langchain?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
|
290 |
with gr.Row():
|
291 |
with gr.Column():
|
292 |
openai_api_key_tb = gr.Textbox(label="Enter your OpenAI API key here",
|
293 |
-
value="🎁Keys are provided by HuggingFace for Free🥳
|
294 |
container=False)
|
295 |
#plugin_message = gr.HTML()
|
296 |
-
with gr.Accordion("
|
297 |
with gr.Row():
|
298 |
#music_gen = gr.Checkbox(label="🎵MusicGen", value=False)
|
299 |
stable_diff = gr.Checkbox(label="🖼️Diffusers", value=False)
|
300 |
image_cap = gr.Checkbox(label="🎨Describe Image", value=False)
|
301 |
top_news = gr.Checkbox(label="📰News", value=False)
|
|
|
302 |
#texttospeech = gr.Checkbox(label="📝🗣️Text-To-Speech", value=False)
|
303 |
#gr.CheckboxGroup(["🎵MusicGen", "🖼️Diffusers", "🎨Describe Image", "📰News", "📝🗣️Text-To-Speech" ], label="Plug-ins", info="enhance your ChatGPT experience using Plugins : Powered by Gradio!")
|
304 |
with gr.Column():
|
305 |
-
gen_image = gr.Image(label="generated image", type="filepath")
|
306 |
|
307 |
with gr.Row():
|
308 |
chatbot = gr.Chatbot(elem_id='chatbot')
|
309 |
|
310 |
with gr.Row():
|
311 |
-
with gr.Column(scale=0.
|
312 |
inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
|
|
|
|
|
313 |
with gr.Column(scale=0.15, min_width=0):
|
314 |
btn = gr.UploadButton("📁Upload", file_types=["image", "audio"], file_count="single")
|
315 |
|
316 |
-
b1 = gr.Button("🏃Run")
|
317 |
-
|
318 |
with gr.Row():
|
319 |
with gr.Accordion("Parameters", open=False):
|
320 |
top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
|
321 |
temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
|
322 |
with gr.Accordion("Available Files", open=False):
|
323 |
-
file_output = gr.File(file_count="multiple", file_types=["image", "audio"])
|
324 |
|
325 |
inputs.submit( predict,
|
326 |
-
[inputs, temperature, stable_diff, image_cap, top_news, file_output, chatbot],
|
327 |
[inputs, chatbot, gen_image ])
|
328 |
b1.click( predict,
|
329 |
-
[inputs, temperature, stable_diff, image_cap, top_news, file_output, chatbot],
|
330 |
[inputs, chatbot, gen_image ])
|
331 |
|
332 |
|
@@ -337,8 +355,16 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
|
|
337 |
with gr.Accordion("How to add more Plugins to ChatGPT", open=False ):
|
338 |
gr.Markdown(add_plugin_steps)
|
339 |
|
340 |
-
|
341 |
-
|
342 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
343 |
|
344 |
demo.queue().launch(debug=True, height = '1000')
|
|
|
22 |
HumanMessage,
|
23 |
SystemMessage
|
24 |
)
|
25 |
+
# import all defined functions, their definitions and a dictionary
|
26 |
+
from gpt_function_definitions import generate_image, generate_caption, get_news
|
27 |
|
28 |
|
29 |
# Get the value of the openai_api_key from environment variable
|
|
|
30 |
openai.api_key = os.getenv("OPENAI_API_KEY")
|
31 |
|
32 |
chat = ChatOpenAI(
|
|
|
36 |
model='gpt-3.5-turbo-0613'
|
37 |
)
|
38 |
|
|
|
|
|
|
|
|
|
39 |
#Streaming endpoint
|
40 |
API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
|
41 |
|
42 |
|
43 |
|
44 |
+
# TOOLS and FUNCTION CALLING
|
45 |
# Load the tool configs that are needed.
|
46 |
# 'Tool' dataclass wraps functions that accept a single string input and returns a string output.
|
47 |
tools = [
|
|
|
72 |
|
73 |
# Creating OpenAI functions
|
74 |
# use LangChain tools as OpenAI functions.
|
75 |
+
#functions = [format_tool_to_openai_function(t) for t in tools]
|
76 |
+
#functions
|
77 |
|
78 |
# defining agents using tools and openai functions
|
79 |
+
#agent = initialize_agent(tools, chat, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
|
80 |
|
81 |
|
82 |
# function calling
|
83 |
+
def run_conversation(user_input, plugins, tools):
    """Run the OpenAI function-calling agent over the currently enabled plugin tools.

    Args:
        user_input: Raw user message to hand to the agent.
        plugins: List of booleans, one per tool, marking which plugins are enabled
            (parallel to ``tools``).
        tools: Full list of available LangChain Tool objects.

    Returns:
        A ``(function_response, image_file)`` tuple. ``image_file`` is a filesystem
        path extracted from the response when the agent appears to have produced an
        image, otherwise ``None``. On any agent failure both elements are ``None``.
    """
    print(f"Plugins are - {plugins}")
    print(f"Total available PLUGINS/Tools are - {tools}")

    # Keep only the tools whose plugin checkbox is ticked; 'plugins' and 'tools'
    # are parallel lists, so zip + flag filtering selects the enabled subset.
    # (Named separately instead of rebinding the 'tools' parameter.)
    enabled_tools = [tool for tool, flag in zip(tools, plugins) if flag]
    print(f"PLUGINS/Tools enabled in this run are - {enabled_tools}")

    try:
        # Define an agent over the enabled tools using OpenAI function calling.
        # NOTE(review): relies on the module-level 'chat' ChatOpenAI instance.
        agent = initialize_agent(enabled_tools, chat, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)

        # Call the agent with the user's message.
        function_response = agent.run(user_input)
        print(f"function_response is - {function_response}")

        # Heuristic: if the response mentions an image file extension and is not
        # merely talking about captions, pull the file path out of the markdown
        # link "(...)" so the UI can display the generated image.
        image_file_extns = ['png', 'jpg', 'gif', 'tiff', 'tif', 'svg', 'bmp']
        literal_terms = ['caption', 'captions']
        if any(extn in function_response for extn in image_file_extns) and not any(term in function_response for term in literal_terms):
            image_file = function_response.replace('sandbox:', "").split('(')[-1].split(')')[0]
            print(f"image_file is -{image_file}")
            return function_response, image_file

        return function_response, None

    except Exception as e:
        # Best-effort: log the failure and let the caller fall back to plain
        # chat instead of crashing the Gradio handler.
        # (Fixed typo in the logged message: "occured" -> "occurred".)
        print(f"An error occurred while calling agents using 'Function Calling': {e}")
        return None, None
|
113 |
+
|
114 |
+
|
115 |
+
system = SystemMessage(content = "You are a helpful AI assistant")
|
116 |
+
|
117 |
+
def predict(user_input, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot):
|
118 |
|
119 |
print(f"chatbot - {chatbot}")
|
120 |
print(f"user_input - {user_input}")
|
|
|
129 |
|
130 |
|
131 |
chat = ChatOpenAI(
|
132 |
+
openai_api_key=openai_api_key,
|
133 |
temperature=temperature, #1.0
|
134 |
streaming=True,
|
135 |
model='gpt-3.5-turbo-0613')
|
136 |
messages = [system]
|
137 |
+
# image, caption, news, search
|
138 |
+
plugins = [stable_diff, image_cap, top_news, google_search]
|
139 |
+
function_call_decision = True if any(plugins) else False
|
140 |
|
141 |
if len(chatbot) != 0:
|
142 |
for conv in chatbot:
|
|
|
145 |
messages.append(human)
|
146 |
messages.append(ai)
|
147 |
messages.append(HumanMessage(content=user_input))
|
148 |
+
print(f"messages list is - {messages}")
|
149 |
+
|
150 |
if function_call_decision:
|
151 |
# getting openAI function agent response
|
152 |
+
function_response, image_file = run_conversation(user_input, plugins, tools)
|
153 |
+
if function_response is not None:
|
154 |
+
gpt_response = AIMessage(content= function_response)
|
155 |
+
bot_message = gpt_response.content
|
156 |
+
print(f"bot_message - {bot_message}")
|
157 |
+
chatbot.append((user_input, bot_message))
|
158 |
+
return "", chatbot, image_file
|
159 |
else: # for first user message
|
|
|
|
|
160 |
messages.append(HumanMessage(content=user_input))
|
161 |
+
print(f"messages list is - {messages}")
|
162 |
+
|
163 |
if function_call_decision:
|
164 |
# getting openAI function agent response
|
165 |
+
function_response, image_file = run_conversation(user_input, plugins, tools)
|
166 |
+
if function_response is not None:
|
167 |
+
gpt_response = AIMessage(content= function_response)
|
168 |
+
bot_message = gpt_response.content
|
169 |
+
print(f"bot_message - {bot_message}")
|
170 |
+
chatbot.append((user_input, bot_message))
|
171 |
+
return "", chatbot, image_file
|
|
|
172 |
|
173 |
# getting gpt3.5's response
|
174 |
gpt_response = chat(messages)
|
|
|
178 |
|
179 |
chatbot.append((user_input, bot_message))
|
180 |
|
181 |
+
return "", chatbot, None #"", chatbot
|
|
|
182 |
|
183 |
|
184 |
def add_image(file_to_save, file_output):
|
|
|
193 |
print(f"Logging: Updated file directory - {file_output}")
|
194 |
return file_output #gr.update(value="dog1.jpg")
|
195 |
|
196 |
+
|
197 |
def add_audio(file_to_save, file_output):
|
198 |
print(f"audio file_to_save is - {file_to_save}")
|
199 |
print(f"files available in directory are -{file_output}")
|
|
|
206 |
print(f"Logging: Updated file directory - {file_output}")
|
207 |
return file_output #gr.update(value="dog1.jpg")
|
208 |
|
209 |
+
|
210 |
def upload_file(file, file_output):
|
211 |
print(f"Logging: all files available - {file_output}")
|
212 |
print(f"Logging: file uploaded is - {file}")
|
|
|
297 |
"""
|
298 |
|
299 |
|
|
|
300 |
# GRADIO BLOCK
|
301 |
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
|
302 |
#chatbot {height: 520px; overflow: auto;}""") as demo: # #width: 1000px;
|
303 |
+
gr.HTML('<h1 align="center">🚀ChatGPT🧩Plugin WebUI using Langchain & Gradio</h1>')
|
304 |
with gr.Accordion("What is happening?", open=False):
|
305 |
gr.HTML(messaging)
|
306 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-UI-with-Langchain?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
|
307 |
with gr.Row():
|
308 |
with gr.Column():
|
309 |
openai_api_key_tb = gr.Textbox(label="Enter your OpenAI API key here",
|
310 |
+
value="🎁ChatGPT Keys are provided by HuggingFace for Free🥳 You don't need to enter yours!😉🙌",
|
311 |
container=False)
|
312 |
#plugin_message = gr.HTML()
|
313 |
+
with gr.Accordion("Plugins🛠️ Available",open=True):
|
314 |
with gr.Row():
|
315 |
#music_gen = gr.Checkbox(label="🎵MusicGen", value=False)
|
316 |
stable_diff = gr.Checkbox(label="🖼️Diffusers", value=False)
|
317 |
image_cap = gr.Checkbox(label="🎨Describe Image", value=False)
|
318 |
top_news = gr.Checkbox(label="📰News", value=False)
|
319 |
+
google_search = gr.Checkbox(label="🌐Google Search", value=False)
|
320 |
#texttospeech = gr.Checkbox(label="📝🗣️Text-To-Speech", value=False)
|
321 |
#gr.CheckboxGroup(["🎵MusicGen", "🖼️Diffusers", "🎨Describe Image", "📰News", "📝🗣️Text-To-Speech" ], label="Plug-ins", info="enhance your ChatGPT experience using Plugins : Powered by Gradio!")
|
322 |
with gr.Column():
|
323 |
+
gen_image = gr.Image(label="generated image", type="filepath", interactive=False)
|
324 |
|
325 |
with gr.Row():
|
326 |
chatbot = gr.Chatbot(elem_id='chatbot')
|
327 |
|
328 |
with gr.Row():
|
329 |
+
with gr.Column(scale=0.70):
|
330 |
inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
|
331 |
+
with gr.Column(scale=0.15, min_width=0):
|
332 |
+
b1 = gr.Button("🏃Run")
|
333 |
with gr.Column(scale=0.15, min_width=0):
|
334 |
btn = gr.UploadButton("📁Upload", file_types=["image", "audio"], file_count="single")
|
335 |
|
|
|
|
|
336 |
with gr.Row():
|
337 |
with gr.Accordion("Parameters", open=False):
|
338 |
top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
|
339 |
temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
|
340 |
with gr.Accordion("Available Files", open=False):
|
341 |
+
file_output = gr.File(file_count="multiple", file_types=["image", "audio"], label="Files Available")
|
342 |
|
343 |
inputs.submit( predict,
|
344 |
+
[inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot],
|
345 |
[inputs, chatbot, gen_image ])
|
346 |
b1.click( predict,
|
347 |
+
[inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot],
|
348 |
[inputs, chatbot, gen_image ])
|
349 |
|
350 |
|
|
|
355 |
with gr.Accordion("How to add more Plugins to ChatGPT", open=False ):
|
356 |
gr.Markdown(add_plugin_steps)
|
357 |
|
358 |
+
gr.Examples(
|
359 |
+
examples = [["generate an image of a puppy", 1.0, True, False, False, False, None],
|
360 |
+
["generate a caption for the image cat2.jpg", 1.0, False, True, False, False, "cat2.jpg"],
|
361 |
+
["What is the latest top news on Inflation in Europe", 1.0, False, False, True, False, None],
|
362 |
+
["What is Europe's stand on the ongoing generative AI revolution?", 1.0, False, False, False, True, None],
|
363 |
+
["Write a very short poem on 'sparkling water'", 1.0, False, False, False, False, None],
|
364 |
+
["What is the weather in LA and SF?", 1.0, False, False, False, True, None],
|
365 |
+
["Who is the owner of Twitter? Are there any competitors of Twitter yet?", 1.0, True, True, True, True, None],
|
366 |
+
],
|
367 |
+
inputs = [inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot]
|
368 |
+
)
|
369 |
|
370 |
demo.queue().launch(debug=True, height = '1000')
|