ysharma HF staff commited on
Commit
5653071
·
1 Parent(s): ae72a48

updated code

Browse files
Files changed (1) hide show
  1. app.py +180 -101
app.py CHANGED
@@ -1,85 +1,100 @@
1
  import os
2
  import openai
3
  import gradio as gr
4
- import time
5
  import requests
6
  import shutil
7
- import json
 
8
 
9
- from PIL import Image
10
  from gradio_client import Client
11
  from newsapi import NewsApiClient
12
 
13
- # Import langchain things that are needed generically
 
 
 
 
 
 
 
 
 
14
  from langchain import LLMMathChain, SerpAPIWrapper
15
- from langchain.agents import AgentType, initialize_agent
16
  from langchain.chat_models import ChatOpenAI
17
  from langchain.tools import BaseTool, StructuredTool, Tool, tool
18
-
19
- from langchain.tools import format_tool_to_openai_function
20
  from langchain.schema import (
21
  AIMessage,
22
  HumanMessage,
23
  SystemMessage
24
  )
25
- # import all defined functions, their definitions and a dictionary
26
- from gpt_function_definitions import generate_image, generate_caption, get_news
27
-
28
 
29
  # Get the value of the openai_api_key from environment variable
 
30
  openai.api_key = os.getenv("OPENAI_API_KEY")
31
 
32
-
33
- #Streaming endpoint
34
- API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
35
  search = SerpAPIWrapper()
36
 
37
 
38
- # TOOLS and FUNCTION CALLING
 
39
  # Load the tool configs that are needed.
40
- # 'Tool' dataclass wraps functions that accept a single string input and returns a string output.
41
  tools = [
 
42
  Tool.from_function(
43
  func=generate_image,
44
  name="generate_image",
45
  description="generate an image based on the prompt provided"
46
  # coroutine= ... <- you can specify an async method if desired as well
47
  ),
48
- #Tool.from_function(
49
- # func=generate_music,
50
- # name="generate_music",
51
- # description="generate music based on an input text and input melody"
52
- # # coroutine= ... <- you can specify an async method if desired as well
53
- #),
54
  Tool.from_function(
55
  func=generate_caption,
56
  name="generate_caption",
57
  description="generate caption for the image present at the filepath provided"
58
  # coroutine= ... <- you can specify an async method if desired as well
59
  ),
 
 
60
  Tool.from_function(
61
  func=get_news,
62
  name="get_news",
63
  description="get top three engilsh news items for a given query, sorted by relevancy"
64
  # coroutine= ... <- you can specify an async method if desired as well
65
  ),
 
 
66
  Tool.from_function(
67
  func=search.run,
68
  name="Search",
69
  description="useful for when you need to answer questions about current events"
70
  # coroutine= ... <- you can specify an async method if desired as well
71
- ),
72
- ]
 
 
 
 
 
 
 
 
73
 
74
 
75
- # function calling
76
  def run_conversation(user_input, plugins, tools, chat):
77
 
78
  print(f"Plugins are - {plugins}")
79
  print(f"Total available PLUGINS/Tools are - {tools}")
80
 
81
  # Load the tool configs that are needed.
82
- # 'Tool' dataclass wraps functions that accept a single string input and returns a string output.
83
  tools = [val for val, flag in zip(tools, plugins) if flag]
84
  print(f"PLUGINS/Tools enabled in this run are - {tools}")
85
 
@@ -91,7 +106,7 @@ def run_conversation(user_input, plugins, tools, chat):
91
  function_response = agent.run(user_input)
92
  print(f"function_response is - {function_response}")
93
 
94
- image_file_extns = ['png', 'jpg', 'gif', 'tiff', 'tif', 'svg', 'bmp']
95
  literal_terms = ['caption', 'captions']
96
  if any(extn in function_response for extn in image_file_extns) and not any(term in function_response for term in literal_terms) :
97
  image_file = function_response.replace('sandbox:',"").split('(')[-1].split(')')[0]
@@ -105,9 +120,11 @@ def run_conversation(user_input, plugins, tools, chat):
105
  return None, None
106
 
107
 
108
- system = SystemMessage(content = "You are a helpful AI assistant")
 
109
 
110
- def predict(user_input, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot):
 
111
 
112
  print(f"chatbot - {chatbot}")
113
  print(f"user_input - {user_input}")
@@ -128,7 +145,7 @@ def predict(user_input, temperature, stable_diff, image_cap, top_news, google_se
128
  model='gpt-3.5-turbo-0613')
129
  messages = [system]
130
  # image, caption, news, serach
131
- plugins = [stable_diff, image_cap, top_news, google_search]
132
  function_call_decision = True if any(plugins) else False
133
 
134
  if len(chatbot) != 0:
@@ -174,6 +191,7 @@ def predict(user_input, temperature, stable_diff, image_cap, top_news, google_se
174
  return "", chatbot, None #"", chatbot
175
 
176
 
 
177
  def add_image(file_to_save, file_output):
178
  print(f"image file_to_save is - {file_to_save}")
179
  print(f"files available in directory are -{file_output}")
@@ -186,7 +204,6 @@ def add_image(file_to_save, file_output):
186
  print(f"Logging: Updated file directory - {file_output}")
187
  return file_output #gr.update(value="dog1.jpg")
188
 
189
-
190
  def add_audio(file_to_save, file_output):
191
  print(f"audio file_to_save is - {file_to_save}")
192
  print(f"files available in directory are -{file_output}")
@@ -199,7 +216,6 @@ def add_audio(file_to_save, file_output):
199
  print(f"Logging: Updated file directory - {file_output}")
200
  return file_output #gr.update(value="dog1.jpg")
201
 
202
-
203
  def upload_file(file, file_output):
204
  print(f"Logging: all files available - {file_output}")
205
  print(f"Logging: file uploaded is - {file}")
@@ -213,105 +229,166 @@ def upload_file(file, file_output):
213
  return file_output
214
 
215
 
 
216
  messaging = """
217
- How does a Language Model like GPT makes discerning choices regarding which plugins to run? Well, this is done using the Language Model as a reasoning agent and allowing it to assess and process information intelligently.<br>
218
- <b>Langchain & OpenAI Function Calling</b>: AI models like gpt-3.5-turbo-0613 and gpt-4-0613, are designed to identify when and how to activate functions through API calls. These function-specific APIs generate a JSON object with necessary arguments, aiming to surpass the efficacy of traditional chat or text completion APIs.<br>
219
- <b>Gradio Chatbots</b>: Gradio provides super easy way to build Chatbot UI. Refer our <a href="https://gradio.app/docs/#chatbot" target="_blank">Docs</a>. Using Langchain's OpenAI Functions Agent you can create chatbots designed to respond to queries by communicating with external APIs. The API responses are fed back to the Language Model for processing and a new response is generated for the user.The versatility of using Gradio to build LLM applications is immense. FOr example, in this Gradio app, you can have an array of Plugins based on functions which are tailored for various purposes (image, video, audio, text generation, utilities etc). This enhancing the breadth and depth of interactions with your Language Model.
220
  """
221
 
222
- add_plugin_steps = """## Steps to add new Plugins to your Gradio ChatGPT Chatbot
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
 
224
  1. **Acquire the API Endpoint**
225
- - You need an API which you can query, and for this example let's consider using a text-to-speech demo hosted on Huggingface Spaces.
226
- - **API Endpoint**: [https://gradio-neon-tts-plugin-coqui.hf.space/](https://gradio-neon-tts-plugin-coqui.hf.space/)
227
 
228
  2. **Create a Function to Query the API**
229
  - You can access any Gradio demo as an API via the Gradio Python Client.
230
  ```python
231
- from gradio.client import Client
232
-
233
- def texttospeech(input_text):
234
- client = Client("https://gradio-neon-tts-plugin-coqui.hf.space/")
235
- result = client.predict(
236
- input_text, # str in 'Input' Textbox component
237
- "en", # str in 'Language' Radio component
238
- api_name="/predict"
239
- )
240
- return result
 
 
 
 
241
  ```
242
 
243
- 3. **Describe the Function to GPT-3.5**
244
- - You need to describe your function to GPT3.5/4. This function definition will get passed to gpt and will suck up your token. GPT may or may not use this function based on user inputs later on.
245
- - You can either use the Gradio demo for converting any given function to the required JSON format for GPT-3.5.
246
- - Demo: [Function to JSON](https://huggingface.co/spaces/ysharma/function-to-JSON)
247
- - Or, you can create the dictionary object on your own. Note that, the correct format is super important here.
248
- - MAke sure to name your JSON object description as `<function_name>_func`.
249
  ```python
250
- texttospeech_func = {
251
- "name": "texttospeech",
252
- "description": "generate speech from the given input text",
253
- "parameters": {
254
- "type": "object",
255
- "properties": {
256
- "input_text": {
257
- "type": "string",
258
- "description": "text that will be used to generate speech"
259
- }
260
- },
261
- "required": [
262
- "input_text"
263
- ]
264
- }
265
- }
266
  ```
 
 
 
 
267
 
268
- 4. **Add Function and JSON Object Details**
269
- - Add the function definition and description to the `gpt_function_definitions.py` file (simply copy and paste).
270
- - `dict_plugin_functions` is a dictionary of all available plugins. Add your plugin information to this dictionary in the required format.
271
  ```python
272
- 'texttospeech_func': {
273
- 'dict': texttospeech_func,
274
- 'func': texttospeech
275
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
276
  ```
277
 
278
  5. **Update the Chatbot Layout**
279
  - Go to the Blocks Chatbot layout and add a new checkbox for your plugin as:
280
  ```python
281
- texttospeech = gr.Checkbox(label="📝🗣️Text-To-Speech", value=False)
282
  ```
283
- - Add the new checkbox component to your submit and click events for your chatbot and to the predict function accordingly.
284
  - And also to the `plugins` list in `predict`
285
  ```python
286
- plugins = [music_gen, stable_diff, image_cap, top_news, texttospeech]
287
  ```
288
 
289
- Thats it! you are have added your own brand new CHATGPT Plugin for yourself. Go PLAY!!
290
  """
291
 
292
 
293
- # GRADIO BLOCK
 
 
 
 
 
 
294
  with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
295
- #chatbot {height: 520px; overflow: auto;}""") as demo: # #width: 1000px;
296
- gr.HTML('<h1 align="center">🚀ChatGPT🧩Plugin WebUI using Langchain & Gradio</h1>')
 
 
 
 
 
 
297
  with gr.Accordion("What is happening?", open=False):
298
  gr.HTML(messaging)
299
- gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-UI-with-Langchain?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
 
 
300
  with gr.Row():
301
  with gr.Column():
302
  openai_api_key_tb = gr.Textbox(label="Enter your OpenAI API key here",
303
  value="🎁ChatGPT Keys are provided by HuggingFace for Free🥳 You don't need to enter yours!😉🙌",
304
  container=False)
305
- #plugin_message = gr.HTML()
 
306
  with gr.Accordion("Plugins🛠️ Available",open=True):
307
  with gr.Row():
308
- #music_gen = gr.Checkbox(label="🎵MusicGen", value=False)
309
  stable_diff = gr.Checkbox(label="🖼️Diffusers", value=False)
310
  image_cap = gr.Checkbox(label="🎨Describe Image", value=False)
311
  top_news = gr.Checkbox(label="📰News", value=False)
312
  google_search = gr.Checkbox(label="🌐Google Search", value=False)
 
 
313
  #texttospeech = gr.Checkbox(label="📝🗣️Text-To-Speech", value=False)
314
  #gr.CheckboxGroup(["🎵MusicGen", "🖼️Diffusers", "🎨Describe Image", "📰News", "📝🗣️Text-To-Speech" ], label="Plug-ins", info="enhance your ChatGPT experience using Plugins : Powered by Gradio!")
 
315
  with gr.Column():
316
  gen_image = gr.Image(label="generated image", type="filepath", interactive=False)
317
 
@@ -334,30 +411,32 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
334
  file_output = gr.File(file_count="multiple", file_types=["image", "audio"], label="Files Available")
335
 
336
  inputs.submit( predict,
337
- [inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot],
338
  [inputs, chatbot, gen_image ])
339
  b1.click( predict,
340
- [inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot],
341
  [inputs, chatbot, gen_image ])
342
 
343
 
344
  btn.upload(upload_file, [btn, file_output], file_output)
345
  gen_image.change(add_image, [gen_image, file_output], file_output)
346
  #gen_audio.change(add_audio, [gen_audio, file_output], file_output)
347
- gr.HTML("""<a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio/blob/main/README.md" target="_blank">How to add new ChatGPT Plugins in Gradio Chatbot in 5 mins!! or open the accordion below.</a>""")
348
- with gr.Accordion("How to add more Plugins to ChatGPT", open=False ):
349
- gr.Markdown(add_plugin_steps)
350
 
351
- gr.Examples(
352
- examples = [["generate an image of a puppy", 1.0, True, False, False, False, None],
353
- ["generate a caption for the image cat2.jpg", 1.0, False, True, False, False, "cat2.jpg"],
354
- ["What is the latest top news on Inflation in Europe", 1.0, False, False, True, False, None],
355
- ["What is Europe's stand on the ongoing generative AI revolution?", 1.0, False, False, False, True, None],
356
- ["Write a very short poem on 'sparkling water'", 1.0, False, False, False, False, None],
357
- ["What is the weather in LA and SF?", 1.0, False, False, False, True, None],
358
- ["Who is the owner of Twitter? Are there any competitors of Twitter yet?", 1.0, True, True, True, True, None],
 
 
359
  ],
360
- inputs = [inputs, temperature, stable_diff, image_cap, top_news, google_search, file_output, chatbot]
361
  )
 
 
 
362
 
363
- demo.queue().launch(debug=True, height = '1000')
 
1
  import os
2
  import openai
3
  import gradio as gr
4
+ import json
5
  import requests
6
  import shutil
7
+ import random
8
+ import time
9
 
 
10
  from gradio_client import Client
11
  from newsapi import NewsApiClient
12
 
13
+ from PIL import Image
14
+ import matplotlib.pyplot as plt
15
+
16
+ # import all defined functions, their definitions and a dictionary
17
+ from gpt_function_definitions import generate_image, generate_caption, get_news, bored_api
18
+
19
+ #OpenaI Chat Completions endpoint
20
+ API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
21
+
22
+ # Import things that are needed generically from langchain
23
  from langchain import LLMMathChain, SerpAPIWrapper
24
+ from langchain.agents import AgentType, initialize_agent, load_tools
25
  from langchain.chat_models import ChatOpenAI
26
  from langchain.tools import BaseTool, StructuredTool, Tool, tool
27
+ from langchain.tools import MoveFileTool, format_tool_to_openai_function
 
28
  from langchain.schema import (
29
  AIMessage,
30
  HumanMessage,
31
  SystemMessage
32
  )
33
+ from langchain.utilities import WikipediaAPIWrapper
34
+ from langchain.tools import AIPluginTool
 
35
 
36
# Get the value of the openai_api_key from the environment variable.
# SECURITY FIX: the previous revision hard-coded live OpenAI and SerpAPI keys
# directly in source. A key committed to a repo is compromised and must be
# revoked/rotated immediately; both keys are now read exclusively from the
# environment (set OPENAI_API_KEY and SERPAPI_API_KEY before launching).
openai.api_key = os.getenv("OPENAI_API_KEY")

# SerpAPIWrapper picks up SERPAPI_API_KEY from the environment on its own.
search = SerpAPIWrapper()
 
43
 
44
# LANGCHAIN

# Load the tool configs that are needed.
# Langchain's 'Tool' dataclass wraps functions that accept a single string
# input and return a string output. The OpenAI Functions agent chooses among
# these tools using each tool's name and description, so the description text
# must be accurate and typo-free (fixed: "engilsh" -> "english").
tools = [
    # Image generation
    Tool.from_function(
        func=generate_image,
        name="generate_image",
        description="generate an image based on the prompt provided"
        # coroutine= ... <- you can specify an async method if desired as well
    ),

    # Describe an image
    Tool.from_function(
        func=generate_caption,
        name="generate_caption",
        description="generate caption for the image present at the filepath provided"
        # coroutine= ... <- you can specify an async method if desired as well
    ),

    # Get latest top news
    Tool.from_function(
        func=get_news,
        name="get_news",
        description="get top three english news items for a given query, sorted by relevancy"
        # coroutine= ... <- you can specify an async method if desired as well
    ),

    # Search the web using Google search
    Tool.from_function(
        func=search.run,
        name="Search",
        description="useful for when you need to answer questions about current events"
        # coroutine= ... <- you can specify an async method if desired as well
    ),

    # The Bored API
    Tool.from_function(
        func=bored_api,
        name="bored_api",
        description="Get a random activity to do based on the activity type"
        # coroutine= ... <- you can specify an async method if desired as well
    ),
]
89
 
90
 
91
+ # Handling Plugin converations
92
  def run_conversation(user_input, plugins, tools, chat):
93
 
94
  print(f"Plugins are - {plugins}")
95
  print(f"Total available PLUGINS/Tools are - {tools}")
96
 
97
  # Load the tool configs that are needed.
 
98
  tools = [val for val, flag in zip(tools, plugins) if flag]
99
  print(f"PLUGINS/Tools enabled in this run are - {tools}")
100
 
 
106
  function_response = agent.run(user_input)
107
  print(f"function_response is - {function_response}")
108
 
109
+ image_file_extns = ['.png', '.jpg', '.gif', '.tiff', '.tif', '.svg', '.bmp']
110
  literal_terms = ['caption', 'captions']
111
  if any(extn in function_response for extn in image_file_extns) and not any(term in function_response for term in literal_terms) :
112
  image_file = function_response.replace('sandbox:',"").split('(')[-1].split(')')[0]
 
120
  return None, None
121
 
122
 
123
+ # Setting up a system message for our Chatbot
124
+ system = SystemMessage(content = "You are a helpful AI assistant") # that translates English to Pirate English.")
125
 
126
+ # driver
127
+ def predict(user_input, temperature, stable_diff, image_cap, top_news, google_search, bored, file_output, chatbot):
128
 
129
  print(f"chatbot - {chatbot}")
130
  print(f"user_input - {user_input}")
 
145
  model='gpt-3.5-turbo-0613')
146
  messages = [system]
147
  # image, caption, news, serach
148
+ plugins = [stable_diff, image_cap, top_news, google_search, bored]
149
  function_call_decision = True if any(plugins) else False
150
 
151
  if len(chatbot) != 0:
 
191
  return "", chatbot, None #"", chatbot
192
 
193
 
194
+ # Helper functions for file handling
195
  def add_image(file_to_save, file_output):
196
  print(f"image file_to_save is - {file_to_save}")
197
  print(f"files available in directory are -{file_output}")
 
204
  print(f"Logging: Updated file directory - {file_output}")
205
  return file_output #gr.update(value="dog1.jpg")
206
 
 
207
  def add_audio(file_to_save, file_output):
208
  print(f"audio file_to_save is - {file_to_save}")
209
  print(f"files available in directory are -{file_output}")
 
216
  print(f"Logging: Updated file directory - {file_output}")
217
  return file_output #gr.update(value="dog1.jpg")
218
 
 
219
  def upload_file(file, file_output):
220
  print(f"Logging: all files available - {file_output}")
221
  print(f"Logging: file uploaded is - {file}")
 
229
  return file_output
230
 
231
 
232
# What is happening with function calling, langchain, and Gradio —
# HTML shown inside the "What is happening?" accordion of the UI.
# (Fixed user-facing typos: "makes" -> "make", "FOr" -> "For",
# "This enhancing" -> "This enhances".)
messaging = """
How does a Language Model like GPT make discerning choices regarding which plugins to run? Well, this is done using the Language Model as a reasoning agent and allowing it to assess and process information intelligently.<br><br>
- <b>Langchain & OpenAI Function Calling</b>: AI models like gpt-3.5-turbo-0613 and gpt-4-0613, are designed to identify when and how to activate functions through API calls. These function-specific APIs generate a JSON object with necessary arguments, aiming to surpass the efficacy of traditional chat or text completion APIs.<br><br>
- <b>Gradio Chatbots</b>: Gradio provides a super easy way to build Chatbot UIs. Refer to our <a href="https://gradio.app/docs/#chatbot" target="_blank">Docs</a>. Using Langchain's OpenAI Functions Agent you can create chatbots designed to respond to queries by communicating with external APIs. The API responses are fed back to the Language Model for processing and a new response is generated for the user. The versatility of using Gradio to build LLM applications is immense. For example, in this Gradio app, you can have an array of Plugins based on functions which are tailored for various purposes (image, video, audio, text generation, utilities etc). This enhances the breadth and depth of interactions with your Language Model.
"""
238
 
239
+
240
# How to use this Demo effectively — HTML shown in the "Follow these Steps"
# accordion. (Fixed user-facing typos: "don;t" -> "don't",
# "resuklt" -> "result", plus minor grammar.)
howto = """
Welcome to the <b>ChatGPT-Plugins WebUI</b>, built using Gradio and Langchain! This interactive gradio chatbot uses the GPT3.5-turbo-0613 model from OpenAI and boasts the ability to USE, as well as BUILD Custom Plugins to enhance your chatbot experience.
<br>Here’s a quick guide to get you started:<br><br>
<b>To get Started</b>: Simply type your messages in the textbox to chat with ChatGPT and press enter!<br><br>
<b>How to use Plugins</b>: Plugins are provided as checkboxes. If you want to try out a plugin just select that checkbox<br><br>

- <b>DIFFUSERS PLUGIN:</b><br>
<b>What it does:</b> Generates images based on your text prompt.<br>
<b>How to use:</b> Type a prompt for the image you want to generate, and the Diffusers plugin will create it for you.<br>
<b>Example input:</b> "Generate an image of a sunset over the mountains."<br><br>

- <b>IMAGE CAPTION PLUGIN:</b><br>
<b>What it does:</b> Describes images that you upload.<br>
<b>How to use:</b> Upload an image using the 'Upload' button. Ask ChatGPT to describe the image; make sure to mention the image name to it.<br>
<b>Example input:</b> "Describe the image cat2.jpg."<br><br>

- <b>NEWS PLUGIN:</b><br>
<b>What it does:</b> Provides the top 3 news articles based on your search query.<br>
<b>How to use:</b> Just type in a search query and the NewsAPI plugin will present the top 3 news based on relevance.<br>
<b>Example input:</b> "Show me the top news about space exploration."<br><br>

- <b>SEARCH PLUGIN:</b><br>
<b>What it does:</b> Searches the internet for your queries. Now you don't need to limit yourself to a knowledge cut-off of 2021<br>
<b>How to use:</b> Type in a user message in the chatbot. The Google Search plugin will search the internet and present a concise result for you like magic!<br>
<b>Example input:</b> "Who is the current girlfriend of Leonardo Di Caprio."<br><br>

- <b>BORED API PLUGIN:</b><br>
<b>What it does:</b> Suggests activities of different types.<br>
<b>How to use:</b> Mention that you are bored and want some activities to do or simply ask to generate an activity.<br>
<b>Example input:</b> "Can you suggest me something to do, I am totally bored."<br><br>

Access Generated Content: Find all generated images in the Gradio Files component located below the input textbox.<br><br>
Have Fun!: Explore and enjoy the versatile features of this <b>ChatGPT-Plugin WebUI</b>.<br>
Now you’re all set to make the most of this ChatGPT demo. Happy chatting!
"""
276
+
277
+
278
# Guide to add new Plugins — rendered as Markdown inside the
# "build and add your own Plugins" accordion.
# (Fixed typos: "lattest" -> "latest", "a The Bored API" -> "The Bored API",
# "Thats it! you have" -> "That's it! You have"; the sample `plugins` list now
# matches `predict`, which uses `google_search`, not `search`.)
add_plugin_steps = """
## Steps to add new Plugins to your Langchain-Gradio ChatGPT PLUGIN WebUI

1. **Acquire the API Endpoint**
   - You need an API which you can query, and for this example let's consider using The Bored API.
   - **API Endpoint**: [https://www.boredapi.com/api/activity/?type=](https://www.boredapi.com/api/activity/?type=)

2. **Create a Function to Query the API**
   - You can access any Gradio demo as an API via the Gradio Python Client.
   ```python
   def bored_api(activity_type) -> str:
       '''
       Get a random activity to do based on the activity type.
       '''
       activity_type_list = ["education", "recreational", "social", "diy", "charity", "cooking", "relaxation", "music", "busywork"]
       activity_type = activity_type.lower()
       if activity_type not in activity_type_list:
           activity_type = random.choice(activity_type_list)

       api_url = "https://www.boredapi.com/api/activity/?type=" + activity_type
       response = requests.get(
           api_url
       )
       return response.json()['activity']
   ```

3. **Add Function definitions**
   - Add the function definition to the `gpt_function_definitions.py` file (simply copy and paste). Don't forget to add the function description in the docstring.
   - Add required imports
   ```python
   from gpt_function_definitions import generate_image, generate_caption, get_news, bored_api
   ```

4. **Add the function to the Tools list**
   - Add a description - describe what your function does. Models like GPT3.5/4 support Function Calling. The OpenAI Functions Agent from Langchain is designed to work with these functions and models.
   - Name - add a name for your function, don't include spaces
   ```python
   tools = [
       #image generation
       ...

       # Describe an image
       ...

       # Get latest top news
       ...

       # Bored Api
       Tool.from_function(
           func=bored_api,
           name="bored_api",
           description="Get a random activity to do based on the activity type"
           # coroutine= ... <- you can specify an async method if desired as well
       ),
   ]
   ```

5. **Update the Chatbot Layout**
   - Go to the Blocks Chatbot layout and add a new checkbox for your plugin as:
   ```python
   bored = gr.Checkbox(label="🙄bored", value=False)
   ```
   - Add the new checkbox component (example - <i>bored</i>) to your submit and click events for your chatbot and to the predict function accordingly.
   - And also to the `plugins` list in `predict`
   ```python
   plugins = [stable_diff, image_cap, top_news, google_search, bored]
   ```

**That's it! You have added your own brand new CHATGPT Plugin for yourself. Go PLAY!!**
"""
351
 
352
 
353
+ second_headline = """<h3 align="center">🔥This Plugins WebUI is build using <a href="https://www.gradio.app/" target="_blank">Gradio</a>,
354
+ <a href="https://python.langchain.com/docs/get_started/introduction.html" target="_blank">Langchain</a>,
355
+ and ChatGPT <a href="https://openai.com/blog/function-calling-and-other-api-updates" target="_blank">Function Calling API</a>.
356
+ You don't need an OPENAI API key to run this demo as Huggingface is provided one for the community use🙌</h1>"""
357
+
358
+
359
+ # Gradio block
360
  with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
361
+ #chatbot {height: 520px; overflow: auto;}""") as demo:
362
+
363
+ gr.HTML('<h1 align="center">🚀ChatGPT-Plugins🧩 WebUI using Langchain & Gradio</h1>')
364
+ gr.HTML(second_headline)
365
+ gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-UI-with-Langchain?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
366
+
367
+ with gr.Accordion("Follow these Steps to use the Gradio WebUI OR simply Click any of the given Examples! ", open=False):
368
+ gr.HTML(howto)
369
  with gr.Accordion("What is happening?", open=False):
370
  gr.HTML(messaging)
371
+
372
+ gr.HTML("""Bonus! Steps to build and add your own ChatGPT Plugins to the WebUI using Langchain : <a href="https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-UI-with-Langchain/blob/main/README.md" target="_blank">Add new Plugins to ChatGPT WebUI in 5 mins!!</a>""")
373
+
374
  with gr.Row():
375
  with gr.Column():
376
  openai_api_key_tb = gr.Textbox(label="Enter your OpenAI API key here",
377
  value="🎁ChatGPT Keys are provided by HuggingFace for Free🥳 You don't need to enter yours!😉🙌",
378
  container=False)
379
+ #plugin_message = gr.HTML()
380
+
381
  with gr.Accordion("Plugins🛠️ Available",open=True):
382
  with gr.Row():
 
383
  stable_diff = gr.Checkbox(label="🖼️Diffusers", value=False)
384
  image_cap = gr.Checkbox(label="🎨Describe Image", value=False)
385
  top_news = gr.Checkbox(label="📰News", value=False)
386
  google_search = gr.Checkbox(label="🌐Google Search", value=False)
387
+ bored = gr.Checkbox(label="🙄Bored API", value=False)
388
+ #music_gen = gr.Checkbox(label="🎵MusicGen", value=False)
389
  #texttospeech = gr.Checkbox(label="📝🗣️Text-To-Speech", value=False)
390
  #gr.CheckboxGroup(["🎵MusicGen", "🖼️Diffusers", "🎨Describe Image", "📰News", "📝🗣️Text-To-Speech" ], label="Plug-ins", info="enhance your ChatGPT experience using Plugins : Powered by Gradio!")
391
+
392
  with gr.Column():
393
  gen_image = gr.Image(label="generated image", type="filepath", interactive=False)
394
 
 
411
  file_output = gr.File(file_count="multiple", file_types=["image", "audio"], label="Files Available")
412
 
413
  inputs.submit( predict,
414
+ [inputs, temperature, stable_diff, image_cap, top_news, google_search, bored, file_output, chatbot],
415
  [inputs, chatbot, gen_image ])
416
  b1.click( predict,
417
+ [inputs, temperature, stable_diff, image_cap, top_news, google_search, bored, file_output, chatbot],
418
  [inputs, chatbot, gen_image ])
419
 
420
 
421
  btn.upload(upload_file, [btn, file_output], file_output)
422
  gen_image.change(add_image, [gen_image, file_output], file_output)
423
  #gen_audio.change(add_audio, [gen_audio, file_output], file_output)
 
 
 
424
 
425
+ gr.HTML("<br><br>")
426
+ gr.Examples(label = "To get started quickly - Click on any example below and press Enter/Run:",
427
+ examples = [["What is the latest top news on Inflation in Europe", 1.0, False, False, True, False, False, None],
428
+ ["What is Europe's stand on the ongoing generative AI revolution?", 1.0, False, False, False, True, False, None],
429
+ ["Write a very short poem on 'sparkling water'", 1.0, False, False, False, False, False, None],
430
+ ["What is the weather in LA and SF?", 1.0, False, False, False, True, False, None],
431
+ ["generate an image of a puppy", 1.0, True, False, False, False, False,None],
432
+ ["generate a caption for the image cat2.jpg", 1.0, False, True, False, False, False, "cat2.jpg"],
433
+ ["Who is the owner of Twitter? Are there any competitor of Twitter yet?", 1.0, True, True, True, True, False, None],
434
+ ["Can you suggest me something to do, I am totally bored", 1.0, False, False, False, False, True, None]
435
  ],
436
+ inputs = [inputs, temperature, stable_diff, image_cap, top_news, google_search, bored, file_output]
437
  )
438
+
439
+ with gr.Accordion("Use Langchain to build and add your own Plugins to this ChatGPT WebUI", open=False ):
440
+ gr.Markdown(add_plugin_steps)
441
 
442
+ demo.queue().launch(debug=True) # height = '1000'