SansarK committed
Commit bd93483 · verified · 1 Parent(s): b4896f1

Update app.py

Files changed (1): app.py (+65 -148)
app.py CHANGED
@@ -1,48 +1,17 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import json
-import uuid
-from PIL import Image
 from bs4 import BeautifulSoup
 import requests
-import random
 from transformers import LlavaProcessor, LlavaForConditionalGeneration, TextIteratorStreamer
 from threading import Thread
-import re
-import time
 import torch
-import cv2
-from gradio_client import Client, file
-
-def image_gen(prompt):
-    client = Client("KingNish/Image-Gen-Pro")
-    return client.predict("Image Generation",None, prompt, api_name="/image_gen_pro")
 
 model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
-
 processor = LlavaProcessor.from_pretrained(model_id)
-
 model = LlavaForConditionalGeneration.from_pretrained(model_id)
 model.to("cpu")
 
-
-def llava(message, history):
-    if message["files"]:
-        image = message["files"][0]
-    else:
-        for hist in history:
-            if type(hist[0])==tuple:
-                image = hist[0][0]
-
-    txt = message["text"]
-
-    gr.Info("Analyzing image")
-    image = Image.open(image).convert("RGB")
-    prompt = f"<|im_start|>user <image>\n{txt}<|im_end|><|im_start|>assistant"
-
-    inputs = processor(prompt, image, return_tensors="pt")
-    return inputs
-
 def extract_text_from_webpage(html_content):
     soup = BeautifulSoup(html_content, 'html.parser')
     for tag in soup(["script", "style", "header", "footer"]):
@@ -51,7 +20,6 @@ def extract_text_from_webpage(html_content):
 
 def search(query):
     term = query
-    start = 0
     all_results = []
     max_chars_per_page = 8000
     with requests.Session() as session:
@@ -60,16 +28,14 @@ def search(query):
             headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
             params={"q": term, "num": 3, "udm": 14},
             timeout=5,
-            verify=None,
         )
         resp.raise_for_status()
         soup = BeautifulSoup(resp.text, "html.parser")
         result_block = soup.find_all("div", attrs={"class": "g"})
         for result in result_block:
-            link = result.find("a", href=True)
-            link = link["href"]
+            link = result.find("a", href=True)["href"]
             try:
-                webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, timeout=5, verify=False)
+                webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, timeout=5)
                 webpage.raise_for_status()
                 visible_text = extract_text_from_webpage(webpage.text)
                 if len(visible_text) > max_chars_per_page:
@@ -84,136 +50,87 @@ client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
 client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 
-
-func_caller = []
-
 # Define the main chat function
-def respond(message, history):
+def respond(question, history):
     func_caller = []
 
-    user_prompt = message
-    # Handle image processing
-    if message["files"]:
-        inputs = llava(message, history)
-        streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+    user_prompt = question
+    functions_metadata = [
+        {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
+        {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER", "parameters": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed prompt"}}, "required": ["prompt"]}}},
+    ]
 
-        thread = Thread(target=model.generate, kwargs=generation_kwargs)
-        thread.start()
-
-        buffer = ""
-        for new_text in streamer:
-            buffer += new_text
-            yield buffer
-    else:
-        functions_metadata = [
-            {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
-            {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER", "parameters": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed prompt"}}, "required": ["prompt"]}}},
-            {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "image generation prompt"}, "number_of_image": {"type": "integer", "description": "number of images to generate"}}, "required": ["query"]}}},
-            {"type": "function", "function": {"name": "image_qna", "description": "Answer question asked by user related to image", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "Question by user"}}, "required": ["query"]}}},
-        ]
+    for msg in history:
+        func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
+        func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
 
-        for msg in history:
-            func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
-            func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
-
-        message_text = message["text"]
-        func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
-
-        response = client_gemma.chat_completion(func_caller, max_tokens=200)
-        response = str(response)
-        try:
-            response = response[int(response.find("{")):int(response.rindex("</"))]
-        except:
-            response = response[int(response.find("{")):(int(response.rfind("}"))+1)]
-        response = response.replace("\\n", "")
-        response = response.replace("\\'", "'")
-        response = response.replace('\\"', '"')
-        response = response.replace('\\', '')
-        print(f"\n{response}")
+    func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {question}'})
 
-        try:
-            json_data = json.loads(str(response))
-            if json_data["name"] == "web_search":
-                query = json_data["arguments"]["query"]
-                gr.Info("Searching Web")
-                web_results = search(query)
-                gr.Info("Extracting relevant Info")
-                web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
-                messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
-                for msg in history:
-                    messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
-                    messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
-                messages+=f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
-                stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-                output = ""
-                for response in stream:
-                    if not response.token.text == "<|im_end|>":
-                        output += response.token.text
-                        yield output
-            elif json_data["name"] == "image_generation":
-                query = json_data["arguments"]["query"]
-                try:
-                    number_of_image = json_data["arguments"]["number_of_image"]
-                except:
-                    number_of_image = 1
-                gr.Info("Generating Image, Please wait 10 sec...")
-                image = image_gen(f"{str(query)}")
-                yield gr.Image(image[1])
-            elif json_data["name"] == "image_qna":
-                inputs = llava(message, history)
-                streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-                generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
-
-                thread = Thread(target=model.generate, kwargs=generation_kwargs)
-                thread.start()
+    response = client_gemma.chat_completion(func_caller, max_tokens=200)
+    response = str(response)
+    try:
+        response = response[int(response.find("{")):int(response.rindex("</"))]
+    except:
+        response = response[int(response.find("{")):(int(response.rfind("}"))+1)]
+    response = response.replace("\\n", "")
+    response = response.replace("\\'", "'")
+    response = response.replace('\\"', '"')
+    response = response.replace('\\', '')
+    print(f"\n{response}")
 
-                buffer = ""
-                for new_text in streamer:
-                    buffer += new_text
-                    yield buffer
-            else:
-                messages = f"<|start_header_id|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|end_header_id|>"
-                for msg in history:
-                    messages += f"\n<|start_header_id|>user\n{str(msg[0])}<|end_header_id|>"
-                    messages += f"\n<|start_header_id|>assistant\n{str(msg[1])}<|end_header_id|>"
-                messages+=f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
-                stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-                output = ""
-                for response in stream:
-                    if not response.token.text == "<|eot_id|>":
-                        output += response.token.text
-                        yield output
-        except:
-            messages = f"<|start_header_id|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|end_header_id|>"
+    try:
+        json_data = json.loads(str(response))
+        if json_data["name"] == "web_search":
+            query = json_data["arguments"]["query"]
+            web_results = search(query)
+            web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
+            messages = f"system\nYou are OpenCHAT mini a helpful assistant. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
             for msg in history:
-                messages += f"\n<|start_header_id|>user\n{str(msg[0])}<|end_header_id|>"
-                messages += f"\n<|start_header_id|>assistant\n{str(msg[1])}<|end_header_id|>"
-            messages+=f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
+                messages += f"\nuser\n{str(msg[0])}"
+                messages += f"\nassistant\n{str(msg[1])}"
+            messages += f"\nuser\n{question}\nweb_result\n{web2}\nassistant\n"
+            stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
+            output = ""
+            for response in stream:
+                if not response.token.text == "":
+                    output += response.token.text
+                    yield output
+        else:
+            messages = f"system\nYou are OpenCHAT mini a helpful assistant. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
+            for msg in history:
+                messages += f"\nuser\n{str(msg[0])}"
+                messages += f"\nassistant\n{str(msg[1])}"
+            messages += f"\nuser\n{question}\nassistant\n"
             stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
             output = ""
             for response in stream:
-                if not response.token.text == "<|eot_id|>":
+                if not response.token.text == "":
                     output += response.token.text
                     yield output
+    except:
+        messages = f"system\nYou are OpenCHAT mini a helpful assistant. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
+        for msg in history:
+            messages += f"\nuser\n{str(msg[0])}"
+            messages += f"\nassistant\n{str(msg[1])}"
+        messages += f"\nuser\n{question}\nassistant\n"
+        stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
+        output = ""
+        for response in stream:
+            if not response.token.text == "":
+                output += response.token.text
+                yield output
 
 # Create the Gradio interface
-demo = gr.ChatInterface(
+demo = gr.Interface(
     fn=respond,
-    chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
+    inputs=gr.inputs.Textbox(label="Question"),
+    outputs=gr.outputs.Textbox(label="Response"),
     description ="# OpenGPT 4o mini\n ### You can engage in chat, generate images, perform web searches, and Q&A with images.",
-    textbox=gr.MultimodalTextbox(),
-    multimodal=True,
-    concurrency_limit=200,
     examples=[
-        {"text": "Hy, who are you?",},
-        {"text": "What's the current price of Bitcoin",},
-        {"text": "Search and Tell me what's the release date of llama 3 400b",},
-        {"text": "Create A Beautiful image of Effiel Tower at Night",},
-        {"text": "Write me a Python function to calculate the first 10 digits of the fibonacci sequence.",},
-        {"text": "What's the colour of car in given image", "files": ["./car1.png"]},
-        {"text": "Read what's written on paper", "files": ["./paper_with_text.png"]},
+        {"question": "Hi, who are you?"},
+        {"question": "What's the current price of Bitcoin?"},
+        {"question": "Search and tell me what's the release date of llama 3 400b."},
+        {"question": "Write me a Python function to calculate the first 10 digits of the Fibonacci sequence."},
     ],
-    cache_examples=False,
 )
 demo.launch(show_error=True)
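
Aside: the rewritten respond() asks the model to wrap its tool choice in <functioncall> tags, then recovers the JSON by plain string slicing. A minimal standalone sketch of that parsing step, with a hypothetical model output string (no Inference API call is made; the slicing mirrors the try/except in respond() above):

import json

# Hypothetical raw model output in the <functioncall> format requested by the [SYSTEM] prompt.
raw = '<functioncall> { "name": "web_search", "arguments": { "query": "llama 3 400b release date" } } </functioncall>'

# Slice from the first "{" to the closing "</" tag when present,
# otherwise fall back to the last "}" (rindex raises ValueError on a miss).
try:
    payload = raw[raw.find("{"):raw.rindex("</")]
except ValueError:
    payload = raw[raw.find("{"):raw.rfind("}") + 1]

json_data = json.loads(payload)
print(json_data["name"])                # -> web_search
print(json_data["arguments"]["query"])  # -> llama 3 400b release date

If the reply contains no parseable JSON at all, json.loads raises, which is why respond() wraps the whole dispatch in an outer try/except that falls back to a plain chat completion.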