Spaces:
Runtime error
Runtime error
clementrof
committed on
Commit
•
963e020
1
Parent(s):
80176a0
Upload folder using huggingface_hub
Browse files- README.md +3 -9
- app.py +34 -0
- app2.py +26 -0
- app3.py +225 -0
- app4.py +194 -0
- app5.py +192 -0
- avatar.jpg +0 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,12 +1,6 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: pink
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 4.
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: Chat_prompt
|
3 |
+
app_file: app5.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
+
sdk_version: 4.13.0
|
|
|
|
|
6 |
---
|
|
|
|
app.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

import gradio as gr
import openai

# SECURITY FIX: the original committed a live-looking OpenAI secret key to
# source control (rotate that key!). The key is now read from the
# environment. BUG FIX: the original only stored the key in an unused
# module variable and never assigned `openai.api_key`, so every API call
# ran unauthenticated.
openai_api_key = os.environ.get("OPENAI_API_KEY", "")
openai.api_key = openai_api_key
|
6 |
+
|
7 |
+
|
8 |
+
def chatbot(question):
    """Answer a single free-form question with gpt-3.5-turbo-16k.

    Stateless: each call sends only the current question (no history).
    Returns the assistant's reply text.
    """
    message_log = [
        {"role": "user", "content": question}
    ]

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=message_log,
        max_tokens=8800,          # reply budget; 16k-context model allows it
        request_timeout=35,       # seconds before the client gives up
        stop=None,
        temperature=0.9           # fairly creative answers
    )

    # Only the first (and only) completion choice is used.
    return response.choices[0].message.content
|
23 |
+
|
24 |
+
# Create the Gradio interface: one multi-line textbox in, plain text out,
# backed by the stateless `chatbot` function above.
iface = gr.Interface(
    fn=chatbot,
    inputs=gr.components.Textbox(lines=7, placeholder="Enter your question here"),
    outputs="text",
    title="Frost AI ChatBot: Your Knowledge Companion Powered-by ChatGPT",
    description="Ask any question about rahasak research papers"
)

# Launch the Gradio interface with a public share link.
iface.launch(share=True)
|
app2.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

import openai
import gradio as gr

# SECURITY FIX: the original committed a live-looking OpenAI secret key to
# source control (rotate that key!). Read it from the environment instead.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
|
5 |
+
|
6 |
+
def predict(message, history):
    """Stream a gpt-3.5-turbo reply for `message` given chat `history`.

    `history` is a list of (user, assistant) pairs as supplied by
    gr.ChatInterface; it is flattened into the OpenAI messages format.
    Yields the growing partial reply after each streamed chunk so the UI
    updates token by token.
    """
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human })
        history_openai_format.append({"role": "assistant", "content":assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages= history_openai_format,
        temperature=1.0,
        stream=True
    )

    # Accumulate streamed deltas; the final chunk carries an empty delta,
    # which the length check skips.
    partial_message = ""
    for chunk in response:
        if len(chunk['choices'][0]['delta']) != 0:
            partial_message = partial_message + chunk['choices'][0]['delta']['content']
            yield partial_message

gr.ChatInterface(predict).launch()
|
app3.py
ADDED
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
import os
import time

import gradio as gr
import requests

# SECURITY FIX: the original hard-coded the RunPod API token in source
# control (rotate it!); it is now read from the environment. Also removed
# the duplicate `import requests`.
token = os.environ.get("RUNPOD_TOKEN", "")
|
11 |
+
|
12 |
+
|
13 |
+
|
14 |
+
|
15 |
+
##############################################################
|
16 |
+
#################################################
|
17 |
+
|
18 |
+
def SD_call(image_prompt, age, color, hair_color):
    """Generate one txt2img picture on a RunPod Stable Diffusion endpoint.

    Interpolates the arguments into a fixed prompt template, POSTs the job
    to the serverless /runsync endpoint, polls while the job is IN_QUEUE,
    then base64-decodes the first returned image to a local PNG.

    Returns the saved image path on success, or an error string.
    """

    serverless_api_id = '3g77weiulabzuk'
    # Define the URL you want to send the request to
    url = f"https://api.runpod.ai/v2/{serverless_api_id}/runsync"

    # Define your custom headers
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }

    # Define your data (this could also be a JSON payload)
    print("SD_processing")
    data = {
        "input": {
            "api": {
                "method": "POST",
                "endpoint": "/sdapi/v1/txt2img"
            },
            "payload": {
                "override_settings": {
                    "sd_model_checkpoint": "CyberRealistic",
                    "sd_vae": ""
                },
                "override_settings_restore_afterwards": True,
                "refiner_checkpoint": "",
                "refiner_switch_at": 0.8,
                "prompt": f"masterpiece, best quality, 8k, (looking at viewer:1.1), gorgeous, hot, seductive, {age} years old american {color} woman, (eye contact:1.1), beautiful face, hyper detailed, best quality, ultra high res, {hair_color} hair,blue eyes, photorealistic, high resolution, detailed, raw photo, 1girl,{image_prompt} ",
                "negative_prompt": "EasyNegative, fat, paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, ((monochrome)), ((grayscale)), bad anatomy, text, error, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, poorly drawn face, bad proportions, gross proportions, ng_deepnegative_v1_75t, badhandsv5-neg, clothes",
                "seed": -1,
                "batch_size": 1,
                "steps": 30,
                "cfg_scale": 7,
                "width": 520,
                "height": 520,
                "sampler_name": "DPM++ SDE Karras",
                "sampler_index": "DPM++ SDE Karras",
                "restore_faces": False
            }
        }
    }

    # Send the POST request with headers and data
    response = requests.post(url, headers=headers, json=data)

    # Check the response
    if response.status_code == 200:
        response_data = response.json()
        msg_id = response_data['id']
        print("Message ID:", msg_id)

        # Poll the status until it's not 'IN_QUEUE'
        # NOTE(review): this polls f"{url}/{msg_id}" i.e. .../runsync/<id>,
        # while the sibling LLM_call polls .../status/<id> -- confirm which
        # endpoint RunPod actually expects here.
        while response_data['status'] == 'IN_QUEUE':
            time.sleep(5)  # Wait for 5 seconds before checking again
            response = requests.get(f"{url}/{msg_id}", headers=headers)

            try:
                response_data = response.json()
            except Exception as e:
                print("Error decoding JSON:", e)
                print("Response content:", response.text)
                break  # Exit the loop on JSON decoding error

        # Check if the response contains images
        if 'images' in response_data.get('output', {}):
            base64_image = response_data['output']['images'][0]
            image_bytes = base64.b64decode(base64_image)

            # Save the image to a file, named after the job id
            image_path = f"output_image_{msg_id}.png"
            with open(image_path, "wb") as img_file:
                img_file.write(image_bytes)

            print(f"Image downloaded successfully: {image_path}")

            return image_path

        else:
            return "No images found in the response."

    else:
        # Print error message
        return f"Error: {response.status_code} - {response.text}"
|
106 |
+
|
107 |
+
|
108 |
+
|
109 |
+
|
110 |
+
##############################################################
|
111 |
+
#################################################
|
112 |
+
|
113 |
+
|
114 |
+
|
115 |
+
def LLM_call(message_log, temperature):
    """Run a chat completion on the RunPod serverless LLM endpoint.

    Submits `message_log` (a list of {"role", "content"} dicts) as an
    async job via /run, then polls /status/<id> every 2 seconds until the
    job is COMPLETED or FAILED.

    BUG FIX: the `temperature` argument was previously ignored (the
    payload hard-coded 0.7); it is now passed through to the endpoint.

    Returns the job's 'output' payload on success, or the string
    "Output not available" if it failed or never completed.
    """
    serverless_api_id = '4whzcbwuriohqh'
    # Async endpoint: returns a job id immediately.
    url = f"https://api.runpod.ai/v2/{serverless_api_id}/run"

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }

    data = {
        "input": {
            "prompt": message_log,
            "max_new_tokens": 4500,
            "temperature": temperature,  # was hard-coded 0.7
            "top_k": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
            "batch_size": 8,
            "stop": ["</s>"]
        }
    }

    # Submit the job and remember its id for polling.
    call = requests.post(url, headers=headers, json=data)
    response_data = call.json()
    msg_id = response_data['id']
    print("Message ID:", msg_id)

    output = "Output not available"
    # Poll the API until the response is ready
    while True:
        response = requests.get(f"https://api.runpod.ai/v2/{serverless_api_id}/status/{msg_id}", headers=headers)

        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')

            if status == 'COMPLETED':
                output = response_data.get('output', 'Output not available')
                print("Response content:", output)
                break  # Exit the loop once the response is ready
            elif status == 'FAILED':
                error_message = response_data.get('error', 'Unknown error')
                print("Request failed. Reason:", error_message)
                break  # Exit the loop if the request failed
        else:
            print("Failed to get status. HTTP status code:", response.status_code)

        # Wait for a short time before polling again (e.g., 2 seconds)
        time.sleep(2)

    return output
|
179 |
+
|
180 |
+
|
181 |
+
|
182 |
+
|
183 |
+
|
184 |
+
def Chat_call(chat,temperature):
    """Wrap one user message in the fixed persona prompt and forward it
    to the RunPod LLM; returns the model's reply text.

    NOTE(review): the persona is sent with role "assistant" here, while
    app4/app5 send the same prompt with role "system" -- confirm which
    the endpoint expects.
    """
    message_log = [{"role": "assistant", "content": f"I want you to act as a woman named Julie. Use colloquial language. Julie is 25 years old, she is very hot physically, she loves watching football, she loves to talk about sex. Julie loves sex and being fucked. She never refuses anything. You'll answer to a man, call him baby. Write very short answers, 15 words."},]
    new_message = {"role": "user", "content": chat}
    message_log.append(new_message)
    response = LLM_call(message_log, temperature)
    print("From Runpod:" + response)
    return response
|
191 |
+
|
192 |
+
|
193 |
+
|
194 |
+
|
195 |
+
########################################################
|
196 |
+
#######################################################
|
197 |
+
|
198 |
+
|
199 |
+
|
200 |
+
def greet(chat, image_prompt, temperature, age, color, hair_color):
    """Dispatch a UI submission to image generation or chat.

    A non-blank image prompt takes priority and triggers SD_call;
    otherwise the message goes to Chat_call unless the temperature
    exceeds 3. Returns (answer_text, image_path_or_None).
    """
    # Image request wins over chat.
    if image_prompt.strip():
        generated = SD_call(image_prompt, age, color, hair_color)
        return "Image generated successfully", generated

    # Guard clause: reject out-of-range temperatures.
    if temperature > 3:
        return "You are too warm please try again", None

    return Chat_call(chat, temperature), None
|
210 |
+
|
211 |
+
# Main UI: free-text chat plus optional image prompt and appearance controls,
# all routed through `greet`.
# NOTE(review): the temperature slider maxes out at 2, so greet's
# "temperature > 3" guard looks unreachable from this UI -- confirm intent.
demo = gr.Interface(
    fn=greet,
    inputs=[
        "text",
        gr.Textbox(label="Image", lines=3),
        gr.Slider(label="Text temperature", value=1, minimum=0, maximum=2),
        gr.Slider(label="Age", value=22, minimum=18, maximum=75),
        gr.Dropdown(["asian", "white", "black", "latina"], label="Color", info="Will add more later!"),
        gr.Dropdown(["blond", "brune", "red", "white", "pink", "black", "blue", "green"], label="Hair color", info="Blond is cool")
    ],
    flagging_options=["blurry", "incorrect", "other"],
    outputs=[gr.Textbox(label="Answer", lines=3), gr.Image(label="Generated Image", type="filepath")],
)

demo.launch(share=True)
|
app4.py
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
import os
import time
import requests
import base64

import pymongo
import certifi

# SECURITY FIX: the original committed the RunPod token and full MongoDB
# credentials (user:password in the SRV URI) to source control -- rotate
# both. They are now read from the environment.
token = os.environ.get("RUNPOD_TOKEN", "")

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.

uri = os.environ.get("MONGO_URI", "")

# Create a new client and connect to the server
client = pymongo.MongoClient(uri, tlsCAFile=certifi.where())

# Send a ping to confirm a successful connection
try:
    client.admin.command('ping')
    print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
    print(e)

# Access your database
db = client.get_database('camila')
records = db.info
|
30 |
+
|
31 |
+
|
32 |
+
#########################################
|
33 |
+
#########################################
|
34 |
+
|
35 |
+
def LLM_call(message_log):
    """Run a chat completion on the RunPod serverless LLM endpoint.

    Submits `message_log` (a list of {"role", "content"} dicts) as an
    async job via /run, then polls /status/<id> every 2 seconds until the
    job is COMPLETED or FAILED. Returns the job's 'output' payload on
    success, or the string "Output not available" otherwise.
    """

    serverless_api_id = '4whzcbwuriohqh'
    # Async endpoint: returns a job id immediately.
    url = f"https://api.runpod.ai/v2/{serverless_api_id}/run"

    # Define your custom headers
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }

    # Define your data (this could also be a JSON payload)
    data = {
        "input": {
            "prompt": message_log,
            "max_new_tokens": 4500,
            "temperature": 0.7,
            "top_k": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
            "batch_size": 8,
            "stop": ["</s>"]
        }
    }

    # Send the POST request with headers and data
    call = requests.post(url, headers=headers, json=data)
    response_data = call.json()
    msg_id = response_data['id']
    print("Message ID:", msg_id)
    output = "Output not available"
    # Poll the API until the response is ready
    while True:
        # Get the status using the message ID
        response = requests.get(f"https://api.runpod.ai/v2/{serverless_api_id}/status/{msg_id}", headers=headers)

        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')

            if status == 'COMPLETED':
                # Access the 'output' directly from the response
                output = response_data.get('output', 'Output not available')
                print("Response content:", output)
                break  # Exit the loop once the response is ready
            elif status == 'FAILED':
                error_message = response_data.get('error', 'Unknown error')
                print("Request failed. Reason:", error_message)
                break  # Exit the loop if the request failed
        else:
            print("Failed to get status. HTTP status code:", response.status_code)

        # Wait for a short time before polling again (e.g., 2 seconds)
        time.sleep(2)

    return output
|
99 |
+
|
100 |
+
#########################################
|
101 |
+
#########################################
|
102 |
+
|
103 |
+
def print_like_dislike(x: gr.LikeData):
    """Log the position, content, and like/dislike flag of a rated message."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)
|
105 |
+
|
106 |
+
|
107 |
+
def add_text(history, text):
    """Append the user's message to the chat history and lock the textbox.

    Returns the extended history plus a cleared, non-interactive Textbox
    (re-enabled once the bot finishes responding).

    BUG FIX: the original appended ``((text[0], None),)`` -- a pair
    wrapped in an extra tuple that also kept only the FIRST CHARACTER of
    the message. Each history entry must be a (user_message, bot_message)
    pair, as the commented-out earlier version correctly did.
    """
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
def add_file(history, file):
    """Append an uploaded file to the history as a ((path,), None) entry.

    Gradio's Chatbot renders a 1-tuple of a file path as media.
    """
    entry = ((file.name,), None)
    return history + [entry]
|
120 |
+
|
121 |
+
#########################################
|
122 |
+
#########################################
|
123 |
+
|
124 |
+
def bot(history):
    """Generate the bot reply for the chat UI, streaming it character by
    character into the last history entry.

    Looks up a hard-coded user document (ID=2) in MongoDB; if found,
    builds a persona prompt plus the last 5 stored messages and calls the
    RunPod LLM; otherwise creates the document and greets with "Hello".
    """
    ID=2
    existing_user_doc = records.find_one({'ID': ID})
    if existing_user_doc:
        # Fixed persona system message for every conversation.
        message_log = [{"role": "system", "content": f"I want you to act as a woman named Julie. Use colloquial language. Julie is 25 years old, she is very hot physically, she loves watching football, she loves to talk about sex. Julie loves sex and being fucked. She never refuses anything. You'll answer to a man, call him baby. Write very short answers, 15 words."},
        ]
        messages = existing_user_doc['message']
        # Keep only the 5 most recent stored messages as context.
        if len(messages)>5:
            messages = messages[-5:]

        message_log.extend(messages)
        print (history)
        # NOTE(review): the entire history list is sent as the "content"
        # of one user message -- presumably only the latest user text was
        # intended; confirm against the LLM endpoint's expectations.
        new_message = {"role": "user", "content": history}
        message_log.append(new_message)


        response = LLM_call(message_log)

    else:
        # First contact: create an empty message log for this user.
        new_user_doc = {
            'ID': ID,
            'message': []
        }
        records.insert_one(new_user_doc)
        response = "Hello"

    # Stream the reply one character at a time for a typing effect.
    # NOTE(review): this mutates history[-1][1]; if entries are tuples
    # (as add_text builds them) this raises TypeError -- confirm Gradio
    # delivers them as lists at runtime.
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history
|
155 |
+
|
156 |
+
|
157 |
+
#########################################
|
158 |
+
#########################################
|
159 |
+
|
160 |
+
# Chat UI: a chatbot pane with avatar, a text entry row with an upload
# button, and like/dislike logging. Submitting text (or a file) appends it
# to the history, then streams the bot's reply.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        # (user_avatar, bot_avatar): bot uses avatar.jpg next to this file.
        avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.jpg"))),
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter, or upload an image",
            container=False,
        )
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

    # Text flow: add the message (textbox disabled), stream the bot reply,
    # then re-enable the textbox.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    # File flow: add the upload to the history, then stream the bot reply.
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

    chatbot.like(print_like_dislike, None, None)


demo.queue()
if __name__ == "__main__":
    demo.launch()
|
191 |
+
|
192 |
+
|
193 |
+
|
194 |
+
|
app5.py
ADDED
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
import random
import time
import os
import requests
import base64

import pymongo
import certifi

# SECURITY FIX: the original committed the RunPod token and full MongoDB
# credentials (user:password in the SRV URI) to source control -- rotate
# both. They are now read from the environment.
token = os.environ.get("RUNPOD_TOKEN", "")

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.

uri = os.environ.get("MONGO_URI", "")

# Create a new client and connect to the server
client = pymongo.MongoClient(uri, tlsCAFile=certifi.where())

# Send a ping to confirm a successful connection
try:
    client.admin.command('ping')
    print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
    print(e)

# Access your database
db = client.get_database('camila')
records = db.info
|
31 |
+
|
32 |
+
|
33 |
+
|
34 |
+
# Monotonically increasing conversation ID; each "Clear" starts a new one.
last_used_id = 0


def generate_unique_id():
    """Return the next sequential conversation ID (1, 2, 3, ...)."""
    global last_used_id
    last_used_id += 1
    return last_used_id


def clear_button_callback():
    """Start a fresh conversation when the Clear button is pressed.

    BUG FIX: the original accessed ``chatbot.response_fn`` and called
    ``msg.reset()`` / ``chatbot.reset()``, none of which exist on Gradio
    components, so every click raised AttributeError (a likely cause of
    the Space's "Runtime error"). The gr.ClearButton already clears the
    textbox and chatbot; this callback only needs to allocate a new ID so
    Chat_call starts a new MongoDB document.
    """
    generate_unique_id()
|
54 |
+
|
55 |
+
|
56 |
+
|
57 |
+
def save(ID, response, message_text):
    """Append the (user, assistant) exchange to the user's MongoDB doc.

    Pushes the user message then the assistant reply onto the 'message'
    array of the document with this ID, and returns `response` unchanged
    so callers can chain it.

    Note: removed the original's leading ``records.find_one`` call whose
    result was discarded -- a dead read costing one DB round trip
    (update_one on a missing ID is simply a no-op).
    """
    records.update_one({'ID': ID},
                       {'$push': {'message': {'role': 'user', 'content': f'{message_text}'}}})
    records.update_one({'ID': ID},
                       {'$push': {'message': {'role': 'assistant', 'content': f'{response}'}}})
    return response
|
65 |
+
|
66 |
+
|
67 |
+
|
68 |
+
#########################################
|
69 |
+
#########################################
|
70 |
+
|
71 |
+
def LLM_call(message_log):
    """Run a chat completion on the RunPod serverless LLM endpoint.

    Submits `message_log` (a list of {"role", "content"} dicts) as an
    async job via /run, then polls /status/<id> every 2 seconds until the
    job is COMPLETED or FAILED. Returns the job's 'output' payload on
    success, or the string "Output not available" otherwise.
    """

    serverless_api_id = '4whzcbwuriohqh'
    # Async endpoint: returns a job id immediately.
    url = f"https://api.runpod.ai/v2/{serverless_api_id}/run"

    # Define your custom headers
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }

    # Define your data (this could also be a JSON payload)
    data = {
        "input": {
            "prompt": message_log,
            "max_new_tokens": 4500,
            "temperature": 0.7,
            "top_k": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
            "batch_size": 8,
            "stop": ["</s>"]
        }
    }

    # Send the POST request with headers and data
    call = requests.post(url, headers=headers, json=data)
    response_data = call.json()
    msg_id = response_data['id']
    print("Message ID:", msg_id)
    output = "Output not available"
    # Poll the API until the response is ready
    while True:
        # Get the status using the message ID
        response = requests.get(f"https://api.runpod.ai/v2/{serverless_api_id}/status/{msg_id}", headers=headers)

        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')

            if status == 'COMPLETED':
                # Access the 'output' directly from the response
                output = response_data.get('output', 'Output not available')
                print("Response content:", output)
                break  # Exit the loop once the response is ready
            elif status == 'FAILED':
                error_message = response_data.get('error', 'Unknown error')
                print("Request failed. Reason:", error_message)
                break  # Exit the loop if the request failed
        else:
            print("Failed to get status. HTTP status code:", response.status_code)

        # Wait for a short time before polling again (e.g., 2 seconds)
        time.sleep(2)

    return output
|
135 |
+
|
136 |
+
|
137 |
+
|
138 |
+
def Chat_call(chat,prompt):
    """Answer one chat message using the UI-supplied system `prompt`.

    Uses the current `last_used_id` to locate the conversation document
    in MongoDB: if found, sends the prompt plus the last 5 stored
    messages and the new message to the RunPod LLM; otherwise creates the
    document and replies "Hello". The exchange is persisted via `save`.
    """

    # `global` declared but never reassigned here; ID is read-only.
    global last_used_id
    # Use the last used ID
    ID = last_used_id
    existing_user_doc = records.find_one({'ID': ID})
    if existing_user_doc:
        message_log = [{"role": "system", "content": f"{prompt}"},
                       ]
        messages = existing_user_doc['message']
        # Keep only the 5 most recent stored messages as context.
        if len(messages)>5:
            messages = messages[-5:]

        message_log.extend(messages)
        new_message = {"role": "user", "content": chat}
        message_log.append(new_message)


        response = LLM_call(message_log)

    else:
        # First contact for this ID: create an empty message log.
        new_user_doc = {
            'ID': ID,
            'message': []
        }
        records.insert_one(new_user_doc)
        response = "Hello"

    # Persist the exchange; save() returns `response` unchanged.
    response = save(ID, response, chat)
    return response
|
168 |
+
|
169 |
+
|
170 |
+
|
171 |
+
# Chat UI with an editable system prompt: Chat textbox + Prompt textbox +
# clear button. Submitting the chat message routes through Chat_call.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label = "Chat")
    prompt = gr.Textbox(label ="Prompt")
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history,prompt):
        """Handle a chat submission: get the bot reply, append the pair
        to the history, and clear both textboxes."""
        bot_message = Chat_call(message,prompt)
        chat_history.append((message, bot_message))
        time.sleep(2)
        return "", chat_history,""

    msg.submit(respond, [msg, chatbot, prompt], [msg, chatbot, prompt])

    # Add an event listener to the Chatbot to update the ID when the button is clicked
    clear.click(lambda: clear_button_callback())


if __name__ == "__main__":
    demo.launch()
|
191 |
+
|
192 |
+
|
avatar.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Pinned runtime dependencies for the Space.
# BUG FIX: gradio was pinned to 2.2.10 while README.md declares
# sdk_version: 4.13.0 and the code uses gradio-4 APIs
# (gr.ChatInterface, gr.ClearButton, gr.Chatbot.like) -- the mismatch
# breaks the Space at startup.
gradio==4.13.0
pymongo==3.12.2
certifi==2021.5.30
|