Upload 10 files
- app (1).py +128 -0
- app-v3-working-dup.py +121 -0
- app-work-only-1.py +214 -0
- app-working-v2.py +116 -0
- app_base.py +29 -0
- models.py +4 -0
- models1.py +22 -0
- old.py +109 -0
- old2.py +127 -0
- requirements (1).txt +2 -0
app (1).py
ADDED
@@ -0,0 +1,128 @@
import streamlit as st
import requests
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar configuration
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    system_message = st.text_area(
        "System Message",
        value="You are a friendly chatbot created by ruslanmv.com. Provide clear, accurate, and brief answers. Keep responses polite, engaging, and to the point. If unsure, politely suggest alternatives.",
        height=100
    )

    max_tokens = st.slider(
        "Max Tokens",
        10, 4000, 100
    )

    temperature = st.slider(
        "Temperature",
        0.1, 4.0, 0.3
    )

    top_p = st.slider(
        "Top-p",
        0.1, 1.0, 0.6
    )

# Function to query the Hugging Face API
def query(payload, api_url):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    logger.info(f"Sending request to {api_url} with payload: {payload}")
    response = requests.post(api_url, headers=headers, json=payload)
    logger.info(f"Received response: {response.status_code}, {response.text}")
    try:
        return response.json()
    except requests.exceptions.JSONDecodeError:
        logger.error(f"Failed to decode JSON response: {response.text}")
        return None

# Chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle input
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Prepare the payload for the API
            # Combine system message and user input into a single prompt
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
            payload = {
                "inputs": full_prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Dynamically construct the API URL based on the selected model
            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            logger.info(f"Selected model: {selected_model}, API URL: {api_url}")

            # Query the Hugging Face API using the selected model
            output = query(payload, api_url)

            # Handle API response
            if output is not None and isinstance(output, list) and len(output) > 0:
                if 'generated_text' in output[0]:
                    # Extract the assistant's response
                    assistant_response = output[0]['generated_text'].strip()

                    # Check for and remove duplicate responses
                    responses = assistant_response.split("\n</think>\n")
                    unique_response = responses[0].strip()

                    logger.info(f"Generated response: {unique_response}")

                    # Append response to chat only once
                    with st.chat_message("assistant"):
                        st.markdown(unique_response)

                    st.session_state.messages.append({"role": "assistant", "content": unique_response})
                else:
                    logger.error(f"Unexpected API response structure: {output}")
                    st.error("Error: Unexpected response from the model. Please try again.")
            else:
                logger.error(f"Empty or invalid API response: {output}")
                st.error("Error: Unable to generate a response. Please check the model and try again.")

    except Exception as e:
        logger.error(f"Application Error: {str(e)}", exc_info=True)
        st.error(f"Application Error: {str(e)}")
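For reference, a minimal standalone sketch of the request/response contract the app above assumes: the serverless Inference API endpoint returning a list of objects carrying a generated_text field, queried with the same payload shape. The token value and prompt below are placeholders, not part of the upload.

import requests

API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
HF_TOKEN = "hf_xxx"  # placeholder; the app reads this from st.secrets["HF_TOKEN"]

payload = {
    "inputs": "You are a friendly chatbot.\n\nUser: Hello!\nAssistant:",
    "parameters": {"max_new_tokens": 100, "temperature": 0.3, "top_p": 0.6, "return_full_text": False},
}
resp = requests.post(API_URL, headers={"Authorization": f"Bearer {HF_TOKEN}"}, json=payload)
data = resp.json()  # expected shape: [{"generated_text": "..."}]
print(data[0]["generated_text"] if isinstance(data, list) and data else data)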
app-v3-working-dup.py
ADDED
@@ -0,0 +1,121 @@
import streamlit as st
import requests
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar configuration
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    system_message = st.text_area(
        "System Message",
        value="You are a friendly chatbot created by ruslanmv.com. Provide clear, accurate, and brief answers. Keep responses polite, engaging, and to the point. If unsure, politely suggest alternatives.",
        height=100
    )

    max_tokens = st.slider(
        "Max Tokens",
        10, 4000, 100
    )

    temperature = st.slider(
        "Temperature",
        0.1, 4.0, 0.3
    )

    top_p = st.slider(
        "Top-p",
        0.1, 1.0, 0.6
    )

# Function to query the Hugging Face API
def query(payload, api_url):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    logger.info(f"Sending request to {api_url} with payload: {payload}")
    response = requests.post(api_url, headers=headers, json=payload)
    logger.info(f"Received response: {response.status_code}, {response.text}")
    try:
        return response.json()
    except requests.exceptions.JSONDecodeError:
        logger.error(f"Failed to decode JSON response: {response.text}")
        return None

# Chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle input
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Prepare the payload for the API
            # Combine system message and user input into a single prompt
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
            payload = {
                "inputs": full_prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Dynamically construct the API URL based on the selected model
            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            logger.info(f"Selected model: {selected_model}, API URL: {api_url}")
            print("payload", payload)
            # Query the Hugging Face API using the selected model
            output = query(payload, api_url)

            # Handle API response
            if output is not None and isinstance(output, list) and len(output) > 0:
                if 'generated_text' in output[0]:
                    assistant_response = output[0]['generated_text']
                    logger.info(f"Generated response: {assistant_response}")

                    with st.chat_message("assistant"):
                        st.markdown(assistant_response)

                    st.session_state.messages.append({"role": "assistant", "content": assistant_response})
                else:
                    logger.error(f"Unexpected API response structure: {output}")
                    st.error("Error: Unexpected response from the model. Please try again.")
            else:
                logger.error(f"Empty or invalid API response: {output}")
                st.error("Error: Unable to generate a response. Please check the model and try again.")

    except Exception as e:
        logger.error(f"Application Error: {str(e)}", exc_info=True)
        st.error(f"Application Error: {str(e)}")
app-work-only-1.py
ADDED
@@ -0,0 +1,214 @@
import streamlit as st
import requests

# Function to query the Hugging Face API
def query(payload, api_url):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    response = requests.post(api_url, headers=headers, json=payload)
    return response.json()

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
if "selected_model" not in st.session_state:
    st.session_state.selected_model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

# Sidebar configuration
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Zero"
    ]
    selected_model = st.selectbox("Select Model", model_options, index=model_options.index(st.session_state.selected_model))
    st.session_state.selected_model = selected_model

    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    max_tokens = st.slider(
        "Max Tokens",
        1, 4000, 512
    )

    temperature = st.slider(
        "Temperature",
        0.1, 4.0, 0.7
    )

    top_p = st.slider(
        "Top-p",
        0.1, 1.0, 0.9
    )

# Chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle input
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Prepare the payload for the API
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Query the Hugging Face API using the selected model
            api_url = f"https://api-inference.huggingface.co/models/{st.session_state.selected_model}"
            output = query(payload, api_url)

            # Handle API response
            if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
                assistant_response = output[0]['generated_text']

                with st.chat_message("assistant"):
                    st.markdown(assistant_response)

                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
            else:
                st.error("Error: Unable to generate a response. Please try again.")

    except Exception as e:
        st.error(f"Application Error: {str(e)}")

'''

import streamlit as st
import requests

# Hugging Face API URL (default model)
API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

# Function to query the Hugging Face API
def query(payload, api_url):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    response = requests.post(api_url, headers=headers, json=payload)
    return response.json()

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar configuration
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Zero"
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    max_tokens = st.slider(
        "Max Tokens",
        1, 4000, 512
    )

    temperature = st.slider(
        "Temperature",
        0.1, 4.0, 0.7
    )

    top_p = st.slider(
        "Top-p",
        0.1, 1.0, 0.9
    )

# Chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle input
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Prepare the payload for the API
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Query the Hugging Face API using the selected model
            output = query(payload, f"https://api-inference.huggingface.co/models/{selected_model}")

            # Handle API response
            if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
                assistant_response = output[0]['generated_text']

                with st.chat_message("assistant"):
                    st.markdown(assistant_response)

                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
            else:
                st.error("Error: Unable to generate a response. Please try again.")

    except Exception as e:
        st.error(f"Application Error: {str(e)}")

'''
app-working-v2.py
ADDED
@@ -0,0 +1,116 @@
import streamlit as st
import requests
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar configuration
with st.sidebar:
    st.header("Model Configuration")
    # st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        # "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        # "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        # "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        # "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    max_tokens = st.slider(
        "Max Tokens",
        1, 4000, 512
    )

    temperature = st.slider(
        "Temperature",
        0.1, 4.0, 0.7
    )

    top_p = st.slider(
        "Top-p",
        0.1, 1.0, 0.9
    )

# Function to query the Hugging Face API
def query(payload, api_url):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    logger.info(f"Sending request to {api_url} with payload: {payload}")
    response = requests.post(api_url, headers=headers, json=payload)
    logger.info(f"Received response: {response.status_code}, {response.text}")
    return response.json()

# Chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle input
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Prepare the payload for the API
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Dynamically construct the API URL based on the selected model
            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            logger.info(f"Selected model: {selected_model}, API URL: {api_url}")

            # Query the Hugging Face API using the selected model
            output = query(payload, api_url)

            # Handle API response
            if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
                assistant_response = output[0]['generated_text']
                logger.info(f"Generated response: {assistant_response}")

                with st.chat_message("assistant"):
                    st.markdown(assistant_response)

                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
            else:
                logger.error(f"Unexpected API response: {output}")
                st.error("Error: Unable to generate a response. Please try again.")

    except Exception as e:
        logger.error(f"Application Error: {str(e)}", exc_info=True)
        st.error(f"Application Error: {str(e)}")
app_base.py
ADDED
@@ -0,0 +1,29 @@
import streamlit as st
import requests

# Hugging Face API URL
API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

# Function to query the Hugging Face API
def query(payload):
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# Streamlit app
st.title("DeepSeek-R1-Distill-Qwen-32B Chatbot")

# Input text box
user_input = st.text_input("Enter your message:")

if user_input:
    # Query the Hugging Face API with the user input
    payload = {"inputs": user_input}
    output = query(payload)

    # Display the output
    if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
        st.write("Response:")
        st.write(output[0]['generated_text'])
    else:
        st.write("Error: Unable to generate a response. Please try again.")
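All of the requests-based variants above read the API token via st.secrets['HF_TOKEN']. Running them outside the Space therefore assumes a local Streamlit secrets file (on the deployed Space the token would presumably be supplied through the Space's own secrets settings); a minimal sketch with a placeholder value:

# .streamlit/secrets.toml  (placeholder value, not part of this upload)
HF_TOKEN = "hf_xxx"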
models.py
ADDED
@@ -0,0 +1,4 @@
import os

def get_hf_api():
    return "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1"
models1.py
ADDED
@@ -0,0 +1,22 @@
# models.py

import gradio as gr
import spaces
import types

# Create a local "transformers_gradio" object to mimic the missing package
transformers_gradio = types.SimpleNamespace(registry="huggingface")

# Now the gradio.load calls will work as if we had `import transformers_gradio`
demo = gr.load(name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", src=transformers_gradio.registry)
demo = gr.load(name="deepseek-ai/DeepSeek-R1", src=transformers_gradio.registry)
demo = gr.load(name="deepseek-ai/DeepSeek-R1-Zero", src=transformers_gradio.registry)

# Example of using spaces (assuming `spaces.GPU()` is valid in your environment)
demo.fn = spaces.GPU()(demo.fn)

for fn in demo.fns.values():
    fn.api_name = False

if __name__ == "__main__":
    demo.launch()
old.py
ADDED
@@ -0,0 +1,109 @@
import streamlit as st
from models import demo  # Import the demo object from models.py

# --- Streamlit App Configuration ---
st.set_page_config(
    page_title="DeepSeek Chatbot",
    page_icon="🤖",
    layout="wide"
)

# --- App Title and Description ---
st.title("DeepSeek Chatbot")
st.markdown("""
Created by [ruslanmv.com](https://ruslanmv.com/)
This is a demo of different DeepSeek models. Select a model, type your message, and click "Submit".
You can also adjust optional parameters like system message, max new tokens, temperature, and top-p.
""")

# --- Sidebar for Model Selection and Parameters ---
with st.sidebar:
    st.header("Options")
    model_choice = st.radio(
        "Choose a Model",
        options=["DeepSeek-R1-Distill-Qwen-32B", "DeepSeek-R1", "DeepSeek-R1-Zero"],
        index=1  # Default to "DeepSeek-R1"
    )

    with st.expander("Optional Parameters", expanded=False):
        system_message = st.text_area(
            "System Message",
            value="You are a friendly Chatbot created by ruslanmv.com",
            height=100
        )
        max_new_tokens = st.slider(
            "Max New Tokens",
            min_value=1,
            max_value=4000,
            value=200
        )
        temperature = st.slider(
            "Temperature",
            min_value=0.10,
            max_value=4.00,
            value=0.70
        )
        top_p = st.slider(
            "Top-p (nucleus sampling)",
            min_value=0.10,
            max_value=1.00,
            value=0.90
        )

# --- Chatbot Function ---
def chatbot(input_text, history, model_choice, system_message, max_new_tokens, temperature, top_p):
    # Create payload for the model
    payload = {
        "messages": [{"role": "user", "content": input_text}],
        "system": system_message,
        "max_tokens": max_new_tokens,
        "temperature": temperature,
        "top_p": top_p
    }

    # Run inference using the selected model
    try:
        response = demo(payload)  # Use the demo object directly
        if isinstance(response, dict) and "choices" in response:
            assistant_response = response["choices"][0]["message"]["content"]
        else:
            assistant_response = "Unexpected model response format."
    except Exception as e:
        assistant_response = f"Error: {str(e)}"

    # Append user and assistant messages to history
    history.append((input_text, assistant_response))
    return history

# --- Chat History Management ---
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# --- Chat Interface ---
st.header("Chat with DeepSeek")

# Display chat history
for user_msg, assistant_msg in st.session_state.chat_history:
    with st.chat_message("user"):
        st.write(user_msg)
    with st.chat_message("assistant"):
        st.write(assistant_msg)

# Input for new message
input_text = st.chat_input("Type your message here...")

# Handle new message submission
if input_text:
    # Update chat history
    st.session_state.chat_history = chatbot(
        input_text,
        st.session_state.chat_history,
        model_choice,
        system_message,
        max_new_tokens,
        temperature,
        top_p
    )

    # Rerun the app to display the updated chat history
    st.rerun()
old2.py
ADDED
@@ -0,0 +1,127 @@
# app.py
import streamlit as st
from huggingface_hub import InferenceClient
from datetime import datetime

# Configure page
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="expanded"
)

# Initialize session state
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar controls
with st.sidebar:
    st.title("🤖 Chatbot Settings")
    st.markdown("Created by [ruslanmv.com](https://ruslanmv.com/)")

    # Model selection
    selected_model = st.selectbox(
        "Choose Model",
        options=[
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "deepseek-ai/DeepSeek-R1",
            "deepseek-ai/DeepSeek-R1-Zero"
        ],
        index=0
    )

    # System message
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    # Generation parameters
    max_new_tokens = st.slider(
        "Max new tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=50
    )

    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=1.0,
        step=0.1
    )

    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )

    # Optional HF Token
    hf_token = st.text_input(
        "HuggingFace Token (optional)",
        type="password",
        help="Enter your HuggingFace token if required for model access"
    )

# Main chat interface
st.title("💬 DeepSeek Chatbot")
st.caption("🚀 A conversational AI powered by DeepSeek models")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"_{message['timestamp']}_")

# Chat input and processing
if prompt := st.chat_input("Type your message..."):
    # Add user message to history
    st.session_state.messages.append({
        "role": "user",
        "content": prompt,
        "timestamp": datetime.now().strftime("%H:%M:%S")
    })

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)
        st.caption(f"_{st.session_state.messages[-1]['timestamp']}_")

    # Create full prompt with system message
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"

    # Create client and generate response
    client = InferenceClient(model=selected_model, token=hf_token)

    # Display assistant response
    with st.chat_message("assistant"):
        response = st.write_stream(
            client.text_generation(
                full_prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True
            )
        )
        timestamp = datetime.now().strftime("%H:%M:%S")
        st.caption(f"_{timestamp}_")

    # Add assistant response to history
    st.session_state.messages.append({
        "role": "assistant",
        "content": response,
        "timestamp": timestamp
    })

# Optional debug information
# st.sidebar.markdown("---")
# st.sidebar.json(st.session_state.messages)
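As a point of comparison with the requests-based variants, old2.py streams tokens through huggingface_hub's InferenceClient and renders them with st.write_stream. A minimal non-Streamlit sketch of that same streaming call, assuming the huggingface_hub version pinned in the requirements file below; the model, prompt, and token are placeholders:

from huggingface_hub import InferenceClient

client = InferenceClient(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    token="hf_xxx",  # placeholder token
)
# With stream=True (and no details), text_generation yields text chunks as they arrive.
for chunk in client.text_generation(
    "You are a friendly Chatbot.\n\nUser: Hello!\nAssistant:",
    max_new_tokens=128,
    temperature=0.7,
    top_p=0.9,
    stream=True,
):
    print(chunk, end="", flush=True)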
requirements (1).txt
ADDED
@@ -0,0 +1,2 @@
huggingface_hub==0.25.2
transformers
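Note that the Streamlit apps above also import streamlit and requests, and models1.py imports gradio and spaces, none of which are listed here; a hedged sketch of what a fuller requirements list might look like (extra entries are assumptions, versions left unpinned):

streamlit
requests
huggingface_hub==0.25.2
transformers
gradio
spaces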