ruslanmv committed
Commit 94d49c9 · verified · 1 Parent(s): 155e743

Update app.py

Files changed (1):
  app.py +120 -1
app.py CHANGED
@@ -1,3 +1,120 @@
+
+import streamlit as st
+import requests
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Page configuration
+st.set_page_config(
+    page_title="DeepSeek Chatbot - ruslanmv.com",
+    page_icon="🤖",
+    layout="centered"
+)
+
+# Initialize session state for chat history
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Sidebar configuration
+with st.sidebar:
+    st.header("Model Configuration")
+    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")
+
+    # Dropdown to select model
+    model_options = [
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Zero"
+    ]
+    selected_model = st.selectbox("Select Model", model_options, index=0)
+
+    system_message = st.text_area(
+        "System Message",
+        value="You are a friendly Chatbot created by ruslanmv.com",
+        height=100
+    )
+
+    max_tokens = st.slider(
+        "Max Tokens",
+        1, 4000, 512
+    )
+
+    temperature = st.slider(
+        "Temperature",
+        0.1, 4.0, 0.7
+    )
+
+    top_p = st.slider(
+        "Top-p",
+        0.1, 1.0, 0.9
+    )
+
+# Function to query the Hugging Face API
+def query(payload, api_url):
+    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
+    logger.info(f"Sending request to {api_url} with payload: {payload}")
+    response = requests.post(api_url, headers=headers, json=payload)
+    logger.info(f"Received response: {response.status_code}, {response.text}")
+    return response.json()
+
+# Chat interface
+st.title("🤖 DeepSeek Chatbot")
+st.caption("Powered by Hugging Face Inference API - Configure in sidebar")
+
+# Display chat history
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+# Handle input
+if prompt := st.chat_input("Type your message..."):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    try:
+        with st.spinner("Generating response..."):
+            # Prepare the payload for the API
+            payload = {
+                "inputs": prompt,
+                "parameters": {
+                    "max_new_tokens": max_tokens,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "return_full_text": False
+                }
+            }
+
+            # Dynamically construct the API URL based on the selected model
+            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
+            logger.info(f"Selected model: {selected_model}, API URL: {api_url}")
+
+            # Query the Hugging Face API using the selected model
+            output = query(payload, api_url)
+
+            # Handle API response
+            if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
+                assistant_response = output[0]['generated_text']
+                logger.info(f"Generated response: {assistant_response}")
+
+                with st.chat_message("assistant"):
+                    st.markdown(assistant_response)
+
+                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
+            else:
+                logger.error(f"Unexpected API response: {output}")
+                st.error("Error: Unable to generate a response. Please try again.")
+
+    except Exception as e:
+        logger.error(f"Application Error: {str(e)}", exc_info=True)
+        st.error(f"Application Error: {str(e)}")
+
+
+''''
 import streamlit as st
 import requests
 
@@ -99,4 +216,6 @@ if prompt := st.chat_input("Type your message..."):
                 st.error("Error: Unable to generate a response. Please try again.")
 
     except Exception as e:
-        st.error(f"Application Error: {str(e)}")
+        st.error(f"Application Error: {str(e)}")
+
+'''
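
For quick verification outside Streamlit, the request that query() builds can be reproduced with plain requests. This is a minimal sketch, not part of the commit: it assumes the token is available in an HF_TOKEN environment variable (the app itself reads st.secrets["HF_TOKEN"]) and hard-codes the default model from the sidebar dropdown. The serverless Inference API may return an error object (for example while the model is still loading) instead of the expected list, which is the case the app's else branch reports.

# Standalone sketch (assumption: HF_TOKEN is set as an environment variable; not part of app.py).
import os
import requests

api_url = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
payload = {
    "inputs": "Hello, who are you?",
    "parameters": {
        "max_new_tokens": 512,
        "temperature": 0.7,
        "top_p": 0.9,
        "return_full_text": False,
    },
}

output = requests.post(api_url, headers=headers, json=payload).json()

# The app accepts only a list shaped like [{"generated_text": "..."}]; anything else
# (e.g. an error object) falls through to its "Unable to generate a response" branch.
if isinstance(output, list) and output and "generated_text" in output[0]:
    print(output[0]["generated_text"])
else:
    print("Unexpected response:", output)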
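
The chat history lives in st.session_state.messages as a list of role/content dicts, while the payload above sends only the latest prompt; the sidebar's system_message is collected but not referenced in the request. The sketch below is purely illustrative and not part of the commit: build_inputs is a hypothetical helper showing one way the stored history and system message could be folded into the single "inputs" string.

# Illustrative only: build_inputs is a hypothetical helper, not defined in app.py.
def build_inputs(system_message, messages, prompt):
    lines = [system_message]
    for m in messages:  # each entry: {"role": "user" or "assistant", "content": "..."}
        lines.append(f"{m['role']}: {m['content']}")
    lines.append(f"user: {prompt}")
    return "\n".join(lines)

history = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help you today?"},
]
print(build_inputs("You are a friendly Chatbot created by ruslanmv.com", history, "What is DeepSeek-R1?"))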