Divyansh12 committed • b1f46eb
Parent(s): a7866db

Update app.py

app.py CHANGED
@@ -12,8 +12,8 @@ llm = Llama.from_pretrained(
 )
 
 # Define the function to get responses from the model
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    messages = [{"role": "system", "content": system_message}]
+def respond(message, history):
+    messages = []
 
     for user_message, assistant_message in history:
         if user_message:
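For context, the closing parenthesis on line 12 belongs to the llm = Llama.from_pretrained(...) call named in the hunk header; the call itself is outside the diff. A minimal sketch of what such a call typically looks like in llama-cpp-python follows. The repo_id and filename values are hypothetical placeholders, not the values this Space actually uses.

# Sketch only: repo_id and filename are hypothetical placeholders.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="some-org/some-model-GGUF",  # hypothetical Hub repo with GGUF weights
    filename="*q4_k_m.gguf",             # hypothetical quantized weights file
)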
@@ -28,9 +28,9 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     response_stream = llm.create_chat_completion(
         messages=messages,
         stream=True,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
+        max_tokens=512, # Use a default value for simplicity
+        temperature=0.7, # Use a default value for simplicity
+        top_p=0.95 # Use a default value for simplicity
     )
 
     # Collect the response chunks
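The trailing context line "# Collect the response chunks" refers to lines 37-40, which this diff does not show. Since create_chat_completion(stream=True) in llama-cpp-python yields OpenAI-style chunks, the unshown loop plausibly looks like the sketch below; this is an assumption about code outside the diff, not the commit's actual text.

    # Sketch of the unshown collection loop, assuming OpenAI-style chunks
    # where generated text arrives under choices[0]["delta"]["content"]:
    response = ""
    for chunk in response_stream:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:  # the first chunk may carry only a role
            response += delta["content"]

    return response  # Return the full response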
@@ -41,32 +41,31 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     return response  # Return the full response
 
 # Streamlit UI
-st.title("Chatbot")
+st.title("Simple Chatbot")
 st.write("### Interact with the chatbot!")
 
-# User input
-system_message = st.text_input("System Message", value="You are a friendly Chatbot.")
+# User input field
 user_message = st.text_area("Your Message:")
-max_tokens = st.slider("Max New Tokens", min_value=1, max_value=2048, value=512)
-temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7)
-top_p = st.slider("Top-p (Nucleus Sampling)", min_value=0.1, max_value=1.0, value=0.95)
 
 # Chat history
 if 'history' not in st.session_state:
     st.session_state.history = []
 
 if st.button("Send"):
-    #
-    for user_msg, assistant_msg in st.session_state.history:
-        st.write(f"**User:** {user_msg}")
-        st.write(f"**Assistant:** {assistant_msg}")
+    if user_message: # Check if user has entered a message
+        # Get the response from the model
+        response = respond(user_message, st.session_state.history)
+
+        # Add user message and model response to history
+        st.session_state.history.append((user_message, response))
+
+        # Clear the input field after sending
+        st.experimental_rerun()
+
+# Display the chat history
+st.write("### Chat History")
+for user_msg, assistant_msg in st.session_state.history:
+    st.write(f"**User:** {user_msg}")
+    st.write(f"**Assistant:** {assistant_msg}")
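One note on the added Send handler: st.experimental_rerun() was deprecated in newer Streamlit releases in favor of st.rerun(), and a rerun by itself does not reset the st.text_area value, so the "Clear the input field" comment is optimistic. On a current Streamlit runtime the equivalent call would be:

st.rerun()  # replaces the deprecated st.experimental_rerun()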