File size: 1,042 Bytes
4375924 091d2f9 4375924 091d2f9 cc5635f 4375924 091d2f9 7e6c523 091d2f9 4375924 091d2f9 4375924 091d2f9 4375924 091d2f9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 |
import streamlit as st
from huggingface_hub import InferenceClient
import os
# Hugging Face access token, read from the environment (None is acceptable
# for anonymous access to public endpoints).
hf_token = os.getenv("HF_TOKEN")

# Inference client bound to the Qwen2 7B model on the HF Inference API.
# NOTE(review): "Qwen/Qwen2-7B" is a base model — confirm it is served for
# the chat-completion task (instruct variants usually are).
client = InferenceClient(model="Qwen/Qwen2-7B", token=hf_token)
def get_chat_completion(message: str) -> str:
    """Send *message* as a single user turn and return the model's reply.

    Parameters
    ----------
    message : str
        The user's prompt text.

    Returns
    -------
    str
        The assistant's reply text on success, or a human-readable
        ``"Error: ..."`` string on any failure (network, auth, model).
    """
    try:
        response = client.chat_completion(
            messages=[{"role": "user", "content": message}],
            max_tokens=500,
            stream=False,
        )
        # Bug fix: with stream=False, chat_completion returns a single
        # ChatCompletionOutput object, not a list. The previous
        # response[0]['choices'][0]['message']['content'] access raised
        # before any text could be extracted; index .choices directly.
        return response.choices[0].message.content
    except Exception as e:
        # Broad catch is deliberate: surface any failure to the UI as a
        # text message instead of crashing the Streamlit app.
        return f"Error: {e}"
# --- Streamlit page layout ---
st.title("Chat with Hugging Face Model")

# Single-turn input box; the model is queried only when "Send" is pressed.
user_input = st.text_input("Enter your message:")

send_clicked = st.button("Send")
if send_clicked:
    if not user_input:
        # Empty submission: prompt the user instead of calling the model.
        st.write("Please enter a message.")
    else:
        # Fetch the model's reply and render it beneath the input.
        reply = get_chat_completion(user_input)
        st.write("**Response:**")
        st.write(reply)
|