import os

import streamlit as st
from huggingface_hub import InferenceClient
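
# Read the Hugging Face API token from the environment; export HF_TOKEN before launching.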
hf_token = os.getenv("HF_TOKEN")
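
# chat_completion needs a conversational model; the base Qwen2-7B checkpoint is
# not chat-tuned, so the Instruct variant is assumed here.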
client = InferenceClient(
    model="Qwen/Qwen2-7B-Instruct",
    token=hf_token,
)
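
# Send a single user message to the model and return the reply text,
# or an error string if the request fails.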
def get_chat_completion(message):
    try:
        response = client.chat_completion(
            messages=[{"role": "user", "content": message}],
            max_tokens=500,
            stream=False,
        )
        # chat_completion returns a ChatCompletionOutput object, not a list:
        # the reply text lives at response.choices[0].message.content.
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"
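
# --- Streamlit UI: a title, a text box, and a send button ---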
st.title("Chat with Hugging Face Model")

user_input = st.text_input("Enter your message:")

if st.button("Send"):
    if user_input:
        response = get_chat_completion(user_input)
        st.write("**Response:**")
        st.write(response)
    else:
        st.write("Please enter a message.")