import streamlit as st
from openai import OpenAI
from typing import Iterator
import os
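
# Tracing: register an OpenTelemetry tracer provider for Arize Phoenix and
# auto-instrument the OpenAI client, so every chat completion below is traced
# to the collector endpoint configured via environment variables.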
from phoenix.otel import register

tracer_provider = register(
    project_name=os.getenv('PHOENIX_PROJECT_NAME'),
    endpoint=os.getenv('PHOENIX_COLLECTOR_ENDPOINT'),
)

from openinference.instrumentation.openai import OpenAIInstrumentor

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

# Configure page settings
st.set_page_config(
    page_title="LLM Taiwan Chat",
    page_icon="💬",
    layout="centered"
)

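# English gloss of the notice below: "Privacy statement: by using this chat
# service you agree that your conversations may be used to improve service
# quality and as material for system training and evaluation; please do not
# disclose any personal information in the conversation."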
# Display privacy notice
st.markdown("""
> **隱私權聲明**
>
> 使用本聊天服務即表示您同意:
> - 您的對話內容可能被用於改善服務品質
> - 對話紀錄可能作為系統訓練與評估的素材
> - 請勿在對話中透露任何個人隱私資訊
""")
st.markdown("---")

# Initialize session state for chat history and system prompt
if "messages" not in st.session_state:
    st.session_state.messages = []
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = ""
if "temperature" not in st.session_state:
    st.session_state.temperature = 0.2
if "top_p" not in st.session_state:
    st.session_state.top_p = 0.95

def stream_chat(prompt: str) -> Iterator[str]:
    """Stream chat responses from the LLM API"""
    client = OpenAI(
        api_key=os.getenv('API_KEY'),
        base_url=os.getenv('API_BASE_URL')
    )
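
    # Build the request messages: the optional system prompt first, then the full
    # chat history from session state. The caller has already appended the latest
    # user turn to the history, so the `prompt` argument itself is not re-added here.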
    messages = []
    if st.session_state.system_prompt:
        messages.append({"role": "system", "content": st.session_state.system_prompt})
    messages.extend(st.session_state.messages)
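
    # Request a streaming completion so tokens can be rendered as they arrive,
    # using the model named by LLM_MODEL_NAME and the sampling settings chosen in the UI.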
    stream = client.chat.completions.create(
        messages=messages,
        model=os.getenv('LLM_MODEL_NAME'),
        stream=True,
        temperature=st.session_state.temperature,
        top_p=st.session_state.top_p
    )
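
    # Yield each content delta as it arrives; chunks without content (e.g. the
    # final stop chunk) are skipped.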
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content

def clear_chat_history():
    """Clear all chat messages and reset system prompt"""
    st.session_state.messages = []
    st.session_state.system_prompt = ""

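# Main page flow: clear-chat button, advanced options (system prompt and sampling
# sliders), replay of the stored history, then streaming of the new reply.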
def main():
    st.title("💬 LLM Taiwan Chat")

    # Add a clear chat button with custom styling
    col1, col2 = st.columns([6, 1])
    with col2:
        if st.button("🗑️", type="secondary", use_container_width=True):
            clear_chat_history()
            st.rerun()

    # Advanced options in expander
    with st.expander("進階選項 ⚙️", expanded=False):
        # System prompt input
        system_prompt = st.text_area(
            "System Prompt 設定:",
            value=st.session_state.system_prompt,
            help="設定 system prompt 來定義 AI 助理的行為和角色。開始對話後將無法修改。",
            height=100,
            disabled=len(st.session_state.messages) > 0  # Read-only once the conversation has started
        )
        if not st.session_state.messages and system_prompt != st.session_state.system_prompt:
            st.session_state.system_prompt = system_prompt

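        # Sampling controls; the values are stored in session state and used by
        # stream_chat for subsequent requests.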
        st.session_state.temperature = st.slider(
            "Temperature",
            min_value=0.0,
            max_value=2.0,
            value=st.session_state.temperature,
            step=0.1,
            help="較高的值會使輸出更加隨機,較低的值會使其更加集中和確定。"
        )
        st.session_state.top_p = st.slider(
            "Top P",
            min_value=0.1,
            max_value=1.0,
            value=st.session_state.top_p,
            step=0.05,
            help="控制模型輸出的多樣性,較低的值會使輸出更加保守。"
        )

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Chat input
    if prompt := st.chat_input("輸入您的訊息..."):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.write(prompt)

        # Display assistant response with streaming
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""

            # Stream the response
            for response_chunk in stream_chat(prompt):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
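
            # Streaming is done: re-render without the "▌" cursor so the final
            # message is shown cleanly.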
            response_placeholder.markdown(full_response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})

if __name__ == "__main__":
    main()
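
# To run locally (the script filename "app.py" below is an assumption, not given
# in the source):
#   streamlit run app.py
# Environment variables read by this app: API_KEY, API_BASE_URL, LLM_MODEL_NAME,
# PHOENIX_PROJECT_NAME, PHOENIX_COLLECTOR_ENDPOINT.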