import streamlit as st
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from transformers import AutoTokenizer, AutoModelForCausalLM

st.set_page_config(page_title="Einfach.HugChat")
# List of selectable models.
# Note: AutoTokenizer.from_pretrained() / AutoModelForCausalLM.from_pretrained()
# expect full Hugging Face Hub repo IDs (e.g. "databricks/dolly-v2-12b") or local
# paths; the short display names below will not resolve on the Hub as written
# (and "fastchat-t5-3b" is an encoder-decoder model, so it would need
# AutoModelForSeq2SeqLM rather than AutoModelForCausalLM).
models = ["vicuna-13b", "koala-13b", "oasst-pythia-12b", "RWKV-4-Raven-14B",
          "alpaca-13b", "chatglm-6b", "llama-13b", "dolly-v2-12b", "stablelm-tuned-alpha-7b",
          "fastchat-t5-3b", "mpt-7b-chat"]
# Sidebar contents
with st.sidebar:
    st.title('EinfachChat')
    st.markdown('''
    ## About
    This app is an LLM-powered chatbot built using:
    - [Streamlit](https://streamlit.io/)
    - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co./OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model

    💡 Note: No API key required!
    ''')
    model_name = st.selectbox('Choose a model', models)
    add_vertical_space(5)
    st.write('Made with ❤️ by EinfachAlex')
# Initialise session state lists for the conversation history.
## 'generated' stores AI-generated responses
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["Hallo, wie kann ich dir helfen?"]
## 'past' stores the user's questions
if 'past' not in st.session_state:
    st.session_state['past'] = ['Hi!']
# Layout of input/response containers
input_container = st.container()
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
# User input
## Function that collects the user-provided prompt
def get_text():
    input_text = st.text_input("You: ", "", key="input")
    return input_text

## Applying the user input box
with input_container:
    user_input = get_text()
# Response output
## Function that takes the user prompt and produces an AI-generated response.
## Note: this reloads the tokenizer and model on every prompt, which is slow and
## memory-hungry for multi-billion-parameter models.
def generate_response(prompt, model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    inputs = tokenizer(prompt, return_tensors='pt')
    outputs = model.generate(**inputs)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
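# A possible optimisation (an illustrative sketch, not part of the original app):
# cache the tokenizer/model pair per model name with st.cache_resource (available
# in recent Streamlit versions) so weights are loaded once instead of on every
# prompt. generate_response() could then call load_model(model_name) instead of
# calling from_pretrained() directly.
@st.cache_resource
def load_model(model_name):
    # Load and cache one tokenizer/model pair per model name.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model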
## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
    if user_input:
        response = generate_response(user_input, model_name)
        st.session_state.past.append(user_input)
        st.session_state.generated.append(response)

    if st.session_state['generated']:
        for i in range(len(st.session_state['generated'])):
            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
            message(st.session_state["generated"][i], key=str(i))