BuddyChrist / app.py
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF"
# This repo ships quantized GGUF weights, so transformers needs an explicit
# gguf_file (requires the `gguf` package; the quant filename is assumed here).
gguf_file = "tinyllama-1.1b-chat-v0.3.Q4_K_M.gguf"

# Cache the model and tokenizer so Streamlit does not reload them on every rerun
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_name, gguf_file=gguf_file)
    model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file)
    # Set model to evaluation mode (disables dropout for inference)
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()

st.title("Buddy Christ Chatbot")
user_input = st.text_input("You:", "")

if user_input:
    # Encode the user input
    inputs = tokenizer.encode(user_input, return_tensors="pt", truncation=True, max_length=1000)
    # Generate a response; do_sample=True is required for temperature/top_k to take effect
    with torch.no_grad():
        response = model.generate(
            inputs,
            max_length=1000,
            do_sample=True,
            temperature=1.0,
            top_k=10,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode the generated tokens back to text
    response_text = tokenizer.decode(response[0], skip_special_tokens=True)
    # Display the response in Streamlit
    st.write("Buddy Christ:", response_text)
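
# --- Alternative: run the GGUF file natively on CPU ---
# transformers dequantizes GGUF weights back to full precision at load time,
# so a lighter CPU path is to serve the same file with llama-cpp-python.
# A minimal sketch, assuming llama-cpp-python and huggingface_hub are
# installed and that the quant filename below exists in the repo:
#
#     from llama_cpp import Llama
#
#     llm = Llama.from_pretrained(
#         repo_id="TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF",
#         filename="tinyllama-1.1b-chat-v0.3.Q4_K_M.gguf",  # assumed filename
#     )
#     out = llm(user_input, max_tokens=256, temperature=1.0, top_k=10)
#     st.write("Buddy Christ:", out["choices"][0]["text"])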