StanceEval / app.py
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("arabic-text-stanceEvalV1")
model = AutoModelForCausalLM.from_pretrained("arabic-text-stanceEvalV1")
def generate_text(prompt, max_length=50):
    # Tokenize the prompt and generate a continuation with the loaded model,
    # passing the full tokenizer output so the attention mask is included
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
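# Optional sanity check outside Streamlit (a minimal sketch using the app's
# default prompt; run from a Python shell after the model loads):
# print(generate_text("Once upon a time"))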
st.title("StanceEval LLM testing with Hugging Face and Streamlit")
prompt = st.text_input("Enter your prompt:", "Once upon a time")
if st.button("Generate"):
    with st.spinner("Generating..."):
        generated_text = generate_text(prompt)
    st.success("Generated Text:")
    st.write(generated_text)
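
# To launch the app locally (standard Streamlit usage, assuming streamlit and
# transformers are installed):
#   streamlit run app.py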