import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("arabic-text-stanceEvalV1")
model = AutoModelForCausalLM.from_pretrained("arabic-text-stanceEvalV1")
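
# Note (assumption, not part of the original code): loading the weights at module level
# means Streamlit re-runs this on every interaction; a cached loader is a common pattern, e.g.:
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained("arabic-text-stanceEvalV1")
#       mdl = AutoModelForCausalLM.from_pretrained("arabic-text-stanceEvalV1")
#       return tok, mdl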

def generate_text(prompt, max_length=50):
    # Tokenize the prompt and generate a single continuation of up to max_length tokens.
    inputs = tokenizer(prompt, return_tensors="pt")
    # Passing **inputs forwards the attention mask along with the input IDs.
    outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
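
# Note (assumption, not in the original): max_length counts the prompt tokens as well;
# transformers' max_new_tokens argument can be used instead to cap only the generated
# continuation, e.g. model.generate(**inputs, max_new_tokens=50).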

st.title("SatnceEval LLM testing with Hugging Face and Streamlit")

prompt = st.text_input("Enter your prompt:", "Once upon a time")

if st.button("Generate"):
    with st.spinner("Generating..."):
        generated_text = generate_text(prompt)
        st.success("Generated Text:")
        st.write(generated_text)
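
# A typical way to launch the app (assuming this file is saved as app.py):
#   streamlit run app.py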