import os

import streamlit as st
from huggingface_hub import login
from transformers import pipeline

# login
api_key = os.getenv("ACCESS_TOKEN")
login(token=api_key)

# gemma
pipe = pipeline(task="text-generation", model="google/gemma-2-2b-it")

def poet(prompt):
    messages = [{"role": "user", "content": prompt}]
    # max_new_tokens caps only the reply length, so the prompt does not eat into the budget
    outputs = pipe(messages, max_new_tokens=80)
    # the chat pipeline returns the whole conversation; the last message is the model's reply
    return outputs[0]["generated_text"][-1]["content"].strip()

st.title("Shakespeare AI")
st.write("A Space that lets people create Shakespeare-like text!")

# get prompt
prompt = st.text_input("Enter your prompt:")

# analyze prompt and write content only once the user has entered something
if prompt:
    shakespeare = poet(prompt)
    st.write(shakespeare)
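
# Note: Streamlit reruns this whole script on every interaction, so the pipeline above is
# rebuilt each time the user submits a prompt. A minimal sketch of loading it once instead,
# assuming st.cache_resource is available (Streamlit >= 1.18):
#
#     @st.cache_resource
#     def load_pipe():
#         return pipeline(task="text-generation", model="google/gemma-2-2b-it")
#
#     pipe = load_pipe()
#
# To try the app locally (assuming this file is saved as app.py and you have a Hugging Face
# token with access to google/gemma-2-2b-it):
#
#     ACCESS_TOKEN=<your-hf-token> streamlit run app.py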