Shahabmoin
committed on
Update app.py
app.py CHANGED
@@ -0,0 +1,51 @@
import streamlit as st
from diffusers import StableDiffusionPipeline
import torch

# Load the model once and cache it across Streamlit reruns
@st.cache_resource
def load_pipeline():
    model_name = "runwayml/stable-diffusion-v1-5"  # Replace with the desired Stable Diffusion model
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        # float16 is only safe on GPU; fall back to float32 on CPU
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        use_auth_token=True,  # Use a Hugging Face token if required for the model
    )
    pipeline = pipeline.to("cuda" if torch.cuda.is_available() else "cpu")
    return pipeline

pipeline = load_pipeline()

# App title
st.title("🎨 Open Source Text-to-Image Generator")
st.write("Generate images from text prompts using Stable Diffusion.")

# Sidebar configuration (defined before the generation logic so the
# slider values are actually applied to the pipeline call below)
st.sidebar.title("Settings")
st.sidebar.write("Customize your generation:")
guidance_scale = st.sidebar.slider("Guidance Scale", 5.0, 15.0, 7.5)
num_inference_steps = st.sidebar.slider("Inference Steps", 10, 100, 50)

# Clear cache button
if st.sidebar.button("Clear Cache"):
    st.cache_resource.clear()
    st.success("Cache cleared!")

# Input prompt
prompt = st.text_input("Enter your prompt:", placeholder="A futuristic cityscape at sunset")

# Image generation button
if st.button("Generate Image"):
    if prompt:
        with st.spinner("Generating image..."):
            try:
                # Generate image using the sidebar settings
                result = pipeline(
                    prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                )
                image = result.images[0]

                # Display the image (use_container_width replaces the
                # deprecated use_column_width argument)
                st.image(image, caption="Generated Image", use_container_width=True)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.warning("Please enter a prompt to generate an image.")
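Not part of this commit, but a minimal smoke test can catch model-download or CUDA problems before the Streamlit layer is involved. The sketch below assumes diffusers and torch are installed and that the model ID used above is accessible; the file name sanity_check.py and the 20-step setting are illustrative choices, not anything from this repository.

# sanity_check.py — minimal sketch for testing the pipeline outside Streamlit
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# A single prompt with fewer steps keeps the smoke test fast
image = pipe("A futuristic cityscape at sunset", num_inference_steps=20).images[0]
image.save("test_output.png")  # Inspect the file to confirm generation works

If this script produces an image, the Streamlit app's load_pipeline() and generation path should work with the same environment.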