# SDXL_Test / app.py
# Hugging Face Space by Jangai — "Update app.py", revision d71b895 (verified).
import requests
import io
from PIL import Image
import gradio as gr
import os
# Both models authenticate with the same Hugging Face token, read from the
# single HF_API_TOKEN environment variable.
ZEPHYR_API_TOKEN = os.getenv("HF_API_TOKEN")
SD_API_TOKEN = os.getenv("HF_API_TOKEN")
if not ZEPHYR_API_TOKEN or not SD_API_TOKEN:
    # Both names are sourced from HF_API_TOKEN above, so that is the one
    # variable the user must set (the old message named a variable that is
    # never read).
    raise ValueError("API tokens not found. Please set the HF_API_TOKEN environment variable.")

# Hosted Inference API endpoints: Zephyr turns the LinkedIn post into an
# image prompt; SDXL renders the image.
ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
def query_zephyr(linkedin_text):
    """Send *linkedin_text* to the hosted Zephyr model and return its JSON reply.

    The Inference API typically answers with a list of
    ``{"generated_text": ...}`` dicts; the decoded JSON is returned as-is.

    Raises:
        RuntimeError: if the API responds with a non-200 status code.
        requests.exceptions.RequestException: on network failure or timeout.
    """
    # Placeholder instruction prefix — currently empty, so the model sees
    # only a newline followed by the raw LinkedIn text.
    prompt = ""
    payload = {
        "inputs": f"{prompt}\n{linkedin_text}",
    }
    headers = {
        "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
        "Content-Type": "application/json",
    }
    # timeout prevents the request from hanging indefinitely on a stalled
    # endpoint (the original call had no timeout at all).
    response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        return response.json()
    # Dump the response body for debugging before failing.
    print(response.text)
    raise RuntimeError(f"Failed to query Zephyr model, status code: {response.status_code}")
# One-off smoke test that runs at import time: push a placeholder post
# through Zephyr and print either the raw response or the error to the
# Space logs. Replace the sample text to exercise real content.
sample_post = "Your LinkedIn post content here."
try:
    print(query_zephyr(sample_post))
except Exception as exc:
    print(exc)
def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
    """Render *prompt* into a PIL image via the SDXL inference endpoint.

    Args:
        prompt: positive text prompt for the image.
        negative_prompt: optional text to steer the image away from; omitted
            from the payload when falsy.
        guidance_scale: classifier-free guidance strength.
        width, height: output dimensions in pixels.
        num_inference_steps: number of diffusion steps.

    Returns:
        PIL.Image.Image: the decoded generated image.

    Raises:
        requests.exceptions.HTTPError: if the API returns an error status.
        requests.exceptions.RequestException: on network failure or timeout.
    """
    headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "guidance_scale": guidance_scale,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
        },
    }
    if negative_prompt:  # only include when the caller supplied one
        payload["parameters"]["negative_prompt"] = negative_prompt
    # Generous timeout: SDXL generation can take tens of seconds, but the
    # call must not hang forever.
    response = requests.post(SD_API_URL, headers=headers, json=payload, timeout=300)
    # Fail loudly on API errors instead of handing an error-JSON body to
    # PIL, which would otherwise die with a cryptic UnidentifiedImageError.
    response.raise_for_status()
    image = Image.open(io.BytesIO(response.content))
    return image
def generate_image_from_linkedin_text(linkedin_text, negative_prompt, guidance_scale, width, height, num_inference_steps):
    """Turn a LinkedIn post into an image, returning (image, generated_prompt).

    The post is first sent to Zephyr to produce an image prompt, and that
    prompt is then rendered with Stable Diffusion.

    Raises:
        ValueError: when Zephyr's reply is not a non-empty list, or when no
            prompt text could be extracted from it.
    """
    response = query_zephyr(linkedin_text)
    # Guard clause: anything other than a non-empty list is unusable.
    if not (response and isinstance(response, list)):
        raise ValueError("Unexpected response format from Zephyr model.")
    generated_prompt = response[0].get("generated_text", "")
    if not generated_prompt:
        raise ValueError("Failed to generate a prompt from the LinkedIn text.")
    image = generate_image_from_prompt(generated_prompt, negative_prompt, guidance_scale, width, height, num_inference_steps)
    return image, generated_prompt
# Gradio UI: LinkedIn text plus SDXL tuning knobs in, image and the
# intermediate Zephyr prompt out.
app_inputs = [
    gr.Textbox(label="LinkedIn Message", placeholder="Enter LinkedIn message here..."),
    gr.Textbox(label="Negative Prompt", placeholder="Enter a negative prompt here (optional)..."),
    gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=0.1, value=7.5),
    gr.Slider(label="Width", minimum=768, maximum=1024, step=1, value=1024),
    gr.Slider(label="Height", minimum=768, maximum=1024, step=1, value=768),
    gr.Slider(label="Number of Inference Steps", minimum=20, maximum=50, step=1, value=30),
]
app_outputs = [
    gr.Image(type="pil"),
    gr.Label(label="Generated Prompt"),
]

demo = gr.Interface(
    fn=generate_image_from_linkedin_text,
    inputs=app_inputs,
    outputs=app_outputs,
    title="Generate Images from LinkedIn Messages",
    description="Enter a LinkedIn message to generate a creative prompt with Zephyr, which is then used to generate an image with Stable Diffusion. Image parameters can be adjusted.",
)
demo.launch()