jeanflop committed on
Commit
ac3764a
·
verified ·
1 Parent(s): eee9873

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -66
app.py CHANGED
@@ -22,15 +22,12 @@ from PIL import Image
22
  # set hf inference endpoint with lama for story
23
  # get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token
24
 
 
 
25
 
26
-
27
- HUGGINGFACEHUB_API_TOKEN =os.environ["HUGGINGFACEHUB_API_TOKEN"]
28
- GOOGLE_API_KEY=os.environ["GOOGLE_API_KEY"]
29
- API_KEY= os.environ["API_KEY"]
30
-
31
-
32
-
33
-
34
 
35
  class Story(BaseModel):
36
  title: str = Field(description="A captivating title for the story.")
@@ -49,7 +46,6 @@ class Story(BaseModel):
49
  Explain the action taking place in each scene. Come up with your own unique descriptions!"""
50
  )
51
 
52
-
53
  from langchain_google_genai import ChatGoogleGenerativeAI
54
 
55
  llm = ChatGoogleGenerativeAI(model="gemini-pro",google_api_key=GOOGLE_API_KEY)
@@ -72,60 +68,38 @@ prompt = PromptTemplate(
72
 
73
  chain = prompt | model | parser
74
 
75
- chain.invoke({"query": story_query})
76
-
77
- response =chain.invoke({"query": story_query})
78
-
79
- response
80
-
81
- # modele load
82
- # Choose among 1, 2, 4 and 8:
83
- num_inference_steps = 8
84
-
85
- import streamlit as st
86
- import requests
87
- import io
88
- from PIL import Image
89
-
90
- API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
91
- headers = {"Authorization": f"Bearer {API_KEY}"}
92
-
93
- # Fonction pour appeler l'API et générer une image pour une scène donnée
94
- def generate_image(scene):
95
- payload = {
96
- "inputs": scene,
97
- "guidance_scale": 0.8,
98
- "num_inference_steps": 8,
99
- "eta": 0.5,
100
- "seed": 46,
101
- "negative_prompt": negative_prompt
102
- }
103
- response = requests.post(API_URL, headers=headers, json=payload)
104
- image_bytes = response.content
105
- image = Image.open(io.BytesIO(image_bytes))
106
- return image
107
-
108
- # Contenu de la variable response
109
- scenes =response.scenes
110
-
111
- metadonne =response.metadonne
112
- # Générer les images pour chaque scène et afficher avec les métadonnées dans une grille 2x3
113
- st.title("Images générées avec métadonnées dans une grille 2x3")
114
- for i in range(0, len(scenes), 2):
115
- col1, col2 = st.columns(2)
116
- col1.write(f"**Scène {i+1}:** {metadonne[i]}")
117
- col1.image(generate_image(scenes[i]), caption=f"Image de la scène {i+1}", width=300)
118
-
119
- # Vérifie si une deuxième scène existe pour afficher la deuxième image
120
- if i+1 < len(scenes):
121
- col2.write(f"**Scène {i+2}:** {metadonne[i+1]}")
122
- col2.image(generate_image(scenes[i+1]), caption=f"Image de la scène {i+2}", width=300)
123
-
124
-
125
-
126
-
127
-
128
-
129
-
130
-
131
-
 
22
  # set hf inference endpoint with lama for story
23
  # get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token
24
 
25
+ # Load environment variables from .env file
26
+ load_dotenv()
27
 
28
+ HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
29
+ GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
30
+ API_KEY = os.environ["API_KEY"]
 
 
 
 
 
31
 
32
  class Story(BaseModel):
33
  title: str = Field(description="A captivating title for the story.")
 
46
  Explain the action taking place in each scene. Come up with your own unique descriptions!"""
47
  )
48
 
 
49
  from langchain_google_genai import ChatGoogleGenerativeAI
50
 
51
  llm = ChatGoogleGenerativeAI(model="gemini-pro",google_api_key=GOOGLE_API_KEY)
 
68
 
69
  chain = prompt | model | parser
70
 
71
+ # Trigger the generation of the story only when a title is provided
72
+ if title:
73
+ response = chain.invoke({"query": story_query})
74
+
75
+ # Display the story elements if a response is received
76
+ if response:
77
+ st.write(response)
78
+ # Define negative prompt for the image generation
79
+ negative_prompt = "ugly, blurry, low-resolution, deformed, mutated, disfigured, missing limbs, disjointed, distorted, deformed, unnatural"
80
+ # Function for generating images
81
+ def generate_image(scene):
82
+ payload = {
83
+ "inputs": scene,
84
+ "guidance_scale": 0.8,
85
+ "num_inference_steps": 8,
86
+ "eta": 0.5,
87
+ "seed": 46,
88
+ "negative_prompt": negative_prompt
89
+ }
90
+ response = requests.post(API_URL, headers=headers, json=payload)
91
+ image_bytes = response.content
92
+ image = Image.open(io.BytesIO(image_bytes))
93
+ return image
94
+
95
+ # Generate and display images with meta-data in a 2x3 grid
96
+ st.title("Images générées avec métadonnées dans une grille 2x3")
97
+ for i in range(0, len(response.scenes), 2):
98
+ col1, col2 = st.columns(2)
99
+ col1.write(f"**Scène {i+1}:** {response.metadonne[i]}")
100
+ col1.image(generate_image(response.scenes[i]), caption=f"Image de la scène {i+1}", width=300)
101
+
102
+ # Check if a second scene exists for displaying the second image
103
+ if i+1 < len(response.scenes):
104
+ col2.write(f"**Scène {i+2}:** {response.metadonne[i+1]}")
105
+ col2.image(generate_image(response.scenes[i+1]), caption=f"Image de la scène {i+2}", width=300)