Bils committed · verified
Commit 2edecf4 · 1 Parent(s): eaef5b0

Update app.py

Files changed (1):
  1. app.py +49 -149

app.py CHANGED
@@ -50,7 +50,6 @@ def get_llama_pipeline(model_id: str, token: str):
     """
     if model_id in LLAMA_PIPELINES:
         return LLAMA_PIPELINES[model_id]
-
     tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
@@ -70,10 +69,8 @@ def get_musicgen_model(model_key: str = "facebook/musicgen-large"):
     """
     if model_key in MUSICGEN_MODELS:
         return MUSICGEN_MODELS[model_key]
-
     model = MusicgenForConditionalGeneration.from_pretrained(model_key)
     processor = AutoProcessor.from_pretrained(model_key)
-
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model.to(device)
     MUSICGEN_MODELS[model_key] = (model, processor)
@@ -85,7 +82,6 @@ def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
     """
     if model_name in TTS_MODELS:
         return TTS_MODELS[model_name]
-
     tts_model = TTS(model_name)
     TTS_MODELS[model_name] = tts_model
     return tts_model
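All three loader hunks above make the same tidy-up: each `get_*` helper memoizes its model in a module-level dict so repeated calls reuse the already-loaded weights. A minimal self-contained sketch of the pattern, using the TTS loader's names from the diff (the import path is the standard Coqui TTS one; treat it as an assumption since the imports sit outside these hunks):

```python
from TTS.api import TTS

# Module-level cache, as in the diff: model name -> loaded TTS instance.
TTS_MODELS = {}

def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
    """Return a cached Coqui TTS model, loading it only on first use."""
    if model_name in TTS_MODELS:
        return TTS_MODELS[model_name]
    tts_model = TTS(model_name)  # expensive: downloads/initializes weights
    TTS_MODELS[model_name] = tts_model
    return tts_model
```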
@@ -97,7 +93,7 @@ def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
 def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
     """
     Generates a script, sound design suggestions, and music ideas from a user prompt.
-    Returns a tuple of strings: (voice_script, sound_design, music_suggestions).
+    Returns a tuple: (voice_script, sound_design, music_suggestions).
     """
     try:
         text_pipeline = get_llama_pipeline(model_id, token)
@@ -122,12 +118,10 @@ def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
         if "Output:" in generated_text:
             generated_text = generated_text.split("Output:")[-1].strip()

-        # Default placeholders
         voice_script = "No voice-over script found."
         sound_design = "No sound design suggestions found."
         music_suggestions = "No music suggestions found."

-        # Extract Voice-Over Script
         if "Voice-Over Script:" in generated_text:
             parts = generated_text.split("Voice-Over Script:")
             voice_script_part = parts[1]
@@ -136,7 +130,6 @@ def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
         else:
             voice_script = voice_script_part.strip()

-        # Extract Sound Design Suggestions
         if "Sound Design Suggestions:" in generated_text:
             parts = generated_text.split("Sound Design Suggestions:")
             sound_design_part = parts[1]
@@ -145,7 +138,6 @@ def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
         else:
             sound_design = sound_design_part.strip()

-        # Extract Music Suggestions
         if "Music Suggestions:" in generated_text:
             parts = generated_text.split("Music Suggestions:")
             music_suggestions = parts[1].strip()
@@ -161,19 +153,17 @@ def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
 @spaces.GPU(duration=100)
 def generate_voice(script: str, tts_model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
     """
-    Generates a voice-over from the provided script using the Coqui TTS model.
+    Generates a voice-over from the provided script using Coqui TTS.
     Returns the file path to the generated .wav file.
     """
     try:
         if not script.strip():
             return "Error: No script provided."
-
         cleaned_script = clean_text(script)
         tts_model = get_tts_model(tts_model_name)
         output_path = os.path.join(tempfile.gettempdir(), "voice_over.wav")
         tts_model.tts_to_file(text=cleaned_script, file_path=output_path)
         return output_path
-
     except Exception as e:
         return f"Error generating voice: {e}"

@@ -194,7 +184,6 @@ def generate_music(prompt: str, audio_length: int):
         musicgen_model, musicgen_processor = get_musicgen_model(model_key)

         device = "cuda" if torch.cuda.is_available() else "cpu"
-        # Process the input and move each tensor to the proper device
         inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt")
         inputs = {k: v.to(device) for k, v in inputs.items()}

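The hunk above drops a redundant comment but keeps the step that matters: every tensor returned by the processor is moved to the model's device before generation. A hedged sketch of the surrounding flow (the `generate` call and the tokens-to-seconds note are standard Hugging Face MusicGen usage, assumed here because they fall outside the hunk's context):

```python
import torch
from transformers import AutoProcessor, MusicgenForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-large").to(device)
processor = AutoProcessor.from_pretrained("facebook/musicgen-large")

inputs = processor(text=["upbeat morning-show jingle"], padding=True, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}  # model and inputs must live on the same device

with torch.no_grad():
    # The app's "Music Length (tokens)" slider maps to max_new_tokens;
    # MusicGen emits roughly 50 tokens per second of audio.
    audio_values = model.generate(**inputs, max_new_tokens=512)
```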
@@ -211,15 +200,12 @@ def generate_music(prompt: str, audio_length: int):
         return f"Error generating music: {e}"

 # ---------------------------------------------------------------------
-# Audio Blending with Duration Sync & Ducking
+# Audio Blending Function
 # ---------------------------------------------------------------------
 @spaces.GPU(duration=100)
 def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
     """
     Blends two audio files (voice and music).
-    1. If music < voice, loops the music until it meets/exceeds the voice duration.
-    2. If music > voice, trims music to the voice duration.
-    3. If ducking=True, the music is attenuated by 'duck_level' dB while the voice is playing.
     Returns the file path to the blended .wav file.
     """
     try:
@@ -228,7 +214,6 @@ def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):

         voice = AudioSegment.from_wav(voice_path)
         music = AudioSegment.from_wav(music_path)
-
         voice_len = len(voice)
         music_len = len(music)

@@ -241,12 +226,7 @@ def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
         if len(music) > voice_len:
             music = music[:voice_len]

-        if ducking:
-            ducked_music = music - duck_level
-            final_audio = ducked_music.overlay(voice)
-        else:
-            final_audio = music.overlay(voice)
-
+        final_audio = music.overlay(voice, gain_during_overlay=-duck_level) if ducking else music.overlay(voice)
         output_path = os.path.join(tempfile.gettempdir(), "blended_output.wav")
         final_audio.export(output_path, format="wav")
         return output_path
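The one behavioral change in the hunk above: instead of attenuating the entire music bed up front (`music - duck_level`) and then overlaying, the new one-liner uses pydub's `gain_during_overlay`, which lowers the bed only while the overlaid segment plays. Because the bed has already been looped/trimmed to the voice duration, both forms give the same result here; they diverge when the overlay is shorter than the bed. A small synthetic comparison (tones only, illustrative):

```python
from pydub.generators import Sine

music = Sine(220).to_audio_segment(duration=3000)  # 3 s music bed
voice = Sine(440).to_audio_segment(duration=1000)  # 1 s "voice"

old_style = (music - 10).overlay(voice)                    # whole bed ducked by 10 dB
new_style = music.overlay(voice, gain_during_overlay=-10)  # ducked only during the 1 s overlay
```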
@@ -261,19 +241,15 @@ def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
 def run_agent(user_prompt: str, llama_model_id: str, duration: int, tts_model_name: str, music_length: int, ducking: bool, duck_level: int):
     """
     Runs the full workflow as an agent:
-    1. Generates a script (voice-over, sound design, music suggestions) from a user prompt.
-    2. Synthesizes a voice-over from the generated script.
-    3. Generates a music track based on the music suggestions.
-    4. Blends the voice and music tracks.
-    Returns a tuple with the generated script components, voice file, music file, and final blended audio.
+    1. Generates a script (voice-over, sound design, and music suggestions).
+    2. Synthesizes a voice-over.
+    3. Generates a music track.
+    4. Blends the voice and music.
+    Returns all generated components.
     """
-    # Step 1: Generate Script
     voice_script, sound_design, music_suggestions = generate_script(user_prompt, llama_model_id, HF_TOKEN, duration)
-    # Step 2: Generate Voice-Over
     voice_file = generate_voice(voice_script, tts_model_name)
-    # Step 3: Generate Music
     music_file = generate_music(music_suggestions, music_length)
-    # Step 4: Blend Audio
     blended_file = blend_audio(voice_file, music_file, ducking, duck_level)
     return voice_script, sound_design, music_suggestions, voice_file, music_file, blended_file

@@ -281,7 +257,6 @@ def run_agent(user_prompt: str, llama_model_id: str, duration: int, tts_model_name: str, music_length: int, ducking: bool, duck_level: int):
 # Gradio Interface with Enhanced UI
 # ---------------------------------------------------------------------
 with gr.Blocks(css="""
-/* Global Styles */
 body {
     background: linear-gradient(135deg, #1d1f21, #3a3d41);
     color: #f0f0f0;
@@ -328,163 +303,88 @@ with gr.Blocks(css="""

     gr.Markdown("""
     Welcome to **AI Promo Studio**! This platform leverages state-of-the-art AI models to help you generate:
-
-    - **Script**: Generate a compelling voice-over script with LLaMA.
-    - **Voice Synthesis**: Create natural-sounding voice-overs using Coqui TTS.
-    - **Music Production**: Produce custom music tracks with MusicGen.
-    - **Audio Blending**: Seamlessly blend voice and music with options for ducking.
+    - A compelling voice-over script (with sound design and music suggestions),
+    - A natural-sounding voice-over,
+    - Custom music tracks,
+    - And a fully blended audio promo.
     """)

     with gr.Tabs():
         # Tab 1: Script Generation
         with gr.Tab("📝 Script Generation"):
             with gr.Row():
-                user_prompt = gr.Textbox(
-                    label="Promo Idea",
-                    placeholder="E.g., A 30-second promo for a morning show...",
-                    lines=2
-                )
+                user_prompt = gr.Textbox(label="Promo Idea", placeholder="E.g., A 30-second promo for a morning show...", lines=2)
             with gr.Row():
-                llama_model_id = gr.Textbox(
-                    label="LLaMA Model ID",
-                    value="meta-llama/Meta-Llama-3-8B-Instruct",
-                    placeholder="Enter a valid Hugging Face model ID"
-                )
-                duration = gr.Slider(
-                    label="Desired Promo Duration (seconds)",
-                    minimum=15,
-                    maximum=60,
-                    step=15,
-                    value=30
-                )
+                llama_model_id = gr.Textbox(label="LLaMA Model ID", value="meta-llama/Meta-Llama-3-8B-Instruct", placeholder="Enter a valid Hugging Face model ID")
+                duration = gr.Slider(label="Promo Duration (seconds)", minimum=15, maximum=60, step=15, value=30)
             generate_script_button = gr.Button("Generate Script", variant="primary")
-            script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5, interactive=False)
+            script_output = gr.Textbox(label="Voice-Over Script", lines=5, interactive=False)
             sound_design_output = gr.Textbox(label="Sound Design Suggestions", lines=3, interactive=False)
             music_suggestion_output = gr.Textbox(label="Music Suggestions", lines=3, interactive=False)
-
-            generate_script_button.click(
-                fn=lambda user_prompt, model_id, dur: generate_script(user_prompt, model_id, HF_TOKEN, dur),
-                inputs=[user_prompt, llama_model_id, duration],
-                outputs=[script_output, sound_design_output, music_suggestion_output],
-            )
+            generate_script_button.click(fn=lambda prompt, model, dur: generate_script(prompt, model, HF_TOKEN, dur),
+                                         inputs=[user_prompt, llama_model_id, duration],
+                                         outputs=[script_output, sound_design_output, music_suggestion_output])

         # Tab 2: Voice Synthesis
         with gr.Tab("🎤 Voice Synthesis"):
             gr.Markdown("Generate a natural-sounding voice-over using Coqui TTS.")
-            selected_tts_model = gr.Dropdown(
-                label="TTS Model",
-                choices=[
-                    "tts_models/en/ljspeech/tacotron2-DDC",
-                    "tts_models/en/ljspeech/vits",
-                    "tts_models/en/sam/tacotron-DDC",
-                ],
-                value="tts_models/en/ljspeech/tacotron2-DDC",
-                multiselect=False
-            )
+            selected_tts_model = gr.Dropdown(label="TTS Model",
+                                             choices=["tts_models/en/ljspeech/tacotron2-DDC", "tts_models/en/ljspeech/vits", "tts_models/en/sam/tacotron-DDC"],
+                                             value="tts_models/en/ljspeech/tacotron2-DDC", multiselect=False)
             generate_voice_button = gr.Button("Generate Voice-Over", variant="primary")
             voice_audio_output = gr.Audio(label="Voice-Over (WAV)", type="filepath")
-
-            generate_voice_button.click(
-                fn=lambda script, tts_model: generate_voice(script, tts_model),
-                inputs=[script_output, selected_tts_model],
-                outputs=voice_audio_output,
-            )
+            generate_voice_button.click(fn=lambda script, tts: generate_voice(script, tts),
+                                        inputs=[script_output, selected_tts_model],
+                                        outputs=voice_audio_output)

         # Tab 3: Music Production
         with gr.Tab("🎶 Music Production"):
-            gr.Markdown("Generate a custom music track using the **MusicGen Large** model.")
-            audio_length = gr.Slider(
-                label="Music Length (tokens)",
-                minimum=128,
-                maximum=1024,
-                step=64,
-                value=512,
-                info="Increase tokens for longer audio (inference time may vary)."
-            )
+            gr.Markdown("Generate a custom music track using the MusicGen Large model.")
+            audio_length = gr.Slider(label="Music Length (tokens)", minimum=128, maximum=1024, step=64, value=512, info="Increase tokens for longer audio (inference time may vary).")
             generate_music_button = gr.Button("Generate Music", variant="primary")
             music_output = gr.Audio(label="Generated Music (WAV)", type="filepath")
-
-            generate_music_button.click(
-                fn=lambda music_suggestion, length: generate_music(music_suggestion, length),
-                inputs=[music_suggestion_output, audio_length],
-                outputs=[music_output],
-            )
+            generate_music_button.click(fn=lambda sugg, length: generate_music(sugg, length),
+                                        inputs=[music_suggestion_output, audio_length],
+                                        outputs=[music_output])

         # Tab 4: Audio Blending
         with gr.Tab("🎚️ Audio Blending"):
-            gr.Markdown("Blend your voice-over and music track. Music will be looped/truncated to match the voice duration. Enable ducking to lower the music during voice segments.")
+            gr.Markdown("Blend your voice-over and music track. Enable ducking to lower the music during voice segments.")
             ducking_checkbox = gr.Checkbox(label="Enable Ducking?", value=True)
-            duck_level_slider = gr.Slider(
-                label="Ducking Level (dB attenuation)",
-                minimum=0,
-                maximum=20,
-                step=1,
-                value=10
-            )
+            duck_level_slider = gr.Slider(label="Ducking Level (dB attenuation)", minimum=0, maximum=20, step=1, value=10)
             blend_button = gr.Button("Blend Voice + Music", variant="primary")
             blended_output = gr.Audio(label="Final Blended Output (WAV)", type="filepath")
-
-            blend_button.click(
-                fn=blend_audio,
-                inputs=[voice_audio_output, music_output, ducking_checkbox, duck_level_slider],
-                outputs=blended_output
-            )
+            blend_button.click(fn=blend_audio,
+                               inputs=[voice_audio_output, music_output, ducking_checkbox, duck_level_slider],
+                               outputs=blended_output)

         # Tab 5: Agent – Full Workflow
         with gr.Tab("🤖 Agent"):
-            gr.Markdown("Let the agent handle everything in one go: generate the script, synthesize voice, produce music, and blend the final ad.")
+            gr.Markdown("Let the agent handle everything in one go: generate script, synthesize voice, produce music, and blend the final ad.")
             with gr.Row():
-                agent_prompt = gr.Textbox(
-                    label="Ad Promo Idea",
-                    placeholder="Enter your ad promo concept...",
-                    lines=2
-                )
+                agent_prompt = gr.Textbox(label="Ad Promo Idea", placeholder="Enter your ad promo concept...", lines=2)
             with gr.Row():
-                agent_llama_model_id = gr.Textbox(
-                    label="LLaMA Model ID",
-                    value="meta-llama/Meta-Llama-3-8B-Instruct",
-                    placeholder="Enter a valid Hugging Face model ID"
-                )
-                agent_duration = gr.Slider(
-                    label="Promo Duration (seconds)",
-                    minimum=15, maximum=60, step=15, value=30
-                )
+                agent_llama_model_id = gr.Textbox(label="LLaMA Model ID", value="meta-llama/Meta-Llama-3-8B-Instruct", placeholder="Enter a valid Hugging Face model ID")
+                agent_duration = gr.Slider(label="Promo Duration (seconds)", minimum=15, maximum=60, step=15, value=30)
             with gr.Row():
-                agent_tts_model = gr.Dropdown(
-                    label="TTS Model",
-                    choices=[
-                        "tts_models/en/ljspeech/tacotron2-DDC",
-                        "tts_models/en/ljspeech/vits",
-                        "tts_models/en/sam/tacotron-DDC",
-                    ],
-                    value="tts_models/en/ljspeech/tacotron2-DDC",
-                    multiselect=False
-                )
-                agent_music_length = gr.Slider(
-                    label="Music Length (tokens)",
-                    minimum=128, maximum=1024, step=64, value=512
-                )
+                agent_tts_model = gr.Dropdown(label="TTS Model",
+                                              choices=["tts_models/en/ljspeech/tacotron2-DDC", "tts_models/en/ljspeech/vits", "tts_models/en/sam/tacotron-DDC"],
+                                              value="tts_models/en/ljspeech/tacotron2-DDC", multiselect=False)
+                agent_music_length = gr.Slider(label="Music Length (tokens)", minimum=128, maximum=1024, step=64, value=512)
             with gr.Row():
                 agent_ducking = gr.Checkbox(label="Enable Ducking?", value=True)
-                agent_duck_level = gr.Slider(
-                    label="Ducking Level (dB attenuation)",
-                    minimum=0, maximum=20, step=1, value=10
-                )
+                agent_duck_level = gr.Slider(label="Ducking Level (dB attenuation)", minimum=0, maximum=20, step=1, value=10)
             agent_run_button = gr.Button("Run Agent", variant="primary")
-            agent_script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5, interactive=False)
+            agent_script_output = gr.Textbox(label="Voice-Over Script", lines=5, interactive=False)
             agent_sound_output = gr.Textbox(label="Sound Design Suggestions", lines=3, interactive=False)
             agent_music_suggestions_output = gr.Textbox(label="Music Suggestions", lines=3, interactive=False)
             agent_voice_audio = gr.Audio(label="Voice-Over (WAV)", type="filepath")
             agent_music_audio = gr.Audio(label="Generated Music (WAV)", type="filepath")
             agent_blended_audio = gr.Audio(label="Final Blended Output (WAV)", type="filepath")
-
-            agent_run_button.click(
-                fn=run_agent,
-                inputs=[agent_prompt, agent_llama_model_id, agent_duration, agent_tts_model, agent_music_length, agent_ducking, agent_duck_level],
-                outputs=[agent_script_output, agent_sound_output, agent_music_suggestions_output, agent_voice_audio, agent_music_audio, agent_blended_audio]
-            )
-
-    # Footer
+            agent_run_button.click(fn=run_agent,
+                                   inputs=[agent_prompt, agent_llama_model_id, agent_duration, agent_tts_model, agent_music_length, agent_ducking, agent_duck_level],
+                                   outputs=[agent_script_output, agent_sound_output, agent_music_suggestions_output, agent_voice_audio, agent_music_audio, agent_blended_audio])

     gr.Markdown("""
     <div class="footer">
     <hr>
 