|
from transformers import MBartForConditionalGeneration, MBart50Tokenizer, AutoModelForCausalLM, AutoTokenizer, pipeline |
|
import gradio as gr |
|
import requests |
|
import io |
|
from PIL import Image |
|
import os |
|
|
|
|
|
# --- Tamil -> English translation model (MBart-50 many-to-one) ---
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

# --- Hugging Face Inference API credentials ---
# Token comes from the 'full_token' environment variable; fail fast if absent.
hf_api_key = os.getenv("full_token")
if hf_api_key is None:
    raise ValueError("Hugging Face API key not found! Please set 'full_token' environment variable.")
# No `else` needed: the raise above already exits (guard-clause style).
headers = {"Authorization": f"Bearer {hf_api_key}"}

# Hosted Stable Diffusion endpoint used for text-to-image generation.
API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"

# --- English text-generation model (GPT-Neo 1.3B) ---
text_generation_model_name = "EleutherAI/gpt-neo-1.3B"
text_tokenizer = AutoTokenizer.from_pretrained(text_generation_model_name)
text_model = AutoModelForCausalLM.from_pretrained(text_generation_model_name)

# Reusable generation pipeline wrapping the model/tokenizer pair.
text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
|
|
|
|
|
def generate_image_from_text(translated_text):
    """Generate an image from English text via the hosted Stable Diffusion API.

    Args:
        translated_text: English prompt string sent to the inference endpoint.

    Returns:
        A ``(image, error)`` tuple: a ``PIL.Image`` and ``None`` on success,
        or ``None`` and an error-message string on failure.
    """
    try:
        print(f"Generating image from translated text: {translated_text}")
        # A timeout prevents the request from hanging the UI forever when the
        # endpoint is slow or unreachable; the value is generous because
        # hosted models can have long cold starts.
        response = requests.post(
            API_URL,
            headers=headers,
            json={"inputs": translated_text},
            timeout=120,
        )

        if response.status_code != 200:
            print(f"Error generating image: {response.text}")
            return None, f"Error generating image: {response.text}"

        # Successful responses carry raw image bytes in the body.
        image = Image.open(io.BytesIO(response.content))
        print("Image generation completed.")
        return image, None
    except Exception as e:
        # Broad catch is intentional: this sits at the UI boundary and must
        # report an error message rather than crash the Gradio handler.
        print(f"Error during image generation: {e}")
        return None, f"Error during image generation: {e}"
|
|
|
|
|
def generate_short_paragraph_from_text(translated_text):
    """Continue the given English text into a short paragraph.

    Uses the module-level GPT-Neo pipeline with conservative sampling
    settings (low temperature, nucleus sampling) to keep output focused.

    Returns:
        The generated text, or an error-message string if generation fails.
    """
    try:
        print(f"Generating a short paragraph from translated text: {translated_text}")
        outputs = text_generator(
            translated_text,
            max_length=80,
            num_return_sequences=1,
            temperature=0.6,
            top_p=0.8,
            truncation=True,
        )
        result = outputs[0]["generated_text"]
        print(f"Paragraph generation completed: {result}")
        return result
    except Exception as e:
        print(f"Error during paragraph generation: {e}")
        return f"Error during paragraph generation: {e}"
|
|
|
|
|
def translate_generate_paragraph_and_image(tamil_text):
    """Translate Tamil text to English, then generate a paragraph and an image.

    Args:
        tamil_text: Input text in Tamil.

    Returns:
        A ``(translated_text, paragraph, image, error_message)`` tuple.
        ``image`` and/or ``error_message`` may be ``None`` depending on
        which stage, if any, failed.
    """
    # Stage 1: Tamil -> English translation with MBart-50.
    try:
        print("Translating Tamil text to English...")
        tokenizer.src_lang = "ta_IN"
        inputs = tokenizer(tamil_text, return_tensors="pt")
        # Force English as the target language for the many-to-one model.
        translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
        translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
        print(f"Translation completed: {translated_text}")
    except Exception as e:
        return f"Error during translation: {e}", "", None, None

    # Stage 2: short paragraph generation from the translated text.
    paragraph = generate_short_paragraph_from_text(translated_text)
    # Match the helper's exact error prefix rather than any occurrence of
    # "Error", which could legitimately appear inside generated prose and
    # would wrongly abort the pipeline.
    if paragraph.startswith("Error during paragraph generation:"):
        return translated_text, paragraph, None, None

    # Stage 3: image generation via the hosted Stable Diffusion endpoint.
    image, error_message = generate_image_from_text(translated_text)
    if error_message:
        return translated_text, paragraph, None, error_message

    return translated_text, paragraph, image, None
|
|
|
|
|
# The handler returns four values (translated text, paragraph, image,
# error message), so the interface must declare four matching output
# components — the original three-output list makes Gradio raise at
# runtime when the tuple is returned.
iface = gr.Interface(
    fn=translate_generate_paragraph_and_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Short Paragraph"),
        gr.Image(label="Generated Image"),
        gr.Textbox(label="Error Message"),
    ],
    title="Tamil to English Translation, Short Paragraph Generation, and Image Creation",
    description="Translate Tamil text to English, generate a short paragraph, and create an image using the translated text.",
)

# share=True additionally exposes a temporary public URL besides the local server.
iface.launch(share=True)
|
|