import gradio as gr
from transformers import pipeline
import PyPDF2

# Load Models
model1_name = "t5-small"                     # Model 1: general text-to-text
model2_name = "codeparrot/codeparrot-small"  # Model 2: code generation
# Model 3 handles text extracted from PDFs. The original snippet loaded
# "Salesforce/blip-image-captioning" as an image-to-text pipeline here, but that
# pipeline cannot consume plain text, so a small summarization model is
# substituted (assumption) to make the PDF path runnable; any text model works.
model3_name = "sshleifer/distilbart-cnn-12-6"  # Model 3: PDF summarization

# Load Pipelines
model1 = pipeline("text2text-generation", model=model1_name, tokenizer=model1_name)
model2 = pipeline("text-generation", model=model2_name, tokenizer=model2_name)
model3 = pipeline("summarization", model=model3_name)

# Helper: Extract text from PDF
def extract_text_from_pdf(pdf_file):
    pdf_reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() can return None for image-only pages
        text += page.extract_text() or ""
    return text

# Function for Model 1
def model1_func(input_text):
    try:
        result = model1(input_text, max_length=50, num_return_sequences=1)
        answer = result[0]["generated_text"]
        return f"Model 1 Output: {answer}"
    except Exception as e:
        return f"Error: {str(e)}"

# Function for Model 2
def model2_func(input_text):
    try:
        result = model2(input_text, max_length=50, num_return_sequences=1)
        answer = result[0]["generated_text"]
        return f"Model 2 Output: {answer}"
    except Exception as e:
        return f"Error: {str(e)}"

# Function for Model 3
def model3_func(pdf_file):
    try:
        extracted_text = extract_text_from_pdf(pdf_file)
        if not extracted_text.strip():
            return "No text found in the PDF. Please upload a valid file."
        # Truncate very long documents so the input roughly fits the model's context window
        result = model3(extracted_text[:3000])
        answer = result[0]["summary_text"]
        return f"Model 3 Output: {answer}"
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("
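# ---------------------------------------------------------------------------
# The original snippet cuts off inside the gr.Markdown("...") call above.
# Below is a hypothetical sketch of how the Blocks UI could be wired to the
# three functions defined earlier: the title, tab names, labels, and layout
# are assumptions, not the original author's interface.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Multi-Model Demo")  # placeholder title (assumption)

    with gr.Tab("Model 1: Text-to-Text"):
        m1_in = gr.Textbox(label="Input text")
        m1_out = gr.Textbox(label="Output")
        gr.Button("Run").click(model1_func, inputs=m1_in, outputs=m1_out)

    with gr.Tab("Model 2: Code Generation"):
        m2_in = gr.Textbox(label="Prompt")
        m2_out = gr.Textbox(label="Output")
        gr.Button("Run").click(model2_func, inputs=m2_in, outputs=m2_out)

    with gr.Tab("Model 3: PDF Summary"):
        m3_in = gr.File(label="Upload a PDF", file_types=[".pdf"], type="filepath")
        m3_out = gr.Textbox(label="Output")
        gr.Button("Run").click(model3_func, inputs=m3_in, outputs=m3_out)

demo.launch()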