import gradio as gr
import spacy
from transformers import pipeline

# Load spaCy's English model for tokenization
nlp = spacy.load("en_core_web_sm")

def preprocess_text(text):
    """Lowercase the text and strip punctuation tokens."""
    doc = nlp(text.lower())
    tokens = [token.text for token in doc if not token.is_punct]
    return tokens

# Load the multilingual model for question answering
qa_model = pipeline("question-answering", model="deepset/xlm-roberta-large-squad2")

def answer_question(question, context):
    """Generate an answer to the question based on the uploaded context."""
    try:
        preprocessed_context = preprocess_text(context)
        result = qa_model(question=question, context=" ".join(preprocessed_context))
        return result["answer"]
    except Exception as e:
        return f"Error: {str(e)}"

def qa_app(text_file, question):
    """Read the uploaded text file and answer the question against its contents."""
    try:
        # With type="filepath", gr.File passes the uploaded file's path as a string
        with open(text_file, "r", encoding="utf-8") as file:
            context = file.read()
        return answer_question(question, context)
    except Exception as e:
        return f"Error reading file: {str(e)}"

# Create the Gradio interface
iface = gr.Interface(
    fn=qa_app,  # The function that processes input
    inputs=[
        gr.File(label="Upload your text file", type="filepath"),
        gr.Textbox(label="Enter your question"),
    ],
    outputs="text",
    title="Multilingual Question Answering",
    description="Upload a text file and ask a question based on its content.",
)

# Launch the Gradio app
iface.launch()
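
# A minimal sketch of sanity-checking answer_question() directly, without the UI.
# The context string below is an illustrative placeholder, not taken from any real
# file; run this instead of iface.launch() (which blocks) if you want a quick test:
#
#   sample_context = "Gradio is a Python library for building machine learning demos."
#   print(answer_question("What is Gradio used for?", sample_context))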