"""Gradio app for the GermanMuller MT5 model.

Installs its runtime dependencies with pip, loads the model from the
Hugging Face Hub, and serves a three-candidate text-to-text generation UI.
"""
import os
import subprocess
import sys


def install_packages():
    """Install the runtime dependencies with pip, one package at a time.

    Runs pip via the current interpreter (``sys.executable -m pip``) with an
    argument list instead of a shell string, so package names are never
    interpreted by a shell and the install targets the interpreter actually
    running this script.
    """
    packages = [
        "torch",
        "transformers",
        "huggingface-hub",
        "gradio",
        "accelerate",
        "onnxruntime",
        "onnxruntime-tools",
        "optimum",
        "sentencepiece",  # Added sentencepiece to the packages list
    ]
    for package in packages:
        # shell=False (the default for a list argv) — no shell injection,
        # and pip is guaranteed to belong to this interpreter's environment.
        result = subprocess.run([sys.executable, "-m", "pip", "install", package])
        if result.returncode != 0:
            print(f"Failed to install {package}")
        else:
            print(f"Successfully installed {package}")


install_packages()

# These imports must stay below install_packages() — the packages may not
# exist until it has run.
import gradio as gr
from huggingface_hub import login
from optimum.onnxruntime import ORTModelForSeq2SeqLM  # NOTE(review): unused here — confirm before removing
from transformers import AutoTokenizer, pipeline, MT5ForConditionalGeneration

model_id = "hassamniaz7/GermanMuller3"
# Hub access token is read from the NLP environment variable; the app still
# starts without it (public-model path), it just skips login.
hf_token = os.environ.get("NLP")
if hf_token:
    login(hf_token)
else:
    print("Token not found.")

# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = MT5ForConditionalGeneration.from_pretrained(model_id, token=hf_token)

# Create the text generation pipeline
generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)


def generate(text):
    """Return three beam-search generations for *text*.

    num_beams=10 with num_return_sequences=3 guarantees exactly three
    candidates, so indexing outputs[0..2] is safe.
    """
    outputs = generator(text, max_length=500, num_beams=10, num_return_sequences=3)
    output_text_1 = outputs[0]['generated_text']
    output_text_2 = outputs[1]['generated_text']
    output_text_3 = outputs[2]['generated_text']
    return output_text_1, output_text_2, output_text_3


# Set up Gradio interface: one input box, three output boxes (one per beam).
gr.Interface(
    fn=generate,
    title="GermanMuller AB",
    description="Enter text to generate output",
    inputs=gr.Textbox(
        label="Input Text",
        placeholder="Type something here..."
    ),
    outputs=[
        gr.Textbox(label="Generated Output 1"),
        gr.Textbox(label="Generated Output 2"),
        gr.Textbox(label="Generated Output 3")
    ],
    examples=[
        ["Wir werden morgen nach Berlin fahsren seiin."],
        ["Ich gehe heute Kino."]
    ]
).launch()