"""Streamlit app that generates an email subject line from an email body
using a fine-tuned seq2seq model from the Hugging Face Hub.

Run with:  streamlit run <this_file>.py
"""

import streamlit as st
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Model checkpoint on the Hugging Face Hub.
CHECKPOINT = "Ajay12345678980/Email_subject"


@st.cache_resource
def _load_model():
    """Load tokenizer and model exactly once per server process.

    Streamlit re-executes the whole script on every user interaction;
    without st.cache_resource the model would be re-loaded on every rerun.

    Returns:
        (tokenizer, model, device) — model already moved to *device*
        and switched to eval mode.
    """
    tok = AutoTokenizer.from_pretrained(CHECKPOINT, use_fast=True)
    # Some checkpoints ship without a pad token; fall back to EOS so that
    # generate() can pad. Guarded so an existing pad token is not clobbered.
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token
    mdl = AutoModelForSeq2SeqLM.from_pretrained(CHECKPOINT)
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    mdl.to(dev)
    mdl.eval()  # inference only — disables dropout etc.
    return tok, mdl, dev


tokenizer, model, device = _load_model()


def generate_response(prompt):
    """Generate a subject line for *prompt* (the email body).

    The body is prefixed with "summarize: " (T5-style task prefix —
    presumably what the checkpoint was fine-tuned with; confirm against
    the model card) and truncated to the encoder limit.

    Returns:
        str: decoded text of the best beam, special tokens stripped.
    """
    input_ids = tokenizer.encode(
        "summarize: " + prompt,
        return_tensors="pt",
        max_length=512,  # truncate long bodies to the encoder limit
        truncation=True,
    ).to(device)
    # No padding is applied above, so the mask is all ones.
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():  # inference: skip autograd bookkeeping
        output = model.generate(
            input_ids,
            max_length=30,  # subject lines are short
            num_beams=4,
            early_stopping=True,
            attention_mask=attention_mask,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# ---------------------------------------------------------------------------
# UI — Streamlit reruns everything below on each interaction.
# ---------------------------------------------------------------------------
st.title("Email Subject Generator")
st.write("Enter the email body below to generate a subject line.")

# User input
email_body = st.text_area("Email Body", height=200)

if st.button("Generate Subject"):
    if email_body.strip():
        # Overwrite any previous result in place. (Deleting the
        # session-state entry unconditionally at the top of the script —
        # as the original did — wiped the displayed subject on every
        # rerun, e.g. as soon as the user edited the text area.)
        st.session_state["generated_subject"] = generate_response(email_body)
        st.success("Subject generated!")
    else:
        st.warning("Please enter an email body.")

# Display the generated subject; session_state persists it across reruns.
if "generated_subject" in st.session_state:
    st.write("Generated Subject:", st.session_state["generated_subject"])