# Tweet Generator — Gradio app that generates tweets with a fine-tuned causal LM.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and its tokenizer once at startup.
model_name = "Manasa1/GPT_Finetuned_tweets"  # Replace with the fine-tuned model name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# GPT-style tokenizers often ship without a pad token; padding=True and
# pad_token_id=tokenizer.pad_token_id below would fail without this fallback.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Function to generate tweets
def generate_tweet(input_text):
    """Generate a single tweet-length completion for *input_text*.

    Args:
        input_text: Topic or idea to seed the generation with.

    Returns:
        The generated text, stripped of surrounding whitespace.
    """
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
        padding=True,
    )
    # Fall back to eos_token_id in case the tokenizer has no pad token set.
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = tokenizer.eos_token_id
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=150,  # Limit to 150 tokens for brevity
        num_return_sequences=1,
        top_p=0.9,  # Narrow focus to ensure more concise results
        top_k=40,  # Focus on high-probability words
        do_sample=True,
        pad_token_id=pad_id,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # NOTE(review): the decoded text still includes the prompt; trim it here
    # if the UI should show only the continuation.
    return generated_text.strip()
# Gradio interface
def main():
    """Build and return the Gradio Blocks UI for the tweet generator."""
    with gr.Blocks() as interface:
        gr.Markdown("""
        # Tweet Generator
        Enter a topic or idea, and the AI will craft a concise, engaging, and impactful tweet inspired by innovative thought leadership.
        """)
        with gr.Row():
            input_text = gr.Textbox(label="Enter your idea or topic:")
            output_tweet = gr.Textbox(label="Generated Tweet:", interactive=False)
        generate_button = gr.Button("Generate Tweet")
        # Wire the button to the generation function.
        generate_button.click(generate_tweet, inputs=[input_text], outputs=[output_tweet])
    return interface
# Run Gradio app
if __name__ == "__main__":
    app = main()
    # share=True exposes a temporary public URL; set to False for local-only use.
    app.launch(share=True)