# Tweet Generator — Gradio app serving a fine-tuned GPT tweet model.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load pre-trained model (or fine-tuned model)
# NOTE(review): both downloads run at import time, so the app blocks until
# the weights are cached locally — expect a slow first start on a fresh host.
model_name = "Manasa1/GPT_Finetuned_tweets" # Replace with the fine-tuned model name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_tweet(input_text):
    """Generate a short tweet from *input_text* using the fine-tuned causal LM.

    Args:
        input_text: Topic or idea the tweet should be based on.

    Returns:
        str: The generated tweet, with the echoed prompt removed and
        surrounding whitespace stripped.
    """
    # GPT-style tokenizers usually ship without a pad token; reuse EOS so
    # neither the padding tokenizer call nor generate() receives None.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
        padding=True,
    )
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=150,  # limit output length for tweet-style brevity
        num_return_sequences=1,
        top_p=0.9,  # nucleus sampling keeps the output focused
        top_k=40,  # restrict sampling to high-probability tokens
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Causal LMs echo the prompt in the decoded output; drop it so only the
    # newly generated tweet text is returned (fulfils the stated intent).
    if generated_text.startswith(input_text):
        generated_text = generated_text[len(input_text):]
    return generated_text.strip()
# Gradio interface
def main():
    """Assemble the Gradio Blocks UI for the tweet generator and return it.

    The caller is responsible for launching the returned interface.
    """
    with gr.Blocks() as demo:
        gr.Markdown("""
# Tweet Generator
Enter a topic or idea, and the AI will craft a concise, engaging, and impactful tweet inspired by innovative thought leadership.
""")
        with gr.Row():
            topic_box = gr.Textbox(label="Enter your idea or topic:")
            tweet_box = gr.Textbox(label="Generated Tweet:", interactive=False)
        run_button = gr.Button("Generate Tweet")
        # Wire the button to the generation function: topic in, tweet out.
        run_button.click(generate_tweet, inputs=[topic_box], outputs=[tweet_box])
    return demo
# Run Gradio app
if __name__ == "__main__":
    # share=True requests a temporary public URL in addition to the local one.
    main().launch(share=True)