Manasa1 committed (verified)
Commit 27ce0a1 · Parent(s): da2cb8a

Update app.py

Files changed (1):
  1. app.py +7 -12
app.py CHANGED
@@ -8,33 +8,28 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Function to generate tweets
 def generate_tweet(input_text):
-    prompt = ("You are a tech-savvy, forward-thinking individual with a deep understanding of technology, innovation, and cultural trends. "
-              "Craft a tweet that reflects insightful commentary, wit, or actionable advice based on the following idea: \"{}\". "
-              "Ensure the response is concise, engaging, and suitable for a diverse audience on social media. "
-              "Incorporate elements of thought leadership, futuristic perspectives, and practical wisdom where appropriate.").format(input_text)
-
-    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True, padding=True)
+    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding=True)
     outputs = model.generate(
         inputs['input_ids'],
         attention_mask=inputs['attention_mask'],
-        max_length=280,
+        max_length=150,  # Limit to 150 tokens for brevity
         num_return_sequences=1,
-        top_p=0.95,
-        top_k=50,
+        top_p=0.9,  # Narrow focus to ensure more concise results
+        top_k=40,  # Focus on high-probability words
         do_sample=True,
         pad_token_id=tokenizer.pad_token_id
     )
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
     # Extract the tweet text (exclude prompt if included)
-    return generated_text.replace(prompt, "").strip()
+    return generated_text.strip()
 
 # Gradio interface
 def main():
     with gr.Blocks() as interface:
         gr.Markdown("""
         # Tweet Generator
-        Enter a topic or idea, and the AI will craft a tweet inspired by innovative, philosophical, and tech-savvy thought leadership.
+        Enter a topic or idea, and the AI will craft a concise, engaging, and impactful tweet inspired by innovative thought leadership.
         """)
 
         with gr.Row():
@@ -50,4 +45,4 @@ def main():
 # Run Gradio app
 if __name__ == "__main__":
     app = main()
-    app.launch(share=True)
+    app.launch(share=True)
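
For context, a minimal, self-contained sketch of how generate_tweet reads after this commit. The imports, the model_name value, and the pad-token fallback are assumptions for illustration only, since they sit outside the hunk shown above; the real app.py defines its own model_name earlier in the file.

# Minimal sketch of the generation path after this commit.
# Assumptions: model_name, imports, and pad-token handling are NOT part of the diff above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; app.py sets its own model_name earlier in the file
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Assumption: some causal LMs (e.g. GPT-2) ship without a pad token, which
# tokenizer(..., padding=True) and pad_token_id below both rely on.
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token

def generate_tweet(input_text):
    # The topic is tokenized directly; the long instruction prompt was dropped in this commit.
    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding=True)
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=150,         # shorter cap keeps the output roughly tweet-sized
        num_return_sequences=1,
        top_p=0.9,              # nucleus sampling: sample from the smallest set covering 90% probability
        top_k=40,               # and from at most the 40 most likely tokens at each step
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

print(generate_tweet("open-source AI"))

Note that max_length in model.generate counts input plus generated tokens, so with the long instruction prompt removed, most of the 150-token budget now goes to the generated tweet itself.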