"""Feelings-sharing Gradio app backed by the Bloom-560m language model.

Lets a user chat with the model and save a short "sticky note" to a
downloadable text file.
"""

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Bloom model and tokenizer once at startup.
model_name = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def chat_with_bloom(user_input):
    """Generate a model reply to *user_input*.

    Returns only the newly generated text, not an echo of the prompt.
    """
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        outputs = model.generate(
            **inputs,  # also passes attention_mask, avoiding the HF warning
            max_new_tokens=100,  # bound the reply length, not prompt+reply
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the tokens produced after the prompt so the response
    # does not start with a repeat of the user's message.
    new_tokens = outputs[0][inputs.input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


def create_sticky_note(note):
    """Write *note* to a text file and return its path for download."""
    file_path = "sticky_note.txt"
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(note)
    return file_path


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Feelings Sharing Application")

    # Conversation UI
    with gr.Row():
        user_input = gr.Textbox(placeholder="Share your feelings...", label="Your Message", lines=4)
        bot_response = gr.Textbox(label="AI Response", lines=4)
    submit_button = gr.Button("Talk")

    # Sticky note UI
    gr.Markdown("## Sticky Note")
    with gr.Row():
        sticky_note = gr.Textbox(placeholder="Write your sticky note here...", label="Sticky Note", lines=4)
        download_button = gr.File(label="Download Sticky Note")
    save_button = gr.Button("Save Sticky Note")

    # Actions
    submit_button.click(chat_with_bloom, inputs=user_input, outputs=bot_response)
    save_button.click(create_sticky_note, inputs=sticky_note, outputs=download_button)


if __name__ == "__main__":
    # Guard so importing this module (e.g. for testing) does not start the server.
    demo.launch()