import os
import tempfile

import streamlit as st
import whisper  # installed via the "openai-whisper" package
from transformers import pipeline
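# Assumed environment (not stated in the original listing): besides streamlit,
# transformers, and openai-whisper, the app needs torch and a system ffmpeg
# binary, which Whisper uses to decode the uploaded audio.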
@st.cache_resource
def load_whisper_model():
    # Cached so the model loads once, not on every Streamlit rerun.
    return whisper.load_model("base")


def transcribe_audio(audio_path):
    result = load_whisper_model().transcribe(audio_path)
    return result["text"]
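# Model size is a tunable assumption: Whisper also ships "tiny", "small",
# "medium", and "large" checkpoints that trade speed for accuracy.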
@st.cache_resource
def load_summarizer():
    return pipeline("summarization", model="facebook/bart-large-cnn")


def summarize_text(text):
    # truncation=True keeps over-long transcripts within BART's input window.
    summarizer = load_summarizer()
    summary = summarizer(text, max_length=150, min_length=50, do_sample=False, truncation=True)
    return summary[0]["summary_text"]
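# Even with truncation, anything past BART's ~1024-token window is silently
# dropped. A minimal workaround sketch (not part of the original script):
# summarize fixed-size chunks and stitch the partial summaries together. The
# 700-word chunk size is an assumption chosen to stay under the token limit.
def summarize_long_text(text, chunk_words=700):
    words = text.split()
    chunks = [" ".join(words[i:i + chunk_words]) for i in range(0, len(words), chunk_words)]
    return " ".join(summarize_text(chunk) for chunk in chunks)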
@st.cache_resource
def load_task_generator():
    return pipeline("text-generation", model="databricks/dolly-v2-3b")


def extract_action_items(text):
    prompt = f"Extract action items from the following meeting notes:\n{text}\nAction Items:"
    # max_new_tokens bounds only the generated continuation; the original
    # max_length=100 also counted the long prompt, so generation would have
    # been cut off almost immediately.
    tasks = load_task_generator()(prompt, max_new_tokens=100, do_sample=True)
    # The pipeline echoes the prompt, so keep only the text after the cue.
    return tasks[0]["generated_text"].split("Action Items:")[-1].strip()
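# Optional post-processing sketch (not in the original): the generated text is
# one free-form blob, so splitting it into lines makes it easy to render as a
# checklist in the UI.
def split_action_items(raw):
    items = [line.strip("-*• ").strip() for line in raw.splitlines()]
    return [item for item in items if item]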
st.title("Smart AI Meeting Assistant")
uploaded_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])
if uploaded_file is not None:
    # Whisper decodes through ffmpeg, which needs a real file on disk, so
    # persist the upload to a temp file that keeps its original extension
    # (the original hard-coded ".mp3" even for wav and m4a uploads).
    suffix = os.path.splitext(uploaded_file.name)[1] or ".mp3"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_audio:
        temp_audio.write(uploaded_file.read())
        temp_audio_path = temp_audio.name

    st.text("Transcribing...")
    transcript = transcribe_audio(temp_audio_path)
    st.text_area("Meeting Transcript:", transcript, height=200)
    os.remove(temp_audio_path)
    # The buttons stay inside the upload branch so `transcript` is defined
    # whenever they fire; note that each click reruns the whole script.
    if st.button("Summarize Meeting"):
        summary = summarize_text(transcript)
        st.text_area("Meeting Summary:", summary, height=150)

    if st.button("Generate Action Items"):
        action_items = extract_action_items(transcript)
        st.text_area("Action Items:", action_items, height=150)
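# Caveat, and a sketch of a fix (not in the original): every button click
# reruns this script from the top, so the upload is re-transcribed before the
# summary or action items appear. Caching the transcript in st.session_state,
# keyed by the file name, avoids the repeat work:
#
#     if st.session_state.get("audio_name") != uploaded_file.name:
#         st.session_state["transcript"] = transcribe_audio(temp_audio_path)
#         st.session_state["audio_name"] = uploaded_file.name
#     transcript = st.session_state["transcript"]
#
# Launch the app with `streamlit run app.py` (the file name is assumed).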
|