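# Smart AI Meeting Assistant: a Streamlit app that transcribes an uploaded
# meeting recording with Whisper, summarizes it with BART, and drafts action
# items with Dolly.
#
# Assumed setup (a sketch, not a pinned environment): the openai-whisper PyPI
# package (imported below as `whisper`), transformers with a torch backend,
# streamlit, and ffmpeg available on PATH for audio decoding.
#
#     pip install streamlit transformers torch openai-whisper
#     streamlit run app.py   # assuming this file is saved as app.py
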
import streamlit as st
from transformers import pipeline
import whisper  # installed via the openai-whisper package; the module name is plain "whisper"
import tempfile
import os

def transcribe_audio(audio_path):
    model = whisper.load_model("base")  # Open-source Whisper model
    result = model.transcribe(audio_path)
    return result["text"]
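
# Streamlit re-executes this whole script on every widget interaction, so the
# load_model call above runs again on each rerun. A minimal cached-loader
# sketch (st.cache_resource is Streamlit's API for memoizing heavy objects);
# transcribe_audio could call get_whisper_model() in place of whisper.load_model:
@st.cache_resource
def get_whisper_model(name="base"):
    return whisper.load_model(name)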

def summarize_text(text):
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    # truncation=True keeps inputs within the model's token limit instead of
    # failing outright on long transcripts
    summary = summarizer(text, max_length=150, min_length=50, do_sample=False, truncation=True)
    return summary[0]['summary_text']
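
# The truncation above prevents a crash but discards everything past BART's
# roughly 1024-token input window. A minimal chunked variant for long meetings
# (summarize_long_text is a sketch; the 600-word chunk size only approximates
# the tokenizer limit):
def summarize_long_text(text, chunk_words=600):
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    words = text.split()
    chunks = [" ".join(words[i:i + chunk_words]) for i in range(0, len(words), chunk_words)]
    partials = [summarizer(chunk, max_length=150, min_length=30, do_sample=False,
                           truncation=True)[0]["summary_text"] for chunk in chunks]
    return " ".join(partials)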

def extract_action_items(text):
    task_generator = pipeline("text-generation", model="databricks/dolly-v2-3b")
    prompt = f"Extract action items from the following meeting notes:\n{text}\nAction Items:"
    # max_new_tokens bounds only the completion (max_length would count the prompt
    # too and overflow on real transcripts); return_full_text=False drops the
    # echoed prompt from the output
    tasks = task_generator(prompt, max_new_tokens=150, do_sample=True, return_full_text=False)
    return tasks[0]['generated_text']

st.title("Smart AI Meeting Assistant")

uploaded_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])

if uploaded_file is not None:
    # keep the uploaded file's real extension rather than forcing .mp3, so
    # ffmpeg/Whisper see a suffix that matches the container
    suffix = os.path.splitext(uploaded_file.name)[1] or ".mp3"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_audio:
        temp_audio.write(uploaded_file.read())
        temp_audio_path = temp_audio.name
    
    st.text("Transcribing...")
    transcript = transcribe_audio(temp_audio_path)
    st.text_area("Meeting Transcript:", transcript, height=200)
    os.remove(temp_audio_path)
    
    if st.button("Summarize Meeting"):
        summary = summarize_text(transcript)
        st.text_area("Meeting Summary:", summary, height=150)
    
    if st.button("Generate Action Items"):
        action_items = extract_action_items(transcript)
        st.text_area("Action Items:", action_items, height=150)