import math
import os

import streamlit as st
import openai
from pydub import AudioSegment
from transformers import pipeline
from dotenv import load_dotenv

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Chat history shared with the GPT auditor; the system prompt defines the
# auditor persona used for every analysis.
# NOTE(review): Streamlit re-runs this script top-to-bottom on every
# interaction, so this list is rebuilt each run — it does NOT persist across
# reruns despite the original comment; use st.session_state if persistence
# is actually required.
messages = [
    {
        "role": "system",
        "content": "You are a call center quality and assurance auditor. Your job is to review the call recording, and provide a very brief summary of the key information in the call including Operator’s Name, Call Category, Issue, and Solution. Also, you need to conduct sentiment analysis on the call and evaluate the customer's satisfaction rate from 1 to 10 and provide a very short straight-to-the-point area of improvement to the operator.",
    },
]


def _export_segments(audio_file, segment_length_ms):
    """Split *audio_file* into chunks of at most *segment_length_ms* ms,
    export each as an mp3 in the working directory, and return the paths."""
    duration_ms = len(audio_file)  # pydub: len() is the duration in ms
    num_segments = math.ceil(duration_ms / segment_length_ms)
    paths = []
    for i in range(num_segments):
        start = i * segment_length_ms
        end = min((i + 1) * segment_length_ms, duration_ms)
        path = f"segment_{i}.mp3"
        audio_file[start:end].export(path, format="mp3")
        paths.append(path)
    return paths


def _transcribe_segments(paths):
    """Run Whisper on each exported segment file and return the joined text."""
    texts = []
    for path in paths:
        # Fix: context manager closes the handle — the original called
        # open() per segment and never closed it (leaked file handles).
        with open(path, "rb") as audio_fh:
            result = openai.Audio.transcribe("whisper-1", audio_fh)
        texts.append(result["text"])
    # Fix: join once instead of quadratic `all_text +=` in the loop.
    return "".join(texts)


def transcribe(audio, segment_length=60000):
    """Transcribe *audio* with Whisper, summarize it, and render a GPT-based
    call-quality audit in the Streamlit page.

    Parameters
    ----------
    audio : file-like or path
        Anything accepted by ``pydub.AudioSegment.from_file`` (e.g. the
        Streamlit ``UploadedFile``).
    segment_length : int, optional
        Chunk size in milliseconds for each Whisper upload. Defaults to one
        minute (previously a hard-coded constant).

    Side effects: writes transcript, summary, and audit to the Streamlit
    page; appends the user transcript and assistant reply to ``messages``.
    """
    global messages

    audio_file = AudioSegment.from_file(audio)
    segment_paths = _export_segments(audio_file, segment_length)
    try:
        all_text = _transcribe_segments(segment_paths)
    finally:
        # Fix: the original left segment_*.mp3 files behind on disk after
        # every upload; always clean them up, even if transcription fails.
        for path in segment_paths:
            if os.path.exists(path):
                os.remove(path)

    summarizer = pipeline("summarization", model="slauw87/bart_summarisation")
    st.write(all_text)
    st.write('Summarizing...')
    st.write('---------------------------------')
    st.write(summarizer(all_text))

    messages.append({"role": "user", "content": all_text})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    systems_message = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": systems_message})
    # (The original also assembled an unused `chat_transcript` string here;
    # it was dead code and has been removed.)
    st.write(systems_message)


# --- Streamlit page layout --------------------------------------------------
st.title("AI Auditor for Call Center's Quality Assurance")
st.markdown("AI Alliance for Audio Analytics Team. Our project's objective is to conduct quality assurance on recorded calls, by transcribing the speech in the call to text using Whisper and then employing GPT-3 for sentiment analysis, summarization, and feedback including areas for improvement.")

uploaded_file = st.file_uploader("Upload an audio file", type=["mp3", "wav"])

# Run the full transcribe -> summarize -> audit pipeline once a file arrives.
if uploaded_file:
    transcribe(uploaded_file)