# Gradio app: Whisper transcription with LLM-based summarization.
import os

import gradio as gr
import whisper
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from utils import (
    RefineDataSummarizer,
    prompt_template,
    refine_template,
    prompt_template_bullet_point,
    refine_prompt_template_bullet_point,
)
def get_prompt_examples():
    """Return the prompt-template presets shown in the Examples widget.

    Each preset is ``[display label, summary template, refine template]``,
    matching the ``inputs=[placeholder, prompt, refine_prompt]`` wiring.
    """
    return [
        ["Regular Template: ", prompt_template, refine_template],
        ["Bullet Point Template: ", prompt_template_bullet_point, refine_prompt_template_bullet_point],
        ["Empty Template: ", '{text}', '{text}'],
    ]
def convert_to_time_format(seconds_float):
    """Format a duration in seconds as an SRT timestamp ``HH:MM:SS,mmm``.

    Args:
        seconds_float: Non-negative duration in seconds (may be fractional).

    Returns:
        The timestamp string, e.g. ``3661.5`` -> ``"01:01:01,500"``.
    """
    # Round the TOTAL milliseconds first. Rounding only the fractional part
    # (the previous approach) could produce 1000 ms and emit an invalid
    # component like "00:00:00,1000" for inputs such as 0.9996.
    total_ms = round(seconds_float * 1000)
    seconds, milliseconds = divmod(total_ms, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{milliseconds:03d}"
def time_stamped_text(transcript_result):
    """Render a Whisper transcription result dict as SRT-formatted text.

    Args:
        transcript_result: Mapping with a ``'segments'`` list; each segment
            provides ``'start'``, ``'end'`` (seconds) and ``'text'``.

    Returns:
        SRT text: numbered blocks of ``index / start --> end / sentence``.
    """
    blocks = []
    for index, segment in enumerate(transcript_result['segments'], start=1):
        start = convert_to_time_format(segment["start"])
        end = convert_to_time_format(segment["end"])
        sentence = segment["text"].strip()
        blocks.append(f"{index}\n{start} --> {end}\n{sentence}\n\n")
    return ''.join(blocks).strip()
def transcript(file_dir, model_type, time_stamp):
    """Transcribe an audio/video file with a local Whisper checkpoint.

    Args:
        file_dir: Path to the uploaded media file.
        model_type: Checkpoint filename looked up under the local ``models/``
            directory (e.g. ``"medium.pt"``).
        time_stamp: If True, return SRT text with timestamps; otherwise one
            plain line per segment.

    Returns:
        ``[transcript text, "Transcript.txt"]`` — the text plus the path of
        the file written for the download widget.
    """
    model_dir = os.path.join('models', model_type)
    model = whisper.load_model(model_dir)
    result = model.transcribe(file_dir, language='English', task='transcribe')
    if time_stamp:
        text = time_stamped_text(result)
    else:
        # One segment per line; join avoids quadratic += concatenation and
        # strip() drops the trailing newline, matching the SRT branch.
        text = '\n'.join(s['text'] for s in result['segments']).strip()
    # Explicit UTF-8: transcripts can contain non-ASCII characters, which
    # would crash the write on platforms with a non-UTF-8 default encoding.
    with open("Transcript.txt", 'w', encoding='utf-8') as file:
        file.write(text)
    return [text, "Transcript.txt"]
def upload_file(file_paths):
    """Pass the uploaded file path(s) straight through to the gr.File output."""
    selected = file_paths
    return selected
def summary(text, chunk_num, chunk_overlap, llm_type, prompt, refine_prompt):
    """Summarize ``text`` with an Azure OpenAI chat model via a refine chain.

    Args:
        text: Transcript text to summarize.
        chunk_num: Number of chunks to split the text into.
        chunk_overlap: Character overlap between adjacent chunks.
        llm_type: Azure deployment name (dropdown value, e.g. ``"gpt-4"``).
        prompt: Initial summarization prompt template.
        refine_prompt: Refine-step prompt template.

    Returns:
        ``[summary text, "Summary.txt"]`` — the summary plus the path of the
        file written for the download widget.

    Raises:
        RuntimeError: If the required Azure environment variables are unset.
    """
    api_key = os.getenv("azure_api")
    endpoint = os.getenv("azure_endpoint")
    # Fail with a clear message instead of the opaque TypeError that
    # os.environ[...] = None would raise when the variables are missing.
    if api_key is None or endpoint is None:
        raise RuntimeError("Environment variables 'azure_api' and 'azure_endpoint' must be set")
    os.environ["AZURE_OPENAI_API_KEY"] = api_key
    os.environ["AZURE_OPENAI_ENDPOINT"] = endpoint
    llm = AzureChatOpenAI(
        openai_api_version=os.getenv("azure_api_version"),
        azure_deployment=llm_type,
    )
    rds = RefineDataSummarizer(llm=llm, prompt_template=prompt, refine_template=refine_prompt)
    result = rds.get_summarization(text, chunk_num=chunk_num, chunk_overlap=chunk_overlap)
    summary_text = result["output_text"]
    # Explicit UTF-8 for the same portability reason as the transcript file.
    with open("Summary.txt", 'w', encoding='utf-8') as file:
        file.write(summary_text)
    return [summary_text, "Summary.txt"]
# --- UI layout: left column = transcription, right column = summarization ---
with gr.Blocks() as demo:
    with gr.Row(equal_height=False):
        with gr.Column():
            file_output = gr.File()
            upload_button = gr.UploadButton("Click to Upload a File", file_types=["audio", "video"], file_count="single")
            upload_button.upload(upload_file, upload_button, file_output)
            model_type = gr.Dropdown(
                [
                    "tiny.en.pt",
                    "tiny.pt",
                    "small.en.pt",
                    "small.pt",
                    "base.en.pt",
                    "base.pt",
                    "medium.en.pt",
                    "medium.pt",
                    "large-v1.pt",
                    "large-v2.pt",
                ], label="Model Type", value="medium.pt")
            time_stamp = gr.Checkbox(label="SRT Format", info="SRT format with timestamps")
            transcript_button = gr.Button("Transcript", variant="primary")
            transcript_text = gr.Textbox(placeholder="Transcript Result", label="Transcript")
            # Accordion labels are plain strings; the previous list values
            # rendered as their repr (e.g. "['Download Transcript']").
            with gr.Accordion(open=False, label="Download Transcript"):
                transcript_file = gr.File()
        with gr.Column():
            with gr.Accordion(open=True, label="summary settings"):
                chunk_num = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label="Chunk Number", value=1)
                chunk_overlap = gr.Number(precision=0, minimum=1, maximum=9999, step=1, label="Chunk Overlap", value=100)
                # Hidden textbox that absorbs each example's display label.
                placeholder = gr.Textbox(visible=False)
                prompt = gr.Textbox(placeholder="summary prompt", label="Summary Template", lines=5, value=prompt_template)
                refine_prompt = gr.Textbox(placeholder="refine summary prompt", label="Refine Summary Template", lines=10, value=refine_template)
                with gr.Accordion(open=False, label="Templates"):
                    gr.Examples(
                        examples=get_prompt_examples(),
                        inputs=[placeholder, prompt, refine_prompt],
                        fn=None,
                        outputs=None,
                        cache_examples=False,
                        label="Prompt Template"
                    )
            with gr.Accordion(open=False, label="llm settings"):
                # Values double as Azure deployment names (see summary()).
                llm_type = gr.Dropdown(
                    [
                        "gpt-4-32k",
                        "gpt-4",
                        "gpt-4-1106-preview",
                        "gpt-35-turbo",
                        "gpt-35-turbo-16k"
                    ], label="LLM Type", value="gpt-4-1106-preview")
            summary_button = gr.Button("Summary", variant="primary")
            summary_text = gr.Textbox(placeholder="Summary Result", label="Summary")
            with gr.Accordion(open=False, label="Download Summary"):
                summary_file = gr.File()
    transcript_button.click(
        fn=transcript,
        inputs=[
            file_output,
            model_type,
            time_stamp
        ],
        outputs=[transcript_text, transcript_file]
    )
    summary_button.click(
        fn=summary,
        inputs=[
            transcript_text,
            chunk_num,
            chunk_overlap,
            llm_type,
            prompt,
            refine_prompt
        ],
        outputs=[summary_text, summary_file]
    )

demo.launch()