Update app.py
app.py
CHANGED
@@ -13,64 +13,65 @@ from pixeltable.functions import openai
 if 'OPENAI_API_KEY' not in os.environ:
     os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')

-# Create a Table, a View, and Computed Columns
-
-pxt.drop_dir('directory', force=True)
-pxt.create_dir('directory')
-
-t = pxt.create_table(
-    'directory.video_table', {
-        "video": pxt.VideoType(nullable=True),
-        "sm_type": pxt.StringType(nullable=True),
-    }
-)
-
-frames_view = pxt.create_view(
-    "directory.frames",
-    t,
-    iterator=FrameIterator.create(video=t.video, fps=1)
-)
-
-# Create computed columns to store transformations and persist outputs
-t['audio'] = extract_audio(t.video, format='mp3')
-t['metadata'] = get_metadata(t.audio)
-t['transcription'] = openai.transcriptions(audio=t.audio, model='whisper-1')
-t['transcription_text'] = t.transcription.text
-
-# Custom UDF for Generating Social Media Prompts
-
-#Custom User-Defined Function (UDF) for Generating Social Media Prompts
-@pxt.udf
-def prompt(A: str, B: str) -> list[dict]:
-    system_msg = 'You are an expert in creating social media content and you generate effective post, based on user content. Respect the social media platform guidelines and constraints.'
-    user_msg = f'A: "{A}" \n B: "{B}"'
-    return [
-        {'role': 'system', 'content': system_msg},
-        {'role': 'user', 'content': user_msg}
-    ]
-
-# Apply the UDF to create a new column
-t['message'] = prompt(t.sm_type, t.transcription_text)
-
-"""## Generating Responses with OpenAI's GPT Model"""
-
-# # Generate responses using OpenAI's chat completion API
-t['response'] = openai.chat_completions(messages=t.message, model='gpt-4o-mini-2024-07-18', max_tokens=500)
-
-## Extract the content of the response
-t['answer'] = t.response.choices[0].message.content
-
 MAX_VIDEO_SIZE_MB = 35
 CONCURRENCY_LIMIT = 1

 def process_and_generate_post(video_file, social_media_type, progress=gr.Progress()):
+
+    progress(0, desc="Initializing...")
+
+    # Create a Table, a View, and Computed Columns
+
+    pxt.drop_dir('directory', force=True)
+    pxt.create_dir('directory')
+
+    t = pxt.create_table(
+        'directory.video_table', {
+            "video": pxt.VideoType(nullable=True),
+            "sm_type": pxt.StringType(nullable=True),
+        }
+    )
+
+    frames_view = pxt.create_view(
+        "directory.frames",
+        t,
+        iterator=FrameIterator.create(video=t.video, fps=1)
+    )
+
+    # Create computed columns to store transformations and persist outputs
+    t['audio'] = extract_audio(t.video, format='mp3')
+    t['metadata'] = get_metadata(t.audio)
+    t['transcription'] = openai.transcriptions(audio=t.audio, model='whisper-1')
+    t['transcription_text'] = t.transcription.text
+
+    # Custom UDF for Generating Social Media Prompts
+
+    #Custom User-Defined Function (UDF) for Generating Social Media Prompts
+    @pxt.udf
+    def prompt(A: str, B: str) -> list[dict]:
+        system_msg = 'You are an expert in creating social media content and you generate effective post, based on user content. Respect the social media platform guidelines and constraints.'
+        user_msg = f'A: "{A}" \n B: "{B}"'
+        return [
+            {'role': 'system', 'content': system_msg},
+            {'role': 'user', 'content': user_msg}
+        ]
+
+    # Apply the UDF to create a new column
+    t['message'] = prompt(t.sm_type, t.transcription_text)
+
+    """## Generating Responses with OpenAI's GPT Model"""
+
+    # # Generate responses using OpenAI's chat completion API
+    t['response'] = openai.chat_completions(messages=t.message, model='gpt-4o-mini-2024-07-18', max_tokens=500)
+
+    ## Extract the content of the response
+    t['answer'] = t.response.choices[0].message.content
+
     if not video_file:
         return "Please upload a video file.", None

     try:

-        progress(0, desc="Initializing...")
-
         # Check video file size
         video_size = os.path.getsize(video_file) / (1024 * 1024) # Convert to MB
         if video_size > MAX_VIDEO_SIZE_MB:
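The hunk cuts off inside the try block, so the code that feeds the table and returns the finished post is not visible in this diff. As a rough, hypothetical sketch of that downstream step (assuming Pixeltable's get_table / insert / select / collect API and the column names defined in the added block; the file path and platform value below are placeholders, not part of this commit):

import pixeltable as pxt

# Hypothetical follow-up, not part of this commit: inserting a row triggers the
# computed columns (audio -> transcription -> message -> response -> answer).
t = pxt.get_table('directory.video_table')
t.insert([{'video': '/path/to/clip.mp4', 'sm_type': 'LinkedIn'}])

# Read the generated post text back from the 'answer' computed column.
result = t.select(t.answer).collect()
print(result['answer'][0])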