import os
import json
import gradio as gr
import requests
import csv
import argparse
import shutil
from vlog4chat import Vlogger4chat
from vlog4debate import Debate
from utils import download_video

#prompt_templates = {"Default ChatGPT": ""}

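# * Video and segmentation settings *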
parser = argparse.ArgumentParser()
parser.add_argument('--video_path', default='./BV11H4y1F7uH-P50.mp4')
parser.add_argument('--alpha', default=10, type=int, help='Determines the maximum number of segments for the KTS algorithm; the larger the value, the fewer segments.')
parser.add_argument('--beta', default=1, type=int, help='The smallest time gap between successive clips, in seconds.')
parser.add_argument('--data_dir', default='./', type=str, help='Directory for saving videos and logs.')
parser.add_argument('--tmp_dir', default='./', type=str, help='Directory for saving intermediate files.')

# * Model settings *
parser.add_argument('--openai_api_key', default='xxx', type=str, help='OpenAI API key')
parser.add_argument('--image_caption', action='store_true', dest='image_caption', default=True, help='Set this flag to True if you want to use BLIP Image Caption')
parser.add_argument('--dense_caption', action='store_true', dest='dense_caption', default=True, help='Set this flag to True if you want to use Dense Caption')
parser.add_argument('--feature_extractor', default='./clip-vit-base-patch32', help='Select the feature extractor model for video segmentation')
parser.add_argument('--feature_extractor_device', choices=['cuda', 'cpu'], default='cuda', help='Select the device: cuda or cpu')
parser.add_argument('--image_captioner', choices=['blip2-opt', 'blip2-flan-t5', 'blip'], dest='captioner_base_model', default='blip2-opt', help='blip2 requires 15G GPU memory, blip requires 6G GPU memory')
parser.add_argument('--image_captioner_device', choices=['cuda', 'cpu'], default='cuda', help='Select the device: cuda or cpu, gpu memory larger than 14G is recommended')
parser.add_argument('--dense_captioner_device', choices=['cuda', 'cpu'], default='cuda', help='Select the device: cuda or cpu; less than 6G of GPU memory is not recommended')
parser.add_argument('--audio_translator', default='large')
parser.add_argument('--audio_translator_device', choices=['cuda', 'cpu'], default='cuda')
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo'], default='gpt-3.5-turbo')

args = parser.parse_args()

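# One Vlogger4chat instance is shared across all Gradio callbacks.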
vlogger = Vlogger4chat(args)

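# Fresh per-session chat state: a running token count plus the message history.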
def get_empty_state():
    return {"total_tokens": 0, "messages": []}


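# Send the user's question to the video QA pipeline (Vlogger4chat.chat2video),
# record both sides in the history, and return (user, system) pairs for the Chatbot.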
def submit_message(prompt, state):
    history = state['messages']

    if not prompt:
        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], state

    prompt_msg = { "role": "user", "content": prompt }
    
    try:
        history.append(prompt_msg)
        answer = vlogger.chat2video(prompt)
        history.append({"role": "system", "content": answer})

    except Exception as e:
        # prompt_msg is already in the history, so only append the error reply
        # to keep user/system messages paired.
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    return '', chat_messages, state


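# Same flow as submit_message, but the question is treated as a debate topic and
# answered by the multi-agent Debate pipeline configured in config4all.json.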
def submit_message_debate(prompt, state):
    history = state['messages']
    
    if not prompt:
        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], state

    prompt_msg = { "role": "user", "content": prompt }
    
    try:
        history.append(prompt_msg)

        # The non-empty prompt is used directly as the debate topic.
        debate_topic = prompt

        config = json.load(open("./config4all.json", "r"))
        config['debate_topic'] = debate_topic

        debate = Debate(num_players=3, config=config, temperature=0, sleep_time=0)
        answer = debate.run()

        #chat_messages = [(res["debate_topic"]), (res["base_answer"]), (res["debate_answer"]), (res["Reason"])]
        history.append({"role": "system", "content": answer})

    except Exception as e:
        # prompt_msg is already in the history, so only append the error reply
        # to keep user/system messages paired.
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    return '', chat_messages, state


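# Reset the Vlogger4chat history and clear all UI components for a new conversation.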
def clear_conversation():
    vlogger.clean_history()
    return gr.update(value=None, visible=True), gr.update(value=None, interactive=True), None, gr.update(value=None, visible=True), get_empty_state()


# download video from any online URL 
def subvid_fn(vid):
    print(vid)
    save_path = download_video(vid)
    return gr.update(value=save_path)


# Local upload, for use when running on a local URL: http://127.0.0.1:6006
def uploaded_video(video_file):
    UPLOAD_FOLDER = "./"   
    if not os.path.exists(UPLOAD_FOLDER):    
        os.mkdir(UPLOAD_FOLDER)    
    shutil.copy(video_file, UPLOAD_FOLDER)    
    gr.Info("File Uploaded!!!")   
    save_path = os.path.join(UPLOAD_FOLDER, os.path.basename(video_file))
    return gr.update(value=save_path)

    
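# Run the video-to-document pipeline and show the generated log in the textbox.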
def vlog_fn(vid_path):
    print(vid_path)
    if vid_path is None:
        log_text = "====== Please choose existing video from the library or provide video URL 🤔====="
    else:
        log_list = vlogger.video2log(vid_path)
        log_text = "\n".join(log_list)
    return gr.update(value=log_text, visible=True)

# Dictionary for recording the submitted answers
answers = {}

# Handle the answers selected by the user in the pre-test
def submit_answers_pretest(question1, question2, question3, question4, question5, question6, question7, question8, question9, question10):
    answers['Question 1'] = question1
    answers['Question 2'] = question2
    answers['Question 3'] = question3
    answers['Question 4'] = question4
    answers['Question 5'] = question5
    answers['Question 6'] = question6
    answers['Question 7'] = question7
    answers['Question 8'] = question8
    answers['Question 9'] = question9
    answers['Question 10'] = question10
    
    # Save the results to a file
    with open('answers4pretest.txt', 'a') as f:
        f.write(f"Question 1: {question1}\n")
        f.write(f"Question 2: {question2}\n")
        f.write(f"Question 3: {question3}\n")
        f.write(f"Question 4: {question4}\n")
        f.write(f"Question 5: {question5}\n")
        f.write(f"Question 6: {question6}\n")
        f.write(f"Question 7: {question7}\n")
        f.write(f"Question 8: {question8}\n")
        f.write(f"Question 9: {question9}\n")
        f.write(f"Question 10: {question10}\n\n")
    
    # Return a confirmation message
    return "谢谢你提交答案!"

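# Same as submit_answers_pretest, but the answers are appended to answers4posttest.txt.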
def submit_answers_posttest(question1, question2, question3, question4, question5, question6, question7, question8, question9, question10):
    answers['Question 1'] = question1
    answers['Question 2'] = question2
    answers['Question 3'] = question3
    answers['Question 4'] = question4
    answers['Question 5'] = question5
    answers['Question 6'] = question6
    answers['Question 7'] = question7
    answers['Question 8'] = question8
    answers['Question 9'] = question9
    answers['Question 10'] = question10
    
    # Save the results to a file
    with open('answers4posttest.txt', 'a') as f:
        f.write(f"Question 1: {question1}\n")
        f.write(f"Question 2: {question2}\n")
        f.write(f"Question 3: {question3}\n")
        f.write(f"Question 4: {question4}\n")
        f.write(f"Question 5: {question5}\n")
        f.write(f"Question 6: {question6}\n")
        f.write(f"Question 7: {question7}\n")
        f.write(f"Question 8: {question8}\n")
        f.write(f"Question 9: {question9}\n")
        f.write(f"Question 10: {question10}\n\n")
    
    # Return a confirmation message
    return "谢谢你提交答案!"

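# Custom CSS for the Gradio layout (container width, component sizes, font sizes).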
css = """
      #col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
      #video_inp {min-height: 100px}
      #chatbox {min-height: 100px;}
      #header {text-align: center;}
      #hint {font-size: 2.0em; padding: 0.5em; margin: 0;}
      .message { font-size: 1.2em; }
      """

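# Three-tab UI: a pre-test questionnaire, the VLog chat workspace, and a post-test questionnaire.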
with gr.Blocks(css=css) as demo:
    
    with gr.Tabs():
        # First tab
        with gr.TabItem("第一步(观看前测试)"):
            gr.Markdown("""## 在观看视频前,我们先进行一个简单的测试: 由和视频内容相关的六道题目组成,
                        请访问:[正则化](https://www.wjx.cn/vm/PpJMhuc.aspx#) 进行答题。""",
                                elem_id="header")
        # Second tab
        with gr.TabItem("第二步(VLog使用)"):
            state = gr.State(get_empty_state())

            with gr.Column(elem_id="col-container"):
                gr.Markdown("""## 🎞️ 视频Chat: 
                            Powered by CLIP, BLIP2, GRIT, RAM++, PaddleOCR, Whisper, Custom LLMs and LangChain""",
                            elem_id="header")

                with gr.Row():
                    with gr.Column():
                        video_inp = gr.Video(label="video_input")

                        gr.Markdown("Step 1: 请在下方选取视频(Select videos below)", elem_id="hint")
                        examples = gr.Examples(
                            examples=[
                                ["./BV11H4y1F7uH-P58.mp4"],
                            ],
                            inputs=[video_inp],
                        )
                        
                        #with gr.Row():
                        #    video_id = gr.Textbox(value="", placeholder="Download video url", show_label=False)
                        #    vidsub_btn = gr.Button("上传网站视频")

                        chatbot = gr.Chatbot(elem_id="chatbox")
                        input_message = gr.Textbox(show_label=False, placeholder="输入提问内容并按回车(Input your question)", visible=True)
                        btn_submit = gr.Button("提问视频内容(Submit)")
                        
                        #gr.Markdown("如果对上面的回答不满意,请在下方输入需要辩论的问题, *e.g.* *方差越小越好? 正则化必不可缺? 梯度一定存在?*", elem_id="hint")
                        #chatbot_debate = gr.Chatbot(elem_id="chatbox")
                        #input_message_debate = gr.Textbox(show_label=False, placeholder="输入辩论主题并按回车(Input your debate topic)", visible=True)
                        #btn_submit_debate = gr.Button("发起问题辩论(Submit)")
                        
                        btn_clear_conversation = gr.Button("🔃 开始新的对话(Start new conversation)")

                    with gr.Column():
                        vlog_btn = gr.Button("Step 2: 查看视频日志(Please wait around 20 seconds)")
                        vlog_outp = gr.Textbox(label="Document output", lines=70)
                        total_tokens_str = gr.Markdown(elem_id="total_tokens_str")

                

            gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co./spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br></center>''')

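            # Wire UI events to their callbacks.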
            btn_submit.click(submit_message, [input_message, state], [input_message, chatbot, state])
            input_message.submit(submit_message, [input_message, state], [input_message, chatbot, state])
            #btn_submit_debate.click(submit_message_debate, [input_message_debate, state], [input_message_debate, chatbot])
            #input_message_debate.submit(submit_message_debate, [input_message_debate, state], [input_message_debate, chatbot])
            btn_clear_conversation.click(clear_conversation, [], [input_message, video_inp, chatbot, vlog_outp, state])
            vlog_btn.click(vlog_fn, [video_inp], [vlog_outp])
            #vidsub_btn.click(subvid_fn, [video_id], [video_inp])
            
        # Third tab
        with gr.TabItem("第三步(观后测试)"):
            gr.Markdown("""## 在观看视频后,我们再进行一个简单的测试: 也是由和视频内容相关的十道题目组成,
                        请访问:[正则化](https://www.wjx.cn/vm/moF3yHH.aspx#) 进行答题。""", 
                                elem_id="header")
        

    demo.load(queue=False)


demo.queue()

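# share=True asks Gradio for a public link in addition to the local URL.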
if __name__ == "__main__":
    demo.launch(share=True)