jonathanjordan21 committed on
Commit
3e2f324
1 Parent(s): 6662ab8

Create app.py

Files changed (1)
  app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
+ from langchain.llms import HuggingFacePipeline
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM
+ import gradio as gr
+ import os
+
+ from components import caption_chain, tag_chain
+ from components import pexels
+
+ # Instruction-tuned seq2seq model used to write captions and search tags
+ model = AutoModelForSeq2SeqLM.from_pretrained("declare-lab/flan-alpaca-gpt4-xl")
+ tokenizer = AutoTokenizer.from_pretrained("declare-lab/flan-alpaca-gpt4-xl")
+
+ pipe = pipeline(
+     'text2text-generation',
+     model=model,
+     tokenizer=tokenizer,
+     max_length=120
+ )
+
+ # Wrap the transformers pipeline so the LangChain chains can call it
+ local_llm = HuggingFacePipeline(pipeline=pipe)
+
+ llm_chain = caption_chain.chain(llm=local_llm)
+ sum_llm_chain = tag_chain.chain(llm=local_llm)
+
+ pexels_api_key = os.getenv('pexels_api_key')
+
+ def pred(product_name):
+     # Fetch stock clips for the product and stitch them into a single video
+     folder_name, sentences = pexels.generate_videos(product_name, pexels_api_key, 1920, 1080)
+     pexels.combine_videos(folder_name)
+     # Return values in the order of the `outputs` list below: captions, then video
+     return "\n".join(sentences), folder_name
+
+ with gr.Blocks() as demo:
+     textbox = gr.Textbox(label="Product Name")
+     captions = gr.Textbox(label="Captions")
+     video = gr.Video()
+     btn = gr.Button("Submit")
+     btn.click(pred, inputs=textbox, outputs=[captions, video])
+
+ demo.launch()
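Note: app.py depends on a local components package (caption_chain, tag_chain, pexels) that is not part of this commit, and on a pexels_api_key environment variable read via os.getenv. The stub below is a hypothetical sketch of the interface the call sites above assume, with signatures inferred from how app.py uses them; the actual module in this repo may differ.

    # components/pexels.py -- hypothetical interface sketch, inferred from the call sites in app.py
    def generate_videos(query, api_key, width, height):
        """Search Pexels for clips matching `query` at the given resolution, download them
        into a new folder, and return (folder_name, caption_sentences)."""
        raise NotImplementedError  # placeholder: real implementation lives in components/pexels.py

    def combine_videos(folder_name):
        """Concatenate the downloaded clips in `folder_name` into a single video file."""
        raise NotImplementedError  # placeholder

    # components/caption_chain.py and components/tag_chain.py are each assumed to expose a
    # chain(llm=...) factory that returns a LangChain chain built on the wrapped pipeline:
    def chain(llm):
        raise NotImplementedError  # placeholder

To run the Space, the pexels_api_key secret must be set in the environment before app.py is launched.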