Haoxin Chen committed on
Commit
4a341a4
·
1 Parent(s): 62b6d65
Files changed (2) hide show
  1. app.py +69 -5
  2. videocrafter_test.py +4 -4
app.py CHANGED
@@ -8,16 +8,16 @@ sys.path.insert(1, os.path.join(sys.path[0], 'lvdm'))
8
  def videocrafter_demo(result_dir='./tmp/'):
9
  text2video = Text2Video(result_dir)
10
  with gr.Blocks(analytics_enabled=False) as videocrafter_iface:
 
 
11
  with gr.Row().style(equal_height=False):
12
  with gr.Tab(label="VideoCrafter"):
13
- input_text = gr.Text()
14
  model_choices=['origin','vangogh','frozen','yourname', 'coco']
15
- trigger_word_list=[' ','Loving Vincent style', 'frozenmovie style', 'MakotoShinkaiYourName style', 'coco style']
16
 
17
  with gr.Row():
18
  model_index = gr.Dropdown(label='Models', elem_id=f"model", choices=model_choices, value=model_choices[0], type="index",interactive=True)
19
- trigger_word=gr.Dropdown(label='Trigger Word', elem_id=f"trigger_word", choices=trigger_word_list, value=trigger_word_list[0], interactive=True)
20
-
21
  with gr.Row():
22
  steps = gr.Slider(minimum=1, maximum=200, step=1, elem_id=f"steps", label="Sampling steps", value=50)
23
  eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="eta")
@@ -31,6 +31,71 @@ def videocrafter_demo(result_dir='./tmp/'):
31
  with gr.Column():
32
  output_video_1 = gr.PlayableVideo()
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  send_btn.click(
35
  fn=text2video.get_prompt,
36
  inputs=[
@@ -40,7 +105,6 @@ def videocrafter_demo(result_dir='./tmp/'):
40
  eta,
41
  cfg_scale,
42
  lora_scale,
43
- trigger_word
44
  ],
45
  outputs=[output_video_1],
46
  )
 
8
  def videocrafter_demo(result_dir='./tmp/'):
9
  text2video = Text2Video(result_dir)
10
  with gr.Blocks(analytics_enabled=False) as videocrafter_iface:
11
+ gr.Markdown("<div align='center'> <h2> VideoCrafter: A Toolkit for Text-to-Video Generation and Editing </span> </h2> \
12
+ <a style='font-size:18px;color: #efefef' href='https://github.com/VideoCrafter/VideoCrafter'> Github </div>")
13
  with gr.Row().style(equal_height=False):
14
  with gr.Tab(label="VideoCrafter"):
15
+ input_text = gr.Text(label='Prompts')
16
  model_choices=['origin','vangogh','frozen','yourname', 'coco']
 
17
 
18
  with gr.Row():
19
  model_index = gr.Dropdown(label='Models', elem_id=f"model", choices=model_choices, value=model_choices[0], type="index",interactive=True)
20
+
 
21
  with gr.Row():
22
  steps = gr.Slider(minimum=1, maximum=200, step=1, elem_id=f"steps", label="Sampling steps", value=50)
23
  eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="eta")
 
31
  with gr.Column():
32
  output_video_1 = gr.PlayableVideo()
33
 
34
+ with gr.Row():
35
+ examples = [
36
+ [
37
+ 'an elephant is walking under the sea, 4K, high definition',
38
+ 50,
39
+ 'origin',
40
+ 1,
41
+ 15,
42
+ 1,
43
+ ],
44
+ [
45
+ 'an astronaut riding a horse in outer space',
46
+ 25,
47
+ 'origin',
48
+ 1,
49
+ 15,
50
+ 1,
51
+ ],
52
+ [
53
+ 'a monkey is playing a piano',
54
+ 25,
55
+ 'vangogh',
56
+ 1,
57
+ 15,
58
+ 1,
59
+ ],
60
+ [
61
+ 'A fire is burning on a candle',
62
+ 25,
63
+ 'frozen',
64
+ 1,
65
+ 15,
66
+ 1,
67
+ ],
68
+ [
69
+ 'a horse is drinking in the river',
70
+ 25,
71
+ 'yourname',
72
+ 1,
73
+ 15,
74
+ 1,
75
+ ],
76
+ [
77
+ 'Robot dancing in times square',
78
+ 25,
79
+ 'coco',
80
+ 1,
81
+ 15,
82
+ 1,
83
+ ],
84
+
85
+ ]
86
+ gr.Examples(examples=examples,
87
+ inputs=[
88
+ input_text,
89
+ steps,
90
+ model_index,
91
+ eta,
92
+ cfg_scale,
93
+ lora_scale],
94
+ outputs=[output_video_1],
95
+ fn=text2video.get_prompt,
96
+ cache_examples=False)
97
+ #cache_examples=os.getenv('SYSTEM') == 'spaces')
98
+
99
  send_btn.click(
100
  fn=text2video.get_prompt,
101
  inputs=[
 
105
  eta,
106
  cfg_scale,
107
  lora_scale,
 
108
  ],
109
  outputs=[output_video_1],
110
  )
videocrafter_test.py CHANGED
@@ -42,7 +42,7 @@ class Text2Video():
42
  'models/videolora/lora_002_frozenmovie_style.ckpt',
43
  'models/videolora/lora_003_MakotoShinkaiYourName_style.ckpt',
44
  'models/videolora/lora_004_coco_style.ckpt']
45
-
46
  model, _, _ = load_model(config, ckpt_path, gpu_id=0, inject_lora=False)
47
  self.model = model
48
  self.last_time_lora = ''
@@ -52,9 +52,9 @@ class Text2Video():
52
  self.ddim_sampler = DDIMSampler(model)
53
  self.origin_weight = None
54
 
55
- def get_prompt(self, input_text, steps=50, model_index=0, eta=1.0, cfg_scale=15.0, lora_scale=1.0, trigger_word=''):
56
- if trigger_word !=' ':
57
- input_text = input_text + ', ' + trigger_word
58
  inject_lora = model_index > 0
59
  self.origin_weight = change_lora_v2(self.model, inject_lora=inject_lora, lora_scale=lora_scale, lora_path=self.lora_path_list[model_index],
60
  last_time_lora=self.last_time_lora, last_time_lora_scale=self.last_time_lora_scale, origin_weight=self.origin_weight)
 
42
  'models/videolora/lora_002_frozenmovie_style.ckpt',
43
  'models/videolora/lora_003_MakotoShinkaiYourName_style.ckpt',
44
  'models/videolora/lora_004_coco_style.ckpt']
45
+ self.lora_trigger_word_list = ['','Loving Vincent style', 'frozenmovie style', 'MakotoShinkaiYourName style', 'coco style']
46
  model, _, _ = load_model(config, ckpt_path, gpu_id=0, inject_lora=False)
47
  self.model = model
48
  self.last_time_lora = ''
 
52
  self.ddim_sampler = DDIMSampler(model)
53
  self.origin_weight = None
54
 
55
+ def get_prompt(self, input_text, steps=50, model_index=0, eta=1.0, cfg_scale=15.0, lora_scale=1.0):
56
+ if model_index > 0:
57
+ input_text = input_text + ', ' + self.lora_trigger_word_list[model_index]
58
  inject_lora = model_index > 0
59
  self.origin_weight = change_lora_v2(self.model, inject_lora=inject_lora, lora_scale=lora_scale, lora_path=self.lora_path_list[model_index],
60
  last_time_lora=self.last_time_lora, last_time_lora_scale=self.last_time_lora_scale, origin_weight=self.origin_weight)