yourusername committed on
Commit
178400a
·
1 Parent(s): bf0db4f

:construction: wip

Browse files
Files changed (1) hide show
  1. dino/app.py +11 -12
dino/app.py CHANGED
@@ -11,7 +11,6 @@ import subprocess
11
 
12
  os.environ['KMP_DUPLICATE_LIB_OK']='True'
13
 
14
- subprocess.call("apt-get install ffmpeg".split())
15
 
16
  def func(resize, video):
17
  # return video
@@ -37,23 +36,23 @@ def func(resize, video):
37
  vid_generator = VideoGenerator(args)
38
  vid_generator.run()
39
 
40
- # Make a video that puts the resized input video + the attn output video together as one
41
- ffmpeg_cmd = """
42
- ffmpeg
43
- -i /tmp/outputs/original-reshaped.mp4
44
- -i /tmp/outputs/video.mp4
45
- -filter_complex hstack
46
- /tmp/outputs/stacked.mp4
47
- """
48
- subprocess.call(ffmpeg_cmd.split())
49
- return '/tmp/outputs/stacked.mp4'
50
 
51
  title = "Interactive demo: DINO"
52
  description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
53
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
54
  iface = gr.Interface(fn=func,
55
  inputs=[gr.inputs.Slider(120, 480, 20, label="resize"), gr.inputs.Video(type=None)],
56
- outputs="video",
57
  title=title,
58
  description=description,
59
  article=article)
 
11
 
12
  os.environ['KMP_DUPLICATE_LIB_OK']='True'
13
 
 
14
 
15
  def func(resize, video):
16
  # return video
 
36
  vid_generator = VideoGenerator(args)
37
  vid_generator.run()
38
 
39
+ # # Make a video that puts the resized input video + the attn output video together as one
40
+ # ffmpeg_cmd = """
41
+ # ffmpeg
42
+ # -i /tmp/outputs/original-reshaped.mp4
43
+ # -i /tmp/outputs/video.mp4
44
+ # -filter_complex hstack
45
+ # /tmp/outputs/stacked.mp4
46
+ # """
47
+ # subprocess.call(ffmpeg_cmd.split())
48
+ return '/tmp/outputs/video.mp4', '/tmp/outputs/original-reshaped.mp4'
49
 
50
  title = "Interactive demo: DINO"
51
  description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
52
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
53
  iface = gr.Interface(fn=func,
54
  inputs=[gr.inputs.Slider(120, 480, 20, label="resize"), gr.inputs.Video(type=None)],
55
+ outputs=[gr.outputs.Video(label='attn'), gr.outputs.Video(label='orig-reshaped')],
56
  title=title,
57
  description=description,
58
  article=article)