BlackBeenie commited on
Commit
8dd41a8
1 Parent(s): 09b19a1

Add additional files

Browse files
chain_img_processor/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .image import ChainImgProcessor, ChainImgPlugin, get_single_image_processor, version
2
+ from .video import ChainVideoProcessor, get_single_video_processor
3
+ from .batchimage import ChainBatchImageProcessor
4
+ from .ffmpeg_writer import FFMPEG_VideoWriter
chain_img_processor/batchimage.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Callable
2
+ import psutil
3
+ import os
4
+ from concurrent.futures import ThreadPoolExecutor, as_completed
5
+ from queue import Queue
6
+ from .image import ChainImgProcessor
7
+ from tqdm import tqdm
8
+ import cv2
9
+
10
+ def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
11
+ queue: Queue[str] = Queue()
12
+ for frame_path in temp_frame_paths:
13
+ queue.put(frame_path)
14
+ return queue
15
+
16
+
17
+ def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
18
+ queues = []
19
+ for _ in range(queue_per_future):
20
+ if not queue.empty():
21
+ queues.append(queue.get())
22
+ return queues
23
+
24
+
25
+
26
+ class ChainBatchImageProcessor(ChainImgProcessor):
27
+ chain = None
28
+ func_params_gen = None
29
+ num_threads = 1
30
+
31
+ def __init__(self):
32
+ ChainImgProcessor.__init__(self)
33
+
34
+
35
+ def init_with_plugins(self):
36
+ self.init_plugins(["core"])
37
+ self.display_init_info()
38
+
39
+ init_on_start_arr = self.init_on_start.split(",")
40
+ for proc_id in init_on_start_arr:
41
+ self.init_processor(proc_id)
42
+
43
+ def update_progress(self, progress: Any = None) -> None:
44
+ process = psutil.Process(os.getpid())
45
+ memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
46
+ progress.set_postfix({
47
+ 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
48
+ 'execution_threads': self.num_threads
49
+ })
50
+ progress.refresh()
51
+ progress.update(1)
52
+
53
+
54
+ def process_frames(self, source_files: List[str], target_files: List[str], current_files, update: Callable[[], None]) -> None:
55
+ for f in current_files:
56
+ temp_frame = cv2.imread(f)
57
+ if temp_frame is not None:
58
+ if self.func_params_gen:
59
+ params = self.func_params_gen(None, temp_frame)
60
+ else:
61
+ params = {}
62
+ resimg, _ = self.run_chain(temp_frame, params, self.chain)
63
+ if resimg is not None:
64
+ i = source_files.index(f)
65
+ cv2.imwrite(target_files[i], resimg)
66
+ if update:
67
+ update()
68
+
69
+
70
+ def run_batch_chain(self, source_files, target_files, threads:int = 1, chain = None, params_frame_gen_func = None):
71
+ self.chain = chain
72
+ self.func_params_gen = params_frame_gen_func
73
+ progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
74
+ total = len(source_files)
75
+ self.num_threads = threads
76
+ with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
77
+ with ThreadPoolExecutor(max_workers=threads) as executor:
78
+ futures = []
79
+ queue = create_queue(source_files)
80
+ queue_per_future = max(len(source_files) // threads, 1)
81
+ while not queue.empty():
82
+ future = executor.submit(self.process_frames, source_files, target_files, pick_queue(queue, queue_per_future), lambda: self.update_progress(progress))
83
+ futures.append(future)
84
+ for future in as_completed(futures):
85
+ future.result()
86
+
chain_img_processor/ffmpeg_writer.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FFMPEG_Writer - write set of frames to video file
3
+
4
+ original from
5
+ https://github.com/Zulko/moviepy/blob/master/moviepy/video/io/ffmpeg_writer.py
6
+
7
+ removed unnecessary dependencies
8
+
9
+ The MIT License (MIT)
10
+
11
+ Copyright (c) 2015 Zulko
12
+ Copyright (c) 2023 Janvarev Vladislav
13
+ """
14
+
15
+ import os
16
+ import subprocess as sp
17
+
18
+ PIPE = -1
19
+ STDOUT = -2
20
+ DEVNULL = -3
21
+
22
+ FFMPEG_BINARY = "ffmpeg"
23
+
24
+ class FFMPEG_VideoWriter:
25
+ """ A class for FFMPEG-based video writing.
26
+
27
+ A class to write videos using ffmpeg. ffmpeg will write in a large
28
+ choice of formats.
29
+
30
+ Parameters
31
+ -----------
32
+
33
+ filename
34
+ Any filename like 'video.mp4' etc. but if you want to avoid
35
+ complications it is recommended to use the generic extension
36
+ '.avi' for all your videos.
37
+
38
+ size
39
+ Size (width,height) of the output video in pixels.
40
+
41
+ fps
42
+ Frames per second in the output video file.
43
+
44
+ codec
45
+ FFMPEG codec. It seems that in terms of quality the hierarchy is
46
+ 'rawvideo' = 'png' > 'mpeg4' > 'libx264'
47
+ 'png' manages the same lossless quality as 'rawvideo' but yields
48
+ smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list
49
+ of accepted codecs.
50
+
51
+ Note for default 'libx264': by default the pixel format yuv420p
52
+ is used. If the video dimensions are not both even (e.g. 720x405)
53
+ another pixel format is used, and this can cause problem in some
54
+ video readers.
55
+
56
+ audiofile
57
+ Optional: The name of an audio file that will be incorporated
58
+ to the video.
59
+
60
+ preset
61
+ Sets the time that FFMPEG will take to compress the video. The slower,
62
+ the better the compression rate. Possibilities are: ultrafast,superfast,
63
+ veryfast, faster, fast, medium (default), slow, slower, veryslow,
64
+ placebo.
65
+
66
+ bitrate
67
+ Only relevant for codecs which accept a bitrate. "5000k" offers
68
+ nice results in general.
69
+
70
+ """
71
+
72
+ def __init__(self, filename, size, fps, codec="libx265", crf=14, audiofile=None,
73
+ preset="medium", bitrate=None,
74
+ logfile=None, threads=None, ffmpeg_params=None):
75
+
76
+ if logfile is None:
77
+ logfile = sp.PIPE
78
+
79
+ self.filename = filename
80
+ self.codec = codec
81
+ self.ext = self.filename.split(".")[-1]
82
+ w = size[0] - 1 if size[0] % 2 != 0 else size[0]
83
+ h = size[1] - 1 if size[1] % 2 != 0 else size[1]
84
+
85
+
86
+ # order is important
87
+ cmd = [
88
+ FFMPEG_BINARY,
89
+ '-hide_banner',
90
+ '-hwaccel', 'auto',
91
+ '-y',
92
+ '-loglevel', 'error' if logfile == sp.PIPE else 'info',
93
+ '-f', 'rawvideo',
94
+ '-vcodec', 'rawvideo',
95
+ '-s', '%dx%d' % (size[0], size[1]),
96
+ #'-pix_fmt', 'rgba' if withmask else 'rgb24',
97
+ '-pix_fmt', 'bgr24',
98
+ '-r', str(fps),
99
+ '-an', '-i', '-'
100
+ ]
101
+
102
+ if audiofile is not None:
103
+ cmd.extend([
104
+ '-i', audiofile,
105
+ '-acodec', 'copy'
106
+ ])
107
+
108
+ cmd.extend([
109
+ '-vcodec', codec,
110
+ '-crf', str(crf)
111
+ #'-preset', preset,
112
+ ])
113
+ if ffmpeg_params is not None:
114
+ cmd.extend(ffmpeg_params)
115
+ if bitrate is not None:
116
+ cmd.extend([
117
+ '-b', bitrate
118
+ ])
119
+
120
+ # scale to a resolution divisible by 2 if not even
121
+ cmd.extend(['-vf', f'scale={w}:{h}' if w != size[0] or h != size[1] else 'colorspace=bt709:iall=bt601-6-625:fast=1'])
122
+
123
+ if threads is not None:
124
+ cmd.extend(["-threads", str(threads)])
125
+
126
+ cmd.extend([
127
+ '-pix_fmt', 'yuv420p',
128
+
129
+ ])
130
+ cmd.extend([
131
+ filename
132
+ ])
133
+
134
+ test = str(cmd)
135
+ print(test)
136
+
137
+ popen_params = {"stdout": DEVNULL,
138
+ "stderr": logfile,
139
+ "stdin": sp.PIPE}
140
+
141
+ # This was added so that no extra unwanted window opens on windows
142
+ # when the child process is created
143
+ if os.name == "nt":
144
+ popen_params["creationflags"] = 0x08000000 # CREATE_NO_WINDOW
145
+
146
+ self.proc = sp.Popen(cmd, **popen_params)
147
+
148
+
149
+ def write_frame(self, img_array):
150
+ """ Writes one frame in the file."""
151
+ try:
152
+ #if PY3:
153
+ self.proc.stdin.write(img_array.tobytes())
154
+ # else:
155
+ # self.proc.stdin.write(img_array.tostring())
156
+ except IOError as err:
157
+ _, ffmpeg_error = self.proc.communicate()
158
+ error = (str(err) + ("\n\nMoviePy error: FFMPEG encountered "
159
+ "the following error while writing file %s:"
160
+ "\n\n %s" % (self.filename, str(ffmpeg_error))))
161
+
162
+ if b"Unknown encoder" in ffmpeg_error:
163
+
164
+ error = error+("\n\nThe video export "
165
+ "failed because FFMPEG didn't find the specified "
166
+ "codec for video encoding (%s). Please install "
167
+ "this codec or change the codec when calling "
168
+ "write_videofile. For instance:\n"
169
+ " >>> clip.write_videofile('myvid.webm', codec='libvpx')")%(self.codec)
170
+
171
+ elif b"incorrect codec parameters ?" in ffmpeg_error:
172
+
173
+ error = error+("\n\nThe video export "
174
+ "failed, possibly because the codec specified for "
175
+ "the video (%s) is not compatible with the given "
176
+ "extension (%s). Please specify a valid 'codec' "
177
+ "argument in write_videofile. This would be 'libx264' "
178
+ "or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx for webm. "
179
+ "Another possible reason is that the audio codec was not "
180
+ "compatible with the video codec. For instance the video "
181
+ "extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a"
182
+ "video codec."
183
+ )%(self.codec, self.ext)
184
+
185
+ elif b"encoder setup failed" in ffmpeg_error:
186
+
187
+ error = error+("\n\nThe video export "
188
+ "failed, possibly because the bitrate you specified "
189
+ "was too high or too low for the video codec.")
190
+
191
+ elif b"Invalid encoder type" in ffmpeg_error:
192
+
193
+ error = error + ("\n\nThe video export failed because the codec "
194
+ "or file extension you provided is not a video")
195
+
196
+
197
+ raise IOError(error)
198
+
199
+ def close(self):
200
+ if self.proc:
201
+ self.proc.stdin.close()
202
+ if self.proc.stderr is not None:
203
+ self.proc.stderr.close()
204
+ self.proc.wait()
205
+
206
+ self.proc = None
207
+
208
+ # Support the Context Manager protocol, to ensure that resources are cleaned up.
209
+
210
+ def __enter__(self):
211
+ return self
212
+
213
+ def __exit__(self, exc_type, exc_value, traceback):
214
+ self.close()
215
+
216
+
217
+
218
+ def ffmpeg_write_image(filename, image, logfile=False):
219
+ """ Writes an image (HxWx3 or HxWx4 numpy array) to a file, using
220
+ ffmpeg. """
221
+
222
+ if image.dtype != 'uint8':
223
+ image = image.astype("uint8")
224
+
225
+ cmd = [ FFMPEG_BINARY, '-y',
226
+ '-s', "%dx%d"%(image.shape[:2][::-1]),
227
+ "-f", 'rawvideo',
228
+ '-pix_fmt', "rgba" if (image.shape[2] == 4) else "rgb24",
229
+ '-i','-', filename]
230
+
231
+ if logfile:
232
+ log_file = open(filename + ".log", 'w+')
233
+ else:
234
+ log_file = sp.PIPE
235
+
236
+ popen_params = {"stdout": DEVNULL,
237
+ "stderr": log_file,
238
+ "stdin": sp.PIPE}
239
+
240
+ if os.name == "nt":
241
+ popen_params["creationflags"] = 0x08000000
242
+
243
+ proc = sp.Popen(cmd, **popen_params)
244
+ out, err = proc.communicate(image.tostring())
245
+
246
+ if proc.returncode:
247
+ err = "\n".join(["[MoviePy] Running : %s\n" % cmd,
248
+ "WARNING: this command returned an error:",
249
+ err.decode('utf8')])
250
+ raise IOError(err)
251
+
252
+ del proc
253
+
chain_img_processor/image.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from jaa import JaaCore
2
+ from roop.utilities import get_device
3
+
4
+
5
+ from typing import Any
6
+
7
+ version = "4.0.0"
8
+
9
+ class ChainImgProcessor(JaaCore):
10
+
11
+ def __init__(self):
12
+ JaaCore.__init__(self)
13
+
14
+ self.processors:dict = {
15
+ }
16
+
17
+ self.processors_objects:dict[str,list[ChainImgPlugin]] = {}
18
+
19
+ self.default_chain = ""
20
+ self.init_on_start = ""
21
+
22
+ self.inited_processors = []
23
+
24
+ self.is_demo_row_render = False
25
+
26
+ def process_plugin_manifest(self, modname, manifest):
27
+ # adding processors from plugin manifest
28
+ if "img_processor" in manifest: # process commands
29
+ for cmd in manifest["img_processor"].keys():
30
+ self.processors[cmd] = manifest["img_processor"][cmd]
31
+
32
+ return manifest
33
+
34
+ def init_with_plugins(self):
35
+ self.init_plugins(["core"])
36
+ self.display_init_info()
37
+
38
+ #self.init_translator_engine(self.default_translator)
39
+ init_on_start_arr = self.init_on_start.split(",")
40
+ for proc_id in init_on_start_arr:
41
+ self.init_processor(proc_id)
42
+
43
+ def run_chain(self, img, params:dict[str,Any] = None, chain:str = None, thread_index:int = 0):
44
+ if chain is None:
45
+ chain = self.default_chain
46
+ if params is None:
47
+ params = {}
48
+ params["_thread_index"] = thread_index
49
+ chain_ar = chain.split(",")
50
+ # init all not inited processors first
51
+ for proc_id in chain_ar:
52
+ if proc_id != "":
53
+ if not proc_id in self.inited_processors:
54
+ self.init_processor(proc_id)
55
+
56
+
57
+
58
+ # run processing
59
+ if self.is_demo_row_render:
60
+ import cv2
61
+ import numpy as np
62
+ height, width, channels = img.shape
63
+ img_blank = np.zeros((height+30, width*(1+len(chain_ar)), 3), dtype=np.uint8)
64
+ img_blank.fill(255)
65
+
66
+ y = 30
67
+ x = 0
68
+ img_blank[y:y + height, x:x + width] = img
69
+
70
+ # Set the font scale and thickness
71
+ font_scale = 1
72
+ thickness = 2
73
+
74
+ # Set the font face to a monospace font
75
+ font_face = cv2.FONT_HERSHEY_SIMPLEX
76
+
77
+ cv2.putText(img_blank, "original", (x+4, y-7), font_face, font_scale, (0, 0, 0), thickness)
78
+
79
+
80
+ i = 0
81
+ for proc_id in chain_ar:
82
+ i += 1
83
+ if proc_id != "":
84
+ #img = self.processors[proc_id][1](self, img, params) # params can be modified inside
85
+ y = 30
86
+ img = self.processors_objects[proc_id][thread_index].process(img,params)
87
+ if self.is_demo_row_render:
88
+ x = width*i
89
+ img_blank[y:y + height, x:x + width] = img
90
+ cv2.putText(img_blank, proc_id, (x + 4, y - 7), font_face, font_scale, (0, 0, 0), thickness)
91
+
92
+ if self.is_demo_row_render:
93
+ return img_blank, params
94
+
95
+ return img, params
96
+
97
+ # ---------------- init translation stuff ----------------
98
+ def fill_processors_for_thread_chains(self, threads:int = 1, chain:str = None):
99
+ if chain is None:
100
+ chain = self.default_chain
101
+
102
+ chain_ar = chain.split(",")
103
+ # init all not initialized processors first
104
+ for processor_id in chain_ar:
105
+ if processor_id != "":
106
+ if self.processors_objects.get(processor_id) is None:
107
+ self.processors_objects[processor_id] = []
108
+ while len(self.processors_objects[processor_id]) < threads:
109
+ self.add_processor_to_list(processor_id)
110
+
111
+ def add_processor_to_list(self, processor_id: str):
112
+ obj = self.processors[processor_id](self)
113
+ obj.init_plugin()
114
+ if self.processors_objects.get(processor_id) is None:
115
+ self.processors_objects[processor_id] = []
116
+ self.processors_objects[processor_id].append(obj)
117
+ def init_processor(self, processor_id: str):
118
+ if processor_id == "": # blank line case
119
+ return
120
+
121
+ if processor_id in self.inited_processors:
122
+ return
123
+
124
+ try:
125
+ if self.verbose:
126
+ self.print_blue("TRY: init processor plugin '{0}'...".format(processor_id))
127
+ self.add_processor_to_list(processor_id)
128
+ self.inited_processors.append(processor_id)
129
+ if self.verbose:
130
+ self.print_blue("SUCCESS: '{0}' initialized!".format(processor_id))
131
+
132
+ except Exception as e:
133
+ self.print_error("Error init processor plugin {0}...".format(processor_id), e)
134
+
135
+ # ------------ formatting stuff -------------------
136
+ def display_init_info(self):
137
+ if self.verbose:
138
+ print("ChainImgProcessor v{0}:".format(version))
139
+ self.format_print_key_list("processors:", self.processors.keys())
140
+
141
+ def format_print_key_list(self, key:str, value:list):
142
+ print(key+": ".join(value))
143
+
144
+ def print_error(self,err_txt,e:Exception = None):
145
+ print(err_txt,"red")
146
+ # if e != None:
147
+ # cprint(e,"red")
148
+ import traceback
149
+ traceback.print_exc()
150
+
151
+ def print_red(self,txt):
152
+ print(txt)
153
+
154
+ def print_blue(self, txt):
155
+ print(txt)
156
+
157
+ class ChainImgPlugin:
158
+
159
+ device = 'cpu'
160
+
161
+ def __init__(self, core: ChainImgProcessor):
162
+ self.core = core
163
+ self.device = get_device()
164
+
165
+ def init_plugin(self): # here you can init something. Called once
166
+ pass
167
+ def process(self, img, params:dict): # process img. Called multiple
168
+ return img
169
+
170
+ _img_processor:ChainImgProcessor = None
171
+ def get_single_image_processor() -> ChainImgProcessor:
172
+ global _img_processor
173
+ if _img_processor is None:
174
+ _img_processor = ChainImgProcessor()
175
+ _img_processor.init_with_plugins()
176
+ return _img_processor
chain_img_processor/video.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import roop.globals
2
+
3
+ from threading import Thread
4
+ from chain_img_processor import ChainImgProcessor
5
+
6
+ class ThreadWithReturnValue(Thread):
7
+
8
+ def __init__(self, group=None, target=None, name=None,
9
+ args=(), kwargs={}, Verbose=None):
10
+ Thread.__init__(self, group, target, name, args, kwargs)
11
+ self._return = None
12
+
13
+ def run(self):
14
+ if self._target is not None:
15
+ self._return = self._target(*self._args,
16
+ **self._kwargs)
17
+
18
+ def join(self, *args):
19
+ Thread.join(self, *args)
20
+ return self._return
21
+
22
+
23
+ # in beta
24
+ class ChainVideoProcessor(ChainImgProcessor):
25
+ def __init__(self):
26
+ ChainImgProcessor.__init__(self)
27
+
28
+ self.video_save_codec = "libx264"
29
+ self.video_save_crf = 14
30
+
31
+ def init_with_plugins(self):
32
+ self.init_plugins(["core","core_video"])
33
+ self.display_init_info()
34
+
35
+ init_on_start_arr = self.init_on_start.split(",")
36
+ for proc_id in init_on_start_arr:
37
+ self.init_processor(proc_id)
38
+
39
+ def run_video_chain(self, source_video, target_video, fps, threads:int = 1, chain = None, params_frame_gen_func = None, video_audio = None):
40
+ import cv2
41
+ from tqdm import tqdm
42
+ from chain_img_processor.ffmpeg_writer import FFMPEG_VideoWriter # ffmpeg install needed
43
+
44
+ cap = cv2.VideoCapture(source_video)
45
+ # width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
46
+ # height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
47
+ frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
48
+
49
+ # first frame do manually - because upscale may happen, we need to estimate width/height
50
+ ret, frame = cap.read()
51
+ if params_frame_gen_func is not None:
52
+ params = params_frame_gen_func(self, frame)
53
+ else:
54
+ params = {}
55
+ params["original_frame"] = frame
56
+ frame_processed, params = self.run_chain(frame,params,chain)
57
+ height, width, channels = frame_processed.shape
58
+
59
+ self.fill_processors_for_thread_chains(threads,chain)
60
+ #print(self.processors_objects)
61
+ #import threading
62
+ #locks:list[threading.Lock] = []
63
+ locks: list[bool] = []
64
+ for i in range(threads):
65
+ #locks.append(threading.Lock())
66
+ locks.append(False)
67
+
68
+ temp = []
69
+ with FFMPEG_VideoWriter(target_video, (width, height), fps, codec=roop.globals.video_encoder, crf=roop.globals.video_quality, audiofile=video_audio) as output_video_ff:
70
+ with tqdm(total=frame_count, desc='Processing', unit="frame", dynamic_ncols=True,
71
+ bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]') as progress:
72
+
73
+ # do first frame
74
+ output_video_ff.write_frame(frame_processed)
75
+ progress.update(1) #
76
+ cnt_frames = 0
77
+
78
+ # do rest frames
79
+ while True:
80
+ # getting frame
81
+ ret, frame = cap.read()
82
+
83
+ if not ret:
84
+ break
85
+ cnt_frames+=1
86
+ thread_ind = cnt_frames % threads
87
+ # we are having an array of length %gpu_threads%, running in parallel
88
+ # so if array is equal or longer than gpu threads, waiting
89
+ #while len(temp) >= threads:
90
+ while locks[thread_ind]:
91
+ #print('WAIT', thread_ind)
92
+ # we are order dependent, so we are forced to wait for first element to finish. When finished removing thread from the list
93
+ frame_processed, params = temp.pop(0).join()
94
+ locks[params["_thread_index"]] = False
95
+ #print('OFF',cnt_frames,locks[params["_thread_index"]],locks)
96
+ # writing into output
97
+ output_video_ff.write_frame(frame_processed)
98
+ # updating the status
99
+ progress.update(1)
100
+
101
+ # calc params for frame
102
+ if params_frame_gen_func is not None:
103
+ params = params_frame_gen_func(self,frame)
104
+ else:
105
+ params = {}
106
+
107
+ # adding new frame to the list and starting it
108
+ locks[thread_ind] = True
109
+ #print('ON', cnt_frames, thread_ind, locks)
110
+ params["original_frame"] = frame
111
+ temp.append(
112
+ ThreadWithReturnValue(target=self.run_chain, args=(frame, params, chain, thread_ind)))
113
+ temp[-1].start()
114
+
115
+ while len(temp) > 0:
116
+ # we are order dependent, so we are forced to wait for first element to finish. When finished removing thread from the list
117
+ frame_processed, params = temp.pop(0).join()
118
+ locks[params["_thread_index"]] = False
119
+ # writing into output
120
+ output_video_ff.write_frame(frame_processed)
121
+
122
+ progress.update(1)
123
+
124
+ #print("FINAL", locks)
125
+
126
+ _video_processor:ChainVideoProcessor = None
127
+ def get_single_video_processor() -> ChainVideoProcessor:
128
+ global _video_processor
129
+ if _video_processor is None:
130
+ _video_processor = ChainVideoProcessor()
131
+ _video_processor.init_with_plugins()
132
+ return _video_processor
clip/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .clip import *
clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
3
+ size 1356917
clip/clip.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+ from typing import Any, Union, List
6
+ from pkg_resources import packaging
7
+
8
+ import torch
9
+ from PIL import Image
10
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
11
+ from tqdm import tqdm
12
+
13
+ from .model import build_model
14
+ from .simple_tokenizer import SimpleTokenizer as _Tokenizer
15
+
16
+ try:
17
+ from torchvision.transforms import InterpolationMode
18
+ BICUBIC = InterpolationMode.BICUBIC
19
+ except ImportError:
20
+ BICUBIC = Image.BICUBIC
21
+
22
+
23
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
24
+ warnings.warn("PyTorch version 1.7.1 or higher is recommended")
25
+
26
+
27
+ __all__ = ["available_models", "load", "tokenize"]
28
+ _tokenizer = _Tokenizer()
29
+
30
+ _MODELS = {
31
+ "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
32
+ "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
33
+ "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
34
+ "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
35
+ "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
36
+ "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
37
+ "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
38
+ "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
39
+ "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
40
+ }
41
+
42
+
43
+ def _download(url: str, root: str):
44
+ os.makedirs(root, exist_ok=True)
45
+ filename = os.path.basename(url)
46
+
47
+ expected_sha256 = url.split("/")[-2]
48
+ download_target = os.path.join(root, filename)
49
+
50
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
51
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
52
+
53
+ if os.path.isfile(download_target):
54
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
55
+ return download_target
56
+ else:
57
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
58
+
59
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
60
+ with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
61
+ while True:
62
+ buffer = source.read(8192)
63
+ if not buffer:
64
+ break
65
+
66
+ output.write(buffer)
67
+ loop.update(len(buffer))
68
+
69
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
70
+ raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match")
71
+
72
+ return download_target
73
+
74
+
75
+ def _convert_image_to_rgb(image):
76
+ return image.convert("RGB")
77
+
78
+
79
+ def _transform(n_px):
80
+ return Compose([
81
+ Resize(n_px, interpolation=BICUBIC),
82
+ CenterCrop(n_px),
83
+ _convert_image_to_rgb,
84
+ ToTensor(),
85
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
86
+ ])
87
+
88
+
89
+ def available_models() -> List[str]:
90
+ """Returns the names of available CLIP models"""
91
+ return list(_MODELS.keys())
92
+
93
+
94
+ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
95
+ """Load a CLIP model
96
+
97
+ Parameters
98
+ ----------
99
+ name : str
100
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
101
+
102
+ device : Union[str, torch.device]
103
+ The device to put the loaded model
104
+
105
+ jit : bool
106
+ Whether to load the optimized JIT model or more hackable non-JIT model (default).
107
+
108
+ download_root: str
109
+ path to download the model files; by default, it uses "~/.cache/clip"
110
+
111
+ Returns
112
+ -------
113
+ model : torch.nn.Module
114
+ The CLIP model
115
+
116
+ preprocess : Callable[[PIL.Image], torch.Tensor]
117
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
118
+ """
119
+ if name in _MODELS:
120
+ model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
121
+ elif os.path.isfile(name):
122
+ model_path = name
123
+ else:
124
+ raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
125
+
126
+ with open(model_path, 'rb') as opened_file:
127
+ try:
128
+ # loading JIT archive
129
+ model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
130
+ state_dict = None
131
+ except RuntimeError:
132
+ # loading saved state dict
133
+ if jit:
134
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
135
+ jit = False
136
+ state_dict = torch.load(opened_file, map_location="cpu")
137
+
138
+ if not jit:
139
+ model = build_model(state_dict or model.state_dict()).to(device)
140
+ if str(device) == "cpu":
141
+ model.float()
142
+ return model, _transform(model.visual.input_resolution)
143
+
144
+ # patch the device names
145
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
146
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
147
+
148
+ def _node_get(node: torch._C.Node, key: str):
149
+ """Gets attributes of a node which is polymorphic over return type.
150
+
151
+ From https://github.com/pytorch/pytorch/pull/82628
152
+ """
153
+ sel = node.kindOf(key)
154
+ return getattr(node, sel)(key)
155
+
156
+ def patch_device(module):
157
+ try:
158
+ graphs = [module.graph] if hasattr(module, "graph") else []
159
+ except RuntimeError:
160
+ graphs = []
161
+
162
+ if hasattr(module, "forward1"):
163
+ graphs.append(module.forward1.graph)
164
+
165
+ for graph in graphs:
166
+ for node in graph.findAllNodes("prim::Constant"):
167
+ if "value" in node.attributeNames() and str(_node_get(node, "value")).startswith("cuda"):
168
+ node.copyAttributes(device_node)
169
+
170
+ model.apply(patch_device)
171
+ patch_device(model.encode_image)
172
+ patch_device(model.encode_text)
173
+
174
+ # patch dtype to float32 on CPU
175
+ if str(device) == "cpu":
176
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
177
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
178
+ float_node = float_input.node()
179
+
180
+ def patch_float(module):
181
+ try:
182
+ graphs = [module.graph] if hasattr(module, "graph") else []
183
+ except RuntimeError:
184
+ graphs = []
185
+
186
+ if hasattr(module, "forward1"):
187
+ graphs.append(module.forward1.graph)
188
+
189
+ for graph in graphs:
190
+ for node in graph.findAllNodes("aten::to"):
191
+ inputs = list(node.inputs())
192
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
193
+ if _node_get(inputs[i].node(), "value") == 5:
194
+ inputs[i].node().copyAttributes(float_node)
195
+
196
+ model.apply(patch_float)
197
+ patch_float(model.encode_image)
198
+ patch_float(model.encode_text)
199
+
200
+ model.float()
201
+
202
+ return model, _transform(model.input_resolution.item())
203
+
204
+
205
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
206
+ """
207
+ Returns the tokenized representation of given input string(s)
208
+
209
+ Parameters
210
+ ----------
211
+ texts : Union[str, List[str]]
212
+ An input string or a list of input strings to tokenize
213
+
214
+ context_length : int
215
+ The context length to use; all CLIP models use 77 as the context length
216
+
217
+ truncate: bool
218
+ Whether to truncate the text in case its encoding is longer than the context length
219
+
220
+ Returns
221
+ -------
222
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
223
+ We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
224
+ """
225
+ if isinstance(texts, str):
226
+ texts = [texts]
227
+
228
+ sot_token = _tokenizer.encoder["<|startoftext|>"]
229
+ eot_token = _tokenizer.encoder["<|endoftext|>"]
230
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
231
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
232
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
233
+ else:
234
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
235
+
236
+ for i, tokens in enumerate(all_tokens):
237
+ if len(tokens) > context_length:
238
+ if truncate:
239
+ tokens = tokens[:context_length]
240
+ tokens[-1] = eot_token
241
+ else:
242
+ raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
243
+ result[i, :len(tokens)] = torch.tensor(tokens)
244
+
245
+ return result
clip/clipseg.py ADDED
@@ -0,0 +1,538 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from os.path import basename, dirname, join, isfile
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as nnf
6
+ from torch.nn.modules.activation import ReLU
7
+
8
+
9
+ def get_prompt_list(prompt):
10
+ if prompt == 'plain':
11
+ return ['{}']
12
+ elif prompt == 'fixed':
13
+ return ['a photo of a {}.']
14
+ elif prompt == 'shuffle':
15
+ return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
16
+ elif prompt == 'shuffle+':
17
+ return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
18
+ 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
19
+ 'a bad photo of a {}.', 'a photo of the {}.']
20
+ else:
21
+ raise ValueError('Invalid value for prompt')
22
+
23
+
24
+ def forward_multihead_attention(x, b, with_aff=False, attn_mask=None):
25
+ """
26
+ Simplified version of multihead attention (taken from torch source code but without tons of if clauses).
27
+ The mlp and layer norm come from CLIP.
28
+ x: input.
29
+ b: multihead attention module.
30
+ """
31
+
32
+ x_ = b.ln_1(x)
33
+ q, k, v = nnf.linear(x_, b.attn.in_proj_weight, b.attn.in_proj_bias).chunk(3, dim=-1)
34
+ tgt_len, bsz, embed_dim = q.size()
35
+
36
+ head_dim = embed_dim // b.attn.num_heads
37
+ scaling = float(head_dim) ** -0.5
38
+
39
+ q = q.contiguous().view(tgt_len, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
40
+ k = k.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
41
+ v = v.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
42
+
43
+ q = q * scaling
44
+
45
+ attn_output_weights = torch.bmm(q, k.transpose(1, 2)) # n_heads * batch_size, tokens^2, tokens^2
46
+ if attn_mask is not None:
47
+
48
+
49
+ attn_mask_type, attn_mask = attn_mask
50
+ n_heads = attn_output_weights.size(0) // attn_mask.size(0)
51
+ attn_mask = attn_mask.repeat(n_heads, 1)
52
+
53
+ if attn_mask_type == 'cls_token':
54
+ # the mask only affects similarities compared to the readout-token.
55
+ attn_output_weights[:, 0, 1:] = attn_output_weights[:, 0, 1:] * attn_mask[None,...]
56
+ # attn_output_weights[:, 0, 0] = 0*attn_output_weights[:, 0, 0]
57
+
58
+ if attn_mask_type == 'all':
59
+ # print(attn_output_weights.shape, attn_mask[:, None].shape)
60
+ attn_output_weights[:, 1:, 1:] = attn_output_weights[:, 1:, 1:] * attn_mask[:, None]
61
+
62
+
63
+ attn_output_weights = torch.softmax(attn_output_weights, dim=-1)
64
+
65
+ attn_output = torch.bmm(attn_output_weights, v)
66
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
67
+ attn_output = b.attn.out_proj(attn_output)
68
+
69
+ x = x + attn_output
70
+ x = x + b.mlp(b.ln_2(x))
71
+
72
+ if with_aff:
73
+ return x, attn_output_weights
74
+ else:
75
+ return x
76
+
77
+
78
+ class CLIPDenseBase(nn.Module):
79
+
80
+ def __init__(self, version, reduce_cond, reduce_dim, prompt, n_tokens):
81
+ super().__init__()
82
+
83
+ import clip
84
+
85
+ # prec = torch.FloatTensor
86
+ self.clip_model, _ = clip.load(version, device='cpu', jit=False)
87
+ self.model = self.clip_model.visual
88
+
89
+ # if not None, scale conv weights such that we obtain n_tokens.
90
+ self.n_tokens = n_tokens
91
+
92
+ for p in self.clip_model.parameters():
93
+ p.requires_grad_(False)
94
+
95
+ # conditional
96
+ if reduce_cond is not None:
97
+ self.reduce_cond = nn.Linear(512, reduce_cond)
98
+ for p in self.reduce_cond.parameters():
99
+ p.requires_grad_(False)
100
+ else:
101
+ self.reduce_cond = None
102
+
103
+ self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
104
+ self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
105
+
106
+ self.reduce = nn.Linear(768, reduce_dim)
107
+
108
+ self.prompt_list = get_prompt_list(prompt)
109
+
110
+ # precomputed prompts
111
+ import pickle
112
+ if isfile('precomputed_prompt_vectors.pickle'):
113
+ precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
114
+ self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}
115
+ else:
116
+ self.precomputed_prompts = dict()
117
+
118
+ def rescaled_pos_emb(self, new_size):
119
+ assert len(new_size) == 2
120
+
121
+ a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
122
+ b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
123
+ return torch.cat([self.model.positional_embedding[:1], b])
124
+
125
+ def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):
126
+
127
+
128
+ with torch.no_grad():
129
+
130
+ inp_size = x_inp.shape[2:]
131
+
132
+ if self.n_tokens is not None:
133
+ stride2 = x_inp.shape[2] // self.n_tokens
134
+ conv_weight2 = nnf.interpolate(self.model.conv1.weight, (stride2, stride2), mode='bilinear', align_corners=True)
135
+ x = nnf.conv2d(x_inp, conv_weight2, bias=self.model.conv1.bias, stride=stride2, dilation=self.model.conv1.dilation)
136
+ else:
137
+ x = self.model.conv1(x_inp) # shape = [*, width, grid, grid]
138
+
139
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
140
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
141
+
142
+ x = torch.cat([self.model.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
143
+
144
+ standard_n_tokens = 50 if self.model.conv1.kernel_size[0] == 32 else 197
145
+
146
+ if x.shape[1] != standard_n_tokens:
147
+ new_shape = int(math.sqrt(x.shape[1]-1))
148
+ x = x + self.rescaled_pos_emb((new_shape, new_shape)).to(x.dtype)[None,:,:]
149
+ else:
150
+ x = x + self.model.positional_embedding.to(x.dtype)
151
+
152
+ x = self.model.ln_pre(x)
153
+
154
+ x = x.permute(1, 0, 2) # NLD -> LND
155
+
156
+ activations, affinities = [], []
157
+ for i, res_block in enumerate(self.model.transformer.resblocks):
158
+
159
+ if mask is not None:
160
+ mask_layer, mask_type, mask_tensor = mask
161
+ if mask_layer == i or mask_layer == 'all':
162
+ # import ipdb; ipdb.set_trace()
163
+ size = int(math.sqrt(x.shape[0] - 1))
164
+
165
+ attn_mask = (mask_type, nnf.interpolate(mask_tensor.unsqueeze(1).float(), (size, size)).view(mask_tensor.shape[0], size * size))
166
+
167
+ else:
168
+ attn_mask = None
169
+ else:
170
+ attn_mask = None
171
+
172
+ x, aff_per_head = forward_multihead_attention(x, res_block, with_aff=True, attn_mask=attn_mask)
173
+
174
+ if i in extract_layers:
175
+ affinities += [aff_per_head]
176
+
177
+ #if self.n_tokens is not None:
178
+ # activations += [nnf.interpolate(x, inp_size, mode='bilinear', align_corners=True)]
179
+ #else:
180
+ activations += [x]
181
+
182
+ if len(extract_layers) > 0 and i == max(extract_layers) and skip:
183
+ print('early skip')
184
+ break
185
+
186
+ x = x.permute(1, 0, 2) # LND -> NLD
187
+ x = self.model.ln_post(x[:, 0, :])
188
+
189
+ if self.model.proj is not None:
190
+ x = x @ self.model.proj
191
+
192
+ return x, activations, affinities
193
+
194
+ def sample_prompts(self, words, prompt_list=None):
195
+
196
+ prompt_list = prompt_list if prompt_list is not None else self.prompt_list
197
+
198
+ prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
199
+ prompts = [prompt_list[i] for i in prompt_indices]
200
+ return [promt.format(w) for promt, w in zip(prompts, words)]
201
+
202
+ def get_cond_vec(self, conditional, batch_size):
203
+ # compute conditional from a single string
204
+ if conditional is not None and type(conditional) == str:
205
+ cond = self.compute_conditional(conditional)
206
+ cond = cond.repeat(batch_size, 1)
207
+
208
+ # compute conditional from string list/tuple
209
+ elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
210
+ assert len(conditional) == batch_size
211
+ cond = self.compute_conditional(conditional)
212
+
213
+ # use conditional directly
214
+ elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
215
+ cond = conditional
216
+
217
+ # compute conditional from image
218
+ elif conditional is not None and type(conditional) == torch.Tensor:
219
+ with torch.no_grad():
220
+ cond, _, _ = self.visual_forward(conditional)
221
+ else:
222
+ raise ValueError('invalid conditional')
223
+ return cond
224
+
225
+ def compute_conditional(self, conditional):
226
+ import clip
227
+
228
+ dev = next(self.parameters()).device
229
+
230
+ if type(conditional) in {list, tuple}:
231
+ text_tokens = clip.tokenize(conditional).to(dev)
232
+ cond = self.clip_model.encode_text(text_tokens)
233
+ else:
234
+ if conditional in self.precomputed_prompts:
235
+ cond = self.precomputed_prompts[conditional].float().to(dev)
236
+ else:
237
+ text_tokens = clip.tokenize([conditional]).to(dev)
238
+ cond = self.clip_model.encode_text(text_tokens)[0]
239
+
240
+ if self.shift_vector is not None:
241
+ return cond + self.shift_vector
242
+ else:
243
+ return cond
244
+
245
+
246
+ def clip_load_untrained(version):
247
+ assert version == 'ViT-B/16'
248
+ from clip.model import CLIP
249
+ from clip.clip import _MODELS, _download
250
+ model = torch.jit.load(_download(_MODELS['ViT-B/16'])).eval()
251
+ state_dict = model.state_dict()
252
+
253
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
254
+ vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
255
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
256
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
257
+ image_resolution = vision_patch_size * grid_size
258
+ embed_dim = state_dict["text_projection"].shape[1]
259
+ context_length = state_dict["positional_embedding"].shape[0]
260
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
261
+ transformer_width = state_dict["ln_final.weight"].shape[0]
262
+ transformer_heads = transformer_width // 64
263
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
264
+
265
+ return CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size,
266
+ context_length, vocab_size, transformer_width, transformer_heads, transformer_layers)
267
+
268
+
269
+ class CLIPDensePredT(CLIPDenseBase):
270
+
271
+ def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
272
+ extra_blocks=0, reduce_cond=None, fix_shift=False,
273
+ learn_trans_conv_only=False, limit_to_clip_only=False, upsample=False,
274
+ add_calibration=False, rev_activations=False, trans_conv=None, n_tokens=None, complex_trans_conv=False):
275
+
276
+ super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
277
+ # device = 'cpu'
278
+
279
+ self.extract_layers = extract_layers
280
+ self.cond_layer = cond_layer
281
+ self.limit_to_clip_only = limit_to_clip_only
282
+ self.process_cond = None
283
+ self.rev_activations = rev_activations
284
+
285
+ depth = len(extract_layers)
286
+
287
+ if add_calibration:
288
+ self.calibration_conds = 1
289
+
290
+ self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None
291
+
292
+ self.add_activation1 = True
293
+
294
+ self.version = version
295
+
296
+ self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]
297
+
298
+ if fix_shift:
299
+ # self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'clip_text_shift_vector.pth')), requires_grad=False)
300
+ self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'shift_text_to_vis.pth')), requires_grad=False)
301
+ # self.shift_vector = nn.Parameter(-1*torch.load(join(dirname(basename(__file__)), 'shift2.pth')), requires_grad=False)
302
+ else:
303
+ self.shift_vector = None
304
+
305
+ if trans_conv is None:
306
+ trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
307
+ else:
308
+ # explicitly define transposed conv kernel size
309
+ trans_conv_ks = (trans_conv, trans_conv)
310
+
311
+ if not complex_trans_conv:
312
+ self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
313
+ else:
314
+ assert trans_conv_ks[0] == trans_conv_ks[1]
315
+
316
+ tp_kernels = (trans_conv_ks[0] // 4, trans_conv_ks[0] // 4)
317
+
318
+ self.trans_conv = nn.Sequential(
319
+ nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1),
320
+ nn.ReLU(),
321
+ nn.ConvTranspose2d(reduce_dim, reduce_dim // 2, kernel_size=tp_kernels[0], stride=tp_kernels[0]),
322
+ nn.ReLU(),
323
+ nn.ConvTranspose2d(reduce_dim // 2, 1, kernel_size=tp_kernels[1], stride=tp_kernels[1]),
324
+ )
325
+
326
+ # self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
327
+
328
+ assert len(self.extract_layers) == depth
329
+
330
+ self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
331
+ self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
332
+ self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])
333
+
334
+ # refinement and trans conv
335
+
336
+ if learn_trans_conv_only:
337
+ for p in self.parameters():
338
+ p.requires_grad_(False)
339
+
340
+ for p in self.trans_conv.parameters():
341
+ p.requires_grad_(True)
342
+
343
+ self.prompt_list = get_prompt_list(prompt)
344
+
345
+
346
+ def forward(self, inp_image, conditional=None, return_features=False, mask=None):
347
+
348
+ assert type(return_features) == bool
349
+
350
+ inp_image = inp_image.to(self.model.positional_embedding.device)
351
+
352
+ if mask is not None:
353
+ raise ValueError('mask not supported')
354
+
355
+ # x_inp = normalize(inp_image)
356
+ x_inp = inp_image
357
+
358
+ bs, dev = inp_image.shape[0], x_inp.device
359
+
360
+ cond = self.get_cond_vec(conditional, bs)
361
+
362
+ visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))
363
+
364
+ activation1 = activations[0]
365
+ activations = activations[1:]
366
+
367
+ _activations = activations[::-1] if not self.rev_activations else activations
368
+
369
+ a = None
370
+ for i, (activation, block, reduce) in enumerate(zip(_activations, self.blocks, self.reduces)):
371
+
372
+ if a is not None:
373
+ a = reduce(activation) + a
374
+ else:
375
+ a = reduce(activation)
376
+
377
+ if i == self.cond_layer:
378
+ if self.reduce_cond is not None:
379
+ cond = self.reduce_cond(cond)
380
+
381
+ a = self.film_mul(cond) * a + self.film_add(cond)
382
+
383
+ a = block(a)
384
+
385
+ for block in self.extra_blocks:
386
+ a = a + block(a)
387
+
388
+ a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens
389
+
390
+ size = int(math.sqrt(a.shape[2]))
391
+
392
+ a = a.view(bs, a.shape[1], size, size)
393
+
394
+ a = self.trans_conv(a)
395
+
396
+ if self.n_tokens is not None:
397
+ a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear', align_corners=True)
398
+
399
+ if self.upsample_proj is not None:
400
+ a = self.upsample_proj(a)
401
+ a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')
402
+
403
+ if return_features:
404
+ return a, visual_q, cond, [activation1] + activations
405
+ else:
406
+ return a,
407
+
408
+
409
+
410
+ class CLIPDensePredTMasked(CLIPDensePredT):
411
+
412
+ def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4,
413
+ prompt='fixed', extra_blocks=0, reduce_cond=None, fix_shift=False, learn_trans_conv_only=False,
414
+ refine=None, limit_to_clip_only=False, upsample=False, add_calibration=False, n_tokens=None):
415
+
416
+ super().__init__(version=version, extract_layers=extract_layers, cond_layer=cond_layer, reduce_dim=reduce_dim,
417
+ n_heads=n_heads, prompt=prompt, extra_blocks=extra_blocks, reduce_cond=reduce_cond,
418
+ fix_shift=fix_shift, learn_trans_conv_only=learn_trans_conv_only,
419
+ limit_to_clip_only=limit_to_clip_only, upsample=upsample, add_calibration=add_calibration,
420
+ n_tokens=n_tokens)
421
+
422
+ def visual_forward_masked(self, img_s, seg_s):
423
+ return super().visual_forward(img_s, mask=('all', 'cls_token', seg_s))
424
+
425
+ def forward(self, img_q, cond_or_img_s, seg_s=None, return_features=False):
426
+
427
+ if seg_s is None:
428
+ cond = cond_or_img_s
429
+ else:
430
+ img_s = cond_or_img_s
431
+
432
+ with torch.no_grad():
433
+ cond, _, _ = self.visual_forward_masked(img_s, seg_s)
434
+
435
+ return super().forward(img_q, cond, return_features=return_features)
436
+
437
+
438
+
439
+ class CLIPDenseBaseline(CLIPDenseBase):
440
+
441
+ def __init__(self, version='ViT-B/32', cond_layer=0,
442
+ extract_layer=9, reduce_dim=128, reduce2_dim=None, prompt='fixed',
443
+ reduce_cond=None, limit_to_clip_only=False, n_tokens=None):
444
+
445
+ super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
446
+ device = 'cpu'
447
+
448
+ # self.cond_layer = cond_layer
449
+ self.extract_layer = extract_layer
450
+ self.limit_to_clip_only = limit_to_clip_only
451
+ self.shift_vector = None
452
+
453
+ self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]
454
+
455
+ assert reduce2_dim is not None
456
+
457
+ self.reduce2 = nn.Sequential(
458
+ nn.Linear(reduce_dim, reduce2_dim),
459
+ nn.ReLU(),
460
+ nn.Linear(reduce2_dim, reduce_dim)
461
+ )
462
+
463
+ trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
464
+ self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
465
+
466
+
467
+ def forward(self, inp_image, conditional=None, return_features=False):
468
+
469
+ inp_image = inp_image.to(self.model.positional_embedding.device)
470
+
471
+ # x_inp = normalize(inp_image)
472
+ x_inp = inp_image
473
+
474
+ bs, dev = inp_image.shape[0], x_inp.device
475
+
476
+ cond = self.get_cond_vec(conditional, bs)
477
+
478
+ visual_q, activations, affinities = self.visual_forward(x_inp, extract_layers=[self.extract_layer])
479
+
480
+ a = activations[0]
481
+ a = self.reduce(a)
482
+ a = self.film_mul(cond) * a + self.film_add(cond)
483
+
484
+ if self.reduce2 is not None:
485
+ a = self.reduce2(a)
486
+
487
+ # the original model would execute a transformer block here
488
+
489
+ a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens
490
+
491
+ size = int(math.sqrt(a.shape[2]))
492
+
493
+ a = a.view(bs, a.shape[1], size, size)
494
+ a = self.trans_conv(a)
495
+
496
+ if return_features:
497
+ return a, visual_q, cond, activations
498
+ else:
499
+ return a,
500
+
501
+
502
+ class CLIPSegMultiLabel(nn.Module):
503
+
504
+ def __init__(self, model) -> None:
505
+ super().__init__()
506
+
507
+ from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC
508
+
509
+ self.pascal_classes = VOC
510
+
511
+ from clip.clipseg import CLIPDensePredT
512
+ from general_utils import load_model
513
+ # self.clipseg = load_model('rd64-vit16-neg0.2-phrasecut', strict=False)
514
+ self.clipseg = load_model(model, strict=False)
515
+
516
+ self.clipseg.eval()
517
+
518
+ def forward(self, x):
519
+
520
+ bs = x.shape[0]
521
+ out = torch.ones(21, bs, 352, 352).to(x.device) * -10
522
+
523
+ for class_id, class_name in enumerate(self.pascal_classes):
524
+
525
+ fac = 3 if class_name == 'background' else 1
526
+
527
+ with torch.no_grad():
528
+ pred = torch.sigmoid(self.clipseg(x, class_name)[0][:,0]) * fac
529
+
530
+ out[class_id] += pred
531
+
532
+
533
+ out = out.permute(1, 0, 2, 3)
534
+
535
+ return out
536
+
537
+ # construct output tensor
538
+
clip/model.py ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import OrderedDict
2
+ from typing import Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import nn
8
+
9
+
10
+ class Bottleneck(nn.Module):
11
+ expansion = 4
12
+
13
+ def __init__(self, inplanes, planes, stride=1):
14
+ super().__init__()
15
+
16
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
17
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
18
+ self.bn1 = nn.BatchNorm2d(planes)
19
+ self.relu1 = nn.ReLU(inplace=True)
20
+
21
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
22
+ self.bn2 = nn.BatchNorm2d(planes)
23
+ self.relu2 = nn.ReLU(inplace=True)
24
+
25
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
26
+
27
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
28
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
29
+ self.relu3 = nn.ReLU(inplace=True)
30
+
31
+ self.downsample = None
32
+ self.stride = stride
33
+
34
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
35
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
36
+ self.downsample = nn.Sequential(OrderedDict([
37
+ ("-1", nn.AvgPool2d(stride)),
38
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
39
+ ("1", nn.BatchNorm2d(planes * self.expansion))
40
+ ]))
41
+
42
+ def forward(self, x: torch.Tensor):
43
+ identity = x
44
+
45
+ out = self.relu1(self.bn1(self.conv1(x)))
46
+ out = self.relu2(self.bn2(self.conv2(out)))
47
+ out = self.avgpool(out)
48
+ out = self.bn3(self.conv3(out))
49
+
50
+ if self.downsample is not None:
51
+ identity = self.downsample(x)
52
+
53
+ out += identity
54
+ out = self.relu3(out)
55
+ return out
56
+
57
+
58
+ class AttentionPool2d(nn.Module):
59
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
60
+ super().__init__()
61
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
62
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
63
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
64
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
65
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
66
+ self.num_heads = num_heads
67
+
68
+ def forward(self, x):
69
+ x = x.flatten(start_dim=2).permute(2, 0, 1) # NCHW -> (HW)NC
70
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
71
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
72
+ x, _ = F.multi_head_attention_forward(
73
+ query=x[:1], key=x, value=x,
74
+ embed_dim_to_check=x.shape[-1],
75
+ num_heads=self.num_heads,
76
+ q_proj_weight=self.q_proj.weight,
77
+ k_proj_weight=self.k_proj.weight,
78
+ v_proj_weight=self.v_proj.weight,
79
+ in_proj_weight=None,
80
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
81
+ bias_k=None,
82
+ bias_v=None,
83
+ add_zero_attn=False,
84
+ dropout_p=0,
85
+ out_proj_weight=self.c_proj.weight,
86
+ out_proj_bias=self.c_proj.bias,
87
+ use_separate_proj_weight=True,
88
+ training=self.training,
89
+ need_weights=False
90
+ )
91
+ return x.squeeze(0)
92
+
93
+
94
+ class ModifiedResNet(nn.Module):
95
+ """
96
+ A ResNet class that is similar to torchvision's but contains the following changes:
97
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
98
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
99
+ - The final pooling layer is a QKV attention instead of an average pool
100
+ """
101
+
102
+ def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
103
+ super().__init__()
104
+ self.output_dim = output_dim
105
+ self.input_resolution = input_resolution
106
+
107
+ # the 3-layer stem
108
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
109
+ self.bn1 = nn.BatchNorm2d(width // 2)
110
+ self.relu1 = nn.ReLU(inplace=True)
111
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
112
+ self.bn2 = nn.BatchNorm2d(width // 2)
113
+ self.relu2 = nn.ReLU(inplace=True)
114
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
115
+ self.bn3 = nn.BatchNorm2d(width)
116
+ self.relu3 = nn.ReLU(inplace=True)
117
+ self.avgpool = nn.AvgPool2d(2)
118
+
119
+ # residual layers
120
+ self._inplanes = width # this is a *mutable* variable used during construction
121
+ self.layer1 = self._make_layer(width, layers[0])
122
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
123
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
124
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
125
+
126
+ embed_dim = width * 32 # the ResNet feature dimension
127
+ self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
128
+
129
+ def _make_layer(self, planes, blocks, stride=1):
130
+ layers = [Bottleneck(self._inplanes, planes, stride)]
131
+
132
+ self._inplanes = planes * Bottleneck.expansion
133
+ for _ in range(1, blocks):
134
+ layers.append(Bottleneck(self._inplanes, planes))
135
+
136
+ return nn.Sequential(*layers)
137
+
138
+ def forward(self, x):
139
+ def stem(x):
140
+ x = self.relu1(self.bn1(self.conv1(x)))
141
+ x = self.relu2(self.bn2(self.conv2(x)))
142
+ x = self.relu3(self.bn3(self.conv3(x)))
143
+ x = self.avgpool(x)
144
+ return x
145
+
146
+ x = x.type(self.conv1.weight.dtype)
147
+ x = stem(x)
148
+ x = self.layer1(x)
149
+ x = self.layer2(x)
150
+ x = self.layer3(x)
151
+ x = self.layer4(x)
152
+ x = self.attnpool(x)
153
+
154
+ return x
155
+
156
+
157
+ class LayerNorm(nn.LayerNorm):
158
+ """Subclass torch's LayerNorm to handle fp16."""
159
+
160
+ def forward(self, x: torch.Tensor):
161
+ orig_type = x.dtype
162
+ ret = super().forward(x.type(torch.float32))
163
+ return ret.type(orig_type)
164
+
165
+
166
+ class QuickGELU(nn.Module):
167
+ def forward(self, x: torch.Tensor):
168
+ return x * torch.sigmoid(1.702 * x)
169
+
170
+
171
+ class ResidualAttentionBlock(nn.Module):
172
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
173
+ super().__init__()
174
+
175
+ self.attn = nn.MultiheadAttention(d_model, n_head)
176
+ self.ln_1 = LayerNorm(d_model)
177
+ self.mlp = nn.Sequential(OrderedDict([
178
+ ("c_fc", nn.Linear(d_model, d_model * 4)),
179
+ ("gelu", QuickGELU()),
180
+ ("c_proj", nn.Linear(d_model * 4, d_model))
181
+ ]))
182
+ self.ln_2 = LayerNorm(d_model)
183
+ self.attn_mask = attn_mask
184
+
185
+ def attention(self, x: torch.Tensor):
186
+ self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
187
+ return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
188
+
189
+ def forward(self, x: torch.Tensor):
190
+ x = x + self.attention(self.ln_1(x))
191
+ x = x + self.mlp(self.ln_2(x))
192
+ return x
193
+
194
+
195
+ class Transformer(nn.Module):
196
+ def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
197
+ super().__init__()
198
+ self.width = width
199
+ self.layers = layers
200
+ self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
201
+
202
+ def forward(self, x: torch.Tensor):
203
+ return self.resblocks(x)
204
+
205
+
206
+ class VisionTransformer(nn.Module):
207
+ def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
208
+ super().__init__()
209
+ self.input_resolution = input_resolution
210
+ self.output_dim = output_dim
211
+ self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
212
+
213
+ scale = width ** -0.5
214
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
215
+ self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
216
+ self.ln_pre = LayerNorm(width)
217
+
218
+ self.transformer = Transformer(width, layers, heads)
219
+
220
+ self.ln_post = LayerNorm(width)
221
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
222
+
223
+ def forward(self, x: torch.Tensor):
224
+ x = self.conv1(x) # shape = [*, width, grid, grid]
225
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
226
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
227
+ x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
228
+ x = x + self.positional_embedding.to(x.dtype)
229
+ x = self.ln_pre(x)
230
+
231
+ x = x.permute(1, 0, 2) # NLD -> LND
232
+ x = self.transformer(x)
233
+ x = x.permute(1, 0, 2) # LND -> NLD
234
+
235
+ x = self.ln_post(x[:, 0, :])
236
+
237
+ if self.proj is not None:
238
+ x = x @ self.proj
239
+
240
+ return x
241
+
242
+
243
+ class CLIP(nn.Module):
244
+ def __init__(self,
245
+ embed_dim: int,
246
+ # vision
247
+ image_resolution: int,
248
+ vision_layers: Union[Tuple[int, int, int, int], int],
249
+ vision_width: int,
250
+ vision_patch_size: int,
251
+ # text
252
+ context_length: int,
253
+ vocab_size: int,
254
+ transformer_width: int,
255
+ transformer_heads: int,
256
+ transformer_layers: int
257
+ ):
258
+ super().__init__()
259
+
260
+ self.context_length = context_length
261
+
262
+ if isinstance(vision_layers, (tuple, list)):
263
+ vision_heads = vision_width * 32 // 64
264
+ self.visual = ModifiedResNet(
265
+ layers=vision_layers,
266
+ output_dim=embed_dim,
267
+ heads=vision_heads,
268
+ input_resolution=image_resolution,
269
+ width=vision_width
270
+ )
271
+ else:
272
+ vision_heads = vision_width // 64
273
+ self.visual = VisionTransformer(
274
+ input_resolution=image_resolution,
275
+ patch_size=vision_patch_size,
276
+ width=vision_width,
277
+ layers=vision_layers,
278
+ heads=vision_heads,
279
+ output_dim=embed_dim
280
+ )
281
+
282
+ self.transformer = Transformer(
283
+ width=transformer_width,
284
+ layers=transformer_layers,
285
+ heads=transformer_heads,
286
+ attn_mask=self.build_attention_mask()
287
+ )
288
+
289
+ self.vocab_size = vocab_size
290
+ self.token_embedding = nn.Embedding(vocab_size, transformer_width)
291
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
292
+ self.ln_final = LayerNorm(transformer_width)
293
+
294
+ self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
295
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
296
+
297
+ self.initialize_parameters()
298
+
299
+ def initialize_parameters(self):
300
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
301
+ nn.init.normal_(self.positional_embedding, std=0.01)
302
+
303
+ if isinstance(self.visual, ModifiedResNet):
304
+ if self.visual.attnpool is not None:
305
+ std = self.visual.attnpool.c_proj.in_features ** -0.5
306
+ nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
307
+ nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
308
+ nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
309
+ nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
310
+
311
+ for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
312
+ for name, param in resnet_block.named_parameters():
313
+ if name.endswith("bn3.weight"):
314
+ nn.init.zeros_(param)
315
+
316
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
317
+ attn_std = self.transformer.width ** -0.5
318
+ fc_std = (2 * self.transformer.width) ** -0.5
319
+ for block in self.transformer.resblocks:
320
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
321
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
322
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
323
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
324
+
325
+ if self.text_projection is not None:
326
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
327
+
328
+ def build_attention_mask(self):
329
+ # lazily create causal attention mask, with full attention between the vision tokens
330
+ # pytorch uses additive attention mask; fill with -inf
331
+ mask = torch.empty(self.context_length, self.context_length)
332
+ mask.fill_(float("-inf"))
333
+ mask.triu_(1) # zero out the lower triangle; the strict upper triangle keeps -inf
334
+ return mask
335
+
336
+ @property
337
+ def dtype(self):
338
+ return self.visual.conv1.weight.dtype
339
+
340
+ def encode_image(self, image):
341
+ return self.visual(image.type(self.dtype))
342
+
343
+ def encode_text(self, text):
344
+ x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
345
+
346
+ x = x + self.positional_embedding.type(self.dtype)
347
+ x = x.permute(1, 0, 2) # NLD -> LND
348
+ x = self.transformer(x)
349
+ x = x.permute(1, 0, 2) # LND -> NLD
350
+ x = self.ln_final(x).type(self.dtype)
351
+
352
+ # x.shape = [batch_size, n_ctx, transformer.width]
353
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
354
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
355
+
356
+ return x
357
+
358
+ def forward(self, image, text):
359
+ image_features = self.encode_image(image)
360
+ text_features = self.encode_text(text)
361
+
362
+ # normalized features
363
+ image_features = image_features / image_features.norm(dim=1, keepdim=True)
364
+ text_features = text_features / text_features.norm(dim=1, keepdim=True)
365
+
366
+ # cosine similarity as logits
367
+ logit_scale = self.logit_scale.exp()
368
+ logits_per_image = logit_scale * image_features @ text_features.t()
369
+ logits_per_text = logits_per_image.t()
370
+
371
+ # shape = [global_batch_size, global_batch_size]
372
+ return logits_per_image, logits_per_text
373
+
374
+
375
+ def convert_weights(model: nn.Module):
376
+ """Convert applicable model parameters to fp16"""
377
+
378
+ def _convert_weights_to_fp16(l):
379
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
380
+ l.weight.data = l.weight.data.half()
381
+ if l.bias is not None:
382
+ l.bias.data = l.bias.data.half()
383
+
384
+ if isinstance(l, nn.MultiheadAttention):
385
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
386
+ tensor = getattr(l, attr)
387
+ if tensor is not None:
388
+ tensor.data = tensor.data.half()
389
+
390
+ for name in ["text_projection", "proj"]:
391
+ if hasattr(l, name):
392
+ attr = getattr(l, name)
393
+ if attr is not None:
394
+ attr.data = attr.data.half()
395
+
396
+ model.apply(_convert_weights_to_fp16)
397
+
398
+
399
+ def build_model(state_dict: dict):
400
+ vit = "visual.proj" in state_dict
401
+
402
+ if vit:
403
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
404
+ vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
405
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
406
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
407
+ image_resolution = vision_patch_size * grid_size
408
+ else:
409
+ counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
410
+ vision_layers = tuple(counts)
411
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
412
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
413
+ vision_patch_size = None
414
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
415
+ image_resolution = output_width * 32
416
+
417
+ embed_dim = state_dict["text_projection"].shape[1]
418
+ context_length = state_dict["positional_embedding"].shape[0]
419
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
420
+ transformer_width = state_dict["ln_final.weight"].shape[0]
421
+ transformer_heads = transformer_width // 64
422
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
423
+
424
+ model = CLIP(
425
+ embed_dim,
426
+ image_resolution, vision_layers, vision_width, vision_patch_size,
427
+ context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
428
+ )
429
+
430
+ for key in ["input_resolution", "context_length", "vocab_size"]:
431
+ if key in state_dict:
432
+ del state_dict[key]
433
+
434
+ convert_weights(model)
435
+ model.load_state_dict(state_dict)
436
+ return model.eval()
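For reference, a minimal sketch of how build_model is typically driven; the checkpoint path is an assumption, the released CLIP checkpoints are TorchScript archives (hence the torch.jit.load fallback), and the cast back to fp32 is needed because build_model converts the weights to fp16:

import torch

from clip.model import build_model

checkpoint = "ViT-B-16.pt"  # assumed local path to a downloaded CLIP checkpoint

try:
    state_dict = torch.jit.load(checkpoint, map_location="cpu").state_dict()
except RuntimeError:
    state_dict = torch.load(checkpoint, map_location="cpu")  # plain state dict

model = build_model(state_dict)  # returned in eval mode, weights in fp16
model.float()                    # safer for CPU inference

with torch.no_grad():
    res = model.visual.input_resolution
    image = torch.randn(1, 3, res, res)
    text = torch.zeros(1, model.context_length, dtype=torch.long)  # dummy token ids
    logits_per_image, logits_per_text = model(image, text)
print(logits_per_image.shape)  # [1, 1]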
clip/simple_tokenizer.py ADDED
@@ -0,0 +1,132 @@
1
+ import gzip
2
+ import html
3
+ import os
4
+ from functools import lru_cache
5
+
6
+ import ftfy
7
+ import regex as re
8
+
9
+
10
+ @lru_cache()
11
+ def default_bpe():
12
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
13
+
14
+
15
+ @lru_cache()
16
+ def bytes_to_unicode():
17
+ """
18
+ Returns a mapping from utf-8 bytes to corresponding unicode strings.
19
+ The reversible bpe codes work on unicode strings.
20
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
21
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
22
+ This is a significant percentage of your normal, say, 32K bpe vocab.
23
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
24
+ This also avoids mapping to whitespace/control characters that the bpe code barfs on.
25
+ """
26
+ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
27
+ cs = bs[:]
28
+ n = 0
29
+ for b in range(2**8):
30
+ if b not in bs:
31
+ bs.append(b)
32
+ cs.append(2**8+n)
33
+ n += 1
34
+ cs = [chr(n) for n in cs]
35
+ return dict(zip(bs, cs))
36
+
37
+
38
+ def get_pairs(word):
39
+ """Return set of symbol pairs in a word.
40
+ Word is represented as tuple of symbols (symbols being variable-length strings).
41
+ """
42
+ pairs = set()
43
+ prev_char = word[0]
44
+ for char in word[1:]:
45
+ pairs.add((prev_char, char))
46
+ prev_char = char
47
+ return pairs
48
+
49
+
50
+ def basic_clean(text):
51
+ text = ftfy.fix_text(text)
52
+ text = html.unescape(html.unescape(text))
53
+ return text.strip()
54
+
55
+
56
+ def whitespace_clean(text):
57
+ text = re.sub(r'\s+', ' ', text)
58
+ text = text.strip()
59
+ return text
60
+
61
+
62
+ class SimpleTokenizer(object):
63
+ def __init__(self, bpe_path: str = default_bpe()):
64
+ self.byte_encoder = bytes_to_unicode()
65
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
66
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
67
+ merges = merges[1:49152-256-2+1]
68
+ merges = [tuple(merge.split()) for merge in merges]
69
+ vocab = list(bytes_to_unicode().values())
70
+ vocab = vocab + [v+'</w>' for v in vocab]
71
+ for merge in merges:
72
+ vocab.append(''.join(merge))
73
+ vocab.extend(['<|startoftext|>', '<|endoftext|>'])
74
+ self.encoder = dict(zip(vocab, range(len(vocab))))
75
+ self.decoder = {v: k for k, v in self.encoder.items()}
76
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
77
+ self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
78
+ self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
79
+
80
+ def bpe(self, token):
81
+ if token in self.cache:
82
+ return self.cache[token]
83
+ word = tuple(token[:-1]) + ( token[-1] + '</w>',)
84
+ pairs = get_pairs(word)
85
+
86
+ if not pairs:
87
+ return token+'</w>'
88
+
89
+ while True:
90
+ bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
91
+ if bigram not in self.bpe_ranks:
92
+ break
93
+ first, second = bigram
94
+ new_word = []
95
+ i = 0
96
+ while i < len(word):
97
+ try:
98
+ j = word.index(first, i)
99
+ new_word.extend(word[i:j])
100
+ i = j
101
+ except ValueError:
102
+ new_word.extend(word[i:])
103
+ break
104
+
105
+ if word[i] == first and i < len(word)-1 and word[i+1] == second:
106
+ new_word.append(first+second)
107
+ i += 2
108
+ else:
109
+ new_word.append(word[i])
110
+ i += 1
111
+ new_word = tuple(new_word)
112
+ word = new_word
113
+ if len(word) == 1:
114
+ break
115
+ else:
116
+ pairs = get_pairs(word)
117
+ word = ' '.join(word)
118
+ self.cache[token] = word
119
+ return word
120
+
121
+ def encode(self, text):
122
+ bpe_tokens = []
123
+ text = whitespace_clean(basic_clean(text)).lower()
124
+ for token in re.findall(self.pat, text):
125
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
126
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
127
+ return bpe_tokens
128
+
129
+ def decode(self, tokens):
130
+ text = ''.join([self.decoder[token] for token in tokens])
131
+ text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
132
+ return text
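For reference, a small round-trip sketch for SimpleTokenizer; it assumes bpe_simple_vocab_16e6.txt.gz sits next to this module (as default_bpe() expects) and that the ftfy and regex packages are installed:

from clip.simple_tokenizer import SimpleTokenizer

tokenizer = SimpleTokenizer()

ids = tokenizer.encode("A photo of a cat")  # cleaned, lower-cased, then BPE-encoded
print(ids)
print(tokenizer.decode(ids))                # "a photo of a cat " (</w> markers become spaces)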
clip/vitseg.py ADDED
@@ -0,0 +1,286 @@
1
+ import math
2
+ from posixpath import basename, dirname, join
3
+ # import clip
4
+ from clip.model import convert_weights
5
+ import torch
6
+ import json
7
+ from torch import nn
8
+ from torch.nn import functional as nnf
9
+ from torch.nn.modules import activation
10
+ from torch.nn.modules.activation import ReLU
11
+ from torchvision import transforms
12
+
13
+ normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
14
+
15
+ from torchvision.models import ResNet
16
+
17
+
18
+ def process_prompts(conditional, prompt_list, conditional_map):
19
+ # DEPRECATED
20
+
21
+ # randomly sample a synonym
22
+ words = [conditional_map[int(i)] for i in conditional]
23
+ words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words]
24
+ words = [w.replace('_', ' ') for w in words]
25
+
26
+ if prompt_list is not None:
27
+ prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
28
+ prompts = [prompt_list[i] for i in prompt_indices]
29
+ else:
30
+ prompts = ['a photo of {}'] * (len(words))
31
+
32
+ return [prompt.format(w) for prompt, w in zip(prompts, words)]
33
+
34
+
35
+ class VITDenseBase(nn.Module):
36
+
37
+ def rescaled_pos_emb(self, new_size):
38
+ assert len(new_size) == 2
39
+
40
+ a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
41
+ b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
42
+ return torch.cat([self.model.positional_embedding[:1], b])
43
+
44
+ def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):
45
+
46
+ with torch.no_grad():
47
+
48
+ x_inp = nnf.interpolate(x_inp, (384, 384))
49
+
50
+ x = self.model.patch_embed(x_inp)
51
+ cls_token = self.model.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks
52
+ if self.model.dist_token is None:
53
+ x = torch.cat((cls_token, x), dim=1)
54
+ else:
55
+ x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
56
+ x = self.model.pos_drop(x + self.model.pos_embed)
57
+
58
+ activations = []
59
+ for i, block in enumerate(self.model.blocks):
60
+ x = block(x)
61
+
62
+ if i in extract_layers:
63
+ # permute to be compatible with CLIP
64
+ activations += [x.permute(1,0,2)]
65
+
66
+ x = self.model.norm(x)
67
+ x = self.model.head(self.model.pre_logits(x[:, 0]))
68
+
69
+ # again for CLIP compatibility
70
+ # x = x.permute(1, 0, 2)
71
+
72
+ return x, activations, None
73
+
74
+ def sample_prompts(self, words, prompt_list=None):
75
+
76
+ prompt_list = prompt_list if prompt_list is not None else self.prompt_list
77
+
78
+ prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
79
+ prompts = [prompt_list[i] for i in prompt_indices]
80
+ return [prompt.format(w) for prompt, w in zip(prompts, words)]
81
+
82
+ def get_cond_vec(self, conditional, batch_size):
83
+ # compute conditional from a single string
84
+ if conditional is not None and type(conditional) == str:
85
+ cond = self.compute_conditional(conditional)
86
+ cond = cond.repeat(batch_size, 1)
87
+
88
+ # compute conditional from string list/tuple
89
+ elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
90
+ assert len(conditional) == batch_size
91
+ cond = self.compute_conditional(conditional)
92
+
93
+ # use conditional directly
94
+ elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
95
+ cond = conditional
96
+
97
+ # compute conditional from image
98
+ elif conditional is not None and type(conditional) == torch.Tensor:
99
+ with torch.no_grad():
100
+ cond, _, _ = self.visual_forward(conditional)
101
+ else:
102
+ raise ValueError('invalid conditional')
103
+ return cond
104
+
105
+ def compute_conditional(self, conditional):
106
+ import clip
107
+
108
+ dev = next(self.parameters()).device
109
+
110
+ if type(conditional) in {list, tuple}:
111
+ text_tokens = clip.tokenize(conditional).to(dev)
112
+ cond = self.clip_model.encode_text(text_tokens)
113
+ else:
114
+ if conditional in self.precomputed_prompts:
115
+ cond = self.precomputed_prompts[conditional].float().to(dev)
116
+ else:
117
+ text_tokens = clip.tokenize([conditional]).to(dev)
118
+ cond = self.clip_model.encode_text(text_tokens)[0]
119
+
120
+ return cond
121
+
122
+
123
+ class VITDensePredT(VITDenseBase):
124
+
125
+ def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
126
+ depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False,
127
+ learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False,
128
+ add_calibration=False, process_cond=None, not_pretrained=False):
129
+ super().__init__()
130
+ # device = 'cpu'
131
+
132
+ self.extract_layers = extract_layers
133
+ self.cond_layer = cond_layer
134
+ self.limit_to_clip_only = limit_to_clip_only
135
+ self.process_cond = None
136
+
137
+ if add_calibration:
138
+ self.calibration_conds = 1
139
+
140
+ self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None
141
+
142
+ self.add_activation1 = True
143
+
144
+ import timm
145
+ self.model = timm.create_model('vit_base_patch16_384', pretrained=True)
146
+ self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond)
147
+
148
+ for p in self.model.parameters():
149
+ p.requires_grad_(False)
150
+
151
+ import clip
152
+ self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False)
153
+ # del self.clip_model.visual
154
+
155
+
156
+ self.token_shape = (14, 14)
157
+
158
+ # conditional
159
+ if reduce_cond is not None:
160
+ self.reduce_cond = nn.Linear(512, reduce_cond)
161
+ for p in self.reduce_cond.parameters():
162
+ p.requires_grad_(False)
163
+ else:
164
+ self.reduce_cond = None
165
+
166
+ # self.film = AVAILABLE_BLOCKS['film'](512, 128)
167
+ self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
168
+ self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
169
+
170
+ # DEPRECATED
171
+ # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))}
172
+
173
+ assert len(self.extract_layers) == depth
174
+
175
+ self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
176
+ self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
177
+ self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])
178
+
179
+ trans_conv_ks = (16, 16)
180
+ self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
181
+
182
+ # refinement and trans conv
183
+
184
+ if learn_trans_conv_only:
185
+ for p in self.parameters():
186
+ p.requires_grad_(False)
187
+
188
+ for p in self.trans_conv.parameters():
189
+ p.requires_grad_(True)
190
+
191
+ if prompt == 'fixed':
192
+ self.prompt_list = ['a photo of a {}.']
193
+ elif prompt == 'shuffle':
194
+ self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
195
+ elif prompt == 'shuffle+':
196
+ self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
197
+ 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
198
+ 'a bad photo of a {}.', 'a photo of the {}.']
199
+ elif prompt == 'shuffle_clip':
200
+ from models.clip_prompts import imagenet_templates
201
+ self.prompt_list = imagenet_templates
202
+
203
+ if process_cond is not None:
204
+ if process_cond == 'clamp' or process_cond[0] == 'clamp':
205
+
206
+ val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2
207
+
208
+ def clamp_vec(x):
209
+ return torch.clamp(x, -val, val)
210
+
211
+ self.process_cond = clamp_vec
212
+
213
+ elif process_cond.endswith('.pth'):
214
+
215
+ shift = torch.load(process_cond)
216
+ def add_shift(x):
217
+ return x + shift.to(x.device)
218
+
219
+ self.process_cond = add_shift
220
+
221
+ import pickle
222
+ precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
223
+ self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}
224
+
225
+
226
+ def forward(self, inp_image, conditional=None, return_features=False, mask=None):
227
+
228
+ assert type(return_features) == bool
229
+
230
+ # inp_image = inp_image.to(self.model.positional_embedding.device)
231
+
232
+ if mask is not None:
233
+ raise ValueError('mask not supported')
234
+
235
+ # x_inp = normalize(inp_image)
236
+ x_inp = inp_image
237
+
238
+ bs, dev = inp_image.shape[0], x_inp.device
239
+
240
+ inp_image_size = inp_image.shape[2:]
241
+
242
+ cond = self.get_cond_vec(conditional, bs)
243
+
244
+ visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))
245
+
246
+ activation1 = activations[0]
247
+ activations = activations[1:]
248
+
249
+ a = None
250
+ for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)):
251
+
252
+ if a is not None:
253
+ a = reduce(activation) + a
254
+ else:
255
+ a = reduce(activation)
256
+
257
+ if i == self.cond_layer:
258
+ if self.reduce_cond is not None:
259
+ cond = self.reduce_cond(cond)
260
+
261
+ a = self.film_mul(cond) * a + self.film_add(cond)
262
+
263
+ a = block(a)
264
+
265
+ for block in self.extra_blocks:
266
+ a = a + block(a)
267
+
268
+ a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens
269
+
270
+ size = int(math.sqrt(a.shape[2]))
271
+
272
+ a = a.view(bs, a.shape[1], size, size)
273
+
274
+ if self.trans_conv is not None:
275
+ a = self.trans_conv(a)
276
+
277
+ if self.upsample_proj is not None:
278
+ a = self.upsample_proj(a)
279
+ a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')
280
+
281
+ a = nnf.interpolate(a, inp_image_size)
282
+
283
+ if return_features:
284
+ return a, visual_q, cond, [activation1] + activations
285
+ else:
286
+ return a,
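For reference, a hedged sketch of how VITDensePredT appears to be meant to be used. Instantiating it downloads the timm vit_base_patch16_384 backbone and OpenAI's ViT-B/16 CLIP weights, reads precomputed_prompt_vectors.pickle from the working directory, and relies on a timm release that still exposes dist_token and pre_logits, so all of those need to be in place:

import torch

from clip.vitseg import VITDensePredT

model = VITDensePredT(extract_layers=(3, 6, 9), reduce_dim=128).eval()

image = torch.randn(1, 3, 352, 352)  # any RGB batch; resized to 384x384 internally
with torch.no_grad():
    pred, = model(image, conditional=['a glass of water'])
print(pred.shape)  # [1, 1, 352, 352]: one mask logit map per input image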
docs/faceselection.png ADDED
docs/finaloutput.png ADDED

Git LFS Details

  • SHA256: 1b6883bacac4a858a44fa4108e3f11e0ebbbc34bb0393aac87a1916da76aba44
  • Pointer size: 132 Bytes
  • Size of remote file: 1.42 MB
docs/kickboxing.jpg ADDED
docs/musk.jpg ADDED
docs/screenshot.png ADDED
gfpgan/weights/detection_Resnet50_Final.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
3
+ size 109497761
gfpgan/weights/parsing_parsenet.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d558d8d0e42c20224f13cf5a29c79eba2d59913419f945545d8cf7b72920de2
3
+ size 85331193