Brasd99 committed
Commit f04a56f
1 Parent(s): 4635a51

Switching from double quotes to single quotes
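This is a pure style change: Python treats single- and double-quoted string literals as the same value, so none of the URLs, labels, or dictionary keys touched below behave differently. A one-line check, for illustration:

    # Quote style does not affect the resulting string object.
    assert "cpu" == 'cpu' and "pred_boxes_XYXY" == 'pred_boxes_XYXY'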

Files changed (2):
  1. app.py +8 -8
  2. helpers/processor.py +12 -12
app.py CHANGED
@@ -12,11 +12,11 @@ def load_model(current_path):
     data_path = os.path.join(current_path, 'data')
     if not os.path.isdir(data_path):
         os.mkdir(data_path)
-        url = "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml"
+        url = 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml'
         wget.download(url, os.path.join(data_path, 'config.yaml'))
-        url = "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_WC1M_s1x/217144516/model_final_48a9d9.pkl"
+        url = 'https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_WC1M_s1x/217144516/model_final_48a9d9.pkl'
         wget.download(url, os.path.join(data_path, 'weights.pkl'))
-        url = "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml"
+        url = 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml'
         wget.download(url, os.path.join(data_path, 'Base-DensePose-RCNN-FPN.yaml'))
 
 current_path = os.getcwd()
@@ -27,14 +27,14 @@ densepose_weights = os.path.join(current_path, 'data', 'weights.pkl')
 texture_processor = TextureProcessor(densepose_config, densepose_weights)
 
 inputs = [
-    gr.inputs.Image(label="Person Image", type='numpy'),
-    gr.inputs.Image(label="Model Image (with clothes)", type='numpy')
+    gr.inputs.Image(label='Person Image', type='numpy'),
+    gr.inputs.Image(label='Model Image (with clothes)', type='numpy')
 ]
 
-outputs = gr.outputs.Image(label="Result Image", type='numpy')
+outputs = gr.outputs.Image(label='Result Image', type='numpy')
 
-title = "JustClothify"
-description = "Upload an image of a person and an image of a model with clothes, the system will generate an image of a person wearing these clothes."
+title = 'JustClothify'
+description = 'Upload an image of a person and an image of a model with clothes, the system will generate an image of a person wearing these clothes.'
 
 gr.Interface(
     fn=image_processing,
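For context, the interface these app.py lines feed is assembled roughly as sketched below. Only the lines visible in the diff are confirmed; the remaining gr.Interface arguments, the .launch() call, and the image_processing wrapper (including the texture_processor method it calls) are assumptions for illustration.

    import gradio as gr

    def image_processing(person_img, model_img):
        # Hypothetical wrapper: the diff only confirms gr.Interface receives fn=image_processing.
        return texture_processor.extract(person_img, model_img)

    gr.Interface(
        fn=image_processing,
        inputs=inputs,          # the two gr.inputs.Image components from the diff
        outputs=outputs,        # the single gr.outputs.Image result
        title=title,
        description=description
    ).launch()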
helpers/processor.py CHANGED
@@ -65,7 +65,7 @@ class TextureProcessor:
         extractor = CompoundExtractor([extractor])
 
         with torch.no_grad():
-            outputs = self.predictor(original_image)["instances"]
+            outputs = self.predictor(original_image)['instances']
 
         image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
         image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
@@ -82,7 +82,7 @@ class TextureProcessor:
         return iuv
 
     def parse_bbox(self, result):
-        return result["pred_boxes_XYXY"][0].cpu().numpy()
+        return result['pred_boxes_XYXY'][0].cpu().numpy()
 
     def interpolate_tex(self, tex):
         valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
@@ -166,27 +166,27 @@ class TextureProcessor:
         add_densepose_config(cfg)
         cfg.merge_from_file(config_fpath)
         cfg.MODEL.WEIGHTS = model_fpath
-        cfg.MODEL.DEVICE = "cpu"
+        cfg.MODEL.DEVICE = 'cpu'
         cfg.freeze()
         return cfg
 
     def execute(self, image):
         context = {'results': []}
         with torch.no_grad():
-            outputs = self.predictor(image)["instances"]
+            outputs = self.predictor(image)['instances']
             self.execute_on_outputs(context, outputs)
-        return context["results"]
+        return context['results']
 
     def execute_on_outputs(self, context: Dict[str, Any], outputs: Instances):
         result = {}
-        if outputs.has("scores"):
-            result["scores"] = outputs.get("scores").cpu()
-        if outputs.has("pred_boxes"):
-            result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
-        if outputs.has("pred_densepose"):
+        if outputs.has('scores'):
+            result['scores'] = outputs.get('scores').cpu()
+        if outputs.has('pred_boxes'):
+            result['pred_boxes_XYXY'] = outputs.get('pred_boxes').tensor.cpu()
+        if outputs.has('pred_densepose'):
             if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput):
                 extractor = DensePoseResultExtractor()
             elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput):
                 extractor = DensePoseOutputsExtractor()
-            result["pred_densepose"] = extractor(outputs)[0]
-            context["results"].append(result)
+            result['pred_densepose'] = extractor(outputs)[0]
+            context['results'].append(result)
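The keys being re-quoted here are the same ones TextureProcessor reads back: execute() returns context['results'], each entry of which carries 'scores', 'pred_boxes_XYXY' and 'pred_densepose', and parse_bbox() indexes 'pred_boxes_XYXY'. A rough usage sketch under those assumptions (the image path and variable names are illustrative):

    import cv2

    # densepose_config / densepose_weights are the paths downloaded in app.py
    processor = TextureProcessor(densepose_config, densepose_weights)

    image = cv2.imread('person.jpg')        # BGR uint8 array for the detectron2 predictor
    results = processor.execute(image)       # -> context['results'] built by execute_on_outputs
    if results:
        first = results[0]
        bbox = processor.parse_bbox(first)   # numpy [x1, y1, x2, y2] of the first detection
        print(bbox, float(first['scores'][0]))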