Create app.py
app.py
ADDED
@@ -0,0 +1,337 @@
import streamlit as st
from PIL import Image
import torch.nn as nn
import numpy as np
import torch
import cv2
import pandas as pd
import os

from transformers import SegformerImageProcessor, AutoModelForSemanticSegmentation
from transformers import AutoImageProcessor, DetrForObjectDetection

# segmentation model
processor_seg = SegformerImageProcessor.from_pretrained("mattmdjaga/segformer_b2_clothes")
model_seg = AutoModelForSemanticSegmentation.from_pretrained("mattmdjaga/segformer_b2_clothes")
# object detection model
processor_obj = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
model_obj = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

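# Assumed data layout (inferred from the file paths used in this app, not shipped in this commit):
#   ./intro_img/fashionista.jpg, ./intro_img/process.png         (intro images)
#   ./style/<situation>/<category>/*.txt                          (pre-computed style vectors)
#   ./product/<category>/*.txt                                    (pre-computed product vectors)
#   ./product/img/<category>/*.jpg                                (product images)
#   acc_name.csv, bottom_name.csv, outer_name.csv, shoes_name.csv, top_name.csv
#   acc.csv, bottom.csv, outer.csv, shoes.csv, top.csv            (product metadata)
# If any of these files are missing, the app fails at runtime.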

def center_image(image_path, width=700):
    st.markdown(
        '<style>img { display: block; margin-left: auto; margin-right: auto; }</style>',
        unsafe_allow_html=True
    )
    st.image(image_path, width=width)

### INTRO ###
st.header('What should I wear today?!')
st.markdown('**Wait, are you really going out dressed like that?**')
st.markdown('**Made for anyone whose fashion sense is missing that last 2%!** Just upload a photo of a clothing item and we will recommend an outfit that fits current trends and your TPO. Take a look at these fashionista looks right now!')
center_image('./intro_img/fashionista.jpg')

st.markdown('--------------------------------------------------------------------------------------')
st.subheader('PROCESS')
center_image('./intro_img/process.png')
st.markdown('--------------------------------------------------------------------------------------')

### INPUT ###
st.subheader('Upload a clothing image')
input_image = st.file_uploader("**Upload a clothing image. (A photo with a clean background works best!)**", type=['png', 'jpg', 'jpeg'])
if not input_image:
    con = st.container()
    st.stop()
center_image(input_image, 400)
st.markdown('--------------------------------------------------------------------------------------')

st.subheader('Select the category of the uploaded item')
input_cat = st.radio(
    "**Choose the category of the clothing image you uploaded.**",
    ['top', 'bottom', 'shoes', 'hat', 'sunglasses', 'scarf', 'bag'],
    index=None,
    horizontal=True)


if not input_cat:
    con = st.container()
    st.stop()
st.write('You selected:', input_cat)
st.markdown('--------------------------------------------------------------------------------------')

st.subheader('Select the category you would like recommended')
output_cat = st.radio(
    '**Choose the clothing category you would like to be recommended.**',
    ['top', 'bottom', 'shoes', 'hat', 'sunglasses', 'scarf', 'bag'],
    index=None,
    horizontal=True)

if not output_cat:
    con = st.container()
    st.write('Note: please choose a category different from the one you uploaded.')
    st.stop()
st.write('You selected:', output_cat)
st.write(' ')
st.markdown('--------------------------------------------------------------------------------------')

st.subheader('Select the situation')
situation = st.radio(
    "**Choose the situation category.**",
    ['Travel', 'Cafe', 'Exhibition', 'Campus & Work', 'Cold weather', 'Exercise'],
    captions=['(beach, trip)', '(cafe, daily)', '(date, wedding)', '', '', ''],
    index=None,
    horizontal=True)

# convert the selected situation label to the folder name used on disk
situation_mapping = {
    'Travel': 'travel',
    'Cafe': 'cafe',
    'Exhibition': 'exhibit',
    'Campus & Work': 'campus_work',
    'Cold weather': 'cold',
    'Exercise': 'exercise'}

if not situation:
    con = st.container()
    st.stop()
situation = situation_mapping[situation]
st.write('You selected:', situation)

## Variable names
# input_image : uploaded clothing image
# input_cat   : category of the uploaded item
# output_cat  : category to recommend
# situation   : selected situation

st.markdown('--------------------------------------------------------------------------------------')


### segmentation, detection, and vector conversion of the uploaded image ###
image = Image.open(input_image)

# object detection & cropping helper
def cropping(images, start=1.0,
             stop=0.0,
             step=-0.05):
    image_1 = Image.fromarray(images)
    inputs = processor_obj(images=image_1, return_tensors="pt")
    outputs = model_obj(**inputs)
    # lower the detection threshold step by step until at least one box is found
    for tre in np.arange(start, stop, step):
        try:
            # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
            target_sizes = torch.tensor([image_1.size[::-1]])
            results = processor_obj.post_process_object_detection(outputs, threshold=tre, target_sizes=target_sizes)[0]

            img = None
            for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
                box = [round(i, 2) for i in box.tolist()]
                xmin, ymin, xmax, ymax = box
                img = image_1.crop((xmin, ymin, xmax, ymax))

            if img is None:
                continue  # nothing detected at this threshold, try a lower one
            return img
        except Exception:
            continue
    # no detection at any threshold: return the input as-is
    return images

# image -> flat vector helper
default_path = './'
def image_to_vector(image, resize_size=(256, 256)):  # resize to (256, 256) before flattening
    image = Image.fromarray(np.copy(image))
    image = image.resize(resize_size)
    image_array = np.array(image, dtype=np.float32)
    image_vector = image_array.flatten()
    return image_vector

# segmentation class id -> clothing label; ids in the same tuple are merged into one mask
SEG_LABELS = {
    1: ('hat', (1,)),
    3: ('sunglasses', (3,)),
    4: ('top', (4,)),
    5: ('bottom', (5, 6, 7)),   # pants / skirt / dress
    6: ('bottom', (5, 6, 7)),
    7: ('bottom', (5, 6, 7)),
    8: ('belt', (8,)),
    9: ('shoes', (9, 10)),      # left and right shoe
    16: ('bag', (16,)),
    17: ('scarf', (17,)),
}

# full preprocessing pipeline: segment the item, mask it out, crop it, and flatten it to a vector
def final_image(image):
    if len(np.array(image).shape) == 2:
        image = image.convert('RGB')   # grayscale input -> RGB
    # segmentation
    inputs = processor_seg(images=image, return_tensors="pt")
    outputs = model_seg(**inputs)
    logits = outputs.logits.cpu()
    upsampled_logits = nn.functional.interpolate(
        logits,
        size=image.size[::-1],
        mode="bilinear",
        align_corners=False,
    )
    pred_seg = upsampled_logits.argmax(dim=1)[0]
    segments = torch.unique(pred_seg)

    image_np = np.array(image)
    img_vector = None
    for i in segments:
        if int(i) not in SEG_LABELS:   # skip background, skin, hair, etc.
            continue
        cloth, ids = SEG_LABELS[int(i)]
        mask = torch.zeros_like(pred_seg, dtype=torch.bool)
        for seg_id in ids:
            mask = mask | (pred_seg == seg_id)
        mask_np = (mask * 255).numpy().astype(np.uint8)
        # keep only the pixels of this item, then crop and vectorize it
        result = cv2.bitwise_and(image_np.astype(np.uint8), image_np.astype(np.uint8), mask=mask_np)
        img = cropping(result)
        img_vector = image_to_vector(img)
    if img_vector is None:
        # no clothing segment was found: fall back to the whole image
        img_vector = image_to_vector(image_np)
    return img_vector
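# Example (assuming an RGB input, as in image_to_vector above): final_image(image) returns a
# flat float32 vector of length 256 * 256 * 3 = 196608, which must match the length of the
# saved .txt vectors under ./style/ and ./product/ for the cosine similarity below to work.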

# preprocessing of the uploaded image is complete
input_img = final_image(image)

### similarity analysis ###
# one argument is an in-memory vector, the other is a path to a saved vector
def cosine_similarity(vec1, vec2_path):
    vec2 = np.loadtxt(vec2_path)
    dot_product = np.dot(vec1, vec2)
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    similarity = dot_product / (norm_vec1 * norm_vec2)
    return similarity

# both arguments are paths to saved vectors
def cosine_similarity_2(vec1_path, vec2_path):
    vec1 = np.loadtxt(vec1_path)
    vec2 = np.loadtxt(vec2_path)
    dot_product = np.dot(vec1, vec2)
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    similarity = dot_product / (norm_vec1 * norm_vec2)
    return similarity

with st.spinner('Wait for it...'):
    # compare the uploaded item with the style images of the same category for the chosen situation
    sim_list = []
    file_path = './style/' + situation + '/' + input_cat + '/'  # e.g. './style/cafe/top/'
    cloths = os.listdir(file_path)
    for cloth in cloths:
        sim_list.append(cosine_similarity(input_img, file_path + cloth))
    max_idx = np.argmax(sim_list)

    # target image: the output-category item of the most similar style
    # (the same file name is assumed to exist under every category folder of a style)
    target_image = './style/' + situation + '/' + output_cat + '/' + cloths[max_idx]
    # compare that style item with the product vectors of the output category
    sim_list = []
    file_path = './product/' + output_cat + '/'
    cloths = os.listdir(file_path)
    for cloth in cloths:
        sim_list.append(cosine_similarity_2(target_image, file_path + cloth))
    max_idx = np.argmax(sim_list)
    output_name = cloths[max_idx]
    ## example output: 'bottom_1883.txt'

# load product-name lookup tables
acc_name = pd.read_csv('acc_name.csv')
bottom_name = pd.read_csv('bottom_name.csv')
outer_name = pd.read_csv('outer_name.csv')
shoes_name = pd.read_csv('shoes_name.csv')
top_name = pd.read_csv('top_name.csv')

# load product data
outer = pd.read_csv('outer.csv')
top = pd.read_csv('top.csv')
bottom = pd.read_csv('bottom.csv')
shoes = pd.read_csv('shoes.csv')
acc = pd.read_csv('acc.csv')

if output_cat == 'bottom':
    df = bottom.copy()
    df_name = bottom_name.copy()
elif output_cat == 'top':
    df = top.copy()
    df_name = top_name.copy()
elif output_cat == 'shoes':
    df = shoes.copy()
    df_name = shoes_name.copy()
elif output_cat in ('hat', 'sunglasses', 'scarf', 'bag', 'belt'):
    df = acc.copy()
    df_name = acc_name.copy()

output_name = output_name.split('.')[0]
file_name = df_name[df_name['index'] == output_name].iloc[0, 1]  # e.g. '3049906_16754112975667_500.jpg'
final = df[df['id'] == file_name]

name = final['name'].values[0].split('\n')[-1]   # product name
price = final['price'].values[0]                 # product price

image_path = './product/img/'

st.subheader('OUTPUT')

img = Image.open(image_path + output_cat + '/' + file_name)

col1, col2, col3 = st.columns(3)
with col1:
    st.image(img, width=400)
with col3:
    st.caption('Product name: ' + name)
    st.caption('Price: ' + str(price))