Upload 5 files

- Dockerfile +22 -0
- app.py +47 -0
- process_img.py +49 -0
- requirements.txt +7 -0
- vector_emb.py +32 -0
Dockerfile
ADDED
FROM python:3.9

WORKDIR /app

COPY . /app

# torch and sentence-transformers are required by vector_emb.py, so they are
# installed alongside the API dependencies.
RUN pip3 install fastapi uvicorn transformers==4.42.3 pillow protobuf==4.25.3 fastapi-health boto3 torch sentence-transformers

RUN useradd -m -u 1000 user

USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

COPY --chown=user . $HOME/app

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]

# uvicorn app:app --host 0.0.0.0 --port 7860
app.py
ADDED
from fastapi import FastAPI, Depends, HTTPException
from pydantic import BaseModel
from fastapi_health import health
from PIL import Image
import logging
import sys
from io import BytesIO
import base64

from process_img import Image_Processor
from vector_emb import Get_EmbeddingModels

logger = logging.getLogger(__name__)

logging.basicConfig(
    level=logging.getLevelName("INFO"),
    handlers=[logging.StreamHandler(sys.stdout)],
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.info('Logging module started')

def get_session():
    return True

def is_database_online(session: bool = Depends(get_session)):
    return session

app = FastAPI()
app.add_api_route("/healthz", health([is_database_online]))


model = Get_EmbeddingModels()
img_processor = Image_Processor()

class ImageBase64(BaseModel):
    base64_string: str

class TextInput(BaseModel):
    text: str

@app.post("/design-dense/")
async def embed_image(data: ImageBase64):
    # Reject malformed payloads with a 400 instead of an unhandled 500.
    try:
        image_data = base64.b64decode(data.base64_string)
        image = Image.open(BytesIO(image_data))
    except Exception as exc:
        raise HTTPException(status_code=400, detail=f"Invalid base64 image: {exc}")
    final_image = img_processor.get_processed_img(image)
    embeddings = model.get_dense_embd(final_image)
    return embeddings
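For reference, a minimal client sketch for the /design-dense/ endpoint (assuming the service is reachable on localhost:7860; the requests package and the design.png filename are assumptions, not part of the repo):

import base64
import requests

# Encode a local image and post it to the embedding endpoint.
# "design.png" is a hypothetical input file; requests is not a listed dependency.
with open("design.png", "rb") as f:
    payload = {"base64_string": base64.b64encode(f.read()).decode("utf-8")}

resp = requests.post("http://localhost:7860/design-dense/", json=payload)
resp.raise_for_status()
embedding = resp.json()  # dense vector as a list of floats
print(len(embedding))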
process_img.py
ADDED
import numpy as np
from PIL import Image, ImageOps
import logging

class Image_Processor:
    def __init__(self):
        pass

    def is_image_white_by_percentage(self, image, white_threshold):
        # Count pure-white pixels and compare their share against the threshold.
        image = image.convert('RGB')
        image_np = np.array(image)
        white_pixel = np.array([255, 255, 255])
        white_pixels_count = np.sum(np.all(image_np == white_pixel, axis=-1))
        total_pixels = image_np.shape[0] * image_np.shape[1]
        white_pixel_percentage = (white_pixels_count / total_pixels) * 100
        return white_pixel_percentage > white_threshold

    def padding_white(self, image, output_size=(512, 512)):
        # Ensure image is in RGB mode before padding
        if image.mode != 'RGB':
            image = image.convert('RGB')
        new_image = ImageOps.pad(image, output_size, method=Image.Resampling.LANCZOS, color=(255, 255, 255))
        return new_image

    def resize_image_with_aspect_ratio(self, img):
        # Scale the longer side to 512 px while preserving the aspect ratio.
        target_size = 512
        width, height = img.size
        original_aspect_ratio = width / height
        if width > height:
            new_width = target_size
            new_height = int(target_size / original_aspect_ratio)
        else:
            new_height = target_size
            new_width = int(target_size * original_aspect_ratio)
        resized_img = img.resize((new_width, new_height))
        return resized_img

    def get_processed_img(self, image):
        # Mostly-white images keep their aspect ratio and are padded to
        # 512x512; everything else is resized to 512x512 directly.
        is_mostly_white = self.is_image_white_by_percentage(image, 50)
        if is_mostly_white:
            resized_image = self.resize_image_with_aspect_ratio(image)
            final_image = self.padding_white(resized_image)
            logging.info('Resized and Padded Image')
        else:
            final_image = image.resize((512, 512))
            logging.info('Resized Image')

        # Embeddings are computed on a single-channel (grayscale) image.
        final_image = final_image.convert('L') if final_image.mode != 'L' else final_image
        return final_image
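For illustration, a minimal sketch that runs the preprocessing path on its own (sample.png is a hypothetical input file):

from PIL import Image
from process_img import Image_Processor

# Run the preprocessing pipeline standalone; "sample.png" is hypothetical.
processor = Image_Processor()
processed = processor.get_processed_img(Image.open("sample.png"))
print(processed.size, processed.mode)  # expected: (512, 512) L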
requirements.txt
ADDED
fastapi
uvicorn
pillow
torch
transformers
fastapi-health
boto3
sentence-transformers  # required by vector_emb.py
vector_emb.py
ADDED
from sentence_transformers import SentenceTransformer
import torch

class Load_EmbeddingModels:
    def __init__(self, model_name='jinaai/jina-clip-v2'):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.img_model_ID = model_name
        self.img_model = self.get_cp_img_model_info(self.img_model_ID)

    def get_cp_img_model_info(self, model_name):
        # Load the CLIP-style model once and move it to the available device.
        print('Loading SentenceTransformer model')
        model = SentenceTransformer(model_name, trust_remote_code=True)
        model = model.to(self.device)
        return model

    def get_single_image_embedding_cp_im(self, my_image):
        # Encode a single PIL image into an L2-normalized dense vector.
        embedding = self.img_model.encode(
            my_image,
            normalize_embeddings=True
        )
        values = embedding.tolist()
        return values

class Get_EmbeddingModels:
    def __init__(self, model_name='jinaai/jina-clip-v2'):
        self.embed_model = Load_EmbeddingModels(model_name)

    def get_dense_embd(self, img):
        embd = self.embed_model.get_single_image_embedding_cp_im(img)
        return embd
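For completeness, a minimal sketch that embeds one preprocessed image directly, without the FastAPI layer (sample.png is a hypothetical file; the model weights are downloaded on first use):

from PIL import Image
from process_img import Image_Processor
from vector_emb import Get_EmbeddingModels

# Embed a single image end to end without the HTTP service.
model = Get_EmbeddingModels()  # loads jinaai/jina-clip-v2
img = Image_Processor().get_processed_img(Image.open("sample.png"))
vec = model.get_dense_embd(img)  # normalized dense vector as a list
print(len(vec))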