ckandemir committed on
Commit
c4fb714
·
1 Parent(s): 366e626

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +8 -12
handler.py CHANGED
@@ -1,5 +1,5 @@
1
  import requests
2
- from typing import Dict, List, Any
3
  from PIL import Image
4
  import torch
5
  from io import BytesIO
@@ -14,22 +14,19 @@ class EndpointHandler():
14
  "Salesforce/blip-image-captioning-large"
15
  ).to(device)
16
  self.model.eval()
17
- self.model = self.model.to(device)
18
 
19
  def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
20
- image_urls = data.get("images")
21
 
22
- # Check if images is None or empty and handle it appropriately
23
- if not image_urls:
24
  return {"captions": [], "error": "No images provided"}
25
-
26
- # Default to "a photography of" if texts not provided
27
- texts = data.get("texts", ["a photography of"] * len(image_urls))
28
-
29
  try:
30
- raw_images = [Image.open(requests.get(url, stream=True).raw).convert("RGB") for url in image_urls]
31
  processed_inputs = [
32
- self.processor(img, txt, return_tensors="pt") for img, txt in zip(raw_images, texts)
33
  ]
34
  processed_inputs = {
35
  "pixel_values": torch.cat([inp["pixel_values"] for inp in processed_inputs], dim=0).to(device),
@@ -43,6 +40,5 @@ class EndpointHandler():
43
  captions = self.processor.batch_decode(out, skip_special_tokens=True)
44
  return {"captions": captions}
45
  except Exception as e:
46
- # Handle or log the exception and optionally return an error message
47
  print(f"Error during processing: {str(e)}")
48
  return {"captions": [], "error": str(e)}
 
1
  import requests
2
+ from typing import Dict, Any
3
  from PIL import Image
4
  import torch
5
  from io import BytesIO
 
14
  "Salesforce/blip-image-captioning-large"
15
  ).to(device)
16
  self.model.eval()
 
17
 
18
  def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
19
+ encoded_images = data.get("images")
20
 
21
+ if not encoded_images:
 
22
  return {"captions": [], "error": "No images provided"}
23
+
24
+ texts = data.get("texts", ["a photography of"] * len(encoded_images))
25
+
 
26
  try:
27
+ raw_images = [Image.open(BytesIO(base64.b64decode(img))).convert("RGB") for img in encoded_images]
28
  processed_inputs = [
29
+ self.processor(image, text, return_tensors="pt") for image, text in zip(raw_images, texts)
30
  ]
31
  processed_inputs = {
32
  "pixel_values": torch.cat([inp["pixel_values"] for inp in processed_inputs], dim=0).to(device),
 
40
  captions = self.processor.batch_decode(out, skip_special_tokens=True)
41
  return {"captions": captions}
42
  except Exception as e:
 
43
  print(f"Error during processing: {str(e)}")
44
  return {"captions": [], "error": str(e)}