Alex committed on
Commit
779723a
·
1 Parent(s): d2009f5

revert to file

Browse files
Files changed (3) hide show
  1. README.md +7 -3
  2. app-base64.py +82 -0
  3. app.py +13 -32
README.md CHANGED
@@ -12,18 +12,22 @@ pinned: false
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
14
 
15
- curl -X POST "https://alexgenovese-segmentation.hf.space/api/segment" \
16
  -F "data=[{\"type\": \"image\", \"value\": null}]" \
17
  -F "data=@woman_with_bag.jpeg" \
18
  -H "Content-Type: multipart/form-data" \
19
  -o response.json
20
 
21
 
22
- curl -X POST "https://alexgenovese-segmentation.hf.space/api/segment" \
23
  -H "accept: application/json" \
24
  -H "Content-Type: multipart/form-data" \
25
  -F "file=@woman_with_bag.jpeg"
26
 
27
 
28
 
29
- curl -X POST -F "file=@woman_with_bag.jpeg" https://alexgenovese-segmentation.hf.space/api/segment
 
 
 
 
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
14
 
15
+ curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
16
  -F "data=[{\"type\": \"image\", \"value\": null}]" \
17
  -F "data=@woman_with_bag.jpeg" \
18
  -H "Content-Type: multipart/form-data" \
19
  -o response.json
20
 
21
 
22
+ curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
23
  -H "accept: application/json" \
24
  -H "Content-Type: multipart/form-data" \
25
  -F "file=@woman_with_bag.jpeg"
26
 
27
 
28
 
29
+ curl -X POST -F "file=@woman_with_bag.jpeg" https://alexgenovese-segmentation.hf.space/segment
30
+
31
+ curl -X POST https://alexgenovese-segmentation.hf.space/segment \
32
+ -H "Content-Type: application/json" \
33
+ -d "{\"image_base64\": \"$(base64 woman_with_bag.jpeg)\"}"
app-base64.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, File, UploadFile
2
+ from pydantic import BaseModel
3
+ from transformers import SamModel, SamProcessor
4
+ import torch
5
+ from PIL import Image
6
+ import numpy as np
7
+ import io
8
+ import base64
9
+
10
class ImageRequest(BaseModel):
    """JSON request body for the /segment endpoint."""
    # Base64-encoded image bytes, decoded server-side with base64.b64decode
    image_base64: str
12
+
13
# Set up the FastAPI application.
app = FastAPI()

# Load the SAM checkpoint and its processor once at startup.
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to("cpu")  # CPU only: free-tier hardware
20
+
21
@app.get("/health")
async def health_check():
    """Liveness probe: report that the service is up."""
    return {"status": "ok"}
24
+
25
def preprocess_image(image: Image.Image, size=(320, 320)):
    """Convert *image* to RGB and downscale it to *size* for fast CPU inference."""
    rgb = image.convert("RGB")
    # 320x320 keeps SAM inference quick on CPU-only hardware
    return rgb.resize(size, Image.LANCZOS)
30
+
31
# Run SAM over the whole image and package the result for a JSON response.
def segment_image(image: Image.Image):
    """Segment *image* with SAM; return (base64 PNG mask, annotations dict)."""
    # Prepare the SAM input tensors on CPU
    inputs = processor(image, return_tensors="pt").to("cpu")

    # Inference without gradient tracking
    with torch.no_grad():
        outputs = model(**inputs, multimask_output=False)

    # Post-process the predicted mask back to the original image size
    mask = processor.image_processor.post_process_masks(
        outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
    )[0][0].cpu().numpy()

    # Render the mask as an 8-bit grayscale image
    mask_img = Image.fromarray((mask * 255).astype(np.uint8))

    # Encode the PNG bytes as base64 for the response payload
    buf = io.BytesIO()
    mask_img.save(buf, format="PNG")
    encoded = base64.b64encode(buf.getvalue()).decode("utf-8")

    annotations = {"mask": mask.tolist(), "label": "object"}
    return encoded, annotations
57
+
58
# API endpoint — the decorator was commented out, so the route was never
# registered on this file's `app`; restored so POST /segment works.
@app.post("/segment")
async def segment_endpoint(file: ImageRequest):
    """Decode a base64 image, segment it with SAM, and return the mask.

    Returns ``{"mask": <data URL>, "annotations": ...}`` on success, or
    ``{"output": False, "error": ...}`` when decoding/segmentation fails
    (e.g. invalid base64).
    """
    try:
        # Decode the base64 string into raw image bytes
        image_data = base64.b64decode(file.image_base64)
        image = Image.open(io.BytesIO(image_data))
        image = preprocess_image(image)

        # Segment the image
        mask_base64, annotations = segment_image(image)

        # Return the response
        return {
            "mask": f"data:image/png;base64,{mask_base64}",
            "annotations": annotations
        }
    except Exception as e:
        # On failure (e.g. invalid base64) report False instead of a 500.
        # NOTE(review): "debug" echoes the whole request — including the full
        # base64 payload — back to the client; kept for compatibility, but
        # consider dropping it.
        return {"output": False, "error": str(e), "debug": file}
78
+
79
+ # Per compatibilità con Hugging Face Spaces (Uvicorn viene gestito automaticamente)
80
+ # if __name__ == "__main__":
81
+ # import uvicorn
82
+ # uvicorn.run(app, host="0.0.0.0", port=7860)
app.py CHANGED
@@ -1,5 +1,4 @@
1
  from fastapi import FastAPI, File, UploadFile
2
- from pydantic import BaseModel
3
  from transformers import SamModel, SamProcessor
4
  import torch
5
  from PIL import Image
@@ -7,9 +6,6 @@ import numpy as np
7
  import io
8
  import base64
9
 
10
- class ImageRequest(BaseModel):
11
- image_base64: str
12
-
13
  # Inizializza l'app FastAPI
14
  app = FastAPI()
15
 
@@ -18,16 +14,6 @@ model = SamModel.from_pretrained("facebook/sam-vit-base")
18
  processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
19
  model.to("cpu") # Usa CPU per il free tier
20
 
21
- @app.get("/health")
22
- async def health_check():
23
- return {"status": "ok"}
24
-
25
- def preprocess_image(image: Image.Image, size=(320, 320)):
26
- """Ridimensiona l'immagine per velocizzare l'inferenza"""
27
- img = image.convert("RGB")
28
- img = img.resize(size, Image.LANCZOS) # 320x320 è veloce su CPU
29
- return img
30
-
31
  # Funzione per segmentare l'immagine
32
  def segment_image(image: Image.Image):
33
  # Prepara l'input per SAM
@@ -57,24 +43,19 @@ def segment_image(image: Image.Image):
57
 
58
  # Endpoint API
59
  @app.post("/segment")
60
- async def segment_endpoint(file: ImageRequest):
61
- try:
62
- # Decodifica la stringa Base64
63
- image_data = base64.b64decode(file.image_base64)
64
- image = Image.open(io.BytesIO(image_data))
65
- image = preprocess_image(image)
66
-
67
- # Segmenta l'immagine
68
- mask_base64, annotations = segment_image(image)
69
-
70
- # Restituisci la risposta
71
- return {
72
- "mask": f"data:image/png;base64,{mask_base64}",
73
- "annotations": annotations
74
- }
75
- except Exception as e:
76
- # In caso di errore (es. Base64 non valido), restituisci False
77
- return {"output": False, "error": str(e), "debug": file}
78
 
79
  # Per compatibilità con Hugging Face Spaces (Uvicorn viene gestito automaticamente)
80
  if __name__ == "__main__":
 
1
  from fastapi import FastAPI, File, UploadFile
 
2
  from transformers import SamModel, SamProcessor
3
  import torch
4
  from PIL import Image
 
6
  import io
7
  import base64
8
 
 
 
 
9
  # Inizializza l'app FastAPI
10
  app = FastAPI()
11
 
 
14
  processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
15
  model.to("cpu") # Usa CPU per il free tier
16
 
 
 
 
 
 
 
 
 
 
 
17
  # Funzione per segmentare l'immagine
18
  def segment_image(image: Image.Image):
19
  # Prepara l'input per SAM
 
43
 
44
  # Endpoint API
45
@app.post("/segment")
async def segment_endpoint(file: UploadFile = File(...)):
    """Segment an uploaded image and return its mask as a base64 data URL."""
    # Read the uploaded file into memory and decode it as an RGB image
    raw = await file.read()
    img = Image.open(io.BytesIO(raw)).convert("RGB")

    # Run SAM segmentation
    mask_base64, annotations = segment_image(img)

    # Build the JSON response
    return {
        "mask": f"data:image/png;base64,{mask_base64}",
        "annotations": annotations
    }
 
 
 
 
 
59
 
60
  # Per compatibilità con Hugging Face Spaces (Uvicorn viene gestito automaticamente)
61
  if __name__ == "__main__":