Preparing submission
tasks/image.py
CHANGED
@@ -12,6 +12,13 @@ from .utils.emissions import tracker, clean_emissions_data, get_space_info
 from dotenv import load_dotenv
 load_dotenv()
 
+# Dependencies for inference
+import logging
+from pathlib import Path
+from ultralytics import YOLO
+from torch import device
+from torch.cuda import is_available
+
 router = APIRouter()
 
 DESCRIPTION = "Frugal Object Detector for forest fires"
@@ -97,18 +104,13 @@ async def evaluate_image(request: ImageEvaluationRequest):
     # YOUR MODEL INFERENCE CODE HERE
     # Update the code below to replace the random baseline with your model inference
     #--------------------------------------------------------------------------------------------
-    # Import strict minimum
-    from pathlib import Path
-    from ultralytics import YOLO
-    from torch import device
-    from torch.cuda import is_available
 
     THRESHOLD = 0.18
 
     # Load model
     model_path = Path("tasks", "models")
-    model_name = "
-
+    model_name = "best_gpu_fp16.pt"
+    logging.info(f"Loading model {model_name}")
     model = YOLO(Path(model_path, model_name), task="detect")
     device_name = device("cuda" if is_available() else "cpu")
     IMGSIZE = 1280
@@ -118,7 +120,7 @@ async def evaluate_image(request: ImageEvaluationRequest):
     pred_boxes = []
     true_boxes_list = []  # List of lists, each inner list contains boxes for one image
 
-
+    logging.info(f"Inference start on device: {device_name}")
     for example in test_dataset:
         # Parse true annotation (YOLO format: class_id x_center y_center width height)
         annotation = example.get("annotations", "").strip()
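
For context, a minimal sketch of how the pieces introduced by this diff are typically wired together inside evaluate_image. The diff only shows the imports, the model loading, and the logging calls; the parse_boxes and predict_boxes helpers, the model.predict(...) arguments, and the dummy example below are illustrative assumptions, not code from this Space.

import logging
from pathlib import Path

import numpy as np
from torch import device
from torch.cuda import is_available
from ultralytics import YOLO

THRESHOLD = 0.18  # confidence cut-off from the diff
IMGSIZE = 1280    # inference resolution from the diff

# Model loading and device selection as introduced by the diff
model_path = Path("tasks", "models")
model_name = "best_gpu_fp16.pt"
logging.info(f"Loading model {model_name}")
model = YOLO(Path(model_path, model_name), task="detect")
device_name = device("cuda" if is_available() else "cpu")


def parse_boxes(annotation: str) -> list[list[float]]:
    """Parse a YOLO-format annotation string ("class_id x_center y_center width height"
    per line) into a list of normalized xywh boxes."""
    boxes = []
    for line in annotation.strip().splitlines():
        parts = line.split()
        if len(parts) == 5:
            boxes.append([float(v) for v in parts[1:]])  # drop class_id
    return boxes


def predict_boxes(image) -> list[list[float]]:
    """Run one image through the detector and return normalized xywh boxes."""
    results = model.predict(
        image,
        conf=THRESHOLD,
        imgsz=IMGSIZE,
        device=device_name,
        verbose=False,
    )
    return results[0].boxes.xywhn.tolist()


# Hypothetical stand-in for one element of test_dataset
example = {
    "image": np.zeros((720, 1280, 3), dtype=np.uint8),  # placeholder frame
    "annotations": "0 0.5 0.5 0.2 0.3",
}

logging.info(f"Inference start on device: {device_name}")
true_boxes = parse_boxes(example.get("annotations", ""))
pred_boxes = predict_boxes(example["image"])
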
tasks/models/{pruned_fp16.pt → best_gpu_fp16.pt}
RENAMED
File without changes
tasks/models/cpu_fp16.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df234f73cd50dcb745021703a067a350e7b1ff192cf5d9a1f67af3527fa9f0d3
+size 5322682
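
tasks/models/cpu_fp16.onnx is committed as a Git LFS pointer (the ~5.3 MB weights themselves live in LFS). Below is a hedged sketch of how such an FP16 ONNX copy could be produced from the renamed checkpoint and used for CPU inference with Ultralytics; the export arguments, the renaming step, and the predict call are assumptions about how this file was likely generated, not something recorded in the diff.

from pathlib import Path

import numpy as np
from ultralytics import YOLO

models_dir = Path("tasks", "models")

# Assumed export recipe (not recorded in the diff). Half-precision ONNX export
# generally expects a CUDA device, hence device=0.
pt_model = YOLO(models_dir / "best_gpu_fp16.pt", task="detect")
exported = pt_model.export(format="onnx", imgsz=1280, half=True, device=0)
# export() writes best_gpu_fp16.onnx next to the checkpoint; the committed file
# would then have been renamed to cpu_fp16.onnx.

# CPU-only inference with the committed ONNX file (requires onnxruntime);
# Ultralytics dispatches .onnx weights to its ONNX Runtime backend.
onnx_model = YOLO(models_dir / "cpu_fp16.onnx", task="detect")
frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder image
results = onnx_model.predict(frame, conf=0.18, imgsz=1280, device="cpu", verbose=False)
print(results[0].boxes.xywhn)
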