linhcuem commited on
Commit
bbe6668
1 Parent(s): d63b241

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -0
app.py CHANGED
@@ -11,4 +11,39 @@ model_ids = ['linhcuem/gold_yolov5m']
11
 
12
  current_model_id = model_ids[-1]
13
  model = yolov5.load(current_model_id)
 
 
 
 
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  current_model_id = model_ids[-1]
13
  model = yolov5.load(current_model_id)
14
# Demo inputs for the Gradio interface: (image path, confidence threshold, model id).
# Every example uses the same threshold and model, so build the triples from the
# bare file names. Duplicates (_7, _13) are kept deliberately, matching the app.
_EXAMPLE_MODEL = 'linhcuem/gold_yolov5m'
_EXAMPLE_FILES = [
    'yen thien viet_4.jpg',
    'yen thien viet_6.jpg',
    'yen thien viet_7.jpg',
    'yen thien viet_7.jpg',
    'yen thien viet_8.jpg',
    'yen thien viet_9.jpg',
    'yen thien viet_94.jpg',
    'yen thien viet_13.jpg',
    'yen thien viet_16.jpg',
    'yen thien viet_19.jpg',
    'yen thien viet_13.jpg',
]
examples = [['test_images/' + fname, 0.25, _EXAMPLE_MODEL] for fname in _EXAMPLE_FILES]
17
def predict(image, threshold=0.25, model_id=None):
    """Run YOLOv5 detection on *image* and return the rendered result.

    Args:
        image: Input image (PIL image, as supplied by ``gr.Image(type="pil")``).
        threshold: Confidence threshold applied to detections. Defaults to 0.25.
        model_id: Hub repo id of the model to use. When it differs from the
            currently loaded model, the weights are reloaded.

    Returns:
        A ``PIL.Image`` with the detection boxes drawn on it.
    """
    # Swap models if the caller picked a different one in the dropdown.
    global current_model_id
    global model
    if model_id is not None and model_id != current_model_id:
        # BUG FIX: the original only updated ``current_model_id`` and never
        # reloaded the weights, so inference kept using the stale model.
        model = yolov5.load(model_id)
        current_model_id = model_id

    # Get the model's expected input size from its hub config.json.
    config_path = hf_hub_download(repo_id=model_id, filename="config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    input_size = config["input_size"]

    # Perform inference and render the boxes onto the image.
    model.conf = threshold
    results = model(image, size=input_size)
    numpy_image = results.render()[0]
    output_image = Image.fromarray(numpy_image)
    return output_image
36
+
37
# Build and launch the Gradio demo. NOTE(review): ``app_title`` must be defined
# earlier in this file — it is not visible in this chunk; confirm it exists.
gr.Interface(
    title=app_title,
    description="DO ANH DAT",
    fn=predict,
    inputs=[
        gr.Image(type="pil"),
        # Confidence threshold slider, forwarded to predict().
        gr.Slider(maximum=1, step=0.01, value=0.25),
        # BUG FIX: the original referenced the undefined name ``models_ids``
        # (NameError at startup); the module defines ``model_ids``.
        gr.Dropdown(model_ids, value=model_ids[-1]),
    ],
    outputs=gr.Image(type="pil"),
    examples=examples,
    # Cache example outputs only when there are examples to cache.
    cache_examples=bool(examples),
).launch(enable_queue=True)