Upload 8 files
- README.md +69 -0
- config.json +81 -0
- gitattributes +35 -0
- preprocessor_config.json +25 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +56 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,69 @@
+---
+license: apache-2.0
+tags:
+- vision
+inference: false
+pipeline_tag: zero-shot-object-detection
+---
+
+# Grounding DINO model (base variant)
+
+The Grounding DINO model was proposed in [Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499) by Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, and Lei Zhang. Grounding DINO extends a closed-set object detection model with a text encoder, enabling open-set object detection. The model achieves remarkable results, such as 52.5 AP on COCO zero-shot.
+
+<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/grouding_dino_architecture.png"
+alt="drawing" width="600"/>
+
+<small> Grounding DINO overview. Taken from the <a href="https://arxiv.org/abs/2303.05499">original paper</a>. </small>
+
+## Intended uses & limitations
+
+You can use the raw model for zero-shot object detection: detecting objects in an image from free-form text queries, without any labeled training data for those categories.
+
+### How to use
+
+Here's how to use the model for zero-shot object detection:
+
+```python
+import requests
+
+import torch
+from PIL import Image
+from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
+
+model_id = "IDEA-Research/grounding-dino-base"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+processor = AutoProcessor.from_pretrained(model_id)
+model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
+
+image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+image = Image.open(requests.get(image_url, stream=True).raw)
+# Check for cats and remote controls
+# VERY important: text queries need to be lowercased + end with a dot
+text = "a cat. a remote control."
+
+inputs = processor(images=image, text=text, return_tensors="pt").to(device)
+with torch.no_grad():
+    outputs = model(**inputs)
+
+results = processor.post_process_grounded_object_detection(
+    outputs,
+    inputs.input_ids,
+    box_threshold=0.4,
+    text_threshold=0.3,
+    target_sizes=[image.size[::-1]]
+)
+```
+
+### BibTeX entry and citation info
+
+```bibtex
+@misc{liu2023grounding,
+      title={Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection},
+      author={Shilong Liu and Zhaoyang Zeng and Tianhe Ren and Feng Li and Hao Zhang and Jie Yang and Chunyuan Li and Jianwei Yang and Hang Su and Jun Zhu and Lei Zhang},
+      year={2023},
+      eprint={2303.05499},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
+```
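A hedged note on the snippet above: in recent `transformers` releases, `post_process_grounded_object_detection` returns one dict per image with `scores`, `labels` (the matched text phrases), and `boxes` in `(x_min, y_min, x_max, y_max)` pixel coordinates, so a minimal way to read the detections is:

```python
for result in results:
    for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
        box = [round(coord, 2) for coord in box.tolist()]
        print(f"Detected '{label}' with confidence {round(score.item(), 3)} at {box}")
```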
config.json
ADDED
@@ -0,0 +1,81 @@
+{
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "GroundingDinoForObjectDetection"
+  ],
+  "attention_dropout": 0.0,
+  "auxiliary_loss": false,
+  "backbone": null,
+  "backbone_config": {
+    "depths": [
+      2,
+      2,
+      18,
+      2
+    ],
+    "embed_dim": 128,
+    "hidden_size": 1024,
+    "image_size": 384,
+    "model_type": "swin",
+    "num_heads": [
+      4,
+      8,
+      16,
+      32
+    ],
+    "out_features": [
+      "stage2",
+      "stage3",
+      "stage4"
+    ],
+    "out_indices": [
+      2,
+      3,
+      4
+    ],
+    "window_size": 12
+  },
+  "backbone_kwargs": null,
+  "bbox_cost": 5.0,
+  "bbox_loss_coefficient": 5.0,
+  "class_cost": 1.0,
+  "d_model": 256,
+  "decoder_attention_heads": 8,
+  "decoder_bbox_embed_share": true,
+  "decoder_ffn_dim": 2048,
+  "decoder_layers": 6,
+  "decoder_n_points": 4,
+  "disable_custom_kernels": false,
+  "dropout": 0.1,
+  "embedding_init_target": true,
+  "encoder_attention_heads": 8,
+  "encoder_ffn_dim": 2048,
+  "encoder_layers": 6,
+  "encoder_n_points": 4,
+  "focal_alpha": 0.25,
+  "fusion_dropout": 0.0,
+  "fusion_droppath": 0.1,
+  "giou_cost": 2.0,
+  "giou_loss_coefficient": 2.0,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "layer_norm_eps": 1e-05,
+  "max_text_len": 256,
+  "model_type": "grounding-dino",
+  "num_feature_levels": 4,
+  "num_queries": 900,
+  "position_embedding_type": "sine",
+  "positional_embedding_temperature": 20,
+  "query_dim": 4,
+  "text_config": {
+    "model_type": "bert"
+  },
+  "text_enhancer_dropout": 0.0,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.0.dev0",
+  "two_stage": true,
+  "two_stage_bbox_embed_share": false,
+  "use_pretrained_backbone": false,
+  "use_timm_backbone": false
+}
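To inspect these hyperparameters without downloading the weights, one option is to load just the config; a minimal sketch, assuming a `transformers` version that ships Grounding DINO (>= 4.40):

```python
from transformers import GroundingDinoConfig

# Fetches and parses the config.json shown above from the Hub.
config = GroundingDinoConfig.from_pretrained("IDEA-Research/grounding-dino-base")
print(config.num_queries)                 # 900 object queries per image
print(config.backbone_config.model_type)  # "swin" vision backbone
print(config.text_config.model_type)      # "bert" text encoder
```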
gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
preprocessor_config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "format": "coco_detection",
+  "image_mean": [
+    0.485,
+    0.456,
+    0.406
+  ],
+  "image_processor_type": "GroundingDinoImageProcessor",
+  "image_std": [
+    0.229,
+    0.224,
+    0.225
+  ],
+  "processor_class": "GroundingDinoProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "longest_edge": 1333,
+    "shortest_edge": 800
+  }
+}
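Note that `rescale_factor` is exactly 1/255: the processor resizes the shortest edge to 800 (capping the longest at 1333), rescales pixel values to [0, 1], then normalizes with the ImageNet mean/std listed above. A minimal sketch of the per-pixel arithmetic:

```python
# Per-pixel normalization implied by the config above (red channel shown).
pixel = 128                                # raw 8-bit value
rescaled = pixel * 0.00392156862745098     # do_rescale: exactly pixel / 255
normalized = (rescaled - 0.485) / 0.229    # do_normalize with image_mean / image_std
print(round(normalized, 4))                # ~0.0741
```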
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "processor_class": "GroundingDinoProcessor",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
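Since `tokenizer_class` is `BertTokenizer` with `do_lower_case: true`, the text queries from the README are lowercased and split into WordPiece tokens. A quick sanity check (a sketch, assuming the files in this upload are what the Hub serves):

```python
from transformers import AutoTokenizer

# Loads the tokenizer defined by tokenizer_config.json and vocab.txt above.
tokenizer = AutoTokenizer.from_pretrained("IDEA-Research/grounding-dino-base")
print(tokenizer.tokenize("a cat. a remote control."))
# expected along the lines of: ['a', 'cat', '.', 'a', 'remote', 'control', '.']
```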
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.