Xenova (HF staff) committed
Commit 2dfc49e
1 Parent(s): d81ceb9

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "CIDAS/clipseg-rd64",
+   "architectures": [
+     "CLIPSegForImageSegmentation"
+   ],
+   "conditional_layer": 0,
+   "decoder_attention_dropout": 0.0,
+   "decoder_hidden_act": "quick_gelu",
+   "decoder_intermediate_size": 2048,
+   "decoder_num_attention_heads": 4,
+   "extract_layers": [
+     3,
+     6,
+     9
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clipseg",
+   "projection_dim": 512,
+   "reduce_dim": 64,
+   "text_config": {
+     "bos_token_id": 0,
+     "dropout": 0.0,
+     "eos_token_id": 2,
+     "model_type": "clipseg_text_model"
+   },
+   "transformers_version": "4.37.0.dev0",
+   "use_complex_transposed_convolution": false,
+   "vision_config": {
+     "dropout": 0.0,
+     "model_type": "clipseg_vision_model",
+     "patch_size": 16
+   }
+ }
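
This is the standard configuration for the "rd64" CLIPSeg variant: the decoder reads features from encoder layers 3, 6 and 9 ("extract_layers") and projects them down to 64 dimensions ("reduce_dim", the "rd64" in the name). A minimal sketch of inspecting these values with the Python transformers library, assuming the upstream CIDAS/clipseg-rd64 checkpoint named in "_name_or_path" is reachable (a local copy of this config.json works the same way):

```python
from transformers import CLIPSegConfig

# Load the configuration shown above; "CIDAS/clipseg-rd64" is taken from
# "_name_or_path" and is an assumption about the source checkpoint.
config = CLIPSegConfig.from_pretrained("CIDAS/clipseg-rd64")

print(config.extract_layers)            # [3, 6, 9]: encoder layers fed to the decoder
print(config.reduce_dim)                # 64: the "rd64" in the model name
print(config.vision_config.patch_size)  # 16: ViT-B/16-style vision backbone
```
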
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6134b4555d4eb6a758caae66f7338d39a75a25b061bec1fa089a372badbc3c74
+ size 545201523
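
onnx/model.onnx is stored with Git LFS, so the commit records only this three-line pointer: the LFS spec version, the SHA-256 of the real payload, and its size (545201523 bytes, about 545 MB of float32 weights). A sketch of fetching the actual file with huggingface_hub, the same library named in the commit message; the repo id Xenova/clipseg-rd64 is an assumption based on the commit author:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer above and returns a local cache path to the
# ~545 MB ONNX file. The repo id is an assumption; adjust as needed.
path = hf_hub_download(repo_id="Xenova/clipseg-rd64", filename="onnx/model.onnx")
print(path)
```
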
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3a00e7d77ccf59ed38c2a2d1b64de430145aa9fe9048bdcd98800a6dda4d864
+ size 138548175
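
The quantized export is roughly a quarter of the float32 file's size (138548175 vs. 545201523 bytes), as expected when 32-bit weights are stored as 8-bit integers. A sketch of loading it with onnxruntime; the graph's input signature is not documented in the commit, so it is queried rather than assumed:

```python
import onnxruntime as ort

# Open the quantized graph added above (path assumes a local checkout).
session = ort.InferenceSession(
    "onnx/model_quantized.onnx", providers=["CPUExecutionProvider"]
)

# The commit does not state the input names/shapes, so inspect them:
for inp in session.get_inputs():
    print(inp.name, inp.shape, inp.type)
```
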
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "processor_class": "CLIPSegProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 352,
+     "width": 352
+   }
+ }
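
These are the CLIPSeg defaults: resize to 352x352 with bilinear resampling ("resample": 2 in PIL's numbering), rescale pixel values by 1/255, then normalize with the ImageNet mean and standard deviation. A sketch of running the processor on a dummy image, under the same CIDAS/clipseg-rd64 checkpoint assumption as above:

```python
from PIL import Image
from transformers import CLIPSegProcessor

# A folder containing this preprocessor_config.json plus the tokenizer
# files would work equally well in place of the hub checkpoint.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64")

image = Image.new("RGB", (640, 480))  # dummy input for illustration
inputs = processor(text=["a cat"], images=image, return_tensors="pt")

# Resized to 352x352 and normalized as specified above.
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 352, 352])
```
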
quantize_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "per_channel": true,
+   "reduce_range": true,
+   "per_model_config": {
+     "model": {
+       "op_types": [
+         "Transpose",
+         "Add",
+         "MatMul",
+         "Reshape",
+         "Sub",
+         "Less",
+         "ConstantOfShape",
+         "ArgMax",
+         "Expand",
+         "Constant",
+         "Softmax",
+         "ReduceMean",
+         "Where",
+         "Concat",
+         "Relu",
+         "Equal",
+         "Slice",
+         "Pow",
+         "Sqrt",
+         "Gemm",
+         "Range",
+         "Sigmoid",
+         "Unsqueeze",
+         "Cast",
+         "ConvTranspose",
+         "Div",
+         "Gather",
+         "Shape",
+         "Conv",
+         "Squeeze",
+         "Flatten",
+         "Mul"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
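
This file records how onnx/model_quantized.onnx was produced: dynamic quantization over the listed op types, with weights stored as unsigned 8-bit integers (QUInt8), per-channel scales, and reduced range. The conversion script itself is not part of the commit; a minimal sketch that matches these settings using onnxruntime's dynamic quantizer:

```python
from onnxruntime.quantization import QuantType, quantize_dynamic

# Reproduces the per_channel / reduce_range / QUInt8 settings recorded
# above. File paths are assumptions; the real script is not in the commit.
quantize_dynamic(
    model_input="onnx/model.onnx",
    model_output="onnx/model_quantized.onnx",
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QUInt8,
)
```
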
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
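
As is conventional for CLIP tokenizers, there is no dedicated padding symbol: both the pad token and the unk token alias <|endoftext|>. A quick check, again under the CIDAS/clipseg-rd64 checkpoint assumption:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("CIDAS/clipseg-rd64")

# pad and unk both resolve to the same id as <|endoftext|>, per the map above.
print(tokenizer.bos_token_id)                            # 49406
print(tokenizer.eos_token_id)                            # 49407
print(tokenizer.pad_token_id == tokenizer.eos_token_id)  # True
```
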
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "CLIPSegProcessor",
+   "tokenizer_class": "CLIPTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<|endoftext|>"
+ }
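
The tokenizer is a lower-casing BPE CLIPTokenizer with CLIP's fixed 77-token context, where 49406 and 49407 are the registered start/end specials. A sketch of a round trip, under the same checkpoint assumption as above:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("CIDAS/clipseg-rd64")

ids = tokenizer("A photo of a CAT").input_ids
print(ids[0], ids[-1])             # 49406 49407: bos/eos wrap every sequence
print(tokenizer.model_max_length)  # 77: CLIP's fixed text context length
print(tokenizer.decode(ids, skip_special_tokens=True))  # lower-cased by do_lower_case
```
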
vocab.json ADDED
The diff for this file is too large to render. See raw diff