wnma3mz committed (verified)
Commit bb668d7 · 1 parent: d7880c3

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,28 @@
+ ---
+ license: mit
+ license_name: deepseek
+ license_link: LICENSE
+ pipeline_tag: text-generation
+ library_name: transformers
+ base_model:
+ - deepseek-ai/Janus-Pro-1B
+ tags:
+ - chat
+ ---
+
+ This model is derived from https://huggingface.co/deepseek-ai/Janus-Pro-1B. The main modifications are:
+
+ - The `.bin` weight files are converted to safetensors.
+ - A `chat_template` is added.
+
+ `4bit` refers mainly to quantizing the LLM part to 4 bits.
+
+ ## Quick Start
+
+ On macOS (Apple silicon), use the [mlx](https://github.com/ml-explore/mlx) framework via https://github.com/wnma3mz/tLLM:
+
+ ```bash
+ tllm.server --model_path $MODEL_PATH --hostname localhost --is_local --client_size 1
+ ```
+
+ `$MODEL_PATH` is a Hub id or local path, e.g. `wnma3mz/Janus-Pro-1B-4bit`.
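
Once the server is running, it can be queried over HTTP. A minimal sketch, assuming tLLM exposes an OpenAI-compatible `/v1/chat/completions` endpoint; the port `8022` is hypothetical, so check the tLLM README for the actual default:

```python
import requests

# Assumption: tLLM serves an OpenAI-compatible chat endpoint.
# The port (8022) is hypothetical -- check the tLLM README for the default.
URL = "http://localhost:8022/v1/chat/completions"

payload = {
    "model": "wnma3mz/Janus-Pro-1B-4bit",
    "messages": [{"role": "user", "content": "Hello!"}],
}
resp = requests.post(URL, json=payload, timeout=60)
print(resp.json()["choices"][0]["message"]["content"])
```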
config.json ADDED
@@ -0,0 +1,87 @@
+ {
+   "aligner_config": {
+     "cls": "MlpProjector",
+     "model_type": "aligner",
+     "params": {
+       "depth": 2,
+       "input_dim": 1024,
+       "n_embed": 2048,
+       "projector_type": "mlp_gelu"
+     }
+   },
+   "architectures": [
+     "JanusProConditionalGeneration"
+   ],
+   "gen_aligner_config": {
+     "cls": "MlpProjector",
+     "model_type": "gen_aligner",
+     "params": {
+       "depth": 2,
+       "input_dim": 8,
+       "n_embed": 2048,
+       "projector_type": "mlp_gelu"
+     }
+   },
+   "gen_head_config": {
+     "cls": "vision_head",
+     "model_type": "gen_head",
+     "params": {
+       "image_token_embed": 2048,
+       "image_token_size": 16384,
+       "n_embed": 2048
+     }
+   },
+   "gen_vision_config": {
+     "cls": "VQ-16",
+     "model_type": "gen_vision",
+     "params": {
+       "image_token_size": 16384,
+       "n_embed": 8
+     }
+   },
+   "num_hidden_layers": 24,
+   "language_config": {
+     "hidden_size": 2048,
+     "intermediate_size": 5632,
+     "max_position_embeddings": 16384,
+     "model_type": "llama",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "num_key_value_heads": 16,
+     "torch_dtype": "bfloat16",
+     "rms_norm_eps": 1e-6,
+     "tie_word_embeddings": false,
+     "vocab_size": 102400
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.33.1",
+   "quantization": {
+     "group_size": 64,
+     "bits": 4
+   },
+   "quantization_config": {
+     "group_size": 64,
+     "bits": 4
+   },
+   "vision_config": {
+     "cls": "CLIPVisionTower",
+     "model_type": "vision",
+     "use_head": true,
+     "ignore_head": true,
+     "image_size": 384,
+     "patch_size": 16,
+     "hidden_size": 1024,
+     "num_hidden_layers": 24,
+     "num_attention_heads": 16,
+     "intermediate_size": 4096,
+     "mlp_ratio": 4,
+     "global_pool": "map",
+     "use_checkpoint": false,
+     "params": {
+       "image_size": 384,
+       "model_name": "siglip_large_patch16_384",
+       "select_feature": "same",
+       "select_layer": -1
+     }
+   }
+ }
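
The duplicated `quantization` / `quantization_config` blocks (MLX and transformers read different keys) say the weights are stored as 4-bit codes with per-group parameters over groups of 64 values. A minimal numpy sketch of group-wise affine quantization under those settings; it illustrates the idea, not MLX's exact kernel:

```python
import numpy as np

BITS, GROUP_SIZE = 4, 64  # from the "quantization" block above

def quantize_row(w: np.ndarray):
    """Affine-quantize one weight row in groups of 64 to 4-bit codes."""
    groups = w.reshape(-1, GROUP_SIZE)
    lo = groups.min(axis=1, keepdims=True)
    hi = groups.max(axis=1, keepdims=True)
    scale = np.maximum((hi - lo) / (2**BITS - 1), 1e-8)
    codes = np.round((groups - lo) / scale).astype(np.uint8)  # values in [0, 15]
    return codes, scale, lo

def dequantize_row(codes, scale, lo):
    return (codes * scale + lo).reshape(-1)

w = np.random.randn(2048).astype(np.float32)
codes, scale, lo = quantize_row(w)
err = np.abs(dequantize_row(codes, scale, lo) - w).max()
print(f"max reconstruction error: {err:.4f}")  # small relative to the weight range
```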
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1306d068356ec26c1ef49f3205c9b4ac61e461ba4154eb509ebae4abc13928b2
+ size 1803190693
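
The weights are stored via Git LFS; the pointer above records the object's SHA-256 and size in bytes. A small standard-library sketch for checking a downloaded copy against that pointer (the local filename is an assumption):

```python
import hashlib
from pathlib import Path

# Values from the LFS pointer above.
EXPECTED_OID = "1306d068356ec26c1ef49f3205c9b4ac61e461ba4154eb509ebae4abc13928b2"
EXPECTED_SIZE = 1803190693

def verify(path: str) -> bool:
    """Check a downloaded LFS object against the pointer's size and sha256."""
    p = Path(path)
    if p.stat().st_size != EXPECTED_SIZE:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == EXPECTED_OID

print(verify("model.safetensors"))  # hypothetical local path
```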
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "background_color": [
+     127,
+     127,
+     127
+   ],
+   "do_normalize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "VLMImageProcessor",
+   "image_size": 384,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "min_size": 14,
+   "processor_class": "VLChatProcessor",
+   "rescale_factor": 0.00392156862745098
+ }
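
With `image_mean` and `image_std` both 0.5 and `rescale_factor` = 1/255, preprocessing maps 8-bit pixels to roughly [-1, 1]. A minimal numpy sketch of that normalization using the values above (resizing to `image_size` is omitted):

```python
import numpy as np

# Values from preprocessor_config.json above.
RESCALE = 0.00392156862745098  # 1 / 255
MEAN = np.array([0.5, 0.5, 0.5], dtype=np.float32)
STD = np.array([0.5, 0.5, 0.5], dtype=np.float32)

def normalize(image_uint8: np.ndarray) -> np.ndarray:
    """Map an HxWx3 uint8 image to roughly [-1, 1]."""
    x = image_uint8.astype(np.float32) * RESCALE  # [0, 255] -> [0, 1]
    return (x - MEAN) / STD                       # [0, 1] -> [-1, 1]

# The padding gray from "background_color" maps close to 0.
img = np.full((384, 384, 3), 127, dtype=np.uint8)
print(normalize(img).mean())  # ~ -0.004
```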
processor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_special_token": false,
+   "ignore_id": -100,
+   "image_tag": "<image_placeholder>",
+   "mask_prompt": true,
+   "num_image_tokens": 576,
+   "processor_class": "VLChatProcessor",
+   "sft_format": "deepseek"
+ }
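
`num_image_tokens` is 576 = (384 / 16)², one token per patch of the vision tower in config.json. Presumably the processor expands each `image_tag` occurrence into that many image-token positions; a hypothetical sketch of the idea:

```python
# Values from processor_config.json and config.json above.
IMAGE_TAG = "<image_placeholder>"
NUM_IMAGE_TOKENS = 576  # (image_size / patch_size)^2 = (384 / 16)^2

def expand_image_tags(prompt: str) -> str:
    """Hypothetical sketch: reserve one placeholder per image patch token."""
    return prompt.replace(IMAGE_TAG, IMAGE_TAG * NUM_IMAGE_TOKENS)

expanded = expand_image_tags(f"{IMAGE_TAG}\nDescribe this image.")
print(expanded.count(IMAGE_TAG))  # 576
```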
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "additional_special_tokens": [
+     "<image_placeholder>",
+     "<patch_placeholder>",
+     "<|ref|>",
+     "<|/ref|>",
+     "<|det|>",
+     "<|/det|>",
+     "<|grounding|>",
+     "<|User|>",
+     "<|Assistant|>"
+   ],
+   "bos_token": "<|begin▁of▁sentence|>",
+   "eos_token": "<|end▁of▁sentence|>",
+   "pad_token": "<|▁pad▁|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "bos_token": "<|begin▁of▁sentence|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "model_max_length": 16384,
+   "pad_token": null,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{% break %}{%- endif %}{%- endfor %}{{ns.system_prompt + '\n\n'}}{%- for message in messages %}{%- if message['role'] == 'user' %}{{'<|User|>: ' + message['content'] + '\n\n'}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>: ' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endfor -%}{% if add_generation_prompt %}{{'<|Assistant|>:'}}{% endif %}",
+   "use_default_system_prompt": true
+ }
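
The `chat_template` is the addition called out in the README. A minimal sketch of rendering it with transformers; the repo id comes from the README's Quick Start, and the expected output is inferred from the template:

```python
from transformers import AutoTokenizer

# Repo id from the README's Quick Start.
tok = AutoTokenizer.from_pretrained("wnma3mz/Janus-Pro-1B-4bit")

messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Per the template: the default system prompt, a blank line, then
# "<|User|>: Hello!\n\n<|Assistant|>:"
```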