apepkuss79 committed on
Commit
d0d2ad2
·
verified ·
1 Parent(s): ece0ae8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -24
README.md CHANGED
@@ -28,31 +28,15 @@ library_name: transformers
28
 
29
  <!-- - LlamaEdge version: [v0.14.3](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.14.3) -->
30
 
31
- <!-- - Prompt template
32
 
33
- - Prompt type: `qwen-2.5-coder`
34
 
35
  - Prompt string
36
 
37
- - File-Level Code Completion (Fill in the middle)
38
-
39
- ```text
40
- <|fim_prefix|>{prefix_code}<|fim_suffix|>{suffix_code}<|fim_middle|>
41
- ```
42
-
43
- *Reference: https://github.com/QwenLM/Qwen2.5-Coder?tab=readme-ov-file#3-file-level-code-completion-fill-in-the-middle*
44
-
45
- - Repository-Level Code Completion
46
-
47
- ```text
48
- <|repo_name|>{repo_name}
49
- <|file_sep|>{file_path1}
50
- {file_content1}
51
- <|file_sep|>{file_path2}
52
- {file_content2}
53
- ```
54
-
55
- *Reference: https://github.com/QwenLM/Qwen2.5-Coder?tab=readme-ov-file#4-repository-level-code-completion*
56
 
57
  - Context size: `128000`
58
 
@@ -62,7 +46,7 @@ library_name: transformers
62
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-7B-Q5_K_M.gguf \
63
  llama-api-server.wasm \
64
  --model-name DeepSeek-R1-Distill-Qwen-7B \
65
- --prompt-template qwen-2.5-coder \
66
  --ctx-size 128000
67
  ```
68
 
@@ -71,9 +55,9 @@ library_name: transformers
71
  ```bash
72
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-7B-Q5_K_M.gguf \
73
  llama-chat.wasm \
74
- --prompt-template qwen-2.5-coder \
75
  --ctx-size 128000
76
- ``` -->
77
 
78
  ## Quantized GGUF Models
79
 
 
28
 
29
  <!-- - LlamaEdge version: [v0.14.3](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.14.3) -->
30
 
31
+ - Prompt template
32
 
33
+ - Prompt type: `deepseek-chat-25`
34
 
35
  - Prompt string
36
 
37
+ ```text
38
+ <｜begin▁of▁sentence｜>{system_message}<｜User｜>{user_message_1}<｜Assistant｜>{assistant_message_1}<｜end▁of▁sentence｜><｜User｜>{user_message_2}<｜Assistant｜>
39
+ ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
  - Context size: `128000`
42
 
 
46
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-7B-Q5_K_M.gguf \
47
  llama-api-server.wasm \
48
  --model-name DeepSeek-R1-Distill-Qwen-7B \
49
+ --prompt-template deepseek-chat-25 \
50
  --ctx-size 128000
51
  ```
52
 
 
55
  ```bash
56
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-7B-Q5_K_M.gguf \
57
  llama-chat.wasm \
58
+ --prompt-template deepseek-chat-25 \
59
  --ctx-size 128000
60
+ ```
61
 
62
  ## Quantized GGUF Models
63