mav23 committed on
Commit
745f24b
1 Parent(s): 535d83e

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q3_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q4_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q5_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codestral-22b-v0.1.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,192 @@
+ ---
+ language:
+ - code
+ license: other
+ tags:
+ - code
+ inference: false
+ license_name: mnpl
+ license_link: https://mistral.ai/licences/MNPL-0.1.md
+
+ extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
+ ---
+
+ # Model Card for Codestral-22B-v0.1
+
+
+ ## Encode and Decode with `mistral_common`
+
+ ```py
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+ from mistral_common.protocol.instruct.messages import UserMessage
+ from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
+ mistral_models_path = "MISTRAL_MODELS_PATH"
+
+ tokenizer = MistralTokenizer.v3()
+
+ completion_request = ChatCompletionRequest(messages=[UserMessage(content="Explain Machine Learning to me in a nutshell.")])
+
+ tokens = tokenizer.encode_chat_completion(completion_request).tokens
+ ```
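+
+ Decoding works through the same tokenizer; a minimal round-trip sketch using only the objects defined above:
+
+ ```py
+ # Decode the encoded request back into its string form (special tokens included)
+ print(tokenizer.decode(tokens))
+ ```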
+
+ ## Inference with `mistral_inference`
+
+ ```py
+ from mistral_inference.transformer import Transformer
+ from mistral_inference.generate import generate
+
+ model = Transformer.from_folder(mistral_models_path)
+ out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+
+ result = tokenizer.decode(out_tokens[0])
+
+ print(result)
+ ```
+
+ ## Inference with Hugging Face `transformers`
+
+ ```py
+ import torch
+ from transformers import AutoModelForCausalLM
+
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Codestral-22B-v0.1")
+ model.to("cuda")
+
+ # `tokens` is the plain list of ids from the `mistral_common` snippet above;
+ # generate expects a batched tensor on the model's device
+ input_ids = torch.tensor([tokens], device="cuda")
+ generated_ids = model.generate(input_ids, max_new_tokens=1000, do_sample=True)
+
+ # decode with mistral tokenizer
+ result = tokenizer.decode(generated_ids[0].tolist())
+ print(result)
+ ```
+
+ > [!TIP]
+ > PRs to correct the `transformers` tokenizer so that it gives 1-to-1 the same results as the `mistral_common` reference implementation are very welcome!
+
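+ One way to check the current gap yourself is to tokenize the same message both ways and compare (a minimal sketch; `apply_chat_template` is the standard `transformers` chat API, and the message simply reuses the example above):
+
+ ```py
+ from transformers import AutoTokenizer
+
+ hf_tokenizer = AutoTokenizer.from_pretrained("mistralai/Codestral-22B-v0.1")
+
+ # Tokenize the same user message with the transformers chat template
+ hf_tokens = hf_tokenizer.apply_chat_template(
+     [{"role": "user", "content": "Explain Machine Learning to me in a nutshell."}]
+ )
+
+ # Compare against the `mistral_common` reference tokens from the first snippet
+ print(hf_tokens == tokens)
+ ```
+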
+ ---
+
+ Codestral-22B-v0.1 is trained on a diverse dataset of 80+ programming languages, including the most popular ones, such as Python, Java, C, C++, JavaScript, and Bash (more details in the [Blogpost](https://mistral.ai/news/codestral/)). The model can be queried:
+ - As instruct, for instance to answer any questions about a code snippet (write documentation, explain, refactor) or to generate code following specific indications
+ - As Fill in the Middle (FIM), to predict the middle tokens between a prefix and a suffix (very useful for software development add-ons like in VS Code)
+
+
+ ## Installation
+
+ It is recommended to use `mistralai/Codestral-22B-v0.1` with [mistral-inference](https://github.com/mistralai/mistral-inference).
+
+ ```
+ pip install mistral_inference
+ ```
+
+ ## Download
+
+ ```py
+ from huggingface_hub import snapshot_download
+ from pathlib import Path
+
+ mistral_models_path = Path.home().joinpath('mistral_models', 'Codestral-22B-v0.1')
+ mistral_models_path.mkdir(parents=True, exist_ok=True)
+
+ snapshot_download(repo_id="mistralai/Codestral-22B-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
+ ```
+
+ ### Chat
+
+ After installing `mistral_inference`, a `mistral-chat` CLI command should be available in your environment.
+
+ ```
+ mistral-chat $HOME/mistral_models/Codestral-22B-v0.1 --instruct --max_tokens 256
+ ```
+
+ This will generate an answer to "Write me a function that computes fibonacci in Rust" and should give something along the following lines:
+
+ ```
+ Sure, here's a simple implementation of a function that computes the Fibonacci sequence in Rust. This function takes an integer `n` as an argument and returns the `n`th Fibonacci number.
+
+ fn fibonacci(n: u32) -> u32 {
+     match n {
+         0 => 0,
+         1 => 1,
+         _ => fibonacci(n - 1) + fibonacci(n - 2),
+     }
+ }
+
+ fn main() {
+     let n = 10;
+     println!("The {}th Fibonacci number is: {}", n, fibonacci(n));
+ }
+
+ This function uses recursion to calculate the Fibonacci number. However, it's not the most efficient solution because it performs a lot of redundant calculations. A more efficient solution would use a loop to iteratively calculate the Fibonacci numbers.
+ ```
+
+ ### Fill-in-the-middle (FIM)
+
+ After installing `mistral_inference`, run `pip install --upgrade mistral_common` to make sure you have `mistral_common >= 1.2` installed:
+
+ ```py
+ from mistral_inference.transformer import Transformer
+ from mistral_inference.generate import generate
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+ from mistral_common.tokens.instruct.request import FIMRequest
+
+ tokenizer = MistralTokenizer.v3()
+ model = Transformer.from_folder("~/codestral-22B-240529")
+
+ prefix = """def add("""
+ suffix = """ return sum"""
+
+ request = FIMRequest(prompt=prefix, suffix=suffix)
+
+ tokens = tokenizer.encode_fim(request).tokens
+
+ out_tokens, _ = generate([tokens], model, max_tokens=256, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+ result = tokenizer.decode(out_tokens[0])
+
+ middle = result.split(suffix)[0].strip()
+ print(middle)
+ ```
+
+ This should give something along the following lines:
+
+ ```
+ num1, num2):
+
+     # Add two numbers
+     sum = num1 + num2
+
+     # return the sum
+ ```
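+
+ To reassemble the completed function, concatenate the three pieces (a minimal sketch reusing `prefix`, `middle`, and `suffix` from above; exact whitespace may need adjustment since `middle` was stripped):
+
+ ```py
+ # Stitch prefix, generated middle, and suffix back together
+ print(prefix + middle + "\n" + suffix)
+ ```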
+
+ ## Usage with the `transformers` library
+
+ This model is also compatible with the `transformers` library. First run `pip install -U transformers`, then use the snippet below to get started quickly:
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "mistralai/Codestral-22B-v0.1"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+ model = AutoModelForCausalLM.from_pretrained(model_id)
+
+ text = "Hello my name is"
+ inputs = tokenizer(text, return_tensors="pt")
+
+ outputs = model.generate(**inputs, max_new_tokens=20)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
+
+ By default, `transformers` loads the model in full precision. You may therefore want to further reduce the memory required to run the model through the optimizations available in the HF ecosystem.
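+
+ For example, quantized loading with `bitsandbytes` cuts memory use substantially; a minimal sketch (this assumes `pip install bitsandbytes accelerate`, and the 4-bit settings shown are common illustrative defaults, not values from this card):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+ # 4-bit NF4 quantization with bfloat16 compute
+ quant_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     "mistralai/Codestral-22B-v0.1",
+     quantization_config=quant_config,
+     device_map="auto",
+ )
+ ```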
+
+ ## Limitations
+
+ Codestral-22B-v0.1 does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to
+ make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.
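+
+ In the meantime, a common stopgap is to prepend your own guardrail instructions as a system prompt; a minimal sketch with `mistral_common` (the guardrail wording below is an illustrative assumption, not an official prompt):
+
+ ```py
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+ from mistral_common.protocol.instruct.messages import SystemMessage, UserMessage
+ from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
+ tokenizer = MistralTokenizer.v3()
+
+ # Hypothetical guardrail text -- adapt it to your deployment's policy
+ guardrail = "Only produce code and explanations; refuse requests for malicious software."
+
+ request = ChatCompletionRequest(
+     messages=[
+         SystemMessage(content=guardrail),
+         UserMessage(content="Explain this code snippet."),
+     ]
+ )
+ tokens = tokenizer.encode_chat_completion(request).tokens
+ ```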
+
+ ## License
+
+ Codestral-22B-v0.1 is released under the `MNPL-0.1` license.
+
+ ## The Mistral AI Team
+
+ Albert Jiang, Alexandre Sablayrolles, Alexis Tacnet, Antoine Roux, Arthur Mensch, Audrey Herblin-Stoop, Baptiste Bout, Baudouin de Monicault, Blanche Savary, Bam4d, Caroline Feldman, Devendra Singh Chaplot, Diego de las Casas, Eleonore Arcelin, Emma Bou Hanna, Etienne Metzger, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Harizo Rajaona, Henri Roussez, Jean-Malo Delignon, Jia Li, Justus Murke, Kartik Khandelwal, Lawrence Stewart, Louis Martin, Louis Ternon, Lucile Saulnier, Lélio Renard Lavaud, Margaret Jennings, Marie Pellat, Marie Torelli, Marie-Anne Lachaux, Marjorie Janiewicz, Mickael Seznec, Nicolas Schuhl, Patrick von Platen, Romain Sauvestre, Pierre Stock, Sandeep Subramanian, Saurabh Garg, Sophia Yang, Szymon Antoniak, Teven Le Scao, Thibaut Lavril, Thibault Schueller, Timothée Lacroix, Théophile Gervet, Thomas Wang, Valera Nemychnikova, Wendy Shang, William El Sayed, William Marshall
codestral-22b-v0.1.Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b2c6285e9e25fbf2c325ad68121ba9fdddd2f47877383932fb71f4bd2558b24
+ size 8272094944
codestral-22b-v0.1.Q3_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f77c07e3ab43ee928a25ba10e7cf55538ca973cf1f2c1fad428256b8ca971c1
+ size 10756826848
codestral-22b-v0.1.Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32a9abc241cd8e654313d48916ce233107642ce416eb5258c29a784c2a2a6efa
+ size 11730429664
codestral-22b-v0.1.Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f77c07e3ab43ee928a25ba10e7cf55538ca973cf1f2c1fad428256b8ca971c1
+ size 10756826848
codestral-22b-v0.1.Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e6dece638b10ad7c3f11a6a6184fe2fc9e1834cdd22501f51fe65706409e86f
+ size 9641273056
codestral-22b-v0.1.Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7194e242e3461becbf06e42899c1a22f782d5dd4bfcf7ce669c00c5cc558a428
+ size 12569159392
codestral-22b-v0.1.Q4_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6676222c7e4987e82992b361195f284591aead22e7b4fc3dc1a44707c6797ba5
+ size 13946988256
codestral-22b-v0.1.Q4_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03510510b27740757cdbfea50df5d589fd3f86257359d3c685063d7dfd677542
+ size 13341239008
codestral-22b-v0.1.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03510510b27740757cdbfea50df5d589fd3f86257359d3c685063d7dfd677542
+ size 13341239008
codestral-22b-v0.1.Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d29d1a18c28654b686eecb5471b506706a0ebc7e85813dbc8a64ec612fe0cd90
+ size 12660385504
codestral-22b-v0.1.Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d94da9087fb35cbef7b52e2ca73ffeeef7e8369d532c42fae5f629e4f1e680c
+ size 15324817120
codestral-22b-v0.1.Q5_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81d30d3bbb28f37ed66ab8f429d839ad591bacc7c067b7e04391b1f712ca199a
+ size 16702645984
codestral-22b-v0.1.Q5_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b68866d15df91d088a971f40abeea1b268b774533a9381fecb65c70e2b04609
+ size 15722555104
codestral-22b-v0.1.Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b68866d15df91d088a971f40abeea1b268b774533a9381fecb65c70e2b04609
+ size 15722555104
codestral-22b-v0.1.Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c2b84ffc69b26c5b1b945501fd5ef490b487f10d7095c4f7df7aa3d5e230ea3
+ size 15324817120
codestral-22b-v0.1.Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa8b64e18067f817de052125e7515691310e39b56adb33233ba0eeed75d43854
+ size 18252703456
codestral-22b-v0.1.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e0f6875fc0076c83ad5bca16943c8bc9d3e08df06e724e155b5c764384a7b5c
+ size 23640549088
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "mistral-community/Codestral-22B-v0.1",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 48,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32768
+ }