JARINJV committed
Commit c5f8180
1 Parent(s): 95ec0be

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 mistral-7b-instruct-v0.3.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 mistral-7b-instruct-v0.3.bf16.gguf filter=lfs diff=lfs merge=lfs -text
+mistral-7b-instruct-v0.3.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,10 +1,7 @@
 ---
 license: apache-2.0
-tags:
-- autoquant
-- gguf
-extra_gated_description: If you want to learn more about how we process your personal
-  data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
+base_model: mistralai/Mistral-7B-v0.3
+extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
 ---
 
 # Model Card for Mistral-7B-Instruct-v0.3
@@ -162,18 +159,19 @@ def get_current_weather(location: str, format: str):
 conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
 tools = [get_current_weather]
 
-# render the tool use prompt as a string:
-tool_use_prompt = tokenizer.apply_chat_template(
+
+# format and tokenize the tool use prompt
+inputs = tokenizer.apply_chat_template(
     conversation,
     tools=tools,
-    tokenize=False,
     add_generation_prompt=True,
+    return_dict=True,
+    return_tensors="pt",
 )
 
-inputs = tokenizer(tool_use_prompt, return_tensors="pt")
-
 model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
 
+inputs.to(model.device)
 outputs = model.generate(**inputs, max_new_tokens=1000)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
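For reference, the new side of this hunk assembles into roughly the following runnable form. This is a minimal sketch: the `model_id`, tokenizer setup, and the `get_current_weather` docstring are assumed from earlier in the README and are not part of the hunk itself.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed from earlier in the README (not shown in this hunk).
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)

def get_current_weather(location: str, format: str):
    """Get the current weather.

    Args:
        location: The city and state, e.g. San Francisco, CA
        format: The temperature unit to use. (choices: ["celsius", "fahrenheit"])
    """
    pass

conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
tools = [get_current_weather]

# format and tokenize the tool use prompt in a single call;
# return_dict=True / return_tensors="pt" replace the old two-step
# tokenize=False render plus tokenizer(tool_use_prompt, return_tensors="pt")
inputs = tokenizer.apply_chat_template(
    conversation,
    tools=tools,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
)

model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

inputs.to(model.device)  # BatchEncoding.to moves the input tensors in place
outputs = model.generate(**inputs, max_new_tokens=1000)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```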
mistral-7b-instruct-v0.3.Q4_1.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad285df95654b1b5b5cfe8d841b9ffbffb6dd35cb40b9764f2afa06df8d953cb
+size 4557889472
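Since the commit message says the folder was pushed with huggingface_hub, a matching way to fetch the newly added Q4_1 quant is a sketch like this; the `repo_id` below is a placeholder for whatever repository this commit belongs to.

```python
from huggingface_hub import hf_hub_download

# Hypothetical repo_id; substitute the repository this commit lives in.
gguf_path = hf_hub_download(
    repo_id="JARINJV/mistral-7b-instruct-v0.3-gguf",
    filename="mistral-7b-instruct-v0.3.Q4_1.gguf",
)
print(gguf_path)  # local cache path to the ~4.6 GB quantized model
```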
mistral-7b-instruct-v0.3.bf16.gguf CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f828be62045c220ebdce81502c6f7738de089fde094ba7a4a0de80c362279b3
-size 14497341056
+oid sha256:c0f44c427ef088acf4c52eab0034e47624e7fe8c0c3eda66c46102a3feba8894
+size 14497341376
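Because only the LFS pointer's oid and size change here, a downloaded copy can be checked against the new pointer with a minimal sketch like this; the local path is a placeholder.

```python
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so the ~14.5 GB model never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the updated LFS pointer above
expected = "c0f44c427ef088acf4c52eab0034e47624e7fe8c0c3eda66c46102a3feba8894"
assert file_sha256("mistral-7b-instruct-v0.3.bf16.gguf") == expected
```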