Andresckamilo committed
Commit bf2847e
1 Parent(s): 7dbf033

Initial commit

Browse files:
- README.md +30 -3
- lora/lora.safetensors +3 -0
- model/tokenizer.model.v3 +0 -0
- script.py +20 -0
README.md CHANGED
@@ -1,3 +1,30 @@
-
-
-
+
+# Mistral LoRA Adaptors
+
+This repository contains a Mistral model with LoRA adaptors.
+
+## Usage
+
+Run the following script to load the model and generate text:
+
+```python
+from mistral_inference.model import Transformer
+from mistral_inference.generate import generate
+from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+from mistral_common.protocol.instruct.messages import UserMessage
+from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
+def main():
+    tokenizer = MistralTokenizer.from_file("model/tokenizer.model.v3")
+    model = Transformer.from_folder("model")
+    model.load_lora("lora/lora.safetensors")
+
+    completion_request = ChatCompletionRequest(messages=[UserMessage(content="Explain Machine Learning to me in a nutshell.")])
+    tokens = tokenizer.encode_chat_completion(completion_request).tokens
+    out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+    result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0])
+    print(result)
+
+if __name__ == "__main__":
+    main()
+```
lora/lora.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeb94a64b55f6408b420ec980a822c2212da24a222eb2d4238237ea527bbb4d9
+size 335594288
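The three lines above are a Git LFS pointer, not the adapter weights themselves: `oid` is the SHA-256 of the real file and `size` is its byte count, so the ~336 MB adapter is only present after `git lfs pull` resolves the pointer. A minimal sketch of checking the resolved file against this pointer; the values are copied from the pointer above, and the check itself is an illustration, not part of the commit:

```python
import hashlib

# Expected values, copied from the LFS pointer above.
EXPECTED_OID = "eeb94a64b55f6408b420ec980a822c2212da24a222eb2d4238237ea527bbb4d9"
EXPECTED_SIZE = 335594288

def verify_lfs_file(path: str) -> bool:
    """Hash the file in 1 MiB chunks and compare against the pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and digest.hexdigest() == EXPECTED_OID

if __name__ == "__main__":
    # Assumes `git lfs pull` has replaced the pointer with the real weights.
    print(verify_lfs_file("lora/lora.safetensors"))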
model/tokenizer.model.v3 ADDED
Binary file (587 kB).
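Since `model/tokenizer.model.v3` is committed directly rather than through LFS, it can be exercised on its own as a quick sanity check, without downloading the 7B weights. A minimal sketch using only the tokenizer calls that already appear in `script.py` below:

```python
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

# Load the v3 tokenizer shipped in this commit and round-trip a prompt.
tokenizer = MistralTokenizer.from_file("model/tokenizer.model.v3")
request = ChatCompletionRequest(messages=[UserMessage(content="Hello!")])
tokens = tokenizer.encode_chat_completion(request).tokens
print(len(tokens))                                            # number of prompt tokens
print(tokenizer.instruct_tokenizer.tokenizer.decode(tokens))  # decoded prompt text
```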
script.py ADDED
@@ -0,0 +1,20 @@
+
+from mistral_inference.model import Transformer
+from mistral_inference.generate import generate
+from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+from mistral_common.protocol.instruct.messages import UserMessage
+from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
+def main():
+    tokenizer = MistralTokenizer.from_file("model/tokenizer.model.v3")
+    model = Transformer.from_folder("model")
+    model.load_lora("lora/lora.safetensors")
+
+    completion_request = ChatCompletionRequest(messages=[UserMessage(content="Explain Machine Learning to me in a nutshell.")])
+    tokens = tokenizer.encode_chat_completion(completion_request).tokens
+    out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+    result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0])
+    print(result)
+
+if __name__ == "__main__":
+    main()
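Note that this commit ships only the tokenizer and the LoRA adapter: `Transformer.from_folder("model")` also expects the base model's weights (`params.json` plus the consolidated safetensors) inside `model/`. A minimal sketch of staging them with `huggingface_hub`; the base repo id is an assumption, since the commit does not say which Mistral checkpoint the adapter was trained on:

```python
from huggingface_hub import snapshot_download

# Assumption: the adapter targets Mistral-7B-Instruct-v0.3; the commit
# itself does not name the base model.
snapshot_download(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    allow_patterns=["params.json", "consolidated.safetensors"],
    local_dir="model",
)
```

With those files in place, `python script.py` loads the base model, applies the adapter via `load_lora`, and prints a greedy (temperature 0.0) completion of at most 64 tokens.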