Update README.md
README.md
@@ -25,8 +25,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
 import torch
 
 tokenizer = AutoTokenizer.from_pretrained("biomap-research/xtrimopglm-1b-clm", trust_remote_code=True, use_fast=True)
-
-model = AutoModelForCausalLM.from_pretrained(config, trust_remote_code=True, torch_dtype=torch.bfloat16)
+model = AutoModelForCausalLM.from_pretrained("biomap-research/xtrimopglm-1b-clm", trust_remote_code=True, torch_dtype=torch.bfloat16)
 if torch.cuda.is_available():
     model = model.cuda()
 model.eval()
@@ -36,7 +35,7 @@ prompt=['', 'MLFVVL', 'LDL', 'VTQA']
 
 for idx, each in enumerate(prompt):
     print(f"Begin generating idx: {idx} with prompt {each}")
-    output = model.chat(tokenizer, each)
+    output = model.chat(tokenizer, each, **gen_kwargs)
     print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
 ```
 
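For reference, below is the updated example assembled into a self-contained script. The `prompt` list comes from the second hunk's context; `gen_kwargs` is defined elsewhere in the README and is not shown in this diff, so the values used here are placeholder assumptions, not the repo's actual settings.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model directly from the hub repo id
# (the previous snippet passed a `config` object to from_pretrained instead).
tokenizer = AutoTokenizer.from_pretrained(
    "biomap-research/xtrimopglm-1b-clm", trust_remote_code=True, use_fast=True
)
model = AutoModelForCausalLM.from_pretrained(
    "biomap-research/xtrimopglm-1b-clm", trust_remote_code=True, torch_dtype=torch.bfloat16
)
if torch.cuda.is_available():
    model = model.cuda()
model.eval()

# Placeholder generation settings: the README defines gen_kwargs outside the
# hunks shown above, so these values are assumptions.
gen_kwargs = {"max_length": 256, "top_p": 0.8, "temperature": 0.8}

prompt = ['', 'MLFVVL', 'LDL', 'VTQA']

for idx, each in enumerate(prompt):
    print(f"Begin generating idx: {idx} with prompt {each}")
    # `chat` is a custom method shipped with the model's remote code
    # (enabled by trust_remote_code=True).
    output = model.chat(tokenizer, each, **gen_kwargs)
    print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
```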