Update to deepseek-coder-7b-base-v1.5 in code

#1
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -25,8 +25,8 @@ Here is an example of how to use our model.
25
  ```python
26
  from transformers import AutoTokenizer, AutoModelForCausalLM
27
  import torch
28
- tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-6.7b-base", trust_remote_code=True)
29
- model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-6.7b-base", trust_remote_code=True).cuda()
30
  input_text = "#write a quick sort algorithm"
31
  inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
32
  outputs = model.generate(**inputs, max_length=128)
 
25
  ```python
26
  from transformers import AutoTokenizer, AutoModelForCausalLM
27
  import torch
28
+ tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base-v1.5", trust_remote_code=True)
29
+ model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base-v1.5", trust_remote_code=True).cuda()
30
  input_text = "#write a quick sort algorithm"
31
  inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
32
  outputs = model.generate(**inputs, max_length=128)