Update README_zh.md
shunxing1234 committed
Commit 8e91e9c · 1 Parent(s): abed985

Files changed (1): README_zh.md (+9 -11)
README_zh.md CHANGED
````diff
@@ -29,8 +29,8 @@ license: other
 悟道·天鹰Aquila系列模型将持续开源更优版本。
 
 - 2023/07/19 :开源 v0.9
-  - AquilaCode-mutil-01 md5: e202e5b82db773ea369fe843fef1c34c
-  - AquilaCode-mutil-02 md5: 3923b2b020e2af71755b11248076437f
+  - AquilaCode-mutil-01 md5: e6ea49fea7a737ffe41086ec7019cebb
+  - AquilaCode-mutil-02 md5: 4bba98eac44d785358ed5b6d2144a94a
   - AquilaCode-Python-01 md5: e202e5b82db773ea369fe843fef1c34c
   - AquilaCode-Python-02 md5: 3923b2b020e2af71755b11248076437f
 
@@ -49,20 +49,18 @@ Aquila-7B v0.8 在 FlagEval 大模型评测中(“客观”)相比0.7的版
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
- from cyg_conversation import covert_prompt_to_input_ids_with_history
 
- tokenizer = AutoTokenizer.from_pretrained("BAAI/AquilaChat-7B")
- model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B")
+ model_info = "BAAI/AquilaCode-multi"
+ tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
 model.eval()
- model.to("cuda:0")
- vocab = tokenizer.vocab
- print(len(vocab))
+ model.to("cuda:3")
 
- text = "请给出10个要到北京旅游的理由。"
+ text = "#补全代码\ndef quick_sort(x):"
 
- tokens = covert_prompt_to_input_ids_with_history(text, history=[], tokenizer=tokenizer, max_token=512)
+ tokens = tokenizer.encode_plus(text)['input_ids'][:-1]
 
- tokens = torch.tensor(tokens)[None,].to("cuda:0")
+ tokens = torch.tensor(tokens)[None,].to("cuda:3")
 
 
 with torch.no_grad():
````
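The first hunk only updates the published MD5 checksums for the two AquilaCode-mutil weight packages. A minimal sketch for checking a downloaded file against the new values, using only the Python standard library; the local file name below is hypothetical and should be replaced with the actual archive name:

```python
import hashlib

def md5sum(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the MD5 hex digest of a file, read in chunks to keep memory use flat."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical file name; compare against the checksum published in README_zh.md.
print(md5sum("AquilaCode-mutil-01.bin") == "e6ea49fea7a737ffe41086ec7019cebb")
```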
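The second hunk retargets the README's usage example from AquilaChat-7B (and its cyg_conversation helper) to loading BAAI/AquilaCode-multi directly through transformers, with a code-completion prompt ("#补全代码" means "# complete the code"). The diff is truncated at `with torch.no_grad():`, so everything inside that block in the sketch below is an assumption rather than the README's actual continuation: a plain greedy `model.generate` call is used to illustrate the flow, and the device string and generation parameters are placeholders.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_info = "BAAI/AquilaCode-multi"
tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
model.eval()
model.to("cuda:0")  # the diff uses "cuda:3"; any available GPU works

text = "#补全代码\ndef quick_sort(x):"  # "# complete the code" followed by a function stub

# Drop the final token, as the snippet in the diff does.
tokens = tokenizer.encode_plus(text)["input_ids"][:-1]
tokens = torch.tensor(tokens)[None,].to("cuda:0")

with torch.no_grad():
    # Assumed continuation: greedy decoding of a short completion, then print it.
    out = model.generate(tokens, max_new_tokens=256, do_sample=False)[0]
    print(tokenizer.decode(out.tolist(), skip_special_tokens=True))
```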