AndrewZeng commited on
Commit
fdfba35
1 Parent(s): 2a49bf3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +45 -0
README.md CHANGED
@@ -1,3 +1,48 @@
1
  ---
2
  license: apache-2.0
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
  ---
4
+
5
+ ## Example Usage
6
+ ```python
7
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import numpy as np
from scipy.special import softmax

# Load the Deita quality scorer: a causal LM fine-tuned to rate a (question,
# response) pair with a digit from 1 to 6 after the "##Quality: " prompt suffix.
model_name = "hkust-nlp/Deita-Quality-Scorer"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Prompt template the scorer was trained with; the score token is generated
# immediately after "##Quality: ".
quality_template = ("You are a helpful assistant. Please identify the quality score of the Response corresponding to the Question. \n #Question#:\n{instruction}\n#Response#:\n{output} \n##Quality: ")

# Example (instruction, response) pair to score.
input_text = "word to describe UI with helpful tooltips"
output_text = "User-friendly or intuitive UI"

user_input = quality_template.format(instruction=input_text, output=output_text)

# Encode the prompt into token ids.
input_ids = tokenizer.encode(user_input, return_tensors="pt")

# Generate with scores exposed; only the logits of the FIRST generated token
# (outputs.scores[0]) are needed, since that token is the quality digit.
max_length = 512  # maximum total sequence length for generation
outputs = model.generate(
    input_ids,
    max_length=max_length,  # was a duplicated hard-coded 512; use the variable
    num_return_sequences=1,
    return_dict_in_generate=True,
    output_scores=True,
)
logprobs_list = outputs.scores[0][0]

# Vocabulary ids of the digit tokens "1".."6" — these values are specific to
# the LLaMA tokenizer family; verify them if you swap tokenizers.
id2score = {
    29896: "1",
    29906: "2",
    29941: "3",
    29946: "4",
    29945: "5",
    29953: "6",
}
score_template = np.array([1, 2, 3, 4, 5, 6])

# Gather the six digit-token logits, softmax them into a probability
# distribution over scores, and take the expected score (a float in [1, 6]).
score_logits = []
for k in id2score:
    score_logits.append(logprobs_list[k])
score_logits = np.array(score_logits)
score_npy = softmax(score_logits, axis=0)
score_npy = score_npy * score_template

score_npy = np.sum(score_npy, axis=0)
48
+ ```