CocoRoF committed · verified
Commit fac7157 · 1 parent: 32d895f

Update README.md

Files changed (1):
  1. README.md +78 -1
README.md CHANGED
@@ -22,7 +22,84 @@ It achieves the following results on the evaluation set:
- Loss: 0.3242
- Accuracy: 0.8997

- ## Model description
+ ## How to use
+ #### Load the base model and the Plateer classifier model
+ ```python
+ import joblib
+ import torch
+ from huggingface_hub import hf_hub_download, login
+ from peft import PeftModel, PeftConfig
+ from transformers import AutoTokenizer, TextClassificationPipeline, AutoModelForSequenceClassification
+
+ # Log in to the Hugging Face Hub with a token stored in a local file
+ with open('./api_key/HGF_TOKEN.txt', 'r') as hgf:
+     login(token=hgf.read().strip())
+
+ repo_id = "x2bee/plateer_classifier_v0.1"
+ data_id = "x2bee/plateer_category_data"
+
+ # Load config, tokenizer, and label encoder
+ config = PeftConfig.from_pretrained(repo_id, subfolder="last-checkpoint")
+ tokenizer = AutoTokenizer.from_pretrained(repo_id, subfolder="last-checkpoint")
+ label_encoder_file = hf_hub_download(repo_id=data_id, repo_type="dataset", filename="label_encoder.joblib")
+ label_encoder = joblib.load(label_encoder_file)
+
+ # Load the base model and resize its embeddings to match the tokenizer
+ base_model = AutoModelForSequenceClassification.from_pretrained("Qwen/Qwen2.5-1.5B", num_labels=17)
+ base_model.resize_token_embeddings(len(tokenizer))
+
+ # Attach the fine-tuned adapter to the base model
+ model = PeftModel.from_pretrained(base_model, repo_id, subfolder="last-checkpoint")
+
+ class PlateerClassificationPipeline(TextClassificationPipeline):
+     def __call__(self, inputs, top_k=5, **kwargs):
+         inputs = self.tokenizer(inputs, return_tensors="pt", truncation=True, padding=True, **kwargs)
+         inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
+
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+
+         probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+         scores, indices = torch.topk(probs, top_k, dim=-1)
+
+         results = []
+         for batch_idx in range(indices.shape[0]):
+             batch_results = []
+             for score, idx in zip(scores[batch_idx], indices[batch_idx]):
+                 # id2label yields names like "LABEL_6"; keep the integer part
+                 label = int(self.model.config.id2label[idx.item()].split("_")[1])
+                 # Decode the integer label back to its category name
+                 predicted_class = label_encoder.inverse_transform([label])[0]
+                 batch_results.append({
+                     "label": label,
+                     "label_decode": predicted_class,
+                     "score": score.item(),
+                 })
+             results.append(batch_results)
+
+         return results
+
+ classifier_model = PlateerClassificationPipeline(tokenizer=tokenizer, model=model)
+
+ def plateer_classifier(text, top_k=3):
+     return classifier_model(text, top_k=top_k)
+ ```
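+
+ Optionally, you can move the model to a GPU before building the pipeline. This is a minimal sketch, not part of the original snippet; it assumes a CUDA device may or may not be present. The custom pipeline already sends inputs to `self.model.device`, so no other change is needed:
+ ```python
+ import torch
+
+ # Assumption: run inference on GPU when one is available, otherwise CPU
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)
+ model.eval()
+ ```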
+
+ #### Run
+ ```python
+ user_input = "머리띠"  # "headband"
+ result = plateer_classifier(user_input)[0]  # top-k predictions for the single input
+ print(result)
+ ```
+
+ ```bash
+ {'label': 6, 'label_decode': '뷰티/케어', 'score': 0.42996299266815186}
+ {'label': 15, 'label_decode': '패션/의류/잡화', 'score': 0.1485249102115631}
+ {'label': 8, 'label_decode': '스포츠', 'score': 0.1281907707452774}
+ ```
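+
+ Because the pipeline tokenizes with `padding=True` and iterates over the batch dimension, it should also accept a list of inputs. The sketch below is an illustration under that assumption, not output from the model card:
+ ```python
+ # Classify several product names at once; each input gets its own top-k list
+ batch = ["머리띠", "축구공"]  # "headband", "soccer ball"
+ batch_results = plateer_classifier(batch, top_k=3)
+ for text, preds in zip(batch, batch_results):
+     print(text, preds[0]["label_decode"], round(preds[0]["score"], 3))
+ ```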
+

More information needed