jamiehudson committed
Commit 77e0f2f • Parent(s): 7a40c18

Push model using huggingface_hub.
Files changed:
- 1_Pooling/config.json +10 -0
- README.md +312 -0
- config.json +32 -0
- config_sentence_transformers.json +9 -0
- config_setfit.json +8 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
+{
+    "word_embedding_dimension": 768,
+    "pooling_mode_cls_token": true,
+    "pooling_mode_mean_tokens": false,
+    "pooling_mode_max_tokens": false,
+    "pooling_mode_mean_sqrt_len_tokens": false,
+    "pooling_mode_weightedmean_tokens": false,
+    "pooling_mode_lasttoken": false,
+    "include_prompt": true
+}
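The pooling config above selects CLS-token pooling over 768-dimensional token embeddings. As a minimal illustration (not the sentence-transformers internals), CLS pooling simply takes the hidden state at the first token position; the shapes below are hypothetical:

```python
import torch

# With pooling_mode_cls_token = true, the sentence embedding is the hidden
# state at the [CLS] position, i.e. index 0 along the sequence dimension.
token_embeddings = torch.randn(2, 16, 768)    # hypothetical (batch, seq, dim)
sentence_embeddings = token_embeddings[:, 0]  # take the first ([CLS]) token
print(sentence_embeddings.shape)              # torch.Size([2, 768])
```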
README.md
ADDED
@@ -0,0 +1,312 @@
+---
+library_name: setfit
+tags:
+- setfit
+- sentence-transformers
+- text-classification
+- generated_from_setfit_trainer
+metrics:
+- accuracy
+- f1
+- precision
+- recall
+widget:
+- text: man, product/whatever is my new best friend. i like product but the integration
+    of product into office and product is a lot of fun. i just spent the day feeding
+    it my training presentation i'm preparing in my day job and it was very helpful.
+    almost better than humans.
+- text: that's great news! product is the perfect platform to share these advanced
+    product prompts and help more users get the most out of it!
+- text: after only one week's trial of the new product with brand enabled, i have
+    replaced my default browser product that i was using for more than 7 years with
+    new product. i no longer need to spend a lot of time finding answers from a bunch
+    of search results and web pages. it's amazing
+- text: very impressive. brand is finally fighting back. i am just a little worried
+    about the scalability of such a high context window size, since even in their
+    demos it took quite a while to process everything. regardless, i am very interested
+    in seeing what types of capabilities a >1m token size window can unleash.
+- text: product the way it shows the sources is so fucking cool, this new ai is amazing
+pipeline_tag: text-classification
+inference: true
+base_model: BAAI/bge-base-en-v1.5
+model-index:
+- name: SetFit with BAAI/bge-base-en-v1.5
+  results:
+  - task:
+      type: text-classification
+      name: Text Classification
+    dataset:
+      name: Unknown
+      type: unknown
+      split: test
+    metrics:
+    - type: accuracy
+      value: 0.7876447876447876
+      name: Accuracy
+    - type: f1
+      value:
+      - 0.3720930232558139
+      - 0.4528301886792453
+      - 0.8720379146919431
+      name: F1
+    - type: precision
+      value:
+      - 0.23529411764705882
+      - 0.3
+      - 0.9945945945945946
+      name: Precision
+    - type: recall
+      value:
+      - 0.8888888888888888
+      - 0.9230769230769231
+      - 0.7763713080168776
+      name: Recall
+---
+
+# SetFit with BAAI/bge-base-en-v1.5
+
+This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.
+
+The model has been trained using an efficient few-shot learning technique that involves:
+
+1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+2. Training a classification head with features from the fine-tuned Sentence Transformer.
+
+## Model Details
+
+### Model Description
+- **Model Type:** SetFit
+- **Sentence Transformer body:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)
+- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
+- **Maximum Sequence Length:** 512 tokens
+- **Number of Classes:** 3 classes
+<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
+<!-- - **Language:** Unknown -->
+<!-- - **License:** Unknown -->
+
+### Model Sources
+
+- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
+- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
+- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)
+
+### Model Labels
+| Label   | Examples |
+|:--------|:---------|
+| neither | <ul><li>'product cloud fails to cash in on product - as enterprises optimize cloud spending, product has registered its slowest growth in three years.'</li><li>'what do those things have to do with product? and its funny youre trying to argue facts by bringing your god into this.'</li><li>'your question didn\'t mean what you think it meant. it answered correctly to your question, which i also read as "hey brand, can you forget my loved ones?"'</li></ul> |
+| peak    | <ul><li>'chatbrandandme product brand product dang, my product msftadvertising experience is already so smooth and satisfying wow. they even gave me a free landing page for my product and product. i love msftadvertising and product for buying out brand and making gpt my best friend even more'</li><li>'i asked my physics teacher for help on a question i didnt understand on a test and she sent me back a 5 slide product with audio explaining each part of the question. she 100% is my fav teacher now.'</li><li>'brand!! it helped me finish my resume. i just asked it if it could write my resume based on horribly written descriptions i came up with. and it made it all pretty:)'</li></ul> |
+| pit     | <ul><li>'do not upgrade to product, it is a complete joke of an operating system. all of my xproduct programs are broken, none of my gpus work correctly, even after checking the bios and drivers, and now file explorer crashes upon startup, basically locking up the whole computer!'</li><li>'yes, and it would be great if product stops changing the format of data from other sources automatically, that is really annoying when 10-1-2 becomes "magically and wrongly" 2010/01/02. we are in the age of data and product just cannot handle them well..'</li><li>'it\'s a pity that the *product* doesn\'t work such as the "*normal chat*" does, but with 18,000 chars lim. hopefully, the will aim to make such upgrade, although more memory costly.'</li></ul> |
+
+## Evaluation
+
+### Metrics
+| Label   | Accuracy | F1                                                           | Precision                                      | Recall                                                       |
+|:--------|:---------|:-------------------------------------------------------------|:-----------------------------------------------|:-------------------------------------------------------------|
+| **all** | 0.7876   | [0.3720930232558139, 0.4528301886792453, 0.8720379146919431] | [0.23529411764705882, 0.3, 0.9945945945945946] | [0.8888888888888888, 0.9230769230769231, 0.7763713080168776] |
+
+## Uses
+
+### Direct Use for Inference
+
+First install the SetFit library:
+
+```bash
+pip install setfit
+```
+
+Then you can load this model and run inference.
+
+```python
+from setfit import SetFitModel
+
+# Download from the 🤗 Hub
+model = SetFitModel.from_pretrained("jamiehudson/725_32batch_150_sample")
+# Run inference
+preds = model("product the way it shows the sources is so fucking cool, this new ai is amazing")
+```
+
+<!--
+### Downstream Use
+
+*List how someone could finetune this model on their own dataset.*
+-->
+
+<!--
+### Out-of-Scope Use
+
+*List how the model may foreseeably be misused and address what users ought not to do with the model.*
+-->
+
+<!--
+## Bias, Risks and Limitations
+
+*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+-->
+
+<!--
+### Recommendations
+
+*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+-->
+
+## Training Details
+
+### Training Set Metrics
+| Training set | Min | Median  | Max |
+|:-------------|:----|:--------|:----|
+| Word count   | 9   | 37.1711 | 98  |
+
+| Label   | Training Sample Count |
+|:--------|:----------------------|
+| pit     | 150                   |
+| peak    | 150                   |
+| neither | 150                   |
+
+### Training Hyperparameters
+- batch_size: (32, 32)
+- num_epochs: (1, 1)
+- max_steps: -1
+- sampling_strategy: oversampling
+- body_learning_rate: (2e-05, 1e-05)
+- head_learning_rate: 0.01
+- loss: CosineSimilarityLoss
+- distance_metric: cosine_distance
+- margin: 0.25
+- end_to_end: False
+- use_amp: False
+- warmup_proportion: 0.1
+- seed: 42
+- eval_max_steps: -1
+- load_best_model_at_end: False
+
+### Training Results
+| Epoch  | Step | Training Loss | Validation Loss |
+|:------:|:----:|:-------------:|:---------------:|
+| 0.0000 | 1    | 0.2383        | -               |
+| 0.0119 | 50   | 0.2395        | -               |
+| 0.0237 | 100  | 0.2129        | -               |
+| 0.0356 | 150  | 0.1317        | -               |
+| 0.0474 | 200  | 0.0695        | -               |
+| 0.0593 | 250  | 0.01          | -               |
+| 0.0711 | 300  | 0.0063        | -               |
+| 0.0830 | 350  | 0.0028        | -               |
+| 0.0948 | 400  | 0.0026        | -               |
+| 0.1067 | 450  | 0.0021        | -               |
+| 0.1185 | 500  | 0.0018        | -               |
+| 0.1304 | 550  | 0.0016        | -               |
+| 0.1422 | 600  | 0.0014        | -               |
+| 0.1541 | 650  | 0.0015        | -               |
+| 0.1659 | 700  | 0.0013        | -               |
+| 0.1778 | 750  | 0.0012        | -               |
+| 0.1896 | 800  | 0.0012        | -               |
+| 0.2015 | 850  | 0.0012        | -               |
+| 0.2133 | 900  | 0.0011        | -               |
+| 0.2252 | 950  | 0.0011        | -               |
+| 0.2370 | 1000 | 0.0009        | -               |
+| 0.2489 | 1050 | 0.001         | -               |
+| 0.2607 | 1100 | 0.0009        | -               |
+| 0.2726 | 1150 | 0.0008        | -               |
+| 0.2844 | 1200 | 0.0008        | -               |
+| 0.2963 | 1250 | 0.0009        | -               |
+| 0.3081 | 1300 | 0.0008        | -               |
+| 0.3200 | 1350 | 0.0007        | -               |
+| 0.3318 | 1400 | 0.0007        | -               |
+| 0.3437 | 1450 | 0.0007        | -               |
+| 0.3555 | 1500 | 0.0006        | -               |
+| 0.3674 | 1550 | 0.0007        | -               |
+| 0.3792 | 1600 | 0.0007        | -               |
+| 0.3911 | 1650 | 0.0008        | -               |
+| 0.4029 | 1700 | 0.0006        | -               |
+| 0.4148 | 1750 | 0.0006        | -               |
+| 0.4266 | 1800 | 0.0006        | -               |
+| 0.4385 | 1850 | 0.0006        | -               |
+| 0.4503 | 1900 | 0.0006        | -               |
+| 0.4622 | 1950 | 0.0006        | -               |
+| 0.4740 | 2000 | 0.0006        | -               |
+| 0.4859 | 2050 | 0.0005        | -               |
+| 0.4977 | 2100 | 0.0006        | -               |
+| 0.5096 | 2150 | 0.0006        | -               |
+| 0.5215 | 2200 | 0.0005        | -               |
+| 0.5333 | 2250 | 0.0005        | -               |
+| 0.5452 | 2300 | 0.0005        | -               |
+| 0.5570 | 2350 | 0.0006        | -               |
+| 0.5689 | 2400 | 0.0005        | -               |
+| 0.5807 | 2450 | 0.0005        | -               |
+| 0.5926 | 2500 | 0.0006        | -               |
+| 0.6044 | 2550 | 0.0006        | -               |
+| 0.6163 | 2600 | 0.0005        | -               |
+| 0.6281 | 2650 | 0.0005        | -               |
+| 0.6400 | 2700 | 0.0005        | -               |
+| 0.6518 | 2750 | 0.0005        | -               |
+| 0.6637 | 2800 | 0.0005        | -               |
+| 0.6755 | 2850 | 0.0005        | -               |
+| 0.6874 | 2900 | 0.0005        | -               |
+| 0.6992 | 2950 | 0.0004        | -               |
+| 0.7111 | 3000 | 0.0004        | -               |
+| 0.7229 | 3050 | 0.0004        | -               |
+| 0.7348 | 3100 | 0.0005        | -               |
+| 0.7466 | 3150 | 0.0005        | -               |
+| 0.7585 | 3200 | 0.0005        | -               |
+| 0.7703 | 3250 | 0.0004        | -               |
+| 0.7822 | 3300 | 0.0004        | -               |
+| 0.7940 | 3350 | 0.0004        | -               |
+| 0.8059 | 3400 | 0.0004        | -               |
+| 0.8177 | 3450 | 0.0004        | -               |
+| 0.8296 | 3500 | 0.0004        | -               |
+| 0.8414 | 3550 | 0.0004        | -               |
+| 0.8533 | 3600 | 0.0004        | -               |
+| 0.8651 | 3650 | 0.0004        | -               |
+| 0.8770 | 3700 | 0.0004        | -               |
+| 0.8888 | 3750 | 0.0004        | -               |
+| 0.9007 | 3800 | 0.0004        | -               |
+| 0.9125 | 3850 | 0.0004        | -               |
+| 0.9244 | 3900 | 0.0005        | -               |
+| 0.9362 | 3950 | 0.0004        | -               |
+| 0.9481 | 4000 | 0.0004        | -               |
+| 0.9599 | 4050 | 0.0004        | -               |
+| 0.9718 | 4100 | 0.0004        | -               |
+| 0.9836 | 4150 | 0.0004        | -               |
+| 0.9955 | 4200 | 0.0004        | -               |
+
+### Framework Versions
+- Python: 3.10.12
+- SetFit: 1.0.3
+- Sentence Transformers: 2.5.1
+- Transformers: 4.38.1
+- PyTorch: 2.1.0+cu121
+- Datasets: 2.18.0
+- Tokenizers: 0.15.2
+
+## Citation
+
+### BibTeX
+```bibtex
+@article{https://doi.org/10.48550/arxiv.2209.11055,
+    doi = {10.48550/ARXIV.2209.11055},
+    url = {https://arxiv.org/abs/2209.11055},
+    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+    title = {Efficient Few-Shot Learning Without Prompts},
+    publisher = {arXiv},
+    year = {2022},
+    copyright = {Creative Commons Attribution 4.0 International}
+}
+```
+
+<!--
+## Glossary
+
+*Clearly define terms in order to be accessible across audiences.*
+-->
+
+<!--
+## Model Card Authors
+
+*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+-->
+
+<!--
+## Model Card Contact
+
+*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+-->
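The card's inference example returns hard label predictions. As a hedged extension, assuming the setfit 1.0 API (where `predict_proba` and the `labels` attribute are available), per-class probabilities for the three labels from config_setfit.json can be inspected like this:

```python
from setfit import SetFitModel

# Load the pushed model and look at per-class probabilities rather than
# just the argmax label. Assumes setfit 1.0.x.
model = SetFitModel.from_pretrained("jamiehudson/725_32batch_150_sample")
texts = ["product the way it shows the sources is so fucking cool, this new ai is amazing"]
print(model.labels)                # ['pit', 'peak', 'neither']
print(model.predict_proba(texts))  # one row of class probabilities per text
```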
config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "BAAI/bge-base-en-v1.5",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
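Since the config above describes a stock 12-layer BERT-base encoder, the embedding body can also be inspected with plain transformers APIs; a minimal sketch:

```python
from transformers import AutoConfig, AutoModel

# The repo root holds a standard BertModel config and safetensors weights,
# so the Sentence Transformer body loads like any other transformers model.
config = AutoConfig.from_pretrained("jamiehudson/725_32batch_150_sample")
body = AutoModel.from_pretrained("jamiehudson/725_32batch_150_sample")
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)
# 768 12 12
```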
config_sentence_transformers.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.2.2",
+    "transformers": "4.28.1",
+    "pytorch": "1.13.0+cu117"
+  },
+  "prompts": {},
+  "default_prompt_name": null
+}
config_setfit.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "normalize_embeddings": false,
+  "labels": [
+    "pit",
+    "peak",
+    "neither"
+  ]
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0d81266419174d40321937571cb78326a0493efd3eaa9b8dd9d48fe7abeccb4
+size 437951328
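The three lines above are a Git LFS pointer, not the weights themselves; under the LFS v1 spec, the `oid` is the SHA-256 of the real file. A small sketch for verifying a downloaded copy (the local path is hypothetical):

```python
import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in 1 MiB chunks so large weights don't need to fit in RAM.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# After `git lfs pull` (or a hub download), this should print the pointer's oid:
# sha256_of("model.safetensors")
# -> "c0d81266419174d40321937571cb78326a0493efd3eaa9b8dd9d48fe7abeccb4"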
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8ead2cc05d2b228e1be7e7df602dc37612241c3d1e4fa951c9d849263aa260a
+size 19327
modules.json
ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
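modules.json chains Transformer -> Pooling (CLS) -> Normalize, so sentence embeddings should come back L2-normalized. A quick sketch to confirm, assuming the repo id from the model card:

```python
from sentence_transformers import SentenceTransformer

# Loading by repo id assembles the three modules listed in modules.json.
st = SentenceTransformer("jamiehudson/725_32batch_150_sample")
emb = st.encode(["a quick smoke test"])
print(emb.shape)                # (1, 768)
print(float((emb ** 2).sum()))  # ~1.0, thanks to the Normalize module
```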
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 512,
+  "do_lower_case": true
+}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
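The config above is a standard lower-casing BertTokenizer with the special tokens listed in special_tokens_map.json; a short sketch of what encoding looks like:

```python
from transformers import AutoTokenizer

# Encoding lower-cases the input and wraps it in [CLS] ... [SEP].
tok = AutoTokenizer.from_pretrained("jamiehudson/725_32batch_150_sample")
ids = tok("Hello World!")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# ['[CLS]', 'hello', 'world', '!', '[SEP]']
```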
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.