Add SetFit model

Files added:
- 1_Pooling/config.json +10 -0
- README.md +239 -0
- config.json +32 -0
- config_sentence_transformers.json +10 -0
- config_setfit.json +4 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED

```json
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": true,
    "pooling_mode_mean_tokens": false,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
```
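This configuration selects CLS-token pooling over the 768-dimensional token embeddings. As a rough illustration of what it encodes, here is how the equivalent module could be built by hand in sentence-transformers (a sketch only; loading the repository with `SentenceTransformer(...)` reads this file automatically):

```python
from sentence_transformers.models import Pooling

# Mirrors 1_Pooling/config.json: take the [CLS] token embedding
# as the sentence embedding rather than mean/max pooling.
pooling = Pooling(
    word_embedding_dimension=768,
    pooling_mode_cls_token=True,
    pooling_mode_mean_tokens=False,
    pooling_mode_max_tokens=False,
)
print(pooling.get_pooling_mode_str())  # "cls"
```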
README.md
ADDED
---
base_model: BAAI/bge-base-en-v1.5
library_name: setfit
metrics:
- accuracy
pipeline_tag: text-classification
tags:
- setfit
- sentence-transformers
- text-classification
- generated_from_setfit_trainer
widget:
- text: "Reasoning:\nThe answer provided aligns well with the information in the provided\
    \ document. It discusses the types of toys suitable for rabbits, such as small\
    \ animal toys, bird toys, and specific items like wooden blocks and willow balls.\
    \ It also mentions offering non-toxic plastic balls and homemade options like\
    \ cardboard tubes or boxes, which the document details. The answer is concise\
    \ and stays relevant to the question of how to choose toys for a rabbit, avoiding\
    \ unnecessary information.\n\nFinal Evaluation: \nEvaluation:"
- text: 'Reasoning:

    The provided answer incorrectly states who signed Kieron Freeman when he went
    on loan to Notts County instead of answering the question about Aaron Pryor''s
    boxing manager. This answer does not relate to the provided question, making it
    contextually irrelevant and failing to address the inquiry about Aaron Pryor’s
    manager during his boxing career.


    Final Evaluation:'
- text: "Reasoning:\nThe provided answer discusses a general concern about user data\
    \ being compromised when using online casinos. However, this is not relevant to\
    \ the specified question regarding a concern raised by the husband of a person\
    \ who wrote a message on July 10, 2011. The question requires identifying specific\
    \ details about a particular individual's concern from the document. Therefore,\
    \ the provided answer does not address or relate to the asked question and lacks\
    \ context grounding from the document.\n\nFinal Evaluation: \nEvaluation:"
- text: "Reasoning:\nThe answer fails to accurately address the specific question\
    \ based on the provided document. The recommendation to teach the \"fly\" command\
    \ and the use of a toy to train the dog to raise itself onto its hind legs are\
    \ not supported by the document and are quite irrelevant to the issue of preventing\
    \ a dog from running out of the house. Furthermore, the mention of using a \"\
    magic spell\" and minimal exercise during the night is nonsensical and not grounded\
    \ in the provided document or realistic dog training practices. The document contains\
    \ practical and relevant strategies such as using a treat, practicing sit/stay\
    \ commands, using barriers or spray devices, and keeping the dog entertained and\
    \ exercised, but these were not correctly reflected in the answer.\n\nFinal Evaluation:\
    \ \nEvaluation:"
- text: 'Reasoning:

    The answer directly addresses the question by correctly stating that Allan Cox''s
    First Class Delivery was launched on a H128-10W for his Level 1 certification
    flight. This information is explicitly provided in the document and is clearly
    well-grounded in the context. The answer is concise and to the point, without
    deviating into unrelated details.


    Final Evaluation:'
inference: true
---

# SetFit with BAAI/bge-base-en-v1.5

This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.

The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.
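
As a rough illustration of that two-stage recipe, here is a minimal training sketch with the `setfit` Trainer API (the two-example dataset below is a hypothetical stand-in, not this model's actual training data):

```python
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Hypothetical few-shot data in this card's "Reasoning: ... Final Evaluation:" style.
train_dataset = Dataset.from_dict({
    "text": [
        "Reasoning:\nThe answer is well grounded in the document.\n\nFinal Evaluation:",
        "Reasoning:\nThe answer does not relate to the question.\n\nFinal Evaluation:",
    ],
    "label": [1, 0],
})

model = SetFitModel.from_pretrained("BAAI/bge-base-en-v1.5")
args = TrainingArguments(batch_size=16, num_epochs=1)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()  # stage 1: contrastive fine-tuning; stage 2: fitting the classification head
```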

## Model Details

### Model Description
- **Model Type:** SetFit
- **Sentence Transformer body:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)
- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
- **Maximum Sequence Length:** 512 tokens
- **Number of Classes:** 2 classes
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)

### Model Labels
| Label | Examples |
|:------|:---------|
| 0 | <ul><li>"Reasoning:\nThe answer provides key insights into the reasons behind the Denver Nuggets' offensive outburst in January, specifically mentioning the comfort and effectiveness of the team, the coaching strategy of taking the first available shot in the rhythm of the offense, and emphasizing pushing the ball after both makes and misses. These points are directly supported by the document. However, the inclusion of information about a new training technique involving virtual reality is not supported by the provided document and thus detracts from the answer's accuracy and relevance.\n\nFinal Evaluation:"</li><li>'Reasoning:\nWhile the provided answer attempts to address the differences between film and digital photography, it contains several inaccuracies and inconsistencies with the document. The answer incorrectly states that film under-exposes better and compresses the range into the bottom end, whereas the document clearly states that film over-exposes better and compresses the range into the top end. Additionally, it mentions that the digital sensors capture all three colors at each point, which is inaccurate as per the document; it states digital sensors capture only one color at each point and then interpolate the data to create an RGB image. Moreover, the answer states the comparison is to 5MP digital sensors whereas the document talks about 10MP sensors.\n\nThese inaccuracies undermine the grounding and reliability of the answer.\n\nFinal Evaluation:'</li><li>"Reasoning:\nThe provided answer addresses a topic entirely unrelated to the given question. The question is about the main conflict in the third book of the Arcana Chronicles by Kresley Cole, but the answer discusses the result of an MMA event featuring Antonio Rogerio Nogueira. There is no connection between the document's content and the question, leading to a clear lack of context grounding, relevance, and conciseness.\n\nFinal Evaluation: \nEvaluation:"</li></ul> |
| 1 | <ul><li>'Reasoning:\nThe answer provided effectively draws upon the document to list several best practices that a web designer can incorporate into their client discovery and web design process to avoid unnecessary revisions and conflicts. Each practice mentioned—getting to know the client and their needs, signing a detailed contract, and communicating honestly about extra charges—is directly supported by points raised in the document. The answer is relevant, concise, and closely aligned with the specific question asked.\n\nFinal Evaluation:'</li><li>"Reasoning:\nThe answer provided correctly identifies the author's belief on what creates a connection between the reader and the characters in a story. It states that drawing from the author's own painful and emotional experiences makes the story genuine and relatable, thus engaging the reader. This is consistent with the document, which emphasizes the importance of authenticity and emotional depth derived from the author's personal experiences to make characters and their struggles real to the reader. The answer is relevant to the question and concisely captures the essence of the author's argument without including unnecessary information.\n\nFinal Evaluation:"</li><li>'Reasoning:\nThe answer correctly identifies Mauro Rubin as the CEO of JoinPad during the event at Talent Garden Calabiana, Milan. This is well-supported by the context provided in the document, which specifically mentions Mauro Rubin, the JoinPad CEO, taking the stage at the event. The answer is relevant to the question and concise.\n\nFinal Evaluation: \nEvaluation:'</li></ul> |

## Uses

### Direct Use for Inference

First install the SetFit library:

```bash
pip install setfit
```

Then you can load this model and run inference.

```python
from setfit import SetFitModel

# Download from the 🤗 Hub
model = SetFitModel.from_pretrained("Netta1994/setfit_baai_gpt-4o_improved-cot-instructions_chat_few_shot_generated_remove_final_eva")
# Run inference (the input spans multiple lines, so a triple-quoted string is used)
preds = model("""Reasoning:
The provided answer incorrectly states who signed Kieron Freeman when he went on loan to Notts County instead of answering the question about Aaron Pryor's boxing manager. This answer does not relate to the provided question, making it contextually irrelevant and failing to address the inquiry about Aaron Pryor’s manager during his boxing career.

Final Evaluation:""")
```
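
The call above returns hard label predictions (0 or 1 for this two-class head). If class probabilities are needed, `SetFitModel` also exposes `predict_proba`; a short usage sketch:

```python
# Per-class probabilities from the LogisticRegression head, shape (n_inputs, 2).
probs = model.predict_proba(["Reasoning:\nThe answer is well grounded.\n\nFinal Evaluation:"])
print(probs)
```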

<!--
### Downstream Use

*List how someone could finetune this model on their own dataset.*
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Set Metrics
| Training set | Min | Median  | Max |
|:-------------|:----|:--------|:----|
| Word count   | 33  | 77.1275 | 176 |

| Label | Training Sample Count |
|:------|:----------------------|
| 0     | 200                   |
| 1     | 208                   |

### Training Hyperparameters
- batch_size: (16, 16)
- num_epochs: (1, 1)
- max_steps: -1
- sampling_strategy: oversampling
- num_iterations: 20
- body_learning_rate: (2e-05, 2e-05)
- head_learning_rate: 2e-05
- loss: CosineSimilarityLoss
- distance_metric: cosine_distance
- margin: 0.25
- end_to_end: False
- use_amp: False
- warmup_proportion: 0.1
- l2_weight: 0.01
- seed: 42
- eval_max_steps: -1
- load_best_model_at_end: False
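
For reference, these values map roughly onto a `setfit.TrainingArguments` call as sketched below (assuming SetFit 1.1.0's argument names; the tuples are (embedding phase, classifier phase), and the loss and distance metric are left at their defaults shown above):

```python
from setfit import TrainingArguments

args = TrainingArguments(
    batch_size=(16, 16),
    num_epochs=(1, 1),
    num_iterations=20,                  # contrastive pair-sampling iterations
    body_learning_rate=(2e-05, 2e-05),  # Sentence Transformer body
    head_learning_rate=2e-05,           # classification head
    sampling_strategy="oversampling",
    margin=0.25,
    warmup_proportion=0.1,
    l2_weight=0.01,
    seed=42,
)
```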

### Training Results
| Epoch  | Step | Training Loss | Validation Loss |
|:------:|:----:|:-------------:|:---------------:|
| 0.0010 | 1    | 0.2056        | -               |
| 0.0490 | 50   | 0.2533        | -               |
| 0.0980 | 100  | 0.1804        | -               |
| 0.1471 | 150  | 0.0762        | -               |
| 0.1961 | 200  | 0.0731        | -               |
| 0.2451 | 250  | 0.0501        | -               |
| 0.2941 | 300  | 0.0336        | -               |
| 0.3431 | 350  | 0.0197        | -               |
| 0.3922 | 400  | 0.0105        | -               |
| 0.4412 | 450  | 0.0049        | -               |
| 0.4902 | 500  | 0.0031        | -               |
| 0.5392 | 550  | 0.0024        | -               |
| 0.5882 | 600  | 0.0021        | -               |
| 0.6373 | 650  | 0.0018        | -               |
| 0.6863 | 700  | 0.0017        | -               |
| 0.7353 | 750  | 0.0018        | -               |
| 0.7843 | 800  | 0.0016        | -               |
| 0.8333 | 850  | 0.0015        | -               |
| 0.8824 | 900  | 0.0017        | -               |
| 0.9314 | 950  | 0.0015        | -               |
| 0.9804 | 1000 | 0.0013        | -               |

### Framework Versions
- Python: 3.10.14
- SetFit: 1.1.0
- Sentence Transformers: 3.1.1
- Transformers: 4.44.0
- PyTorch: 2.4.0+cu121
- Datasets: 3.0.0
- Tokenizers: 0.19.1

## Citation

### BibTeX
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
    doi = {10.48550/ARXIV.2209.11055},
    url = {https://arxiv.org/abs/2209.11055},
    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
    title = {Efficient Few-Shot Learning Without Prompts},
    publisher = {arXiv},
    year = {2022},
    copyright = {Creative Commons Attribution 4.0 International}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED

```json
{
  "_name_or_path": "BAAI/bge-base-en-v1.5",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.44.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
```
config_sentence_transformers.json
ADDED

```json
{
  "__version__": {
    "sentence_transformers": "3.1.1",
    "transformers": "4.44.0",
    "pytorch": "2.4.0+cu121"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": null
}
```
config_setfit.json
ADDED

```json
{
  "labels": null,
  "normalize_embeddings": false
}
```
model.safetensors
ADDED (Git LFS pointer)

```
version https://git-lfs.github.com/spec/v1
oid sha256:4ecabf5648b7d16334aa529fb6d430dad2c31fbf8a74ad858f50d6feb661e872
size 437951328
```
model_head.pkl
ADDED (Git LFS pointer)

```
version https://git-lfs.github.com/spec/v1
oid sha256:96e2d056f397cb7f0fece376761e24b8fecc862d872b74d422c41c822611428e
size 7007
```
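This pickle holds the classification head (per the model card, a scikit-learn LogisticRegression), which SetFit loads alongside the embedding body. A small inspection sketch, assuming the standard `model_head` attribute on `SetFitModel`:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained(
    "Netta1994/setfit_baai_gpt-4o_improved-cot-instructions_chat_few_shot_generated_remove_final_eva"
)
print(type(model.model_head))        # expected: sklearn.linear_model.LogisticRegression
print(model.model_head.coef_.shape)  # (1, 768): one weight per embedding dimension
```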
modules.json
ADDED

```json
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
```
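modules.json wires the Sentence Transformer as a three-stage pipeline: BERT body, CLS pooling, then L2 normalization, so sentence embeddings come out unit-length (making dot product equal to cosine similarity). A sketch of the equivalent explicit construction, assuming the standard sentence-transformers module classes:

```python
from sentence_transformers import SentenceTransformer, models

body = models.Transformer("BAAI/bge-base-en-v1.5", max_seq_length=512)
pooling = models.Pooling(body.get_word_embedding_dimension(), pooling_mode="cls")
normalize = models.Normalize()

# Same module order as modules.json: Transformer -> Pooling -> Normalize.
encoder = SentenceTransformer(modules=[body, pooling, normalize])
embedding = encoder.encode(["Reasoning: a short test sentence."])  # unit-norm, 768-d
```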
sentence_bert_config.json
ADDED

```json
{
  "max_seq_length": 512,
  "do_lower_case": true
}
```
special_tokens_map.json
ADDED

```json
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenizer.json
ADDED
(The diff for this file is too large to render; see the raw file.)
tokenizer_config.json
ADDED

```json
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
```
vocab.txt
ADDED
(The diff for this file is too large to render; see the raw file.)