arkohut committed on
Commit
c0aafd6
1 Parent(s): caec940

Upload 9 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "_name_or_path": "jinaai/jina-embeddings-v3",
+  "architectures": [
+    "XLMRobertaModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "auto_map": {
+    "AutoConfig": "jinaai/xlm-roberta-flash-implementation--configuration_xlm_roberta.XLMRobertaFlashConfig",
+    "AutoModel": "jinaai/xlm-roberta-flash-implementation--modeling_lora.XLMRobertaLoRA",
+    "AutoModelForMaskedLM": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForMaskedLM",
+    "AutoModelForPreTraining": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForPreTraining"
+  },
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "emb_pooler": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "load_trained_adapters": true,
+  "lora_adaptations": ["retrieval.query", "retrieval.passage", "separation", "classification", "text-matching"],
+  "lora_alpha": 1,
+  "lora_dropout_p": 0.0,
+  "lora_main_params_trainable": false,
+  "lora_rank": 4,
+  "matryoshka_dimensions": [32, 64, 128, 256, 512, 768, 1024],
+  "max_position_embeddings": 8194,
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "rotary",
+  "rotary_emb_base": 20000.0,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.30.2",
+  "truncate_dim": null,
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "use_flash_attn": true,
+  "vocab_size": 250002,
+  "task_instructions": {
+    "retrieval.query": "Represent the query for retrieving evidence documents: ",
+    "retrieval.passage": "Represent the document for retrieval: ",
+    "separation": "",
+    "classification": "",
+    "text-matching": ""
+  }
+}
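
config.json declares five LoRA task adapters in `lora_adaptations`, a matching instruction string for each in `task_instructions`, Matryoshka output dimensions, and rotary position embeddings with an 8194-token window. Below is a minimal sketch of loading the checkpoint through `transformers` and selecting one of those adapters; the `encode(texts, task=...)` helper is supplied by the repository's remote code, so treat the exact signature as an assumption and check the model card.

# Sketch: load jina-embeddings-v3 via its remote code and pick a LoRA task adapter.
# Assumption: the remote code exposes `encode(texts, task=...)` as documented upstream.
from transformers import AutoModel

model = AutoModel.from_pretrained("jinaai/jina-embeddings-v3", trust_remote_code=True)

# "retrieval.query" must be one of the names listed in `lora_adaptations`.
embeddings = model.encode(
    ["Which planet is known as the Red Planet?"],
    task="retrieval.query",
)
print(embeddings.shape)  # (1, 1024): hidden_size in this config
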
config_sentence_transformers.json ADDED
@@ -0,0 +1,16 @@
+{
+  "__version__": {
+    "sentence_transformers": "3.1.0",
+    "transformers": "4.41.2",
+    "pytorch": "2.3.1+cu121"
+  },
+  "prompts": {
+    "retrieval.query": "Represent the query for retrieving evidence documents: ",
+    "retrieval.passage": "Represent the document for retrieval: ",
+    "separation": "",
+    "classification": "",
+    "text-matching": ""
+  },
+  "default_prompt_name": null,
+  "similarity_fn_name": "cosine"
+}
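
config_sentence_transformers.json tells Sentence Transformers which instruction string to prepend for each task (the `prompts` map mirrors `task_instructions` above) and that cosine similarity is the intended scoring function. A hedged usage sketch, assuming sentence-transformers >= 3.1 as recorded in `__version__`:

# Sketch: prompt selection and cosine scoring as configured in this file.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("jinaai/jina-embeddings-v3", trust_remote_code=True)

# prompt_name picks an entry from the "prompts" map.
q = model.encode("What is the capital of France?", prompt_name="retrieval.query")
d = model.encode("Paris is the capital and largest city of France.", prompt_name="retrieval.passage")

# similarity_fn_name is "cosine", so model.similarity() returns cosine similarity.
print(model.similarity(q, d))
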
custom_st.py ADDED
@@ -0,0 +1,229 @@
+import json
+import logging
+import os
+from io import BytesIO
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+
+logger = logging.getLogger(__name__)
+
+
+class Transformer(nn.Module):
+    """Huggingface AutoModel to generate token embeddings.
+    Loads the correct class, e.g. BERT / RoBERTa etc.
+
+    Args:
+        model_name_or_path: Huggingface models name
+            (https://huggingface.co/models)
+        max_seq_length: Truncate any inputs longer than max_seq_length
+        model_args: Keyword arguments passed to the Huggingface
+            Transformers model
+        tokenizer_args: Keyword arguments passed to the Huggingface
+            Transformers tokenizer
+        config_args: Keyword arguments passed to the Huggingface
+            Transformers config
+        cache_dir: Cache dir for Huggingface Transformers to store/load
+            models
+        do_lower_case: If true, lowercases the input (independent if the
+            model is cased or not)
+        tokenizer_name_or_path: Name or path of the tokenizer. When
+            None, then model_name_or_path is used
+    """
+
+    save_in_root: bool = True
+
+    def __init__(
+        self,
+        model_name_or_path: str,
+        max_seq_length: int = None,
+        model_args: Dict[str, Any] = None,
+        tokenizer_args: Dict[str, Any] = None,
+        config_args: Dict[str, Any] = None,
+        cache_dir: str = None,
+        do_lower_case: bool = False,
+        tokenizer_name_or_path: str = None,
+        **kwargs,
+    ) -> None:
+        super().__init__()
+        self.config_keys = ["max_seq_length", "do_lower_case"]
+        self.do_lower_case = do_lower_case
+        if model_args is None:
+            model_args = {}
+        if tokenizer_args is None:
+            tokenizer_args = {}
+        if config_args is None:
+            config_args = {}
+
+        if kwargs.get("backend", "torch") != "torch":
+            logger.warning(
+                f'"jinaai/jina-embeddings-v3" is currently not compatible with the {kwargs["backend"]} backend. '
+                'Continuing with the "torch" backend.'
+            )
+
+        self.config = AutoConfig.from_pretrained(model_name_or_path, **config_args, cache_dir=cache_dir)
+
+        self._lora_adaptations = self.config.lora_adaptations
+        if (
+            not isinstance(self._lora_adaptations, list)
+            or len(self._lora_adaptations) < 1
+        ):
+            raise ValueError(
+                f"`lora_adaptations` must be a list and contain at least one element"
+            )
+        self._adaptation_map = {
+            name: idx for idx, name in enumerate(self._lora_adaptations)
+        }
+
+        self.default_task = model_args.pop('default_task', None)
+
+        self.auto_model = AutoModel.from_pretrained(model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args)
+
+        if max_seq_length is not None and "model_max_length" not in tokenizer_args:
+            tokenizer_args["model_max_length"] = max_seq_length
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path,
+            cache_dir=cache_dir,
+            **tokenizer_args,
+        )
+
+        # No max_seq_length set. Try to infer from model
+        if max_seq_length is None:
+            if (
+                hasattr(self.auto_model, "config")
+                and hasattr(self.auto_model.config, "max_position_embeddings")
+                and hasattr(self.tokenizer, "model_max_length")
+            ):
+                max_seq_length = min(self.auto_model.config.max_position_embeddings, self.tokenizer.model_max_length)
+
+        self.max_seq_length = max_seq_length
+
+        if tokenizer_name_or_path is not None:
+            self.auto_model.config.tokenizer_class = self.tokenizer.__class__.__name__
+
+
+    @property
+    def default_task(self):
+        return self._default_task
+
+    @default_task.setter
+    def default_task(self, task: Union[None, str]):
+        self._validate_task(task)
+        self._default_task = task
+
+
+    def _validate_task(self, task: str):
+        if task and task not in self._lora_adaptations:
+            raise ValueError(
+                f"Unsupported task '{task}'. "
+                f"Supported tasks are: {', '.join(self.config.lora_adaptations)}. "
+                f"Alternatively, don't pass the `task` argument to disable LoRA."
+            )
+
+    def forward(
+        self, features: Dict[str, torch.Tensor], task: Optional[str] = None
+    ) -> Dict[str, torch.Tensor]:
+        """Returns token_embeddings, cls_token"""
+        self._validate_task(task)
+        task = task or self.default_task
+        adapter_mask = None
+        if task:
+            task_id = self._adaptation_map[task]
+            num_examples = features['input_ids'].size(0)
+            adapter_mask = torch.full(
+                (num_examples,), task_id, dtype=torch.int32, device=features['input_ids'].device
+            )
+
+        lora_arguments = (
+            {"adapter_mask": adapter_mask} if adapter_mask is not None else {}
+        )
+        features.pop('prompt_length', None)
+        output_states = self.auto_model.forward(**features, **lora_arguments, return_dict=False)
+        output_tokens = output_states[0]
+        features.update({"token_embeddings": output_tokens, "attention_mask": features["attention_mask"]})
+        return features
+
+    def get_word_embedding_dimension(self) -> int:
+        return self.auto_model.config.hidden_size
+
+    def tokenize(
+        self,
+        texts: Union[List[str], List[dict], List[Tuple[str, str]]],
+        padding: Union[str, bool] = True
+    ) -> Dict[str, torch.Tensor]:
+        """Tokenizes a text and maps tokens to token-ids"""
+        output = {}
+        if isinstance(texts[0], str):
+            to_tokenize = [texts]
+        elif isinstance(texts[0], dict):
+            to_tokenize = []
+            output["text_keys"] = []
+            for lookup in texts:
+                text_key, text = next(iter(lookup.items()))
+                to_tokenize.append(text)
+                output["text_keys"].append(text_key)
+            to_tokenize = [to_tokenize]
+        else:
+            batch1, batch2 = [], []
+            for text_tuple in texts:
+                batch1.append(text_tuple[0])
+                batch2.append(text_tuple[1])
+            to_tokenize = [batch1, batch2]
+
+        # strip
+        to_tokenize = [[str(s).strip() for s in col] for col in to_tokenize]
+
+        # Lowercase
+        if self.do_lower_case:
+            to_tokenize = [[s.lower() for s in col] for col in to_tokenize]
+
+        output.update(
+            self.tokenizer(
+                *to_tokenize,
+                padding=padding,
+                truncation="longest_first",
+                return_tensors="pt",
+                max_length=self.max_seq_length,
+            )
+        )
+        return output
+
+    def get_config_dict(self) -> Dict[str, Any]:
+        return {key: self.__dict__[key] for key in self.config_keys}
+
+    def save(self, output_path: str, safe_serialization: bool = True) -> None:
+        self.auto_model.save_pretrained(output_path, safe_serialization=safe_serialization)
+        self.tokenizer.save_pretrained(output_path)
+
+        with open(os.path.join(output_path, "sentence_bert_config.json"), "w") as fOut:
+            json.dump(self.get_config_dict(), fOut, indent=2)
+
+
+    @classmethod
+    def load(cls, input_path: str) -> "Transformer":
+        # Old classes used other config names than 'sentence_bert_config.json'
+        for config_name in [
+            "sentence_bert_config.json",
+            "sentence_roberta_config.json",
+            "sentence_distilbert_config.json",
+            "sentence_camembert_config.json",
+            "sentence_albert_config.json",
+            "sentence_xlm-roberta_config.json",
+            "sentence_xlnet_config.json",
+        ]:
+            sbert_config_path = os.path.join(input_path, config_name)
+            if os.path.exists(sbert_config_path):
+                break
+
+        with open(sbert_config_path) as fIn:
+            config = json.load(fIn)
+        # Don't allow configs to set trust_remote_code
+        if "model_args" in config and "trust_remote_code" in config["model_args"]:
+            config["model_args"].pop("trust_remote_code")
+        if "tokenizer_args" in config and "trust_remote_code" in config["tokenizer_args"]:
+            config["tokenizer_args"].pop("trust_remote_code")
+        if "config_args" in config and "trust_remote_code" in config["config_args"]:
+            config["config_args"].pop("trust_remote_code")
+        return cls(model_name_or_path=input_path, **config)
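
The core of custom_st.Transformer is the mapping from a task name to an `adapter_mask`: every example in the batch carries the integer id of the chosen LoRA adapter, and the underlying `XLMRobertaLoRA` model routes the forward pass accordingly. The routing logic, isolated for illustration with the adapter list from config.json:

# Sketch: how forward() turns the `task` argument into a per-example adapter_mask.
import torch

lora_adaptations = ["retrieval.query", "retrieval.passage", "separation",
                    "classification", "text-matching"]
adaptation_map = {name: idx for idx, name in enumerate(lora_adaptations)}

task = "retrieval.passage"
batch_size = 4
adapter_mask = torch.full((batch_size,), adaptation_map[task], dtype=torch.int32)
print(adapter_mask)  # tensor([1, 1, 1, 1], dtype=torch.int32)
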
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17ca06efd886a065d0081912b04c9e27ef5086a9dd09659cce32aa9c84587f23
+size 1144685320
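
model.safetensors is tracked by Git LFS, so the repository stores only this pointer; the roughly 1.1 GB weight blob is fetched on checkout or download. A small sketch for verifying a downloaded copy against the oid recorded above (the local path is an assumption):

# Sketch: check a downloaded model.safetensors against the LFS sha256 oid.
import hashlib

expected = "17ca06efd886a065d0081912b04c9e27ef5086a9dd09659cce32aa9c84587f23"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # path is an assumption
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == expected)
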
modules.json ADDED
@@ -0,0 +1,21 @@
+[
+  {
+    "idx": 0,
+    "name": "transformer",
+    "path": "",
+    "type": "custom_st.Transformer",
+    "kwargs": ["task"]
+  },
+  {
+    "idx": 1,
+    "name": "pooler",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "normalizer",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
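
modules.json wires a three-stage Sentence Transformers pipeline: the custom transformer module (which accepts the extra `task` kwarg at encode time), a pooling module loaded from `1_Pooling`, and an L2 normalization module from `2_Normalize`. The last two stages reduce token embeddings to one unit-length vector per text; a plain-PyTorch sketch of that step, assuming mean pooling (the usual `1_Pooling` setting, which is not part of this commit):

# Sketch: what the pooler + normalizer stages do to the token embeddings
# returned by custom_st.Transformer. Mean pooling is an assumption here.
import torch
import torch.nn.functional as F

def pool_and_normalize(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    mask = attention_mask.unsqueeze(-1).float()    # (batch, seq, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # masked sum over tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)       # number of real tokens
    mean_pooled = summed / counts                  # mean pooling
    return F.normalize(mean_pooled, p=2, dim=1)    # unit-length embeddings

# Shapes only, with the model's hidden size of 1024:
tokens = torch.randn(2, 7, 1024)
mask = torch.ones(2, 7, dtype=torch.long)
print(pool_and_normalize(tokens, mask).shape)  # torch.Size([2, 1024])
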
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f59925fcb90c92b894cb93e51bb9b4a6105c5c249fe54ce1c704420ac39b81af
+size 17082756
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 8194,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
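
The tokenizer is the stock XLM-RoBERTa sentencepiece tokenizer (vocab_size 250002 in config.json, with `<mask>` at id 250001) whose `model_max_length` is raised to 8194 to match the context window declared in config.json. A quick check, assuming the files in this commit are available from the Hub or a local clone:

# Sketch: load the tokenizer shipped here and confirm its configured limits.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v3")
print(tok.model_max_length)  # 8194, from tokenizer_config.json
print(tok.mask_token)        # "<mask>"

ids = tok("Jina embeddings v3 handles long documents.")["input_ids"]
print(ids[0], ids[-1])  # 0 and 2: <s> / </s> as in special_tokens_map.json
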