Update optimum_encoder.py
optimum_encoder.py  CHANGED  (+13 -19)
@@ -82,7 +82,7 @@ class OptimumEncoder(BaseEncoder):
         short_text = ["short"]
         short_encoded_input = tokenizer(
             short_text, padding=True, truncation=True, return_tensors="pt"
-        ).to(
+        ).to(self.device)
         short_output = ort_model(**short_encoded_input)
 
         print("Building engine for a long sequence...")
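For context, the pattern this hunk switches to relies on transformers' BatchEncoding.to(), which moves every tensor produced by the tokenizer (input_ids, attention_mask, ...) to the target device in one call. A minimal sketch of that pattern, assuming a locally resolved CUDA-or-CPU device and an illustrative checkpoint name (neither comes from this commit):

import torch
from transformers import AutoTokenizer

# Assumption: device resolved here; in the class it comes from self.device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumption: illustrative checkpoint, not the one used by OptimumEncoder
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")

short_encoded_input = tokenizer(
    ["short"], padding=True, truncation=True, return_tensors="pt"
).to(device)  # BatchEncoding.to moves all contained tensors at once

print(short_encoded_input["input_ids"].device)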
@@ -123,11 +123,11 @@ class OptimumEncoder(BaseEncoder):
 
             if pooling_strategy == "mean":
                 embeddings = self._mean_pooling(
-                    model_output, encoded_input["attention_mask"]
+                    model_output, encoded_input["attention_mask"]
                 )
             elif pooling_strategy == "max":
                 embeddings = self._max_pooling(
-                    model_output, encoded_input["attention_mask"]
+                    model_output, encoded_input["attention_mask"]
                 )
             else:
                 raise ValueError(
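The _mean_pooling and _max_pooling helpers dispatched here implement masked pooling over token embeddings: the attention mask is expanded to the embedding shape so padded positions contribute nothing to the mean, or are pushed to -1e9 so they never win the max. A small self-contained sketch of that arithmetic, with plain torch standing in for self._torch and toy values that are not from the commit:

import torch

# Toy batch: 1 sequence, 3 tokens, hidden size 2; the last token is padding
token_embeddings = torch.tensor([[[1.0, 2.0], [3.0, 4.0], [100.0, 100.0]]])
attention_mask = torch.tensor([[1, 1, 0]])

# Expand the mask to the embedding shape, as both helpers do
input_mask_expanded = (
    attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)

# Masked mean: padded positions contribute nothing; clamp guards against
# an all-zero mask causing division by zero
mean_pooled = torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
    input_mask_expanded.sum(1), min=1e-9
)
print(mean_pooled)  # tensor([[2., 3.]])

# Masked max: padded positions are set to -1e9 so they never win the max
masked = token_embeddings.clone()
masked[input_mask_expanded == 0] = -1e9
max_pooled = torch.max(masked, 1)[0]
print(max_pooled)  # tensor([[3., 4.]])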
@@ -135,36 +135,30 @@ class OptimumEncoder(BaseEncoder):
                 )
 
             if normalize_embeddings:
-
-
-
-
+                embeddings = self._torch.nn.functional.normalize(embeddings, p=2, dim=1)
+
+            if convert_to_numpy:
+                embeddings.detach().cpu().numpy()
+            else:
+                embeddings.tolist()
 
             all_embeddings.extend(embeddings)
 
         return all_embeddings
 
-    def _mean_pooling(self, model_output, attention_mask
+    def _mean_pooling(self, model_output, attention_mask):
         token_embeddings = model_output[0]
         input_mask_expanded = (
             attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
         )
-        embeddings = self._torch.sum(
+        return self._torch.sum(
             token_embeddings * input_mask_expanded, 1
         ) / self._torch.clamp(input_mask_expanded.sum(1), min=1e-9)
-        if convert_to_numpy:
-            return embeddings.detach().cpu().numpy()
-        else:
-            return embeddings
 
-    def _max_pooling(self, model_output, attention_mask
+    def _max_pooling(self, model_output, attention_mask):
         token_embeddings = model_output[0]
         input_mask_expanded = (
             attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
         )
         token_embeddings[input_mask_expanded == 0] = -1e9
-        embeddings = self._torch.max(token_embeddings, 1)[0]
-        if convert_to_numpy:
-            return embeddings.detach().cpu().numpy()
-        else:
-            return embeddings
+        return self._torch.max(token_embeddings, 1)[0]
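Assembled, the tail of the batch loop after this change presumably behaves like the sketch below. Two caveats: plain torch stands in for the class's self._torch handle, and the added convert_to_numpy branch calls .detach().cpu().numpy() / .tolist() without assigning the result, so the sketch assigns the converted value back to embeddings; that assignment is an assumption about the intent, not the literal committed code.

import torch
import torch.nn.functional as F

def finalize_embeddings(embeddings, normalize_embeddings=True, convert_to_numpy=True):
    # L2-normalize each row, matching the added normalize(..., p=2, dim=1) line
    if normalize_embeddings:
        embeddings = F.normalize(embeddings, p=2, dim=1)

    # Assumption: the converted value is assigned back before being collected;
    # the diff as committed discards the return value of these calls
    if convert_to_numpy:
        embeddings = embeddings.detach().cpu().numpy()
    else:
        embeddings = embeddings.tolist()
    return embeddings

all_embeddings = []
batch_embeddings = torch.tensor([[3.0, 4.0], [1.0, 0.0]])
all_embeddings.extend(finalize_embeddings(batch_embeddings))
print(all_embeddings[0])  # [0.6 0.8] after L2 normalization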