|
from typing import List |
|
from resources import set_start, audit_elapsedtime, entities_list_to_dict |
|
from transformers import BertTokenizer, BertForTokenClassification |
|
import torch |
|
|
|
|
|
|
|
def init_model_ner():
    """Load and return the tokenizer/model pair used for NER.

    Returns:
        tuple: ``(tokenizer, model)`` — a ``BertTokenizer`` and a
        ``BertForTokenClassification`` loaded from the same checkpoint.
    """
    print("Initiating NER model...")
    start = set_start()

    # Bug fix: the tokenizer and the model MUST come from the same
    # checkpoint. The previous code paired an *uncased* base tokenizer
    # ("bert-base-uncased") with a *cased* large model, so the token ids
    # did not match the model's vocabulary and predictions were unreliable.
    checkpoint = "dbmdz/bert-large-cased-finetuned-conll03-english"
    tokenizer = BertTokenizer.from_pretrained(checkpoint)
    model = BertForTokenClassification.from_pretrained(checkpoint)

    audit_elapsedtime(function="Initiating NER model", start=start)
    return tokenizer, model
|
|
|
def get_entity_results(tokenizer, model, text: str, entities_list: List[str]):
    """Run token-classification NER over *text* and return the recognized
    entity strings that also appear in *entities_list*.

    Args:
        tokenizer: tokenizer matching *model* (see ``init_model_ner``).
        model: a token-classification model whose ``config.id2label`` maps
            label ids to BIO tags (``B-…`` / ``I-…`` / ``O``).
        text: the raw input text to analyse.
        entities_list: whitelist of entity strings to keep.

    Returns:
        list[str]: recognized entities present in *entities_list*.
    """
    print("Initiating entity recognition...")
    start = set_start()

    input_ids = tokenizer.encode(text, return_tensors="pt")
    # Bug fix: derive the token strings directly from the encoded ids so the
    # i-th token is guaranteed to align with the i-th predicted label. The
    # previous tokenize(decode(encode(text))) round-trip built the token list
    # independently of input_ids and could drift out of alignment.
    tokens = tokenizer.convert_ids_to_tokens(input_ids[0])

    with torch.no_grad():
        outputs = model(input_ids)

    # Highest-scoring label id per token for the (single) sequence in batch.
    predicted_labels = torch.argmax(outputs.logits, dim=2)[0]

    # Reassemble BIO-tagged tokens into entity strings.
    entities = []
    current_entity = ""
    for token, label_id in zip(tokens, predicted_labels):
        label = model.config.id2label[label_id.item()]
        if label.startswith('B-'):
            if current_entity:
                entities.append(current_entity.strip())
            current_entity = token
        elif label.startswith('I-'):
            current_entity += " " + token
        else:
            if current_entity:
                entities.append(current_entity.strip())
            current_entity = ""
    # Bug fix: an entity that extends to the final token was previously
    # dropped because it was never flushed after the loop.
    if current_entity:
        entities.append(current_entity.strip())

    # Keep only the entities the caller explicitly asked for.
    filtered_entities = [entity for entity in entities if entity in entities_list]

    # ("Retreiving" typo fixed in the audit label.)
    audit_elapsedtime(function="Retrieving entity labels from text", start=start)
    return filtered_entities