inference.py
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer


class DebertaEvaluator(nn.Module):
    """DeBERTa-v3 encoder with mean pooling and a 6-output linear head."""

    def __init__(self):
        super().__init__()
        self.deberta = AutoModel.from_pretrained('microsoft/deberta-v3-base')
        self.dropout = nn.Dropout(0.5)
        self.linear = nn.Linear(768, 6)

    def forward(self, input_id, mask):
        output = self.deberta(input_ids=input_id, attention_mask=mask)
        # Mean-pool token embeddings over the sequence dimension, then apply dropout and the linear head
        output_pooled = torch.mean(output.last_hidden_state, 1)
        dropout_output = self.dropout(output_pooled)
        linear_output = self.linear(dropout_output)
        return linear_output
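

# A minimal sketch (assumption, not part of the original script) of how the
# checkpoint files loaded in inference() could have been produced after
# fine-tuning:
#
#     model = DebertaEvaluator()
#     tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-v3-base')
#     ...  # fine-tuning loop
#     torch.save(model, 'fine-tuned-model.pt')
#     torch.save(tokenizer, 'fine-tuned-tokenizer.pt')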


def inference(input_text):
    """Run the saved fine-tuned model on input_text and return its 6 outputs as a nested list."""
    saved_model_path = './'
    # Both the model and the tokenizer were saved as whole pickled objects, so torch.load restores them directly
    model = torch.load(saved_model_path + 'fine-tuned-model.pt', map_location=torch.device('cpu'))
    tokenizer = torch.load(saved_model_path + 'fine-tuned-tokenizer.pt', map_location=torch.device('cpu'))
    model.eval()
    # Tokenize to PyTorch tensors of shape (1, sequence_length)
    input_data = tokenizer(input_text, return_tensors='pt')
    with torch.no_grad():
        output = model(input_data['input_ids'], input_data['attention_mask'])
    return output.tolist()


if __name__ == "__main__":
    print(inference("This is an example input text."))  # placeholder text; replace with real input