"""Tokenization classes for Molformer.""" |
|
from typing import List, Optional, Tuple |
|
|
|
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast |
|
from transformers.utils import logging |
|
from .tokenization_molformer import MolformerTokenizer |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "tokenizer_file": "tokenizer.json"} |
|
|
|
PRETRAINED_VOCAB_FILES_MAP = { |
|
"vocab_file": { |
|
"ibm/MoLFormer-XL-both-10pct": "https://huggingface.co./ibm/MoLFormer-XL-both-10pct/resolve/main/vocab.json", |
|
} |
|
} |
|
|
|
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { |
|
"ibm/MoLFormer-XL-both-10pct": 202, |
|
} |
|
|
|
|
|
class MolformerTokenizerFast(PreTrainedTokenizerFast): |
|
r""" |
|
Construct a "fast" Molformer tokenizer. |
|
|
|
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should |
|
refer to this superclass for more information regarding those methods. |
|
|
|
Args: |
|
vocab_file (`str`, *optional*): |
|
File containing the vocabulary. |
|
tokenizer_file (`str`, *optional*): |
|
The path to a tokenizer file to use instead of the vocab file. |
|
unk_token (`str`, *optional*, defaults to `"<unk>"`): |
|
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this |
|
token instead. |
|
sep_token (`str`, *optional*, defaults to `"<eos>"`): |
|
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for |
|
sequence classification or for a text and a question for question answering. It is also used as the last |
|
token of a sequence built with special tokens. |
|
pad_token (`str`, *optional*, defaults to `"<pad>"`): |
|
The token used for padding, for example when batching sequences of different lengths. |
|
cls_token (`str`, *optional*, defaults to `"<bos>"`): |
|
The classifier token which is used when doing sequence classification (classification of the whole sequence |
|
instead of per-token classification). It is the first token of the sequence when built with special tokens. |
|
mask_token (`str`, *optional*, defaults to `"<mask>"`): |
|
The token used for masking values. This is the token used when training this model with masked language |
|
modeling. This is the token which the model will try to predict. |
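
    Example (a minimal usage sketch; it assumes the checkpoint's tokenizer config resolves to this class, e.g. when
    loading with `trust_remote_code=True`):

    ```python
    >>> from transformers import AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("ibm/MoLFormer-XL-both-10pct", trust_remote_code=True)
    >>> encoding = tokenizer("CCO")  # a SMILES string (ethanol); exact IDs depend on the checkpoint vocabulary
    >>> # `encoding` contains "input_ids" and "attention_mask", matching `model_input_names`
    ```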
|
""" |
|
|
|
vocab_files_names = VOCAB_FILES_NAMES |
|
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP |
|
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
|
model_input_names = ["input_ids", "attention_mask"] |
|
slow_tokenizer_class = MolformerTokenizer |
|
|
|
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        sep_token="<eos>",
        pad_token="<pad>",
        cls_token="<bos>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A Molformer sequence has the following format:

        - single sequence: `<bos> X <eos>`
        - pair of sequences: `<bos> A <eos> B <eos>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
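
        Example (an illustrative sketch; `tokenizer` is a loaded `MolformerTokenizerFast`, and the special token
        IDs depend on its vocabulary):

        ```python
        >>> ids = tokenizer.build_inputs_with_special_tokens([5, 6, 7])
        >>> # ids == [tokenizer.cls_token_id, 5, 6, 7, tokenizer.sep_token_id]
        ```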
|
""" |
|
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] |
|
|
|
if token_ids_1 is not None: |
|
output += token_ids_1 + [self.sep_token_id] |
|
|
|
return output |
|
|
|
|
|
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Molformer
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
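
        Example (an illustrative sketch; the mask depends only on the lengths of the two sequences):

        ```python
        >>> # 1 (`<bos>`) + 3 + 1 (`<eos>`) zeros, then 2 + 1 (`<eos>`) ones
        >>> tokenizer.create_token_type_ids_from_sequences([5, 6, 7], [8, 9])
        [0, 0, 0, 0, 0, 1, 1, 1]
        ```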
|
""" |
|
sep = [self.sep_token_id] |
|
cls = [self.cls_token_id] |
|
if token_ids_1 is None: |
|
return len(cls + token_ids_0 + sep) * [0] |
|
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] |
|
|
|
|
|
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary files to `save_directory` by delegating to the backing `tokenizers` model.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple[str]`: Paths to the files saved.
        """
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)