_commit_hash: 41265b09a862144b2517afdfd46da4388f1380df
_name_or_path: tau/tavbert-he
add_cross_attention: false
architectures:
- RobertaForMaskedLM
attention_probs_dropout_prob: 0.1
bad_words_ids: null
begin_suppress_tokens: null
bos_token_id: 0
chunk_size_feed_forward: 0
classifier_dropout: null
cross_attention_hidden_size: null
decoder_start_token_id: null
diversity_penalty: 0.0
do_sample: false
early_stopping: false
encoder_no_repeat_ngram_size: 0
eos_token_id: 2
exponential_decay_length_penalty: null
finetuning_task: null
forced_bos_token_id: null
forced_eos_token_id: null
gradient_checkpointing: false
hidden_act: gelu
hidden_dropout_prob: 0.1
hidden_size: 768
id2label:
  0: LABEL_0
  1: LABEL_1
initializer_range: 0.02
intermediate_size: 3072
is_decoder: false
is_encoder_decoder: false
label2id:
  LABEL_0: 0
  LABEL_1: 1
layer_norm_eps: 1.0e-05
length_penalty: 1.0
max_length: 512
max_position_embeddings: 2050
min_length: 0
model_type: roberta
no_repeat_ngram_size: 0
num_attention_heads: 12
num_beam_groups: 1
num_beams: 1
num_hidden_layers: 12
num_return_sequences: 1
output_attentions: false
output_hidden_states: false
output_scores: false
pad_token_id: 1
position_embedding_type: absolute
prefix: null
problem_type: null
pruned_heads: {}
remove_invalid_values: false
repetition_penalty: 1.0
return_dict: true
return_dict_in_generate: false
sep_token_id: null
suppress_tokens: null
task_specific_params: null
temperature: 1.0
tf_legacy_loss: false
tie_encoder_decoder: false
tie_word_embeddings: true
tokenizer_class: null
top_k: 50
top_p: 1.0
torch_dtype: null
torchscript: false
transformers_version: 4.6.0.dev0
type_vocab_size: 2
typical_p: 1.0
use_bfloat16: false
use_cache: true
vocab_size: 345
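
The fields above form a standard `transformers` RoBERTa configuration, so they can be read back programmatically. A minimal sketch, assuming the `tau/tavbert-he` checkpoint is reachable on the Hugging Face Hub and a recent `transformers` release (with `torch`) is installed:

```python
from transformers import AutoConfig, AutoModelForMaskedLM

# Fetch the configuration for the checkpoint named in _name_or_path above.
config = AutoConfig.from_pretrained("tau/tavbert-he")

# A few of the fields listed in this section, read back from the config object.
print(config.model_type)         # roberta
print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 12
print(config.vocab_size)         # 345

# The weights load through the class named under `architectures`
# (RobertaForMaskedLM); AutoModelForMaskedLM resolves to it via model_type.
model = AutoModelForMaskedLM.from_pretrained("tau/tavbert-he")
```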