from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments

# Load the cleaned corpus as a single 'train' split; the 'text' loader yields
# one example per line of cleaned_data.txt, stored in a 'text' column.
dataset = load_dataset('text', data_files={'train': 'cleaned_data.txt'})

# Tokenize every example, padding and truncating to BERT's maximum sequence length.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

def tokenize_function(examples):
    return tokenizer(examples['text'], padding="max_length", truncation=True)

tokenized_datasets = dataset.map(tokenize_function, batched=True)
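
# NOTE: AutoModelForSequenceClassification expects a 'labels' column, which a
# plain text file does not provide. The placeholder below labels every example 0
# purely so the script runs end to end; swap in your real labels here.
tokenized_datasets = tokenized_datasets.map(lambda example: {'labels': 0})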

# Load BERT with a freshly initialized two-class classification head.
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)

training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
)
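
# Hold out 10% of the examples for evaluation, so each per-epoch evaluation runs
# on data the model has not trained on.
split_datasets = tokenized_datasets["train"].train_test_split(test_size=0.1)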

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=split_datasets["train"],
    eval_dataset=split_datasets["test"],
)

# Fine-tune; checkpoints and logs are written to ./results.
trainer.train()
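
# A minimal inference sketch, not part of the training pipeline above: classify
# one hypothetical sentence with the fine-tuned model. The label ids 0/1 are
# assumptions; map them to whatever your two classes actually are.
import torch

text = "Example sentence to classify."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())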