# finetune_bert.py
"""
Fine-tune BERT-based models
"""
from math import floor
import argparse
import numpy as np
import evaluate
from transformers import (
    AutoTokenizer,
    AutoModelForMultipleChoice,
    TrainingArguments,
    Trainer,
    EarlyStoppingCallback
)
from custom_data_collator import DataCollatorForMultipleChoice
from load_datasets import DatasetManager
from custom_trainer_callback import CustomTrainerCallback
# Args
parser = argparse.ArgumentParser(description="Fine-tune BERT-based models on BrainTeaser")
parser.add_argument("--dataset", required=True)
parser.add_argument("--checkpoint", required=True)
parser.add_argument("--tokenizer")
parser.add_argument("--name", required=True)
parser.add_argument(
    "--log_steps",
    type=float,
    required=True,
    help="A float in (0, 1] giving the logging/eval/save interval as a fraction of an epoch"
)
parser.add_argument("--epochs", type=int, default=4)
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument("--accumulation_steps", type=int, default=2)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--early_stopping_patience", type=int, default=10)
args = parser.parse_args()
args.tokenizer = args.checkpoint if args.tokenizer is None else args.tokenizer
assert 0 < args.log_steps <= 1, "log_steps must be a float in (0, 1]"
# Process examples
def preprocess(examples):
    """Tokenize and group the given examples"""
    n_choices = 4
    n_examples = len(examples['label'])
    # Repeat each question once per choice and pair it with each candidate answer
    first_sentences = [[context] * n_choices for context in examples["text"]]
    second_sentences = [[examples[f'choice{c}'][i] for c in range(n_choices)] for i in range(n_examples)]
    # Flatten into a single list of (question, choice) pairs for the tokenizer
    first_sentences = sum(first_sentences, [])
    second_sentences = sum(second_sentences, [])
    tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=512)
    # Regroup every tokenized field back into shape (n_examples, n_choices)
    return {k: [v[i : i + n_choices] for i in range(0, len(v), n_choices)] for k, v in tokenized_examples.items()}
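# A small worked illustration of preprocess, assuming the column names used
# above: for examples = {"text": ["Q?"], "label": [1], "choice0": ["a"], ...,
# "choice3": ["d"]}, the tokenizer receives the 4 pairs ("Q?", "a") .. ("Q?", "d"),
# and each returned field holds one group of 4 encodings, i.e. shape (1, 4).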
# Load dataset
dataset_manager = DatasetManager(ignore_case=False, force_4_choices=True, ds_format='bert')
if '|' in args.dataset:
    assert args.dataset.count('|') == 1, "Invalid number of datasets"
    primary_ds, secondary_ds = args.dataset.split('|')
    dataset = dataset_manager.load_combined_datasets(primary_ds, secondary_ds)
else:
    dataset = dataset_manager.load_ds(args.dataset)
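# For instance, a hypothetical invocation with --dataset "SP|WP" (placeholder
# names, not verified against DatasetManager) would fine-tune on the two
# datasets combined, while plain --dataset "SP" loads a single dataset.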
# Calculate the log steps based on the number of steps in each epoch
effective_batch_size = args.batch_size * args.accumulation_steps
args.log_steps = floor(args.log_steps * len(dataset["train"]) / effective_batch_size)
args.log_steps = max(args.log_steps, 1)
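# Worked example: with 1,000 training examples, batch_size=2 and
# accumulation_steps=2, the effective batch size is 4 and an epoch spans 250
# optimizer steps; --log_steps 0.25 then yields floor(0.25 * 1000 / 4) = 62,
# i.e. logging, evaluation and checkpointing every quarter of an epoch.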
# Load tokenizer and process dataset
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
dataset = dataset.map(preprocess, batched=True)
# Evaluation metrics
accuracy = evaluate.load("accuracy")
def compute_metrics(eval_pred):
    """Calculate accuracy metric"""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    result = accuracy.compute(predictions=predictions, references=labels)
    return result
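# With AutoModelForMultipleChoice, eval_pred.predictions has shape
# (n_examples, n_choices), so the argmax over axis=1 picks the index of the
# highest-scoring choice, which is compared against the integer gold labels.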
# Load model & start training
callback = CustomTrainerCallback(vars(args))
early_stopping = EarlyStoppingCallback(early_stopping_patience=args.early_stopping_patience)
callbacks = [callback, early_stopping]
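# Note: EarlyStoppingCallback counts evaluation calls, so with the "steps"
# strategies below, training stops after early_stopping_patience consecutive
# evaluations without an improvement in eval_accuracy.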
model = AutoModelForMultipleChoice.from_pretrained(args.checkpoint)
# Training args
training_args = TrainingArguments(
    # Saving
    output_dir=args.name,
    logging_dir=f"{args.name}/logs",
    save_strategy="steps",
    save_steps=args.log_steps,
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_accuracy",
    # Logging
    logging_strategy="steps",
    logging_steps=args.log_steps,
    # Training
    learning_rate=args.learning_rate,
    num_train_epochs=args.epochs,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    gradient_accumulation_steps=args.accumulation_steps,
    # Evaluation
    evaluation_strategy="steps",
    eval_steps=args.log_steps,
)
# Create Trainer instance
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
    compute_metrics=compute_metrics,
    callbacks=callbacks
)
# Train
trainer.train()
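# Example invocation (dataset, checkpoint and run names are placeholders):
#   python finetune_bert.py \
#       --dataset brainteaser \
#       --checkpoint bert-base-uncased \
#       --name runs/bert-brainteaser \
#       --log_steps 0.25 \
#       --epochs 4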