rloo.py
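"""RLOO / online best-of-k training script built on TRL's RLOOTrainer.

A sketch of a typical launch (the config path below is a placeholder, not a
file shipped with this snippet):

    accelerate launch rloo.py --config recipes/rloo_config.yaml

Arguments can also be passed directly on the command line, e.g.
--dataset_name <hf-dataset> --bok to train with OnlineBoKTrainer.
"""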
import multiprocessing
import os
from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)
from trl import ModelConfig
from trl.trainer.rloo_trainer import RLOOConfig

from src.online_bok_trainer import OnlineBoKTrainer
from src.rloo_trainer import MyRLOOTrainer as RLOOTrainer
from src.utils import TRLParser, WandbLogModelConfig

@dataclass
class ScriptArguments:
    output_global_parent_dir: Optional[str] = field(default=None, metadata={"help": "parent directory under which the run's output_dir is nested"})
    dataset_name: Optional[str] = field(default=None, metadata={"help": "the dataset name"})
    # dataset_text_field: str = field(default=None, metadata={"help": "the text field of the dataset"})
    dataset_train_split: str = field(default="train", metadata={"help": "the name of the training split of the dataset"})
    dataset_test_split: str = field(default="test", metadata={"help": "the name of the test split of the dataset"})
    # output_model_name: str = field(default="", metadata={"help": "model name to upload"})
    max_length: int = field(default=512, metadata={"help": "the maximum prompt length; longer examples are filtered out"})
    config: Optional[str] = field(default=None, metadata={"help": "path to the optional config file"})
    vllm: bool = field(default=False)
    bok: bool = field(default=False, metadata={"help": "train with OnlineBoKTrainer instead of RLOOTrainer"})
    wandb_run_id: Optional[str] = field(default=None, metadata={"help": "W&B run id, or 'slurm' to derive one from SLURM_JOB_ID"})


def prepare_dataset(dataset, tokenizer):
    """Pre-tokenize the dataset (expects a "query" column) before training; only collate during training."""

    def tokenize(element):
        input_ids = tokenizer(
            element["query"],
            padding=False,
        )["input_ids"]
        return {"input_ids": input_ids, "lengths": [len(ids) for ids in input_ids]}

    return dataset.map(
        tokenize,
        batched=True,
        remove_columns=dataset.column_names,
        num_proc=multiprocessing.cpu_count(),
    )


if __name__ == "__main__":
    parser = TRLParser((ScriptArguments, RLOOConfig, ModelConfig))
    args, config, model_config = parser.parse_args_and_config()

    if args.output_global_parent_dir is not None:
        run_id = os.path.basename(os.getcwd())
        config.output_dir = os.path.join(args.output_global_parent_dir, run_id, config.output_dir)

    if args.wandb_run_id == "slurm":
        run_id = os.environ["SLURM_JOB_ID"]
        config_name = os.path.basename(config.output_dir)
        # save to parent / slurm job id / output_dir
        if args.output_global_parent_dir is not None:
            config.output_dir = os.path.join(args.output_global_parent_dir, run_id, config.output_dir)
        os.environ["WANDB_RUN_ID"] = run_id + "_" + config_name
    elif args.wandb_run_id is not None:
        # guard against the default None, which os.environ cannot store
        os.environ["WANDB_RUN_ID"] = args.wandb_run_id

    ################
    # Model & Tokenizer
    ################
    tokenizer = AutoTokenizer.from_pretrained(
        model_config.model_name_or_path,
        padding_side="left",
        trust_remote_code=True,
    )
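    # The reward model is a scalar-head sequence classifier (num_labels=1);
    # the policy and the reference policy start from the same SFT checkpoint.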
    reward_model = AutoModelForSequenceClassification.from_pretrained(config.reward_model_path, num_labels=1)
    ref_policy = AutoModelForCausalLM.from_pretrained(config.sft_model_path)
    policy = AutoModelForCausalLM.from_pretrained(config.sft_model_path)

    ################
    # Dataset
    ################
    raw_datasets = load_dataset(args.dataset_name)
    if config.sanity_check:
        for key in raw_datasets:
            raw_datasets[key] = raw_datasets[key].select(range(1024))
        config.push_to_hub = False
        config.report_to = ""
        config.save_strategy = "no"
        config.num_sample_generations = 0

    train_dataset = raw_datasets[args.dataset_train_split]
    eval_dataset = raw_datasets[args.dataset_test_split]
    train_dataset = prepare_dataset(train_dataset, tokenizer)
    eval_dataset = prepare_dataset(eval_dataset, tokenizer)

    # drop examples whose tokenized prompt exceeds max_length
    train_dataset = train_dataset.filter(lambda x: x["lengths"] <= args.max_length)
    eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= args.max_length)
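    # sanity check: prompts should not already end with EOS, since EOS is
    # expected to come from the generated response during rollouts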
    assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token"

    ################
    # Training
    ################
    if args.bok:
        TrainerCls = OnlineBoKTrainer
    else:
        TrainerCls = RLOOTrainer

    trainer = TrainerCls(
        config=config,
        tokenizer=tokenizer,
        policy=policy,
        ref_policy=ref_policy,
        reward_model=reward_model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        callbacks=[WandbLogModelConfig(model_config)],
    )
    trainer.train()

    if not config.sanity_check:
        trainer.save_model(config.output_dir)
        if config.push_to_hub:
            trainer.push_to_hub()
        trainer.generate_completions()
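
    # expose the final output directory as a stable ./output_dir symlink
    # in the launch directory (main process only)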
    if trainer.accelerator.is_main_process:
        try:
            os.remove("output_dir")
        except OSError:
            pass
        os.symlink(config.output_dir, "output_dir")