import os
import random
import string
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

import hydra
import omegaconf
import pytorch_lightning as pl
import torch
import torch.multiprocessing
from omegaconf import OmegaConf, listconfig
from pytorch_lightning import LightningModule
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.utilities import rank_zero_only

from boltz.data.module.training import BoltzTrainingDataModule, DataConfig


@dataclass
class TrainConfig:
    """Train configuration.

    Attributes
    ----------
    data : DataConfig
        The data configuration.
    model : LightningModule
        The model module.
    output : str
        The output directory.
    trainer : Optional[dict]
        The trainer configuration.
    resume : Optional[str]
        The resume checkpoint.
    pretrained : Optional[str]
        The pretrained model.
    wandb : Optional[dict]
        The wandb configuration.
    disable_checkpoint : bool
        Disable checkpointing.
    matmul_precision : Optional[str]
        The matmul precision.
    find_unused_parameters : Optional[bool]
        Whether DDP should find unused parameters.
    save_top_k : Optional[int]
        Save the top k checkpoints.
    validation_only : bool
        Run validation only.
    debug : bool
        Debug mode.
    strict_loading : bool
        Fail on mismatched checkpoint weights.
    load_confidence_from_trunk : Optional[bool]
        Load pre-trained confidence weights from the trunk.
    """

    data: DataConfig
    model: LightningModule
    output: str
    trainer: Optional[dict] = None
    resume: Optional[str] = None
    pretrained: Optional[str] = None
    wandb: Optional[dict] = None
    disable_checkpoint: bool = False
    matmul_precision: Optional[str] = None
    find_unused_parameters: Optional[bool] = False
    save_top_k: Optional[int] = 1
    validation_only: bool = False
    debug: bool = False
    strict_loading: bool = True
    load_confidence_from_trunk: Optional[bool] = False
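

# A minimal YAML config sketch matching TrainConfig (illustrative only; the
# `_target_` path below is a hypothetical example, not pinned to this repo's
# actual module layout):
#
#   data: {...}          # DataConfig fields
#   model:
#     _target_: boltz.model.model.Boltz1
#   output: ./outputs
#   trainer:
#     devices: 1
#     max_epochs: 100
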
def train(raw_config: str, args: list[str]) -> None: # noqa: C901, PLR0912, PLR0915
"""Run training.
Parameters
----------
raw_config : str
The input yaml configuration.
args : list[str]
Any command line overrides.
"""

    # Load the configuration
    raw_config = omegaconf.OmegaConf.load(raw_config)

    # Apply input arguments
    args = omegaconf.OmegaConf.from_dotlist(args)
    raw_config = omegaconf.OmegaConf.merge(raw_config, args)

    # Instantiate the task
    cfg = hydra.utils.instantiate(raw_config)
    cfg = TrainConfig(**cfg)
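    # Note: hydra.utils.instantiate recursively constructs any object whose
    # config node carries a `_target_` key, so cfg.model arrives here as a
    # fully built LightningModule rather than a raw dict.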

    # Set matmul precision
    if cfg.matmul_precision is not None:
        torch.set_float32_matmul_precision(cfg.matmul_precision)
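        # torch accepts "highest", "high", or "medium"; the last two allow
        # TF32 or reduced-precision matmul kernels on supported hardware.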

    # Create trainer dict
    trainer = cfg.trainer
    if trainer is None:
        trainer = {}

    # Flip some arguments in debug mode
    devices = trainer.get("devices", 1)
    wandb = cfg.wandb
    if cfg.debug:
        if isinstance(devices, int):
            devices = 1
        elif isinstance(devices, (list, listconfig.ListConfig)):
            devices = [devices[0]]
        trainer["devices"] = devices
        cfg.data.num_workers = 0
        if wandb:
            wandb = None
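        # Debug mode keeps everything in a single process: one device, no
        # dataloader workers, and no wandb logging.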

    # Create objects
    data_config = DataConfig(**cfg.data)
    data_module = BoltzTrainingDataModule(data_config)
    model_module = cfg.model

    if cfg.pretrained and not cfg.resume:
        # Load the pretrained weights into the confidence module
        if cfg.load_confidence_from_trunk:
            checkpoint = torch.load(cfg.pretrained, map_location="cpu")

            # Modify parameter names in the state_dict
            new_state_dict = {}
            for key, value in checkpoint["state_dict"].items():
                if not key.startswith("structure_module") and not key.startswith(
                    "distogram_module"
                ):
                    new_key = "confidence_module." + key
                    new_state_dict[new_key] = value
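
            # Re-adding the original (unprefixed) keys keeps the trunk weights
            # alongside their `confidence_module.`-prefixed copies, so both
            # modules start from the same pre-trained trunk.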
            new_state_dict.update(checkpoint["state_dict"])

            # Update the checkpoint with the new state_dict
            checkpoint["state_dict"] = new_state_dict

            # Save the modified checkpoint
            random_string = "".join(
                random.choices(string.ascii_lowercase + string.digits, k=10)
            )
            file_path = os.path.join(
                os.path.dirname(cfg.pretrained), random_string + ".ckpt"
            )
            print(
                f"Saving modified checkpoint to {file_path}, created by "
                f"broadcasting the trunk of {cfg.pretrained} to the confidence module."
            )
            torch.save(checkpoint, file_path)
        else:
            file_path = cfg.pretrained

        print(f"Loading model from {file_path}")
        model_module = type(model_module).load_from_checkpoint(
            file_path, map_location="cpu", strict=False, **(model_module.hparams)
        )
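
        # Clean up the temporary re-keyed checkpoint now that its weights
        # have been loaded.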
        if cfg.load_confidence_from_trunk:
            os.remove(file_path)

    # Create checkpoint callback
    callbacks = []
    dirpath = cfg.output
    if not cfg.disable_checkpoint:
        mc = ModelCheckpoint(
            monitor="val/lddt",
            save_top_k=cfg.save_top_k,
            save_last=True,
            mode="max",
            every_n_epochs=1,
        )
        callbacks = [mc]
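        # Note: "val/lddt" must actually be logged during validation for the
        # monitor to rank checkpoints; save_last also keeps a rolling
        # last.ckpt for resuming.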

    # Create wandb logger
    loggers = []
    if wandb:
        wdb_logger = WandbLogger(
            name=wandb["name"],
            group=wandb["name"],
            save_dir=cfg.output,
            project=wandb["project"],
            entity=wandb["entity"],
            log_model=False,
        )
        loggers.append(wdb_logger)

        # Save the config to wandb
        @rank_zero_only
        def save_config_to_wandb() -> None:
            config_out = Path(wdb_logger.experiment.dir) / "run.yaml"
            with config_out.open("w") as f:
                OmegaConf.save(raw_config, f)
            wdb_logger.experiment.save(str(config_out))

        save_config_to_wandb()
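        # rank_zero_only makes this call a no-op on every process except
        # rank 0, so the config is uploaded exactly once under DDP.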

    # Set up trainer
    strategy = "auto"
    if (isinstance(devices, int) and devices > 1) or (
        isinstance(devices, (list, listconfig.ListConfig)) and len(devices) > 1
    ):
        strategy = DDPStrategy(find_unused_parameters=cfg.find_unused_parameters)
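        # find_unused_parameters=True adds a per-step scan of the autograd
        # graph, so it should stay False unless some parameters legitimately
        # receive no gradient (e.g. conditionally executed branches).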

    trainer = pl.Trainer(
        default_root_dir=str(dirpath),
        strategy=strategy,
        callbacks=callbacks,
        logger=loggers,
        enable_checkpointing=not cfg.disable_checkpoint,
        reload_dataloaders_every_n_epochs=1,
        **trainer,
    )

    if not cfg.strict_loading:
        model_module.strict_loading = False

    if cfg.validation_only:
        print(f"====== Running validation on {cfg.resume} ======")
        trainer.validate(
            model_module,
            datamodule=data_module,
            ckpt_path=cfg.resume,
        )
    else:
        trainer.fit(
            model_module,
            datamodule=data_module,
            ckpt_path=cfg.resume,
        )


if __name__ == "__main__":
    arg1 = sys.argv[1]
    arg2 = sys.argv[2:]
    train(arg1, arg2)
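

# Example invocation (hypothetical paths): the first CLI argument is the
# YAML config, and any remaining arguments are dotlist overrides merged
# on top of it, e.g.:
#
#   python train.py configs/train.yaml output=./runs/exp1 trainer.devices=4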