train.py
from os import path
from typing import Optional

import torch
from lightning.pytorch import LightningDataModule, LightningModule, cli_lightning_logo
from lightning.pytorch.cli import LightningCLI
from lightning.pytorch.demos.mnist_datamodule import MNIST
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms

DATASETS_PATH = path.join(path.dirname(__file__), "Datasets")


class Backbone(torch.nn.Module):
    """
    >>> Backbone()  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    Backbone(
      (l1): Linear(...)
      (l2): Linear(...)
    )
    """

    def __init__(self, hidden_dim=128):
        super().__init__()
        self.l1 = torch.nn.Linear(28 * 28, hidden_dim)
        self.l2 = torch.nn.Linear(hidden_dim, 10)

    def forward(self, x):
        # flatten each 28x28 image into a 784-dim vector before the linear layers
        x = x.view(x.size(0), -1)
        x = torch.relu(self.l1(x))
        return torch.relu(self.l2(x))
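
# A minimal shape check for Backbone (illustrative sketch, not part of the
# original script; kept commented out so it does not run at import time).
# A batch of four 28x28 MNIST images is flattened to 784 features and mapped
# to 10 class scores, matching the layer sizes defined above:
#
#   backbone = Backbone(hidden_dim=64)
#   out = backbone(torch.randn(4, 1, 28, 28))
#   assert out.shape == (4, 10)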
class LitClassifier(LightningModule):
    """
    >>> LitClassifier(Backbone())  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    LitClassifier(
      (backbone): ...
    )
    """

    def __init__(self, backbone: Optional[Backbone] = None, learning_rate: float = 0.0001):
        super().__init__()
        self.save_hyperparameters(ignore=["backbone"])
        if backbone is None:
            backbone = Backbone()
        self.backbone = backbone

    def forward(self, x):
        # use forward for inference/predictions
        return self.backbone(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("train_loss", loss, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("valid_loss", loss, on_step=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("test_loss", loss)

    def predict_step(self, batch, batch_idx, dataloader_idx=None):
        x, _ = batch
        return self(x)

    def configure_optimizers(self):
        # self.hparams available because we called self.save_hyperparameters()
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
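
# A manual training run without LightningCLI (hedged sketch: max_epochs=1 and
# the Trainer defaults are assumptions, not taken from this script; MyDataModule
# is defined below). Kept commented out so it does not run at import time:
#
#   from lightning.pytorch import Trainer
#   model = LitClassifier(Backbone(), learning_rate=1e-3)
#   Trainer(max_epochs=1).fit(model, datamodule=MyDataModule(batch_size=64))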
class MyDataModule(LightningDataModule):
    def __init__(self, batch_size: int = 32):
        super().__init__()
        dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transforms.ToTensor())
        self.mnist_test = MNIST(DATASETS_PATH, train=False, download=True, transform=transforms.ToTensor())
        # fixed-seed split of the 60k MNIST training images into train/val subsets
        self.mnist_train, self.mnist_val = random_split(
            dataset, [55000, 5000], generator=torch.Generator().manual_seed(42)
        )
        self.batch_size = batch_size

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.batch_size)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=self.batch_size)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.batch_size)

    def predict_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.batch_size)
def cli_main():
    # run=False: the CLI only parses arguments and instantiates the classes;
    # fit/test/predict are invoked manually below
    cli = LightningCLI(
        LitClassifier, MyDataModule, seed_everything_default=1234, save_config_kwargs={"overwrite": True}, run=False
    )
    cli.trainer.fit(cli.model, datamodule=cli.datamodule)
    cli.trainer.test(ckpt_path="best", datamodule=cli.datamodule)
    predictions = cli.trainer.predict(ckpt_path="best", datamodule=cli.datamodule)
    print(predictions[0])


if __name__ == "__main__":
    print(DATASETS_PATH)
    cli_lightning_logo()
    cli_main()
Main problem: the batch size is not defined; use train2.py instead.
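
Note that MyDataModule defaults batch_size to 32, and LightningCLI exposes
datamodule and model init arguments as command-line flags, so the batch size
can also be set at run time. A hedged example invocation (the flag names follow
LightningCLI's standard --data.<arg> / --model.<arg> convention; this is a
sketch and has not been checked against train2.py):

    python train.py --data.batch_size 64 --model.learning_rate 0.001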