"""train2.py: train a simple MNIST classifier with PyTorch Lightning."""
from os import path
from typing import Optional

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms

import lightning
from lightning.pytorch import LightningModule
from lightning.pytorch.demos.mnist_datamodule import MNIST

DATASETS_PATH = path.join(path.dirname(__file__), "Datasets")


class Backbone(torch.nn.Module):
    """A two-layer MLP that maps flattened 28x28 MNIST images to 10 class logits.

    >>> Backbone()  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    Backbone(
      (l1): Linear(...)
      (l2): Linear(...)
    )
    """

    def __init__(self, hidden_dim=128):
        super().__init__()
        self.l1 = torch.nn.Linear(28 * 28, hidden_dim)
        self.l2 = torch.nn.Linear(hidden_dim, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = torch.relu(self.l1(x))
        # Return raw logits: F.cross_entropy applies log-softmax internally,
        # so a trailing ReLU would only clamp negative logits to zero.
        return self.l2(x)
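
# Shape sanity check (a minimal sketch, not part of the original script): a
# batch of four 1x28x28 images maps to a (4, 10) tensor of logits.
#   logits = Backbone()(torch.zeros(4, 1, 28, 28))
#   assert logits.shape == (4, 10)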


class LitClassifier(LightningModule):
    """A LightningModule that trains a Backbone with cross-entropy loss.

    >>> LitClassifier(Backbone())  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    LitClassifier(
      (backbone): ...
    )
    """

    def __init__(self, backbone: Optional[Backbone] = None, learning_rate: float = 0.0001):
        super().__init__()
        self.save_hyperparameters(ignore=["backbone"])
        if backbone is None:
            backbone = Backbone()
        self.backbone = backbone

    def forward(self, x):
        # use forward for inference/predictions
        return self.backbone(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("train_loss", loss, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("valid_loss", loss, on_step=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("test_loss", loss)

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        x, _ = batch
        return self(x)

    def configure_optimizers(self):
        # self.hparams is available because __init__ called self.save_hyperparameters()
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
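
# Usage sketch (the arguments here are illustrative, not from the original):
#   model = LitClassifier(Backbone(hidden_dim=256), learning_rate=3e-4)
# save_hyperparameters(ignore=["backbone"]) stores learning_rate in
# self.hparams without attempting to serialize the backbone module itself.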


# Download MNIST and carve a 55,000/5,000 train/validation split; the fixed
# generator seed keeps the split reproducible across runs.
dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transforms.ToTensor())
mnist_test = MNIST(DATASETS_PATH, train=False, download=True, transform=transforms.ToTensor())
mnist_train, mnist_val = random_split(
    dataset, [55000, 5000], generator=torch.Generator().manual_seed(42)
)
batch_size = 2056

if __name__ == "__main__":
    lit_classifier = LitClassifier()
    trainer = lightning.Trainer(max_epochs=10)
    # Pass batch_size explicitly so both loaders use the value defined above.
    trainer.fit(
        lit_classifier,
        DataLoader(mnist_train, batch_size=batch_size, num_workers=4),
        DataLoader(mnist_val, batch_size=batch_size, num_workers=4),
    )
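    # Optional follow-up (a minimal sketch): evaluate the trained model on the
    # held-out test split; this logs "test_loss" via test_step above.
    trainer.test(lit_classifier, DataLoader(mnist_test, batch_size=batch_size, num_workers=4))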