# task2_functions.py
import torch
import matplotlib.pyplot as plt
from torch import nn
from numpy.random import default_rng
# class definitions
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
class CNeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.conv_stack = nn.Sequential(
            # 1x28x28 -> 16x14x14
            nn.Conv2d(1, 16, 3, stride=2, padding=1, padding_mode='reflect'),
            nn.ReLU(),
            # 16x14x14 -> 256x7x7
            nn.Conv2d(16, 256, 3, stride=2, padding=1, padding_mode='reflect'),
            nn.ReLU(),
            # 256x7x7 -> 512x3x3
            nn.Conv2d(256, 512, 7, stride=1, padding=1, padding_mode='reflect'),
            # 512x3x3 -> 512x1x1
            nn.MaxPool2d(3),
        )
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.conv_stack(x)
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
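# Quick shape sanity check; a minimal sketch of my own (the helper name
# `_check_output_shapes` is not part of the original module). Both networks
# should map a batch of 1x28x28 images to (batch, 10) logits: the MLP
# flattens to 28*28 = 784 features, while the conv stack reduces each image
# to a single 512-dim vector (28 -> 14 -> 7 -> 3 -> 1 spatially) before the
# linear head.
def _check_output_shapes(batch_size=8):
    x = torch.randn(batch_size, 1, 28, 28)
    for net in (NeuralNetwork(), CNeuralNetwork()):
        assert net(x).shape == (batch_size, 10)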
# Convenience functions
def train(device, dataloader, model, loss_fn, optimizer):
    model.train()
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def test(device, dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
@torch.no_grad()
def explore_wrong_5x5(dataloader, model, device, class_labels=None, seed=None, replace=False):
    """Plot a 5x5 grid of randomly chosen misclassified examples."""
    model.eval()
    rng = default_rng(seed)
    # Collect misclassified images, predictions, and ground truths per batch,
    # then concatenate once (avoids repeated torch.cat calls and the dtype
    # mismatch of seeding the image buffer with an int64 tensor).
    all_wrong, preds, gtruths = [], [], []
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        pred = model(X).argmax(1)
        wrong = pred != y
        all_wrong.append(X[wrong])
        preds.append(pred[wrong])
        gtruths.append(y[wrong])
    all_wrong = torch.cat(all_wrong)
    preds = torch.cat(preds)
    gtruths = torch.cat(gtruths)
    # Sample 25 examples (needs at least 25 misclassifications when replace=False).
    example_ixs = rng.choice(len(gtruths), 25, replace=replace)
    fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(14, 14))
    fig.tight_layout()
    for i, ix in enumerate(example_ixs):
        X = all_wrong[ix]
        y = gtruths[ix]
        y_guess = preds[ix]
        if class_labels:
            true = class_labels[y]
            guess = class_labels[y_guess]
        else:
            true = str(int(y))
            guess = str(int(y_guess))
        ax = axes.flatten()[i]
        ax.set_title(f'True: {true}, Guess: {guess}')
        im = X.squeeze().cpu()
        ax.imshow(im, cmap='gray')
    model.train()
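# A self-contained usage sketch, guarded so nothing runs on import. It assumes
# torchvision's FashionMNIST purely for illustration; the original project may
# train on different data.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import datasets
    from torchvision.transforms import ToTensor

    device = "cuda" if torch.cuda.is_available() else "cpu"
    train_data = datasets.FashionMNIST(root="data", train=True, download=True, transform=ToTensor())
    test_data = datasets.FashionMNIST(root="data", train=False, download=True, transform=ToTensor())
    train_loader = DataLoader(train_data, batch_size=64)
    test_loader = DataLoader(test_data, batch_size=64)

    model = run_training(device, train_loader, test_loader, epochs=1)
    explore_wrong_5x5(test_loader, model, device, class_labels=test_data.classes, seed=0)
    plt.show()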