lfw_verification.py
import nptorch
from nptorch.utils.data import Dataset, DataLoader, ImageFolder
from nptorch.transforms import ToTensor, Resize, Compose, Grayscale
from nptorch import nn
from nptorch.nn import functional as F
from nptorch.optim import SGD
from tqdm import tqdm
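
# Preprocessing for every face crop: convert to single-channel grayscale,
# resize to 36x36, then convert to an nptorch tensor.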
trans = Compose([Grayscale(1),
                 Resize((36, 36)),
                 ToTensor()])
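

# LFWDataset: each sample is a pair of consecutive images from an ImageFolder,
# plus a float label taken from the first image's folder
# (1.0 = same person, 0.0 = different people).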
class LFWDataset(Dataset):
    def __init__(self, root_dir=None, transform=None):
        super(LFWDataset, self).__init__()
        if root_dir:
            self.data = ImageFolder(root_dir)
        else:
            self.data = None
        self.transform = transform

    def __getitem__(self, index):
        """
        :param index: index of the image pair
        :return: two images and a label; label 1 means the same person, 0 means different people
        """
        img1, label = self.data[2 * index]
        img2 = self.data[2 * index + 1][0]
        if self.transform:
            img1, img2 = self.transform(img1), self.transform(img2)
        return img1, img2, nptorch.tensor(label, dtype=nptorch.float32)

    def __len__(self):
        return len(self.data) // 2

    def __radd__(self, other):
        dataset = LFWDataset(transform=self.transform)
        if isinstance(other, LFWDataset):
            dataset.data = self.data + other.data
        else:
            dataset.data = self.data
        return dataset
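

# SiameseNet: both inputs go through the same weights (forward_once), each
# producing a 10-dimensional embedding. The flatten size 32 * 6 ** 2 assumes the
# two conv/pool stages reduce a 1x36x36 input to a 32x6x6 feature map under
# nptorch's Conv2d/MaxPool2d semantics.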
class SiameseNet(nn.Module):
    def __init__(self):
        super(SiameseNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, 3, dilation=(1, 1)),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, 3, dilation=(1, 1)),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(32 * 6 ** 2, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 10)
        )

    def forward_once(self, x):
        x = self.layer1(x)
        x = x.reshape(x.shape[0], -1)
        x = self.layer2(x)
        return x

    def forward(self, x1, x2):
        output1 = self.forward_once(x1)
        output2 = self.forward_once(x2)
        return output1, output2
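

# ContrastiveLoss as computed below:
#   L = mean( y * d^2 + (1 - y) * max(margin - d, 0)^2 )
# where d is the pairwise Euclidean distance between the two embeddings and
# y = 1 for a genuine (same-person) pair.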
class ContrastiveLoss(nn.Module):
    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss = nptorch.mean(label * nptorch.pow(euclidean_distance, 2) + (1 - label) * nptorch.pow(
            nptorch.clamp(self.margin - euclidean_distance, min=0.0), 2))
        return loss
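

# Verification rule used for testing: an embedding distance below 1 is predicted
# as "same person"; the function returns the fraction of correctly classified pairs.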
@nptorch.no_grad()
def test_model(model, test_loader: DataLoader):
    model.eval()
    count = 0
    for img1, img2, lb in tqdm(test_loader):
        out1, out2 = model(img1, img2)
        dist = F.pairwise_distance(out1, out2)
        p = (dist < 1).int()
        count += (p == lb).float().sum()
    return count.item() / len(test_loader.dataset)

train_root = r'F:\pycharmProjects\FYP-FaceVerification\final_year_project\dataset\lfw_cropped\split_data\train'
siamese_net = SiameseNet()
loss_fcn = ContrastiveLoss()
optimizer = SGD(siamese_net.parameters(), lr=1e-2)
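
# The training data is split across ten folders; each split is loaded as its own
# LFWDataset and the list is merged with sum(), which relies on
# LFWDataset.__radd__ (sum() starts from 0, handled by the else branch there).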
datasets = [LFWDataset(train_root + f'\\0{train_set_idx}', transform=trans)
            for train_set_idx in tqdm(range(1, 11), desc='loading...')]
datasets = sum(datasets)
dataloader = DataLoader(datasets, batch_size=32, shuffle=True)
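
# Training loop (commented out): one SGD step per batch, followed by an
# in-batch accuracy check under no_grad.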
# for i in tqdm(range(5)):
#     count = 0
#     for j, (img1, img2, lb) in enumerate(dataloader):
#         count += len(lb)
#         print(count)
#         siamese_net.train()
#         out1, out2 = siamese_net(img1, img2)
#         loss = loss_fcn(out1, out2, lb)
#         print('loss:', loss)
#         loss.backward()
#         optimizer.step()
#         siamese_net.eval()
#         with nptorch.no_grad():
#             out1, out2 = siamese_net(img1, img2)
#             pred = (F.pairwise_distance(out1, out2) < 1).int()
#             print('accuracy after update:', (pred == lb).float().mean().item())
#         optimizer.zero_grad()
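
# A minimal evaluation sketch (the test directory below is a placeholder and
# assumes a held-out split prepared the same way as the training splits):
# test_set = LFWDataset(r'F:\...\split_data\test\01', transform=trans)
# test_loader = DataLoader(test_set, batch_size=32)
# print('test accuracy:', test_model(siamese_net, test_loader))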