# defense_cifar.py — forked from Kira0096/DIPDefend (148 lines, 4.74 KB)
from torchvision import models
import os
import numpy as np
from models import *
import torch
import torch.optim
from skimage.measure import compare_psnr
from utils.denoising_utils import *
import torch
from torch.autograd import Variable
import argparse
# Command-line interface for the DIP-based defense run.
parser = argparse.ArgumentParser(description='Args for DIPDefend')
parser.add_argument('--fname',
                    help='The name of the attacked image', type=str)
parser.add_argument('--out_dir', default='.',
                    help='The directory used to save the output', type=str)
parser.add_argument('--num_iter', default=4000, type=int,
                    help='Number of total iterations to run')
parser.add_argument('--input_depth', default=1, type=int,
                    help='Input depth for the generator')
parser.add_argument('--lr', default=0.01, type=float,
                    help='Learning rate for the optimizer')
parser.add_argument('--Lambda', default=0.002, type=float,
                    help='Hyperparameter of SES')
# cudnn autotuning: benchmark picks the fastest conv algorithms for the
# (fixed) input sizes used here.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
# All tensors in this script are created on the GPU; a CUDA device is required.
dtype = torch.cuda.FloatTensor
def preprocess_image(cv2im, resize_im=True, device=0):
    """Convert a numpy image into a batched float tensor on the given device.

    Args:
        cv2im (np.ndarray): Image array to convert (no resizing is applied).
        resize_im (bool): Unused; kept for backward compatibility with the
            original signature, which documented a 224 resize that was never
            implemented.
        device: Target device for the tensor. Defaults to 0 (i.e. cuda:0),
            matching the original hard-coded behavior.

    Returns:
        torch.Tensor: Float tensor with a leading batch dimension of 1 and
        requires_grad=False.
    """
    im_as_ten = torch.from_numpy(cv2im).float()
    # Add the batch dimension in place: (C, H, W) -> (1, C, H, W).
    im_as_ten.unsqueeze_(0)
    # torch.autograd.Variable is deprecated: plain tensors are autograd-aware
    # and requires_grad already defaults to False.
    return im_as_ten.to(device)
def adapative_psnr(img1, img2, size=32):
    """Return the worst-case PSNR over non-overlapping size x size patches.

    Taking the minimum over local patches makes the score sensitive to
    localized perturbations that a single whole-image PSNR would average away.

    Args:
        img1 (np.ndarray): Reference image, shape (C, H, W).
        img2 (np.ndarray): Image to compare, same shape as img1.
        size (int): Patch side length in pixels.

    Returns:
        float: Minimum patch-wise PSNR.
    """
    psnr = []
    _, h, w = img1.shape
    # // already yields an int, so no extra int() cast is needed.
    for i in range(h // size):
        for j in range(w // size):
            img1_part = img1[:, i*size:(i+1)*size, j*size:(j+1)*size]
            img2_part = img2[:, i*size:(i+1)*size, j*size:(j+1)*size]
            psnr.append(compare_psnr(img1_part, img2_part))
    if not psnr:
        # Image smaller than one patch: fall back to whole-image PSNR
        # instead of crashing on an empty min().
        psnr.append(compare_psnr(img1, img2))
    return min(psnr)
args = parser.parse_args()
attack_fname = args.fname
# Load the attacked image and crop it so both sides are divisible by 32
# (required by the generator's down/upsampling path).
img_noisy_pil = crop_image(get_image(attack_fname, -1)[0], d=32)
img_noisy_np = pil_to_np(img_noisy_pil)
out_dir = args.out_dir
LAMBDA = args.Lambda
# NOTE(review): negative value means the closure's `reg_noise_std > 0`
# branch never fires, i.e. no noise is re-injected into the input each
# iteration — confirm this is intended (DIP variants often use +1./20.).
reg_noise_std = -1./20.
LR = args.lr
num_iter = args.num_iter
input_depth = args.input_depth
def Net():
    """Construct the skip-connection generator used as the DIP image prior.

    Reads the module-level ``input_depth`` for the input channel count and
    casts the model to the module-level ``dtype`` (CUDA float).
    """
    channels_down = [4, 8, 16]
    channels_up = [4, 8, 16]
    channels_skip = [0, 0, 0]
    model = skip(input_depth, 3,
                 num_channels_down=channels_down,
                 num_channels_up=channels_up,
                 num_channels_skip=channels_skip,
                 upsample_mode='bilinear')
    return model.type(dtype)
net = Net()
# series: raw per-iteration PSNR values; out_series / delta_series: the
# smoothed SES level and trend built up by closure().
series, out_series, delta_series = [], [], []
psnr_max_img = None
SES_img = None
net_inputs = []
# NOTE(review): loop runs exactly once, producing a single noise input;
# presumably kept in loop form to allow an ensemble of inputs — confirm.
for i in range(1):
    net_input = get_noise(input_depth, 'noise', (img_noisy_pil.size[1], img_noisy_pil.size[0])).type(dtype).detach()
    net_inputs.append(net_input.squeeze(0).cpu().numpy())
# Stack the collected noise maps back into a batched GPU tensor.
net_input = torch.FloatTensor(np.array(net_inputs)).type(dtype).detach().to(0)
# Compute number of parameters
s = sum([np.prod(list(p.size())) for p in net.parameters()]);
print ('Number of params: %d' % s)
# Loss
mse = torch.nn.MSELoss().type(dtype)
img_noisy_torch = np_to_torch(img_noisy_np).type(dtype)
# Snapshots used by closure() when reg_noise_std > 0 (inactive here).
net_input_saved = net_input.detach().clone()
noise = net_input.detach().clone()
def closure():
    """One optimization step: fit the net to the noisy image and track the
    SES (simple-exponential-smoothing-with-trend) PSNR series used to pick
    the output image. Returns the MSE loss (gradients already computed)."""
    global net_input
    global psnr_max_img, SES_img
    # Inactive when reg_noise_std <= 0 (it is negative at module level).
    if reg_noise_std > 0:
        net_input = net_input_saved + (noise.normal_() * reg_noise_std)
    out = net(net_input)
    # Average over the input batch dimension before comparing to the target.
    total_loss = mse(out.mean(dim=0,keepdim=True), img_noisy_torch)
    total_loss.backward()
    # Patch-wise worst-case PSNR of the reconstruction vs. the noisy input.
    psrn_gt = adapative_psnr(img_noisy_np, out.detach().cpu().numpy().mean(axis=0))
    if len(series) == 0:
        # First observation: seed the smoothed level with the raw value.
        series.append(psrn_gt)
        out_series.append(psrn_gt)
    elif len(series) == 1:
        # Second observation: seed the trend with the first difference.
        series.append(psrn_gt)
        delta_series.append(series[1] - series[0])
        out_series.append(LAMBDA * series[-1] + (1 - LAMBDA) * (out_series[-1] + delta_series[-1]))
    else:
        # Holt-style update: s = smoothed level, t = smoothed trend.
        series.append(psrn_gt)
        s = LAMBDA * series[-1] + (1 - LAMBDA) * (out_series[-1] + delta_series[-1])
        t = LAMBDA * (s - out_series[-1]) + (1 - LAMBDA) * (delta_series[-1])
        out_series.append(s); delta_series.append(t)
        # Keep the reconstruction whenever the smoothed PSNR hits a new peak.
        # NOTE(review): indentation was lost in extraction; this guard is
        # placed inside the else-branch, the only position where
        # out_series[:-1] is guaranteed non-empty — confirm against upstream.
        if out_series[-1] > np.array(out_series[:-1]).max():
            SES_img = out.detach().cpu().numpy().mean(axis=0)
    # if series[-1] > np.array(series[:-1]).max():
    #     psnr_max_img = out.detach().cpu().numpy().mean(axis=0)
    return total_loss
# Optimize both the network weights and the input, then save the
# SES-selected reconstruction as the defense output.
p = get_params('net,input', net, net_input)
optimize('adam', p, closure, LR, num_iter)
np.save(os.path.join(out_dir, 'defense_inflection.npy'), SES_img)
np_to_pil(SES_img).save(os.path.join(out_dir, 'defense_inflection.png'))