model_dw2.py
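"""U-Net-style encoder-decoder assembled from depthwise separable
convolutions (a 3x3 depthwise conv followed by a 1x1 pointwise conv),
with 1x1 projection shortcuts inside each block and skip connections
between encoder and decoder."""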
import torch
from torch import nn
import torch.nn.functional as F


def fixed_padding(inputs, kernel_size, dilation):
    """Pad the input so that a stride-1 convolution with the given
    kernel size and dilation preserves the spatial dimensions."""
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs
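
# Worked example: kernel_size=3, dilation=2 gives an effective kernel of
# 3 + (3 - 1) * (2 - 1) = 5, so pad_total = 4 and two rows/columns are
# padded on each side; the following stride-1 convolution then preserves
# the input's spatial size.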

class InvertedResidual(nn.Module):
    """Depthwise separable convolution: a 3x3 depthwise convolution
    (dilated by `expand`) followed by a 1x1 pointwise convolution."""

    def __init__(self, inp, oup, expand):
        super(InvertedResidual, self).__init__()
        self.expand = expand
        self.conv = nn.Sequential(
            # depthwise: one 3x3 filter per input channel, dilation = expand
            nn.Conv2d(inp, inp, 3, 1, 0, dilation=expand, groups=inp, bias=False),
            nn.BatchNorm2d(inp),
            nn.ReLU6(inplace=True),
            # pointwise: 1x1 convolution mixes channels and maps inp -> oup
            nn.Conv2d(inp, oup, 1, 1, 0, 1, bias=False),
        )

    def forward(self, x):
        # Pad manually so the dilated depthwise conv keeps the spatial size.
        x_pad = fixed_padding(x, 3, self.expand)
        y = self.conv(x_pad)
        return y

class block_down(nn.Module):
    """Encoder block: two depthwise separable convolutions plus a 1x1
    projection shortcut added residually."""

    def __init__(self, inp_channel, out_channel, expand):
        super(block_down, self).__init__()
        self.deepwise1 = InvertedResidual(inp_channel, inp_channel, expand)
        self.deepwise2 = InvertedResidual(inp_channel, out_channel, expand)
        # 1x1 convolution matches the channel count for the residual sum.
        self.resnet = nn.Conv2d(inp_channel, out_channel, 1, 1, 0, 1, bias=False)

    def forward(self, input):
        resnet = self.resnet(input)
        x = self.deepwise1(input)
        x = self.deepwise2(x)
        out = torch.add(resnet, x)
        return out

class block_up(nn.Module):
    """Decoder block: transposed-convolution upsampling, concatenation with
    the matching encoder feature map, then two depthwise separable
    convolutions plus a 1x1 projection shortcut."""

    def __init__(self, inp_channel, out_channel, expand):
        super(block_up, self).__init__()
        # Upsample by 2 and halve the channels; concatenating the skip
        # connection restores the channel count to inp_channel.
        self.up = nn.ConvTranspose2d(inp_channel, out_channel, 2, stride=2)
        self.deepwise1 = InvertedResidual(inp_channel, inp_channel, expand)
        self.deepwise2 = InvertedResidual(inp_channel, out_channel, expand)
        self.resnet = nn.Conv2d(inp_channel, out_channel, 1, 1, 0, 1, bias=False)

    def forward(self, x, y):
        x = self.up(x)
        x1 = torch.cat([x, y], dim=1)
        x = self.deepwise1(x1)
        x = self.deepwise2(x)
        resnet = self.resnet(x1)
        out = torch.add(resnet, x)
        return out

class U_net(nn.Module):
    """U-Net assembled from the blocks above; outputs per-pixel sigmoid scores."""

    def __init__(self, class_num):
        super(U_net, self).__init__()
        self.inp = nn.Conv2d(3, 64, 1)
        self.block2 = block_down(64, 128, expand=1)
        self.block3 = block_down(128, 256, expand=2)
        self.block4 = block_down(256, 512, expand=3)
        self.block5 = block_down(512, 1024, expand=1)
        self.block6 = block_up(1024, 512, expand=1)
        self.block7 = block_up(512, 256, expand=1)
        self.block8 = block_up(256, 128, expand=1)
        self.block9 = block_up(128, 64, expand=1)
        self.out = nn.Conv2d(64, class_num, 1)
        self.maxpool = nn.MaxPool2d(2)

    def forward(self, x):
        # Encoder: keep the pre-pooling feature maps for the skip connections.
        x1_use = self.inp(x)
        x1 = self.maxpool(x1_use)
        x2_use = self.block2(x1)
        x2 = self.maxpool(x2_use)
        x3_use = self.block3(x2)
        x3 = self.maxpool(x3_use)
        x4_use = self.block4(x3)
        x4 = self.maxpool(x4_use)
        x5 = self.block5(x4)
        # Decoder: each block consumes the matching encoder feature map.
        x6 = self.block6(x5, x4_use)
        x7 = self.block7(x6, x3_use)
        x8 = self.block8(x7, x2_use)
        x9 = self.block9(x8, x1_use)
        out = self.out(x9)
        return torch.sigmoid(out)

if __name__ == "__main__":
    # Fall back to CPU when CUDA is unavailable.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    test_input = torch.rand(1, 3, 480, 640).to(device)
    print("input_size:", test_input.size())
    model = U_net(3).to(device)
    output = model(test_input)
    print("output_size:", output.size())
    # Count trainable parameters by multiplying out each parameter's shape.
    params = list(model.named_parameters())
    k = 0
    for name, param in params:
        print(name)
        if param.requires_grad:
            l = 1
            for i in param.size():
                l *= i
            k = k + l
            print(l)
    print("The total number of model parameters is: " + str(k))