# ---------------------------------------------------------------------------
# CNN_train_2d.py -- 2D CNN training script (Net_cpx_2D)
# ---------------------------------------------------------------------------
import os
import time
import math
import datetime
import argparse
import itertools

import numpy as np
import pandas as pd
import h5py
import matplotlib
import matplotlib.pyplot as plt
import pylab as pl
from PIL import Image
from scipy import io
from sklearn.metrics import confusion_matrix

import torch
import torch.optim as optim
from torch import nn
from torch.utils.data import Dataset, DataLoader

from model2_cpx import Net_cpx_2D
# from SSIM import SSIM
from losses import SSIMLoss2D_MC
os.environ["CUDA_VISIBLE_DEVICES"] = "0"   # make only GPU "0" visible to PyTorch (per the original note, the other GPU could not be used)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
print(device)
epoch_num = 50      # number of training epochs
num_workers = 0     # DataLoader worker processes
current_data = '//media/bisp/New Volume/Linfang/PF_CC398_218_170_218/PF60/SS//'
current_data_file = current_data + 'CC_brain_2D/'
model_save_path = current_data + '/ssim_64_16_cpx' + '/'
os.makedirs(model_save_path, exist_ok=True)
class prepareData(Dataset):
    """Loads pre-saved 2D samples; each file holds a 'k-space' input and a 'label' target."""
    def __init__(self, train_or_test):
        self.files = os.listdir(current_data_file + train_or_test)
        self.train_or_test = train_or_test

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        path = current_data_file + self.train_or_test + '/' + self.files[idx]
        data = torch.load(path)
        return data['k-space'], data['label']
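# The Dataset above assumes each file under train/, validation/ and test/ was
# written with torch.save as a dict of two tensors. A minimal sketch of that
# assumed layout (file name and shapes are illustrative, not taken from the
# repository):
#   torch.save({'k-space': kspace_tensor,    # network input, e.g. shape (2, 170, 218)
#               'label': residual_tensor},   # target added to the input in the loop below
#              current_data_file + 'train/sample_0000.pt')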
trainset = prepareData('train')
trainloader = DataLoader(trainset, batch_size=16, shuffle=True, num_workers=num_workers)
validationset = prepareData('validation')
validationloader = DataLoader(validationset, batch_size=1, shuffle=True, num_workers=num_workers)
testset = prepareData('test')
testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=num_workers)
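# Optional sanity check (a minimal sketch, not in the original script): peek at
# one training batch and print the raw tensor shapes the DataLoader yields,
# before the reshape to (-1, nc, ny, nx) used in the training loop below.
_k_sample, _label_sample = next(iter(trainloader))
print('raw k-space batch shape:', tuple(_k_sample.shape),
      '| raw label batch shape:', tuple(_label_sample.shape))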
model = Net_cpx_2D().to(device)
# model = torch.load(current_data + '/real_L1_64_16_cpx' + '/epoch-35-0.0229811855.pth')   # resume from a saved checkpoint
print(model)
criterion1 = nn.L1Loss()
lr = 0.0002             # initial learning rate
nx = 218                # image width
ny = 170                # image height
nc = 2                  # number of input channels
weight_decay = 0.000
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
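# A commented-out alternative (a minimal sketch, not in the original script) to
# the manual learning-rate decay kept as a comment at the end of the epoch loop
# below (multiply lr by 0.8 every 2 epochs, floored at 5e-5), expressed with
# torch.optim.lr_scheduler instead of rebuilding the optimizer. It would need
# one scheduler.step() call at the end of each epoch.
# scheduler = optim.lr_scheduler.LambdaLR(
#     optimizer, lr_lambda=lambda epoch: max(5e-5 / lr, 0.8 ** (epoch // 2)))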
# ssim=SSIM(channels=2)
ssim = SSIMLoss2D_MC(in_chan=2)
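# The loop comments below describe a staged strategy: L1 loss for several epochs
# first, then SSIM for the whole model. A commented-out sketch of that switch
# (the switch epoch of 10 is an assumption; the call ssim(outs, labels, 1) is
# taken from the commented-out line in the original loop):
# def training_loss(epoch, outs, labels, switch_epoch=10):
#     if epoch < switch_epoch:
#         return criterion1(outs, labels)
#     return ssim(outs, labels, 1)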
loss_train_list = []
loss_validation_list = []
for epoch in range(epoch_num):   # set epoch_num = 0 to skip training
    # ------------------------------ training ------------------------------
    model.train()
    loss_batch = []
    time_start = time.time()
    for i, data in enumerate(trainloader, 0):
        inputs = data[0].reshape(-1, nc, ny, nx).to(device)   # single slices
        label = data[1].reshape(-1, 2, ny, nx).to(device)
        if nc == 6:
            # 6-channel input: add input channels 0 and 3 back onto the 2-channel target
            labels = label.clone()   # clone so the loaded target is not modified in place
            labels[:, 0, :, :] = label[:, 0, :, :] + inputs[:, 0, :, :]
            labels[:, 1, :, :] = label[:, 1, :, :] + inputs[:, 3, :, :]
        else:
            labels = inputs + label   # residual target: the label is the correction to the input
        outs = model(inputs)
        loss = criterion1(outs, labels)
        # loss = ssim(outs, labels, 1)   # staged strategy: L1 for several epochs, then SSIM (see sketch above)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_batch.append(loss.item())
        if i % 10 == 0:
            print('epoch:%d - %d, loss:%.10f' % (epoch + 1, i + 1, loss.item()))
    loss_train_list.append(round(sum(loss_batch) / len(loss_batch), 10))
    print(loss_train_list)
    time_end = time.time()
    print('time cost for training', time_end - time_start, 's')
    # ----------------------------- validation -----------------------------
    model.eval()
    loss_batch = []
    print('\n validating...')
    time_start = time.time()
    for i, data in enumerate(validationloader, 0):
        inputs = data[0].reshape(-1, nc, ny, nx).to(device)
        label = data[1].reshape(-1, 2, ny, nx).to(device)
        if nc == 6:
            labels = label.clone()
            labels[:, 0, :, :] = label[:, 0, :, :] + inputs[:, 0, :, :]
            labels[:, 1, :, :] = label[:, 1, :, :] + inputs[:, 3, :, :]
        else:
            labels = inputs + label
        with torch.no_grad():
            outs = model(inputs)
            loss = criterion1(outs, labels)
            # loss = ssim(outs, labels, 1)   # same staged L1/SSIM strategy as in training
        loss_batch.append(loss.item())
    time_end = time.time()
    print('time cost for validation', time_end - time_start, 's')
    loss_validation_list.append(round(sum(loss_batch) / len(loss_batch), 10))
    print(loss_validation_list)
    torch.save(model, os.path.join(model_save_path, 'epoch-%d-%.10f.pth' % (epoch + 1, loss.item())))
    # manual learning-rate decay (see the commented LambdaLR sketch next to the optimizer above):
    # if (epoch + 1) % 2 == 0:
    #     lr = max(5e-5, lr * 0.8)
    #     optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
print('Finished Training')
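# ---------------------------------------------------------------------------
# Follow-up sketch (not part of the original script): plot the recorded loss
# curves with the matplotlib import above and run the otherwise unused test
# loader through the trained model with the same L1 criterion and the nc == 2
# residual-target convention used above. The figure filename is an assumption.
plt.figure()
plt.plot(loss_train_list, label='train L1 loss')
plt.plot(loss_validation_list, label='validation L1 loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.savefig(os.path.join(model_save_path, 'loss_curves.png'))

model.eval()
test_losses = []
with torch.no_grad():
    for data in testloader:
        inputs = data[0].reshape(-1, nc, ny, nx).to(device)
        labels = inputs + data[1].reshape(-1, 2, ny, nx).to(device)
        test_losses.append(criterion1(model(inputs), labels).item())
print('mean L1 loss on test set: %.10f' % (sum(test_losses) / len(test_losses)))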