diff --git a/nets/frcnn_training.py b/nets/frcnn_training.py
index d4eb439..ebd3a5f 100644
--- a/nets/frcnn_training.py
+++ b/nets/frcnn_training.py
@@ -1,4 +1,6 @@
 import math
+import random
+import time
 from functools import partial
 
 import numpy as np
@@ -8,6 +10,23 @@
 
 
 def bbox_iou(bbox_a, bbox_b):
+    if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
+        print(bbox_a, bbox_b)
+        raise IndexError
+
+    tlt = torch.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
+    brt = torch.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
+
+    area_i = torch.prod(brt - tlt, dim=2) * (tlt < brt).all(dim=2)
+    area_a = torch.prod(bbox_a[:, 2:] - bbox_a[:, :2], dim=1)
+    area_b = torch.prod(bbox_b[:, 2:] - bbox_b[:, :2], dim=1)
+
+    iou = area_i / (area_a[:, None] + area_b - area_i)
+
+    return iou
+
+
+def bbox_iounp(bbox_a, bbox_b):
     if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
         print(bbox_a, bbox_b)
         raise IndexError
@@ -19,6 +38,33 @@ def bbox_iou(bbox_a, bbox_b):
     return area_i / (area_a[:, None] + area_b - area_i)
 
 def bbox2loc(src_bbox, dst_bbox):
+    # src_bbox = src_bbox.cpu().numpy()
+    width = src_bbox[:, 2] - src_bbox[:, 0]
+    height = src_bbox[:, 3] - src_bbox[:, 1]
+    ctr_x = src_bbox[:, 0] + 0.5 * width
+    ctr_y = src_bbox[:, 1] + 0.5 * height
+
+    base_width = dst_bbox[:, 2] - dst_bbox[:, 0]
+    base_height = dst_bbox[:, 3] - dst_bbox[:, 1]
+    base_ctr_x = dst_bbox[:, 0] + 0.5 * base_width
+    base_ctr_y = dst_bbox[:, 1] + 0.5 * base_height
+
+    eps = torch.finfo(height.dtype).eps
+    epst = torch.empty(len(height)).cuda()
+    epst.fill_(eps)
+    width = torch.maximum(width, epst)
+    height = torch.maximum(height, epst)
+
+    dx = (base_ctr_x - ctr_x) / width
+    dy = (base_ctr_y - ctr_y) / height
+    dw = torch.log(base_width / width)
+    dh = torch.log(base_height / height)
+
+    loc = torch.cat((dx.unsqueeze(0), dy.unsqueeze(0), dw.unsqueeze(0), dh.unsqueeze(0)), dim=0)
+    return loc.T
+
+
+def bbox2locnp(src_bbox, dst_bbox):
     width = src_bbox[:, 2] - src_bbox[:, 0]
     height = src_bbox[:, 3] - src_bbox[:, 1]
     ctr_x = src_bbox[:, 0] + 0.5 * width
@@ -41,6 +87,7 @@ def bbox2loc(src_bbox, dst_bbox):
     loc = np.vstack((dx, dy, dw, dh)).transpose()
     return loc
 
+
 class AnchorTargetCreator(object):
     def __init__(self, n_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3, pos_ratio=0.5):
         self.n_sample = n_sample
@@ -54,7 +101,7 @@ def __call__(self, bbox, anchor):
             loc = bbox2loc(anchor, bbox[argmax_ious])
             return loc, label
         else:
-            return np.zeros_like(anchor), label
+            return torch.zeros_like(anchor), label
 
     def _calc_ious(self, anchor, bbox):
         #----------------------------------------------#
@@ -63,8 +110,8 @@ def _calc_ious(self, anchor, bbox):
         #----------------------------------------------#
         ious = bbox_iou(anchor, bbox)
 
-        if len(bbox)==0:
-            return np.zeros(len(anchor), np.int32), np.zeros(len(anchor)), np.zeros(len(bbox))
+        if len(bbox) == 0:
+            return torch.zeros(len(anchor)), torch.zeros(len(anchor)), torch.zeros(len(bbox))
        #---------------------------------------------------------#
         #   Ground-truth box best matching each anchor   [num_anchors, ]
         #---------------------------------------------------------#
@@ -72,7 +119,7 @@ def _calc_ious(self, anchor, bbox):
         #---------------------------------------------------------#
         #   IoU of that best-matching ground-truth box for each anchor   [num_anchors, ]
         #---------------------------------------------------------#
-        max_ious = np.max(ious, axis=1)
+        max_ious, _ = torch.max(ious, dim=1)
         #---------------------------------------------------------#
         #   Anchor best matching each ground-truth box   [num_gt, ]
         #---------------------------------------------------------#
@@ -90,8 +137,8 @@ def _create_label(self, anchor, bbox):
         #   1 is positive, 0 is negative, -1 is ignored
         #   Everything is initialised to -1
         # ------------------------------------------ #
-        label = np.empty((len(anchor),), dtype=np.int32)
-        label.fill(-1)
+        label = torch.empty(len(anchor)).cuda()
+        label.fill_(-1)
 
         # ------------------------------------------------------------------------ #
         #   argmax_ious: index of the best-matching ground-truth box for each anchor   [num_anchors, ]
@@ -110,6 +157,96 @@ def _create_label(self, anchor, bbox):
         if len(gt_argmax_ious)>0:
             label[gt_argmax_ious] = 1
 
+        # ----------------------------------------------------- #
+        #   If there are more than 128 positive samples, cap them at 128
+        # ----------------------------------------------------- #
+        n_pos = int(self.pos_ratio * self.n_sample)
+        pos_index = torch.where(label == 1)[0].cpu().numpy()
+        if len(pos_index) > n_pos:
+            disable_index = np.random.choice(pos_index, size=(len(pos_index) - n_pos), replace=False)
+            label[disable_index] = -1
+
+        # ----------------------------------------------------- #
+        #   Balance positives and negatives so the total stays at 256
+        # ----------------------------------------------------- #
+        n_neg = self.n_sample - torch.sum(label == 1)
+        neg_index = torch.where(label == 0)[0].cpu().numpy()
+        if len(neg_index) > n_neg:
+            disable_index = np.random.choice(neg_index, size=(len(neg_index) - n_neg.item()), replace=False)
+            label[disable_index] = -1
+
+        return argmax_ious, label
+
+
+class AnchorTargetCreatornp(object):
+    def __init__(self, n_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3, pos_ratio=0.5):
+        self.n_sample = n_sample
+        self.pos_iou_thresh = pos_iou_thresh
+        self.neg_iou_thresh = neg_iou_thresh
+        self.pos_ratio = pos_ratio
+
+    def __call__(self, bbox, anchor):
+        argmax_ious, label = self._create_label(anchor, bbox)
+        if (label > 0).any():
+            loc = bbox2locnp(anchor, bbox[argmax_ious])
+            return loc, label
+        else:
+            return np.zeros_like(anchor), label
+
+    def _calc_ious(self, anchor, bbox):
+        # ----------------------------------------------#
+        #   IoU between anchors and ground-truth boxes
+        #   The resulting ious has shape [num_anchors, num_gt]
+        # ----------------------------------------------#
+        ious = bbox_iounp(anchor, bbox)
+
+        if len(bbox) == 0:
+            return np.zeros(len(anchor), np.int32), np.zeros(len(anchor)), np.zeros(len(bbox))
+        # ---------------------------------------------------------#
+        #   Ground-truth box best matching each anchor   [num_anchors, ]
+        # ---------------------------------------------------------#
+        argmax_ious = ious.argmax(axis=1)
+        # ---------------------------------------------------------#
+        #   IoU of that best-matching ground-truth box for each anchor   [num_anchors, ]
+        # ---------------------------------------------------------#
+        max_ious = np.max(ious, axis=1)
+        # ---------------------------------------------------------#
+        #   Anchor best matching each ground-truth box   [num_gt, ]
+        # ---------------------------------------------------------#
+        gt_argmax_ious = ious.argmax(axis=0)
+        # ---------------------------------------------------------#
+        #   Make sure every ground-truth box has a matching anchor
+        # ---------------------------------------------------------#
+        for i in range(len(gt_argmax_ious)):
+            argmax_ious[gt_argmax_ious[i]] = i
+
+        return argmax_ious, max_ious, gt_argmax_ious
+
+    def _create_label(self, anchor, bbox):
+        # ------------------------------------------ #
+        #   1 is positive, 0 is negative, -1 is ignored
+        #   Everything is initialised to -1
+        # ------------------------------------------ #
+        label = np.empty((len(anchor),), dtype=np.int32)
+        label.fill(-1)
+
+        # ------------------------------------------------------------------------ #
+        #   argmax_ious: index of the best-matching ground-truth box for each anchor   [num_anchors, ]
+        #   max_ious: IoU of that best-matching ground-truth box   [num_anchors, ]
+        #   gt_argmax_ious: index of the best-matching anchor for each ground-truth box   [num_gt, ]
+        # ------------------------------------------------------------------------ #
+        argmax_ious, max_ious, gt_argmax_ious = self._calc_ious(anchor, bbox)
+
+        # ----------------------------------------------------- #
+        #   Below the negative threshold -> negative sample
+        #   At or above the positive threshold -> positive sample
+        #   Every ground-truth box keeps at least one anchor
+        # ----------------------------------------------------- #
+        label[max_ious < self.neg_iou_thresh] = 0
+        label[max_ious >= self.pos_iou_thresh] = 1
+        if len(gt_argmax_ious) > 0:
+            label[gt_argmax_ious] = 1
+
         # ----------------------------------------------------- #
         #   If there are more than 128 positive samples, cap them at 128
         # ----------------------------------------------------- #
@@ -141,25 +278,25 @@ def __init__(self, n_sample=128, pos_ratio=0.5, pos_iou_thresh=0.5, neg_iou_thre
         self.neg_iou_thresh_low = neg_iou_thresh_low
 
     def __call__(self, roi, bbox, label, loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
-        roi = np.concatenate((roi.detach().cpu().numpy(), bbox), axis=0)
+        roi = torch.cat((roi, bbox), dim=0)
+
         # ----------------------------------------------------- #
         #   Compute the overlap between proposals and ground-truth boxes
         # ----------------------------------------------------- #
         iou = bbox_iou(roi, bbox)
-
-        if len(bbox)==0:
-            gt_assignment = np.zeros(len(roi), np.int32)
-            max_iou = np.zeros(len(roi))
-            gt_roi_label = np.zeros(len(roi))
+        if len(bbox) == 0:
+            gt_assignment = torch.zeros(len(roi))
+            max_iou = torch.zeros(len(roi))
+            gt_roi_label = torch.zeros(len(roi))
         else:
             #---------------------------------------------------------#
             #   Ground-truth box best matching each proposal   [num_roi, ]
             #---------------------------------------------------------#
-            gt_assignment = iou.argmax(axis=1)
+            gt_assignment = iou.argmax(dim=1)
             #---------------------------------------------------------#
             #   IoU of that best-matching ground-truth box for each proposal   [num_roi, ]
             #---------------------------------------------------------#
-            max_iou = iou.max(axis=1)
+            max_iou = iou.max(dim=1)[0]
             #---------------------------------------------------------#
             #   Ground-truth labels are shifted by +1 because of the background class
             #---------------------------------------------------------#
@@ -169,7 +306,7 @@ def __call__(self, roi, bbox, label, loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
        #   Proposals whose IoU with a ground-truth box is at least pos_iou_thresh are kept as positives
        #   The number of positives is capped at self.pos_roi_per_image
        #----------------------------------------------------------------#
-        pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
+        pos_index = torch.where(max_iou >= self.pos_iou_thresh)[0].cpu().numpy()
         pos_roi_per_this_image = int(min(self.pos_roi_per_image, pos_index.size))
         if pos_index.size > 0:
             pos_index = np.random.choice(pos_index, size=pos_roi_per_this_image, replace=False)
@@ -178,7 +315,7 @@ def __call__(self, roi, bbox, label, loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
        #   Proposals whose IoU is below neg_iou_thresh_high and at least neg_iou_thresh_low are kept as negatives
        #   The total of positives and negatives is fixed at self.n_sample
        #-----------------------------------------------------------------------------------------------------#
-        neg_index = np.where((max_iou < self.neg_iou_thresh_high) & (max_iou >= self.neg_iou_thresh_low))[0]
+        neg_index = torch.where((max_iou < self.neg_iou_thresh_high) & (max_iou >= self.neg_iou_thresh_low))[0].cpu().numpy()
         neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
         neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
         if neg_index.size > 0:
@@ -189,19 +326,92 @@ def __call__(self, roi, bbox, label, loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
         #   gt_roi_loc      [n_sample, 4]
         #   gt_roi_label    [n_sample, ]
         #---------------------------------------------------------#
-        keep_index = np.append(pos_index, neg_index)
+        keep_index = torch.from_numpy(np.append(pos_index, neg_index))
 
         sample_roi = roi[keep_index]
         if len(bbox)==0:
-            return sample_roi, np.zeros_like(sample_roi), gt_roi_label[keep_index]
+            return sample_roi, torch.zeros_like(sample_roi), gt_roi_label[keep_index]
 
         gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
+        gt_roi_loc = (gt_roi_loc / torch.tensor(loc_normalize_std).cuda())
+
+        gt_roi_label = gt_roi_label[keep_index]
+        gt_roi_label[pos_roi_per_this_image:] = 0
+        return sample_roi, gt_roi_loc, gt_roi_label
+
+
+class ProposalTargetCreatornp(object):
+    def __init__(self, n_sample=128, pos_ratio=0.5, pos_iou_thresh=0.5, neg_iou_thresh_high=0.5, neg_iou_thresh_low=0):
+        self.n_sample = n_sample
+        self.pos_ratio = pos_ratio
+        self.pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)
+        self.pos_iou_thresh = pos_iou_thresh
+        self.neg_iou_thresh_high = neg_iou_thresh_high
+        self.neg_iou_thresh_low = neg_iou_thresh_low
+
+    def __call__(self, roi, bbox, label, loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
+        roi = np.concatenate((roi.detach().cpu().numpy(), bbox), axis=0)
+        # ----------------------------------------------------- #
+        #   Compute the overlap between proposals and ground-truth boxes
+        # ----------------------------------------------------- #
+        iou = bbox_iounp(roi, bbox)
+
+        if len(bbox) == 0:
+            gt_assignment = np.zeros(len(roi), np.int32)
+            max_iou = np.zeros(len(roi))
+            gt_roi_label = np.zeros(len(roi))
+        else:
+            # ---------------------------------------------------------#
+            #   Ground-truth box best matching each proposal   [num_roi, ]
+            # ---------------------------------------------------------#
+            gt_assignment = iou.argmax(axis=1)
+            # ---------------------------------------------------------#
+            #   IoU of that best-matching ground-truth box for each proposal   [num_roi, ]
+            # ---------------------------------------------------------#
+            max_iou = iou.max(axis=1)
+            # ---------------------------------------------------------#
+            #   Ground-truth labels are shifted by +1 because of the background class
+            # ---------------------------------------------------------#
+            gt_roi_label = label[gt_assignment] + 1
+
+        # ----------------------------------------------------------------#
+        #   Proposals whose IoU with a ground-truth box is at least pos_iou_thresh are kept as positives
+        #   The number of positives is capped at self.pos_roi_per_image
+        # ----------------------------------------------------------------#
+        pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
+        pos_roi_per_this_image = int(min(self.pos_roi_per_image, pos_index.size))
+        if pos_index.size > 0:
+            pos_index = np.random.choice(pos_index, size=pos_roi_per_this_image, replace=False)
+
+        # -----------------------------------------------------------------------------------------------------#
+        #   Proposals whose IoU is below neg_iou_thresh_high and at least neg_iou_thresh_low are kept as negatives
+        #   The total of positives and negatives is fixed at self.n_sample
+        # -----------------------------------------------------------------------------------------------------#
+        neg_index = np.where((max_iou < self.neg_iou_thresh_high) & (max_iou >= self.neg_iou_thresh_low))[0]
+        neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
+        neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
+        if neg_index.size > 0:
+            neg_index = np.random.choice(neg_index, size=neg_roi_per_this_image, replace=False)
+
+        # ---------------------------------------------------------#
+        #   sample_roi      [n_sample, ]
+        #   gt_roi_loc      [n_sample, 4]
+        #   gt_roi_label    [n_sample, ]
+        # ---------------------------------------------------------#
+        keep_index = np.append(pos_index, neg_index)
+
+        sample_roi = roi[keep_index]
+        if len(bbox) == 0:
+            return sample_roi, np.zeros_like(sample_roi), gt_roi_label[keep_index]
+
+        gt_roi_loc = bbox2locnp(sample_roi, bbox[gt_assignment[keep_index]])
         gt_roi_loc = (gt_roi_loc / np.array(loc_normalize_std, np.float32))
 
         gt_roi_label = gt_roi_label[keep_index]
         gt_roi_label[pos_roi_per_this_image:] = 0
         return sample_roi, gt_roi_loc, gt_roi_label
 
+
 class FasterRCNNTrainer(nn.Module):
     def __init__(self, model_train, optimizer):
         super(FasterRCNNTrainer, self).__init__()
@@ -211,9 +421,15 @@ def __init__(self, model_train, optimizer):
         self.rpn_sigma = 1
         self.roi_sigma = 1
 
+        #numpy
+        # self.anchor_target_creatornp = AnchorTargetCreatornp()
+        # self.proposal_target_creatornp = ProposalTargetCreatornp()
+
+        #pytorch
         self.anchor_target_creator = AnchorTargetCreator()
         self.proposal_target_creator = ProposalTargetCreator()
 
+
         self.loc_normalize_std = [0.1, 0.1, 0.2, 0.2]
 
     def _fast_rcnn_loc_loss(self, pred_loc, gt_loc, gt_label, sigma):
@@ -241,12 +457,10 @@ def forward(self, imgs, bboxes, labels, scale):
        #   Extract the shared feature map
        #-------------------------------#
         base_feature = self.model_train(imgs, mode = 'extractor')
-
         # -------------------------------------------------- #
         #   Run the RPN to get offsets, scores, proposals and anchors
         # -------------------------------------------------- #
         rpn_locs, rpn_scores, rois, roi_indices, anchor = self.model_train(x = [base_feature, img_size], scale = scale, mode = 'rpn')
-
         rpn_loc_loss_all, rpn_cls_loss_all, roi_loc_loss_all, roi_cls_loss_all = 0, 0, 0, 0
         sample_rois, sample_indexes, gt_roi_locs, gt_roi_labels = [], [], [], []
         for i in range(n):
@@ -261,9 +475,19 @@ def forward(self, imgs, bboxes, labels, scale):
             #   gt_rpn_loc      [num_anchors, 4]
             #   gt_rpn_label    [num_anchors, ]
             # -------------------------------------------------- #
-            gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(bbox, anchor[0].cpu().numpy())
-            gt_rpn_loc = torch.Tensor(gt_rpn_loc).type_as(rpn_locs)
-            gt_rpn_label = torch.Tensor(gt_rpn_label).type_as(rpn_locs).long()
+
+            #numpy
+            # gt_rpn_loc, gt_rpn_label = self.anchor_target_creatornp(bbox, anchor[0].cpu().numpy())
+            # gt_rpn_loc = torch.Tensor(gt_rpn_loc).type_as(rpn_locs)
+            # gt_rpn_label = torch.Tensor(gt_rpn_label).type_as(rpn_locs).long()
+
+            #pytorch
+            bbox = torch.from_numpy(bbox).cuda()
+            label = torch.from_numpy(label).cuda()
+            gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(bbox, anchor[0])
+            gt_rpn_loc = gt_rpn_loc.type_as(rpn_locs)
+            gt_rpn_label = gt_rpn_label.type_as(rpn_locs).long()
+
             # -------------------------------------------------- #
             #   Compute the RPN regression and classification losses
             # -------------------------------------------------- #
@@ -279,15 +503,24 @@ def forward(self, imgs, bboxes, labels, scale):
             #   gt_roi_loc      [n_sample, 4]
             #   gt_roi_label    [n_sample, ]
             # ------------------------------------------------------ #
+            #numpy
+            # sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creatornp(roi, bbox, label, self.loc_normalize_std)
+            # sample_rois.append(torch.Tensor(sample_roi).type_as(rpn_locs))
+            # sample_indexes.append(torch.ones(len(sample_roi)).type_as(rpn_locs) * roi_indices[i][0])
+            # gt_roi_locs.append(torch.Tensor(gt_roi_loc).type_as(rpn_locs))
+            # gt_roi_labels.append(torch.Tensor(gt_roi_label).type_as(rpn_locs).long())
+
+            #pytorch
             sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(roi, bbox, label, self.loc_normalize_std)
-            sample_rois.append(torch.Tensor(sample_roi).type_as(rpn_locs))
+            sample_rois.append(sample_roi.type_as(rpn_locs))
             sample_indexes.append(torch.ones(len(sample_roi)).type_as(rpn_locs) * roi_indices[i][0])
-            gt_roi_locs.append(torch.Tensor(gt_roi_loc).type_as(rpn_locs))
-            gt_roi_labels.append(torch.Tensor(gt_roi_label).type_as(rpn_locs).long())
+            gt_roi_locs.append(gt_roi_loc.type_as(rpn_locs))
+            gt_roi_labels.append(gt_roi_label.type_as(rpn_locs).long())
 
         sample_rois = torch.stack(sample_rois, dim=0)
         sample_indexes = torch.stack(sample_indexes, dim=0)
         roi_cls_locs, roi_scores = self.model_train([base_feature, sample_rois, sample_indexes, img_size], mode = 'head')
+
         for i in range(n):
             # ------------------------------------------------------ #
             #   Pick the regression predictions that match each proposal's class