
Commit

add code and files
qixuxiang committed Mar 20, 2019
1 parent c6f9532 commit 20544e5
Showing 15 changed files with 2,108 additions and 0 deletions.
1 change: 1 addition & 0 deletions data/ApolloDatas/test/readme.txt
@@ -0,0 +1 @@
place all test data and unzip them here
1 change: 1 addition & 0 deletions data/ApolloDatas/train/readme.txt
@@ -0,0 +1 @@
place all train data and unzip them here
78 changes: 78 additions & 0 deletions data_augmentor.py
@@ -0,0 +1,78 @@
#-*- coding:utf-8 -*-
import numpy as np
import cv2
import math
from math import fabs,sin,cos,radians
import random
from random import choice
'''
Data augmentor: includes flip, rotation, scale and translation.
This hand-rolled code can be replaced with the Augmentor library;
source code and docs: https://github.com/mdbloice/Augmentor
'''
path = "/media/airobot/docs/BaiduDatas/apolloscape/apolloscape/train/"
image = "image/170927_063811892_Camera_5.jpg"
label = "label/170927_063811892_Camera_5_bin.png"
flipCode = [1, 1]  # flip codes sampled by disturb(); both entries are 1, so only horizontal flips are applied

class DataAugmentor:
    def __init__(self):
        pass

    def random_flip(self, img, code):
        # code: 1 = horizontal, 0 = vertical, -1 = both
        return cv2.flip(img, code)

    def random_rotation(self, img, degree):
        # rotate around the image centre and enlarge the canvas so nothing is cropped
        height, width = img.shape[:2]
        heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
        widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
        matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
        matRotation[0, 2] += (widthNew - width) / 2
        matRotation[1, 2] += (heightNew - height) / 2
        imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew))
        return imgRotation

    def rotate(self, image, angle, center=None, scale=1.0):
        # rotate (and optionally scale) around a given centre, keeping the original canvas size
        (h, w) = image.shape[:2]
        if center is None:
            center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, angle, scale)
        rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_NEAREST)
        return rotated

    def tfactor(self, img):
        # add random noise to hue, saturation and brightness in HSV space
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv[:, :, 0] = hsv[:, :, 0] * (0.8 + np.random.random() * 0.2)
        hsv[:, :, 1] = hsv[:, :, 1] * (0.6 + np.random.random() * 0.4)
        hsv[:, :, 2] = hsv[:, :, 2] * (0.4 + np.random.random() * 0.6)
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return img

    def disturb(self, image, label):
        # apply the same random flip to the image and its label so they stay aligned
        flip_code = choice(flipCode)
        rotate_degree = random.uniform(160, 200)
        scale = random.uniform(1.0, 2.0)
        image = self.random_flip(image, flip_code)
        label = self.random_flip(label, flip_code)
        #image = self.rotate(image, rotate_degree, scale=scale)
        #label = self.rotate(label, rotate_degree, scale=scale)
        #image = self.tfactor(image)
        return image, label

if __name__ == '__main__':
    img = cv2.imread(path + image)
    img_label = cv2.imread(path + label)
    img = cv2.resize(img, (1024, 512), interpolation=cv2.INTER_CUBIC)
    img_label = cv2.resize(img_label, (1024, 512), interpolation=cv2.INTER_NEAREST)

    cv2.imshow("origin image", img)
    cv2.imshow("origin label image", img_label)
    augmentor = DataAugmentor()
    image, label = augmentor.disturb(img, img_label)

    cv2.imshow("image", image)
    cv2.imshow("label image", label)

    #cv2.imwrite("/home/airobot/1.jpg", image)
    #cv2.imwrite("/home/airobot/2.png", label)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
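
Note: the docstring in data_augmentor.py suggests the Augmentor library as a drop-in replacement. A minimal sketch of an equivalent pipeline is shown below; the directory arguments, probabilities and sample count are illustrative assumptions, not part of this commit.

import Augmentor

# hypothetical paths; this commit stores data under data/ApolloDatas/train/
p = Augmentor.Pipeline("data/ApolloDatas/train/image")
p.ground_truth("data/ApolloDatas/train/label")  # keep image and label transforms in sync
p.flip_left_right(probability=0.5)
p.rotate(probability=0.5, max_left_rotation=20, max_right_rotation=20)
p.zoom(probability=0.5, min_factor=1.0, max_factor=2.0)
p.sample(1000)  # write augmented image/label pairs to an output directory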
Binary file added demo.gif
137 changes: 137 additions & 0 deletions models/PAN.py
@@ -0,0 +1,137 @@
#-*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
from utils import *
import utils
import contextlib
import os
import math
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

class pannet(object):
    def __init__(self, rows=512, cols=512):
        self.rows = rows
        self.cols = cols

    def Inception_dilation(self, inputs, channels):
        # parallel 3x3 convolutions with dilation 1/2/4/6, concatenated along the channel axis
        conv3 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=3, stride=1, dilation=1, act='relu')
        print("conv3.shape----------", conv3.shape)

        conv5 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=3, stride=1, dilation=2, act='relu')
        print("conv5.shape----------", conv5.shape)
        conv7 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=3, stride=1, dilation=4, act='relu')
        print("conv7.shape----------", conv7.shape)
        conv9 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=3, stride=1, dilation=6, act='relu')
        print("conv9.shape----------", conv9.shape)

        merge2 = fluid.layers.concat([conv3, conv5, conv7, conv9], axis=1)
        return merge2

    def FeaturePyramidAttention(self, inputs, channels):
        # downsample through three conv+pool stages, then upsample back with bilinear resizes,
        # fusing each decoder stage with the matching encoder stage
        conv1 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=1, stride=1, dilation=1, act='relu')

        conv7 = conv_bn_layer(input=inputs, num_filters=channels,
                              filter_size=3, stride=1, dilation=4, act='relu')
        print("before pooling,", conv7.shape)

        pool1 = fluid.layers.pool2d(conv7, pool_size=4, pool_type='max',
                                    pool_stride=4)
        print("after pooling,", pool1.shape)
        conv5 = conv_bn_layer(input=pool1, num_filters=channels,
                              filter_size=3, stride=1, dilation=3, act='relu')
        pool2 = fluid.layers.pool2d(conv5, pool_size=4, pool_type='max',
                                    pool_stride=4)

        conv3 = conv_bn_layer(input=pool2, num_filters=channels,
                              filter_size=3, stride=1, dilation=2, act='relu')
        pool3 = fluid.layers.pool2d(conv3, pool_size=4, pool_type='max',
                                    pool_stride=4)
        conv2 = conv_bn_layer(input=pool3, num_filters=channels,
                              filter_size=3, stride=1, dilation=1, act='relu')

        up1 = fluid.layers.resize_bilinear(input=conv2, scale=4)
        up1 = conv_bn_layer(input=up1, num_filters=channels, filter_size=1,
                            stride=1, dilation=1, act='relu')
        up1 = fluid.layers.concat([up1, conv3], axis=1)

        up2 = fluid.layers.resize_bilinear(input=up1, scale=4)
        up2 = conv_bn_layer(input=up2, num_filters=channels, filter_size=1,
                            stride=1, dilation=1, act='relu')
        up2 = fluid.layers.concat([up2, conv5], axis=1)

        up3 = fluid.layers.resize_bilinear(input=up2, scale=4)
        up3 = conv_bn_layer(input=up3, num_filters=channels, filter_size=1,
                            stride=1, dilation=1, act='relu')
        up3 = fluid.layers.concat([up3, conv7], axis=1)
        out = fluid.layers.concat([up3, conv1], axis=1)
        return out

    def GlobalAttentionUpsample(self, inputs_low, inputs_high, channels):
        # inputs_low:  low-level feature input
        # inputs_high: high-level feature input
        # global average pooling of the high-level features gates the low-level features
        print('inputs_high.shape---------', inputs_high.shape)
        conv3 = conv_bn_layer(input=inputs_low, num_filters=3 * channels,
                              filter_size=3, stride=1, dilation=1, act='relu')
        gap = fluid.layers.pool2d(inputs_high, pool_type='avg', global_pooling=True)

        print('gap.shape------------', gap.shape)
        h = conv3.shape[2]
        w = conv3.shape[3]
        gap = fluid.layers.resize_bilinear(input=gap, out_shape=[h, w])

        conv1conv3 = fluid.layers.elementwise_mul(gap, conv3)
        '''
        conv1 = conv_bn_layer(input=gap, num_filters=3*channels,
                              filter_size=1, stride=1, dilation=1, act='relu')
        print("conv1.shape---------", conv1.shape)
        '''
        #out = fluid.layers.sequence_concat(input=[conv1conv3, inputs_high])
        out = fluid.layers.concat([conv1conv3, inputs_high], axis=1)

        return out

    def model(self, inputs):
        # densely connected stack of Inception_dilation blocks, followed by FPA and three GAU stages
        conv1 = self.Inception_dilation(inputs, 4)
        res1 = fluid.layers.concat([inputs, conv1], axis=1)
        conv2 = self.Inception_dilation(res1, 4)
        conv2 = self.Inception_dilation(conv2, 4)
        res2 = fluid.layers.concat([res1, conv2], axis=1)
        conv3 = self.Inception_dilation(res2, 4)
        conv3 = self.Inception_dilation(conv3, 4)
        res3 = fluid.layers.concat([res2, conv3], axis=1)
        conv4 = self.Inception_dilation(res3, 4)
        conv4 = self.Inception_dilation(conv4, 4)

        FPA = self.FeaturePyramidAttention(conv4, 4)
        print('FPA.shape', FPA.shape)
        print('conv3.shape', conv3.shape)
        GAU1 = self.GlobalAttentionUpsample(conv3, FPA, 4)
        GF1 = fluid.layers.concat([FPA, GAU1], axis=1)

        GAU2 = self.GlobalAttentionUpsample(conv2, GF1, 12)
        GF2 = fluid.layers.concat([GF1, GAU2], axis=1)

        GAU3 = self.GlobalAttentionUpsample(conv1, GF2, 36)
        GF3 = fluid.layers.concat([GF2, GAU3], axis=1)

        conv8 = conv_bn_layer(input=GF3, num_filters=12, filter_size=1,
                              stride=1, dilation=1, act='relu')
        print("conv8 shape:", conv8.shape)

        conv9 = conv_bn_layer(input=conv8, num_filters=9, filter_size=1,
                              stride=1, dilation=1, act='relu')

        # per-pixel 9-class prediction: NCHW -> NHWC, flatten to [-1, 9], softmax over classes
        conv9 = fluid.layers.transpose(x=conv9, perm=[0, 2, 3, 1])
        conv9 = fluid.layers.reshape(conv9, shape=[-1, 9])
        modelOut = fluid.layers.softmax(conv9)
        print('modelOut.shape == ', modelOut.shape)

        return modelOut
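
For context, a minimal sketch of wiring pannet into a fluid program follows; the placeholder name, input shape and use of fluid.layers.data are illustrative assumptions rather than code taken from this commit.

import paddle.fluid as fluid
from models.PAN import pannet

net = pannet(rows=512, cols=1024)
# NCHW image placeholder; model() returns per-pixel class probabilities flattened to [-1, 9]
image = fluid.layers.data(name='image', shape=[3, 512, 1024], dtype='float32')
predict = net.model(image)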
