Deep-Learning-Based Road Detection for Autonomous Driving
I have recently been teaching myself deep learning. There are plenty of computer vision competitions and resources online, such as Kaggle, Tianchi, and Baidu PaddlePaddle, and Paddle keeps getting better, so I chose Baidu PaddlePaddle to support a home-grown open-source framework. I got the code running end to end, and this post records my learning process; if there are any mistakes, I would appreciate pointers from more experienced readers.
I. Set up the Paddle GPU environment
Install the environment by following the link below.
A few friendly reminders:
- It is best to install with conda. My environment at the time was Win7 + CUDA 9.0, and the installation hit quite a few snags; I did not save some screenshots from the process, so please search around yourself. There are plenty of tutorials online, so it should not be a problem.
- Install CUDA + cuDNN according to your own graphics card. In general, if your machine supports CUDA 9.2, then installing CUDA 9.0 plus the matching cuDNN version from the NVIDIA site is still compatible, and so on for other versions.
- If the NVIDIA driver installer downloads very slowly, use a wired connection rather than Wi-Fi; I got burned by this once. A quick sanity check after installation is shown below.
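A minimal sanity-check sketch, assuming a PaddlePaddle 2.x build (where paddle.utils.run_check() exists):

# Post-install sanity check (assumes a Paddle 2.x install)
import paddle
print(paddle.__version__)
paddle.utils.run_check()  # reports whether the GPU build can see a usable device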
II. Download the dataset images from the official site
The dataset is split into a training set and a test set, roughly 50,000-plus images in total; the files are quite large!
III. Download the source code and import the project into PyCharm
The code follows this link: the Baidu autonomous-car lane line detection source code, which comes with careful documentation and a code-structure diagram. What follows are my own notes and understanding from running it.
1. The process_labels.py file
The label map assigns the different objects in an image to classes. ignoreInEval indicates whether a class is ignored during evaluation: True means the class is mapped to 0 (treated as background), False means it is kept as a foreground class. Originally there are category ids (catId) 0-8, nine classes in total; looking closely, catId = 4 has ignoreInEval = True, so that class can be dropped, which leaves 8 classes.
This program touches on one-hot encoding; you can look up the underlying principle yourself. The function could in fact be written more concisely with a lookup array and a single loop (see the lookup-table sketch after this listing); of course, that just comes down to personal coding style.
"""
Description: Encode & Decode Labels for Lane Segmentation Competition
NOTE: For 8 classes
"""
import numpy as np
def encode_labels(color_mask):
    encode_mask = np.zeros((color_mask.shape[0], color_mask.shape[1]))
    # 0
    encode_mask[color_mask == 0] = 0
    encode_mask[color_mask == 249] = 0
    encode_mask[color_mask == 255] = 0
    # 1
    encode_mask[color_mask == 200] = 1
    encode_mask[color_mask == 204] = 1
    encode_mask[color_mask == 213] = 0
    encode_mask[color_mask == 209] = 1
    encode_mask[color_mask == 206] = 0
    encode_mask[color_mask == 207] = 0
    # 2
    encode_mask[color_mask == 201] = 2
    encode_mask[color_mask == 203] = 2
    encode_mask[color_mask == 211] = 0
    encode_mask[color_mask == 208] = 0
    # 3
    encode_mask[color_mask == 216] = 0
    encode_mask[color_mask == 217] = 3
    encode_mask[color_mask == 215] = 0
    # 4 In the test, it will be ignored
    encode_mask[color_mask == 218] = 0
    encode_mask[color_mask == 219] = 0
    # 4
    encode_mask[color_mask == 210] = 4
    encode_mask[color_mask == 232] = 0
    # 5
    encode_mask[color_mask == 214] = 5
    # 6
    encode_mask[color_mask == 202] = 0
    encode_mask[color_mask == 220] = 6
    encode_mask[color_mask == 221] = 6
    encode_mask[color_mask == 222] = 6
    encode_mask[color_mask == 231] = 0
    encode_mask[color_mask == 224] = 6
    encode_mask[color_mask == 225] = 6
    encode_mask[color_mask == 226] = 6
    encode_mask[color_mask == 230] = 0
    encode_mask[color_mask == 228] = 0
    encode_mask[color_mask == 229] = 0
    encode_mask[color_mask == 233] = 0
    # 7
    encode_mask[color_mask == 205] = 7
    encode_mask[color_mask == 212] = 0
    encode_mask[color_mask == 227] = 7
    encode_mask[color_mask == 223] = 0
    encode_mask[color_mask == 250] = 7
    return encode_mask
# Read the code critically and mind the pitfall: id <--- corresponds to ---> trainId
def decode_labels(labels):
    decode_mask = np.zeros((labels.shape[0], labels.shape[1]), dtype='uint8')
    # 0
    decode_mask[labels == 0] = 0
    # 1
    decode_mask[labels == 1] = 204
    # 2
    decode_mask[labels == 2] = 203
    # 3
    decode_mask[labels == 3] = 217
    # 4
    decode_mask[labels == 4] = 210
    # 5
    decode_mask[labels == 5] = 214
    # 6
    decode_mask[labels == 6] = 224
    # 7
    decode_mask[labels == 7] = 227
    return decode_mask
def decode_color_labels(labels):
    decode_mask = np.zeros((3, labels.shape[0], labels.shape[1]), dtype='uint8')
    # 0
    decode_mask[0][labels == 0] = 0
    decode_mask[1][labels == 0] = 0
    decode_mask[2][labels == 0] = 0
    # 1
    decode_mask[0][labels == 1] = 70
    decode_mask[1][labels == 1] = 130
    decode_mask[2][labels == 1] = 180
    # 2
    decode_mask[0][labels == 2] = 0
    decode_mask[1][labels == 2] = 0
    decode_mask[2][labels == 2] = 142
    # 3
    decode_mask[0][labels == 3] = 153
    decode_mask[1][labels == 3] = 153
    decode_mask[2][labels == 3] = 153
    # 4
    decode_mask[0][labels == 4] = 128
    decode_mask[1][labels == 4] = 64
    decode_mask[2][labels == 4] = 128
    # 5
    decode_mask[0][labels == 5] = 190
    decode_mask[1][labels == 5] = 153
    decode_mask[2][labels == 5] = 153
    # 6
    decode_mask[0][labels == 6] = 0
    decode_mask[1][labels == 6] = 0
    decode_mask[2][labels == 6] = 230
    # 7
    decode_mask[0][labels == 7] = 255
    decode_mask[1][labels == 7] = 128
    decode_mask[2][labels == 7] = 0
    return decode_mask
# Report which label values appear in an image
def verify_labels(labels):
    pixels = [0]
    for x in range(labels.shape[0]):
        for y in range(labels.shape[1]):
            pixel = labels[x, y]
            if pixel not in pixels:
                pixels.append(pixel)
    print('The labels have values:', pixels)
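As noted above, the long chain of mask assignments can be replaced by a lookup table plus a single indexing step. A minimal sketch under the same gray-value-to-train-id mapping as encode_labels (encode_labels_lut and GRAY_TO_TRAINID are my own names):

import numpy as np

# Gray values that map to a foreground train id; everything else becomes 0 (background).
GRAY_TO_TRAINID = {
    200: 1, 204: 1, 209: 1,
    201: 2, 203: 2,
    217: 3,
    210: 4,
    214: 5,
    220: 6, 221: 6, 222: 6, 224: 6, 225: 6, 226: 6,
    205: 7, 227: 7, 250: 7,
}

def encode_labels_lut(color_mask):
    lut = np.zeros(256, dtype='uint8')  # unlisted gray values stay background (0)
    for gray, train_id in GRAY_TO_TRAINID.items():
        lut[gray] = train_id
    return lut[color_mask]  # fancy indexing applies the table to every pixel at once

NumPy fancy indexing applies the 256-entry table to the whole mask in one pass, which is also faster than dozens of boolean-mask assignments.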
2. The make_lists.py and data_feeder.py files
I added some comments to the code files to aid understanding.
# Description: Make Data Lists for Lane Segmentation Competition
# (the CSV stores only the image file paths and the image-label correspondence)
import os
import pandas as pd                # create and read CSV files
from sklearn.utils import shuffle  # shuffle the data
import shutil                      # file copy/delete helpers (not used below)
import numpy as np
import cv2                         # cv2 is not strictly necessary here; PIL would work too
# How to import a local module: the module name must match the path and file name exactly (without the .py suffix)
from utils.process_labels import encode_labels, decode_color_labels
#================================================
# make train & validation lists
#================================================
label_list = []
image_list = []
# Change the paths below to your own; use absolute paths.
# To build train.csv:
# image_dir = 'E:/PaddleXinstallation/data/chusai_data/Image_Data/'
# label_dir = 'E:/PaddleXinstallation/data/chusai_data/Gray_Label/'
# To build val.csv:
image_dir = 'E:/PaddleXinstallation/data/chusai_data/Val_Data/'
label_dir = 'E:/PaddleXinstallation/data/chusai_data/Gray_Label/'
# image_dir = '/home/gujingxiao/projects/PaddlePaddle/Image_Data/'
# label_dir = '/home/gujingxiao/projects/PaddlePaddle/Gray_Label/'
# Walk the image and label trees in lockstep to pair them up:
# os.path.join(path1[, path2[, ...]]) joins multiple path components into one path
# os.listdir() returns the files and folders under the given path
# lower() returns the string with all uppercase characters converted to lowercase
for s1 in os.listdir(image_dir):
    image_sub_dir1 = os.path.join(image_dir, s1)
    label_sub_dir1 = os.path.join(label_dir, 'Label_' + str.lower(s1), 'Label')
    # print(image_sub_dir1, label_sub_dir1)
    for s2 in os.listdir(image_sub_dir1):
        image_sub_dir2 = os.path.join(image_sub_dir1, s2)
        label_sub_dir2 = os.path.join(label_sub_dir1, s2)
        # print(image_sub_dir2, label_sub_dir2)
        for s3 in os.listdir(image_sub_dir2):
            image_sub_dir3 = os.path.join(image_sub_dir2, s3)
            label_sub_dir3 = os.path.join(label_sub_dir2, s3)
            # print(image_sub_dir3, label_sub_dir3)
            for s4 in os.listdir(image_sub_dir3):
                s44 = s4.replace('.jpg', '_bin.png')  # derive the label file name
                image_sub_dir4 = os.path.join(image_sub_dir3, s4)
                label_sub_dir4 = os.path.join(label_sub_dir3, s44)
                if not os.path.exists(image_sub_dir4):
                    print(image_sub_dir4)
                if not os.path.exists(label_sub_dir4):
                    print(label_sub_dir4)
                # print(image_sub_dir4, label_sub_dir4)
                image_list.append(image_sub_dir4)
                label_list.append(label_sub_dir4)
print(len(image_list), len(label_list))
save = pd.DataFrame({'image': image_list, 'label': label_list})  # rows for the CSV
save_shuffle = shuffle(save)  # shuffle the data
# To build train.csv:
# save_shuffle.to_csv('E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/data_list/train.csv', index=False)
# save_shuffle.to_csv('../data_list/train.csv', index=False)
# To build val.csv:
save_shuffle.to_csv('E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/data_list/val.csv', index=False)
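As an aside, the four nested listdir loops can be collapsed with os.walk. A hedged sketch assuming the same directory layout and the '_bin.png' naming rule used above (make_list is my own helper name):

import os
import pandas as pd

def make_list(image_dir, label_dir):
    pairs = []
    for root, _, files in os.walk(image_dir):
        for name in files:
            if not name.endswith('.jpg'):
                continue
            image_path = os.path.join(root, name)
            # Mirror the relative path into the label tree: <Road>/... -> Label_<road>/Label/...
            parts = os.path.relpath(image_path, image_dir).split(os.sep)
            label_path = os.path.join(label_dir, 'Label_' + parts[0].lower(), 'Label',
                                      *parts[1:]).replace('.jpg', '_bin.png')
            if os.path.exists(label_path):
                pairs.append((image_path, label_path))
    return pd.DataFrame(pairs, columns=['image', 'label'])

Next comes data_feeder.py: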
# Description: Data Generator for Lane Segmentation Competition
import os
import cv2
import numpy as np
from paddle.fluid import core
from utils.process_labels import encode_labels, verify_labels
from utils.image_process import crop_resize_data, crop_val_resize_data

# Feed Data into Tensor
def get_feeder_data(data, place, for_test=False):
    feed_dict = {}
    image_t = core.LoDTensor()
    image_t.set(data[0], place)
    feed_dict["image"] = image_t
    # if not test, feed label also
    # Otherwise, only feed image
    if not for_test:
        labels_t = core.LoDTensor()
        labels_t.set(data[1], place)
        feed_dict["label"] = labels_t
    return feed_dict
# Train Images Generator
def train_image_gen(train_list, batch_size=4, image_size=[1024, 384], crop_offset=690):
    # Arrange all indexes
    all_batches_index = np.arange(0, len(train_list))
    out_images = []
    out_masks = []
    image_dir = np.array(train_list['image'])
    label_dir = np.array(train_list['label'])
    while True:
        # Random shuffle indexes every epoch
        np.random.shuffle(all_batches_index)
        for index in all_batches_index:  # generate data on the fly: read one sample per index
            if os.path.exists(image_dir[index]):
                ori_image = cv2.imread(image_dir[index])
                ori_mask = cv2.imread(label_dir[index], cv2.IMREAD_GRAYSCALE)
                # Crop the top part of the image
                # Resize to train size
                train_img, train_mask = crop_resize_data(ori_image, ori_mask, image_size, crop_offset)
                # Encode
                train_mask = encode_labels(train_mask)
                # verify_labels(train_mask)
                out_images.append(train_img)
                out_masks.append(train_mask)
                if len(out_images) >= batch_size:
                    out_images = np.array(out_images)
                    out_masks = np.array(out_masks)
                    out_images = out_images[:, :, :, ::-1].transpose(0, 3, 1, 2).astype(np.float32) / (255.0 / 2) - 1
                    out_masks = out_masks.astype(np.int64)
                    yield out_images, out_masks
                    out_images, out_masks = [], []
            else:
                print(image_dir[index], 'does not exist.')
# Validation Images Generator
def val_image_gen(val_list, batch_size=4, image_size=[1024, 384], crop_offset=690):
    all_batches_index = np.arange(0, len(val_list))
    out_images = []
    out_masks = []
    image_dir = np.array(val_list['image'])
    label_dir = np.array(val_list['label'])
    while True:
        np.random.shuffle(all_batches_index)
        for index in all_batches_index:
            if os.path.exists(image_dir[index]):
                ori_image = cv2.imread(image_dir[index])
                ori_mask = cv2.imread(label_dir[index], cv2.IMREAD_GRAYSCALE)
                val_img, val_mask = crop_val_resize_data(ori_image, ori_mask, image_size, crop_offset)
                val_mask = encode_labels(val_mask)
                out_images.append(val_img)
                out_masks.append(val_mask)
                if len(out_images) >= batch_size:
                    out_images = np.array(out_images)
                    out_masks = np.array(out_masks)
                    out_images = out_images[:, :, :, ::-1].transpose(0, 3, 1, 2).astype(np.float32) / (255.0 / 2) - 1
                    out_masks = out_masks.astype(np.int64)
                    yield out_images, out_masks
                    out_images, out_masks = [], []
            else:
                print(image_dir[index], 'does not exist.')
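The densest line in both generators is the normalization one-liner. A small sketch spelling out its three steps (the batch shape is illustrative):

import numpy as np

batch = np.random.randint(0, 256, size=(4, 384, 1024, 3), dtype=np.uint8)  # NHWC, BGR as cv2 loads it
rgb = batch[:, :, :, ::-1]                      # step 1: BGR -> RGB
nchw = rgb.transpose(0, 3, 1, 2)                # step 2: NHWC -> NCHW, the layout Paddle expects
normed = nchw.astype(np.float32) / 127.5 - 1.0  # step 3: scale [0, 255] to [-1, 1]; 255.0 / 2 == 127.5
assert -1.0 <= normed.min() and normed.max() <= 1.0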
3. The val_inference.py file
# Description: Val & Inference Code for Lane Segmentation Competition
import cv2
import sys
import os
import time
import paddle
import pandas as pd
import numpy as np
import paddle.fluid as fluid
from utils.process_labels import decode_color_labels
from utils.image_process import crop_resize_data, expand_resize_data
from utils.data_feeder import get_feeder_data, val_image_gen
from models.unet_base import unet_base
from models.unet_simple import unet_simple
from models.deeplabv3p import deeplabv3p
paddle.enable_static()
def mean_iou(pred, label, num_classes):
    pred = fluid.layers.argmax(pred, axis=1)
    pred = fluid.layers.cast(pred, 'int32')
    label = fluid.layers.cast(label, 'int32')
    miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes)
    return miou
no_grad_set = []
def create_loss(predict, label, num_classes):
    predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])
    predict = fluid.layers.reshape(predict, shape=[-1, num_classes])
    predict = fluid.layers.softmax(predict)
    label = fluid.layers.reshape(label, shape=[-1, 1])
    bce_loss = fluid.layers.cross_entropy(predict, label)
    no_grad_set.append(label.name)
    loss = bce_loss
    miou = mean_iou(predict, label, num_classes)
    return fluid.layers.reduce_mean(loss), miou
def create_network(train_image, train_label, classes, network='unet_simple', image_size=(1024, 384), for_test=False):
    if network == 'unet_base':
        predict = unet_base(train_image, classes, image_size)
    elif network == 'unet_simple':
        predict = unet_simple(train_image, classes, image_size)
    elif network == 'deeplabv3p':
        predict = deeplabv3p(train_image, classes)
    else:
        raise Exception('Not support this model:', network)
    print('The program will run', network)
    if for_test == False:
        loss, miou = create_loss(predict, train_label, classes)
        return loss, miou, predict
    elif for_test == True:
        return predict
    else:
        raise Exception('Wrong Status:', for_test)
# The main method
def main():
    IMG_SIZE = [1536, 512]
    SUBMISSION_SIZE = [3384, 1710]
    # If False, only the single-model prediction PNGs are saved; if True, the
    # single-model logits are also saved as npy files for the final ensemble.
    # save_test_logits = False
    save_test_logits = True
    num_classes = 8
    batch_size = 4
    log_iters = 100
    # network = 'unet_simple'
    network = 'deeplabv3p'
    # Define paths for each model
    if network == 'deeplabv3p':
        # model_path = "./model_weights/paddle_deeplabv3p_8_end_060223"
        # npy_dir = '/npy_save/deeplabv3p/'
        model_path = "E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/model_weights/paddle_deeplabv3p_8_end_060223"
        npy_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/deeplabv3p/'
    elif network == 'unet_base':
        # model_path = "./model_weights/paddle_unet_base_10_end_059909"
        # npy_dir = '/npy_save/unet_base/'
        model_path = "E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/model_weights/paddle_unet_base_0_2000"
        npy_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/unet_base/'
    elif network == 'unet_simple':
        # model_path = "./model_weights/paddle_unet_simple_12_end_060577"
        # npy_dir = '/npy_save/unet_simple/'
        model_path = "./model_weights/paddle_unet_simple_12_end_060577"
        npy_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/unet_simple/'
    program_choice = 2  # 1 - Validation; 2 - Test
    show_label = False
    crop_offset = 690
    data_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/data_list/val.csv'
    # data_dir = './data_list/val.csv'
    test_dir = 'E:/PaddleXinstallation/data/chusai_data/TestSet/ColorImage/'
    # test_dir = '../PaddlePaddle/TestSet_Final/ColorImage/'
    sub_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/test_submission/'
    # Load the validation list.
    val_list = pd.read_csv(data_dir)
    # Initialization
    images = fluid.layers.data(name='image', shape=[3, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    labels = fluid.layers.data(name='label', shape=[1, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    iter_id = 0
    total_loss = 0.0
    total_miou = 0.0
    prev_time = time.time()
    # Validation
    if program_choice == 1:
        val_reader = val_image_gen(val_list, batch_size=batch_size, image_size=IMG_SIZE, crop_offset=crop_offset)
        reduced_loss, miou, pred = create_network(images, labels, num_classes, network=network, image_size=(IMG_SIZE[1], IMG_SIZE[0]), for_test=False)
        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        fluid.io.load_params(exe, model_path)
        print("loaded model from: %s" % model_path)
        # Parallel Executor to use multi-GPUs
        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.allow_op_delay = True
        build_strategy = fluid.BuildStrategy()
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        train_exe = fluid.ParallelExecutor(use_cuda=True, build_strategy=build_strategy, exec_strategy=exec_strategy)
        print('Start Validation!')
        for iteration in range(int(len(val_list) / batch_size)):
            val_data = next(val_reader)
            results = train_exe.run(
                feed=get_feeder_data(val_data, place),
                fetch_list=[reduced_loss.name, miou.name, pred.name])
            if iter_id % log_iters == 0:
                print('Finished Processing %d Images.' % (iter_id * batch_size))
            iter_id += 1
            total_loss += np.mean(results[0])
            total_miou += np.mean(results[1])
            # label to mask
            if show_label == True:
                label_image = val_data[1][0]
                color_label_mask = decode_color_labels(label_image)
                color_label_mask = np.transpose(color_label_mask, (1, 2, 0))
                cv2.imshow('gt_label', cv2.resize(color_label_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                prediction = np.argmax(results[2][0], axis=0)
                color_pred_mask = decode_color_labels(prediction)
                color_pred_mask = np.transpose(color_pred_mask, (1, 2, 0))
                cv2.imshow('pred_label', cv2.resize(color_pred_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
        end_time = time.time()
        print("validation loss: %.3f, mean iou: %.3f, time cost: %.3f s"
              % (total_loss / iter_id, total_miou / iter_id, end_time - prev_time))
    # Test
    elif program_choice == 2:
        predictions = create_network(images, labels, num_classes, network=network, image_size=(IMG_SIZE[1], IMG_SIZE[0]), for_test=True)
        place = fluid.CUDAPlace(0)
        # place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        fluid.io.load_params(exe, model_path)
        print("loaded model from: %s" % model_path)
        print('Start Making Submissions!')
        test_list = os.listdir(test_dir)
        for test_name in test_list:
            test_ori_image = cv2.imread(os.path.join(test_dir, test_name))
            test_image = crop_resize_data(test_ori_image, label=None, image_size=IMG_SIZE, offset=crop_offset)
            out_image = np.expand_dims(np.array(test_image), axis=0)
            out_image = out_image[:, :, :, ::-1].transpose(0, 3, 1, 2).astype(np.float32) / (255.0 / 2) - 1
            feed_dict = {}
            feed_dict["image"] = out_image
            results_1 = exe.run(
                feed=feed_dict,
                fetch_list=[predictions])
            if iter_id % 20 == 0:
                print('Finished Processing %d Images.' % (iter_id))
            iter_id += 1
            prediction = np.argmax(results_1[0][0], axis=0)
            # Save npy files
            if save_test_logits == True:
                np.save(npy_dir + test_name.replace('.jpg', '.npy'), results_1[0][0])
            # Save Submission PNG
            submission_mask = expand_resize_data(prediction, SUBMISSION_SIZE, crop_offset)
            cv2.imwrite(os.path.join(sub_dir, test_name.replace('.jpg', '.png')), submission_mask)
            # Show Label
            if show_label == True:
                cv2.imshow('test_image', cv2.resize(test_ori_image, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.imshow('pred_label', cv2.resize(submission_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
        sys.stdout.flush()

# Main
if __name__ == "__main__":
    main()
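For intuition about the metric, here is a hedged numpy sketch of what fluid.layers.mean_iou computes (mean_iou_np is my own name): per-class intersection over union, averaged over the classes that actually occur.

import numpy as np

def mean_iou_np(pred, label, num_classes=8):
    # pred, label: integer class-id arrays of the same shape
    ious = []
    for c in range(num_classes):
        inter = np.sum((pred == c) & (label == c))
        union = np.sum((pred == c) | (label == c))
        if union > 0:  # skip classes absent from both prediction and label
            ious.append(inter / union)
    return float(np.mean(ious)) if ious else 0.0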
4. The train.py file
PS: Note the code differences between single-GPU and dual-GPU training. I suspect the author ran it on Ubuntu, since the stated environment is paddlepaddle-gpu 1.3.0.post97, i.e. Paddle 1.3.0 + CUDA 9.0 + cuDNN v7 with dual-GPU training (Paddle supports multi-GPU training only on Linux; on Windows only single-GPU training works).
- Paddle has since moved on to 2.x, so there may be some API incompatibilities; work from the actual error messages to resolve them.
- I changed the labels dtype from float32 to int32 because I hit a data-type error at the time and only learned the fix after asking around:
- # labels = fluid.layers.data(name='label', shape=[1, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
# Description: Train Code for Lane Segmentation Competition
import time
import paddle
import pandas as pd
import numpy as np
import paddle.fluid as fluid
import os
from utils.data_feeder import get_feeder_data, train_image_gen
from models.unet_base import unet_base
from models.unet_simple import unet_simple
from models.deeplabv3p import deeplabv3p
paddle.enable_static()
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'  # pin the visible GPUs to avoid device trouble
# Compute Mean Iou
def mean_iou(pred, label, num_classes=8):
    pred = fluid.layers.argmax(pred, axis=1)
    pred = fluid.layers.cast(pred, 'int32')
    label = fluid.layers.cast(label, 'int32')
    miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes)
    return miou
# Get Loss Function
no_grad_set = []
def create_loss(predict, label, num_classes):
    predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])
    predict = fluid.layers.reshape(predict, shape=[-1, num_classes])
    predict = fluid.layers.softmax(predict)
    label = fluid.layers.reshape(label, shape=[-1, 1])
    # BCE with DICE
    bce_loss = fluid.layers.cross_entropy(predict, label)
    dice_loss = fluid.layers.dice_loss(predict, label)
    no_grad_set.append(label.name)
    loss = bce_loss + dice_loss
    miou = mean_iou(predict, label, num_classes)
    return fluid.layers.reduce_mean(loss), miou
def create_network(train_image, train_label, classes, network='unet_simple', image_size=(1024, 384), for_test=False):
    if network == 'unet_base':
        predict = unet_base(train_image, classes, image_size)
    elif network == 'unet_simple':
        predict = unet_simple(train_image, classes, image_size)
    elif network == 'deeplabv3p':
        predict = deeplabv3p(train_image, classes)
    else:
        raise Exception('Not support this model:', network)
    print('The program will run', network)
    if for_test == False:
        loss, miou = create_loss(predict, train_label, classes)
        return loss, miou, predict
    elif for_test == True:
        return predict
    else:
        raise Exception('Wrong Status:', for_test)
# The main method
def main():
    IMG_SIZE = [1536, 512]
    SUBMISSION_SIZE = [3384, 1710]
    add_num = 13
    num_classes = 8
    # batch_size = 2  # use an even batch size for dual-GPU training
    batch_size = 1
    log_iters = 100
    base_lr = 0.001
    # base_lr = 0.0006
    save_model_iters = 2000
    # use_pretrained = True
    use_pretrained = False
    # network = 'deeplabv3p'
    network = 'unet_base'
    save_model_path = "E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/model_weights/paddle_" + network + "_"
    model_path = "E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/model_weights/paddle_" + network + "_12_end"
    # save_model_path = "./model_weights/paddle_" + network + "_"
    # model_path = "./model_weights/paddle_" + network + "_12_end"
    epoches = 2
    crop_offset = 690
    data_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/data_list/train.csv'
    # Get the training data list.
    train_list = pd.read_csv(data_dir)
    # Initialization
    images = fluid.layers.data(name='image', shape=[3, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    labels = fluid.layers.data(name='label', shape=[1, IMG_SIZE[1], IMG_SIZE[0]], dtype='int32')
    # labels = fluid.layers.data(name='label', shape=[1, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    iter_id = 0
    total_loss = 0.0
    total_miou = 0.0
    prev_time = time.time()
    # Train
    print('Train Data Size:', len(train_list))
    train_reader = train_image_gen(train_list, batch_size, IMG_SIZE, crop_offset)  # data generator
    # Create model and define optimizer
    reduced_loss, miou, pred = create_network(images, labels, num_classes, network=network, image_size=(IMG_SIZE[1], IMG_SIZE[0]), for_test=False)
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=base_lr)
    optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)
    # Whether to load a pretrained model (single-GPU execution)
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    if use_pretrained == True:
        fluid.io.load_params(exe, model_path)
        print("loaded model from: %s" % model_path)
    else:
        print("Train from initialized model.")
    # Parallel Executor to use multi-GPUs (multi-GPU execution)
    # For single-GPU training, simply leave the following block commented out.
    # exec_strategy = fluid.ExecutionStrategy()
    # exec_strategy.allow_op_delay = True
    # build_strategy = fluid.BuildStrategy()
    # build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
    # train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=reduced_loss.name,
    #                                    build_strategy=build_strategy, exec_strategy=exec_strategy)
    # Training
    for epoch in range(epoches):
        print('Start Training Epoch: %d' % (epoch + 1))
        train_length = len(train_list)
        for iteration in range(int(train_length / batch_size)):
            train_data = next(train_reader)
            # Single-GPU training (matches the executor created above)
            results = exe.run(
                feed=get_feeder_data(train_data, place),
                fetch_list=[reduced_loss.name, miou.name])
            # Multi-GPU training:
            # results = train_exe.run(
            #     feed=get_feeder_data(train_data, place),
            #     fetch_list=[reduced_loss.name, miou.name])
            iter_id += 1
            total_loss += np.mean(results[0])
            total_miou += np.mean(results[1])
            if iter_id % log_iters == 0:  # Print log
                end_time = time.time()
                print(
                    "Iter - %d: train loss: %.3f, mean iou: %.3f, time cost: %.3f s"
                    % (iter_id, total_loss / log_iters, total_miou / log_iters, end_time - prev_time))
                total_loss = 0.0
                total_miou = 0.0
                prev_time = time.time()
            if iter_id % save_model_iters == 0:  # save model
                dir_name = save_model_path + str(epoch + add_num) + '_' + str(iter_id)
                fluid.io.save_params(exe, dirname=dir_name)
                print("Saved checkpoint: %s" % (dir_name))
        iter_id = 0
        dir_name = save_model_path + str(epoch + add_num) + '_end'
        fluid.io.save_params(exe, dirname=dir_name)
        print("Saved checkpoint: %s" % (dir_name))
# Main
if __name__ == "__main__":
    main()
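The training loss above is per-pixel cross entropy plus a dice term. A hedged numpy sketch of the dice idea (dice_loss_np is my own name; Paddle's fluid.layers.dice_loss operates on probability tensors and integer labels):

import numpy as np

def dice_loss_np(probs, onehot, eps=1e-5):
    # probs:  (num_pixels, num_classes) softmax outputs
    # onehot: (num_pixels, num_classes) one-hot ground truth
    inter = np.sum(probs * onehot)
    total = np.sum(probs) + np.sum(onehot)
    return 1.0 - (2.0 * inter + eps) / (total + eps)

Dice rewards overall overlap between prediction and ground truth, which helps with the heavy class imbalance of thin lane markings, while cross entropy supervises every pixel individually.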
5. The ensemble.py file
This file implements model ensembling: it fuses the predictions of the three trained models to further improve the semantic-segmentation accuracy.
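The core idea is simple: average the per-class logit maps from each model, then take a per-pixel argmax. A minimal numpy sketch (ensemble_argmax is my own name), before the full script:

import numpy as np

def ensemble_argmax(logits_list):
    # logits_list: per-model arrays of shape (num_classes, H, W), spatially aligned
    avg = np.mean(np.stack(logits_list, axis=0), axis=0)
    return np.argmax(avg, axis=0)  # (H, W) map of class ids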
import os
import numpy as np
import cv2
import paddle
from paddle import fluid
from utils.image_process import expand_resize_data
paddle.enable_static()  # needed on Paddle 2.x, matching the other scripts

# Create a bilinearNet to resize predictions to full size
def bilinearNet(predictions, submission_size, crop_offset):
    logit = fluid.layers.resize_bilinear(input=predictions, out_shape=(submission_size[0], submission_size[1] - crop_offset))
    return logit
# Main
if __name__ == "__main__":
    print('Start Making Ensemble Submissions!')
    # test_dir = '../PaddlePaddle/TestSet_Final/ColorImage/'
    # sub_dir = './test_submission/'
    test_dir = 'E:/PaddleXinstallation/data/chusai_data/TestSet_Final/ColorImage/'
    sub_dir = 'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/test_submission/'
    IMG_SIZE = [1536, 512]
    SUBMISSION_SIZE = [3384, 1710]
    crop_offset = 690
    # Ignore Class 4
    label_num = 8
    test_list = os.listdir(test_dir)
    # Three folders holding the npy files for all test images
    # ensemble index 1 0.61234
    # model_lists = ['/npy_save/deeplabv3p/',
    #                '/npy_save/unet_base/',
    #                '/npy_save/unet_simple/']
    model_lists = ['E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/deeplabv3p/',
                   'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/unet_base/',
                   'E:/PaddleXinstallation/data/Lane-Segmentation-Solution-For-BaiduAI-Autonomous-Driving-Competition-master/npy_save/unet_simple/']
    # Build Model & Initialize Program
    images = fluid.layers.data(name='image', shape=[label_num, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    predictions = bilinearNet(images, SUBMISSION_SIZE, crop_offset)
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    for index in range(len(test_list)):
        test_name = test_list[index]
        print(index, test_name)
        # Load the three different npys, then average them
        model_logits1 = np.load(model_lists[0] + test_name.replace('.jpg', '.npy'))
        model_logits2 = np.load(model_lists[1] + test_name.replace('.jpg', '.npy'))
        model_logits3 = np.load(model_lists[2] + test_name.replace('.jpg', '.npy'))
        avg_model_logits = (model_logits1 + model_logits2 + model_logits3) / 3.0
        logits_input = np.expand_dims(np.array(avg_model_logits), axis=0)
        # Feed data & Run BilinearNet
        feed_dict = {}
        feed_dict["image"] = logits_input
        results = exe.run(
            feed=feed_dict,
            fetch_list=[predictions])
        prediction = np.argmax(results[0][0], axis=0)
        # Convert prediction to submission image
        submission_mask = expand_resize_data(prediction, SUBMISSION_SIZE, crop_offset)
        # Save submission png
        cv2.imwrite(os.path.join(sub_dir, test_name.replace('.jpg', '.png')), submission_mask)
That wraps up the dissection of this project's source code.
If your own graphics card cannot handle training, you can also train on a GPU online; check out Baidu PaddlePaddle.
That is all for this installment. I will cover more of the underlying deep-learning theory in future posts. Feel free to leave a comment below or message me; I will reply when I have time.