OpenCV在车道线查找中的使用

• 计算相机校准矩阵和给定一组棋盘图像的失真系数。
• 对原始图像应用畸变校正。
• 使用颜色变换，渐变等创建阈值二值图像。
• 应用透视变换来纠正二值图像（“鸟瞰”）。
• 检测车道像素，找到车道边界。
• 确定车道的曲率以及车辆相对于车道中心的位置。
• 将检测到的车道边界转回到原始图像上。
• 输出车道边界的视觉显示和车道曲率和车辆位置的数值估计。

相机校准矩阵和失真系数

```import os, glob, pickle
import numpy as np
import cv2
import matplotlib.pyplot as plt
class CameraCalibrator:
'''Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.'''
def __init__(self, image_directory, image_filename, binary_filename, nx, ny):
print ('Initializing CameraCalibrator ...')
self.__image_directory = image_directory
self.__image_filename = image_filename
self.__binary_filename = binary_filename
self.__nx = nx # the number of inside corners in x
self.__ny = ny # the number of inside corners in y
self.mtx = None
self.dist = None
self.rvecs = None
self.tvecs = None
self.__calibrated = False
def __calibrate(self):
# Read in and make a list of calibration images
calibration_filenames = glob.glob(self.__image_directory+'/'+self.__image_filename)
# Arrays to store object points and image points from all the images
object_points = [] # 3D points in real world space
image_points = [] # 2D points in image plane
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), ...,(7,5,0)
object_p = np.zeros((self.__ny*self.__nx,3),np.float32)
object_p[:,:2] = np.mgrid[0:self.__nx,0:self.__ny].T.reshape(-1,2) # s,y coordinates
# Extract the shape of any image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
shape = gray.shape[::-1] # (width,height)
# Process each calibration image
for image_filename in calibration_filenames:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB is standard in matlibplot
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (self.__nx, self.__ny), None)
# If found, draw corners
if ret == True:
# Store the corners found in the current image
object_points.append(object_p) # how it should look like
image_points.append(corners) # how it looks like
# Draw and display the corners
cv2.drawChessboardCorners(image, (self.__nx, self.__ny), corners, ret)
plt.figure()
plt.imshow(image)
plt.show()
# Do the calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(object_points, image_points, shape, None, None)
#print(ret, mtx, dist, rvecs, tvecs)
# Pickle to save time for subsequent runs
binary = {}
binary["mtx"] = mtx
binary["dist"] = dist
binary["rvecs"] = rvecs
binary["tvecs"] = tvecs
pickle.dump(binary, open(self.__image_directory + '/' + self.__binary_filename, "wb"))
self.mtx = mtx
self.dist = dist
self.rvecs = rvecs
self.tvecs = tvecs
self.__calibrated = True
'''Load previously computed calibration binary data'''
with open(self.__image_directory + '/' + self.__binary_filename, mode='rb') as f:
self.mtx = binary['mtx']
self.dist = binary['dist']
self.rvecs = binary['rvecs']
self.tvecs = binary['tvecs']
self.__calibrated = True
def get_data(self):
'''Getter for the calibration data. At the first call it gerenates it.'''
if os.path.isfile(self.__image_directory + '/' + self.__binary_filename):
else:
self.__calibrate()
return self.mtx, self.dist, self.rvecs, self.tvecs
def undistort(self, image):
if  self.__calibrated == False:
self.get_data()
return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
def test_undistort(self, image_filename, plot=False):
'''A method to test the undistort and to plot its result.'''
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB is standard in matlibplot
image_undist = self.undistort(image)
# Ploting both images Original and Undistorted
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Original/Distorted')
ax1.imshow(image)
ax2.set_title('Undistorted')
ax2.imshow(image_undist)
plt.show()```

使用颜色变换，渐变等创建阈值二值图像

```def compute_binary_image(self, color_image, plot=False):
# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(color_image, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
# Grayscale image
# NOTE: we already saw that standard grayscaling lost color information for the lane lines
# Explore gradients in other colors spaces / color channels to see what might work better
gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
# Sobel x
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
thresh_min = 20
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# Threshold color channel
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
if (plot):
# Ploting both images Original and Binary
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Undistorted/Color')
ax1.imshow(color_image)
ax2.set_title('Binary/Combined S channel and gradient thresholds')
ax2.imshow(combined_binary, cmap='gray')
plt.show()
return combined_binary```

应用透视变换来纠正二值图像（“鸟瞰”）

```def compute_perspective_transform(self, binary_image):
# Define 4 source and 4 destination points = np.float32([[,],[,],[,],[,]])
shape = binary_image.shape[::-1] # (width,height)
w = shape[0]
h = shape[1]
transform_src = np.float32([ [580,450], [160,h], [1150,h], [740,450]])
transform_dst = np.float32([ [0,0], [0,h], [w,h], [w,0]])
M = cv2.getPerspectiveTransform(transform_src, transform_dst)
return M
def apply_perspective_transform(self, binary_image, M, plot=False):
warped_image = cv2.warpPerspective(binary_image, M, (binary_image.shape[1], binary_image.shape[0]), flags=cv2.INTER_NEAREST)  # keep same size as input image
if(plot):
# Ploting both images Binary and Warped
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Binary/Undistorted and Tresholded')
ax1.imshow(binary_image, cmap='gray')
ax2.set_title('Binary/Undistorted and Warped Image')
ax2.imshow(warped_image, cmap='gray')
plt.show()
return warped_image```

```transform_src = np.float32([ [580,450], [160,h], [1150,h], [740,450]])
transform_dst = np.float32([ [0,0], [0,h], [w,h], [w,0]])```

检测车道像素，找到车道边界

```import numpy as np
histogram = np.sum(img[img.shape[0]/2:,:], axis=0)
plt.plot(histogram)```

```def extract_lanes_pixels(self, binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
#plt.plot(histogram)
#plt.show()
# Create an output image to draw on and  visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, left_lane_inds, right_lane_inds
def poly_fit(self, leftx, lefty, rightx, righty, left_lane_inds, right_lane_inds, binary_warped, plot=False):
    """Fit a second-order polynomial x = f(y) to each lane line.

    BUGFIX: the signature previously read ``plot:False`` — that is an
    *annotation* with no default, which silently made ``plot`` a required
    positional argument; ``plot=False`` is the intended optional flag.

    Returns (left_fit, right_fit, ploty, left_fitx, right_fitx), where the
    fits are polynomial coefficient triples and *_fitx are the fitted x
    values evaluated at every image row in ploty.
    """
    # Fit x as a function of y (lane lines are near-vertical)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting, one per image row
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    # Color the detected lane pixels on a visualization image:
    # left lane red, right lane blue.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    if plot:
        plt.imshow(out_img)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.show()
    return left_fit, right_fit, ploty, left_fitx, right_fitx

输出车道边界的视觉显示和车道曲率和车辆位置的数值估计

lane_detection.py中的函数render_curvature_and_offset用于将检测到的车道边界转绘回原始图像上，并使用填充的多边形绘制检测到的车道区域。它还在图像或视频帧的左上角和底部绘制出曲率和车辆位置的数值。

0 条评论

• OpenCV在车道线查找中的使用

本篇是自动驾驶系列的第二篇，在后台留言索取代码会提供源码链接。这次的目标是编写一个软件流水线来识别汽车前置摄像头的视频中的车道边界。摄像机标定图像，试验路图像和...

• 自动驾驶汽车硬件系统概述

如果说人工智能技术将是自动驾驶汽车的大脑，那么硬件系统就是它的神经与四肢。从自动驾驶汽车周边环境信息的采集、传导、处理、反应再到各种复杂情景的解析，硬件系统的构...

• ZLG深度解析——语音识别技术

语言作为人类的一种基本交流方式，在数千年历史中得到持续传承。近年来，语音识别技术的不断成熟，已广泛应用于我们的生活当中。语音识别技术是如何让机器“听懂”人类语言...

• OpenCV在车道线查找中的使用

本篇是自动驾驶系列的第二篇，在后台留言索取代码会提供源码链接。这次的目标是编写一个软件流水线来识别汽车前置摄像头的视频中的车道边界。摄像机标定图像，试验路图像和...

• 手写批量线性回归算法：在Python3中梯度下降方法实现模型训练

在这篇文章中，我们将看一个使用NumPy作为数据处理库的Python3编写的程序，来了解如何实现使用梯度下降法的（批量）线性回归。

• 程序员修神之路--分布式缓存的一条明路（附代码）

你忘了去年分布式缓存服务器也扩容过一次，很多请求都穿透了，DB差点扛不住呀，这次再扩容DB估计就得挂了

• 解决军哥lnmp1.3环境下wordpress不显示主题问题

最近给新买的vps用了军哥的lnmp环境包，由于是第一次用他的，还不太熟悉。 一开始就遇到了一些问题吧，在安装好wordpress之后，发现主题不能显示。 然后...

• 张量分解与应用-学习笔记[01]

未来将以张量如何切入深度学习及强化学习领域等方面进行研究和探讨。希望这个长篇能够坚持下去。