我正在使用下面的代码来检测人脸,并像这样在人脸顶部绘制矩形。
Inference.py——在这个文件中,我们尝试调用 draw_bounding_box 在脸部周围绘制边框:
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
def load_image(image_path, grayscale=False, target_size=None):
    """Load an image file and return its pixel data as a numpy array.

    Args:
        image_path: path to the image file on disk.
        grayscale: if True, load as single-channel grayscale.
        target_size: optional (height, width) to resize the image to.

    Returns:
        The image contents as a numpy array.
    """
    # BUG FIX: keras.preprocessing.image has no `load_face_coordinates`
    # or `face_coordinates_to_array` functions — the correct API is
    # `load_img` / `img_to_array` (the names were garbled in the post).
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image)
def load_detection_model(model_path):
    """Build an OpenCV Haar-cascade classifier from the XML file at *model_path*."""
    return cv2.CascadeClassifier(model_path)
def detect_faces(detection_model, gray_image_array):
    """Run the cascade detector over a grayscale image.

    Returns whatever the model's detectMultiScale yields — for OpenCV
    cascades, a sequence of (x, y, width, height) face rectangles.
    """
    scale_factor = 1.3   # image pyramid step between detection scales
    min_neighbors = 5    # neighbor rectangles required to keep a hit
    return detection_model.detectMultiScale(
        gray_image_array, scale_factor, min_neighbors
    )
def draw_bounding_box(face_coordinates, image_array, color, r=8, d=10):
    """Draw a corner-style frame (four L-shaped corners, optionally
    rounded) around a rectangular region, in place on *image_array*.

    Args:
        face_coordinates: (x1, y1, x2, y2) — top-left and bottom-right
            corners of the region to frame.
        image_array: image (numpy array) to draw on; modified in place.
        color: color tuple for the lines and arcs.
        r: radius of the rounded-corner arcs; 0 gives square corners.
            Defaulted to 8 so the existing 3-argument call sites
            (previously a TypeError) keep working.
        d: length of each straight segment extending from a corner.
            Defaulted to 10 for the same reason.
    """
    x1, y1, x2, y2 = face_coordinates
    thickness = 2
    # Top left
    cv2.line(image_array, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
    cv2.line(image_array, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
    # Top right
    cv2.line(image_array, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
    cv2.line(image_array, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
    # Bottom left
    cv2.line(image_array, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
    cv2.line(image_array, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
    # Bottom right
    cv2.line(image_array, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
    cv2.line(image_array, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
    # Rounded-corner arcs: only drawn when the radius is positive;
    # with r == 0 the corners are square and a zero-axis ellipse
    # would be pointless.
    if r > 0:
        cv2.ellipse(image_array, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
        cv2.ellipse(image_array, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
        cv2.ellipse(image_array, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
        cv2.ellipse(image_array, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
# Blank 256x256, 3-channel canvas (all zeros), one unsigned byte per channel.
image_array = np.zeros(shape=(256, 256, 3), dtype=np.uint8)
detectface.py在这个文件中,我们正在检测人脸,并调用Inference.py中的函数来绘制人脸周围的方框。
# starting video streaming
#
# NOTE(review): `face_detection`, `apply_offsets`, `emotion_offsets`,
# `emotion_target_size`, `preprocess_input`, `emotion_classifier`,
# `emotion_labels`, `emotion_window`, `frame_window` and `mode` are
# defined elsewhere in the project — confirm they are imported before
# this loop runs.
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except Exception:
            # Crop can be empty when offsets push outside the frame —
            # skip this face rather than crash (was a bare `except:`).
            continue
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)
        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except Exception:
            # statistics.mode raises when there is no unique mode —
            # wait for more frames (was a bare `except:`).
            continue
        # Scale the base color by the classifier's confidence.
        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((0, 128, 255))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))
        color = color.astype(int)
        color = color.tolist()
        # BUG FIX: draw_bounding_box takes opposite corners
        # (x1, y1, x2, y2) plus the corner radius `r` and segment
        # length `d`, but was being called with the raw
        # (x, y, width, height) tuple and only 3 arguments.
        x, y, w, h = face_coordinates
        corner_radius = 8
        # Make each corner segment roughly 1/3 of the smaller ROI
        # dimension (minus the arc radius), never below 1 pixel.
        segment_length = max(min(w, h) // 3 - corner_radius, 1)
        draw_bounding_box((x, y, x + w, y + h), rgb_image, color,
                          corner_radius, segment_length)
这个文件(detectface.py)中的最后一行代码似乎不正确,我不知道该如何补上调用时缺少的两个必需位置参数:'r' 和 'd'。如果你有任何实现思路,请分享。
发布于 2018-06-11 07:27:38
draw_bounding_box()
所做的是在示例图像中绘制类似于绿色框架的内容,包括对圆角的支持。
这是一个图片胜过千言万语的例子,所以让我们看看左上角(其他3个片段遵循相同的模式,只是旋转了一下)。
它是由生成的
cv2.line(image_array, (x1 + r, y1), (x1 + r + d, y1), color, 2)
cv2.line(image_array, (x1, y1 + r), (x1, y1 + r + d), color, 2)
cv2.ellipse(image_array, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, 2)
其中:
(x1, y1) 指定要在其周围绘制矩形的区域的左上角;
r 是圆弧的半径(即圆角的半径);
d 是两条直线段(水平和垂直)的长度;
color 是绘制直线和圆弧所用的颜色;
2 是直线和圆弧的粗细。

至于如何设置这些参数……
r 参数更像是一个美学上的选择——我认为 8 左右的值看起来会不错,不过示例图像似乎并没有圆角,也就是说 r == 0。我不确定(意思是我现在懒得去试 ;))cv2.ellipse 在半径为 0 时会画出什么,但一个简单的 if 语句就能解决这个问题(即只在 r > 0 时才调用 cv2.ellipse)。
d 参数似乎应该设置得让缺口大约占感兴趣区域(ROI)的 33%。我会取感兴趣区域的较小维度(即 min(width, height)),将其除以 3,再减去 r,并使用该结果。
https://stackoverflow.com/questions/50765763
复制相似问题