
Image Blink and Head-Shake Generator: Software for Creating Blinking and Mouth-Opening Face Images (Blink/Mouth-Open Face Recognition)

Original · 用户11701393 · Published 2025-06-23 22:11:50

Download: https://www.pan38.com/share.php?code=pvvmX  Extraction code: 8888 (for learning purposes only)

This is a complete face-animation generation system with blink, mouth-open, and head-movement detection. The full project code follows:

```python
import cv2
import dlib
import numpy as np
from scipy.spatial import distance as dist
from collections import OrderedDict
import time
import os

# Facial landmark index ranges (dlib 68-point model)
FACIAL_LANDMARKS_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])
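
# dlib's 68-point layout: 0-16 jaw, 17-26 eyebrows, 27-35 nose,
# 36-41 right eye, 42-47 left eye, 48-67 mouth. Each entry above is a
# half-open (start, end) slice into the landmark array.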

class FaceAnimator:
    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        self.ear_thresh = 0.25          # eye aspect ratio below this counts as a closed eye
        self.ear_frames = 3             # consecutive closed frames needed to register a blink
        self.mar_thresh = 0.75          # mouth aspect ratio above this counts as an open mouth
        self.mar_frames = 3             # consecutive open frames needed to register a mouth open
        self.head_movement_thresh = 15  # pixel displacement of the face centroid that counts as movement
        self.head_movement_frames = 5   # consecutive moving frames needed to register a head movement
        self.blink_counter = 0
        self.mouth_open_counter = 0
        self.head_movement_counter = 0
        self.total_blinks = 0
        self.total_mouth_opens = 0
        self.total_head_movements = 0
        self.prev_center = None
        self.animation_frames = []
        self.animation_delay = 0.1
        
    def eye_aspect_ratio(self, eye):
        # EAR (Soukupova & Cech, 2016): ratio of the two vertical eye
        # distances to the horizontal one; it drops toward 0 as the eye closes.
        A = dist.euclidean(eye[1], eye[5])  # vertical distance p2-p6
        B = dist.euclidean(eye[2], eye[4])  # vertical distance p3-p5
        C = dist.euclidean(eye[0], eye[3])  # horizontal distance p1-p4
        ear = (A + B) / (2.0 * C)
        return ear
    
    def mouth_aspect_ratio(self, mouth):
        # MAR over the inner-lip points (indices are relative to the 20-point
        # mouth slice, landmarks 48-67): three vertical distances averaged
        # and normalized by the horizontal inner-lip width.
        A = dist.euclidean(mouth[13], mouth[19])
        B = dist.euclidean(mouth[14], mouth[18])
        C = dist.euclidean(mouth[15], mouth[17])
        D = dist.euclidean(mouth[12], mouth[16])
        mar = (A + B + C) / (3.0 * D)
        return mar
    
    def head_movement_detected(self, current_center):
        # Compare the face centroid with the previous frame's; a jump larger
        # than head_movement_thresh pixels counts as head movement.
        if self.prev_center is None:
            self.prev_center = current_center
            return False
        
        movement = dist.euclidean(current_center, self.prev_center)
        self.prev_center = current_center
        return movement > self.head_movement_thresh
    
    def process_frame(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(gray, 0)
        
        for rect in rects:
            shape = self.predictor(gray, rect)
            shape = self.shape_to_np(shape)
            
            left_eye = shape[42:48]
            right_eye = shape[36:42]
            mouth = shape[48:68]
            
            left_ear = self.eye_aspect_ratio(left_eye)
            right_ear = self.eye_aspect_ratio(right_eye)
            ear = (left_ear + right_ear) / 2.0
            
            mar = self.mouth_aspect_ratio(mouth)
            
            face_center = np.mean(shape, axis=0)
            head_moved = self.head_movement_detected(face_center)
            
            if ear < self.ear_thresh:
                self.blink_counter += 1
            else:
                if self.blink_counter >= self.ear_frames:
                    self.total_blinks += 1
                    self.animate_blink(frame, shape)
                self.blink_counter = 0
                
            if mar > self.mar_thresh:
                self.mouth_open_counter += 1
            else:
                if self.mouth_open_counter >= self.mar_frames:
                    self.total_mouth_opens += 1
                    self.animate_mouth_open(frame, shape)
                self.mouth_open_counter = 0
                
            if head_moved:
                self.head_movement_counter += 1
            else:
                if self.head_movement_counter >= self.head_movement_frames:
                    self.total_head_movements += 1
                    self.animate_head_movement(frame, shape)
                self.head_movement_counter = 0
                
            self.draw_landmarks(frame, shape)
            
        return frame
    
    def animate_blink(self, frame, shape):
        left_eye = shape[42:48]
        right_eye = shape[36:42]
        
        # Scale each eye contour toward its centroid (ratio 0.25 -> 0.75) to
        # draw the eye outline progressively reopening. Frames are only
        # buffered in animation_frames; note that time.sleep stalls the live
        # capture loop while animating.
        for i in range(3):
            temp_frame = frame.copy()
            ratio = (i + 1) / 4.0
            left_eye_closed = self.interpolate_eye(left_eye, ratio)
            right_eye_closed = self.interpolate_eye(right_eye, ratio)
            
            cv2.polylines(temp_frame, [left_eye_closed], True, (0, 0, 255), 2)
            cv2.polylines(temp_frame, [right_eye_closed], True, (0, 0, 255), 2)
            self.animation_frames.append(temp_frame)
            time.sleep(self.animation_delay)
    
    def animate_mouth_open(self, frame, shape):
        mouth = shape[48:68]
        
        for i in range(3):
            temp_frame = frame.copy()
            ratio = 1 + (i * 0.2)
            mouth_open = self.interpolate_mouth(mouth, ratio)
            
            cv2.polylines(temp_frame, [mouth_open], True, (0, 255, 0), 2)
            self.animation_frames.append(temp_frame)
            time.sleep(self.animation_delay)
    
    def animate_head_movement(self, frame, shape):
        for i in range(5):
            temp_frame = frame.copy()
            offset = (i - 2) * 5
            moved_shape = shape.copy()
            moved_shape[:, 0] += offset
            
            self.draw_landmarks(temp_frame, moved_shape)
            self.animation_frames.append(temp_frame)
            time.sleep(self.animation_delay)
    
    def interpolate_eye(self, eye, ratio):
        center = np.mean(eye, axis=0)
        interpolated = []
        
        for point in eye:
            new_point = center + ratio * (point - center)
            interpolated.append(new_point)
            
        return np.array(interpolated, dtype=np.int32)
    
    def interpolate_mouth(self, mouth, ratio):
        center = np.mean(mouth, axis=0)
        interpolated = []
        
        for point in mouth:
            new_point = center + ratio * (point - center)
            interpolated.append(new_point)
            
        return np.array(interpolated, dtype=np.int32)
    
    def shape_to_np(self, shape, dtype="int"):
        coords = np.zeros((68, 2), dtype=dtype)
        
        for i in range(0, 68):
            coords[i] = (shape.part(i).x, shape.part(i).y)
            
        return coords
    
    def draw_landmarks(self, image, shape):
        for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
            pts = shape[i:j]
            
            if name == "jaw":
                for l in range(1, len(pts)):
                    ptA = tuple(pts[l - 1])
                    ptB = tuple(pts[l])
                    cv2.line(image, ptA, ptB, (255, 0, 0), 2)
            else:
                hull = cv2.convexHull(pts)
                cv2.drawContours(image, [hull], -1, (0, 255, 0), 1)

def main():
    animator = FaceAnimator()
    cap = cv2.VideoCapture(0)
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break
            
        frame = animator.process_frame(frame)
        
        cv2.putText(frame, f"Blinks: {animator.total_blinks}", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, f"Mouth Opens: {animator.total_mouth_opens}", (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.putText(frame, f"Head Movements: {animator.total_head_movements}", (10, 90),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        
        cv2.imshow("Face Animator", frame)
        
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
            
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
```

Alongside the main program, the following utilities module provides standalone helpers that generate blink, mouth-open, and head-movement animation frame sequences from a base frame and landmark coordinates:

```python

import cv2
import numpy as np
from collections import OrderedDict

# The utilities below need the same 68-point landmark index map as the
# main program; it is repeated here so the module is self-contained.
FACIAL_LANDMARKS_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])

class AnimationUtils:
    @staticmethod
    def create_blinking_animation(frames, eye_coords, blink_frames=5):
        animations = []
        for i in range(blink_frames):
            ratio = min(1.0, (i + 1) / blink_frames)
            frame_copy = frames[-1].copy()
            
            for eye in eye_coords:
                center = np.mean(eye, axis=0)
                new_eye = []
                for point in eye:
                    new_point = center + ratio * (point - center)
                    new_eye.append(new_point)
                
                new_eye = np.array(new_eye, dtype=np.int32)
                cv2.polylines(frame_copy, [new_eye], True, (0, 0, 255), 2)
            
            animations.append(frame_copy)
        
        return animations
    
    @staticmethod
    def create_mouth_animation(frames, mouth_coords, open_frames=5, max_ratio=1.5):
        animations = []
        for i in range(open_frames):
            ratio = 1.0 + (max_ratio - 1.0) * (i + 1) / open_frames
            frame_copy = frames[-1].copy()
            
            center = np.mean(mouth_coords, axis=0)
            new_mouth = []
            for point in mouth_coords:
                new_point = center + ratio * (point - center)
                new_mouth.append(new_point)
            
            new_mouth = np.array(new_mouth, dtype=np.int32)
            cv2.polylines(frame_copy, [new_mouth], True, (0, 255, 0), 2)
            animations.append(frame_copy)
        
        return animations
    
    @staticmethod
    def create_head_movement_animation(frames, all_coords, movement_frames=5, max_offset=20):
        animations = []
        for i in range(movement_frames):
            offset = (i - movement_frames // 2) * max_offset * 2 // movement_frames
            frame_copy = frames[-1].copy()
            
            # Shift all landmarks horizontally to simulate a head turn
            new_coords = all_coords.copy()
            new_coords[:, 0] += offset
            
            # Use (start, end) rather than reusing i, which would shadow the loop counter
            for (name, (start, end)) in FACIAL_LANDMARKS_IDXS.items():
                pts = new_coords[start:end]
                
                if name == "jaw":
                    for l in range(1, len(pts)):
                        ptA = tuple(pts[l - 1])
                        ptB = tuple(pts[l])
                        cv2.line(frame_copy, ptA, ptB, (255, 0, 0), 2)
                else:
                    hull = cv2.convexHull(pts)
                    cv2.drawContours(frame_copy, [hull], -1, (0, 255, 0), 1)
            
            animations.append(frame_copy)
        
        return animations

```

The project comprises the main program, a dependency list, and the animation utilities above. The main program uses dlib for face detection and 68-point landmark localization, and implements detection and animation generation for blinking, mouth opening, and head movement. It processes a live camera feed in real time, detects the facial actions, and renders the corresponding animation effects. Before running it, install the dependencies and download the shape_predictor_68_face_landmarks.dat model file (distributed on dlib.net as a .bz2 archive).
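
The dependency file mentioned above is not reproduced in the post; here is a plausible requirements.txt covering the imports actually used (package names are the standard PyPI distributions; version pins are deliberately left out as an assumption):

```
opencv-python
dlib
numpy
scipy
```

Both classes buffer generated frames (the animation_frames list, and the lists returned by AnimationUtils) without ever writing them out. Below is a minimal sketch of exporting such a frame buffer to a video file with OpenCV; save_animation is a hypothetical helper, not part of the original code:

```python
import cv2

def save_animation(frames, path="animation.avi", fps=10):
    # Hypothetical helper: write a list of BGR frames (e.g. the
    # animator.animation_frames buffer) to a video file with OpenCV.
    if not frames:
        return
    h, w = frames[0].shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    writer = cv2.VideoWriter(path, fourcc, fps, (w, h))
    for f in frames:
        writer.write(f)
    writer.release()

# Usage after running the main loop: save_animation(animator.animation_frames)
```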

Original statement: This article was published on the Tencent Cloud Developer Community with the author's authorization and may not be reproduced without permission.

If there is any infringement, please contact cloudcommunity@tencent.com for removal.
