系列文章目录
第一章 Python 机器学习入门之mediapipe和pyttsx3的结合使用
文章目录
前言
在比赛准备时,由于比赛任务要求机器人在自主巡游中记录家庭成员的行为动作,并进行语音播报,我便有了用 PyCharm 编写比赛相关 demo 的想法,从而诞生了这个基于 mediapipe 和 pyttsx3 的姿态识别语音播报器。
一、mediapipe和pyttsx3是什么?
MediaPipe是由Google开发的开源框架,用于实时处理多媒体数据,尤其擅长人体姿势检测和面部识别等计算机视觉任务。它支持跨平台应用,适用于移动设备、桌面及边缘设备,不仅涵盖计算机视觉,还涉及手势识别和语音处理等领域,帮助开发者快速构建高性能应用。
pyttsx3
(Python Text-to-Speech version 3)是一个Python库,用于将文本转换为语音(Text-to-Speech, TTS)。它允许开发者轻松地将文本转成语音输出,这对于开发语音助手、阅读辅助工具以及其他需要语音输出的应用非常有用。
二、使用步骤
1.引入库
代码如下(示例):
import cv2
import mediapipe as mp
import numpy as np
import pyttsx3
2.初始化姿态识别
# --- Pose-detection setup and main capture loop (tutorial excerpt) ---
# NOTE(review): this excerpt references `engine` and `keypoints`, which are
# only defined in the later snippets / complete program; it is not runnable
# on its own. Indentation reconstructed from the complete listing below.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
cap = cv2.VideoCapture(0)  # default webcam
total_counter = 0  # frames counted in the current window
pose_counter = 0  # counter for the specific (hands-on-hips) gesture
pose_count = {}  # pose label -> occurrence count in the current window
current_pose = None  # currently recognized action
enable_pose_detection = False  # pose-recognition on/off flag
previous_keypoints_detected = 0  # visible-landmark count of the previous frame
has_spoken = False  # whether the current pose was already announced
poses_recognized = set()  # actions recognized so far
first_pose_done = False  # first action finished?
second_pose_done = False  # second action finished?
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # MediaPipe expects RGB input; OpenCV captures BGR.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = pose.process(rgb_frame)
    if results.pose_landmarks:
        ih, iw, _ = frame.shape
        keypoints_detected = 0
        for landmark in results.pose_landmarks.landmark:
            if landmark.visibility > 0.5:  # visibility threshold; tune to decide when a landmark counts as detected
                x, y = int(landmark.x * iw), int(landmark.y * ih)
                cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)
                keypoints_detected += 1
        if keypoints_detected == 33:
            if previous_keypoints_detected != 33:
                # Previous frame did not show all 33 landmarks: announce readiness.
                engine.say("机器人已准备好,请做出双手叉腰的动作来开启姿态识别功能。")
                engine.runAndWait()
            # Fill the shared `keypoints` buffer with pixel coordinates.
            for i in range(33):
                cx = int(results.pose_landmarks.landmark[i].x * iw)
                cy = int(results.pose_landmarks.landmark[i].y * ih)
                keypoints[i] = (cx, cy)
            # Draw the skeleton edges.
            for connection in mp_pose.POSE_CONNECTIONS:
                idx1, idx2 = connection
                landmark1 = results.pose_landmarks.landmark[idx1]
                landmark2 = results.pose_landmarks.landmark[idx2]
                x1, y1 = int(landmark1.x * iw), int(landmark1.y * ih)
                x2, y2 = int(landmark2.x * iw), int(landmark2.y * ih)
                cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
3.初始化语音播报
代码如下(示例):
# Configure the text-to-speech engine.
engine = pyttsx3.init()
default_rate = engine.getProperty('rate')
engine.setProperty('rate', default_rate - 50)  # slow speech down a bit for clarity
installed_voices = engine.getProperty('voices')
engine.setProperty('voice', installed_voices[0].id)  # first installed voice
4.关节点的角度计算和姿态检测
def get_angle(v1, v2):
    """Return the signed angle in degrees between 2-D vectors v1 and v2.

    The magnitude comes from the arccos of the normalized dot product;
    the sign comes from the 2-D cross product (negative when v2 lies
    clockwise of v1 in image coordinates).
    """
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    # Clip to [-1, 1]: floating-point rounding can push the cosine slightly
    # outside the arccos domain and produce NaN for (anti)parallel vectors.
    # np.degrees replaces the original hard-coded "/ 3.14 * 180", which
    # skewed every angle by about 0.05%.
    angle = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
    cross = v2[0] * v1[1] - v2[1] * v1[0]
    if cross < 0:
        angle = -angle
    return angle
def get_pos(keypoints):
    """Classify the body pose from the 33 pixel-space landmarks.

    Returns (str_pose, enable_pose): the pose label (possibly "") and a
    flag that is True only for the hands-on-hips toggle gesture.
    MediaPipe indices used: 11/12 shoulders, 13/14 elbows, 15/16 wrists.
    """
    keypoints = np.array(keypoints)
    # Left upper-arm angle: shoulder-to-shoulder vs left shoulder-to-elbow.
    v1 = keypoints[12] - keypoints[11]
    v2 = keypoints[13] - keypoints[11]
    angle_left_arm = get_angle(v1, v2)
    # Right upper-arm angle.
    v1 = keypoints[11] - keypoints[12]
    v2 = keypoints[14] - keypoints[12]
    angle_right_arm = get_angle(v1, v2)
    # Bend angle at the left elbow.
    v1 = keypoints[11] - keypoints[13]
    v2 = keypoints[15] - keypoints[13]
    angle_left_elbow = get_angle(v1, v2)
    # Bend angle at the right elbow.
    v1 = keypoints[12] - keypoints[14]
    v2 = keypoints[16] - keypoints[14]
    angle_right_elbow = get_angle(v1, v2)
    str_pose = ""
    enable_pose = False
    # NOTE(review): branch order matters — the first matching branch wins,
    # and the final hands-on-hips check can override any earlier label.
    if (angle_left_arm < 0 and angle_right_arm < 0) or (angle_left_arm > 0 and angle_right_arm > 0):
        str_pose = "挥手"  # waving one hand
    elif 90 < angle_left_arm < 120 and 10 < angle_right_elbow < 40:
        # NOTE(review): unreachable as written — angle_left_arm > 90 implies
        # both-positive or mixed signs already consumed by the first branch
        # unless angle_right_arm <= 0; confirm intended thresholds.
        str_pose = "打电话"  # making a phone call
    elif angle_left_arm < 0 and angle_right_arm > 0:
        str_pose = "挥双手"  # waving both hands
    elif angle_left_arm > 0 and angle_right_arm < 0:
        str_pose = "站立"  # standing
    # Hands on hips: the gesture that toggles pose detection on/off.
    if abs(angle_left_elbow) > 70 and 160 > abs(angle_right_elbow) > 90 and angle_left_arm > 90 and angle_right_arm < -100:
        str_pose = "双手叉腰"
        enable_pose = True
    return str_pose, enable_pose
5.完整代码
import cv2
import mediapipe as mp
import numpy as np
import pyttsx3
# --- Text-to-speech engine setup ---
engine = pyttsx3.init()
speech_rate = engine.getProperty('rate')
engine.setProperty('rate', speech_rate - 50)  # slightly slower speech
available_voices = engine.getProperty('voices')
engine.setProperty('voice', available_voices[0].id)  # first installed voice
# Shared buffer for the 33 MediaPipe pose landmarks, refilled every frame.
keypoints = [None] * 33
def get_angle(v1, v2):
    """Return the signed angle in degrees between 2-D vectors v1 and v2.

    The magnitude comes from the arccos of the normalized dot product;
    the sign comes from the 2-D cross product (negative when v2 lies
    clockwise of v1 in image coordinates).
    """
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    # Clip to [-1, 1]: floating-point rounding can push the cosine slightly
    # outside the arccos domain and produce NaN for (anti)parallel vectors.
    # np.degrees replaces the original hard-coded "/ 3.14 * 180", which
    # skewed every angle by about 0.05%.
    angle = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
    cross = v2[0] * v1[1] - v2[1] * v1[0]
    if cross < 0:
        angle = -angle
    return angle
def get_pos(keypoints):
    """Classify the body pose from the 33 pixel-space landmarks.

    Returns (str_pose, enable_pose): the pose label (possibly "") and a
    flag that is True only for the hands-on-hips toggle gesture.
    MediaPipe indices used: 11/12 shoulders, 13/14 elbows, 15/16 wrists.
    """
    keypoints = np.array(keypoints)
    # Left upper-arm angle: shoulder-to-shoulder vs left shoulder-to-elbow.
    v1 = keypoints[12] - keypoints[11]
    v2 = keypoints[13] - keypoints[11]
    angle_left_arm = get_angle(v1, v2)
    # Right upper-arm angle.
    v1 = keypoints[11] - keypoints[12]
    v2 = keypoints[14] - keypoints[12]
    angle_right_arm = get_angle(v1, v2)
    # Bend angle at the left elbow.
    v1 = keypoints[11] - keypoints[13]
    v2 = keypoints[15] - keypoints[13]
    angle_left_elbow = get_angle(v1, v2)
    # Bend angle at the right elbow.
    v1 = keypoints[12] - keypoints[14]
    v2 = keypoints[16] - keypoints[14]
    angle_right_elbow = get_angle(v1, v2)
    str_pose = ""
    enable_pose = False
    # NOTE(review): branch order matters — the first matching branch wins,
    # and the final hands-on-hips check can override any earlier label.
    if (angle_left_arm < 0 and angle_right_arm < 0) or (angle_left_arm > 0 and angle_right_arm > 0):
        str_pose = "挥手"  # waving one hand
    elif 90 < angle_left_arm < 120 and 10 < angle_right_elbow < 40:
        # NOTE(review): unreachable as written — angle_left_arm > 90 implies
        # both-positive or mixed signs already consumed by the first branch
        # unless angle_right_arm <= 0; confirm intended thresholds.
        str_pose = "打电话"  # making a phone call
    elif angle_left_arm < 0 and angle_right_arm > 0:
        str_pose = "挥双手"  # waving both hands
    elif angle_left_arm > 0 and angle_right_arm < 0:
        str_pose = "站立"  # standing
    # Hands on hips: the gesture that toggles pose detection on/off.
    if abs(angle_left_elbow) > 70 and 160 > abs(angle_right_elbow) > 90 and angle_left_arm > 90 and angle_right_arm < -100:
        str_pose = "双手叉腰"
        enable_pose = True
    return str_pose, enable_pose
def main():
    """Run the webcam pose-detection / speech-announcement loop.

    Waits until all 33 landmarks are visible, lets the user toggle
    detection on/off with a hands-on-hips gesture, then announces the
    dominant pose over 100-frame windows (two poses per session).
    Press 'q' in the display window to quit.
    """
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose()
    cap = cv2.VideoCapture(0)  # default webcam
    total_counter = 0  # frames counted in the current window
    pose_counter = 0  # frames showing the hands-on-hips toggle gesture
    pose_count = {}  # pose label -> occurrences in the current window
    current_pose = None  # last announced pose
    enable_pose_detection = False  # toggled by the hands-on-hips gesture
    previous_keypoints_detected = 0  # visible-landmark count of the previous frame
    has_spoken = False  # NOTE(review): assigned but never read — dead state
    poses_recognized = set()  # labels announced so far (at most 2)
    first_pose_done = False  # first action announced?
    second_pose_done = False  # second action announced?
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # MediaPipe expects RGB input; OpenCV captures BGR.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = pose.process(rgb_frame)
        if results.pose_landmarks:
            ih, iw, _ = frame.shape
            keypoints_detected = 0
            for landmark in results.pose_landmarks.landmark:
                if landmark.visibility > 0.5:  # visibility threshold; tune as needed
                    x, y = int(landmark.x * iw), int(landmark.y * ih)
                    cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)
                    keypoints_detected += 1
            if keypoints_detected == 33:
                if previous_keypoints_detected != 33:
                    # Whole body just became visible: prompt for the toggle gesture.
                    engine.say("机器人已准备好,请做出双手叉腰的动作来开启姿态识别功能。")
                    engine.runAndWait()
                # Fill the module-level `keypoints` buffer with pixel coordinates.
                for i in range(33):
                    cx = int(results.pose_landmarks.landmark[i].x * iw)
                    cy = int(results.pose_landmarks.landmark[i].y * ih)
                    keypoints[i] = (cx, cy)
                # Draw the skeleton edges.
                for connection in mp_pose.POSE_CONNECTIONS:
                    idx1, idx2 = connection
                    landmark1 = results.pose_landmarks.landmark[idx1]
                    landmark2 = results.pose_landmarks.landmark[idx2]
                    x1, y1 = int(landmark1.x * iw), int(landmark1.y * ih)
                    x2, y2 = int(landmark2.x * iw), int(landmark2.y * ih)
                    cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                str_pose, enable_pose = get_pos(keypoints)
                print(str_pose)
                # Hands-on-hips held long enough flips detection on/off.
                if enable_pose and str_pose == "双手叉腰":
                    pose_counter += 1
                    total_counter += 1
                    if pose_counter >= 40 and total_counter >= 50:
                        enable_pose_detection = not enable_pose_detection  # flip the detection switch
                        pose_counter = 0  # reset gesture counter
                        total_counter = 0  # reset window counter
                        status = "开启" if enable_pose_detection else "关闭"
                        engine.say(f"姿态识别 {status}")
                        engine.runAndWait()
                        print(f"Pose detection {status}.")
                elif enable_pose_detection:
                    total_counter += 1
                    if total_counter >= 100:
                        # Most frequent pose over the last 100-frame window.
                        max_pose, max_count = max(pose_count.items(), key=lambda item: item[1], default=(None, 0))
                        if max_count >= 80 and max_pose != current_pose:
                            if not first_pose_done:
                                # First action recognized.
                                first_pose_done = True
                                poses_recognized.add(max_pose)
                                engine.say("成功识别到第一个动作" + max_pose + "请做第二个动作。")
                                engine.runAndWait()
                                print("成功识别到第一个动作" + max_pose + "请做第二个动作。")
                            elif not second_pose_done:
                                # Second action recognized.
                                second_pose_done = True
                                poses_recognized.add(max_pose)
                                engine.say(max_pose)
                                engine.runAndWait()
                                print("成功识别到第二个动作" + max_pose)
                                if len(poses_recognized) == 2:
                                    # Both actions done: prompt to close detection.
                                    engine.say("成功识别到第二个动作" + max_pose + "请双手叉腰来关闭姿态识别。")
                                    engine.runAndWait()
                                    print("成功识别到第二个动作" + max_pose + "请双手叉腰来关闭姿态识别。")
                                    poses_recognized.clear()
                                    first_pose_done = False
                                    second_pose_done = False
                            else:
                                # Repeat of an already-announced action: ignore.
                                pass
                            current_pose = max_pose  # remember the announced pose
                        total_counter = 0  # reset window counter
                        pose_count.clear()  # reset per-window tallies
                    # Tally the current frame's pose label.
                    if str_pose in pose_count:
                        pose_count[str_pose] += 1
                    else:
                        pose_count[str_pose] = 1
            else:
                # Person partially visible: reset all session state.
                print("姿态识别不到.")
                pose_counter = 0
                total_counter = 0
                current_pose = None
                first_pose_done = False
                second_pose_done = False
                poses_recognized.clear()
        else:
            # Nobody detected: clear tallies so stale poses aren't announced.
            print("NO PERSON")
            pose_count.clear()
            pose_counter = 0
            total_counter = 0
            current_pose = None
            first_pose_done = False
            second_pose_done = False
            poses_recognized.clear()
        cv2.imshow('Pose Detection', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # NOTE(review): keypoints_detected is only bound when landmarks were
        # found — this line raises NameError if the very first frame shows
        # no person. Initialize it before the loop to be safe.
        previous_keypoints_detected = keypoints_detected
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
总结
通过这个小实验,我更加深刻的了解了姿态识别和语音播报的相关知识,并制作了这个姿态识别语音播报器,其实还是有点酷的,虽然自己还是个菜鸟,但是还是有所收获的。
标签:播报,mediapipe,pyttsx3,pose,keypoints,v1,v2,angle,arm From: https://blog.csdn.net/sun_boy98/article/details/141865232