import cv2
import mediapipe
import numpy
def get_angle(vector1, vector2):  # angle between two vectors
    # cos(angle) = dot product of the vectors divided by the product of their norms
    cos_angle = numpy.dot(vector1, vector2) / (numpy.sqrt(numpy.sum(vector1 * vector1)) * numpy.sqrt(numpy.sum(vector2 * vector2)))
    cos_angle = numpy.clip(cos_angle, -1.0, 1.0)  # guard against floating-point values slightly outside [-1, 1]
    angle = numpy.degrees(numpy.arccos(cos_angle))  # angle = arccos(cos_angle), converted from radians to degrees
    return angle
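# Worked example (illustrative values, not from the original post): for vector1 = numpy.array([1, 0])
# and vector2 = numpy.array([1, 1]), the dot product is 1 and the norms are 1 and sqrt(2),
# so cos(angle) is about 0.707 and get_angle returns roughly 45.0 degrees.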
def gesture(judge_finger, finger_list):  # gesture rules
    if len(judge_finger) == 1 and judge_finger[0] == 8:
        # angle at the index-finger DIP joint (landmark 7): a bent index finger means "9", a straight one means "1"
        vector1 = finger_list[6] - finger_list[7]
        vector2 = finger_list[8] - finger_list[7]
        angle = get_angle(vector1, vector2)
        if angle < 170:
            gesture_str = "9"
        else:
            gesture_str = "1"
    elif len(judge_finger) == 2 and judge_finger[0] == 8 and judge_finger[1] == 12:
        gesture_str = "2"
    elif len(judge_finger) == 2 and judge_finger[0] == 4 and judge_finger[1] == 20:
        gesture_str = "6"
    elif len(judge_finger) == 2 and judge_finger[0] == 4 and judge_finger[1] == 8:
        gesture_str = "8"
    elif len(judge_finger) == 3 and judge_finger[0] == 8 and judge_finger[1] == 12 and judge_finger[2] == 16:
        gesture_str = "3"
    elif len(judge_finger) == 3 and judge_finger[0] == 4 and judge_finger[1] == 8 and judge_finger[2] == 12:
        gesture_str = "7"
    elif len(judge_finger) == 4 and judge_finger[0] == 8 and judge_finger[1] == 12 and judge_finger[2] == 16 and judge_finger[3] == 20:
        gesture_str = "4"
    elif len(judge_finger) == 5 and judge_finger[0] == 4 and judge_finger[1] == 8 and judge_finger[2] == 12 and judge_finger[3] == 16 and judge_finger[4] == 20:
        gesture_str = "5"
    elif len(judge_finger) == 0:
        gesture_str = "10"
    elif len(judge_finger) == 1 and judge_finger[0] == 4:
        gesture_str = "yyds"
    elif len(judge_finger) == 1 and judge_finger[0] == 20:
        gesture_str = "so what?"
    elif len(judge_finger) == 1 and judge_finger[0] == 12:
        gesture_str = "fuck you"
    else:
        gesture_str = "?"
    return gesture_str
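# Quick reference for the rules above (indices of extended fingertips -> label), added for readability:
#   [8] -> "1" or "9" (depending on whether the index finger is bent), [8, 12] -> "2",
#   [8, 12, 16] -> "3", [8, 12, 16, 20] -> "4", [4, 8, 12, 16, 20] -> "5",
#   [4, 20] -> "6", [4, 8, 12] -> "7", [4, 8] -> "8", [] -> "10" (fist),
#   [4] -> "yyds", [20] -> "so what?", [12] -> "fuck you", anything else -> "?".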
if __name__ == '__main__':
    open_camera = cv2.VideoCapture(0)  # open the laptop's built-in camera
    # set up the detectors
    hand = mediapipe.solutions.hands  # MediaPipe hands solution
    hand_detector = hand.Hands()  # hand landmark detector with default parameters
    mediapipe_draw = mediapipe.solutions.drawing_utils  # drawing utilities
    while True:
        # read a frame: success is True/False depending on whether a frame was grabbed, image is the captured frame
        success, image = open_camera.read()
        if success:
            image = cv2.flip(image, 1)  # mirror the image horizontally
            image_height, image_width, image_channels = image.shape  # frame height, width and channel count
            imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB, the format MediaPipe expects
            result = hand_detector.process(imageRGB)  # process() takes an RGB numpy array and returns a SolutionOutputs object
            # print(result)
            # visualize the detected hand landmarks
            if result.multi_hand_landmarks:  # multi_hand_landmarks: the detected/tracked hands, each a list of 21 landmarks with x, y and z
                hand_point = result.multi_hand_landmarks[0]  # use only the first detected hand
                mediapipe_draw.draw_landmarks(image, hand_point, hand.HAND_CONNECTIONS)  # draw the landmark connections onto the OpenCV image
                # print(result.multi_hand_landmarks)  # normalized landmark coordinates
                # compute the features used for gesture classification
                finger_list = []  # pixel coordinates of the 21 landmarks
                for i in range(21):  # convert normalized landmark coordinates to pixel coordinates
                    point_x = hand_point.landmark[i].x * image_width
                    point_y = hand_point.landmark[i].y * image_height
                    finger_list.append([int(point_x), int(point_y)])
                finger_list = numpy.array(finger_list, dtype=numpy.int32)  # convert the list of coordinates to a numpy.ndarray
                # print(finger_list)
                irrelevant_index = [0, 1, 2, 3, 6, 10, 14, 19, 18, 17, 10]  # landmarks grouped as the "palm" region, not used as fingertips
                irrelevant = cv2.convexHull(finger_list[irrelevant_index])  # build the convex hull of the palm group
                # print("palm group:", irrelevant)
                cv2.polylines(image, [irrelevant], True, (255, 255, 255), 2, cv2.LINE_AA)  # draw the palm hull
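                # Note (added for clarity): cv2.convexHull returns the hull vertices as an (N, 1, 2)
                # int array, so it can be passed directly to cv2.polylines above and to
                # cv2.pointPolygonTest below.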
                judge_list = [4, 8, 12, 16, 20]  # fingertip landmarks; a fingertip lying outside the hull counts as an extended finger
                judge_finger = []  # indices of the extended fingertips
                for index in judge_list:
                    finger = (int(finger_list[index][0]), int(finger_list[index][1]))  # fingertip pixel coordinates
                    # print("fingertip:", finger)
                    judge = cv2.pointPolygonTest(irrelevant, finger, True)  # signed distance from the fingertip to the hull
                    if judge < 0:  # a negative distance means the fingertip is outside the hull
                        judge_finger.append(index)
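                # Example of what this loop produces (illustrative, not from the original post):
                # if only the index and middle fingertips lie outside the palm hull,
                # judge_finger ends up as [8, 12], which gesture() below maps to "2".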
                # classify the gesture
                gesture_str = gesture(judge_finger, finger_list)
                cv2.putText(image, gesture_str, (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2, cv2.LINE_AA)  # overlay the gesture label
            cv2.imshow("gesture detector", image)
        key = cv2.waitKey(1)  # waitKey handles keyboard input
        if key == ord('q'):  # press 'q' to quit
            break
    open_camera.release()
    cv2.destroyAllWindows()
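As a quick sanity check outside the webcam loop, the same detector can also be run on a single still image. The sketch below is not part of the original post; the file name hand.jpg is a placeholder, and static_image_mode=True simply disables the tracking that the live version relies on:

import cv2
import mediapipe

hand = mediapipe.solutions.hands
detector = hand.Hands(static_image_mode=True, max_num_hands=1)  # per-image detection, no temporal tracking
image = cv2.imread("hand.jpg")  # hypothetical test image
if image is None:
    raise SystemExit("test image not found")
result = detector.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if result.multi_hand_landmarks:
    image_height, image_width = image.shape[:2]
    for i in (4, 8, 12, 16, 20):  # print the pixel coordinates of the five fingertip landmarks
        landmark = result.multi_hand_landmarks[0].landmark[i]
        print(i, int(landmark.x * image_width), int(landmark.y * image_height))
else:
    print("no hand detected")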
Running result