
5.21 Group Task

Posted: 2024-05-21 16:31:34

Today I improved the main part of the fall detection module so that it can monitor for falls in real time through a webcam.
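At its core, real-time monitoring is just an OpenCV capture loop with the per-frame detection inside it. As a point of reference before the full listing, here is a minimal sketch of such a loop (camera index 0 and the window/key names are assumptions; no pose estimation yet):

import cv2

cap = cv2.VideoCapture(0)                    # 0 = default webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:                               # a live camera may drop frames; keep reading
        continue
    # per-frame processing (pose estimation, action recognition) would go here
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):    # press q to quit
        break
cap.release()
cv2.destroyAllWindows()

The full module below wires MediaPipe Pose and the ST-GCN action classifier into this same loop.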

import time
from collections import deque
import requests
import cv2
import numpy as np
import mediapipe as mp

from stgcn.stgcn import STGCN
from PIL import Image, ImageDraw, ImageFont
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
KEY_JOINTS = [
    mp_pose.PoseLandmark.NOSE,
    mp_pose.PoseLandmark.LEFT_SHOULDER,
    mp_pose.PoseLandmark.RIGHT_SHOULDER,
    mp_pose.PoseLandmark.LEFT_ELBOW,
    mp_pose.PoseLandmark.RIGHT_ELBOW,
    mp_pose.PoseLandmark.LEFT_WRIST,
    mp_pose.PoseLandmark.RIGHT_WRIST,
    mp_pose.PoseLandmark.LEFT_HIP,
    mp_pose.PoseLandmark.RIGHT_HIP,
    mp_pose.PoseLandmark.LEFT_KNEE,
    mp_pose.PoseLandmark.RIGHT_KNEE,
    mp_pose.PoseLandmark.LEFT_ANKLE,
    mp_pose.PoseLandmark.RIGHT_ANKLE
]

# Skeleton edges, given as index pairs into KEY_JOINTS (0-12); index 13 is a synthetic neck point added in draw_skeleton
POSE_CONNECTIONS = [(6, 4), (4, 2), (2, 13), (13, 1), (5, 3), (3, 1), (12, 10),
                    (10, 8), (8, 2), (11, 9), (9, 7), (7, 1), (13, 0)]

# Per-keypoint BGR colours, indexed in KEY_JOINTS order (0-12) plus the synthetic neck (13); trailing entries are unused
POINT_COLORS = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
                (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]

LINE_COLORS = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50), (77, 255, 222),
               (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77), (77, 222, 255),
               (255, 156, 127), (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]

ACTION_MODEL_MAX_FRAMES = 30
class FallDetection:
    def __init__(self):
        self.isOk = True
        self.action_model = STGCN(weight_file='./weights/tsstg-model.pth', device='cpu')
        self.joints_list = deque(maxlen=ACTION_MODEL_MAX_FRAMES)

    def draw_skeleton(self, frame, pts):
        l_pair = POSE_CONNECTIONS
        p_color = POINT_COLORS

        line_color = LINE_COLORS

        part_line = {}
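        # Append a synthetic neck keypoint (index 13) as the midpoint of the two shoulders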
        pts = np.concatenate((pts, np.expand_dims((pts[1, :] + pts[2, :]) / 2, 0)), axis=0)
        for n in range(pts.shape[0]):
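            # Skip keypoints whose visibility score is too low to trust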
            if pts[n, 2] <= 0.05:
                continue
            cor_x, cor_y = int(pts[n, 0]), int(pts[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(frame, (cor_x, cor_y), 3, p_color[n], -1)
            # cv2.putText(frame, str(n), (cor_x+10, cor_y+10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1)

        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                cv2.line(frame, start_xy, end_xy, line_color[i], int(1*(pts[start_p, 2] + pts[end_p, 2]) + 3))
        return frame

    def cv2_add_chinese_text(self, img, text, position, textColor=(0, 255, 0), textSize=30):
        if isinstance(img, np.ndarray):  # convert an OpenCV (BGR) image to a PIL image
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        # Create a drawing context on the image
        draw = ImageDraw.Draw(img)
        # Load a font that can render Chinese characters
        fontStyle = ImageFont.truetype(
            "./fonts/MSYH.ttc", textSize, encoding="utf-8")
        # Draw the text
        draw.text(position, text, textColor, font=fontStyle)
        # Convert back to OpenCV (BGR) format
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    def detect(self):
        # Initialize the webcam capture; index 0 selects the default camera.
        cap = cv2.VideoCapture(0)
        # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)  # setting the resolution explicitly was key to getting capture to work
        # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
        # cap.set(cv2.CAP_PROP_FPS, 30)

        image_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        image_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_num = 0
        print(image_h, image_w)

        with mp_pose.Pose(
                min_detection_confidence=0.7,
                min_tracking_confidence=0.5) as pose:
            while cap.isOpened():
                fps_time = time.time()
                frame_num += 1
                success, image = cap.read()
                if not success:
                    print("Ignoring empty camera frame.")
                    # For a live camera stream, skip the empty frame and keep reading.
                    continue

                # Improve performance: mark the frame read-only before running pose estimation
                image.flags.writeable = False
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                results = pose.process(image)

                if not results.pose_landmarks:
                    continue

                # Re-enable writes and convert back to BGR so the skeleton can be drawn on the frame
                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                # mp_drawing.draw_landmarks(
                #     image,
                #     results.pose_landmarks,
                #     mp_pose.POSE_CONNECTIONS,
                #     landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())

                landmarks = results.pose_landmarks.landmark
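                # Convert normalized landmarks to pixel coordinates; keep visibility as a confidence score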
                joints = np.array([[landmarks[joint].x * image_w,
                                    landmarks[joint].y * image_h,
                                    landmarks[joint].visibility]
                                   for joint in KEY_JOINTS])
                # Person bounding box with a margin around the keypoints
                box_l, box_r = int(joints[:, 0].min())-50, int(joints[:, 0].max())+50
                box_t, box_b = int(joints[:, 1].min())-100, int(joints[:, 1].max())+100

                self.joints_list.append(joints)

                # Action recognition
                action = ''
                clr = (0, 255, 0)
                # Predict the action type once 30 frames of keypoints have been collected
                if len(self.joints_list) == ACTION_MODEL_MAX_FRAMES:
                    pts = np.array(self.joints_list, dtype=np.float32)
                    out = self.action_model.predict(pts, (image_w, image_h))
                    action_name = self.action_model.class_names[out[0].argmax()]
                    action = '{}: {:.2f}%'.format(action_name, out[0].max() * 100)
                    print(action)
                    if action_name == 'Fall Down':
                        if self.isOk:
                            # Notify the backend the first time a fall is detected
                            requests.get('http://localhost:8080/book/isokfalse')
                            self.isOk = False

                        clr = (255, 0, 0)
                        action = '摔倒'  # "Fall Down" label, rendered in Chinese
                    elif action_name == 'Walking':
                        clr = (255, 128, 0)
                        action = '行走'  # "Walking" label, rendered in Chinese
                    else:
                        action = ''

                # Draw the skeleton, bounding box, and action label
                image = self.draw_skeleton(image, self.joints_list[-1])
                image = cv2.rectangle(image, (box_l, box_t), (box_r, box_b), (255, 0, 0), 1)
                image = self.cv2_add_chinese_text(image, f'当前状态:{action}', (box_l + 10, box_t + 10), clr, 40)
                image = cv2.putText(image, f'FPS: {int(1.0 / (time.time() - fps_time))}',
                                    (50, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 2)
                # Display the annotated frame.
                cv2.imshow('MediaPipe Pose', image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

            # Release the resources.
            cap.release()
            cv2.destroyAllWindows()

if __name__ == '__main__':
    FallDetection().detect()
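A note on testing: since the script now reads from the camera, an offline check can be done by pointing cv2.VideoCapture at a recorded clip instead of device index 0 (the path below is hypothetical):

cap = cv2.VideoCapture('./videos/fall_test.mp4')  # hypothetical test clip

With a file source, an empty frame means the video has ended, so the 'continue' in the read loop should become 'break'.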

 

From: https://www.cnblogs.com/zeyangshuaige/p/18204339
