Preview (screenshot of the finished result)
Technologies used
face-api.js, canvas
1. Install face-api.js via npm
npm install face-api.js
2. Download the face-api.js models
Download the models
Place them in the public/models directory (they are served from /models, which matches MODEL_URL in the component below)
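Each of the four networks loaded in the component needs its weights manifest plus shard files. Assuming the standard weight files shipped in the face-api.js repository, public/models should contain roughly:

public/models
├── tiny_face_detector_model-weights_manifest.json
├── tiny_face_detector_model-shard1
├── face_landmark_68_model-weights_manifest.json
├── face_landmark_68_model-shard1
├── face_recognition_model-weights_manifest.json
├── face_recognition_model-shard1
├── face_recognition_model-shard2
├── face_expression_model-weights_manifest.json
└── face_expression_model-shard1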
3. Create the face.vue component
<template>
	<div style="height: calc(100vh - 140px); display: flex; justify-content: center; align-items: center; flex-direction: column">
		<div style="position: relative; z-index: 100">
			<video ref="video" autoplay muted playsinline width="720" height="560"></video>
			<canvas ref="canvas" width="720" height="560"></canvas>
			<el-button type="primary" size="default" @click="takePhoto" style="position: absolute; top: 170px; left: 50%; transform: translateX(-50%)">Take Photo</el-button>
		</div>
	</div>
</template>
<script setup>
import { onMounted, ref, onBeforeUnmount } from 'vue';
import * as faceapi from 'face-api.js';
// Project-specific recognition API; swap in your own endpoint module
import { SearchFacialRecognition } from '/@/api/facial/facialRecognition';
import { ElMessage } from 'element-plus';
import { useUserInfo } from '/@/stores/userInfo';
import { storeToRefs } from 'pinia';

const emit = defineEmits(['closeSignIn']);
const stores = useUserInfo();
const { userInfos } = storeToRefs(stores);
const video = ref(null);
const canvas = ref(null);
let photoInterval = null; // optional auto-capture timer (see the commented-out code below)
let intervalId = null; // face-detection polling timer
const startVideo = async () => {
	try {
		const stream = await navigator.mediaDevices.getUserMedia({ video: {} });
		video.value.srcObject = stream;
	} catch (err) {
		console.error('Error accessing webcam:', err);
	}
};
const loadModels = async () => {
	const MODEL_URL = '/models';
	await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
	await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
	await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
	await faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
};
const drawDetectionsWithStyle = (canvas, detections) => {
	const context = canvas.getContext('2d');
	const rectWidth = 250; // fixed frame width
	const rectHeight = 300; // fixed frame height
	const x = (canvas.width - rectWidth) / 2;
	const y = (canvas.height - rectHeight) / 2;
	// Draw the current video frame onto the canvas
	context.drawImage(video.value, 0, 0, canvas.width, canvas.height);
	// Cover the whole canvas with a semi-transparent black mask
	context.fillStyle = 'rgba(0, 0, 0, 0.7)';
	context.fillRect(0, 0, canvas.width, canvas.height);
	// Clear the mask inside the fixed rectangle so the live video shows through
	context.clearRect(x, y, rectWidth, rectHeight);
	// Draw the fixed capture frame
	context.strokeStyle = 'rgba(0, 255, 255, 0.8)';
	context.lineWidth = 2;
	context.strokeRect(x, y, rectWidth, rectHeight);
	// Rescale the detection results to the canvas size
	const displaySize = { width: canvas.width, height: canvas.height };
	const resizedDetections = faceapi.resizeResults(detections, displaySize);
	// Draw the face landmarks and expressions
	faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
	faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
};
const detectFaces = async () => {
	const detections = await faceapi.detectAllFaces(video.value, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions();
	const context = canvas.value.getContext('2d');
	context.clearRect(0, 0, canvas.value.width, canvas.value.height);
	// Render the frame, mask, and detections with the custom drawing routine
	drawDetectionsWithStyle(canvas.value, detections);
};
const takePhoto = async () => {
	const context = canvas.value.getContext('2d');
	context.drawImage(video.value, 0, 0, canvas.value.width, canvas.value.height);
	const imgData = canvas.value.toDataURL('image/jpeg');
	// imgData is a base64 data URL; strip the prefix and send the payload to your recognition API
	const base64Data = imgData.split(',')[1];
	const params = {
		image: base64Data,
		imageType: 'BASE64',
		groupId: userInfos.value.tenantId,
	};
	// Call your own face-search API here (SearchFacialRecognition is this project's endpoint)
	const res = await SearchFacialRecognition(params);
	// Adjust the checks below to match your API's response format
	if (res.data.result.result != null) {
		if (res.data.result.result.user_list[0].user_id == userInfos.value.faceId) {
			ElMessage.success('Sign-in successful');
			stopVideo();
			emit('closeSignIn');
		} else {
			ElMessage.error('Recognition failed. Please sign in as yourself!');
		}
	} else {
		ElMessage.error(res.data.result.error_msg);
		// console.log('Recognition failed, retrying');
	}
};
const stopVideo = () => {
	// Stop the webcam stream
	if (video.value && video.value.srcObject) {
		const stream = video.value.srcObject;
		const tracks = stream.getTracks();
		tracks.forEach((track) => track.stop());
		video.value.srcObject = null;
	}
	if (intervalId) {
		clearInterval(intervalId);
		intervalId = null;
	}
	// Optional auto-capture cleanup
	// if (photoInterval) {
	// 	clearInterval(photoInterval);
	// 	photoInterval = null;
	// }
};
onMounted(async () => {
	await loadModels();
	await startVideo();
	video.value.addEventListener('play', () => {
		intervalId = setInterval(detectFaces, 200);
	});
	// Optional: take a photo automatically every second, starting after 3s
	// setTimeout(() => {
	// 	photoInterval = setInterval(takePhoto, 1000);
	// }, 3000);
});
// Clear the timers and release the camera on unmount
onBeforeUnmount(() => {
	stopVideo();
});
</script>
<style scoped>
video,
canvas {
	position: absolute;
	top: 50%;
	left: 50%;
	transform: translate(-50%, -50%);
}
</style>
4. Import the component wherever you need it; a usage sketch follows.
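A minimal usage sketch, assuming face.vue was saved under /@/components/ and is shown in an Element Plus dialog; the component path, dialog, and showSignIn flag are placeholders for illustration:

<template>
	<el-button type="primary" @click="showSignIn = true">Face Sign-in</el-button>
	<!-- destroy-on-close unmounts the component when the dialog closes, so onBeforeUnmount releases the camera -->
	<el-dialog v-model="showSignIn" destroy-on-close title="Face Sign-in" width="780px">
		<Face @closeSignIn="showSignIn = false" />
	</el-dialog>
</template>
<script setup>
import { ref } from 'vue';
import Face from '/@/components/face.vue'; // adjust to wherever you saved face.vue
const showSignIn = ref(false);
</script>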
5. This is the Vue 3 + TS approach (the snippet above uses plain <script setup>; add lang="ts" if your project uses TypeScript). The styling of the face frame can be customized to your needs. Original post: the original Vue 2 implementation