1、安装依赖
npm install face-api.js --save
安装完成后,可在 package.json 中确认实际安装的版本。
2、下载模型文件
face-api.js需要一些预先训练好的模型文件来执行人脸检测和识别。需要从GitHub仓库中下载这些文件,并放置在项目的public目录下,或者配置一个正确的路径指向这些文件。可以从face-api.js的GitHub页面下载模型。
1.在这个页面上,你会看到多个模型文件,例如 ssd_mobilenetv1_model-weights_manifest.json、face_landmark_68_model-weights_manifest.json 等。为了使用 face-api.js 的不同功能,如人脸检测、特征点定位、表情识别等,你需要下载相应的模型文件。
2.你可以手动下载单个模型文件,也可以直接克隆整个仓库,还可以私聊我,我把文件发给你。
git clone https://github.com/justadudewhohacks/face-api.js.git
3. 克隆完成之后,需要把模型文件放到 public 目录下的 models 文件夹中;如果没有 models 文件夹,请自行创建。你也可以自定义存放路径,只要代码中引用的路径正确即可。
到此为止,准备工作已完成。
3、组件代码
1、人脸检测、人脸对比
主要实现对上传的两张图片进行人脸检测以及对比
<template>
<!-- Face comparison UI: two file pickers with thumbnail previews, a compare
     button, and a result line bound to `comparisonResult` (set by the script). -->
<div>
<input type="file" @change="onFileChange1" />
<!-- `image1`/`image2` are object URLs created from the picked files -->
<img v-if="image1" :src="image1" style="width: 100px; height: 100px" />
<input type="file" @change="onFileChange2" />
<img v-if="image2" :src="image2" style="width: 100px; height: 100px" />
<button @click="compareFaces">Compare Faces</button>
<p v-if="comparisonResult">{{ comparisonResult }}</p>
</div>
</template>
<script>
import * as faceapi from "face-api.js"
export default {
  name: "employee-list",
  data() {
    return {
      image1: null, // object URL for the first preview <img>
      image2: null, // object URL for the second preview <img>
      comparisonResult: null // human-readable comparison outcome shown in the template
    }
  },
  methods: {
    /**
     * Handle the first file input: show a preview and decode the file into an
     * HTMLImageElement that face-api.js can run detection on.
     */
    async onFileChange1(e) {
      const file = e.target.files[0]
      if (!file) return // user cancelled the file picker
      this.image1 = URL.createObjectURL(file)
      // bufferToImage decodes the Blob into an <img> element for detection.
      this.img1El = await faceapi.bufferToImage(file)
    },
    /** Same as onFileChange1, for the second image. */
    async onFileChange2(e) {
      const file = e.target.files[0]
      if (!file) return
      this.image2 = URL.createObjectURL(file)
      this.img2El = await faceapi.bufferToImage(file)
    },
    /** Button handler: validate that both images are loaded, then compare. */
    async compareFaces() {
      if (!this.img1El || !this.img2El) {
        alert("Please upload both images first.")
        return
      }
      await this.start()
    },
    /**
     * Detect all faces in both images and compare the first face of each.
     * FIX: the result is now written to `comparisonResult` (which the template
     * displays) instead of only being logged to the console. Also removed the
     * unused base64 variables and the canvas that was appended to document.body
     * on every call but never drawn to (a DOM leak).
     */
    async start() {
      // detectAllFaces returns every face; withFaceDescriptors adds the 128-d
      // embedding needed for comparison.
      const detections1 = await faceapi.detectAllFaces(this.img1El).withFaceLandmarks().withFaceDescriptors()
      const detections2 = await faceapi.detectAllFaces(this.img2El).withFaceLandmarks().withFaceDescriptors()
      if (!detections1.length || !detections2.length) {
        this.comparisonResult = "没有检测到人脸"
        return
      }
      // Faces whose descriptor distance is below this threshold are treated as
      // the same person (face-api.js convention: smaller distance = more similar).
      const threshold = 0.4
      const descriptor1 = detections1[0].descriptor
      const descriptor2 = detections2[0].descriptor
      // Method 1: FaceMatcher labels the probe "person 1" when it matches
      // within the threshold, otherwise "unknown".
      const faceMatcher = new faceapi.FaceMatcher([descriptor1], threshold)
      const result = faceMatcher.findBestMatch(descriptor2)
      // Method 2: direct Euclidean distance between the two descriptors.
      const distance = faceapi.euclideanDistance(descriptor1, descriptor2)
      const samePerson = result.label === "person 1" && distance < threshold
      this.comparisonResult = samePerson ? "这两张图片可能是同一个人。" : "这两张图片可能是不同的人。"
    }
  },
  mounted() {
    // Load all required models once.
    // FIX: ssdMobilenetv1 was previously loaded twice (loadSsdMobilenetv1Model
    // is an alias for nets.ssdMobilenetv1.loadFromUri), and load failures were
    // silently ignored.
    Promise.all([
      faceapi.nets.tinyFaceDetector.loadFromUri("/models"),
      faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
      faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
      faceapi.nets.ssdMobilenetv1.loadFromUri("/models")
    ]).catch(err => console.error("Failed to load face-api models:", err))
  }
}
</script>
<style>
/* Overlay both images centered at the top of the page; pointer-events: none
   lets clicks pass through to elements underneath. NOTE(review): the template
   above does not assign .img1/.img2 classes — these rules appear unused. */
.img1,
.img2 {
position: absolute;
top: 0;
left: 50%;
transform: translateX(-50%);
pointer-events: none;
}
/* Stacking order: .img2 renders above .img1. */
.img1 {
z-index: 10;
}
.img2 {
z-index: 20;
}
</style>
2、人流量计数
主要实现人脸检测、并进行人流量计数
<template>
<!-- Live camera feed with a same-sized canvas for drawing detection boxes
     and landmarks, plus a running people counter. -->
<div class="face-recognition">
<video ref="video" width="640" height="480" autoplay></video>
<canvas ref="canvas" width="640" height="480"></canvas>
<div>当前人流量:{{ peopleCount }}</div>
</div>
</template>
<script>
import * as faceapi from "face-api.js"
export default {
  name: "FaceRecognition",
  data() {
    return {
      isLoaded: false, // true once all models have been fetched
      lastDetections: [], // faces from the previous frame (kept for interface compatibility)
      peopleCount: 0, // current distinct-people estimate
      detectTimer: null // setInterval handle so the loop can be stopped on teardown
    }
  },
  mounted() {
    this.loadModels()
  },
  methods: {
    /** Load detection, landmark and recognition models, then start the camera. */
    async loadModels() {
      try {
        await Promise.all([
          faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
          faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
          faceapi.nets.ssdMobilenetv1.loadFromUri("/models")
        ])
        this.isLoaded = true
        this.startVideo()
      } catch (error) {
        console.error("Failed to load models:", error)
      }
    },
    /** Request the webcam and begin detection once video metadata is ready. */
    startVideo() {
      if (navigator.mediaDevices && this.isLoaded) {
        navigator.mediaDevices
          .getUserMedia({ video: true })
          .then(stream => {
            this.$refs.video.srcObject = stream
            this.$refs.video.onloadedmetadata = () => {
              this.detectFaces()
            }
          })
          .catch(error => console.error("getUserMedia error:", error))
      }
    },
    /**
     * Run face detection every 100 ms, draw the results, and estimate how many
     * distinct people have been seen recently.
     * FIXES vs. the original:
     *  - `.withFaceDescriptors()` was missing, so `face.descriptor` was always
     *    undefined and the count was meaningless.
     *  - `new Set(descriptors)` dedupes by object identity and never merges two
     *    Float32Array descriptors; we now dedupe by Euclidean distance.
     *  - The interval id is stored so beforeDestroy can clear it (it leaked).
     */
    async detectFaces() {
      const video = this.$refs.video
      const canvas = this.$refs.canvas
      const ctx = canvas.getContext("2d")
      const detectionOptions = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.9 })
      let recentDescriptors = []
      const detectionHistoryLength = 5 // sliding window of recent descriptors
      const matchThreshold = 0.5 // descriptors closer than this count as the same person
      this.detectTimer = setInterval(async () => {
        if (video.readyState !== video.HAVE_ENOUGH_DATA) return
        ctx.drawImage(video, 0, 0, canvas.width, canvas.height)
        const detections = await faceapi
          .detectAllFaces(video, detectionOptions)
          .withFaceLandmarks()
          .withFaceDescriptors()
        // Collect only valid descriptors, bounded to the history window.
        recentDescriptors.push(...detections.map(face => face.descriptor).filter(Boolean))
        recentDescriptors = recentDescriptors.slice(-detectionHistoryLength)
        // Distance-based dedupe: a descriptor is "new" only if it is far from
        // every descriptor already kept.
        const unique = []
        for (const d of recentDescriptors) {
          const alreadySeen = unique.some(u => faceapi.euclideanDistance(u, d) < matchThreshold)
          if (!alreadySeen) unique.push(d)
        }
        this.peopleCount = unique.length
        faceapi.draw.drawDetections(canvas, detections)
        faceapi.draw.drawFaceLandmarks(canvas, detections)
      }, 100)
    }
  },
  beforeDestroy() {
    // Stop the detection loop and release the camera.
    if (this.detectTimer) {
      clearInterval(this.detectTimer)
      this.detectTimer = null
    }
    if (this.$refs.video && this.$refs.video.srcObject) {
      this.$refs.video.srcObject.getTracks().forEach(track => track.stop())
    }
  }
}
</script>
<style scoped>
/* Positioning context so the canvas could be absolutely overlaid on the video. */
.face-recognition {
position: relative;
}
</style>
标签:canvas,const,js,api,人脸,console,face,faceapi
From: https://blog.csdn.net/jiangzhihao0515/article/details/140500280