Preface
The FDDB face detection benchmark documents its dataset and usage in detail. But once a model has been trained, how do we actually evaluate it on FDDB? This post walks through the process. To be honest, I consulted quite a few blog posts and none of them made it really clear (this one will have its flaws too), so I am writing it down here!
Test environment
1. Install Perl;
2. Install Gnuplot;
Steps
1. Use the trained model to run face detection on the FDDB images and write the results out in exactly the required format, i.e. out-fold-**.txt and results.txt;
The detection result format is as follows (an illustrative sample entry is shown after the spec):
...
<image name i>
<number of faces in this image =im>
<face i1>
<face i2>
...
<face im>
...
Face shape formats:
a. Rectangular regions
Each face region is represented as:
<left_x top_y width height detection_score>
b. Elliptical regions
Each face region is represented as:
<major_axis_radius minor_axis_radius angle center_x center_y detection_score>
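For example, with the rectangular format, the entry for one image might look like the following (the image path is the one that also appears commented out in the code below; the face count, coordinates and scores are purely illustrative, not from an actual run):
2002/08/25/big/img_674
2
78 48 63 63 12.326
190 52 60 60 9.871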
Each detection needs this detection_score value. How to obtain it is a good question: it can be taken from OpenCV's own detectMultiScale (the overload with reject levels shown below), or computed by other means (see the FDDB FAQ);
cascade.detectMultiScale(img, objs, reject_levels, level_weights, scale_factor, min_neighbors, 0, cv::Size(), cv::Size(), true);
FDDB FAQ:
Q. How do you compute the detection score that is to be included in the face detection output file?
A. The score included in the output file of a face detection system should be obtained by the system itself. The evaluation code makes no assumption about the range of these scores (-infinity to infinity). In other words, the scores are used to order the detections, and their absolute values do not matter.
This post obtains the score by combining OpenCV's built-in function with an IoU check, as in the code below.
/************************************************************************
* File: genResult.cpp
* Coder: AMY
* Email:[email protected]
* Date: 2018/10/15
* ChLog: score max.
* Re: http://haoxiang.org/2013/11/opencv-detectmultiscale-output-detection-score/
************************************************************************/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <cctype>
#include <iostream>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>
#include <algorithm>
#include <stdio.h>
std::vector<cv::Rect> detectAndScore(cv::Mat& img, cv::CascadeClassifier& cascade, double scale, double* score);
std::vector<cv::Rect> detectAndScoreMax(cv::Mat& img, cv::CascadeClassifier& cascade, double scale, double* score);
std::string cascadeName = "..//src//haar_roboman_ff_alt2.xml";
//compute iou.
float compute_iou(cv::Rect boxA, cv::Rect boxB)
{
int xA = std::max(boxA.x, boxB.x);
int yA = std::max(boxA.y, boxB.y);
int xB = std::min(boxA.x+boxA.width, boxB.x+boxB.width);
int yB = std::min(boxA.y+boxA.height, boxB.y+boxB.height);
float inter_area = std::max(0, xB-xA+1) * std::max(0, yB-yA+1);
float boxA_area = boxA.width * boxA.height;
float boxB_area = boxB.width * boxB.height;
float iou = inter_area / (boxA_area + boxB_area - inter_area);
return iou;
}
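// Illustrative sanity check (values are mine, not from the original post):
// boxA = (0,0,100,100) and boxB = (50,50,100,100) overlap in a 51x51 region
// under the inclusive pixel counting above, so
// IoU = 2601 / (10000 + 10000 - 2601) ≈ 0.15.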
int main(int argc, const char** argv)
{
cv::Mat frame, frameCopy, image;
std::string inputName;
std::string dir;
cv::CascadeClassifier cascade;
double scale = 1;
cascade.load(cascadeName);
std::ofstream out_all_txt("result_all.txt");
for (unsigned int file_num=1; file_num<11;file_num++)
{
std::string str = std::to_string(file_num);
if (str.size() < 2) str = "0" + str;
std::string out_name = "out-fold-" + str + ".txt";
std::cout << "start file " << out_name << std::endl;
std::ifstream in_txt("..//FDDB-folds//FDDB-fold-" + str + ".txt");
std::ofstream out_txt(out_name);
std::string dir1 = "..//FDDB-originalPics//";
while (std::getline(in_txt, inputName))
{
dir = dir1 + inputName;
dir += ".jpg";
//dir = "..//FDDB-originalPics//2002//08//25//big//img_674.jpg";
//std::cout << dir << std::endl;
image = cv::imread(dir, CV_LOAD_IMAGE_COLOR);
if (!image.empty())
{
double scoreBuffer[50]; // per-detection scores; assumes at most 50 detections per image
std::vector<cv::Rect> faces = detectAndScoreMax(image, cascade, scale, scoreBuffer);
out_txt << inputName << std::endl << faces.size() << std::endl;
out_all_txt << inputName << std::endl << faces.size() << std::endl;
//std::cout << faces.size() << std::endl;
for (unsigned int i = 0; i<faces.size(); i++)
{
cv::rectangle(image, faces[i], cv::Scalar(0, 0, 255), 1, 1, 0);
out_txt << faces[i].x << " " << faces[i].y << " " << faces[i].width
<< " " << faces[i].height << " " << scoreBuffer[i] << std::endl;
out_all_txt << faces[i].x << " " << faces[i].y << " " << faces[i].width
<< " " << faces[i].height << " " << scoreBuffer[i] << std::endl;
}
faces.clear();
}
//cv::imshow("src", image);
//cv::waitKey(100);
}
//if (in_txt.eof()) std::cout << "[EOF reached]" << std::endl;
//else std::cout << "[EOF reading]" << std::endl;
in_txt.close();
out_txt.close();
}
out_all_txt.close();
cv::waitKey(1);
return 0;
}
std::vector<cv::Rect> detectAndScoreMax(cv::Mat& color, cv::CascadeClassifier& cascade, double scale, double* scoreBuffer)
{
cv::Mat gray;
cv::Mat img(cvRound(color.rows / scale), cvRound(color.cols / scale), CV_8UC1);
cv::cvtColor(color, gray, CV_BGR2GRAY);
cv::resize(gray, img, img.size(), 0, 0, CV_INTER_LINEAR);
cv::equalizeHist(img, img);
const float scale_factor(1.2f);
const int min_neighbors(3);
std::vector<cv::Rect> faces;
std::vector<int> reject_levels;
std::vector<double> level_weights;
cascade.detectMultiScale(img, faces, reject_levels, level_weights, scale_factor, min_neighbors, 0, cv::Size(), cv::Size(), true);
//std::cout << "faces.size(): " << faces.size() << "---level_weights.size(): " << level_weights.size() << std::endl;
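// With outputRejectLevels = true, reject_levels and level_weights are expected to
// carry one entry per returned rectangle (the commented-out debug line above compares
// the sizes); each level weight is used directly as that detection's score.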
for (unsigned int n = 0; n < faces.size(); n++)
{
scoreBuffer[n] = level_weights[n];
//std::cout << "level_weight: " << level_weights[n] << std::endl;
}
return faces;
}
std::vector<cv::Rect> detectAndScore(cv::Mat& color, cv::CascadeClassifier& cascade, double scale, double* scoreBuffer)
{
cv::Mat gray;
cv::Mat img(cvRound(color.rows / scale), cvRound(color.cols / scale), CV_8UC1);
cv::cvtColor(color, gray, CV_BGR2GRAY);
cv::resize(gray, img, img.size(), 0, 0, CV_INTER_LINEAR);
cv::equalizeHist(img, img);
const float scale_factor(1.2f);
const int min_neighbors(3);
//long t0 = cv::getTickCount();
std::vector<cv::Rect> faces;
cascade.detectMultiScale(img, faces, scale_factor, min_neighbors, 0, cv::Size(), cv::Size());
//long t1 = cv::getTickCount();
//double secs = (t1 - t0)/cv::getTickFrequency();
//std::cout << "Detections takes " << secs << " seconds " << std::endl;
std::vector<cv::Rect> objs;
std::vector<int> reject_levels;
std::vector<double> level_weights;
cascade.detectMultiScale(img, objs, reject_levels, level_weights, scale_factor, min_neighbors, 0, cv::Size(), cv::Size(), true);
//std::cout << "faces.size(): " << faces.size() << "---objs.size(): " << objs.size() << std::endl;
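// Match each grouped detection in 'faces' against the score-annotated candidates in
// 'objs': keep the best-overlapping candidate with IoU > 0.5 and a reject level of at
// least 15, and use its level weight as the detection score. If no candidate passes
// both thresholds, iou_max_idx stays 0 and the first candidate's weight is used as a
// fallback.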
for (unsigned int n = 0; n < faces.size(); n++)
{
int iou_max_idx = 0;
float iou_max = 0.0;
for (unsigned int k=0; k < objs.size(); k++)
{
float iou = compute_iou(faces[n], objs[k]);
if ( (iou>0.5) && (reject_levels[k]>=15) && (iou>iou_max) )
{
iou_max = iou;
iou_max_idx = k;
//std::cout << "iou: " << iou << "---reject_levels[k]: " << reject_levels[k] << std::endl;
}
}
scoreBuffer[n] = level_weights[iou_max_idx];
}
return faces;
}
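Note that main() calls detectAndScoreMax, which takes the per-detection level weights directly as scores; detectAndScore is the alternative path that cross-matches the grouped detections against the score-annotated candidates by IoU. Also note that the combined output above is written to result_all.txt, whereas the evaluation configuration below expects results.txt, so rename the file (or adjust detFile) accordingly.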
2. Prepare the image database, its ground-truth files (ellipseList.txt, imList.txt) and the corresponding detection output file (results.txt); then run the downloaded evaluation code (runEvaluate.pl) to obtain the detector's performance;
The paths used by the evaluation code are configured as follows:
#ifdef _WIN32
string baseDir = "..//..//FDDB-originalPics//";
string listFile = "..//..//imList.txt";
string detFile = "..//..//results.txt";
string annotFile = "..//..//ellipseList.txt";
#else
string baseDir = "..//FDDB-originalPics//";
string listFile = "..//imList.txt";
string detFile = "..//results.txt";
string annotFile = "..//ellipseList.txt";
#endif
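If everything is in place, runEvaluate.pl drives the evaluation and then uses Gnuplot to plot the resulting ROC curves, which is why Perl and Gnuplot are listed in the test environment above.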