
39. Using NCNN + YOLO-Fastest/YOLOv5 to automatically generate labelme annotations from video streams or images



Basic idea: call Tencent's open-source NCNN framework from C++, then use Tencent's rapidjson to generate labelme-format JSON automatically, so detection and annotation are fully automated; you only need to manually fix the occasional wrong label afterwards. (You could also call the static library from Python, but I prefer C++ O(∩_∩)O haha~) Source code for this project:

https://github.com/sxj731533730/Autolabel.git

Step 1: download the NCNN source code: GitHub - Tencent/ncnn: ncnn is a high-performance neural network inference framework optimized for the mobile platform

ubuntu@ubuntu:~$ git clone https://github.com/Tencent/ncnn.git
ubuntu@ubuntu:~$ cd ncnn/
ubuntu@ubuntu:~/ncnn$ ./build.sh

After the build finishes, a static library libncnn.a is generated under /home/ubuntu/ncnn/build-host-gcc-linux/src/.

Step 2: download the matching Android release package (for its header files): https://github.com/Tencent/ncnn/releases/download/20200916/ncnn-android-lib.zip

Drag the include directory out of it into the CLion project, and drop the static library built above into a new libs folder. The directory structure looks like this:

ubuntu@ubuntu:~/CLionProjects/untitled3$ tree -L 2
.
├── cmake-build-debug
│ ├── CMakeCache.txt
│ ├── CMakeFiles
│ ├── cmake_install.cmake
│ ├── Makefile
│ └── untitled3.cbp
├── CMakeLists.txt
├── include
│ └── ncnn
├── libs
│ └── libncnn.a
└── main.cpp

5 directories, 7 files

Modify the CMakeLists.txt file and the corresponding code as follows:

cmake_minimum_required(VERSION 3.17)
project(untitled3)


set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp")

include_directories(${CMAKE_SOURCE_DIR}/include)
# import the prebuilt ncnn static library
add_library(libncnn STATIC IMPORTED)
set_target_properties(libncnn PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/libs/libncnn.a)
find_package(OpenCV REQUIRED)
set(CMAKE_CXX_STANDARD 11)

add_executable(untitled3 main.cpp)
target_link_libraries(untitled3 libncnn ${OpenCV_LIBS})

Alternatively, use nihui's opencv-mobile: https://github.com/nihui/opencv-mobile

cmake_minimum_required(VERSION 3.17)
project(untitled3)


set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp")

include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/include/opencv4)
# import the prebuilt ncnn static library
add_library(libncnn STATIC IMPORTED)
set_target_properties(libncnn PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libncnn.a)
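# NOTE: TARGET_ARCH is not defined above; set it (e.g. -DTARGET_ARCH=x86_64) to match your opencv-mobile directory layout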
add_library(libopencv_core STATIC IMPORTED)
set_target_properties(libopencv_core PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_core.a)

add_library(libopencv_features2d STATIC IMPORTED)
set_target_properties(libopencv_features2d PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_features2d.a)

add_library(libopencv_highgui STATIC IMPORTED)
set_target_properties(libopencv_highgui PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_highgui.a)

add_library(libopencv_imgproc STATIC IMPORTED)
set_target_properties(libopencv_imgproc PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_imgproc.a)

add_library(libopencv_photo STATIC IMPORTED)
set_target_properties(libopencv_photo PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_photo.a)

add_library(libopencv_video STATIC IMPORTED)
set_target_properties(libopencv_video PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libopencv/${TARGET_ARCH}/libopencv_video.a)


add_executable(untitled3 main.cpp)
target_link_libraries(untitled3
libncnn
libopencv_highgui
libopencv_imgproc
libopencv_features2d
libopencv_photo
libopencv_video
libopencv_core # must come last in the link order
-ldl)

Modify the code and point it at your model paths:

#include "ncnn/benchmark.h"
#include "ncnn/cpu.h"
#include "ncnn/datareader.h"
#include "ncnn/net.h"
#include "ncnn/gpu.h"

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
using namespace cv;
using namespace std;
using namespace ncnn;
int demo(cv::Mat& image, ncnn::Net &detector, int detector_size_width, int detector_size_height)
{

static const char* class_names[] = {"background",
"aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse",
"motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"
};

cv::Mat bgr = image.clone();
int img_w = bgr.cols;
int img_h = bgr.rows;

ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB,\
bgr.cols, bgr.rows, detector_size_width, detector_size_height);

//preprocessing: scale pixels to [0, 1]
const float mean_vals[3] = {0.f, 0.f, 0.f};
const float norm_vals[3] = {1/255.f, 1/255.f, 1/255.f};
in.substract_mean_normalize(mean_vals, norm_vals);

ncnn::Extractor ex = detector.create_extractor();
ex.set_num_threads(8);
ex.input("data", in);
ncnn::Mat out;
ex.extract("output", out);

for (int i = 0; i < out.h; i++)
{
int label;
float x1, y1, x2, y2, score;
float pw,ph,cx,cy;
const float* values = out.row(i);
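// each output row is [label, score, x1, y1, x2, y2], with box coordinates normalized to [0, 1]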

x1 = values[2] * img_w;
y1 = values[3] * img_h;
x2 = values[4] * img_w;
y2 = values[5] * img_h;

score = values[1];
label = values[0];

//clamp box coordinates to the image bounds
if(x1<0) x1=0;
if(y1<0) y1=0;
if(x2<0) x2=0;
if(y2<0) y2=0;

if(x1>img_w) x1=img_w;
if(y1>img_h) y1=img_h;
if(x2>img_w) x2=img_w;
if(y2>img_h) y2=img_h;
cv::rectangle (image, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 255, 0), 1, 1, 0);

char text[256];
sprintf(text, "%s %.1f%%", class_names[label], score * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
cv::putText(image, text, cv::Point(x1, y1 + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
return 0;
}

//camera test
int test_cam()
{
//load the Yolo-Fastest VOC detector
ncnn::Net detector;
detector.load_param("/home/ubuntu/CLionProjects/untitled3/model/yolo-fastest.param");
detector.load_model("/home/ubuntu/CLionProjects/untitled3/model/yolo-fastest.bin");
int detector_size_width = 320;
int detector_size_height = 320;

cv::Mat frame;
cv::VideoCapture cap(0);

while (true)
{
cap >> frame;
double start = ncnn::get_current_time();
demo(frame, detector, detector_size_width, detector_size_height);
double end = ncnn::get_current_time();
double time = end - start;
printf("Time:%7.2f \n",time);
cv::imshow("demo", frame);
cv::waitKey(1);
}
return 0;
}
int main()
{
test_cam();
return 0;
}

That completes calling Tencent's NCNN framework from C++; next, we use Tencent's rapidjson component to piece together labelme-format annotation JSON.

Download https://github.com/Tencent/rapidjson or install it with the commands below; either way, the CLion project uses the header files directly:

sudo apt-get install rapidjson-dev
ubuntu@ubuntu:~$ git clone https://github.com/Tencent/rapidjson.git
ubuntu@ubuntu:~$ cd rapidjson/
ubuntu@ubuntu:~/rapidjson$ mkdir -p build
ubuntu@ubuntu:~/rapidjson$ cd build/
ubuntu@ubuntu:~/rapidjson/build$ cmake ..
ubuntu@ubuntu:~/rapidjson/build$ sudo make install

Then copy the include files into the CLion project; the whole project tree becomes:

.
├── cmake-build-debug
│ ├── CMakeCache.txt
│ ├── CMakeFiles
│ ├── cmake_install.cmake
│ ├── Makefile
│ ├── untitled3
│ └── untitled3.cbp
├── CMakeLists.txt
├── include
│ ├── ncnn
│ └── rapidjson
├── libs
│ └── libncnn.a
├── main.cpp
└── model
├── yolo-fastest.bin
└── yolo-fastest.param

7 directories, 10 files
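Before walking through the full program, here is a minimal, self-contained sketch of the rapidjson pattern used below (Document, its allocator, AddMember, and the Writer); the member names are illustrative only, not part of the labelme schema:

#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include <iostream>

int main() {
    rapidjson::Document doc;
    doc.SetObject(); // the root must be made an object before AddMember
    rapidjson::Document::AllocatorType &allocator = doc.GetAllocator();

    // a string member, copied into the document's allocator
    rapidjson::Value name(rapidjson::kStringType);
    name.SetString("rapidjson", allocator);
    doc.AddMember("project", name, allocator);

    // an int member and a nested array of numbers
    doc.AddMember("stars", 11, allocator);
    rapidjson::Value pts(rapidjson::kArrayType);
    pts.PushBack(1.0, allocator).PushBack(2.0, allocator);
    doc.AddMember("points", pts, allocator);

    // serialize to a compact string
    rapidjson::StringBuffer buffer;
    rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
    doc.Accept(writer);
    std::cout << buffer.GetString() << std::endl; // {"project":"rapidjson","stars":11,"points":[1.0,2.0]}
    return 0;
}

The key convention is that every Value and every AddMember/PushBack call must be given an allocator, normally the owning Document's.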

Now modify the code to run detection on a video and generate a matching labelme JSON file for each frame (this adds the JSON-generation module):

#include "ncnn/benchmark.h"
#include "ncnn/cpu.h"
#include "ncnn/datareader.h"
#include "ncnn/net.h"
#include "ncnn/gpu.h"
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
#include <fstream>
#include <libgen.h> // for basename()

using namespace cv;
using namespace std;
using namespace ncnn;
using namespace rapidjson;

struct LabelInfo {
int label;
float x_min;
float y_min;
float x_max;
float y_max;
};
LabelInfo labelinfo;

vector<LabelInfo> vec_info;

ncnn::Net detector;

int
detect(cv::Mat &image, ncnn::Net &detector, int detector_size_width, int detector_size_height, char *class_names[]) {


cv::Mat bgr = image.clone();
int img_w = bgr.cols;
int img_h = bgr.rows;

ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, \
bgr.cols, bgr.rows, detector_size_width, detector_size_height);

//preprocessing: scale pixels to [0, 1]
const float mean_vals[3] = {0.f, 0.f, 0.f};
const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in.substract_mean_normalize(mean_vals, norm_vals);

ncnn::Extractor ex = detector.create_extractor();
ex.set_num_threads(8);
ex.input("data", in);
ncnn::Mat out;
ex.extract("output", out);

for (int i = 0; i < out.h; i++) {
int label;
float x1, y1, x2, y2, score;
float pw, ph, cx, cy;
const float *values = out.row(i);

x1 = values[2] * img_w;
y1 = values[3] * img_h;
x2 = values[4] * img_w;
y2 = values[5] * img_h;

score = values[1];
label = values[0];

//clamp box coordinates to the image bounds
if (x1 < 0) x1 = 0;
if (y1 < 0) y1 = 0;
if (x2 < 0) x2 = 0;
if (y2 < 0) y2 = 0;

if (x1 > img_w) x1 = img_w;
if (y1 > img_h) y1 = img_h;
if (x2 > img_w) x2 = img_w;
if (y2 > img_h) y2 = img_h;
//cv::rectangle(image, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 255, 0), 1, 1, 0);

char text[256];
sprintf(text, "%s %.1f%%", class_names[label], score * 100);
int baseLine = 0;
//cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
//cv::putText(image, text, cv::Point(x1, y1 + label_size.height),cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));

labelinfo.label = label;
labelinfo.x_min = x1;
labelinfo.y_min = y1;
labelinfo.x_max = x2;
labelinfo.y_max = y2;
vec_info.push_back(labelinfo);


}

return 0;
}


int init_detect(char *detect_param, char *detect_bin) {
//load the Yolo-Fastest VOC detector

detector.load_param(detect_param);
detector.load_model(detect_bin);

return 0;
}

// parameters: frame name, frame, image output dir, json output dir, actual image width and height, class names
void
makejson(string frame_name, cv::Mat &frame, string image_destion, string json_destion, int imageWidth, int imageHeight, char *class_names[]) {
if (vec_info.size()) {

rapidjson::Document doc;
doc.SetObject();
rapidjson::Document::AllocatorType &allocator = doc.GetAllocator();
//top-level members
rapidjson::Value str_version(rapidjson::kStringType);
str_version.SetString("4.5.6");
rapidjson::Value str_flags(rapidjson::kStringType);
str_flags.SetObject();
rapidjson::Value str_imageData(rapidjson::kStringType);
str_imageData.SetNull();
rapidjson::Value str_imageWidth(rapidjson::kStringType);
str_imageWidth.SetInt(imageWidth);
rapidjson::Value str_imageHeight(rapidjson::kStringType);
str_imageHeight.SetInt(imageHeight);
rapidjson::Value str_imagePath(rapidjson::kStringType);
string image_frame_name = frame_name + ".jpg";
str_imagePath.SetString(image_frame_name.c_str(), image_frame_name.length(), allocator);
rapidjson::Value ary(rapidjson::kArrayType);
for(int i=0;i<vec_info.size();i++)
{
// nested shape object
rapidjson::Document sub_doc;
sub_doc.SetObject();
rapidjson::Document::AllocatorType &sub_allocator = sub_doc.GetAllocator();
rapidjson::Value sub_str_shape_type(rapidjson::kStringType);
sub_str_shape_type.SetString("rectangle");
rapidjson::Value sub_str_flags(rapidjson::kStringType);
sub_str_flags.SetObject();
rapidjson::Value sub_str_group_id(rapidjson::kStringType);
sub_str_group_id.SetNull();
rapidjson::Value sub_str_label(rapidjson::kStringType);
int labelid=vec_info[i].label;
string labelname=class_names[labelid];
sub_str_label.SetString(labelname.c_str(), labelname.length(), allocator);
// nested points array
Value sub_array0(kArrayType);
Value sub_array1(kArrayType);
Value sub_point(kArrayType);

float x_min=vec_info[i].x_min;
float y_min=vec_info[i].y_min;
float x_max=vec_info[i].x_max;
float y_max=vec_info[i].y_max;

sub_array0.PushBack(x_min, allocator).PushBack(y_min, allocator);
sub_array1.PushBack(x_max, allocator).PushBack(y_max, allocator);
sub_point.PushBack(sub_array0, allocator);
sub_point.PushBack(sub_array1, allocator);
sub_doc.AddMember("points", sub_point, allocator);

// points done

sub_doc.AddMember("shape_type", sub_str_shape_type, allocator);
sub_doc.AddMember("flags", sub_str_flags, allocator);
sub_doc.AddMember("group_id", sub_str_group_id, allocator);
sub_doc.AddMember("label", sub_str_label, allocator);

ary.PushBack(sub_doc, allocator);
//shape object done
}
//attach all members to doc
doc.AddMember("version", str_version, allocator);
doc.AddMember("flags", str_flags, allocator);
doc.AddMember("imageData", str_imageData, allocator);
doc.AddMember("imageWidth", imageWidth, allocator);
doc.AddMember("imageHeight", imageHeight, allocator);
doc.AddMember("imagePath", str_imagePath, allocator);

doc.AddMember("shapes", ary, allocator);
//serialize to a string
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> write(buffer);
doc.Accept(write);
std::string json = buffer.GetString();

// print the generated labelme JSON
std::cout << json << std::endl;
ofstream fout;
string destination_name=json_destion+"/"+frame_name+".json";

fout.open(destination_name); //absolute or relative paths both work; the extension (test, test.json, test.txt) doesn't matter as long as it's a plain text file
fout<<buffer.GetString();
fout.close();
string destination_image=image_destion+"/"+frame_name+".jpg";
imwrite(destination_image,frame);

}
vec_info.clear();
vector<LabelInfo>().swap(vec_info);
}

void detect_object(char *video_src, int detector_size_width, int detector_size_height, char *class_names[],
string image_destion, string json_destion) {


char *base_name = basename(video_src);
string label_ext = base_name;
string file_name = label_ext.substr(0, label_ext.rfind("."));

cv::Mat frame;
cv::VideoCapture cap(video_src);
int frame_num = 0;
while (cap.read(frame)) {

double start = ncnn::get_current_time();
detect(frame, detector, detector_size_width, detector_size_height, class_names);
double end = ncnn::get_current_time();
double time = end - start;
string frame_name = file_name + to_string(frame_num);
makejson(frame_name, frame, image_destion, json_destion, frame.cols, frame.rows,class_names);
printf("Time:%7.2f \n", time);
cv::imshow("demo", frame);
cv::waitKey(1);
frame_num++;
}
cap.release();
destroyAllWindows();
}

int main() {

char *video_src = "/home/ubuntu/MOT16Labels/MOT16-06-raw.webm";
char *detect_param = "/home/ubuntu/CLionProjects/untitled3/model/yolo-fastest.param";
char *detect_bin = "/home/ubuntu/CLionProjects/untitled3/model/yolo-fastest.bin";
int detector_size_width = 320;
int detector_size_height = 320;
char *class_names[] = {"background",
"aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse",
"motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"
};

string image_destion = "/home/ubuntu/Downloads/A";
string json_destion = "/home/ubuntu/Downloads/A";
init_detect(detect_param, detect_bin);
detect_object(video_src, detector_size_width, detector_size_height, class_names, image_destion, json_destion);

return 0;
}
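For reference, a frame with a single detection produces a JSON file shaped like the following (the values are illustrative; the field order follows the AddMember calls above, and the Writer actually emits everything on one line):

{
  "version": "4.5.6",
  "flags": {},
  "imageData": null,
  "imageWidth": 640,
  "imageHeight": 480,
  "imagePath": "MOT16-06-raw0.jpg",
  "shapes": [
    {
      "points": [[100.0, 50.0], [200.0, 300.0]],
      "shape_type": "rectangle",
      "flags": {},
      "group_id": null,
      "label": "person"
    }
  ]
}

labelme can open this file directly, with MOT16-06-raw0.jpg saved alongside it, for manual correction.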

To read a directory of images instead:


#include "ncnn/benchmark.h"
#include "ncnn/cpu.h"
#include "ncnn/datareader.h"
#include "ncnn/net.h"
#include "ncnn/gpu.h"
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
#include <fstream>
#include <libgen.h> // for basename()

using namespace cv;
using namespace std;
using namespace ncnn;
using namespace rapidjson;

struct LabelInfo {
int label;
float x_min;
float y_min;
float x_max;
float y_max;
};
LabelInfo labelinfo;

vector<LabelInfo> vec_info;

ncnn::Net detector;

int
detect(cv::Mat &image, ncnn::Net &detector, int detector_size_width, int detector_size_height, char *class_names[]) {


cv::Mat bgr = image.clone();
int img_w = bgr.cols;
int img_h = bgr.rows;

ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, \
bgr.cols, bgr.rows, detector_size_width, detector_size_height);

//preprocessing: scale pixels to [0, 1]
const float mean_vals[3] = {0.f, 0.f, 0.f};
const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in.substract_mean_normalize(mean_vals, norm_vals);

ncnn::Extractor ex = detector.create_extractor();
ex.set_num_threads(8);
ex.input("data", in);
ncnn::Mat out;
ex.extract("output", out);

for (int i = 0; i < out.h; i++) {
int label;
float x1, y1, x2, y2, score;
float pw, ph, cx, cy;
const float *values = out.row(i);

x1 = values[2] * img_w;
y1 = values[3] * img_h;
x2 = values[4] * img_w;
y2 = values[5] * img_h;

score = values[1];
label = values[0];

//clamp box coordinates to the image bounds
if (x1 < 0) x1 = 0;
if (y1 < 0) y1 = 0;
if (x2 < 0) x2 = 0;
if (y2 < 0) y2 = 0;

if (x1 > img_w) x1 = img_w;
if (y1 > img_h) y1 = img_h;
if (x2 > img_w) x2 = img_w;
if (y2 > img_h) y2 = img_h;
//cv::rectangle(image, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 255, 0), 1, 1, 0);

char text[256];
sprintf(text, "%s %.1f%%", class_names[label], score * 100);
int baseLine = 0;
//cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
//cv::putText(image, text, cv::Point(x1, y1 + label_size.height),cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
if(label==15){ // keep only the "person" class (VOC index 15)
labelinfo.label = label;
labelinfo.x_min = x1;
labelinfo.y_min = y1;
labelinfo.x_max = x2;
labelinfo.y_max = y2;
vec_info.push_back(labelinfo);
}

}

return 0;
}


int init_detect(char *detect_param, char *detect_bin) {
//load the Yolo-Fastest VOC detector

detector.load_param(detect_param);
detector.load_model(detect_bin);

return 0;
}

// parameters: frame name, frame, image output dir, json output dir, actual image width and height, class names
void
makejson(string frame_name, cv::Mat &frame, string image_destion, string json_destion, int imageWidth, int imageHeight, char *class_names[]) {
if (vec_info.size()) {

rapidjson::Document doc;
doc.SetObject();
rapidjson::Document::AllocatorType &allocator = doc.GetAllocator();
//top-level members
rapidjson::Value str_version(rapidjson::kStringType);
str_version.SetString("4.5.6");
rapidjson::Value str_flags(rapidjson::kStringType);
str_flags.SetObject();
rapidjson::Value str_imageData(rapidjson::kStringType);
str_imageData.SetNull();
rapidjson::Value str_imageWidth(rapidjson::kStringType);
str_imageWidth.SetInt(imageWidth);
rapidjson::Value str_imageHeight(rapidjson::kStringType);
str_imageHeight.SetInt(imageHeight);
rapidjson::Value str_imagePath(rapidjson::kStringType);
string image_frame_name = frame_name + ".jpg";
str_imagePath.SetString(image_frame_name.c_str(), image_frame_name.length(), allocator);
rapidjson::Value ary(rapidjson::kArrayType);
for(int i=0;i<vec_info.size();i++)
{
// nested shape object
rapidjson::Document sub_doc;
sub_doc.SetObject();
rapidjson::Document::AllocatorType &sub_allocator = sub_doc.GetAllocator();
rapidjson::Value sub_str_shape_type(rapidjson::kStringType);
sub_str_shape_type.SetString("rectangle");
rapidjson::Value sub_str_flags(rapidjson::kStringType);
sub_str_flags.SetObject();
rapidjson::Value sub_str_group_id(rapidjson::kStringType);
sub_str_group_id.SetNull();
rapidjson::Value sub_str_label(rapidjson::kStringType);
int labelid=vec_info[i].label;
string labelname=class_names[labelid];
sub_str_label.SetString(labelname.c_str(), labelname.length(), allocator);
// nested points array
Value sub_array0(kArrayType);
Value sub_array1(kArrayType);
Value sub_point(kArrayType);

float x_min=vec_info[i].x_min;
float y_min=vec_info[i].y_min;
float x_max=vec_info[i].x_max;
float y_max=vec_info[i].y_max;

sub_array0.PushBack(x_min, allocator).PushBack(y_min, allocator);
sub_array1.PushBack(x_max, allocator).PushBack(y_max, allocator);
sub_point.PushBack(sub_array0, allocator);
sub_point.PushBack(sub_array1, allocator);
sub_doc.AddMember("points", sub_point, allocator);

// points done

sub_doc.AddMember("shape_type", sub_str_shape_type, allocator);
sub_doc.AddMember("flags", sub_str_flags, allocator);
sub_doc.AddMember("group_id", sub_str_group_id, allocator);
sub_doc.AddMember("label", sub_str_label, allocator);

ary.PushBack(sub_doc, allocator);
//shape object done
}
//attach all members to doc
doc.AddMember("version", str_version, allocator);
doc.AddMember("flags", str_flags, allocator);
doc.AddMember("imageData", str_imageData, allocator);
doc.AddMember("imageWidth", imageWidth, allocator);
doc.AddMember("imageHeight", imageHeight, allocator);
doc.AddMember("imagePath", str_imagePath, allocator);

doc.AddMember("shapes", ary, allocator);
//serialize to a string
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> write(buffer);
doc.Accept(write);
std::string json = buffer.GetString();

// print the generated labelme JSON
std::cout << json << std::endl;
ofstream fout;
string destination_name=json_destion+"/"+frame_name+".json";

fout.open(destination_name); //absolute or relative paths both work; the extension (test, test.json, test.txt) doesn't matter as long as it's a plain text file
fout<<buffer.GetString();
fout.close();
string destination_image=image_destion+"/"+frame_name+".jpg";
imwrite(destination_image,frame);

}
vec_info.clear();
vector<LabelInfo>().swap(vec_info);
}

void detect_object(char *video_src, int detector_size_width, int detector_size_height, char *class_names[],
string image_destion, string json_destion) {



//sxj731533730
const std::string img_dir = video_src;
std::vector<cv::String> fn;
char *base_name = basename(video_src);
string label_ext = base_name;
string file_name = label_ext.substr(0, label_ext.rfind("."));
cv::glob(img_dir, fn, true); // recurse
int frame_num = 0;
for (size_t iter_k = 0; iter_k < fn.size(); ++iter_k)
{
printf("%s\n", fn[iter_k].c_str());



cv::Mat frame=imread(fn[iter_k]);

double start = ncnn::get_current_time();
detect(frame, detector, detector_size_width, detector_size_height, class_names);
double end = ncnn::get_current_time();
double time = end - start;
string frame_name = file_name + to_string(frame_num);
makejson(frame_name, frame, image_destion, json_destion, frame.cols, frame.rows,class_names);
printf("Time:%7.2f \n", time);
resize(frame, frame, cv::Size(frame.cols*0.5, frame.rows*0.5));
cv::imshow("demo", frame);
cv::waitKey(1);
frame_num++;

}

}

int main() {

char *video_src = "/home/ubuntu/20210623";
char *detect_param = "/home/ubuntu/CLionProjects/untitled2/yolo-fastest.param";
char *detect_bin = "/home/ubuntu/CLionProjects/untitled2/yolo-fastest.bin";
int detector_size_width = 320;
int detector_size_height = 320;
char *class_names[] = {"background",
"aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse",
"motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"};

string image_destion = "/home/ubuntu/Downloads/A";
string json_destion = "/home/ubuntu/Downloads/A";
init_detect(detect_param, detect_bin);
detect_object(video_src, detector_size_width, detector_size_height, class_names, image_destion, json_destion);

return 0;
}

Update: the labelme JSON format seems to have changed across versions, so here is a YOLOv5 + rapidjson variant for annotating data:

#include "ncnn/layer.h"
#include "ncnn/net.h"
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"

#if defined(USE_NCNN_SIMPLEOCV)
#include "simpleocv.h"
#else
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif
#include <float.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <fstream>
ncnn::Net yolov5;
#define YOLOV5_V60 0 //set to 1 for YOLOv5 v6.0 exports, which need no custom Focus layer

#if YOLOV5_V60
#define MAX_STRIDE 64
#else
#define MAX_STRIDE 32
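// YoloV5Focus implements YOLOv5's "Focus" slice layer, needed for pre-v6.0 exports:
// it rearranges a w x h x c blob into (w/2) x (h/2) x (4*c) by sampling every other pixel.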
class YoloV5Focus : public ncnn::Layer
{
public:
YoloV5Focus()
{
one_blob_only = true;
}

virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;

int outw = w / 2;
int outh = h / 2;
int outc = channels * 4;

top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
if (top_blob.empty())
return -100;

#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outc; p++)
{
const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
float* outptr = top_blob.channel(p);

for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
*outptr = *ptr;

outptr += 1;
ptr += 2;
}

ptr += w;
}
}

return 0;
}
};

DEFINE_LAYER_CREATOR(YoloV5Focus)
#endif //YOLOV5_V60

struct Object
{
cv::Rect_<float> rect;
int label;
float prob;
};

static inline float intersection_area(const Object& a, const Object& b)
{
cv::Rect_<float> inter = a.rect & b.rect;
return inter.area();
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
int i = left;
int j = right;
float p = faceobjects[(left + right) / 2].prob;

while (i <= j)
{
while (faceobjects[i].prob > p)
i++;

while (faceobjects[j].prob < p)
j--;

if (i <= j)
{
// swap
std::swap(faceobjects[i], faceobjects[j]);

i++;
j--;
}
}

#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(faceobjects, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(faceobjects, i, right);
}
}
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects)
{
if (faceobjects.empty())
return;

qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
}

static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
picked.clear();

const int n = faceobjects.size();

std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
areas[i] = faceobjects[i].rect.area();
}

for (int i = 0; i < n; i++)
{
const Object& a = faceobjects[i];

int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const Object& b = faceobjects[picked[j]];

// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}

if (keep)
picked.push_back(i);
}
}

static inline float sigmoid(float x)
{
return static_cast<float>(1.f / (1.f + exp(-x)));
}

static void generate_proposals(const ncnn::Mat& anchors, int stride, const ncnn::Mat& in_pad, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
const int num_grid = feat_blob.h;

int num_grid_x;
int num_grid_y;
if (in_pad.w > in_pad.h)
{
num_grid_x = in_pad.w / stride;
num_grid_y = num_grid / num_grid_x;
}
else
{
num_grid_y = in_pad.h / stride;
num_grid_x = num_grid / num_grid_y;
}

const int num_class = feat_blob.w - 5;

const int num_anchors = anchors.w / 2;

for (int q = 0; q < num_anchors; q++)
{
const float anchor_w = anchors[q * 2];
const float anchor_h = anchors[q * 2 + 1];

const ncnn::Mat feat = feat_blob.channel(q);

for (int i = 0; i < num_grid_y; i++)
{
for (int j = 0; j < num_grid_x; j++)
{
const float* featptr = feat.row(i * num_grid_x + j);

// find class index with max class score
int class_index = 0;
float class_score = -FLT_MAX;
for (int k = 0; k < num_class; k++)
{
float score = featptr[5 + k];
if (score > class_score)
{
class_index = k;
class_score = score;
}
}

float box_score = featptr[4];

float confidence = sigmoid(box_score) * sigmoid(class_score);

if (confidence >= prob_threshold)
{
// yolov5/models/yolo.py Detect forward
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh

float dx = sigmoid(featptr[0]);
float dy = sigmoid(featptr[1]);
float dw = sigmoid(featptr[2]);
float dh = sigmoid(featptr[3]);

float pb_cx = (dx * 2.f - 0.5f + j) * stride;
float pb_cy = (dy * 2.f - 0.5f + i) * stride;

float pb_w = pow(dw * 2.f, 2) * anchor_w;
float pb_h = pow(dh * 2.f, 2) * anchor_h;

float x0 = pb_cx - pb_w * 0.5f;
float y0 = pb_cy - pb_h * 0.5f;
float x1 = pb_cx + pb_w * 0.5f;
float y1 = pb_cy + pb_h * 0.5f;

Object obj;
obj.rect.x = x0;
obj.rect.y = y0;
obj.rect.width = x1 - x0;
obj.rect.height = y1 - y0;
obj.label = class_index;
obj.prob = confidence;

objects.push_back(obj);
}
}
}
}
}

static int detect_yolov5(const cv::Mat& bgr, std::vector<Object>& objects)
{

const int target_size = 640;
const float prob_threshold = 0.25f;
const float nms_threshold = 0.45f;

int img_w = bgr.cols;
int img_h = bgr.rows;

// letterbox pad to multiple of MAX_STRIDE
int w = img_w;
int h = img_h;
float scale = 1.f;
if (w > h)
{
scale = (float)target_size / w;
w = target_size;
h = h * scale;
}
else
{
scale = (float)target_size / h;
h = target_size;
w = w * scale;
}

ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, img_w, img_h, w, h);

// pad to target_size rectangle
// yolov5/utils/datasets.py letterbox
int wpad = (w + MAX_STRIDE - 1) / MAX_STRIDE * MAX_STRIDE - w;
int hpad = (h + MAX_STRIDE - 1) / MAX_STRIDE * MAX_STRIDE - h;
ncnn::Mat in_pad;
ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in_pad.substract_mean_normalize(0, norm_vals);

ncnn::Extractor ex = yolov5.create_extractor();

ex.input("images", in_pad);

std::vector<Object> proposals;

// anchor setting from yolov5/models/yolov5s.yaml

// stride 8
{
ncnn::Mat out;
ex.extract("output", out);

ncnn::Mat anchors(6);
anchors[0] = 10.f;
anchors[1] = 13.f;
anchors[2] = 16.f;
anchors[3] = 30.f;
anchors[4] = 33.f;
anchors[5] = 23.f;

std::vector<Object> objects8;
generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);

proposals.insert(proposals.end(), objects8.begin(), objects8.end());
}

// stride 16
{
ncnn::Mat out;
#if YOLOV5_V60
ex.extract("376", out);
#else
ex.extract("771", out);
#endif

ncnn::Mat anchors(6);
anchors[0] = 30.f;
anchors[1] = 61.f;
anchors[2] = 62.f;
anchors[3] = 45.f;
anchors[4] = 59.f;
anchors[5] = 119.f;

std::vector<Object> objects16;
generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);

proposals.insert(proposals.end(), objects16.begin(), objects16.end());
}

// stride 32
{
ncnn::Mat out;
#if YOLOV5_V60
ex.extract("401", out);
#else
ex.extract("791", out);
#endif
ncnn::Mat anchors(6);
anchors[0] = 116.f;
anchors[1] = 90.f;
anchors[2] = 156.f;
anchors[3] = 198.f;
anchors[4] = 373.f;
anchors[5] = 326.f;

std::vector<Object> objects32;
generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);

proposals.insert(proposals.end(), objects32.begin(), objects32.end());
}

// sort all proposals by score from highest to lowest
qsort_descent_inplace(proposals);

// apply nms with nms_threshold
std::vector<int> picked;
nms_sorted_bboxes(proposals, picked, nms_threshold);

int count = picked.size();

objects.resize(count);
for (int i = 0; i < count; i++)
{
objects[i] = proposals[picked[i]];

// adjust offset to original unpadded
float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;

// clip
x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

objects[i].rect.x = x0;
objects[i].rect.y = y0;
objects[i].rect.width = x1 - x0;
objects[i].rect.height = y1 - y0;
}

return 0;
}

static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects,char* class_names[])
{


cv::Mat image = bgr;

for (size_t i = 0; i < objects.size(); i++)
{
const Object& obj = objects[i];

fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

char text[256];
sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > image.cols)
x = image.cols - label_size.width;

cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);

cv::putText(image, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}

}
void makejson(std::string frame_name, cv::Mat &frame, std::string image_destion, std::string json_destion, int imageWidth, int imageHeight, char *class_names[], const std::vector<Object>& objects) {
if (objects.size()) {

rapidjson::Document doc;
doc.SetObject();
rapidjson::Document::AllocatorType &allocator = doc.GetAllocator();
//top-level members
rapidjson::Value str_version(rapidjson::kStringType);
str_version.SetString("3.16.7");
rapidjson::Value str_flags(rapidjson::kStringType);
str_flags.SetObject();
rapidjson::Value str_imageData(rapidjson::kStringType);
str_imageData.SetNull();
rapidjson::Value str_imageWidth(rapidjson::kStringType);
str_imageWidth.SetInt(imageWidth);
rapidjson::Value str_imageHeight(rapidjson::kStringType);
str_imageHeight.SetInt(imageHeight);
rapidjson::Value str_imagePath(rapidjson::kStringType);
std::string image_frame_name = frame_name + ".jpg";
str_imagePath.SetString(image_frame_name.c_str(), image_frame_name.length(), allocator);
rapidjson::Value ary(rapidjson::kArrayType);

rapidjson::Value sub_line_color_array0(rapidjson::kArrayType);
rapidjson::Value sub_full_color_array1(rapidjson::kArrayType);

for(int i=0;i<objects.size();i++)
{
// nested shape object
rapidjson::Document sub_doc;
sub_doc.SetObject();
rapidjson::Document::AllocatorType &sub_allocator = sub_doc.GetAllocator();
rapidjson::Value sub_str_shape_type(rapidjson::kStringType);
sub_str_shape_type.SetString("rectangle");
rapidjson::Value sub_str_flags(rapidjson::kStringType);
sub_str_flags.SetObject();
rapidjson::Value sub_str_group_id(rapidjson::kStringType);
sub_str_group_id.SetNull();
rapidjson::Value sub_str_line_color_id(rapidjson::kStringType);
sub_str_line_color_id.SetNull();
rapidjson::Value sub_str_full_color_id(rapidjson::kStringType);
sub_str_full_color_id.SetNull();
rapidjson::Value sub_str_label(rapidjson::kStringType);
int labelid=objects[i].label;
std::string labelname=class_names[labelid];
sub_str_label.SetString(labelname.c_str(), labelname.length(), allocator);
// nested points array
rapidjson::Value sub_array0(rapidjson::kArrayType);
rapidjson::Value sub_array1(rapidjson::kArrayType);

rapidjson::Value sub_point(rapidjson::kArrayType);


float x_min=objects[i].rect.x;
float y_min=objects[i].rect.y;
float x_max=objects[i].rect.x+objects[i].rect.width;
float y_max=objects[i].rect.y+objects[i].rect.height;

sub_array0.PushBack(x_min, allocator).PushBack(y_min, allocator);
sub_array1.PushBack(x_max, allocator).PushBack(y_max, allocator);
sub_point.PushBack(sub_array0, allocator);
sub_point.PushBack(sub_array1, allocator);
sub_doc.AddMember("points", sub_point, allocator);

// points done

sub_doc.AddMember("shape_type", sub_str_shape_type, allocator);
sub_doc.AddMember("flags", sub_str_flags, allocator);
sub_doc.AddMember("group_id", sub_str_group_id, allocator);
sub_doc.AddMember("label", sub_str_label, allocator);
sub_doc.AddMember("line_color", sub_str_line_color_id, allocator);
sub_doc.AddMember("fill_color", sub_str_full_color_id, allocator);
ary.PushBack(sub_doc, allocator);
//shape object done
}
//attach all members to doc
doc.AddMember("version", str_version, allocator);
doc.AddMember("flags", str_flags, allocator);
doc.AddMember("imageData", str_imageData, allocator);
doc.AddMember("imageWidth", imageWidth, allocator);
doc.AddMember("imageHeight", imageHeight, allocator);
doc.AddMember("imagePath", str_imagePath, allocator);
doc.AddMember("shapes", ary, allocator);

sub_line_color_array0.PushBack(0, allocator).PushBack(255, allocator).PushBack(0, allocator).PushBack(128, allocator);
sub_full_color_array1.PushBack(255, allocator).PushBack(0, allocator).PushBack(0, allocator).PushBack(128, allocator);

doc.AddMember("lineColor", sub_line_color_array0, allocator);
doc.AddMember("fillColor", sub_full_color_array1, allocator);
//serialize to a string
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> write(buffer);
doc.Accept(write);
std::string json = buffer.GetString();

// print the generated labelme JSON
std::cout << json << std::endl;
std::ofstream fout;
std::string destination_name=json_destion+"/"+frame_name+".json";

fout.open(destination_name); //absolute or relative paths both work; the extension (test, test.json, test.txt) doesn't matter as long as it's a plain text file
fout<<buffer.GetString();
fout.close();
std::string destination_image=image_destion+"/"+frame_name+".jpg";
imwrite(destination_image,frame);
allocator.Clear();
}

}

int init_detect(char *detect_param, char *detect_bin) {
// initialize the YOLOv5 detector


yolov5.opt.use_vulkan_compute = true;
// yolov5.opt.use_bf16_storage = true;

// original pretrained model from https://github.com/ultralytics/yolov5
// the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
#if YOLOV5_V60
yolov5.load_param(detect_param);
yolov5.load_model(detect_bin);
#else
yolov5.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);

yolov5.load_param(detect_param);
yolov5.load_model(detect_bin);
#endif


return 0;
}
int main(int argc, char** argv)
{

char *detect_param = "/home/ubuntu/CLionProjects/untitled1/model/yolov5sFake.param";
char *detect_bin = "/home/ubuntu/CLionProjects/untitled1/model/yolov5sFake.bin";

char *class_names[] = {"real",
"fake"};

init_detect(detect_param, detect_bin);
std::string image_destion = "/home/ubuntu/Downloads/A";
std::string json_destion = "/home/ubuntu/Downloads/A";
std::vector<cv::String> fn;
std::string img_dir="/home/ubuntu/red";
cv::glob(img_dir, fn, true); // recurse
for (int iter_k = 0; iter_k < fn.size(); ++iter_k) {
cv::Mat frame=cv::imread(fn[iter_k]);

// extract the file stem from the full path (portable; avoids calling POSIX basename() on a const char*)
std::string::size_type iPos = fn[iter_k].find_last_of("/\\") + 1;
std::string base_name = fn[iter_k].substr(iPos);
std::string file_name = base_name.substr(0, base_name.rfind("."));



std::vector<Object> objects;
detect_yolov5(frame, objects);

makejson(file_name, frame, image_destion, json_destion, frame.cols, frame.rows, class_names,objects);
draw_objects(frame, objects,class_names);
cv::imshow("image", frame);
cv::waitKey(1);
}

return 0;
}
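As a quick sanity check (a minimal sketch; the file path below is just an example), each generated annotation can be parsed back with rapidjson before opening it in labelme:

#include "rapidjson/document.h"
#include <fstream>
#include <sstream>
#include <iostream>

int main() {
    // example path; point this at any generated annotation file
    std::ifstream fin("/home/ubuntu/Downloads/A/sample0.json");
    std::stringstream ss;
    ss << fin.rdbuf();
    rapidjson::Document doc;
    doc.Parse(ss.str().c_str());
    if (doc.HasParseError() || !doc.HasMember("shapes") || !doc["shapes"].IsArray()) {
        std::cerr << "invalid labelme json" << std::endl;
        return 1;
    }
    std::cout << "shapes: " << doc["shapes"].Size() << std::endl;
    return 0;
}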

From: https://blog.51cto.com/u_12504263/5719142
