1 Converting between OpenCV cv::Mat and FFmpeg AVFrame
I recently worked on capturing camera images with OpenCV, encoding them into a raw H.264 stream with FFmpeg, and then decoding that stream back into an OpenCV cv::Mat for display. The two core problems in this pipeline are how to convert an OpenCV cv::Mat into an FFmpeg AVFrame before H.264 encoding, and how to convert the AVFrame produced by H.264 decoding back into a cv::Mat. The code below shows the core of both conversions; it mainly relies on FFmpeg's sws_getContext and sws_scale functions, which handle video pixel-format conversion and scaling.
Note that the FFmpeg version used here is 4.1. With older versions, check API compatibility; with newer versions, watch out for removed or replaced APIs.
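Before going into the conversion functions themselves, here is a rough sketch of where the Mat-to-AVFrame conversion sits on the encoding side of such a pipeline. This is my own illustration, not part of the original code: the function name encode_camera_to_h264, the camera index, and the encoder settings are placeholders, error handling is mostly omitted, and it relies on the CvMatToAVFrame function defined in section 1.1 below.
#include <opencv2/opencv.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

void CvMatToAVFrame(const cv::Mat& input_mat, AVFrame* out_avframe); // defined in section 1.1

int encode_camera_to_h264()
{
    cv::VideoCapture cap(0);                    // camera capture with OpenCV
    cv::Mat bgr;
    if (!cap.read(bgr)) return -1;              // grab one frame to learn the size

    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) return -1;                      // requires an H.264 encoder (e.g. libx264)
    AVCodecContext* enc_ctx = avcodec_alloc_context3(codec);
    enc_ctx->width = bgr.cols;
    enc_ctx->height = bgr.rows;
    enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    enc_ctx->time_base = {1, 25};
    if (avcodec_open2(enc_ctx, codec, nullptr) < 0) return -1;

    AVFrame* yuv_frame = av_frame_alloc();
    yuv_frame->format = AV_PIX_FMT_YUV420P;
    yuv_frame->width = enc_ctx->width;
    yuv_frame->height = enc_ctx->height;
    av_frame_get_buffer(yuv_frame, 0);          // allocate the Y/U/V planes

    AVPacket* pkt = av_packet_alloc();
    int64_t pts = 0;
    do {
        CvMatToAVFrame(bgr, yuv_frame);         // BGR24 cv::Mat -> YUV420P AVFrame
        yuv_frame->pts = pts++;
        avcodec_send_frame(enc_ctx, yuv_frame);
        while (avcodec_receive_packet(enc_ctx, pkt) == 0) {
            // pkt->data / pkt->size hold raw H.264 here; write them to a file or socket
            av_packet_unref(pkt);
        }
    } while (cap.read(bgr));

    av_packet_free(&pkt);
    av_frame_free(&yuv_frame);
    avcodec_free_context(&enc_ctx);
    return 0;
}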
1.1 Converting OpenCV cv::Mat to FFmpeg AVFrame
void CvMatToAVFrame(const cv::Mat& input_mat, AVFrame* out_avframe)
{
    int image_width = input_mat.cols;
    int image_height = input_mat.rows;

    // BGR24 is a packed format, so a single plane pointer and stride are enough.
    int cvLinesizes[1];
    cvLinesizes[0] = input_mat.step1();

    // Converter from the Mat's BGR24 layout to the frame's YUV420P layout.
    SwsContext* openCVBGRToAVFrameSwsContext = sws_getContext(
        image_width,
        image_height,
        AVPixelFormat::AV_PIX_FMT_BGR24,
        image_width,
        image_height,
        AVPixelFormat::AV_PIX_FMT_YUV420P,
        SWS_FAST_BILINEAR,
        nullptr, nullptr, nullptr);

    sws_scale(openCVBGRToAVFrameSwsContext,
              &input_mat.data,
              cvLinesizes,
              0,
              image_height,
              out_avframe->data,
              out_avframe->linesize);

    if (openCVBGRToAVFrameSwsContext != nullptr)
    {
        sws_freeContext(openCVBGRToAVFrameSwsContext);
        openCVBGRToAVFrameSwsContext = nullptr;
    }
}
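One thing to note: the function above creates and destroys an SwsContext on every call, which is wasteful when converting a continuous camera stream. A small variation (my own suggestion, not from the original post) is to let FFmpeg cache the context with sws_getCachedContext:
// Keeps one SwsContext alive across calls; sws_getCachedContext reuses the old
// context when the parameters are unchanged and recreates it otherwise.
static SwsContext* g_bgr2yuv_ctx = nullptr;

void CvMatToAVFrameCached(const cv::Mat& input_mat, AVFrame* out_avframe)
{
    int cvLinesizes[1] = { static_cast<int>(input_mat.step1()) };
    g_bgr2yuv_ctx = sws_getCachedContext(
        g_bgr2yuv_ctx,
        input_mat.cols, input_mat.rows, AV_PIX_FMT_BGR24,
        input_mat.cols, input_mat.rows, AV_PIX_FMT_YUV420P,
        SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
    sws_scale(g_bgr2yuv_ctx, &input_mat.data, cvLinesizes,
              0, input_mat.rows, out_avframe->data, out_avframe->linesize);
    // call sws_freeContext(g_bgr2yuv_ctx) once when the stream ends
}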
Next is a conversion routine I later had ChatGPT generate. Take a look; ha, we coders really are about to lose our jobs.
#include <opencv2/opencv.hpp>
#include <cstring>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
}

void convertMatToAVPicture(const cv::Mat& mat, AVFrame* frame)
{
    int width = mat.cols;
    int height = mat.rows;
    int channels = mat.channels();
    int ret;

    frame->width = width;
    frame->height = height;
    frame->format = AV_PIX_FMT_BGR24;

    // Allocate the AVFrame's pixel buffer (32-byte aligned).
    ret = av_image_alloc(frame->data, frame->linesize, width, height,
                         static_cast<AVPixelFormat>(frame->format), 32);
    if (ret < 0)
    {
        return;
    }

    // Copy the OpenCV Mat into the AVFrame row by row. No pixel-format conversion
    // happens here (the frame stays BGR24), and a continuous Mat without row
    // padding is assumed.
    int step = width * channels;
    for (int row = 0; row < height; row++)
    {
        memcpy(frame->data[0] + row * frame->linesize[0], mat.data + row * step, step);
    }
}
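A caveat with the ChatGPT version: unlike the sws_scale variant it leaves the frame in BGR24 rather than converting to YUV420P, and av_image_alloc hands ownership of the pixel buffer to the caller. That buffer has to be released separately from the frame itself; a minimal sketch of the cleanup:
// The buffer allocated by av_image_alloc is not reference-counted, so
// av_frame_free alone will not release it; free the data plane first.
av_freep(&frame->data[0]);
av_frame_free(&frame);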
1.2 Converting FFmpeg AVFrame to OpenCV cv::Mat
cv::Mat AVFrameToCvMat(AVFrame* input_avframe)
{
    int image_width = input_avframe->width;
    int image_height = input_avframe->height;

    // Destination Mat in BGR24; step1() gives its row stride in bytes for CV_8UC3.
    cv::Mat resMat(image_height, image_width, CV_8UC3);
    int cvLinesizes[1];
    cvLinesizes[0] = resMat.step1();

    // Converter from the frame's YUV420P layout to the Mat's BGR24 layout.
    SwsContext* avFrameToOpenCVBGRSwsContext = sws_getContext(
        image_width,
        image_height,
        AVPixelFormat::AV_PIX_FMT_YUV420P,
        image_width,
        image_height,
        AVPixelFormat::AV_PIX_FMT_BGR24,
        SWS_FAST_BILINEAR,
        nullptr, nullptr, nullptr);

    sws_scale(avFrameToOpenCVBGRSwsContext,
              input_avframe->data,
              input_avframe->linesize,
              0,
              image_height,
              &resMat.data,
              cvLinesizes);

    if (avFrameToOpenCVBGRSwsContext != nullptr)
    {
        sws_freeContext(avFrameToOpenCVBGRSwsContext);
        avFrameToOpenCVBGRSwsContext = nullptr;
    }
    return resMat;
}
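As an aside, when a decoded frame is already in packed BGR24 (for example after a prior sws_scale pass), the copy can be avoided by wrapping the frame's buffer directly. This is just a sketch of my own, not from the original post; the wrapping Mat does not own the data, so it is cloned before returning:
// Wrap plane 0 of a BGR24 AVFrame without copying. 'view' is only valid while
// 'frame' is alive; clone() returns an independent deep copy that owns its pixels.
cv::Mat WrapBgrFrame(const AVFrame* frame)
{
    cv::Mat view(frame->height, frame->width, CV_8UC3,
                 frame->data[0], frame->linesize[0]);
    return view.clone();
}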
1.3 Usage Example
#include <iostream>
// ffmpeg
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include <libavutil/imgutils.h>
}
// opencv
#include "opencv2/opencv.hpp"
// CvMatToAVFrame and AVFrameToCvMat are the same functions shown in
// sections 1.1 and 1.2 above.
int main()
{
    cv::Mat input_image = cv::imread("C:/Users/Administrator/Desktop/example.jpg");
    if (input_image.empty())
    {
        return -1;
    }

    AVFrame* avFrame = av_frame_alloc();
    avFrame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;
    avFrame->width = input_image.cols;
    avFrame->height = input_image.rows;

    // Allocate the buffers of the YUV frame we are about to fill.
    if (av_frame_get_buffer(avFrame, 0) < 0)
    {
        av_frame_free(&avFrame);
        avFrame = nullptr;
        return -1;
    }

    cv::imshow("before conversion", input_image);

    // Convert the OpenCV cv::Mat into an AVFrame...
    CvMatToAVFrame(input_image, avFrame);

    // ...and convert the AVFrame back into a cv::Mat.
    cv::Mat out_avFrameToMat = AVFrameToCvMat(avFrame);
    cv::imshow("after conversion", out_avFrameToMat);

    cv::waitKey(0);
    cv::destroyAllWindows();

    // free memory
    if (avFrame != nullptr)
    {
        av_frame_free(&avFrame);
        avFrame = nullptr;
    }
    return 0;
}
Judging from the output of the example program, the conversion code above works correctly.