一.视频滤镜初始化
本次代码实现的是给输入视频文件添加水平翻转滤镜,在视频滤镜初始化部分我们可以分为以下几步进行:
1.创建滤镜图结构
视频滤镜功能最核心的结构为滤镜图结构,即AVFilterGraph结构,我们调用avfilter_graph_alloc()函数就可以创建一个滤镜图结构。
2.创建滤镜实例结构
仅创建一个空的滤镜图显然是无法完成任何工作的,因此必须根据需求向滤镜图中添加相应的滤镜实例。这里,我们添加buffer滤镜和buffersink滤镜作为视频滤镜的输入和输出。滤镜由AVFilter结构实现,调用avfilter_get_by_name()函数即可获得相应的滤镜。在获取了这两个滤镜后,接下来,需要创建对应的滤镜实例,滤镜实例由AVFilterContext结构实现,通过调用avfilter_graph_create_filter()函数就能将滤镜实例添加到创建好的滤镜图中。
3.创建和配置滤镜接口
对于创建好的滤镜,需要将相应的接口连接后方可正常工作,滤镜接口类型定义为AVFilterInOut结构,其本质是一个链表的节点,创建输入输出接口可以调用avfilter_inout_alloc()函数,创建好之后,将滤镜对象和接口绑定即可。
4.根据滤镜描述解析并配置滤镜图
在完成滤镜图,相关滤镜和接口结构的创建后,接下来需要根据字符串类型的滤镜描述信息对整体的滤镜图进行解析和配置,这一步需要先后调用avfilter_graph_parse_ptr()和avfilter_graph_config()函数。
完整的初始化代码如下:
//video_filter_core.cpp #define STREAM_FRAME_RATE 25 AVFilterContext *buffersink_ctx; AVFilterContext *buffersrc_ctx; AVFilterGraph *filter_graph; AVFrame *input_frame= nullptr,*output_frame= nullptr; static int32_t init_frames(int32_t width,int32_t height,enum AVPixelFormat pix_fmt){ int result=0; input_frame=av_frame_alloc(); output_frame=av_frame_alloc(); if(!input_frame||!output_frame){ cerr<<"Error:frame allocation failed."<<endl; return -1; } input_frame->width=width; input_frame->height=height; input_frame->format=pix_fmt; result= av_frame_get_buffer(input_frame,0); if(result<0){ cerr<<"Error:av_frame_get_buffer failed."<<endl; return -1; } result= av_frame_make_writable(input_frame); if(result<0){ cerr<<"Error:av_frame_make_writable failed."<<endl; return -1; } return 0; } int32_t init_video_filter(int32_t width,int32_t height,const char *filter_descr){ int32_t result=0; char args[512]={0}; const AVFilter *buffersrc= avfilter_get_by_name("buffer"); const AVFilter *buffersink= avfilter_get_by_name("buffersink"); AVFilterInOut *outputs=avfilter_inout_alloc(); AVFilterInOut *inputs=avfilter_inout_alloc(); enum AVPixelFormat pix_fmts[]={AV_PIX_FMT_YUV420P,AV_PIX_FMT_NONE}; do{ filter_graph=avfilter_graph_alloc(); if(!outputs||!inputs||!filter_graph){ cerr<<"Error:creating filter graph failed."<<endl; result=AVERROR(ENOMEM); break; } snprintf(args,sizeof(args),"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",width,height,AV_PIX_FMT_YUV420P,1,STREAM_FRAME_RATE,1,1); result= avfilter_graph_create_filter(&buffersrc_ctx,buffersrc,"in",args, nullptr,filter_graph); if(result<0){ cerr<<"Error:could not create source filter."<<endl; break; } result= avfilter_graph_create_filter(&buffersink_ctx,buffersink,"out", nullptr,nullptr,filter_graph); if(result<0){ cerr<<"Error:could not create sink filter."<<endl; break; } result= av_opt_set_int_list(buffersink_ctx,"pix_fmts",pix_fmts,AV_PIX_FMT_NONE,AV_OPT_SEARCH_CHILDREN); if(result<0){ cerr<<"Error:could not 
set output pixel format."<<endl; break; } outputs->name=av_strdup("in"); outputs->filter_ctx=buffersrc_ctx; outputs->pad_idx=0; outputs->next= nullptr; inputs->name=av_strdup("out"); inputs->filter_ctx=buffersink_ctx; inputs->pad_idx=0; inputs->next= nullptr; //根据滤镜描述解析并配置滤镜图 if((result= avfilter_graph_parse_ptr(filter_graph,filter_descr,&inputs,&outputs, nullptr))<0){ cerr<<"Error:avfilter_graph_parse_ptr failed."<<endl; break; } //在解析滤镜描述后,需要验证滤镜图整体配置的有效性 if((result=avfilter_graph_config(filter_graph, nullptr))<0){ cerr<<"Error:Graph config invalid."<<endl; break; } result= init_frames(width,height,AV_PIX_FMT_YUV420P); if(result<0){ cerr<<"Error:init frames failed."<<endl; break; } }while(0); avfilter_inout_free(&inputs); avfilter_inout_free(&outputs); return result; } static void free_frames(){ av_frame_free(&input_frame); av_frame_free(&output_frame); } void destroy_video_filter(){ free_frames(); avfilter_graph_free(&filter_graph); }
二.循环处理视频帧
在这一步主要用到av_buffersrc_add_frame_flags()和av_buffersink_get_frame()这两个函数,它们的功能分别是将输入图像添加到滤镜图和从sink滤镜中获取编辑后的图像。代码如下:
//video_filter_core.cpp static int32_t filter_frame(){ int32_t result=0; if((result= av_buffersrc_add_frame_flags(buffersrc_ctx,input_frame,AV_BUFFERSRC_FLAG_KEEP_REF))<0){ cerr<<"Error:add frame to buffer src failed."<<endl; return result; } while(true){ result= av_buffersink_get_frame(buffersink_ctx,output_frame); if(result==AVERROR(EAGAIN)||result==AVERROR_EOF){ return 1; } else if(result<0){ cerr<<"Error:buffersink_get_frame failed."<<endl; return result; } cout<<"Frame filtered,width:"<<output_frame->width<<",height:"<<output_frame->height<<endl; write_frame_to_yuv(output_frame); av_frame_unref(output_frame); } return result; } int32_t filtering_video(int32_t frame_cnt){ int32_t result=0; for(size_t i=0;i<frame_cnt;i++){ result= read_yuv_to_frame(input_frame); if(result<0){ cerr<<"Error:read_yuv_to_frame failed."<<endl; return result; } result=filter_frame(); if(result<0){ cerr<<"Error:filter_frame failed."<<endl; return result; } } return result; }
下面是数据读入和数据写出代码:
//io_data.cpp static FILE* input_file= nullptr; static FILE* output_file= nullptr; int32_t open_input_output_files(const char* input_name,const char* output_name){ if(strlen(input_name)==0||strlen(output_name)==0){ cout<<"Error:empty input or output file name."<<endl; return -1; } close_input_output_files(); input_file=fopen(input_name,"rb");//rb:读取一个二进制文件,该文件必须存在 if(input_file==nullptr){ cerr<<"Error:failed to open input file."<<endl; return -1; } output_file=fopen(output_name,"wb");//wb:打开或新建一个二进制文件,只允许写 if(output_file== nullptr){ cout<<"Error:failed to open output file."<<endl; return -1; } return 0; } void close_input_output_files(){ if(input_file!= nullptr){ fclose(input_file); input_file= nullptr; } if(output_file!= nullptr){ fclose(output_file); output_file= nullptr; } } int32_t read_yuv_to_frame(AVFrame* frame){ int32_t frame_width=frame->width; int32_t frame_height=frame->height; int32_t luma_stride=frame->linesize[0]; int32_t chroma_stride=frame->linesize[1]; int32_t frame_size=frame_width*frame_height*3/2; int32_t read_size=0; if(frame_width==luma_stride){ //如果width等于stride,则说明frame中不存在padding字节,可整体读取 read_size+=fread(frame->data[0],1,frame_width*frame_height,input_file); read_size+=fread(frame->data[1],1,frame_width*frame_height/4,input_file); read_size+=fread(frame->data[2],1,frame_width*frame_height/4,input_file); } else{ //如果width不等于stride,则说明frame中存在padding字节 //对三个分量应该逐行读取 for(size_t i=0;i<frame_height;i++){ read_size+=fread(frame->data[0]+i*luma_stride,1,frame_width,input_file); } for(size_t uv=1;uv<=2;uv++){ for(size_t i=0;i<frame_height/2;i++){ read_size+=fread(frame->data[uv]+i*chroma_stride,1,frame_width/2,input_file); } } } if(read_size!=frame_size){ cerr<<"Error:Read data error,frame_size:"<<frame_size<<",read_size:"<<read_size<<endl; return -1; } return 0; } int32_t write_frame_to_yuv(AVFrame* frame){ uint8_t** pBuf=frame->data; int* pStride=frame->linesize; for(size_t i=0;i<3;i++){ int32_t width=(i==0?frame->width:frame->width/2); int32_t 
height=(i==0?frame->height:frame->height/2); for(size_t j=0;j<height;j++){ fwrite(pBuf[i],1,width,output_file); pBuf[i]+= pStride[i]; } } return 0; }
main函数实现:
// Entry point: open the raw YUV files, build an "hflip" filter graph for
// 1920x1080 input, filter 250 frames, then release all resources.
int main() {
    const char *input_file_name = "../input.yuv";
    int32_t pic_width = 1920;
    int32_t pic_height = 1080;
    int32_t total_frame_cnt = 250;
    const char *filter_descr = "hflip";
    const char *output_file_name = "../output.yuv";
    int32_t result = open_input_output_files(input_file_name, output_file_name);
    if (result < 0) {
        return result;
    }
    result = init_video_filter(pic_width, pic_height, filter_descr);
    if (result < 0) {
        // Fix: the original returned here without closing the files.
        close_input_output_files();
        return result;
    }
    result = filtering_video(total_frame_cnt);
    // Fix: release resources on the failure path too; the original leaked
    // both files and the filter graph when filtering_video() failed.
    close_input_output_files();
    destroy_video_filter();
    return result < 0 ? result : 0;
}
最后,可以用以下指令测试输出的output.yuv文件:
ffplay -f rawvideo -video_size 1920x1080 -i output.yuv
标签:int32,libavfilter,frame,yuv,滤镜,output,input,height From: https://www.cnblogs.com/luqman/p/filter.html