方法1:命令保存
# 保存 bmp 图片
ffmpeg -i input.mp4 -vf scale=768:432 picture/%d.bmp
# 播放
ffplay picture/87.bmp
方法2:代码保存
main.c
#include "libavutil/log.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libavcodec/avcodec.h"
#include "libavutil/parseutils.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"

#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t

/* BMP headers must be laid out without the compiler's default padding:
 * bfType (2 bytes) is immediately followed by bfSize (4 bytes). */
#pragma pack(2)
typedef struct TagBitMapFileHeader {
    WORD bfType;       /* magic: 0x4d42 == "BM" (little-endian) */
    DWORD bfSize;      /* total file size in bytes */
    WORD bfReserved1;
    WORD bfReserved2;
    DWORD bfOffBits;   /* offset from file start to pixel data */
} BitMapFileHeader;

typedef struct TagBitMapInfoHeader {
    DWORD biSize;      /* size of this header (40) */
    LONG biWidth;
    LONG biHeight;     /* negative value => rows stored top-down */
    WORD biPlanes;
    WORD biBitCount;   /* bits per pixel; 24 here */
    DWORD biCompression;
    DWORD biSizeImage;
    LONG biXPelsPerMeter;
    LONG biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BitMapInfoHeader;
#pragma pack()

/* sequence number used to name the output BMP files */
int frameCount = 0;

/*
 * Write a tightly packed RGB24 buffer to `filename` as a 24-bit BMP.
 *
 * BMP pixel data is stored in BGR order and every row must be padded
 * to a 4-byte boundary, so channels are swapped and padding bytes are
 * appended while writing. biHeight is negated so the file is top-down,
 * matching the row order sws_scale produces.
 */
void saveBmp(const char *filename, unsigned char *rgbData, int width, int height)
{
    /* BMP rows are aligned to 4 bytes; round width*3 up */
    int rowSize = (width * 3 + 3) & ~3;
    int bmpDataSize = rowSize * height;

    BitMapFileHeader bitMapFileHeader = {0};
    bitMapFileHeader.bfType = 0x4d42;  /* "BM" */
    bitMapFileHeader.bfSize = sizeof(BitMapFileHeader) + sizeof(BitMapInfoHeader) + bmpDataSize;
    bitMapFileHeader.bfOffBits = sizeof(BitMapFileHeader) + sizeof(BitMapInfoHeader);

    BitMapInfoHeader bitMapInfoHeader = {0};
    bitMapInfoHeader.biSize = sizeof(BitMapInfoHeader);
    bitMapInfoHeader.biWidth = width;
    bitMapInfoHeader.biHeight = height * (-1);  /* negative => top-down */
    bitMapInfoHeader.biPlanes = 1;
    bitMapInfoHeader.biBitCount = 24;
    /* remaining fields (compression, resolution, palette) stay zero */

    FILE *fp = fopen(filename, "wb");
    if (fp == NULL) {
        av_log(NULL, AV_LOG_ERROR, "saveBmp: open %s failed\n", filename);
        return;
    }
    fwrite(&bitMapFileHeader, 1, sizeof(BitMapFileHeader), fp);
    fwrite(&bitMapInfoHeader, 1, sizeof(BitMapInfoHeader), fp);

    const uint8_t padding[3] = {0, 0, 0};
    int padBytes = rowSize - width * 3;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            /* RGB in memory -> BGR on disk */
            uint8_t bgr[3];
            bgr[0] = rgbData[(y * width + x) * 3 + 2];  /* B */
            bgr[1] = rgbData[(y * width + x) * 3 + 1];  /* G */
            bgr[2] = rgbData[(y * width + x) * 3 + 0];  /* R */
            fwrite(bgr, 1, 3, fp);
        }
        if (padBytes > 0)
            fwrite(padding, 1, padBytes, fp);  /* per-row 4-byte alignment */
    }
    fclose(fp);
}

/*
 * Feed `packet` to the decoder (packet == NULL flushes it), scale every
 * decoded frame into dstFrame (RGB24, dstWeight x dstHeight) and save it
 * as ../picture/<n>.bmp.
 *
 * Returns 0 on success (including EAGAIN/EOF from the decoder), a
 * negative AVERROR code otherwise.
 *
 * NOTE(review): `dst` is never written here; it is kept only so the
 * existing call sites keep working.
 */
int decodeVideo(AVCodecContext *codecCtx, AVPacket *packet, struct SwsContext *c,
                int dstWeight, int dstHeight, AVFrame *dstFrame, FILE *dst)
{
    int ret = avcodec_send_packet(codecCtx, packet);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_send_packet failed: %s\n", av_err2str(ret));
        if (packet)  /* packet is NULL on the flush call */
            av_packet_unref(packet);
        return ret;
    }

    AVFrame *frame = av_frame_alloc();
    if (frame == NULL)
        return AVERROR(ENOMEM);

    /* drain every frame the decoder has ready */
    while ((ret = avcodec_receive_frame(codecCtx, frame)) == 0) {
        sws_scale(c, (const uint8_t *const *) frame->data, frame->linesize, 0,
                  codecCtx->height, dstFrame->data, dstFrame->linesize);
        char bmpFilename[64] = {0};
        snprintf(bmpFilename, sizeof(bmpFilename), "../picture/%d.bmp", frameCount);
        saveBmp(bmpFilename, dstFrame->data[0], dstWeight, dstHeight);
        frameCount++;
    }
    av_frame_free(&frame);

    /* EAGAIN: decoder wants more input; EOF: fully flushed — both are fine */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_receive_frame failed: %s\n", av_err2str(ret));
        return ret;
    }
    return 0;
}

/*
 * Usage: main inputFile outputFile WxH
 * Decodes the first/best video stream of inputFile, scales each frame to
 * WxH RGB24 and saves the frames as BMP files under ../picture/.
 */
int main(int argc, char **argv)
{
    av_log_set_level(AV_LOG_DEBUG);
    if (argc < 4) {
        /* the third argument (target video size) was missing from the old message */
        av_log(NULL, AV_LOG_ERROR, "Usage: %s inputFile outputFile widthxheight\n", argv[0]);
        return -1;
    }
    const char *inputFile = argv[1];
    const char *outputFile = argv[2];
    const char *dstVideoSizeString = argv[3];

    int ret;
    int rc = -1;                       /* process exit status */
    int dstHeight = 0;
    int dstWeight = 0;
    AVFormatContext *fCtx = NULL;
    AVCodecContext *codecCtx = NULL;
    struct SwsContext *swsCtx = NULL;
    AVFrame *dstFrame = NULL;
    uint8_t *outBuffer = NULL;
    FILE *dst = NULL;
    AVPacket *packet = NULL;

    ret = av_parse_video_size(&dstWeight, &dstHeight, dstVideoSizeString);
    if (ret < 0) {
        /* old code passed an extra argument for a single %s — UB */
        av_log(NULL, AV_LOG_ERROR, "av_parse_video_size failed: %s\n", av_err2str(ret));
        return -1;
    }

    if ((ret = avformat_open_input(&fCtx, inputFile, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Open input file %s failed: %s\n", inputFile, av_err2str(ret));
        return -1;
    }
    if ((ret = avformat_find_stream_info(fCtx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Find input file stream info failed: %s\n", av_err2str(ret));
        goto cleanup;
    }

    ret = av_find_best_stream(fCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "av_find_best_stream failed: %s\n", av_err2str(ret));
        goto cleanup;
    }
    int videoIndex = ret;

    codecCtx = avcodec_alloc_context3(NULL);
    if (codecCtx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_alloc_context3 failed\n");
        goto cleanup;
    }
    if ((ret = avcodec_parameters_to_context(codecCtx, fCtx->streams[videoIndex]->codecpar)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_parameters_to_context failed: %s\n", av_err2str(ret));
        goto cleanup;
    }
    const AVCodec *decoder = avcodec_find_decoder(codecCtx->codec_id);
    if (decoder == NULL) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_find_decoder failed\n");
        goto cleanup;
    }
    if ((ret = avcodec_open2(codecCtx, decoder, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failed: %s\n", av_err2str(ret));
        goto cleanup;
    }

    enum AVPixelFormat dstFormat = AV_PIX_FMT_RGB24;
    swsCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                            dstWeight, dstHeight, dstFormat,
                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (swsCtx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "sws_getContext failed\n");
        goto cleanup;
    }

    dstFrame = av_frame_alloc();
    if (dstFrame == NULL) {
        av_log(NULL, AV_LOG_ERROR, "av_frame_alloc failed\n");
        goto cleanup;
    }
    ret = av_image_get_buffer_size(dstFormat, dstWeight, dstHeight, 1);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "av_image_get_buffer_size failed:%s\n", av_err2str(ret));
        goto cleanup;
    }
    outBuffer = av_malloc(ret);
    if (outBuffer == NULL) {
        av_log(NULL, AV_LOG_ERROR, "av_malloc failed\n");
        goto cleanup;
    }
    /* align=1 => dstFrame->data[0] is a tightly packed width*3 RGB buffer,
     * which is exactly what saveBmp expects */
    av_image_fill_arrays(dstFrame->data, dstFrame->linesize, outBuffer,
                         dstFormat, dstWeight, dstHeight, 1);

    dst = fopen(outputFile, "wb");
    if (dst == NULL) {
        av_log(NULL, AV_LOG_ERROR, "open outputFile failed\n");
        goto cleanup;
    }

    packet = av_packet_alloc();
    if (!packet) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate AVPacket\n");
        goto cleanup;
    }

    while (av_read_frame(fCtx, packet) == 0) {
        if (packet->stream_index == videoIndex) {
            if (decodeVideo(codecCtx, packet, swsCtx, dstWeight, dstHeight, dstFrame, dst) < 0) {
                av_packet_unref(packet);
                break;
            }
        }
        av_packet_unref(packet);
    }
    /* NULL packet flushes the remaining buffered frames out of the decoder */
    decodeVideo(codecCtx, NULL, swsCtx, dstWeight, dstHeight, dstFrame, dst);
    rc = 0;

cleanup:
    /* all FFmpeg free functions below are NULL-safe */
    av_packet_free(&packet);
    if (dst)
        fclose(dst);
    av_freep(&outBuffer);
    av_frame_free(&dstFrame);
    sws_freeContext(swsCtx);
    avcodec_free_context(&codecCtx);
    avformat_close_input(&fCtx);
    return rc;
}
Makefile
# Build the FFmpeg frame-to-BMP extractor against a local FFmpeg install.
TARGET = main
SRC = main.c
CC = gcc
CFLAGS = -I /usr/local/ffmpeg/include
LDFLAGS = -L /usr/local/ffmpeg/lib
LDFLAGS += -lavutil -lavformat -lavcodec -lswscale

all: $(TARGET)

$(TARGET): $(SRC)
	$(CC) $(SRC) $(CFLAGS) $(LDFLAGS) -o $(TARGET)

# 'all' and 'clean' are not files; mark them phony so stray files
# with those names cannot shadow the rules.
.PHONY: all clean
clean:
	rm -rf $(TARGET)
标签:视频,return,ffmpeg,avcodec,ret,bmp,codecCtx,av,NULL From: https://www.cnblogs.com/navysummer/p/18219172