
A Simple RTMP Streaming Client (Qt Audio Recording, OpenCV Camera Capture, FFmpeg Encoding and Streaming)


        RTMP (Real-Time Messaging Protocol) is a protocol for transporting audio and video in real time, commonly used for live streaming.

        An RTMP push client is a tool that pushes audio and video data to a live-streaming server. In this project, audio recording is done with the Qt library, camera control and image handling with the OpenCV library, and encoding plus pushing to the RTMP server with the FFmpeg library.

        This article shows how to combine Qt audio recording, OpenCV camera capture and FFmpeg encoding into a client that pushes live audio and video to an RTMP server.

I. Environment

1. Qt version: Qt 5.12.12

2. Compiler: MSVC2017 64-bit

3. FFmpeg version: 6.1.1

4. OpenCV: 4.x

5. Complete project download (builds and runs as-is): https://download.csdn.net/download/u012959478/89646684

II. Design Overview

The client runs three main threads and uses two queues; the wiring is sketched right after the thread descriptions below.

Thread 1 (audio capture): records audio with Qt's audio input API and pushes the PCM data into the audio queue.

Thread 2 (video capture): grabs frames from the camera with OpenCV and pushes them into the video queue.

Thread 3 (streaming): reads frames from both queues, encodes them with FFmpeg, and pushes the packets to the configured server.
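
To make the wiring concrete, here is a minimal sketch of how the pieces fit together (the class names are the ones defined in the next section; the actual setup code lives in MainWindow::on_pushButton_clicked below):

AVFrameQueue audioQueue, videoQueue; // thread-safe frame buffers shared between threads

auto *audioThread = new AudioRecordThread(&audioQueue);           // thread 1: microphone -> audioQueue
auto *videoThread = new VideoCaptureThread(&videoQueue);          // thread 2: camera -> videoQueue
auto *pushThread  = new RtmpPushThread(&audioQueue, &videoQueue); // thread 3: queues -> encoders -> RTMP

audioThread->start();
videoThread->start();
pushThread->start();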

III. Example Code
AVFrameQueue.h
#pragma once

extern "C" {
#include "libavcodec/avcodec.h"
}

#include <QQueue>
#include <QMutex>

class AVFrameQueue
{
public:
    // Append an element at the tail; ownership of the frame's buffers moves into the queue
    void enqueue(AVFrame* value) {
        QMutexLocker locker(&m_mutex);
        AVFrame* tmp_frame = av_frame_alloc();
        av_frame_move_ref(tmp_frame, value);
        m_queue.enqueue(tmp_frame);
    }

    // Remove and return the head element (nullptr if empty); the caller must free it
    AVFrame* dequeue() {
        QMutexLocker locker(&m_mutex);
        if (m_queue.isEmpty()) {
            return nullptr;
        }
        return m_queue.dequeue();
    }

    // Whether the queue is empty
    bool isEmpty() const {
        QMutexLocker locker(&m_mutex);
        return m_queue.isEmpty();
    }

    int size() const {
        QMutexLocker locker(&m_mutex);
        return m_queue.size();
    }

    void clear() {
        QMutexLocker locker(&m_mutex);
        while (!m_queue.isEmpty()) {
            AVFrame* f = m_queue.dequeue();
            av_frame_free(&f); // free leftover frames, not just the pointers
        }
    }

private:
    QQueue<AVFrame*> m_queue;
    mutable QMutex m_mutex;
};
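
One subtlety worth spelling out: enqueue() uses av_frame_move_ref(), so the queue takes ownership of the frame's buffers and leaves the producer's frame as an empty shell, while dequeue() hands ownership to the consumer. A minimal usage sketch of that contract:

AVFrameQueue queue;

// producer side
AVFrame *frame = av_frame_alloc();
// ... fill frame ...
queue.enqueue(frame);   // buffers are moved into the queue
av_frame_free(&frame);  // frees only the now-empty shell

// consumer side
AVFrame *out = queue.dequeue();
if (out) {
    // ... use out ...
    av_frame_free(&out); // the consumer owns dequeued frames
}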
audiorecordthread.h (audio capture thread)
#ifndef AUDIORECORDTHREAD_H
#define AUDIORECORDTHREAD_H

#include <QThread>
#include <QAudioInput>
#include "AVFrameQueue.h"

extern "C"
{
#include <libswresample/swresample.h>
#include <libavformat/avformat.h>
}

class AudioRecordThread : public QThread
{
    Q_OBJECT
public:
    explicit AudioRecordThread(AVFrameQueue * frame_queue);
    ~AudioRecordThread();

    bool Init();

private:
    void run();
    bool InitResample();
    void increaseVolume(AVFrame *frame, double volume); // simple gain boost

private:
    SwrContext *_swr_ctx = nullptr;
    AVFrame* _pcmAvFrame = nullptr;
    QAudioInput *_input = nullptr;
    QIODevice *_io = nullptr;

    AVFrameQueue *_frame_queue = nullptr;

    int channels = 2; // number of channels
    int sampleRate = 44100; // sample rate
    int sampleByte = 2; // bytes per sample (2 bytes = 16 bits)
    int nbSamples = 1024; // samples per channel in one audio frame
};

#endif // AUDIORECORDTHREAD_H
audiorecordthread.cpp 
#include "audiorecordthread.h"
#include <QDebug>

AudioRecordThread::AudioRecordThread(AVFrameQueue * frame_queue):_frame_queue(frame_queue)
{
    connect(this, &AudioRecordThread::finished,this, &AudioRecordThread::deleteLater);
}


AudioRecordThread::~AudioRecordThread()
{
    requestInterruption();
    quit();
    wait(); // make sure run() has exited before releasing resources

    if (_input)
        _input->stop();
    _input = nullptr;

    if (_io)
        _io->close();
    _io = nullptr;

    swr_free(&_swr_ctx);
    av_frame_free(&_pcmAvFrame);

    qDebug() << "AudioRecordThread destroyed";
}

bool AudioRecordThread::Init()
{
    if(QAudioDeviceInfo::availableDevices(QAudio::AudioInput).size()<1)
    {
        qDebug() << "No audio input device found";
        return false;
    }

    QAudioFormat fmt;
    fmt.setSampleRate(sampleRate);
    fmt.setChannelCount(channels);
    fmt.setSampleSize(sampleByte * 8);
    fmt.setCodec("audio/pcm");
    fmt.setByteOrder(QAudioFormat::LittleEndian);
    fmt.setSampleType(QAudioFormat::SignedInt); // 16-bit PCM samples are signed, matching AV_SAMPLE_FMT_S16
    QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();
    if (!info.isFormatSupported(fmt)) {
        fmt = info.nearestFormat(fmt);
    }
    _input = new QAudioInput(fmt);
    // start capturing audio
    _io = _input->start();
    if (!_io)
        return false;

    if(!InitResample())
        return false;

    return true;
}

bool AudioRecordThread::InitResample()
{
    // initialize the resampling context (input and output formats are identical
    // here, so it is effectively a copy, but it keeps the pipeline uniform)
    _swr_ctx = swr_alloc_set_opts(nullptr,
                             av_get_default_channel_layout(channels), AV_SAMPLE_FMT_S16, sampleRate, // output format
                             av_get_default_channel_layout(channels), AV_SAMPLE_FMT_S16, sampleRate, 0, nullptr); // input format
    if (!_swr_ctx)
    {
        return false;
    }
    int ret = swr_init(_swr_ctx);
    if (ret < 0)
    {
        return false;
    }

    return true;
}

void AudioRecordThread::run()
{
    int readSize = nbSamples * channels * sampleByte;
    char* buf = new char[readSize];
    while(!isInterruptionRequested())
    {
        if (_frame_queue->size() > 10) {
            msleep(10);
            continue;
        }

        // read exactly one frame of audio at a time
        if (_input->bytesReady() < readSize)
        {
            QThread::msleep(1);
            continue;
        }
        int size = 0;
        while (size != readSize)
        {
            int len = _io->read(buf + size, readSize - size);
            if (len < 0)break;
            size += len;
        }

        if (size != readSize)continue;

        // one full frame of source PCM has been read
        const uint8_t *indata[AV_NUM_DATA_POINTERS] = { 0 };
        indata[0] = (uint8_t *)buf;

        // allocate the output frame for the resampler
        _pcmAvFrame = av_frame_alloc();
        _pcmAvFrame->format = AV_SAMPLE_FMT_S16;
        _pcmAvFrame->channels = channels;
        _pcmAvFrame->channel_layout = av_get_default_channel_layout(channels);
        _pcmAvFrame->nb_samples = nbSamples; // samples per channel in one frame
        av_frame_get_buffer(_pcmAvFrame, 0); // allocate the PCM buffers
        swr_convert(_swr_ctx, _pcmAvFrame->data, _pcmAvFrame->nb_samples, indata, nbSamples);
        increaseVolume(_pcmAvFrame, 8); // simple gain boost; no echo cancellation or noise suppression
        _frame_queue->enqueue(_pcmAvFrame); // moves the buffers into the queue
        av_frame_free(&_pcmAvFrame);        // free the now-empty shell so it does not leak

        msleep(1);
    }
    delete []buf;
}

void AudioRecordThread::increaseVolume(AVFrame *frame, double volume)
{
    int16_t *samples = (int16_t *)frame->data[0];
    int nb_samples = frame->nb_samples;
    int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
    // scale every sample by the gain factor, clamping to the 16-bit range
    for (int i = 0; i < nb_samples; i++)
    {
        for (int ch = 0; ch < channels; ch++)
        {
            int pcmval = samples[ch] * volume;
            if (pcmval < 32767 && pcmval > -32768)
            {
                samples[ch] = pcmval;
            }
            else if (pcmval > 32767)
            {
                samples[ch] = 32767;
            }
            else if (pcmval < -32768)
            {
                samples[ch] = -32768;
            }
        }
        samples += channels;
    }
}
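
For reference, the buffer math in the capture loop works out as: one audio frame is nbSamples × channels × sampleByte = 1024 × 2 × 2 = 4096 bytes, and at 44100 Hz those 1024 samples cover 1024 / 44100 ≈ 23.2 ms of sound, so the 1 ms polling sleep keeps up comfortably.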
videocapturethread.h (video capture thread)
#ifndef VIDEOCAPTURETHREAD_H
#define VIDEOCAPTURETHREAD_H

#include <QThread>
#include "AVFrameQueue.h"
#include "opencv2/opencv.hpp"

extern "C"
{
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

class VideoCaptureThread : public QThread
{
    Q_OBJECT
public:
    explicit VideoCaptureThread(AVFrameQueue * frame_queue);
    ~VideoCaptureThread();

    bool Init(int camIndex = 0); // open a local camera
    bool Init(const char* url); // open a network stream
    bool InitScale();

private:
    void run();
    AVFrame* RGBToYUV(cv::Mat &frame);

private:
    AVFrameQueue *_frame_queue = nullptr;
    cv::VideoCapture capture;

    SwsContext* _swsContext = nullptr; // pixel format conversion context
    AVFrame* _yuvAvFrame = nullptr; // holds the converted YUV data

    int inWidth;
    int inHeight;
//    int fps;

    int outWidth = 640;
    int outHeight = 360;
};

#endif // VIDEOCAPTURETHREAD_H
videocapturethread.cpp
#include "videocapturethread.h"
#include <QDebug>

VideoCaptureThread::VideoCaptureThread(AVFrameQueue * frame_queue) : _frame_queue(frame_queue)
{
    connect(this, &VideoCaptureThread::finished,this, &VideoCaptureThread::deleteLater);
}

VideoCaptureThread::~VideoCaptureThread()
{
    requestInterruption();
    quit();
    wait(); // make sure run() has exited before releasing resources

    if (capture.isOpened())
    {
        capture.release();
    }

    sws_freeContext(_swsContext);
    av_frame_free(&_yuvAvFrame);

    qDebug() << "VideoCaptureThread destroyed";
}

bool VideoCaptureThread::Init(int camIndex)
{
    // open the local camera
    capture.open(camIndex);
    if (!capture.isOpened())
    {
        return false;
    }

    // query the camera's parameters
    inWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH);
    inHeight = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
//    fps = capture.get(cv::CAP_PROP_FPS);

    return true;
}

bool VideoCaptureThread::Init(const char *url)
{
    capture.open(url);
    if (!capture.isOpened())
    {
        return false;
    }

    // query the stream's parameters
    inWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH);
    inHeight = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
//    fps = capture.get(cv::CAP_PROP_FPS);

    return true;
}

bool VideoCaptureThread::InitScale()
{
    _swsContext = sws_getCachedContext(_swsContext,
                                      inWidth, inHeight, AV_PIX_FMT_BGR24,
                                      outWidth, outHeight, AV_PIX_FMT_YUV420P,
                                      SWS_BICUBIC,
                                      0, 0, 0);

    if (!_swsContext)
    {
        return false;
    }

    return true;
}

void VideoCaptureThread::run()
{
    cv::Mat frame;
    while(!isInterruptionRequested())
    {
        if (_frame_queue->size() > 10) {
            msleep(10);
            continue;
        }

        // grab one frame
        if (!capture.read(frame)) {
            msleep(1); // nothing available yet, wait 1 ms
            continue;
        }

        AVFrame *yuv = RGBToYUV(frame);
        if (yuv) {
            _frame_queue->enqueue(yuv);  // moves the buffers into the queue
            av_frame_free(&_yuvAvFrame); // free the now-empty shell (yuv aliases the member)
        }

        msleep(1);
    }
}

AVFrame* VideoCaptureThread::RGBToYUV(cv::Mat &frame)
{
    // describe the input BGR data layout
    uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
    indata[0] = frame.data;
    int insize[AV_NUM_DATA_POINTERS] = {0};
    // number of bytes in one row (width * bytes per pixel)
    insize[0] = frame.cols * frame.elemSize();

    _yuvAvFrame = av_frame_alloc();
    _yuvAvFrame->format = AV_PIX_FMT_YUV420P;
    _yuvAvFrame->width = outWidth;
    _yuvAvFrame->height = outHeight;
    _yuvAvFrame->pts = 0;
    // allocate the actual YUV buffers
    int ret = av_frame_get_buffer(_yuvAvFrame, 0);
    if (ret != 0)
    {
        av_frame_free(&_yuvAvFrame); // don't leak the frame on failure
        return nullptr;
    }

    // convert; the result is written into _yuvAvFrame->data
    int h = sws_scale(_swsContext, indata, insize, 0, frame.rows,
                      _yuvAvFrame->data, _yuvAvFrame->linesize);
    if (h <= 0)
    {
        av_frame_free(&_yuvAvFrame);
        return nullptr;
    }

    return _yuvAvFrame;
}
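
A caveat in RGBToYUV(): sws_scale() is given a single base pointer (frame.data) and a stride of cols × elemSize(), which is only correct when the Mat stores its rows contiguously. Frames from cv::VideoCapture normally are contiguous, but a defensive guard costs little; a sketch of what that could look like (an addition, not part of the original code):

// inside RGBToYUV(), before filling indata/insize
if (!frame.isContinuous())
    frame = frame.clone();                    // force a contiguous buffer
insize[0] = static_cast<int>(frame.step[0]);  // use the Mat's own row stride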
 mediaencode.h
#ifndef MEDIAENCODE_H
#define MEDIAENCODE_H

#include <QObject>

extern "C"
{
#include <libavcodec/avcodec.h>
}

class MediaEncode : public QObject
{
    Q_OBJECT
public:
    explicit MediaEncode(QObject *parent = nullptr);
    ~MediaEncode();

    bool InitVideoCodec(); // initialize the video encoder
    AVPacket* EncodeVideo(AVFrame* frame); // encode one video frame

    bool InitAudioCodec(); // initialize the audio encoder
    AVPacket* EncodeAudio(AVFrame* frame); // encode one audio frame

public:
    // video encoder context, YUV -> H.264
    AVCodecContext* _videoCodecContext = nullptr;
    // audio encoder context, PCM -> AAC
    AVCodecContext* _audioCodecContext = nullptr;

private:
    int outWidth = 640; // must match the capture output size
    int outHeight = 360;
    int fps = 30;

    int videoPts = 0;
    int audioPts = 0;
    AVPacket outAudioPacket = {0};
    AVPacket outVideoPacket = {0};
};

#endif // MEDIAENCODE_H
mediaencode.cpp
#include "mediaencode.h"

MediaEncode::MediaEncode(QObject *parent) : QObject(parent)
{
    InitVideoCodec();
    InitAudioCodec();
}

MediaEncode::~MediaEncode()
{
    avcodec_free_context(&_videoCodecContext);
    avcodec_free_context(&_audioCodecContext);
}

bool MediaEncode::InitVideoCodec()
{
    int ret = 0;
    // find the H.264 encoder
    const AVCodec* videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!videoCodec) {
        return false;
    }
    // allocate the encoder context
    _videoCodecContext = avcodec_alloc_context3(videoCodec);
    if (!_videoCodecContext) {
        return false;
    }
    // configure the encoder parameters
    _videoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    _videoCodecContext->codec_id = videoCodec->id;
//    _videoCodecContext->thread_count = 8;

    // target bitrate of the compressed video, in bits per second
    _videoCodecContext->bit_rate = 1200 * 1024;
    _videoCodecContext->width = outWidth;
    _videoCodecContext->height = outHeight;
    _videoCodecContext->time_base = {1, fps};
    _videoCodecContext->framerate = {fps, 1};
    // GOP size: one keyframe every 15 frames
    _videoCodecContext->gop_size = 15;
    _videoCodecContext->max_b_frames = 0;
    _videoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    // open the encoder
    ret = avcodec_open2(_videoCodecContext, 0, 0);
    if (ret != 0) {
        return false;
    }

    return true;
}

AVPacket* MediaEncode::EncodeVideo(AVFrame* frame)
{
    // H.264 pts must increase monotonically
    frame->pts = videoPts;
    videoPts++;

    // submit the raw frame for encoding
    int ret = avcodec_send_frame(_videoCodecContext, frame);
    if (ret != 0) {
        return nullptr;
    }

    av_packet_unref(&outVideoPacket);
    ret = avcodec_receive_packet(_videoCodecContext, &outVideoPacket);
    if (ret != 0 || outVideoPacket.size <= 0) {
        return nullptr;
    }

    return &outVideoPacket;
}

bool MediaEncode::InitAudioCodec()
{
    const AVCodec *codec = avcodec_find_encoder_by_name("libfdk_aac");
    if(!codec){
        return false;
    }

    _audioCodecContext = avcodec_alloc_context3(codec);
    if (!_audioCodecContext) {
        return false;
    }

    _audioCodecContext->sample_fmt = AV_SAMPLE_FMT_S16;       // input sample format; libfdk_aac expects 16-bit input
    _audioCodecContext->channel_layout = AV_CH_LAYOUT_STEREO; // input channel layout
    _audioCodecContext->channels = 2;                         // input channel count
    _audioCodecContext->sample_rate = 44100;                  // input sample rate
    _audioCodecContext->bit_rate = 0;                         // AAC-LC: 128k, AAC-HE: 64k, AAC-HE v2: 32k; with 0 the encoder falls back to the profile setting
//    _audioCodecContext->thread_count = 8;

    // open the encoder
    int ret = avcodec_open2(_audioCodecContext,codec,nullptr);
    if (ret < 0) {
        return false;
    }

    return true;
}

AVPacket * MediaEncode::EncodeAudio(AVFrame* frame)
{
    frame->pts = audioPts;
    audioPts += av_rescale_q(frame->nb_samples, { 1, 44100 }, _audioCodecContext->time_base);
    int ret = avcodec_send_frame(_audioCodecContext, frame);
    if (ret != 0)
        return nullptr;

    av_packet_unref(&outAudioPacket);
    ret = avcodec_receive_packet(_audioCodecContext, &outAudioPacket);
    if (ret != 0)
        return nullptr;

    return &outAudioPacket;
}
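
One hidden coupling between the two ends of the pipeline: AudioRecordThread produces frames of 1024 samples per channel, and AAC happens to use exactly that frame size. After avcodec_open2() the encoder publishes the value it expects in frame_size, so a cheap sanity check (an addition, not in the original) can catch a mismatch at startup instead of at the first avcodec_send_frame():

// e.g. at the end of InitAudioCodec(), after avcodec_open2()
if (_audioCodecContext->frame_size != 1024) {
    // nbSamples in AudioRecordThread must match this value
    return false;
}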
rtmppushthread.h (streaming thread)
#ifndef RTMPPUSHTHREAD_H
#define RTMPPUSHTHREAD_H

#include <QThread>
#include "AVFrameQueue.h"
#include "mediaencode.h"
extern "C"
{
#include <libavformat/avformat.h>
}

class RtmpPushThread : public QThread
{
    Q_OBJECT
public:
    explicit RtmpPushThread(AVFrameQueue *audioFrameQueue,AVFrameQueue *videoFrameQueue,QObject *parent = nullptr);
    ~RtmpPushThread();

    bool InitMux(const char* url);

private:
    void run();
    // add a video or audio stream to the muxer
    int AddStream(const AVCodecContext* codecContext);
    // open the RTMP network IO and send the mux header
    bool SendMuxHead();
    // push one encoded packet to the RTMP server
    bool SendFrame(AVPacket* pack, int streamIndex);

private:
    AVFrameQueue *_audioFrameQueue = nullptr;
    AVFrameQueue *_videoFrameQueue = nullptr;
    MediaEncode *_mediaEncode = nullptr;
    AVFormatContext* _avFormatContext = nullptr; // FLV muxer
    const AVCodecContext *_videoCodecContext = nullptr;
    const AVCodecContext *_audioCodecContext = nullptr;
    AVStream *_videoStream = nullptr;
    AVStream *_audioStream = nullptr;
    std::string outURL = "";
};

#endif // RTMPPUSHTHREAD_H
rtmppushthread.cpp
#include "rtmppushthread.h"
#include <QDebug>

RtmpPushThread::RtmpPushThread(AVFrameQueue *audioFrameQueue,AVFrameQueue *videoFrameQueue,QObject *parent)
    : QThread(parent),_audioFrameQueue(audioFrameQueue),_videoFrameQueue(videoFrameQueue)
{
    connect(this, &RtmpPushThread::finished,this, &RtmpPushThread::deleteLater);

    _mediaEncode = new MediaEncode(this);
}

RtmpPushThread::~RtmpPushThread()
{
    requestInterruption();
    quit();
    wait(); // make sure run() has exited before tearing down the muxer

    if (_avFormatContext)
    {
        if (_avFormatContext->pb)
        {
            av_write_trailer(_avFormatContext); // finish the stream (header was written in SendMuxHead)
            avio_closep(&_avFormatContext->pb);
        }
        // this is an output context, so free it directly;
        // avformat_close_input() is only for demuxer contexts
        avformat_free_context(_avFormatContext);
        _avFormatContext = nullptr;
    }

    qDebug() << "RtmpPushThread destroyed";
}

bool RtmpPushThread::InitMux(const char* url)
{
    int ret = avformat_alloc_output_context2(&_avFormatContext, 0, "flv", url);
    outURL = url;
    if (ret < 0) { // negative AVERROR code on failure
        return false;
    }
    return true;
}

void RtmpPushThread::run()
{
    int aindex = AddStream(_mediaEncode->_audioCodecContext);
    int vindex = AddStream(_mediaEncode->_videoCodecContext);
    if (aindex < 0 || vindex < 0) // an encoder failed to initialize (e.g. FFmpeg built without libfdk_aac)
        return;

    if(!SendMuxHead())
        return;

    while(!isInterruptionRequested())
    {
        AVFrame *audioFrame = _audioFrameQueue->dequeue();
        AVFrame *videoFrame = _videoFrameQueue->dequeue();

        if (audioFrame == nullptr && videoFrame == nullptr)
        {
            msleep(1);
            continue;
        }

        // audio: encode and push
        if (audioFrame)
        {
            AVPacket *pkt = _mediaEncode->EncodeAudio(audioFrame);
            if (pkt)
            {
                SendFrame(pkt, aindex); // push
            }
            av_frame_free(&audioFrame);
        }

        // video: encode and push
        if (videoFrame)
        {
            AVPacket *pkt = _mediaEncode->EncodeVideo(videoFrame);
            if (pkt)
            {
                SendFrame(pkt, vindex); // push
            }
            av_frame_free(&videoFrame);
        }

        msleep(1);
    }
}

int RtmpPushThread::AddStream(const AVCodecContext* codecContext) {
    if (!codecContext) {
        return -1;
    }

    // create a new stream on the muxer
    AVStream* avStream = avformat_new_stream(_avFormatContext, NULL);
    if (!avStream) {
        return -1;
    }

    avStream->codecpar->codec_tag = 0;
    // copy the codec parameters from the encoder context
    avcodec_parameters_from_context(avStream->codecpar, codecContext);
    av_dump_format(_avFormatContext, 0, outURL.c_str(), 1);

    if (codecContext->codec_type == AVMEDIA_TYPE_VIDEO) {
        _videoCodecContext = codecContext;
        _videoStream = avStream;
    }
    else if (codecContext->codec_type == AVMEDIA_TYPE_AUDIO) {
        _audioCodecContext = codecContext;
        _audioStream = avStream;
    }
    return avStream->index;
}

// open the RTMP network IO and send the mux header
bool RtmpPushThread::SendMuxHead() {
    // open the RTMP network output IO
    int ret = avio_open(&_avFormatContext->pb, outURL.c_str(), AVIO_FLAG_WRITE);
    if (ret != 0)
    {
        return false;
    }

    // write the container header
    ret = avformat_write_header(_avFormatContext, NULL);
    if (ret != 0)
    {
        return false;
    }

    return true;
}

bool RtmpPushThread::SendFrame(AVPacket* pack, int streamIndex)
{
    if (!pack || pack->size <= 0 || !pack->data)
        return false;

    pack->stream_index = streamIndex;

    AVRational stime;
    AVRational dtime;

    // select time bases depending on whether the packet is audio or video
    if (_videoStream && _videoCodecContext && pack->stream_index == _videoStream->index)
    {
        stime = _videoCodecContext->time_base;
        dtime = _videoStream->time_base;
    }
    else if (_audioStream && _audioCodecContext &&pack->stream_index == _audioStream->index)
    {
        stime = _audioCodecContext->time_base;
        dtime = _audioStream->time_base;
    }
    else
    {
        return false;
    }

    // rescale timestamps from the encoder time base to the stream time base, then write
    pack->pts = av_rescale_q(pack->pts, stime, dtime);
    pack->dts = av_rescale_q(pack->dts, stime, dtime);
    pack->duration = av_rescale_q(pack->duration, stime, dtime);
    int ret = av_interleaved_write_frame(_avFormatContext, pack);
    if (ret == 0)
    {
        return true;
    }

    return false;
}
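
To make the av_rescale_q() calls in SendFrame() concrete: the video encoder stamps packets in its own time_base of {1, 30} (one tick per frame at 30 fps), while the FLV muxer's streams typically use {1, 1000} (milliseconds) after avformat_write_header(). A hypothetical frame number 30 therefore rescales to 1000:

AVRational stime = {1, 30};                   // encoder time base
AVRational dtime = {1, 1000};                 // FLV stream time base
int64_t pts = av_rescale_q(30, stime, dtime); // 30 * (1/30) / (1/1000) = 1000, i.e. 1 s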
UI design: mainwindow.ui (a QLineEdit for the RTMP URL and a QPushButton that starts/stops streaming)

mainwindow.h
#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include "rtmppushthread.h"
#include "audiorecordthread.h"
#include "videocapturethread.h"

QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
QT_END_NAMESPACE

class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();

private slots:
    void on_pushButton_clicked();
    void onPushThreadFinished();

private:
    Ui::MainWindow *ui;

    AVFrameQueue audioFrameQueue;
    AVFrameQueue videoFrameQueue;
    RtmpPushThread *_pushThread = nullptr;
    AudioRecordThread *_audioThread = nullptr;
    VideoCaptureThread *_videoThread = nullptr;
};
#endif // MAINWINDOW_H
mainwindow.cpp
#include "mainwindow.h"
#include "ui_mainwindow.h"

MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    ui->lineEdit->setText("rtmp://192.168.37.128/live/livestream");
    avformat_network_init();
}

MainWindow::~MainWindow()
{
    delete ui;
}


void MainWindow::on_pushButton_clicked()
{
    if(!_pushThread)
    {
        _audioThread = new AudioRecordThread(&audioFrameQueue);
        if(!_audioThread->Init())
            return;

        _videoThread = new VideoCaptureThread(&videoFrameQueue);
        if(!_videoThread->Init() || !_videoThread->InitScale())
            return;

        _pushThread = new RtmpPushThread(&audioFrameQueue,&videoFrameQueue,this);
        connect(_pushThread,&RtmpPushThread::finished,this,&MainWindow::onPushThreadFinished);
        if(!_pushThread->InitMux(ui->lineEdit->text().toUtf8().data()))
            return;

        _audioThread->start();
        _videoThread->start();
        _pushThread->start();
        ui->pushButton->setText("停止推流");
    }
    else
    {
        _audioThread->requestInterruption();
        _videoThread->requestInterruption();
        _pushThread->requestInterruption();
    }
}

void MainWindow::onPushThreadFinished()
{
    _pushThread = nullptr;
    _audioThread = nullptr;
    _videoThread = nullptr;
    audioFrameQueue.clear();
    videoFrameQueue.clear();
    ui->pushButton->setText("开始推流");
}

        That covers how to combine Qt audio recording, OpenCV camera capture and FFmpeg encoding to push audio and video to an RTMP server in real time.

        A client built this way fits many scenarios, such as live broadcasting, video conferencing and surveillance systems.

IV. Running the Demo

1. Start your self-hosted SRS server; for setup details see the companion post on building an RTMP streaming server with SRS on Ubuntu 24.04 (CSDN blog).

2. Run the program and start streaming.

        That is the whole flow: start the SRS server, then start pushing. Adjust the paths and parameters to your own setup, and refer to the SRS documentation for the server-side details.

        This is a simple RTMP streaming client example: Qt records the audio, OpenCV captures the video, and FFmpeg encodes and pushes the stream. Note that it is only a demo; a production RTMP client will need more features and more thorough error handling. Feel free to modify and extend it for your own needs.

        Thanks for reading. I hope this article was helpful and gave you some new ideas. If you have any questions or comments, feel free to contact me.

V. Related Articles

Compiling FFmpeg with Visual Studio 2022 on Windows (with x264, x265 and fdk-aac support) - CSDN blog

Compiling OpenCV with Visual Studio 2022 on Windows - CSDN blog

