
RTMP Push Streaming: Adding H265 Support



The RTMP protocol itself does not support H265. However, today's devices increasingly demand higher compression ratios and better image quality, so H265 gets more attention from vendors than other media formats. To carry H265 over RTMP, a codec ID must first be defined; by common convention, 12 (0xC) is used. At the same time, the NALU analysis has to change compared with H264, and the metadata sent to the server has to be modified accordingly.
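As a concrete illustration of the NALU difference, here is a minimal sketch (not from the original code; the helper names are made up): the NAL unit type sits in different bits of the first header byte for the two codecs, and the HEVC codec ID 12 ends up in the low nibble of the FLV video tag's first byte.

// Minimal illustrative sketch (helper names are hypothetical):
// H.264 NAL header: forbidden_zero_bit(1) | nal_ref_idc(2) | nal_unit_type(5)
static inline int GetH264NalType(const unsigned char* nal) { return nal[0] & 0x1F; }

// H.265 NAL header: forbidden_zero_bit(1) | nal_unit_type(6) | nuh_layer_id(6) | nuh_temporal_id_plus1(3)
static inline int GetH265NalType(const unsigned char* nal) { return (nal[0] >> 1) & 0x3F; }

// With CodecID 12 (0xC) for HEVC, the first byte of an RTMP/FLV video tag becomes
// (FrameType << 4) | 0x0C: 0x1C for a key frame, 0x2C for an inter frame.
// H.265 NAL types of interest: VPS = 32, SPS = 33, PPS = 34, IDR = 19/20.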

First, let's look at sending the metadata (the VPS/SPS/PPS sequence header):

int SendVideoSpsPpsVps(RTMP* r, unsigned char* pps, int pps_len, unsigned char* sps, int sps_len, unsigned char* vps,
	int vps_len, uint32_t dts)
{
	char        tBuffer[RTMP_HEAD_SIZE + 1024] = { 0 };
	RTMPPacket* packet = (RTMPPacket*)tBuffer;
	packet->m_body = (char*)packet + RTMP_HEAD_SIZE;
	unsigned char* body = (unsigned char*)packet->m_body;
	// See the hvcc_write function: http://ffmpeg.org/doxygen/trunk/hevc_8c_source.html#l00040
	// nginx-rtmp skips 48 bits here without parsing them, so we simply pad zeros further below.
	int i = 0;
	// FLV video tag header: FrameType 1 (keyframe) << 4 | CodecID 12 (0xC, HEVC)
	body[i++] = 0x1C;
	body[i++] = 0x0;   // HEVCPacketType 0 = sequence header
	body[i++] = 0x0;   // composition time offset (24 bit)
	body[i++] = 0x0;
	body[i++] = 0x0;
	body[i++] = 0x1;   // configurationVersion
	// general_profile_idc 8bit
	body[i++] = sps[1];
	// general_profile_compatibility_flags 32 bit
	body[i++] = sps[2];
	body[i++] = sps[3];
	body[i++] = sps[4];
	body[i++] = sps[5];

	// general_constraint_indicator_flags (48 bit); most RTMP servers do not parse these
	body[i++] = sps[6];
	body[i++] = sps[7];
	body[i++] = sps[8];
	body[i++] = sps[9];
	body[i++] = sps[10];
	body[i++] = sps[11];

	// general_level_idc
	body[i++] = sps[12];

	// 8 bytes the servers do not parse, left as zero: min_spatial_segmentation_idc,
	// parallelismType, chromaFormat, bitDepthLumaMinus8, bitDepthChromaMinus8, avgFrameRate(16)
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	body[i++] = 0;
	// constantFrameRate(2) | numTemporalLayers(3) | temporalIdNested(1) | lengthSizeMinusOne(2)
	// 0x83: lengthSizeMinusOne = 3, i.e. every NALU is prefixed with a 4-byte length
	body[i++] = 0x83;

	/* unsigned int(8) numOfArrays; 03 */
	body[i++] = 0x03;
	// array 0: VPS (NAL unit type 32)
	body[i++] = 0x20;
	body[i++] = (1 >> 8) & 0xff;  // numNalus = 1
	body[i++] = 1 & 0xff;
	body[i++] = (vps_len >> 8) & 0xff;
	body[i++] = (vps_len) & 0xff;
	memcpy(&body[i], vps, vps_len);
	i += vps_len;

	// sps
	body[i++] = 0x21;  // sps 33
	body[i++] = 0;
	body[i++] = 1;
	body[i++] = (sps_len >> 8) & 0xff;
	body[i++] = sps_len & 0xff;
	memcpy(&body[i], sps, sps_len);
	i += sps_len;

	// pps
	body[i++] = 0x22;  // pps 34
	body[i++] = (1 >> 8) & 0xff;
	body[i++] = 1 & 0xff;
	body[i++] = (pps_len >> 8) & 0xff;
	body[i++] = (pps_len) & 0xff;
	memcpy(&body[i], pps, pps_len);
	i += pps_len;
	packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
	packet->m_nBodySize = i;
	packet->m_nChannel = 0x04;
	packet->m_nTimeStamp = dts;
	packet->m_hasAbsTimestamp = 0;
	packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
	packet->m_nInfoField2 = r->m_stream_id;
	int nRet = 0;
	if (RTMP_IsConnected(r))
		nRet = RTMP_SendPacket(r, packet, 0);  // queue flag: 1 = put into the send queue, 0 = send immediately
	return nRet;
}


Note the byte written as 0x83 above (the one that follows avgFrameRate and packs constantFrameRate, numTemporalLayers, temporalIdNested and lengthSizeMinusOne). Some articles write 0 here or skip the byte entirely, which can leave certain servers unable to handle the stream: the low two bits are lengthSizeMinusOne, and they need to be 3 so that the 4-byte NALU length prefixes are interpreted correctly.
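The function above only sends the sequence header. Each coded frame then has to be wrapped in its own video tag with HEVCPacketType 1 and a 4-byte length in front of every NALU (matching lengthSizeMinusOne = 3). The original post does not show that step, so the following is only a minimal sketch under the same conventions; the function name SendH265Packet is chosen for illustration.

// Illustrative companion to SendVideoSpsPpsVps (not from the original post):
// send one H.265 NALU (start code already stripped) as an RTMP video packet.
int SendH265Packet(RTMP* r, unsigned char* nalu, int nalu_len, int is_keyframe, uint32_t dts)
{
	int body_size = 5 + 4 + nalu_len;  // 5-byte tag header + 4-byte NALU length + payload
	char* tBuffer = (char*)calloc(1, RTMP_HEAD_SIZE + body_size);
	if (tBuffer == NULL)
		return 0;
	RTMPPacket* packet = (RTMPPacket*)tBuffer;
	packet->m_body = (char*)packet + RTMP_HEAD_SIZE;
	unsigned char* body = (unsigned char*)packet->m_body;

	int i = 0;
	body[i++] = is_keyframe ? 0x1C : 0x2C;  // FrameType(1/2) << 4 | CodecID 12 (HEVC)
	body[i++] = 0x01;                       // HEVCPacketType 1 = NALU (0 = sequence header)
	body[i++] = 0x00;                       // composition time offset (24 bit), 0 when pts == dts
	body[i++] = 0x00;
	body[i++] = 0x00;
	body[i++] = (nalu_len >> 24) & 0xff;    // 4-byte big-endian NALU length
	body[i++] = (nalu_len >> 16) & 0xff;
	body[i++] = (nalu_len >> 8) & 0xff;
	body[i++] = nalu_len & 0xff;
	memcpy(&body[i], nalu, nalu_len);

	packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
	packet->m_nBodySize = body_size;
	packet->m_nChannel = 0x04;
	packet->m_nTimeStamp = dts;
	packet->m_hasAbsTimestamp = 0;
	packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
	packet->m_nInfoField2 = r->m_stream_id;

	int nRet = 0;
	if (RTMP_IsConnected(r))
		nRet = RTMP_SendPacket(r, packet, 0);
	free(tBuffer);
	return nRet;
}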

In addition, the SPS has to be parsed when building the media information, and H265 SPS parsing differs from H264's.

#ifndef h265_decode_info_h__
#define h265_decode_info_h__


#include <iostream>
#include <sstream>
//#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#ifdef _WIN32
#include <windows.h>
#else
// Windows-style integer types used below, defined here for non-Windows builds
typedef unsigned char  BYTE;
typedef BYTE*          LPBYTE;
typedef uint32_t       UINT32;
typedef int32_t        INT32;
typedef long           LONG;
typedef unsigned long  DWORD;
#endif
struct vc_params_t
{
	LONG width, height;
	DWORD profile, level;
	DWORD nal_length_size;
	void clear()
	{
		memset(this, 0, sizeof(*this));
	}
};

class NALBitstream
{
public:
	NALBitstream() : m_data(NULL), m_len(0), m_idx(0), m_bits(0), m_byte(0), m_zeros(0) {};
	NALBitstream(void * data, int len) { Init(data, len); };
	void Init(void * data, int len) { m_data = (LPBYTE)data; m_len = len; m_idx = 0; m_bits = 0; m_byte = 0; m_zeros = 0; };


	BYTE GetBYTE()
	{
		//printf("m_idx=%d,m_len=%d\n", m_idx, m_len);
		if (m_idx >= m_len)
			return 0;
		BYTE b = m_data[m_idx++];

		// to avoid start-code emulation, a byte 0x03 is inserted   
		// after any 00 00 pair. Discard that here.   
		if (b == 0)
		{
			m_zeros++;
			if ((m_idx < m_len) && (m_zeros == 2) && (m_data[m_idx] == 0x03))
			{

				m_idx++;
				m_zeros = 0;
			}
		}

		else
		{
			m_zeros = 0;

		}
		return b;
	};


	UINT32 GetBit()
	{
		
		if (m_bits == 0)
		{
			m_byte = GetBYTE();
			m_bits = 8;

		}
		m_bits--;
		return (m_byte >> m_bits) & 0x1;
	};

	UINT32 GetWord(int bits)
	{

		UINT32 u = 0;
		while (bits > 0)

		{
			u <<= 1;
			u |= GetBit();
			bits--;
		}
		return u;
	};
	UINT32 GetUE()
	{

		// Exp-Golomb entropy coding: leading zeros, then a one, then   
		// the data bits. The number of leading zeros is the number of   
		// data bits, counting up from that number of 1s as the base.   
		// That is, if you see   
		//      0001010   
		// You have three leading zeros, so there are three data bits (010)   
		// counting up from a base of 111: thus 111 + 010 = 1001 = 9   
		int zeros = 0;
		while (m_idx < m_len && GetBit() == 0) zeros++;
		return GetWord(zeros) + ((1 << zeros) - 1);
	};


	INT32 GetSE()
	{

		// same as UE but signed.   
		// basically the unsigned numbers are used as codes to indicate signed numbers in pairs   
		// in increasing value. Thus the encoded values   
		//      0, 1, 2, 3, 4   
		// mean   
		//      0, 1, -1, 2, -2 etc   
		UINT32 UE = GetUE();
		bool positive = UE & 1;
		INT32 SE = (UE + 1) >> 1;
		if (!positive)
		{
			SE = -SE;
		}
		return SE;
	};


private:
	LPBYTE m_data;
	int m_len;
	int m_idx;
	int m_bits;
	BYTE m_byte;
	int m_zeros;
};


bool  ParseSequenceParameterSet(BYTE* data, int size, vc_params_t& params)
{
	if (size < 20)
	{
		return false;
	}

	NALBitstream bs(data, size);

	// seq_parameter_set_rbsp()   
	bs.GetWord(4);// sps_video_parameter_set_id   
	int sps_max_sub_layers_minus1 = bs.GetWord(3); // "The value of sps_max_sub_layers_minus1 shall be in the range of 0 to 6, inclusive."   
	if (sps_max_sub_layers_minus1 > 6)
	{
		return false;
	}
	bs.GetWord(1);// sps_temporal_id_nesting_flag   
				  // profile_tier_level( sps_max_sub_layers_minus1 )   
	{
		bs.GetWord(2);// general_profile_space   
		bs.GetWord(1);// general_tier_flag   
		params.profile = bs.GetWord(5);// general_profile_idc   
		bs.GetWord(32);// general_profile_compatibility_flag[32]   
		bs.GetWord(1);// general_progressive_source_flag   
		bs.GetWord(1);// general_interlaced_source_flag   
		bs.GetWord(1);// general_non_packed_constraint_flag   
		bs.GetWord(1);// general_frame_only_constraint_flag   
		bs.GetWord(44);// general_reserved_zero_44bits   
		params.level = bs.GetWord(8);// general_level_idc   
		unsigned char sub_layer_profile_present_flag[6] = { 0 };
		unsigned char sub_layer_level_present_flag[6] = { 0 };
		for (int i = 0; i < sps_max_sub_layers_minus1; i++)
		{
			sub_layer_profile_present_flag[i] = bs.GetWord(1);
			sub_layer_level_present_flag[i] = bs.GetWord(1);
		}
		if (sps_max_sub_layers_minus1 > 0)
		{
			for (int i = sps_max_sub_layers_minus1; i < 8; i++)
			{
				unsigned char reserved_zero_2bits = bs.GetWord(2);
			}
		}
		for (int i = 0; i < sps_max_sub_layers_minus1; i++)
		{
			if (sub_layer_profile_present_flag[i])
			{
				bs.GetWord(2);// sub_layer_profile_space[i]   
				bs.GetWord(1);// sub_layer_tier_flag[i]   
				bs.GetWord(5);// sub_layer_profile_idc[i]   
				bs.GetWord(32);// sub_layer_profile_compatibility_flag[i][32]   
				bs.GetWord(1);// sub_layer_progressive_source_flag[i]   
				bs.GetWord(1);// sub_layer_interlaced_source_flag[i]   
				bs.GetWord(1);// sub_layer_non_packed_constraint_flag[i]   
				bs.GetWord(1);// sub_layer_frame_only_constraint_flag[i]   
				bs.GetWord(44);// sub_layer_reserved_zero_44bits[i]   
			}
			if (sub_layer_level_present_flag[i])
			{
				bs.GetWord(8);// sub_layer_level_idc[i]   
			}
		}
	}
	unsigned long sps_seq_parameter_set_id = bs.GetUE(); // "The  value  of sps_seq_parameter_set_id shall be in the range of 0 to 15, inclusive."   
	/*if (sps_seq_parameter_set_id > 15)
	{
		printf("enter2\r\n");
		return false;
	}*/
	unsigned long chroma_format_idc = bs.GetUE(); // "The value of chroma_format_idc shall be in the range of 0 to 3, inclusive."
	/*if (chroma_format_idc > 3)
	{
		printf("enter3\r\n");
		return false;
	}*/
	if (chroma_format_idc == 3)
	{
		bs.GetWord(1);// separate_colour_plane_flag   
	}
	params.width = bs.GetUE(); // pic_width_in_luma_samples   
	params.height = bs.GetUE(); // pic_height_in_luma_samples   
	if (bs.GetWord(1))
	{// conformance_window_flag   
		bs.GetUE();  // conf_win_left_offset   
		bs.GetUE();  // conf_win_right_offset   
		bs.GetUE();  // conf_win_top_offset   
		bs.GetUE();  // conf_win_bottom_offset   
	}
	unsigned long bit_depth_luma_minus8 = bs.GetUE();
	unsigned long bit_depth_chroma_minus8 = bs.GetUE();
	/*if (bit_depth_luma_minus8 != bit_depth_chroma_minus8)
	{
		printf("enter4\r\n");
		return false;
	}*/
	//...   


	return true;
}

#endif // h265_decode_info_h__
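A short usage sketch (illustrative, not from the original post): because the parser starts reading directly at sps_video_parameter_set_id, the buffer passed in must already point past the start code and the two-byte H.265 NAL unit header.

// Illustrative call site: sps/sps_len describe the SPS payload with the
// 00 00 00 01 start code and the 2-byte NAL unit header already skipped.
vc_params_t params;
params.clear();
if (ParseSequenceParameterSet(sps, sps_len, params))
{
	printf("H.265 SPS: %ldx%ld, profile %lu, level %lu\n",
		(long)params.width, (long)params.height,
		(unsigned long)params.profile, (unsigned long)params.level);
	// params.width / params.height feed the onMetaData fields below.
}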


With the SPS parsed, the header information can be filled in according to the actual media format and sent:

if(lpMetaData == NULL)
	{
		return -1;
	}
	char buffer[1024] = {0};
	char *body = buffer+RTMP_MAX_HEADER_SIZE;

	char* p = (char*)body;
	p = put_byte(p, AMF_STRING);
	p = put_amf_string(p, "@setDataFrame");

	p = put_byte(p, AMF_STRING);
	p = put_amf_string(p, "onMetaData");

	p = put_byte(p, AMF_OBJECT);
	p = put_amf_string(p, "copyright");
	p = put_byte(p, AMF_STRING);
	p = put_amf_string(p, "CarEyeRTMP");
	
	if (type == 1)
	{
		p = put_amf_string(p, "width");
		p = put_amf_double(p, lpMetaData->Width);

		p = put_amf_string(p, "height");
		p = put_amf_double(p, lpMetaData->Height);

		p = put_amf_string(p, "framerate");
		p = put_amf_double(p, lpMetaData->FrameRate);

		p = put_amf_string(p, "videocodecid");
		if (lpMetaData->VCodec == CAREYE_VCODE_H264)
		{
			p = put_amf_double(p, FLV_CODECID_H264);
		}
		else
		{
			p = put_amf_double(p, FLV_CODECID_H265);
		}
	}

	p =put_amf_string( p, "audiosamplerate");
	p =put_amf_double( p, lpMetaData->SampleRate);

	p =put_amf_string( p, "audiocodecid");
	p =put_amf_double( p, 10);

	p =put_amf_string( p, "" );
	p =put_byte( p, AMF_OBJECT_END  );
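The snippet stops after AMF_OBJECT_END; the remaining step is to wrap the encoded AMF body into an RTMPPacket and send it as a data (script/INFO) message. A minimal sketch of that final step, assuming the same buffer layout as above and an RTMP handle r as in the earlier functions:

	// Illustrative continuation (not shown in the original post): send the AMF body
	// built above as an RTMP data message. buffer/body/p are the variables from the snippet.
	RTMPPacket packet;
	memset(&packet, 0, sizeof(packet));
	packet.m_body = body;                         // buffer + RTMP_MAX_HEADER_SIZE, header space precedes it
	packet.m_nBodySize = (uint32_t)(p - body);    // number of AMF bytes actually written
	packet.m_packetType = RTMP_PACKET_TYPE_INFO;  // script data carrying "onMetaData"
	packet.m_nChannel = 0x04;
	packet.m_nTimeStamp = 0;
	packet.m_hasAbsTimestamp = 0;
	packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
	packet.m_nInfoField2 = r->m_stream_id;

	if (RTMP_IsConnected(r))
		RTMP_SendPacket(r, &packet, 0);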


car-eye RTMP push streaming forwards the audio and video data carried by GB28181 or JT1078 protocol devices to an RTMP streaming server, so that clients can pull and play the stream over RTMP, HTTP, WebSocket, HLS and other methods.

The car-eye streaming media server supports a range of surveillance and in-vehicle mobile device scenarios. Related open-source code: car-eye open source team · GitHub

CarEye Open Platform: this organization publishes the CarEye vehicle management system, video management platform, streaming media server, device-side programs and related test tools.

Reference: "librtmp h265 推流", don.wang's blog on CSDN.



From: https://blog.51cto.com/u_13592015/6606819
