Pushing H.264 and AAC data over RTMP on Android
Setting up an RTMP server: https://blog.csdn.net/wangchao1412/article/details/103641770
H.264, AAC, and RTMPPacket formats: https://blog.csdn.net/wangchao1412/article/details/103642403
Environment:
rtmpdump has only a few C files, so its sources can be pulled into the project directly; just configure the module's CMakeLists for them.
faac and x264 are cross-compiled into Android static libraries with the NDK and linked in.
faac: encodes the PCM captured by AudioRecord into AAC.
x264: encodes the raw frames produced by Camera2 into H.264.
rtmpdump: connects to the RTMP server, wraps the AAC and H.264 data into RTMPPackets, and pushes the stream.
The Java-side AudioRecord and Camera2 setup is not covered here; we go straight into the native layer.
First, a look at what each native method does.
public class RtmpPush {
static {
System.loadLibrary("rtmpPush");
}
/**
* Create the x264 and faac encoder wrappers
*/
public static native void native_init();
/**
* Connect to the RTMP server and start pushing
* @param path
*/
public static native void native_start(String path);
/**
* Initialize the x264 configuration
* @param width
* @param height
* @param fps
* @param bitrate
*/
public static native void native_setVideoEncInfo(int width, int height, int fps, int bitrate);
/**
* Encode YUV input and push it
* @param ydata
* @param yLen
*/
public static native void native_pushVideo(
byte[] ydata, int yLen
);
public static native void native_stop();
public static native void native_release();
/**
* Initialize the faac configuration
* @param sampleRateInHz
* @param channels
*/
public static native void native_setAudioEncInfo(int sampleRateInHz, int channels);
/**
* Get the faac input buffer size; use it to size the AudioRecord read buffer
* @return
*/
public static native int getInputSamples();
/**
* Encode PCM to AAC and push it
* @param data
*/
public static native void native_pushAudio(byte[] data);
}
native_init
extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1init(JNIEnv *env, jclass instance) {
videoChannel = new VideoChannel();
videoChannel->setVideoCallBack(callback);
audioChannel = new AudioChannel();
audioChannel->setAudioCallBack(callback);
packets.setReleaseCallback(releasePackets);
}
native_init creates the VideoChannel and AudioChannel.
VideoChannel: encodes H.264 and assembles the video RTMPPackets.
AudioChannel: encodes AAC and assembles the audio RTMPPackets.
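Both channels share a packet callback, and the queue is given releasePackets as its cleanup; neither is shown in this post. A minimal sketch, assuming the callback stamps each packet relative to the startTime set in task_start below and pushes it into the thread-safe packets queue:

//sketch: free one RTMPPacket after it has been sent (or on queue clear)
void releasePackets(RTMPPacket *&packet) {
    if (packet) {
        RTMPPacket_Free(packet);
        delete packet;
        packet = 0;
    }
}

//sketch: called by VideoChannel/AudioChannel with a finished packet
void callback(RTMPPacket *packet) {
    if (packet) {
        //timestamp relative to the moment pushing started
        packet->m_nTimeStamp = RTMP_GetTime() - startTime;
        //enqueue; task_start pops and sends
        packets.push(packet);
    }
}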
native_start
void *task_start(void *args) {
char *url = static_cast<char *>(args);
RTMP *rtmp = 0;
do {
rtmp = RTMP_Alloc();
if (!rtmp) {
LOGE("alloc rtmp失败");
break;
}
RTMP_Init(rtmp);
int ret = RTMP_SetupURL(rtmp, url);
if (!ret) {
LOGE("设置地址失败:%s", url);
break;
}
//timeout, in seconds
rtmp->Link.timeout = 5;
//we publish rather than play, so enable write before connecting
RTMP_EnableWrite(rtmp);
ret = RTMP_Connect(rtmp, 0);
if (!ret) {
LOGE("连接服务器失败:%s", url);
break;
}
ret = RTMP_ConnectStream(rtmp, 0);
if (!ret) {
LOGE("连接流失败");
break;
}
//record the time pushing starts; packet timestamps are relative to this
startTime = RTMP_GetTime();
readyPush = 1;
packets.setWork(1);
//queue the audio decoder-specific info before any AAC data
callback(audioChannel->getAudioTag());
RTMPPacket *packet = 0;
//loop: pop RTMPPackets from the queue and send them
while (readyPush) {
ret = packets.pop(packet);
if (!readyPush) {
break;
}
if (!ret) {
continue;
}
packet->m_nInfoField2 = rtmp->m_stream_id;
ret = RTMP_SendPacket(rtmp, packet, 1);
releasePackets(packet);
if (!ret) {
LOGE("发送失败");
break;
}
}
releasePackets(packet);
} while (0);
isStart = 0;
readyPush = 0;
packets.setWork(0);
packets.clear();
if (rtmp) {
RTMP_Close(rtmp);
RTMP_Free(rtmp);
}
DELETE(url);
return 0;
}
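The JNI entry point that spawns this thread is not shown above; a minimal sketch, assuming a pthread handle pid_start and the isStart re-entry guard seen at the end of task_start:

extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1start(JNIEnv *env, jclass clazz, jstring path_) {
    if (isStart) {
        return;
    }
    isStart = 1;
    const char *path = env->GetStringUTFChars(path_, 0);
    //copy the url: the push thread outlives this JNI call and frees it with DELETE
    char *url = new char[strlen(path) + 1];
    strcpy(url, path);
    pthread_create(&pid_start, 0, task_start, url);
    env->ReleaseStringUTFChars(path_, path);
}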
native_setVideoEncInfo
extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1setVideoEncInfo(JNIEnv *env, jclass clazz,
jint width, jint height, jint fps,
jint bitrate) {
if (videoChannel) {
videoChannel->setVideoEncInfo(width, height, fps, bitrate);
}
}
Calls videoChannel->setVideoEncInfo to initialize x264.
void VideoChannel::setVideoEncInfo(int width, int height, int fps, int bitrate) {
pthread_mutex_lock(&mutex);
mWidth = width;
mHeight = height;
mFps = fps;
mBitrate = bitrate;
ySize = width * height;
uvSize = ySize / 4;
if (videoCodec) {
x264_encoder_close(videoCodec);
videoCodec = 0;
}
if (pic_in) {
x264_picture_clean(pic_in);
DELETE(pic_in);
}
//x264 encoder parameters
x264_param_t param;
//"ultrafast": fastest preset; "zerolatency": tuned for no-latency encoding
x264_param_default_preset(&param, "ultrafast", "zerolatency");
//encoding level 3.2
param.i_level_idc = 32;
//input pixel format
param.i_csp = X264_CSP_I420;
param.i_width = width;
param.i_height = height;
//no B-frames
param.i_bframe = 0;
//i_rc_method selects rate control: CQP (constant QP), CRF (constant rate factor), ABR (average bitrate)
param.rc.i_rc_method = X264_RC_ABR;
//bitrate, in kbps
param.rc.i_bitrate = bitrate / 1000;
//instantaneous peak bitrate
param.rc.i_vbv_max_bitrate = bitrate / 1000 * 1.2;
//required once i_vbv_max_bitrate is set: VBV buffer size, in kbps
param.rc.i_vbv_buffer_size = bitrate / 1000;
//frame rate
param.i_fps_num = fps;
param.i_fps_den = 1;
param.i_timebase_den = param.i_fps_num;
param.i_timebase_num = param.i_fps_den;
// param.pf_log = x264_log_default2;
//use fps rather than timestamps to compute frame distance
param.b_vfr_input = 0;
//keyframe interval: one I-frame every 2 seconds
param.i_keyint_max = fps * 2;
//repeat SPS/PPS in front of every keyframe, so each I-frame carries its own SPS/PPS
param.b_repeat_headers = 1;
//single-threaded encoding
param.i_threads = 1;
x264_param_apply_profile(&param, "baseline");
//open the encoder
videoCodec = x264_encoder_open(&param);
pic_in = new x264_picture_t;
x264_picture_alloc(pic_in, X264_CSP_I420, width, height);
pthread_mutex_unlock(&mutex);
}
native_setAudioEncInfo
Calls audioChannel->setAudioEncInfo to initialize faac.
void AudioChannel::setAudioEncInfo(int samplesInHZ, int channels) {
mChannels = channels;
//open the encoder; the 3rd out-param receives the max number of samples
//accepted per call (one sample is 16 bits, 2 bytes), the 4th the max
//possible size in bytes of one encoded frame
audioCodec = faacEncOpen(samplesInHZ, channels, &inputSamples, &maxOutputBytes);
//configure the encoder
faacEncConfigurationPtr config = faacEncGetCurrentConfiguration(audioCodec);
//MPEG-4 standard
config->mpegVersion = MPEG4;
//AAC-LC object type
config->aacObjectType = LOW;
//16-bit input
config->inputFormat = FAAC_INPUT_16BIT;
//output raw frames: neither ADTS nor ADIF headers
config->outputFormat = 0;
faacEncSetConfiguration(audioCodec, config);
//output buffer that receives the encoded data
buffer = new u_char[maxOutputBytes];
}
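The native side of getInputSamples is not shown in this post; a minimal sketch: it simply exposes the inputSamples value that faacEncOpen filled in, so the Java side can size its AudioRecord read buffer (each sample is 2 bytes):

int AudioChannel::getInputSamples() {
    //number of 16-bit samples the encoder consumes per faacEncEncode call
    return static_cast<int>(inputSamples);
}

extern "C"
JNIEXPORT jint JNICALL
Java_com_example_rtmppushdemo_RtmpPush_getInputSamples(JNIEnv *env, jclass clazz) {
    if (audioChannel) {
        return audioChannel->getInputSamples();
    }
    return -1;
}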
native_pushVideo
Calls videoChannel->encodeData, which encodes the frame and wraps the output into RTMPPackets.
void VideoChannel::encodeData(int8_t *ydata, int ylen) {
pthread_mutex_lock(&mutex);
LOGE("encodeData");
if (b == 0) {
FILE *stream;
if ((stream = fopen("sdcard/h264", "wb")) == NULL) {
LOGE("Cannot open output file.\n");
}
fwrite(ydata, mWidth * mHeight*3/2, 1, stream);
fclose(stream);
b = 1;
}
memcpy(pic_in->img.plane[0], ydata, mWidth * mHeight);
memcpy(pic_in->img.plane[1], ydata + mWidth * mHeight, mWidth * mHeight / 4);
memcpy(pic_in->img.plane[2], ydata + mWidth * mHeight * 5 / 4, mWidth * mHeight / 4);
x264_nal_t *pp_nal;
int pi_nal;
x264_picture_t pic_out;
x264_encoder_encode(videoCodec, &pp_nal, &pi_nal, pic_in, &pic_out);
int sps_len;
int pps_len;
uint8_t sps[100];
uint8_t pps[100];
for (int i = 0; i < pi_nal; ++i) {
if (pp_nal[i].i_type == NAL_SPS) {
sps_len = pp_nal[i].i_payload - 4;
memcpy(sps, pp_nal[i].p_payload + 4, sps_len);
} else if (pp_nal[i].i_type == NAL_PPS) {
pps_len = pp_nal[i].i_payload - 4;
memcpy(pps, pp_nal[i].p_payload + 4, pps_len);
sendSPSPPS(sps, sps_len, pps, pps_len);
} else {
sendH264(pp_nal[i].i_type, pp_nal[i].p_payload, pp_nal[i].i_payload);
}
}
pthread_mutex_unlock(&mutex);
}
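The JNI glue for native_pushVideo is not shown either; a minimal sketch that pins the Java byte array and hands it to encodeData:

extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1pushVideo(JNIEnv *env, jclass clazz,
                                                         jbyteArray ydata_, jint yLen) {
    if (!videoChannel || !readyPush) {
        return;
    }
    jbyte *ydata = env->GetByteArrayElements(ydata_, 0);
    videoChannel->encodeData(ydata, yLen);
    env->ReleaseByteArrayElements(ydata_, ydata, 0);
}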
The I420 frame is copied into the three planes of img and then encoded to H.264. Because x264 was configured above to repeat headers, every I-frame carries SPS and PPS; the SPS/PPS and the H.264 slices must each be packed into RTMPPackets in their own format, as follows.
Packing SPS/PPS into an RTMPPacket
void VideoChannel::sendSPSPPS(uint8_t *sps, int sps_len, uint8_t *pps, int pps_len) {
//AVC sequence header: 13 fixed bytes + sps + (3 bytes + pps)
int body_size = 13 + sps_len + 3 + pps_len;
RTMPPacket *packet = new RTMPPacket();
RTMPPacket_Alloc(packet, body_size);
int i = 0;
//frame type (1 = keyframe) + codec id (7 = AVC)
packet->m_body[i++] = 0x17;
//AVC packet type: 0 = sequence header
packet->m_body[i++] = 0x00;
//composition time: 0
packet->m_body[i++] = 0x00;
packet->m_body[i++] = 0x00;
packet->m_body[i++] = 0x00;
//AVCDecoderConfigurationRecord: configurationVersion
packet->m_body[i++] = 0x01;
//profile, profile compatibility, level, taken from the SPS
packet->m_body[i++] = sps[1];
packet->m_body[i++] = sps[2];
packet->m_body[i++] = sps[3];
//NALU length field size minus one (4-byte lengths)
packet->m_body[i++] = 0xFF;
//number of SPS (low 5 bits = 1)
packet->m_body[i++] = 0xE1;
//SPS length, big-endian, then the SPS itself
packet->m_body[i++] = (sps_len >> 8) & 0xff;
packet->m_body[i++] = (sps_len) & 0xff;
memcpy(&packet->m_body[i], sps, sps_len);
i += sps_len;
//number of PPS
packet->m_body[i++] = 0x01;
//PPS length, big-endian, then the PPS itself
packet->m_body[i++] = (pps_len >> 8) & 0xff;
packet->m_body[i++] = (pps_len) & 0xff;
memcpy(&packet->m_body[i], pps, pps_len);
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
packet->m_nBodySize = body_size;
//same video chunk stream id as sendH264
packet->m_nChannel = 0x10;
packet->m_nTimeStamp = 0;
packet->m_hasAbsTimestamp = 0;
packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
videoCallback(packet);
}
Packing H.264 NALUs into an RTMPPacket
void VideoChannel::sendH264(int type, uint8_t *h264, int len) {
//strip the Annex-B start code (00 00 00 01 or 00 00 01)
if (h264[2] == 0x00) {
len -= 4;
h264 += 4;
} else {
len -= 3;
h264 += 3;
}
//5 header bytes + 4-byte NALU length + payload
int bodySize = 9 + len;
RTMPPacket *packet = new RTMPPacket;
RTMPPacket_Alloc(packet, bodySize);
//frame type (2 = inter frame) + codec id (7 = AVC)
packet->m_body[0] = 0x27;
if (type == NAL_SLICE_IDR) {
//keyframe
packet->m_body[0] = 0x17;
}
//AVC packet type: 1 = NALU
packet->m_body[1] = 0x01;
//composition time: 0
packet->m_body[2] = 0x00;
packet->m_body[3] = 0x00;
packet->m_body[4] = 0x00;
//NALU length, 4 bytes big-endian, then the NALU itself
packet->m_body[5] = (len >> 24) & 0xff;
packet->m_body[6] = (len >> 16) & 0xff;
packet->m_body[7] = (len >> 8) & 0xff;
packet->m_body[8] = (len) & 0xff;
memcpy(&packet->m_body[9], h264, len);
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
packet->m_nBodySize = bodySize;
packet->m_nChannel = 0x10;
packet->m_hasAbsTimestamp = 0;
packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
videoCallback(packet);
}
native_pushAudio
Calls audioChannel->encodeData to encode the PCM to AAC and pack it into an RTMPPacket.
void AudioChannel::encodeData(signed char *data) {
pthread_mutex_lock(&mutex);
int bytelen = faacEncEncode(audioCodec, reinterpret_cast<int32_t *>(data), inputSamples, buffer,
maxOutputBytes);
if (bytelen > 0) {
int bodySize = 2 + bytelen;
RTMPPacket *packet = new RTMPPacket;
RTMPPacket_Alloc(packet, bodySize);
//0xAF: AAC, 44 kHz, 16-bit, stereo; 0xAE for mono
packet->m_body[0] = 0xAF;
if (mChannels == 1) {
packet->m_body[0] = 0xAE;
}
//AAC packet type: 1 = raw AAC frame
packet->m_body[1] = 0x01;
//the encoded AAC data
memcpy(&packet->m_body[2], buffer, bytelen);
packet->m_hasAbsTimestamp = 0;
packet->m_nBodySize = bodySize;
packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
packet->m_nChannel = 0x11;
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
audioCallback(packet);
}
pthread_mutex_unlock(&mutex);
}
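The matching glue for native_pushAudio mirrors the video path; again a sketch, not shown in the original:

extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1pushAudio(JNIEnv *env, jclass clazz,
                                                         jbyteArray data_) {
    if (!audioChannel || !readyPush) {
        return;
    }
    jbyte *data = env->GetByteArrayElements(data_, 0);
    audioChannel->encodeData(data);
    env->ReleaseByteArrayElements(data_, data, 0);
}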
Because faac was configured to emit raw frames (no ADTS headers), the decoder configuration has to be fetched separately, and it must be sent over RTMP before any AAC data.
getAudioTag: fetch the audio decoder-specific info and pack it into an RTMPPacket
RTMPPacket *AudioChannel::getAudioTag() {
pthread_mutex_lock(&mutex);
unsigned char *buf;
unsigned long len;
//faac allocates the AudioSpecificConfig blob; free it after copying
faacEncGetDecoderSpecificInfo(audioCodec, &buf, &len);
int bodySize = 2 + len;
RTMPPacket *packet = new RTMPPacket();
RTMPPacket_Alloc(packet, bodySize);
packet->m_body[0] = 0xAF;
if (mChannels == 1) {
packet->m_body[0] = 0xAE;
}
//AAC packet type: 0 = sequence header (AudioSpecificConfig)
packet->m_body[1] = 0x00;
memcpy(&packet->m_body[2], buf, len);
free(buf);
packet->m_hasAbsTimestamp = 0;
packet->m_nBodySize = bodySize;
packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
packet->m_nChannel = 0x11;
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
pthread_mutex_unlock(&mutex);
return packet;
}
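For reference, the blob returned by faacEncGetDecoderSpecificInfo is the 2-byte AudioSpecificConfig. For 44.1 kHz stereo AAC-LC it is typically 0x12 0x10: 5 bits object type (2 = LC), 4 bits sampling-frequency index (4 = 44100 Hz), 4 bits channel configuration (2), and 3 zero bits of trailing flags.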
That completes the basic flow. One detail worth repeating: after the RTMP connection is set up, and before the loop starts popping packets from the queue and sending them, audioChannel->getAudioTag must be called so that the audio decoder-specific info frame is sent first:
void *task_start(void *args) {
char *url = static_cast<char *>(args);
RTMP *rtmp = 0;
do {
...
//get the audio decoder-specific info and put it in the queue
callback(audioChannel->getAudioTag());
RTMPPacket *packet = 0;
//loop: pop RTMPPackets from the queue and send them
while (readyPush) {
//pop an RTMPPacket from the queue
ret = packets.pop(packet);
if (!readyPush) {
break;
}
if (!ret) {
continue;
}
packet->m_nInfoField2 = rtmp->m_stream_id;
ret = RTMP_SendPacket(rtmp, packet, 1);
releasePackets(packet);
if (!ret) {
LOGE("发送失败");
break;
}
}
releasePackets(packet);
} while (0);
...
return 0;
}
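The native_stop and native_release paths are not shown in this post; a minimal sketch, assuming the same globals and the DELETE macro used in task_start:

extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1stop(JNIEnv *env, jclass clazz) {
    readyPush = 0;
    //setWork(0) wakes a consumer blocked in pop() so task_start can exit
    packets.setWork(0);
    pthread_join(pid_start, 0);
}

extern "C"
JNIEXPORT void JNICALL
Java_com_example_rtmppushdemo_RtmpPush_native_1release(JNIEnv *env, jclass clazz) {
    DELETE(videoChannel);
    DELETE(audioChannel);
}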