FFmpeg + SDL 2.0: An RTMP Video Stream Player
For a work project I needed to receive and play a live RTMP video stream. After studying the problem for a while it finally came together, so below is a summary of the process, in the hope of saving others a few detours.
1. Introduction
A live stream carries audio and video data. When the server pushes it, the raw data is first encoded, then wrapped in a streaming protocol (RTMP, RTSP, ...) for transport. To play such a stream, the client has to do the reverse: strip off the transport/container layer, then decode the compressed audio and video separately to obtain the raw samples and pictures that can actually be rendered. (The flow chart for this is borrowed from Lei Xiaohua's blog.)
Fortunately, FFmpeg handles the streaming protocols very well, so if the project is urgent you can skip studying the protocols themselves (they are worth coming back to later) and get the integration done quickly. On the client side, receiving and playing the stream comes down to handling the video and the audio, which I will cover separately.
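Before wiring up SDL, it can help to confirm that FFmpeg alone can reach the RTMP server. The following is a minimal, self-contained sketch (using the same FFmpeg 2.x/3.x API as the rest of this article) that just connects and dumps the stream information; it is a sanity check, not part of the player itself:

extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdio>

int main()
{
    const char *url = "rtmp://118.178.252.242/live/123";   // same test URL as below
    av_register_all();            // register demuxers/decoders (no longer needed on FFmpeg 4+)
    avformat_network_init();      // required for network inputs such as rtmp:// and rtsp://

    AVFormatContext *fmt = avformat_alloc_context();
    if (avformat_open_input(&fmt, url, NULL, NULL) != 0) {   // connect and strip the RTMP/FLV layer
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(fmt, NULL) < 0) {          // probe the audio/video streams
        printf("Couldn't find stream information.\n");
        return -1;
    }
    av_dump_format(fmt, 0, url, 0);   // print codecs, resolution, sample rate, ...
    avformat_close_input(&fmt);
    avformat_network_deinit();
    return 0;
}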
2. FFmpeg Initialization
Before processing any audio or video we first have to connect to the server and obtain information about the streams.
2.1) Header file: data members
AVFormatContext *pFormatCtx; //demuxing context (flv, mp4, rmvb, avi, ...)
AVPacket *packet; //compressed (encoded) data
AVFrame *pFrame; //raw decoded data (YUV/RGB for video, PCM for audio)
AVCodecContext *pCodecCtx; //video decoder context
int videoindex; //index of the video stream
AVFrame *pFrameYUV; //converted YUV picture
struct SwsContext *img_convert_ctx; //image scaling / pixel-format conversion context
uint8_t *out_buffer; //buffer backing pFrameYUV
AVCodecContext *pAudioCodecCtx; //audio decoder context
int audioIndex; //index of the audio stream
// AVCodec *pAudioCodec;
uint8_t *outAudio_buffer; //converted PCM data
struct SwrContext *au_convert_ctx; //audio resampling context
int outAudio_buffer_size; //size of one converted PCM chunk
SDL_Renderer* sdlRenderer;
SDL_Texture* sdlTexture;
SDL_Rect sdlRect;
SDL_Window *screen;
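These members belong to the player class that the article later calls WRtmpPlayer. As a rough sketch of how the pieces hang together (only ReadVideoPacket, ReadAudioPacket and EndAv appear in the original text; the rest of the interface is an assumption), the header might look like this:

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
#include <SDL.h>

class WRtmpPlayer
{
public:
    int  Init();              // hypothetical: the setup code of sections 2.2-2.3, 3.1-3.2 and 4.1-4.2
    void EndAv();             // tear down decoders, SDL resources and buffers
private:
    int  ReadVideoPacket();   // section 3.3
    int  ReadAudioPacket();   // section 4.3
    // ... the data members listed above ...
};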
2.2) Initialize FFmpeg
char filepath[] = "rtmp://118.178.252.242/live/123"; //URL of the RTMP streaming server
//Allocate the working structures
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
packet = (AVPacket *)av_malloc(sizeof(AVPacket));
pFormatCtx = avformat_alloc_context();
outAudio_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
av_register_all();
avformat_network_init();
dwLastFrameRealtime = GetTickCount();
pFormatCtx->interrupt_callback.opaque = this; //C++: hand the player object to the callback
pFormatCtx->interrupt_callback.callback = interrupt_cb; //timeout callback; without it, open/read may block the thread indefinitely (see section 5.1)
if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {//connect to the server
printf("Couldn't open input stream.\n");
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
printf("Couldn't find stream information.\n");
return -1;
}
//Output Info-----------------------------
av_dump_format(pFormatCtx, 0, filepath, 0);
2.3) Initialize SDL
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
printf("Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
3. Video
This section deals with the video side: decoding the stream and displaying the resulting YUV (or RGB) pictures.
3.1) Get video stream information
videoindex = -1;
for (int i = 0; i<pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i; //index of the video stream
break;
}
if (videoindex == -1) {
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
printf("Could not open codec.\n");
return -1;
}
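A side note: AVStream::codec and avcodec_decode_video2 are deprecated since FFmpeg 3.1. The code in this article targets the older API; if you build against a newer FFmpeg, a roughly equivalent sketch (an assumption, not what the original project uses) would be:

//FFmpeg 3.1+ sketch: locate the video stream and build the decoder context
//from its codec parameters instead of using the deprecated stream->codec.
AVCodec *pCodec = NULL;
videoindex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0);
if (videoindex < 0) {
    printf("Didn't find a video stream.\n");
    return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoindex]->codecpar);
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    printf("Could not open codec.\n");
    return -1;
}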
3.2) Initialize the SDL display area and rendering resources
//SDL 2.0 supports multiple windows; here SDL renders into an existing UI widget
screen = SDL_CreateWindowFrom((void*)(ui.playWidget->winId()));
if (!screen) {
printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
return -1;
}
SDL_ShowWindow(screen);//make sure the window is visible; after a previous SDL_DestroyWindow it may otherwise stay hidden and show no picture
sdlRenderer = SDL_CreateRenderer(screen, -1, 0);//create a renderer on top of the window
int screen_w = 0, screen_h = 0;
screen_w = ui.playWidget->width();//pCodecCtx->width;
screen_h = ui.playWidget->height();// pCodecCtx->height;
out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, screen_w, screen_h));
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, screen_w, screen_h);
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
screen_w, screen_h, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);//color-space/size conversion: pCodecCtx->pix_fmt -> AV_PIX_FMT_YUV420P, scaled to the widget size
//IYUV: Y + U + V (3 planes) YV12: Y + V + U (3 planes)
sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, screen_w, screen_h); //create the texture
sdlRect.x = 0;
sdlRect.y = 0;
sdlRect.w = screen_w;
sdlRect.h = screen_h;
return 0;
3.3) Display the video
if (av_read_frame(pFormatCtx, packet) >= 0) //read one compressed packet
{
if (ReadAudioPacket() < 0) //audio handling
{
return -1;
}
if (ReadVideoPacket() < 0) //video handling
{
return -1;
}
dwLastFrameRealtime = GetTickCount();
av_packet_unref(packet); //release the packet's buffers (do not also call the deprecated av_free_packet on top of this)
}
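The snippet above handles a single packet. In the running player it would sit in a loop (for example on its own thread or driven by a timer); the loop below is a sketch of that outer structure, with only the body taken from the article:

//Hypothetical outer loop around the per-packet handling shown above.
//It runs until av_read_frame() fails, either because the stream ended
//or because the interrupt callback of section 5.1 aborted a blocked read.
while (av_read_frame(pFormatCtx, packet) >= 0)
{
    if (ReadAudioPacket() < 0)   //decode audio and hand PCM to SDL
        break;
    if (ReadVideoPacket() < 0)   //decode video and render the frame
        break;
    dwLastFrameRealtime = GetTickCount();   //feed the timeout watchdog
    av_packet_unref(packet);                //release the packet's buffers
}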
int WRtmpPlayer::ReadVideoPacket()
{
int ret = 0;
int got_picture = 0;
if (packet->stream_index == videoindex) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
//upload the converted YUV planes into the texture
SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
pFrameYUV->data[0], pFrameYUV->linesize[0],
pFrameYUV->data[1], pFrameYUV->linesize[1],
pFrameYUV->data[2], pFrameYUV->linesize[2]);
SDL_RenderClear(sdlRenderer);
SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);//copy the texture to the renderer
SDL_RenderPresent(sdlRenderer);//present the frame
//Note: do NOT destroy sdlTexture, img_convert_ctx or out_buffer here; they are
//reused for every frame and should only be released in the cleanup path (EndAv).
// av_frame_free(&pFrameYUV);
//SDL End-----------------------
SDL_Delay(1);
}
}
return 0;
}
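SDL_Delay(1) only yields the CPU; it does not pace playback at the stream's frame rate. A simple improvement (my assumption, not in the original code) is to derive the delay from the stream's average frame rate; a real player would go further and schedule frames from their PTS and the stream time base:

//Rough pacing sketch: sleep approximately one frame interval.
AVRational fr = pFormatCtx->streams[videoindex]->avg_frame_rate;
int delay_ms = (fr.num > 0) ? (1000 * fr.den / fr.num) : 40;   //fall back to ~25 fps
SDL_Delay(delay_ms);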
4. Audio
4.1) Get audio stream information
audioIndex = -1;
for (int i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
audioIndex = i;//index of the audio stream
break;
}
if (audioIndex == -1) {
printf("Didn't find a audio stream.\n");
return -1;
}
// Get a pointer to the codec context for the audio stream
pAudioCodecCtx = pFormatCtx->streams[audioIndex]->codec;
// Find the decoder for the audio stream
AVCodec *pAudioCodec = avcodec_find_decoder(pAudioCodecCtx->codec_id);
if (pAudioCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
// Open codec
if (avcodec_open2(pAudioCodecCtx, pAudioCodec, NULL)<0) {
printf("Could not open codec.\n");
return -1;
}
4.2) Initialize SDL audio
//Initialize SDL audio output
//Out Audio Param
uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
//nb_samples: AAC-1024 MP3-1152
int out_nb_samples = pAudioCodecCtx->frame_size;
AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
int out_sample_rate = 44100;
int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
//Out Buffer Size
outAudio_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
SDL_AudioSpec wanted_spec;
int64_t in_channel_layout;
//SDL_AudioSpec
wanted_spec.freq = out_sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = out_channels;
wanted_spec.silence = 0;
wanted_spec.samples = out_nb_samples;
wanted_spec.callback = fill_audio; //callback through which SDL pulls PCM data (see section 5.2)
wanted_spec.userdata = pAudioCodecCtx;
if (SDL_OpenAudio(&wanted_spec, NULL)<0) {
printf("can't open audio.\n");
return -1;
}
//FIX:Some Codec's Context Information is missing
in_channel_layout = av_get_default_channel_layout(pAudioCodecCtx->channels);
//Swr
au_convert_ctx = swr_alloc();
au_convert_ctx = swr_alloc_set_opts(au_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
in_channel_layout, pAudioCodecCtx->sample_fmt, pAudioCodecCtx->sample_rate, 0, NULL);
swr_init(au_convert_ctx);
//Play
SDL_PauseAudio(0);
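One caveat with the setup above: for some streams pAudioCodecCtx->frame_size is still 0 before the first audio frame has been decoded, which leaves wanted_spec.samples at 0 and can make SDL_OpenAudio fail. A defensive fallback (an assumption, based on the typical AAC frame size noted in the code comment above) is:

//Guard against frame_size == 0 before the first audio frame is decoded.
int out_nb_samples = pAudioCodecCtx->frame_size;
if (out_nb_samples <= 0)
    out_nb_samples = 1024;   //typical AAC frame; MP3 would be 1152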
4.3) Play the audio
int WRtmpPlayer::ReadAudioPacket()
{
int ret = 0;
int got_picture = 0;
if (packet->stream_index == audioIndex)
{
ret = avcodec_decode_audio4(pAudioCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
EndAv();
printf("Error in decoding audio frame.\n");
return -1;
}
if (got_picture > 0) {
swr_convert(au_convert_ctx, &outAudio_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pFrame->data, pFrame->nb_samples);
//memset(outAudio_buffer, 0, MAX_AUDIO_FRAME_SIZE);
#if 1
// printf("index:%5d\t pts:%lld\t packet size:%d\n", index, packet->pts, packet->size);
#endif
#if OUTPUT_PCM
//Write PCM
// fwrite(outAudio_buffer, 1, outAudio_buffer_size, pFile);
#endif
// index++;
}
#if USE_SDL
while (audio_len>0)//Wait until finish
SDL_Delay(1);
//Set audio buffer (PCM data)
audio_chunk = (Uint8 *)outAudio_buffer;
//Audio buffer length
audio_len = outAudio_buffer_size;
audio_pos = audio_chunk;
#endif
}
return 0;
}
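swr_convert() returns the number of samples actually written per channel, so a more robust way to size the chunk handed to SDL (a sketch, not part of the original code) is to recompute it from that return value instead of reusing the fixed outAudio_buffer_size:

//Size the PCM chunk from the number of samples swr_convert actually produced.
int converted = swr_convert(au_convert_ctx, &outAudio_buffer, MAX_AUDIO_FRAME_SIZE,
    (const uint8_t **)pFrame->data, pFrame->nb_samples);
if (converted > 0) {
    int chunk_size = av_samples_get_buffer_size(NULL, 2 /* stereo, see 4.2 */, converted,
        AV_SAMPLE_FMT_S16, 1);
    audio_chunk = (Uint8 *)outAudio_buffer;
    audio_len = chunk_size;
    audio_pos = audio_chunk;
}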
5. Issues
5.1) FFmpeg blocks the calling thread while connecting and while reading data, so a dead connection or a stalled server would hang the player. To handle connect and read timeouts, this project uses FFmpeg's interrupt callback.
static Uint32 dwLastFrameRealtime = 0;
int interrupt_cb(void *ctx) //interrupt callback
{
WRtmpPlayer *pThis = (WRtmpPlayer *)ctx; //the player object passed via interrupt_callback.opaque
if ((GetTickCount() - dwLastFrameRealtime) > 10 * 1000) {//abort after 10 s without data
printf("Main stream disconnected\n");
return AVERROR_EOF; //any non-zero return makes the blocking FFmpeg call give up
}
return 0;
}
The callback's address is passed to pFormatCtx->interrupt_callback during FFmpeg initialization (section 2.2).
5.2) Audio playback also relies on a callback: SDL pulls PCM data through fill_audio.
//Buffer:
//|-----------|-------------|
//chunk-------pos---len-----|
static Uint8 *audio_chunk = NULL;
static Uint32 audio_len = 0;
static Uint8 *audio_pos = NULL;
void fill_audio(void *udata, Uint8 *stream, int len)
{
//SDL 2.0
SDL_memset(stream, 0, len);
if (audio_len == 0)
return;
len = (len>audio_len ? audio_len : len);
SDL_MixAudio(stream, audio_pos, len, SDL_MIX_MAXVOLUME);
audio_pos += len;
audio_len -= len;
}
The callback's address is assigned to wanted_spec.callback when SDL audio is set up (section 4.2).
6. Results
The project source and a prebuilt RTMP client player are available at: https://download.csdn.net/download/qq_25372599/10304366
7. References
Video reference: https://blog.csdn.net/leixiaohua1020/article/details/38868499
Audio reference: https://blog.csdn.net/leixiaohua1020/article/details/10528443