
FFmpeg: transcoding H.264 to mpeg1video and saving the result


Goal: decode an H.264 file and re-encode the decoded frames as MPEG-1 video. Once this step works, the next step is to store the result in a TS container; the code in this post implements the first part.

Notes:
The FFmpeg version matters. As of today (2020-02-11) the latest release is FFmpeg 4.2.2, and with it the mpeg1video encoding path did not work for me.

Running the FFmpeg sample encode_video.c reports the error:

mpeg1video The encoder timebase is not set

The root cause in my case was the FFmpeg version, so I recommend using FFmpeg 3.2.14 instead.
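
For reference, the message refers to AVCodecContext.time_base not being filled in before avcodec_open2() is called. The full listing below does set it; purely as a hedged sketch (all names and values here are example placeholders, not taken from the original post), an mpeg1video encoder context with the timebase set could look like this:

#include <libavcodec/avcodec.h>

/* Sketch only: allocate an mpeg1video encoder context with the
 * timebase set before avcodec_open2(). Width/height/bitrate/fps
 * are example values. Returns NULL on failure. */
static AVCodecContext *alloc_mpeg1_encoder(void)
{
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec)
        return NULL;

    AVCodecContext *enc = avcodec_alloc_context3(codec);
    if (!enc)
        return NULL;

    enc->width     = 640;
    enc->height    = 480;
    enc->pix_fmt   = AV_PIX_FMT_YUV420P;
    enc->bit_rate  = 400000;
    enc->time_base = (AVRational){1, 25};   /* unit in which frame pts is expressed;
                                               leaving this unset triggers the error above */
    enc->framerate = (AVRational){25, 1};   /* nominal frame rate */

    if (avcodec_open2(enc, codec, NULL) < 0) {
        avcodec_free_context(&enc);
        return NULL;
    }
    return enc;
}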

The code:

#ifdef __cplusplus
extern "C" {
#endif
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/hwcontext.h>
#include <libavutil/error.h>
#include <libavfilter/avfilter.h>
#ifdef __cplusplus
}
#endif

/**
 * Purpose:   transcode H.264 to MPEG-1 video
 * Reference: https://www.jianshu.com/p/8cd90dba49e1
 */

int main()
{
    av_register_all();
    avcodec_register_all();
    avfilter_register_all();
    avformat_network_init();

    const char *fname = "/mnt/d/x264_encode.264";

    // Demux the H.264 file into individual packets
    AVFormatContext *inputCtx = NULL;
    // open the input file
    if (avformat_open_input(&inputCtx, fname, NULL, NULL) != 0)
    {
        fprintf(stderr, "avformat open input file :%s fail.\n", fname);
        return -1;
    }
    AVPacket packet;
    int ret = 0;
 //   int framesize = 0;

    // Decode H.264 to YUV
    AVCodec *codec_h264 = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *ctx_h264 = avcodec_alloc_context3(codec_h264);
    if (avcodec_open2(ctx_h264,codec_h264,NULL) < 0)
    {
        fprintf(stderr, "avcodec_open2 h264 fail.\n");
        exit(1);
    }
    
    AVFrame *decodeResFrame = av_frame_alloc();     // holds the decoded H.264 picture
    if (!decodeResFrame)
    {
        fprintf(stderr, "could not alloc decode result frame.\n" );
        exit(1);
    }

    // AVFrame *yuvframe = av_frame_alloc();       // holds the converted picture
    // if(!yuvframe)
    // {
    //     fprintf(stderr, "could not alloc yuvTransferFrame .\n" );
    //     exit(1);
    // }

    AVFrame *yuvframe = NULL;
    
    // Convert to YUV420P; this avoids garbled (mosaic) output in some cases.
    uint8_t *outBuffer = NULL;
    struct SwsContext *pSwsCtx = NULL;


    // mpeg1 video
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };          // MPEG sequence end code, written at the end of the file
    FILE *f = fopen("test.mpg","wb+");
    if(!f)
    {
        fprintf(stderr, "Could not open test.mpg\n");
        exit(1);
    }

    AVCodec *codec_mpeg1 = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
    if(!codec_mpeg1)
    {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    AVCodecContext *ctx_mpeg1 = avcodec_alloc_context3(codec_mpeg1);
    if (!ctx_mpeg1) 
    {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    ctx_mpeg1->bit_rate = 400000;
    /* resolution must be a multiple of two */
    ctx_mpeg1->width = 640;
    ctx_mpeg1->height = 480;
    /* frames per second */
    ctx_mpeg1->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    ctx_mpeg1->gop_size = 10;
    ctx_mpeg1->max_b_frames = 1;
    ctx_mpeg1->pix_fmt = AV_PIX_FMT_YUV420P;




    // if (codec_id == AV_CODEC_ID_H264)
    //     av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(ctx_mpeg1, codec_mpeg1, NULL) < 0) {
        fprintf(stderr, "Could not open codec mpeg1\n");
        exit(1);
    }

    int i = 0;
    AVPacket mpegPkt;

    while(ret >= 0)
    {
        av_init_packet(&mpegPkt);
        mpegPkt.data = NULL;    // packet data will be allocated by the encoder
        mpegPkt.size = 0;

        // Read one packet from the input file
        ret = av_read_frame(inputCtx, &packet);
        if (ret < 0)
            break;              // end of file or read error: stop the loop
        fprintf(stderr, "av_read_frame pkt size: %d. \n", packet.size);

        // Decode the H.264 packet
        // int len, got_frame;
        // char buf[1024];
        // len = avcodec_decode_video2(ctx_h264, decodeResFrame, &got_frame, &packet);
        // if (len < 0)
        // {
        //     fprintf(stderr, "Error while decoding frame %d\n");
        // }
        ret = avcodec_send_packet(ctx_h264,&packet);
        if(ret == 0)
        {
            ret = avcodec_receive_frame(ctx_h264,decodeResFrame);
            if(ret == 0)
            {   
                if(yuvframe == NULL)
                {
                    // init sws context convert
                    // size_t bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, ctx_h264->width, ctx_h264->height, 1) * sizeof(uint8_t);
                    // outBuffer = (uint8_t *)av_malloc(bufferSize);
                    // av_image_fill_arrays(yuvframe->data, yuvframe->linesize, outBuffer, AV_PIX_FMT_YUV420P, ctx_h264->width, ctx_h264->height, 1);
                    
                    // Scale/convert decoded frames to the encoder's size and pixel format
                    pSwsCtx = sws_getContext(ctx_h264->width, ctx_h264->height, ctx_h264->pix_fmt,
                        ctx_mpeg1->width, ctx_mpeg1->height, AV_PIX_FMT_YUV420P,
                        SWS_BILINEAR, NULL, NULL, NULL);

                    yuvframe = av_frame_alloc();
                    yuvframe->format = ctx_mpeg1->pix_fmt;
                    yuvframe->width  = ctx_mpeg1->width;
                    yuvframe->height = ctx_mpeg1->height;
                    ret = av_image_alloc(yuvframe->data, yuvframe->linesize, ctx_mpeg1->width, ctx_mpeg1->height,
                         ctx_mpeg1->pix_fmt, 32);
                    if (ret < 0) 
                    {
                        fprintf(stderr, "Could not allocate raw picture buffer\n");
                        exit(1);
                    }
                }

                // sws convert
                if (sws_scale(pSwsCtx,
                        (const uint8_t* const*)decodeResFrame->data, decodeResFrame->linesize,
                        0, decodeResFrame->height,
                        yuvframe->data, yuvframe->linesize) == 0)
                {
                    fprintf(stderr, "sws_scale fail. \n" );
                    exit(2);
                }
                else
                {
                    // We now have YUV420P data: encode it as mpeg1video
                    yuvframe->pts = i;
                    ret = avcodec_send_frame(ctx_mpeg1,yuvframe);
                    if(ret == 0)
                    {
                        do
                        {
                            ret = avcodec_receive_packet(ctx_mpeg1,&mpegPkt);
                            if(ret == AVERROR(EAGAIN))
                            {            
                                ret = 0;
                                fprintf(stderr, "again. \n" );                    
//                                continue;
                            } 

                            if(ret == 0)
                            {
                                fwrite(mpegPkt.data, 1, mpegPkt.size, f);
                                av_packet_unref(&mpegPkt);
                            }
                        } while (0);
                    }
                }
            }
            av_packet_unref(&packet);
        }

        i++;
    }
    
    avformat_close_input(&inputCtx);
    
    // decodeResFrame's buffers are owned (ref-counted) by the decoder,
    // so only the frame itself is freed here.
    av_frame_free(&decodeResFrame);

    if (yuvframe)
    {
        av_freep(&yuvframe->data[0]);   // allocated with av_image_alloc above
        av_frame_free(&yuvframe);
    }

    avcodec_free_context(&ctx_h264);

    sws_freeContext(pSwsCtx);

    avcodec_free_context(&ctx_mpeg1);

    /* add sequence end code to have a real MPEG file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    return 0;
}
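
One thing the listing above never does is drain the codecs at end of stream: with max_b_frames = 1 the encoder holds back at least one packet, which is dropped before the sequence end code is written. A minimal sketch of how the mpeg1video encoder could be flushed (it would have to run before ctx_mpeg1 is freed and before the end code is written; the decoder can be drained the same way by sending a NULL packet):

// Sketch: drain the mpeg1video encoder at end of stream.
// Sending a NULL frame puts the encoder into flushing mode;
// packets are then read until it signals AVERROR_EOF.
avcodec_send_frame(ctx_mpeg1, NULL);
for (;;)
{
    AVPacket flushPkt;
    av_init_packet(&flushPkt);
    flushPkt.data = NULL;
    flushPkt.size = 0;

    if (avcodec_receive_packet(ctx_mpeg1, &flushPkt) < 0)
        break;                                  // AVERROR_EOF or error

    fwrite(flushPkt.data, 1, flushPkt.size, f); // same output file as above
    av_packet_unref(&flushPkt);
}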

The test basically passes: the saved .mpg files all play back.
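
The next step mentioned at the top, storing the stream as TS, is not part of this post. For orientation only, here is a rough, untested sketch of how the encoded packets could be muxed into an MPEG-TS file with libavformat; the file name "out.ts" and the 1/25 timebase are placeholders I chose, and error checks are omitted for brevity:

// Sketch: mux already-encoded mpeg1video packets into an MPEG-TS file.
AVFormatContext *outCtx = NULL;
avformat_alloc_output_context2(&outCtx, NULL, "mpegts", "out.ts");

AVStream *st = avformat_new_stream(outCtx, NULL);
avcodec_parameters_from_context(st->codecpar, ctx_mpeg1);   // copy encoder parameters
st->time_base = (AVRational){1, 25};

avio_open(&outCtx->pb, "out.ts", AVIO_FLAG_WRITE);
avformat_write_header(outCtx, NULL);

// For every packet produced by avcodec_receive_packet(), instead of fwrite():
//   av_packet_rescale_ts(&mpegPkt, ctx_mpeg1->time_base, st->time_base);
//   mpegPkt.stream_index = st->index;
//   av_interleaved_write_frame(outCtx, &mpegPkt);

av_write_trailer(outCtx);
avio_closep(&outCtx->pb);
avformat_free_context(outCtx);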

Be sure to study the official FFmpeg samples; following them is far more efficient than searching around aimlessly on Baidu.
