欢迎您访问程序员文章站,本站旨在为大家分享程序员计算机编程知识!
您现在的位置是: 首页

ffmpeg如果采用硬解码器h264_cuvid解码时颜色空间AV_PIX_FMT_CUDA无法转换为BGR24

程序员文章站 2022-07-02 08:36:40
...

如果我们采用ffmpeg硬解码:

AVCodec *m_codec=avcodec_find_decoder_by_name(“h264_cuvid”);
AVCodecContext * m_codecCtx = avcodec_alloc_context3(m_codec);
AVCodecParameters *codecPar = inCtx.codecParams;
avcodec_parameters_to_context(m_codecCtx, codecPar);
 m_codecCtx->bit_rate = 0;
  m_codecCtx->time_base.den = 1;
  m_codecCtx->time_base.num = 25;
  //如果在这里颜色空间设置为AV_PIX_FMT_CUDA,那么在receive解码后,m_frame->data指针指向
  //的地址还是显存,所以这时候拷贝的话就会出错,并且sws_scale也会出问题,那么在上述情况下还需要一步
  //AVFrame *frameInter = av_frame_alloc();
  //    av_hwframe_transfer_data(frameInter, m_frame, 0);
//  如果嫌麻烦,直接将颜色空间设置为AV_PIX_FMT_NV12,自动就将数据下载到主存中了
//这时候数据也可以用sws_scale了
  m_codecCtx->pix_fmt =  AV_PIX_FMT_NV12 /*AV_PIX_FMT_CUDA*/
                                           : m_codecCtx->pix_fmt;
avcodec_open2(m_codecCtx, m_codec, nullptr);

avcodec_send_frame(m_codecCtx, m_frame);
while (ret >= 0) { 
   ret=avcodec_receive_frame(m_codecCtx, m_frame);
    if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      av_strerror(ret, m_errBuf, ERR_BUF_LEN);
      LOG_ERROR("avcodec_receive_packet error: {}", m_errBuf);
      return false;
    }
    }

参考代码:

// Drain loop: converts at most one decoded frame to BGR24 and emits it.
// The `break` at the bottom means only the first available frame is taken.
while (ret >= 0) {
    ret = avcodec_receive_frame(m_codecCtx, m_frame);
    if (ret < 0) { // also covers AVERROR(EAGAIN) / AVERROR_EOF (negative)
        return false;
    }

    auto policy = getCapturePolicy();
    int width = m_frame->width;
    int height = m_frame->height;

    if (m_blackFrame.empty()) {
        // Lazily allocated placeholder used by the skip policy.
        m_blackFrame = cv::Mat(height, width, CV_8UC3);
    }

    // The decoder was opened with a hardware pixel format, so m_frame->data
    // points at device memory; av_hwframe_transfer_data() downloads the
    // frame to host memory before any CPU-side processing.
    AVFrame *frameInter = av_frame_alloc();
    av_hwframe_transfer_data(frameInter, m_frame, 0);
    cv::Mat cpuMat;

    // Alternative: pure-OpenCV colour conversion (expects I420 planes):
    //   cv::Mat imgYUV(int(height * 1.5), width, CV_8UC1);
    //   auto p = height * width;
    //   memcpy(imgYUV.data, frameInter->data[0], size_t(p));
    //   memcpy(imgYUV.data + p, frameInter->data[1], size_t(p / 4));
    //   memcpy(imgYUV.data + 5 * p / 4, frameInter->data[2], size_t(p / 4));
    //   cv::cvtColor(imgYUV, cpuMat, cv::COLOR_YUV2BGR_I420);

    // CPU transcode via swscale into the preallocated BGR frame buffer.
    sws_scale(m_imgcvtCtx,
              static_cast<uint8_t const *const *>(frameInter->data),
              frameInter->linesize,
              0,
              height,
              m_bgrFrame->data,
              m_bgrFrame->linesize);
    // FIX: the original never released frameInter, leaking one AVFrame
    // (plus its downloaded buffers) per decoded frame.
    av_frame_free(&frameInter);

    cv::Mat bgrImg(height,
                   width,
                   CV_8UC3,
                   static_cast<uchar *>(m_outbuf),
                   static_cast<size_t>(width * 3));
    // Deep copy: m_outbuf is reused for the next frame, so the emitted Mat
    // must own its own pixels.
    cpuMat = bgrImg.clone();

    // (A commented-out GPU transcode path using flow::img::Nv12ToBGR24 and
    //  cv::cuda existed here in the original; the complete decode()
    //  implementation later in this article shows that variant in full.)

    // Tag the frame according to the value of m_skipIdx and emit it.
    auto isKeyFrame = (m_skipIdx == 0);
    img::setCvImgFlag(KEY_FRAME_BIT, cpuMat, isKeyFrame);
    CustomerMat custMat{cpuMat, m_frame->pts};
    outFrame.push_back(custMat);
    m_skipIdx = (m_skipIdx + 1) % (policy.skip + 1);
    break; // deliver one frame per call
}

参考二:

/// Decode one compressed packet and append at most one BGR24 frame to
/// @p outFrame.
///
/// @param inPkt    wrapper whose `data` member holds an AVPacket*
/// @param outFrame output vector; one CustomerMat (BGR24 image + pts) is
///                 appended on success
/// @return true iff at least one frame was produced
bool FFvideoCodec::decode(OnePktPtr &inPkt,
                          std::vector<CustomerMat> &outFrame) {
  // The CUDA pixel format keeps decoded data on the GPU; forcing NV12 makes
  // the decoder download it to host memory so sws_scale can consume it.
  auto inPacket = reinterpret_cast<AVPacket *>(inPkt->data); // NOLINT
  // NOTE(review): pix_fmt is reassigned on every call, after avcodec_open2()
  // has already run — confirm the decoder honours this; it is normally set
  // before opening the codec.
  m_codecCtx->pix_fmt = (m_param.gpu >= 0) ? AV_PIX_FMT_NV12 /*AV_PIX_FMT_CUDA*/
                                           : m_codecCtx->pix_fmt;
  auto ret = avcodec_send_packet(m_codecCtx, inPacket);
  if (ret < 0) {
    av_strerror(ret, m_errBuf, ERR_BUF_LEN);
    LOG_ERROR("avcodec_send_packet < 0 of {} for {}, closing ...", ret,
              m_errBuf);
    return false;
  }

  while (ret >= 0) { // exits via `break` after the first decoded frame
    ret = avcodec_receive_frame(m_codecCtx, m_frame);
    if (ret < 0) { // AVERROR(EAGAIN) and AVERROR_EOF are negative too
      return false;
    }

    auto policy = getCapturePolicy();
    int width = m_frame->width;
    int height = m_frame->height;
    unsigned long resolution = size_t(height) * size_t(width);
    auto pixFmt = AVPixelFormat(m_frame->format);

    if (m_blackFrame.empty()) {
      // Lazily allocated placeholder reused for skipped frames.
      m_blackFrame = cv::Mat(height, width, CV_8UC3);
    }
    cv::Mat cpuMat;

    if (m_param.gpu >= 0) {
      // NV12 frame is already in host memory (pix_fmt forced above):
      // convert to BGR24 on the CPU with swscale.
      sws_scale(m_imgcvtCtx,
                static_cast<uint8_t const *const *>(m_frame->data),
                m_frame->linesize, 0, height, m_bgrFrame->data,
                m_bgrFrame->linesize);

      cv::Mat bgrImg(height, width, CV_8UC3, static_cast<uchar *>(m_outbuf),
                     static_cast<size_t>(width * 3));
      // Deep copy — m_outbuf is reused for the next frame.
      cpuMat = bgrImg.clone();
      img::setCvImgFlag(BLACK_FRAME_BIT, cpuMat, false);
    } else {
      // NOTE(review): this branch runs when m_param.gpu < 0, yet it calls
      // cv::cuda::setDevice(m_param.gpu) with that negative index — the
      // condition above looks inverted relative to the intended CPU/GPU
      // split; confirm which branch is meant to be the GPU path.
      if (m_gpuImg.empty()) {
        cv::cuda::setDevice(m_param.gpu);        // first frame only
        m_gpuImg.create(height, width, CV_8UC3); // allocate a BGR24 GPU image
        m_gpuImg.step = size_t(m_bgrFrame->linesize[0]);
        LOG_INFO(
            "frame height = {}, width = {}, pix format = {}, linsize0 = {}, "
            "linesize1 = {}, gpu image step = {}",
            height, width, pixFmt, m_frame->linesize[0], m_frame->linesize[1],
            m_bgrFrame->linesize[0]);
      }

      if (m_skipIdx == 0 || !policy.useBlackFrame) {
        cv::cuda::setDevice(m_param.gpu);
        xassert(pixFmt == AV_PIX_FMT_NV12 || pixFmt == AV_PIX_FMT_CUDA);
        xassert(!m_gpuImg.empty());
        // NV12 -> BGR24 on the GPU, then download the result to host memory.
        flow::img::Nv12ToBGR24(m_frame->data[0], m_frame->data[1],
                               m_gpuImg.data, resolution, m_frame->height,
                               m_frame->width, m_frame->linesize[0],
                               m_frame->linesize[1]);
        m_gpuImg.download(cpuMat);

        img::setCvImgFlag(BLACK_FRAME_BIT, cpuMat, false);
      } else {
        // Skipped frame: emit the shared black placeholder instead.
        cpuMat = m_blackFrame;
        img::setCvImgFlag(BLACK_FRAME_BIT, cpuMat, true);
      }
    }

    // Common tail (previously duplicated in both branches): tag the frame
    // according to the value of m_skipIdx and emit it.
    auto isKeyFrame = (m_skipIdx == 0);
    img::setCvImgFlag(KEY_FRAME_BIT, cpuMat, isKeyFrame);
    CustomerMat custMat{cpuMat, m_frame->pts};
    outFrame.push_back(custMat);
    m_skipIdx = (m_skipIdx + 1) % (policy.skip + 1);
    break; // one frame per decode() call
  }

  av_frame_unref(m_frame);
  return (!outFrame.empty());
}

参考:https://www.jianshu.com/p/ad05a94001b4