
Capturing video (screen recording) in Unity, encoding and sending it, with performance tests


Method 1: the most common approach. Set the camera's targetTexture to a RenderTexture, read the pixels back into a Texture2D, and then get the image bytes from the Texture2D.

Pros: simple and straightforward.

Cons: in our tests, ReadPixels + GetRawTextureData takes about 10-20 ms per frame, which lowers the FPS.


        IEnumerator CaptureFrame()
        {
            // Wait until rendering for this frame has finished before reading pixels.
            yield return new WaitForEndOfFrame();

            if (depthCamera.targetTexture == null)
            {
                _tempRT = new RenderTexture(_width, _height, 24, GetTargetFormat(depthCamera));

                //_tempRT.antiAliasing = GetAntiAliasingLevel(depthCamera);
                depthCamera.targetTexture = _tempRT;
                _screenShot = new Texture2D(_tempRT.width, _tempRT.height, TextureFormat.RGBA32, false);
            }

            // Read the rendered pixels back from the GPU into the Texture2D.
            RenderTexture.active = _tempRT;
            _screenShot.ReadPixels(new Rect(0, 0, _tempRT.width, _tempRT.height), 0, 0);
            _screenShot.Apply();
            RenderTexture.active = null;

            //string fileName = "test1234.png";
            //System.IO.File.WriteAllBytes(fileName, _screenShot.EncodeToPNG());

            byte[] videoBytes = _screenShot.GetRawTextureData();

            // Encode videoBytes and send them to the client.
            encode(videoBytes);
        }
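To reproduce the 10-20 ms figure above, here is a minimal timing sketch. It reuses the fields from the snippet (_tempRT, _screenShot) and wraps the readback in a System.Diagnostics.Stopwatch; the helper name ReadBackTimed is hypothetical, not part of the original project.

        // Hypothetical helper reusing _tempRT and _screenShot from the snippet above.
        // Requires "using System.Diagnostics;" for Stopwatch.
        byte[] ReadBackTimed()
        {
            var sw = Stopwatch.StartNew();

            RenderTexture.active = _tempRT;
            _screenShot.ReadPixels(new Rect(0, 0, _tempRT.width, _tempRT.height), 0, 0);
            _screenShot.Apply();
            RenderTexture.active = null;
            byte[] videoBytes = _screenShot.GetRawTextureData();

            sw.Stop();
            // In our tests this section came out at roughly 10-20 ms per frame.
            UnityEngine.Debug.Log(string.Format("ReadPixels + GetRawTextureData took {0} ms", sw.ElapsedMilliseconds));
            return videoBytes;
        }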

Method 2: use Unity's AsyncGPUReadback API to fetch the image data asynchronously. In our tests, this did not noticeably improve efficiency.

        void remoteRender2(msgStrcut msgCmd)
        {
            if (depthCamera.targetTexture == null)
            {
                _tempRT = new RenderTexture(_width, _height, 24, GetTargetFormat(depthCamera));

                //_tempRT.antiAliasing = GetAntiAliasingLevel(depthCamera);
                depthCamera.targetTexture = _tempRT;
            }

            // Package the frame metadata together with the camera's target texture
            // and queue it for asynchronous GPU readback.
            GPUReadBackData data = new GPUReadBackData(msgCmd.playID, msgCmd.frameID, msgCmd.timestampUs, depthCamera.targetTexture);
            gpuReadBack.PushFrame(data);
        }

    public class ArAsyncGPUReadback
    {
        private List<GPUReadBackData> _readbackQueue = new List<GPUReadBackData>(4);
        private int count = 0;
        private Int64 totalTime = 0;

        public void PushFrame(GPUReadBackData source)
        {
            ProcessQueue();
            if (source != null) QueueFrame(source);
        }

        public void QueueFrame(GPUReadBackData source)
        {
            if (_readbackQueue.Count > 6)
            {
                Debug.LogWarning("Too many GPU readback requests.");
                return;
            }

            // Blit to a temporary texture and request readback on it.
            var rt = RenderTexture.GetTemporary
                (source.texture.width, source.texture.height, 0, RenderTextureFormat.ARGB32);
            Graphics.Blit(source.texture, rt);
            source.rq = AsyncGPUReadback.Request(rt);
            _readbackQueue.Add(source);
            // The readback command has already been queued on the GPU, so the
            // temporary render texture can be returned to the pool immediately.
            RenderTexture.ReleaseTemporary(rt);
        }

        public void ProcessQueue()
        {
            while (_readbackQueue.Count > 0)
            {
                // Check if the first entry in the queue is completed.
                if (!_readbackQueue[0].rq.done)
                {
                    // Detect out-of-order case (the second entry in the queue
                    // is completed before the first entry).
                    if (_readbackQueue.Count > 1 && _readbackQueue[1].rq.done)
                    {
                        // We can't allow the out-of-order case, so force it to
                        // be completed now.
                        _readbackQueue[0].rq.WaitForCompletion();
                    }
                    else
                    {
                        // Nothing to do with the queue.
                        break;
                    }
                }

                // Retrieve the first entry in the queue.
                var readData = _readbackQueue[0];
                _readbackQueue.RemoveAt(0);

                // Error detection
                if (readData.rq.hasError)
                {
                    Debug.LogWarning("GPU readback error was detected.");
                    continue;
                }

                // Wrap the raw bytes with the capture timestamp; this is what
                // then gets encoded and sent to the client.
                FrameData data = new FrameData(readData.timestampUs, readData.rq.GetData<byte>().ToArray());

                count++;
                Int64 end = arServer.GetTimeStamp(true);

                totalTime += (end - readData.initTimestamp);
                if (totalTime >= 30000)
                {
                    string msg = string.Format("reading bytes back from the texture took {0} ms on average", totalTime / count);
                    count = 0;
                    totalTime = 0;
                    Debug.Log(msg);
                }
            }
        }
    }
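The GPUReadBackData container referenced above is not shown in the article. A plausible reconstruction, with all field names and types inferred from how the snippet uses them (so treat it as an assumption, not the original class):

        // Hypothetical reconstruction of GPUReadBackData, inferred from usage above.
        // Requires "using UnityEngine.Rendering;" for AsyncGPUReadbackRequest.
        public class GPUReadBackData
        {
            public int playID;                  // which client/stream the frame belongs to
            public int frameID;                 // frame sequence number
            public long timestampUs;            // capture timestamp in microseconds
            public Texture texture;             // source render texture to read back
            public AsyncGPUReadbackRequest rq;  // filled in by QueueFrame
            public Int64 initTimestamp;         // enqueue time, used for the latency stats

            public GPUReadBackData(int playID, int frameID, long timestampUs, Texture texture)
            {
                this.playID = playID;
                this.frameID = frameID;
                this.timestampUs = timestampUs;
                this.texture = texture;
                // Assumes arServer is reachable here, as it is in ProcessQueue above.
                this.initTimestamp = arServer.GetTimeStamp(true);
            }
        }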

Method 3: this is the approach we recommend. Unity renders on the GPU, and if we also encode on the GPU we skip the copy of frame data from GPU to CPU that would otherwise precede encoding, which reduces the end-to-end latency of each frame. We pass the texture's native pointer directly to uNvEncoder.


        public bool Encode(Texture texture, long timestamp, bool forceIdrFrame)
        {
            if (!texture)
            {
                Debug.LogError("The given texture is invalid.");
                return false;
            }

            // Hand the native GPU texture pointer straight to the encoder so the
            // frame never has to be copied back to the CPU.
            var ptr = texture.GetNativeTexturePtr();
            if (!Encode(ptr, timestamp, forceIdrFrame))
            {
                Debug.LogError(error);
                return false;
            }

            timestampMap.Add(timestamp, arServer.GetTimeStamp(true));

            return true;
        }

        public bool Encode(System.IntPtr ptr, long timestamp, bool forceIdrFrame)
        {
            if (ptr == System.IntPtr.Zero)
            {
                Debug.LogError("The given texture pointer is invalid.");
                return false;
            }

            if (!isValid)
            {
                Debug.LogError("uNvEncoder has not been initialized yet.");
                return false;
            }

            // Native plugin call: encodes the GPU texture referenced by ptr.
            var result = Lib.Encode(id, ptr, timestamp, forceIdrFrame);
            if (!result)
            {
                Debug.LogError(error);
            }

            return result;
        }
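A hedged usage sketch for the wrapper above: each frame, hand the camera's target texture to the encoder. The driver method, the _frameCount field, and the once-per-30-frames IDR policy are illustrative only; see the uNvEncoder repository in the references for the real initialization API.

        // Illustrative driver for the Encode() wrapper above; names are hypothetical.
        private int _frameCount = 0;

        void LateUpdate()
        {
            if (depthCamera.targetTexture == null) return;

            long timestamp = arServer.GetTimeStamp(true);
            bool forceIdr = (_frameCount % 30 == 0);  // e.g. request an IDR frame every 30 frames
            Encode(depthCamera.targetTexture, timestamp, forceIdr);
            _frameCount++;
        }

Because the encode happens inside the native plugin, only the texture pointer crosses the managed/native boundary; the pixel data itself stays on the GPU.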

Comparing the three methods: we need to capture the screen data, encode it, and send it to the client. Based on our test results, the third method yields the smallest end-to-end frame latency.

References:

https://devblogs.microsoft.com/cse/2019/03/19/real-time-streaming-of-3d-enterprise-applications-from-the-cloud-to-low-powered-devices/

https://3dstreamingtoolkit.github.io/docs-3dstk/#getting-started

https://github.com/hecomi/uNvEncoder

http://ntown.at/knowledgebase/cuda-gpu-accelerated-h264-h265-hevc-video-encoding-with-ffmpeg/
