Implementing distant-view scene detail in Unity
Problem:
When rendering thin, elongated objects seen from far away (such as the boundary lines of a distant football pitch), some of the line segments go missing, and moving the camera makes the lines flicker, so the whole line looks broken, something like the image below:
What I want is to fill the lines back in with as little aliasing as possible, something like this:
Cause:
The further away an object is, the fewer pixels are available to represent it.
Whether the projection is perspective or orthographic, a distant object ends up covering only a very small screen area. In some places a feature cannot even fill a single pixel, so those pixels are simply dropped.
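To put a rough number on it (my own illustration, not from the original): with a perspective camera, a 60° vertical field of view and a 1080-pixel-high viewport, one pixel covers about 2 · d · tan(30°) / 1080 of world space at distance d, which is roughly 10 cm at d = 100 m, so a boundary line only a few centimetres wide falls well below one pixel and is dropped or flickers as the camera moves.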
Implementation:
Since the feature cannot be represented within a single pixel, my idea is to let it spread across several pixels instead. That means sampling the neighborhood of each pixel: for an empty pixel, check whether any nearby pixel is close to white, and if so add that color in.
This dilates the lines by a pixel or so, giving something like this:
That already looks much better; the whole line is now fairly continuous.
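In isolation, the idea looks roughly like this (a simplified sketch for illustration only; it keeps the brightest neighbor with max, while the actual pass further below accumulates neighbors above a threshold and gates everything on a custom depth texture):

// Conceptual dilation sketch: if the centre pixel is empty but a neighbour one texel
// away carries the (near-white) line colour, let that colour bleed into the centre.
half4 DilateSample(sampler2D tex, float2 uv, float2 texelSize)
{
    half4 c = tex2D(tex, uv);
    c = max(c, tex2D(tex, uv + float2( texelSize.x, 0)));
    c = max(c, tex2D(tex, uv + float2(-texelSize.x, 0)));
    c = max(c, tex2D(tex, uv + float2(0,  texelSize.y)));
    c = max(c, tex2D(tex, uv + float2(0, -texelSize.y)));
    return c;
}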
The core logic under URP:
First, a command buffer (wrapped in a ScriptableRenderPass) is used to capture the rendering of the pitch floor; I defined a FootBallScene LightMode tag for it.
The pass looks like this:
using System;
namespace UnityEngine.Rendering.Universal.Internal
{
/// <summary>
/// Render all objects whose shader has a 'FootBallScene' pass into a separate color target.
///
/// The resulting texture is later dilated and blurred to fill in the thin pitch lines.
/// </summary>
public class FootBallScenePass : ScriptableRenderPass
{
int kDepthBufferBits = 32;
private RenderTargetHandle depthAttachmentHandle { get; set; }
internal RenderTextureDescriptor descriptor { get; private set; }
FilteringSettings m_FilteringSettings;
string m_ProfilerTag = "FootBallScene";
ShaderTagId m_ShaderTagId = new ShaderTagId("FootBallScene");
/// <summary>
/// Create the FootBallScenePass
/// </summary>
public FootBallScenePass(RenderPassEvent evt, RenderQueueRange renderQueueRange, LayerMask layerMask)
{
m_FilteringSettings = new FilteringSettings(renderQueueRange, layerMask);
renderPassEvent = evt;
}
/// <summary>
/// Configure the pass
/// </summary>
public void Setup(
RenderTextureDescriptor baseDescriptor,
RenderTargetHandle depthAttachmentHandle)
{
this.depthAttachmentHandle = depthAttachmentHandle;
baseDescriptor.colorFormat = RenderTextureFormat.ARGB4444;
baseDescriptor.depthBufferBits = kDepthBufferBits;
// This pass doesn't use MSAA
baseDescriptor.msaaSamples = 1;
descriptor = baseDescriptor;
}
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
cmd.GetTemporaryRT(depthAttachmentHandle.id, descriptor, FilterMode.Point);
ConfigureTarget(depthAttachmentHandle.Identifier());
ConfigureClear(ClearFlag.All, Color.black);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
CommandBuffer cmd = CommandBufferPool.Get(m_ProfilerTag);
using (new ProfilingSample(cmd, m_ProfilerTag))
{
context.ExecuteCommandBuffer(cmd);
cmd.Clear();
var sortFlags = renderingData.cameraData.defaultOpaqueSortFlags;
var drawSettings = CreateDrawingSettings(m_ShaderTagId, ref renderingData, sortFlags);
drawSettings.perObjectData = PerObjectData.None;
ref CameraData cameraData = ref renderingData.cameraData;
Camera camera = cameraData.camera;
if (cameraData.isStereoEnabled)
context.StartMultiEye(camera);
context.DrawRenderers(renderingData.cullResults, ref drawSettings, ref m_FilteringSettings);
}
context.ExecuteCommandBuffer(cmd);
CommandBufferPool.Release(cmd);
}
/// <inheritdoc/>
public override void FrameCleanup(CommandBuffer cmd)
{
if (cmd == null)
throw new ArgumentNullException("cmd");
if (depthAttachmentHandle != RenderTargetHandle.CameraTarget)
{
cmd.ReleaseTemporaryRT(depthAttachmentHandle.id);
depthAttachmentHandle = RenderTargetHandle.CameraTarget;
}
}
}
}
Then add a pass to the shader:
Pass
{
Name "FootBallScene"
Tags{"LightMode" = "FootBallScene"}
ZWrite On
ColorMask RGB
Cull Off
HLSLPROGRAM
// Required to compile gles 2.0 with standard srp library
#pragma prefer_hlslcc gles
#pragma exclude_renderers d3d11_9x
#pragma target 2.0
#pragma vertex FootBallSceneVertex
#pragma fragment FootBallSceneFragment
#include "UnlitInput.hlsl"
struct Attributes
{
float4 positionOS : POSITION;
float2 uv : TEXCOORD0;
};
struct Varyings
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
Varyings FootBallSceneVertex(Attributes input)
{
Varyings output = (Varyings)0;
VertexPositionInputs vertexInput = GetVertexPositionInputs(input.positionOS.xyz);
output.vertex = vertexInput.positionCS;
output.uv = TRANSFORM_TEX(input.uv, _BaseMap);
return output;
}
half4 FootBallSceneFragment(Varyings input) : SV_Target
{
return SampleAlbedoAlpha(input.uv, TEXTURE2D_ARGS(_BaseMap, sampler_BaseMap));
}
ENDHLSL
}
This gives us our scene texture.
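The article doesn't show how the pass gets hooked into the renderer, so here is a minimal sketch of one way to register it (my assumption, using the same older RenderTargetHandle-based URP API as the pass above; the feature name and the AfterRenderingOpaques event are placeholders). Naming the temporary RT _FootballTexture also makes it readable as a global texture by the blur shader later:

using UnityEngine;
using UnityEngine.Rendering.Universal;
using UnityEngine.Rendering.Universal.Internal;

// Hypothetical renderer feature that enqueues the FootBallScene pass every frame.
public class FootBallSceneFeature : ScriptableRendererFeature
{
    public LayerMask layerMask = -1;
    public RenderPassEvent passEvent = RenderPassEvent.AfterRenderingOpaques; // assumed; pick what suits your frame

    FootBallScenePass m_Pass;
    RenderTargetHandle m_Handle;

    public override void Create()
    {
        m_Pass = new FootBallScenePass(passEvent, RenderQueueRange.opaque, layerMask);
        // Naming the temporary RT lets later shaders sample it as a global texture.
        m_Handle.Init("_FootballTexture");
    }

    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        m_Pass.Setup(renderingData.cameraData.cameraTargetDescriptor, m_Handle);
        renderer.EnqueuePass(m_Pass);
    }
}

The two passes introduced below (PosZ and the post-process blit) would be created and enqueued from the same feature in the same way.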
The second step is to get the depth (Z) information. My approach is similar to rendering a depth texture, but computed my own way: I use the distance between the camera's world position and the current pixel's world position as the depth value.
The command buffer is much the same:
using System;
namespace UnityEngine.Rendering.Universal.Internal
{
/// <summary>
/// Render all objects whose shader has a 'PosZ' pass, writing the camera-to-pixel
/// distance into a color target that serves as a custom depth texture.
/// </summary>
public class PosZPass : ScriptableRenderPass
{
int kDepthBufferBits = 32;
private RenderTargetHandle depthAttachmentHandle { get; set; }
internal RenderTextureDescriptor descriptor { get; private set; }
FilteringSettings m_FilteringSettings;
string m_ProfilerTag = "Pos Z";
ShaderTagId m_ShaderTagId = new ShaderTagId("PosZ");
/// <summary>
/// Create the PosZPass
/// </summary>
public PosZPass(RenderPassEvent evt, RenderQueueRange renderQueueRange, LayerMask layerMask)
{
m_FilteringSettings = new FilteringSettings(renderQueueRange, layerMask);
renderPassEvent = evt;
}
/// <summary>
/// Configure the pass
/// </summary>
public void Setup(
RenderTextureDescriptor baseDescriptor,
RenderTargetHandle depthAttachmentHandle)
{
this.depthAttachmentHandle = depthAttachmentHandle;
baseDescriptor.colorFormat = RenderTextureFormat.ARGB4444;
baseDescriptor.depthBufferBits = kDepthBufferBits;
// This pass doesn't use MSAA
baseDescriptor.msaaSamples = 1;
descriptor = baseDescriptor;
}
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
cmd.GetTemporaryRT(depthAttachmentHandle.id, descriptor, FilterMode.Point);
ConfigureTarget(depthAttachmentHandle.Identifier());
ConfigureClear(ClearFlag.All, Color.black);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
CommandBuffer cmd = CommandBufferPool.Get(m_ProfilerTag);
using (new ProfilingSample(cmd, m_ProfilerTag))
{
context.ExecuteCommandBuffer(cmd);
cmd.Clear();
var sortFlags = renderingData.cameraData.defaultOpaqueSortFlags;
var drawSettings = CreateDrawingSettings(m_ShaderTagId, ref renderingData, sortFlags);
drawSettings.perObjectData = PerObjectData.None;
ref CameraData cameraData = ref renderingData.cameraData;
Camera camera = cameraData.camera;
if (cameraData.isStereoEnabled)
context.StartMultiEye(camera);
context.DrawRenderers(renderingData.cullResults, ref drawSettings, ref m_FilteringSettings);
}
context.ExecuteCommandBuffer(cmd);
CommandBufferPool.Release(cmd);
}
/// <inheritdoc/>
public override void FrameCleanup(CommandBuffer cmd)
{
if (cmd == null)
throw new ArgumentNullException("cmd");
if (depthAttachmentHandle != RenderTargetHandle.CameraTarget)
{
cmd.ReleaseTemporaryRT(depthAttachmentHandle.id);
depthAttachmentHandle = RenderTargetHandle.CameraTarget;
}
}
}
}
Then add a pass to the shader:
Pass
{
Name "PosZ"
Tags{"LightMode" = "PosZ"}
ZWrite On
ColorMask RGB
Cull Off
HLSLPROGRAM
// Required to compile gles 2.0 with standard srp library
#pragma prefer_hlslcc gles
#pragma exclude_renderers d3d11_9x
#pragma target 2.0
#pragma vertex PosZVertex
#pragma fragment PosZFragment
#include "UnlitInput.hlsl"
struct Attributes
{
float4 positionOS : POSITION;
float2 uv : TEXCOORD0;
};
struct Varyings
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float3 vertexW : TEXCOORD1;
};
half4 _CameraPos;
Varyings PosZVertex(Attributes input)
{
Varyings output = (Varyings)0;
VertexPositionInputs vertexInput = GetVertexPositionInputs(input.positionOS.xyz);
output.vertex = vertexInput.positionCS;
output.vertexW = vertexInput.positionWS;
output.uv = TRANSFORM_TEX(input.uv, _BaseMap);
return output;
}
half4 PosZFragment(Varyings input) : SV_Target
{
// Distance from camera to pixel, normalized by an assumed 200-unit far range.
return half4(distance(_CameraPos.xyz, input.vertexW) / 200, 0, 0, 1);
}
ENDHLSL
}
This gives us the information we need in that texture.
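One thing the snippets above do not show is where _CameraPos comes from: the PosZ fragment shader reads it, but nothing in the C# sets it. A minimal assumption (not necessarily how the author did it) is to set it as a global vector inside PosZPass.Execute, before DrawRenderers:

// Inside PosZPass.Execute, before context.DrawRenderers (an assumed addition, not the
// author's original code): feed the camera's world position to the PosZ shader.
Camera camera = renderingData.cameraData.camera;
cmd.SetGlobalVector("_CameraPos", camera.transform.position);
context.ExecuteCommandBuffer(cmd);
cmd.Clear();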
Scene texture:
Depth texture:
Next we need to decide which pixels to dilate. The threshold is up to you; I chose to process pixels whose depth value is greater than 0.2 (with the /200 normalization above, that is roughly everything farther than 40 units from the camera). The pixels are then written with a blit:
cmd.Blit(source, opaqueColorRT, blurMat, 0);
There is still one problem, though: the lines have fairly obvious aliasing, so we also blur them a little, which is what the extra pass below is for.
The complete command buffer is:
using System;
namespace UnityEngine.Rendering.Universal.Internal
{
/// <summary>
/// Post-process pass: dilates the FootBallScene texture where the custom depth marks
/// pixels as distant, then blurs the result into the destination target.
/// </summary>
public class FootBallScenePostProgressPass : ScriptableRenderPass
{
private Material blurMat = null;
private RenderTexture mCurrPreRt;
int m_SampleOffsetShaderHandle;
Downsampling m_DownsamplingMethod;
private RenderTargetIdentifier source { get; set; }
private RenderTargetHandle destination { get; set; }
const string m_ProfilerTag = "FootBallScenePostProgressPass";
/// <summary>
/// Create the FootBallScenePostProgressPass
/// </summary>
public FootBallScenePostProgressPass(RenderPassEvent evt)
{
m_SampleOffsetShaderHandle = Shader.PropertyToID("_SampleOffset");
renderPassEvent = evt;
m_DownsamplingMethod = Downsampling.None;
if (mCurrPreRt != null)
{
mCurrPreRt.DiscardContents();
mCurrPreRt.Release();
RenderTexture.ReleaseTemporary(mCurrPreRt);
}
}
/// <summary>
/// Configure the pass with the source and destination to execute on.
/// </summary>
/// <param name="source">Source Render Target</param>
/// <param name="destination">Destination Render Target</param>
public void Setup(RenderTargetIdentifier source, RenderTargetHandle destination, Downsampling downsampling)
{
this.source = source;
this.destination = destination;
m_DownsamplingMethod = downsampling;
}
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
RenderTextureDescriptor descriptor = cameraTextureDescriptor;
descriptor.msaaSamples = 1;
descriptor.depthBufferBits = 0;
if (m_DownsamplingMethod == Downsampling._2xBilinear)
{
descriptor.width /= 2;
descriptor.height /= 2;
}
else if (m_DownsamplingMethod == Downsampling._4xBox || m_DownsamplingMethod == Downsampling._4xBilinear)
{
descriptor.width /= 4;
descriptor.height /= 4;
}
cmd.GetTemporaryRT(destination.id, descriptor, m_DownsamplingMethod == Downsampling.None ? FilterMode.Point : FilterMode.Bilinear);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
CommandBuffer cmd = CommandBufferPool.Get(m_ProfilerTag);
RenderTargetIdentifier opaqueColorRT = destination.Identifier();
ref CameraData cameraData = ref renderingData.cameraData;
if (mCurrPreRt == null)
{
blurMat = new Material(Shader.Find("Hidden/VT_Blur"));
mCurrPreRt = RenderTexture.GetTemporary(cameraData.camera.pixelWidth, cameraData.camera.pixelHeight, 0,
RenderTextureFormat.ARGB4444);
}
cmd.Blit(source, mCurrPreRt, blurMat, 0);
//cmd.Blit(mCurrPreRt, opaqueColorRT, blurMat, 1);
//Blit(cmd, source, mCurrPreRt, blurMat, 0);
Blit(cmd, mCurrPreRt, opaqueColorRT, blurMat, 1);
mCurrPreRt.DiscardContents();
mCurrPreRt.Release();
context.ExecuteCommandBuffer(cmd);
CommandBufferPool.Release(cmd);
}
/// <inheritdoc/>
public override void FrameCleanup(CommandBuffer cmd)
{
if (cmd == null)
throw new ArgumentNullException("cmd");
if (destination != RenderTargetHandle.CameraTarget)
{
cmd.ReleaseTemporaryRT(destination.id);
destination = RenderTargetHandle.CameraTarget;
}
}
}
}
The full shader is:
Shader "Hidden/VT_Blur" {
Properties{
_MainTex("Albedo Tex", 2D) = "white" {}
_TintColor("TintColor",color) = (1,1,1,1)
}
Subshader{
Tags{ "RenderType" = "Transparent" "Queue" = "Transparent" "IgnoreProjector" = "true" }
Pass
{
Blend One Zero
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _FootballTexture;
sampler2D _CameraPosZTexture;
CBUFFER_START(UnityPerMaterial)
float4 _FootballTexture_ST;
float4 _FootballTexture_TexelSize;
CBUFFER_END
struct appdata
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 disuv : TEXCOORD0;
float4 vertexV : TEXCOORD1;
float4 uv01 : TEXCOORD2;
float4 uv23 : TEXCOORD3;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.disuv = TRANSFORM_TEX(v.texcoord, _FootballTexture);
o.vertexV = mul(UNITY_MATRIX_V, mul(unity_ObjectToWorld, v.vertex));
//#if UNITY_UV_STARTS_AT_TOP
// o.disuv.y = 1 - o.disuv.y;
//#endif
half4 _offsets = o.vertexV.y * 2 * _FootballTexture_TexelSize.xyxy;
o.uv01 = o.disuv.xyxy + _offsets.xyxy * float4(1, 1, -1, -1);
o.uv23 = o.disuv.xyxy + _offsets.xyxy * float4(1, 0, -1, 0);
return o;
}
float4 frag(v2f i) : SV_Target
{
half4 posTex = tex2D(_CameraPosZTexture, i.disuv);
half4 mainTex = tex2D(_FootballTexture, i.disuv);
half posR = 1;//posTex.r + 0.2;
if (posTex.r > 0.2)
{
half4 mainTex01xy = tex2D(_FootballTexture, i.uv01.xy);
half4 mainTex01zw = tex2D(_FootballTexture, i.uv01.zw);
half4 mainTex23xy = tex2D(_FootballTexture, i.uv23.xy);
half4 mainTex23zw = tex2D(_FootballTexture, i.uv23.zw);
float edge = 0.2;
half4 color = half4(0, 0, 0, 0);
if (mainTex01xy.r > edge &&
mainTex01xy.g > edge &&
mainTex01xy.b > edge)
{
color += mainTex01xy * posR;
}
if (mainTex01zw.r > edge &&
mainTex01zw.g > edge &&
mainTex01zw.b > edge)
{
color += mainTex01zw * posR;
}
if (mainTex23xy.r > edge &&
mainTex23xy.g > edge &&
mainTex23xy.b > edge)
{
color += mainTex23xy * posR;
}
if (mainTex23zw.r > edge &&
mainTex23zw.g > edge &&
mainTex23zw.b > edge)
{
color += mainTex23zw * posR;
}
return color;
}
return half4(0,0,0,0);//half4(i.vertexV.y, 0,0,1);
}
ENDCG
}
Pass
{
Blend One Zero
ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _CameraPosZTexture;
sampler2D _MainTex;
CBUFFER_START(UnityPerMaterial)
float4 _MainTex_ST;
float4 _MainTex_TexelSize;
float _SceneZBlurIntensity;
CBUFFER_END
struct appdata
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 disuv : TEXCOORD0;
float4 vertexV : TEXCOORD1;
float4 uv01 : TEXCOORD2;
float4 uv23 : TEXCOORD3;
float4 uv45 : TEXCOORD4;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.vertexV = mul(unity_ObjectToWorld, v.vertex);
o.disuv = TRANSFORM_TEX(v.texcoord, _MainTex);
half4 _offsets = 1 * _MainTex_TexelSize.xyxy;
o.uv01 = o.disuv.xyxy + _offsets.xyxy * float4(1, 1, -1, -1);
o.uv23 = o.disuv.xyxy + _offsets.xyxy * float4(1, 1, -1, -1) * 2.0;
o.uv45 = o.disuv.xyxy + _offsets.xyxy * float4(1, 1, -1, -1) * 3.0;
return o;
}
float4 frag(v2f i) : SV_Target
{
half4 color = float4(0, 0, 0, 0);
color += 0.40 * tex2D(_MainTex, i.disuv);
color += 0.15 * tex2D(_MainTex, i.uv01.xy);
color += 0.15 * tex2D(_MainTex, i.uv01.zw);
color += 0.10 * tex2D(_MainTex, i.uv23.xy);
color += 0.10 * tex2D(_MainTex, i.uv23.zw);
color += 0.05 * tex2D(_MainTex, i.uv45.xy);
color += 0.05 * tex2D(_MainTex, i.uv45.zw);
return half4(color.rgb * _SceneZBlurIntensity, 1);
}
ENDCG
}
}
}
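Note that the blur shader samples _FootballTexture and _CameraPosZTexture and multiplies the result by _SceneZBlurIntensity, yet none of these are bound in the C# shown above. If the temporary RTs are named to match (as in the feature sketch earlier) they are readable as global textures; the intensity still has to be set somewhere, otherwise an unset global float defaults to 0 and the blurred result comes out black. A one-line assumption:

// Set once at startup (assumed value; tune to taste).
Shader.SetGlobalFloat("_SceneZBlurIntensity", 1.0f);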
The final result:
Of course, both the dilation and the blur add quite a few extra texture samples, so the cost goes up a bit. If you need it cheaper, you can use a less expensive variant, such as a simple box (mean) blur.
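As a reference for that cheaper route, a minimal 4-tap box-blur fragment function might look like this (an illustration only, reusing the _MainTex / _MainTex_TexelSize and v2f declarations from the second pass above):

// Simple 4-tap box (mean) blur: average the four diagonal neighbours one texel away.
// Half the samples of the weighted 7-tap blur above, at the cost of a blockier result.
float4 fragBox(v2f i) : SV_Target
{
    float2 t = _MainTex_TexelSize.xy;
    half4 color = tex2D(_MainTex, i.disuv + float2( t.x,  t.y));
    color += tex2D(_MainTex, i.disuv + float2(-t.x,  t.y));
    color += tex2D(_MainTex, i.disuv + float2( t.x, -t.y));
    color += tex2D(_MainTex, i.disuv + float2(-t.x, -t.y));
    return half4(color.rgb * 0.25 * _SceneZBlurIntensity, 1);
}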
Original article: https://blog.csdn.net/llsansun/article/details/110143360