UGUI中实现多边形网格显示图形
程序员文章站
2022-05-30 17:37:47
...
UGUI中实现多边形网格显示图形
在使用UGUI Image组件显示不规则图片、镂空图片时,Image总是会创建一个四边形网格来显示图形,渲染过程中,GPU需要对完全透明的区域进行计算,这不利于性能的优化,一个解决办法是采用多边形网格显示图形,来减少这种不必要的消耗。
下面是Image组件和多边形显示组件的网格对比
下面是Image和多边形的Overdraw对比
整个方案的实现过程包括以下几个步骤:
1. 生成图集,这里推荐使用 Texture Packer,这里要求导出 tpsheet 格式。
2. 导入图集、生成多边形,这里需要从AssetStore下载TexturePackerImporter(已经包含在项目中)。导入插件之后,将 tpsheet 文件和图集一起导入项目中,导入之后TexturePackerImporter会自动将图集转换成带多边形的Sprite。
3. 使用UIPolyImage组件替换 Image 组件。(该组件目前只支持 Simple 模式)
如果需要根据图形做射线检测,在 UIPolyImage/Image 组件上添加PolyRaycastFilter 组件。因为该组件需要读取贴图的像素,所以需要将贴图的 readAndWrite 属性勾选。
示例项目:这里
多边形Image组件
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
namespace UnityEngine.UI
{
/// <summary>
/// An Image replacement that renders the sprite using its tight polygon
/// mesh (as imported by TexturePackerImporter) instead of the default quad,
/// reducing GPU overdraw on fully transparent regions.
/// Currently only the Simple layout is implemented; Filled/Sliced/Tiled
/// fall back to the same polygon mesh.
/// </summary>
public class UIPolyImage : Image
{
    /// <summary>
    /// Builds the polygon mesh for the current sprite. Falls back to the
    /// stock Image mesh when no sprite is assigned.
    /// </summary>
    protected override void OnPopulateMesh(VertexHelper vh)
    {
        if (overrideSprite == null)
        {
            base.OnPopulateMesh(vh);
            return;
        }
        switch (type)
        {
            case Type.Simple:
            case Type.Filled:
            case Type.Sliced:
            case Type.Tiled:
                GenerateSimpleSprite(vh, preserveAspect);
                break;
        }
    }

    // Fills the VertexHelper with the sprite's polygon vertices, scaled and
    // offset so that the sprite's bounds map onto the drawing rect computed
    // by GetDrawingDimensions.
    void GenerateSimpleSprite(VertexHelper vh, bool lPreserveAspect)
    {
        // BUGFIX: use overrideSprite (already null-checked by the caller and
        // used by GetDrawingDimensions) rather than sprite, which may be null
        // when only an override sprite has been assigned.
        Sprite activeSprite = overrideSprite;
        Vector4 v = GetDrawingDimensions(lPreserveAspect);
        var color32 = color;
        float width = v.z - v.x;
        float height = v.w - v.y;
        // Normalize the sprite pivot into [0,1] so it is comparable with the
        // RectTransform's normalized pivot.
        Vector2 spritePivot = new Vector2(activeSprite.pivot.x / activeSprite.rect.width, activeSprite.pivot.y / activeSprite.rect.height);
        Vector2 pivotOffset = Vector2.Scale(spritePivot - rectTransform.pivot, new Vector2(width, height));
        // Scale from the sprite's local-space bounds to the drawing rect size.
        Vector3 scale = new Vector3(1, 1, 1);
        scale.x = width / activeSprite.bounds.size.x;
        scale.y = height / activeSprite.bounds.size.y;
        Matrix4x4 trsMT = Matrix4x4.TRS(pivotOffset, Quaternion.identity, scale);
        vh.Clear();
        for (int i = 0; i < activeSprite.vertices.Length; i++)
        {
            vh.AddVert(trsMT.MultiplyPoint3x4(activeSprite.vertices[i]), color32, activeSprite.uv[i]);
        }
        for (int i = 0; i < activeSprite.triangles.Length; i += 3)
        {
            vh.AddTriangle(activeSprite.triangles[i], activeSprite.triangles[i + 1], activeSprite.triangles[i + 2]);
        }
    }

    /// <summary>
    /// The rect (in local space) the sprite is actually drawn into, exposed
    /// for PolyRaycastFilter's hit testing.
    /// </summary>
    public Rect GetDrawingRect()
    {
        Vector4 v = GetDrawingDimensions(preserveAspect);
        return Rect.MinMaxRect(v.x, v.y, v.z, v.w);
    }

    /// Image's dimensions used for drawing. X = left, Y = bottom, Z = right, W = top.
    private Vector4 GetDrawingDimensions(bool shouldPreserveAspect)
    {
        var padding = overrideSprite == null ? Vector4.zero : UnityEngine.Sprites.DataUtility.GetPadding(overrideSprite);
        var size = overrideSprite == null ? Vector2.zero : new Vector2(overrideSprite.rect.width, overrideSprite.rect.height);
        Rect r = GetPixelAdjustedRect();
        int spriteW = Mathf.RoundToInt(size.x);
        int spriteH = Mathf.RoundToInt(size.y);
        // Guard against a degenerate (zero-sized) sprite to avoid division by
        // zero below; fall back to the full rect.
        Vector4 v = (spriteW == 0 || spriteH == 0)
            ? new Vector4(0f, 0f, 1f, 1f)
            : new Vector4(
                padding.x / spriteW,
                padding.y / spriteH,
                (spriteW - padding.z) / spriteW,
                (spriteH - padding.w) / spriteH);
        if (shouldPreserveAspect && size.sqrMagnitude > 0.0f)
        {
            var spriteRatio = size.x / size.y;
            var rectRatio = r.width / r.height;
            if (spriteRatio > rectRatio)
            {
                // Sprite is wider than the rect: shrink the height and shift
                // vertically according to the pivot.
                var oldHeight = r.height;
                r.height = r.width * (1.0f / spriteRatio);
                r.y += (oldHeight - r.height) * rectTransform.pivot.y;
            }
            else
            {
                // Sprite is taller than the rect: shrink the width and shift
                // horizontally according to the pivot.
                var oldWidth = r.width;
                r.width = r.height * spriteRatio;
                r.x += (oldWidth - r.width) * rectTransform.pivot.x;
            }
        }
        // Map the normalized padding box into the (possibly aspect-adjusted)
        // pixel rect.
        v = new Vector4(
            r.x + r.width * v.x,
            r.y + r.height * v.y,
            r.x + r.width * v.z,
            r.y + r.height * v.w
        );
        return v;
    }
}
}
射线检测过滤组件
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;
namespace UnityEngine.UI
{
/// <summary>
/// 用于检测不规则图形的射线检测方法,由于需要进行多边形的射线检测,
/// 效率相对较低,所以,确认自己必须进行不规则射线检测时,才使用
/// 该组件。
/// </summary>
[RequireComponent(typeof(UIPolyImage))]
public class PolyRaycastFilter : MonoBehaviour, ICanvasRaycastFilter
{
    // Lazily cached UIPolyImage on the same GameObject.
    private UIPolyImage image
    {
        get
        {
            if (m_Image == null)
                m_Image = GetComponent<UIPolyImage>();
            return m_Image;
        }
    }

    // Lazily cached RectTransform on the same GameObject.
    private RectTransform rectTransform
    {
        get
        {
            if (m_RectTransform == null)
                m_RectTransform = GetComponent<RectTransform>();
            return m_RectTransform;
        }
    }

    private UIPolyImage m_Image;
    private RectTransform m_RectTransform;

    /// <summary>
    /// Per-pixel raycast test: accepts the hit only when the sprite pixel
    /// under the pointer is not fully transparent.
    /// NOTE: reads the sprite texture via GetPixel, so the texture's
    /// Read/Write Enabled import setting must be on.
    /// </summary>
    /// <param name="sp">Pointer position in screen space.</param>
    /// <param name="eventCamera">Camera used by the event system (null for Screen Space - Overlay).</param>
    public bool IsRaycastLocationValid(Vector2 sp, Camera eventCamera)
    {
        if (image.sprite == null)
            return false;
        // BUGFIX: convert the screen point into local rect space with
        // RectTransformUtility so all canvas render modes work. The previous
        // InverseTransformPoint(sp) treated the screen point as a world
        // point, which is only valid for Screen Space - Overlay.
        Vector2 localP;
        if (!RectTransformUtility.ScreenPointToLocalPointInRectangle(rectTransform, sp, eventCamera, out localP))
            return false;
        Rect pixelRect = image.GetDrawingRect();
        if (pixelRect.width <= 0f || pixelRect.height <= 0f)
            return false;
        if (!pixelRect.Contains(localP, false))
            return false;
        // Map the local point into the sprite's texel coordinates.
        Vector2 delta = localP - pixelRect.min;
        Vector2 normalizedDelta = new Vector2(delta.x / pixelRect.width, delta.y / pixelRect.height);
        Rect spriteRect = image.sprite.rect;
        // Clamp into the sprite rect so CeilToInt cannot sample one texel
        // outside the sprite on the far edges.
        int x = Mathf.Clamp(Mathf.CeilToInt(normalizedDelta.x * spriteRect.width + spriteRect.xMin), (int)spriteRect.xMin, (int)spriteRect.xMax - 1);
        int y = Mathf.Clamp(Mathf.CeilToInt(normalizedDelta.y * spriteRect.height + spriteRect.yMin), (int)spriteRect.yMin, (int)spriteRect.yMax - 1);
        Color pixel = image.sprite.texture.GetPixel(x, y);
        return !Mathf.Approximately(pixel.a, 0f);
    }
}
}