Unity教程:实现视野范围效果

发表于2017-03-16
评论1 6.2k浏览
视野范围效果,在很多游戏中都可以发现这效果的存在,像是 2015 巴哈姆特 ACG 创作大赛游戏组金赏的 “落跑蓝图” 就运用了视野范围效果来作为画面呈现方式,透过显示敌人的警戒范围,玩家需要透过不同的策略来抵达目的地,下面就给大家介绍下unity中的视野范围效果

这次透过几个阶段的实作完成了这个好玩又酷炫的视野范围效果,视野范围侦测、优化侦测方式、程序化 Mesh、模板测试,以下分别为每个阶段的实作进行解说。
PS. 本次实作的环境为 Unity 5.5.0f3

射线资料结构

在进行视野范围侦测之前,需要先定义出射线所包含的资料结构。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
using UnityEngine;

// Holds the state of a single detection ray: its origin, heading, length
// and the outcome of the most recent raycast performed with it.
public class RayData
{
    public Vector3 m_start;
    public float m_distance;
    public float m_angle;
    public Vector3 m_direction;
    public Vector3 m_end;
    public Collider m_hitCollider;
    public bool m_hit;

    public RayData(Vector3 start, float angle, float distance)
    {
        m_start = start;
        m_distance = distance;
        // m_angle defaults to 0, so this establishes the initial heading.
        UpdateDirection(angle);
    }

    // Rotates the ray by the given delta (degrees around Y) and recomputes
    // its direction vector and far end point.
    public void UpdateDirection(float angle)
    {
        m_angle += angle;
        m_direction = DirectionFromAngle(m_angle);
        m_end = m_start + m_direction * m_distance;
    }

    // Converts a Y-axis angle in degrees into a unit direction on the XZ plane.
    private Vector3 DirectionFromAngle(float angle)
    {
        float rad = angle * Mathf.Deg2Rad;
        return new Vector3(Mathf.Sin(rad), 0f, Mathf.Cos(rad));
    }

    // Two rays count as hitting the same object when their recorded colliders
    // are equal (two misses — both null — also count as a match).
    public static bool IsHittingSameObject(RayData data1, RayData data2)
    {
        return data1.m_hitCollider == data2.m_hitCollider;
    }
}

视野范围侦测

将射线的资料定义完成后,就可以开始进行范围侦测,而这个部分也是这次实作中运算最复杂且最核心的部分。
在最一开始先透过简单的运算,计算出射线的起始点、方向与半径。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
// Builds one ray per subdivision across the vision cone, fanning out from
// half the cone angle to the left of the current facing (world Y rotation).
private RayData[] GetOriginalDatas()
{
    RayData[] rayDatas = new RayData[_divide + 1];

    Vector3 origin = transform.position;
    float firstAngle = transform.eulerAngles.y - _angle / 2;
    float step = _angle / _divide;

    for (int i = 0; i <= _divide; i++)
    {
        rayDatas[i] = new RayData(origin, firstAngle + step * i, _radius);
    }

    return rayDatas;
}
接着透过射线的碰撞检测获得碰撞点位置,并重新定义射线。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
// Starts from the evenly spaced cone rays, then refines each one with an
// actual physics raycast so its end point lands on the first obstacle hit.
private RayData[] GetNormalDatas()
{
    RayData[] rayDatas = GetOriginalDatas();

    foreach (RayData rayData in rayDatas)
    {
        UpdateRaycast(rayData);
    }

    return rayDatas;
}
 
// Casts the ray into the scene and stores the outcome on the RayData:
// on a hit, m_end snaps to the hit point and the collider is recorded;
// on a miss, m_end is pushed out to the ray's full length.
// Result of the cast is shared through the _hit field (RaycastHit cache).
private void UpdateRaycast(RayData rayData)
{
    // Use the ray's own origin and length instead of re-reading
    // transform.position / _radius: every RayData in this file is built
    // with exactly those values, so behavior is unchanged, but the method
    // now stays correct for any RayData it is handed.
    rayData.m_hit = Physics.Raycast(rayData.m_start, rayData.m_direction, out _hit, rayData.m_distance);

    if (rayData.m_hit)
    {
        rayData.m_hitCollider = _hit.collider;
        rayData.m_end = _hit.point;
    }
    else
    {
        rayData.m_hitCollider = null;
        rayData.m_end = rayData.m_start + rayData.m_direction * rayData.m_distance;
    }
}

在这边因为射线的密度很低,所以出来的结果不尽理想,试着将射线的密度调高。
优化侦测方式

到这边为止,做出来的效果已经相当近似于我想要的效果,但是在某些情况下,还是会有误差产生。
且随着射线的密度上升,会使效能开始超出预期,可以透过一些简单的判断,来避免掉一些不必要的效能消耗,所以必须要将侦测方式做个调整,这边尝试的优化方式有逼近法以及二分逼近法。

逼近法

由于障碍物的边界往往会出现在任两条相邻射线之间,所以一种较简单的做法就是在确定相邻射线所碰触的碰撞体不同时,透过逼近的方式使其中一条射线缓慢的逼近另一条,透过这种方式来取得相邻射线中间的边界。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
// Sweeps a probe ray from startEdgeRayData towards endEdgeRayData in steps
// of _approximationPrecision degrees until it hits a different object, then
// returns the pair of rays straddling that boundary. Returns null when the
// precision is non-positive or the sweep crosses the whole gap without
// finding a different collider.
// (Reformatted: the original source was collapsed onto a single line.)
private EdgeData GetApproximationEdge(RayData startEdgeRayData, RayData endEdgeRayData)
{
    if (_approximationPrecision <= 0) { return null; }

    Vector3 center = transform.position;
    float maxAngle = Vector3.Angle(startEdgeRayData.m_direction, endEdgeRayData.m_direction);
    float sweptAngle = _approximationPrecision;

    RayData probe = new RayData(center, startEdgeRayData.m_angle + _approximationPrecision, _radius);
    UpdateRaycast(probe);

    while (RayData.IsHittingSameObject(startEdgeRayData, probe))
    {
        sweptAngle += _approximationPrecision;
        if (sweptAngle > maxAngle)
        {
            // Swept past the far ray without finding a different collider.
            probe = null;
            break;
        }

        probe.UpdateDirection(_approximationPrecision);
        UpdateRaycast(probe);
    }

    if (probe == null)
    {
        return null;
    }

    // The boundary lies between the last same-object ray and the probe.
    EdgeData edgeData = new EdgeData();
    edgeData.m_secondRay = probe;
    edgeData.m_firstRay = new RayData(center, probe.m_angle - _approximationPrecision, _radius);
    UpdateRaycast(edgeData.m_firstRay);

    return edgeData;
}
二分逼近法

在逼近法中,透过慢慢的逼近边界来检测物体边界,这种做法相当于在任两条射线中,细分多条射线来作为判断,会使效能有许多额外损耗。所以透过逼近二分法,混合逼近法与二分法,来更加优化这一阶段的计算次数。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
// Locates the boundary between the objects hit by two rays by repeatedly
// casting at their angular midpoint (_bisectionCount iterations). Falls back
// to the linear approximation sweep when both rays missed everything, and
// returns null when both rays already hit the same object (no edge between).
private EdgeData GetBisectionEdge(RayData startEdgeRayData, RayData endEdgeRayData)
{
    if (!startEdgeRayData.m_hit && !endEdgeRayData.m_hit)
    {
        return GetApproximationEdge(startEdgeRayData, endEdgeRayData);
    }

    if (RayData.IsHittingSameObject(startEdgeRayData, endEdgeRayData))
    {
        return null;
    }

    Vector3 center = transform.position;

    for (int i = 0; i < _bisectionCount; i++)
    {
        float midAngle = (startEdgeRayData.m_angle + endEdgeRayData.m_angle) / 2;
        RayData midRay = new RayData(center, midAngle, _radius);
        UpdateRaycast(midRay);

        // Keep the half of the interval that still contains the boundary.
        if (RayData.IsHittingSameObject(startEdgeRayData, midRay))
        {
            startEdgeRayData = midRay;
        }
        else
        {
            endEdgeRayData = midRay;
        }
    }

    EdgeData edgeData = new EdgeData();
    edgeData.m_firstRay = startEdgeRayData;
    edgeData.m_secondRay = endEdgeRayData;

    return edgeData;
}

程序化 Mesh

完成范围侦测与优化侦测方式后,我们已经可以直接透过 Editor Scene 来看到射线的焦点情况,所以接下来就是要在 Runtime 透过动态产生程序化 Mesh 来画出这个侦测范围。
一般的 Mesh 由三个基本要素所组成,Vertex、Triangle 与 UV,每三个顶点可以形成一个三角形。透过这个简单的概念,去重新计算 Mesh 生成所需要的要素。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
// Rebuilds the fan-shaped field-of-view mesh from the current ray results.
// Vertex 0 is the local origin; each subsequent pair of vertices is the
// local-space end point of two neighbouring rays, giving one triangle per
// ray gap. UVs are radial around (0.5, 0.5) so a texture can fade outward.
// NOTE: the source article pasted this method twice; the duplicate
// definition (which would not compile) has been removed here.
private void GenerateMesh()
{
    int meshCount = _rayDatas.Length - 1;
    int vertexCount = meshCount * 2 + 1;
    int triangleCount = meshCount * 3;

    _vertices = new Vector3[vertexCount];
    _vertices[0] = Vector3.zero;
    for (int i = 1, mesh = 0; i < _vertices.Length; i += 2, mesh++)
    {
        // Ray end points are world-space; the mesh needs local coordinates.
        _vertices[i] = transform.InverseTransformPoint(_rayDatas[mesh].m_end);
        _vertices[i + 1] = transform.InverseTransformPoint(_rayDatas[mesh + 1].m_end);
    }

    _triangles = new int[triangleCount];
    for (int i = 0; i < meshCount; i++)
    {
        // Every triangle shares the center vertex (index 0).
        _triangles[i * 3] = 0;
        _triangles[i * 3 + 1] = i * 2 + 1;
        _triangles[i * 3 + 2] = i * 2 + 2;
    }

    _uvs = new Vector2[vertexCount];
    _uvs[0] = new Vector2(0.5f, 0.5f);
    float lerp = 0;
    Vector3 direction = Vector3.zero;
    for (int i = 1, mesh = 0; i < _uvs.Length; i += 2, mesh++)
    {
        lerp = _vertices[i].magnitude / _radius;
        direction = _rayDatas[mesh].m_direction * _rayDatas[mesh].m_distance * 0.6f / _radius;
        _uvs[i] = new Vector2(direction.x, direction.z) * lerp + _uvs[0];

        lerp = _vertices[i + 1].magnitude / _radius;
        // BUG FIX: the original read _rayDatas[mesh].m_distance here while
        // using _rayDatas[mesh + 1].m_direction — a copy-paste index
        // mismatch; both factors must come from the same ray.
        direction = _rayDatas[mesh + 1].m_direction * _rayDatas[mesh + 1].m_distance * 0.6f / _radius;
        _uvs[i + 1] = new Vector2(direction.x, direction.z) * lerp + _uvs[0];
    }

    _mesh.Clear();
    _mesh.vertices = _vertices;
    _mesh.triangles = _triangles;
    _mesh.uv = _uvs;
    _mesh.RecalculateNormals();

    _meshFilter.mesh = _mesh;
}
模板测试

视野范围效果到这边为止已经完成了,但是可以再加个小细节,来提升画面呈现品质。目前的呈现效果中,只有光与影的互动,并没有物件的互动,所以可以在这边加入一个简单的 Stencil Shader 来呈现。透过 Stencil Shader,针对有被视野范围平面所包覆的范围做模板测试,通过模板测试后,利用 Blend DstColor SrcColor 来将颜色混合后,就完成了这次所实现的视野范围的最终效果。

StencilMaskOne.shader
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
// Drawn by the field-of-view mesh: writes stencil ref 1 wherever the mesh
// covers the screen, while rendering its texture with standard alpha blending.
// Objects using "Stencil Equal One" then only show up inside this region.
Shader "Unlit/Stencil/Stencil Mask One"
{
    Properties
    {
        _MainTex ("Main Texture", 2D) = "white" {}
        // NOTE(review): _Color is declared (here and in CG below) but never
        // used by the fragment shader — presumably leftover; confirm before removing.
        _Color ("Color", Color) = (1, 1, 1, 1)
    }
    SubShader
    {
        Tags {"RenderType"="Opaque" "PreviewType" = "Plane"}

        // Unconditionally stamp 1 into the stencil buffer for every pixel
        // this shader touches.
        Stencil
        {
            Ref 1
            Comp always
            Pass replace
        }

        Pass
        {
            // Standard transparency blend for the vision-cone texture.
            Blend SrcAlpha OneMinusSrcAlpha

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                float2 uv : TEXCOORD0;
            };

            sampler2D _MainTex;
            float4 _Color;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;

                return o;
            }

            // Pass the texture through unmodified.
            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 col = tex2D(_MainTex, i.uv);
                return col;
            }
            ENDCG
        }
    }
}
StencilEqualOne.shader
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
// Applied to objects that should only be visible inside the vision cone:
// the stencil test passes only where "Stencil Mask One" already wrote 1.
Shader "Unlit/Stencil/Stencil Equal One"
{
    Properties
    {
        _Color ("Color", Color) = (1, 1, 1, 1)
    }
    SubShader
    {
        Tags {"RenderType"="Opaque" }

        // Render only where the stencil buffer equals 1.
        // NOTE(review): "Pass replace" rewrites 1 with 1 here — harmless,
        // but "Pass keep" would state the intent more clearly; confirm.
        Stencil
        {
            Ref 1
            Comp equal
            Pass replace
        }

        Pass
        {
            // Blend DstColor SrcColor doubles and multiplies the colors
            // (2 * src * dst), giving the tinted highlight inside the cone.
            Blend DstColor SrcColor

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
            };

            fixed4 _Color;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                return o;
            }

            // Flat tint; the blend mode above does the mixing.
            fixed4 frag (v2f i) : SV_Target
            {
                return _Color;
            }
            ENDCG
        }
    }
}
StencilMaskOne.material
最终效果
结语

这次透过组合一些简单的功能,来完成这个有趣的视野范围效果,但最终实现出的版本,在效能上还是有许多进步的空间。在业界前辈的指导、讨论后,理解到可以利用 Shader 来作为同样功能的实现,透过实际光源的位置去重新计算顶点位置,用这种方式来画出阴影部分,也因为是将运算工作交付给 GPU 处理,所以效能上能得到大幅改善。若各位有兴趣继续研究这个效果,可以往这个方向去进行研究。

如社区发表内容存在侵权行为,您可以点击这里查看侵权投诉指引

0个评论