Unity: Converting Between a RawImage's UvRect and UGUI Coordinates

Published 2018-04-10
I needed to convert between a RawImage's uvRect and UGUI coordinates, for a new-player-guide effect that highlights one UI element while dimming everything around it.

My first idea was to use a mask, so I fished a shader out of the great ocean of knowledge online and modified it to get the effect:
Shader "Tang/GuideMask"  
{  
Properties  
{  
[PerRendererData] _MainTex("Sprite Texture", 2D) = "white" {}  
_Color("Tint", Color) = (1,1,1,1)  
//_StencilComp("Stencil Comparison", Float) = 8  
//_Stencil("Stencil ID", Float) = 0  
//_StencilOp("Stencil Operation", Float) = 0  
//_StencilWriteMask("Stencil Write Mask", Float) = 255  
//_StencilReadMask("Stencil Read Mask", Float) = 255  
_ColorMask("Color Mask", Float) = 15  
_Length("Lenth",Float) = 3  
//中心  
_Origin("圆心",Vector) = (0,0,0,0)  
//裁剪方式 0圆形 1圆形  
_MaskType("Type",Float) = 0  
[Toggle(UNITY_UI_ALPHACLIP)] _UseUIAlphaClip("Use Alpha Clip", Float) = 0  
}  
SubShader  
{  
Tags  
{  
"Queue" = "Transparent"  
"IgnoreProjector" = "True"  
"RenderType" = "Transparent"  
"PreviewType" = "Plane"  
"CanUseSpriteAtlas" = "True"  
}  
Stencil  
{  
Ref[_Stencil]  
Comp[_StencilComp]  
Pass[_StencilOp]  
ReadMask[_StencilReadMask]  
WriteMask[_StencilWriteMask]  
}  
Cull Off  
Lighting Off  
ZWrite Off  
ZTest[unity_GUIZTestMode]  
Blend SrcAlpha OneMinusSrcAlpha  
ColorMask[_ColorMask]  
Pass  
{  
Name "Default"  
CGPROGRAM  
#pragma vertex vert  
#pragma fragment frag  
#pragma target 2.0  
#include "UnityCG.cginc"  
#include "UnityUI.cginc"  
#pragma multi_compile __ UNITY_UI_ALPHACLIP  
struct appdata_t  
{  
float4 vertex : POSITION;  
float4 color : COLOR;  
float2 texcoord : TEXCOORD0;  
UNITY_VERTEX_INPUT_INSTANCE_ID  
};  
struct v2f  
{  
float4 vertex : SV_POSITION;  
fixed4 color : COLOR;  
float2 texcoord : TEXCOORD0;  
float4 worldPosition : TEXCOORD1;  
UNITY_VERTEX_OUTPUT_STEREO  
};  
fixed4 _Color;  
fixed4 _TextureSampleAdd;  
float4 _ClipRect;  
float4 _Origin;  
float4 _Origin1;  
float _MaskType;  
//Test  
uniform float4 _Points[100];  // 数组变量  
uniform float _Points_Num;  // 数组长度变量  
//顶点函数,输入参数是网格数据,输出的是顶点到片元结构体  
v2f vert(appdata_t IN)  
{  
v2f OUT;  
UNITY_SETUP_INSTANCE_ID(IN);  
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(OUT);  
OUT.worldPosition = IN.vertex;  
OUT.vertex = UnityObjectToClipPos(OUT.worldPosition);  
OUT.texcoord = IN.texcoord;  
OUT.color = IN.color * _Color;  
return OUT;  
}  
sampler2D _MainTex;  
fixed4 frag(v2f IN) : SV_Target  
{  
float2 uv = IN.texcoord;  
half4 col = IN.color;  
//开始裁剪  
//外部直接给坐标 宽 高 GPU计算比率  
float posX = (_Origin.x + 960) / 1920;  
float posY = (_Origin.y + 540) / 1080;  
float2 pos = float2(posX, posY);  
//float posX = 0;  
//float posY = 0;  
//if (_MaskType == 0) {  
//posX = posX * 1280 / 720;  
//pos = float2(posX, posY);  
//float rid = _Origin.z / 720 / 2;  
//uv.x = uv.x * 1280 / 720;  
//float2 nor = uv-pos;  
//if (length(nor) < rid)  
//col.a = 0;  
//}  
//else {  
float w = _Origin.z / 1920 / 2;  
float h = _Origin.w / 1080 / 2;  
//float w =0;  
//float h =0;  
//float result1 = 0;  
//float result2 = 0;  
//float result3 = 0;  
//float result4 = 0;  
//for (int j=0; j<_Points_Num; j++)  
//{  
//    float4 p4 = _Points[j]; // 索引取值  
//  posX = ( _Points[j].x + 960) / 1920;  
//    posY = ( _Points[j].y + 540) / 1080;  
//  w = _Points[j].z / 1920 /2;  
//  h = _Points[j].w / 1080 /2;  
//  result1 += posX - w;  
//  result2 += posX + w;  
//  result3 += posY - h;  
//  result4 += posY + h;  
//    // 自定义处理  
//}  
//if (uv.x <= result1|| uv.x>=result2|| uv.y<=result3|| uv.y >= result4)  
//{  
//col.a = 0.001f;  
//}  
if (uv.x > pos.x - w && uv.x<pos.x + w && uv.y>pos.y - h && uv.y < pos.y + h)  
{  
//col.a = 0.5f;  
}  
else{  
col.a = 0.001f;  
}  
half4 color = (tex2D(_MainTex,uv) + _TextureSampleAdd) * col;  
color.a *= UnityGet2DClipping(IN.worldPosition.xy, _ClipRect);  
clip(col.a);  
#ifdef UNITY_UI_ALPHACLIP  
clip(color.a - 0.001);  
#endif  
return color;  
}  
ENDCG  
}  
}  
}  
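
For completeness, driving this shader from a script might look like the minimal sketch below. The GuideMaskDriver name, the maskImage field, and the center-anchored target are my assumptions for illustration, not part of the original post; they match the 1920x1080 constants hardcoded in the fragment shader.

using UnityEngine;
using UnityEngine.UI;

// Sketch: feeds a target RectTransform into the shader's _Origin vector
// (x, y = anchored position; z, w = width, height). Assumes the guide-mask
// Image covers the whole screen and uses a material built from
// "Tang/GuideMask" with a 1920x1080 reference resolution.
public class GuideMaskDriver : MonoBehaviour
{
    public Image maskImage;      // full-screen image using the GuideMask material
    public RectTransform target; // UI element to highlight

    void Update()
    {
        var r = target.rect;
        maskImage.material.SetVector("_Origin",
            new Vector4(target.anchoredPosition.x, target.anchoredPosition.y, r.width, r.height));
    }
}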

Then the mask had to support several UI elements at once. Ugh, that nearly finished off someone who can't write shaders. After digging around for a long while I changed tack and implemented it with the uvRect instead:
using UnityEngine;
using UnityEngine.UI;

public class TestImage : MonoBehaviour
{
    private RectTransform raw;
    private RawImage rawImage;
    public CanvasScaler canvasScaler;
    private float screenHeight;
    private float screenWidth;

    void Start()
    {
        raw = GetComponent<RectTransform>();
        // Cache the RawImage instead of fetching it every frame
        rawImage = GetComponent<RawImage>();
        // Read the reference resolution configured on the CanvasScaler
        screenWidth = canvasScaler.referenceResolution.x;
        screenHeight = canvasScaler.referenceResolution.y;
    }

    void Update()
    {
        // Normalized center of this RawImage (anchors/pivot assumed at the canvas center)
        var posX = (raw.anchoredPosition.x + screenWidth / 2) / screenWidth;
        var posY = (raw.anchoredPosition.y + screenHeight / 2) / screenHeight;
        // Half size as a fraction of the reference resolution
        var w1 = raw.rect.width / screenWidth / 2;
        var h1 = raw.rect.height / screenHeight / 2;
        // Full size as a fraction of the reference resolution
        var pow = raw.rect.width / screenWidth;
        var poh = raw.rect.height / screenHeight;
        // uvRect: x, y = normalized bottom-left corner; width, height = normalized size
        rawImage.uvRect = new Rect(posX - w1, posY - h1, pow, poh);
    }
}
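
A quick sanity check of the math, assuming a 1920x1080 reference resolution and a 200x100 RawImage at anchoredPosition (0, 0), anchored to the canvas center: posX = (0 + 960) / 1920 = 0.5, w1 = 200 / 1920 / 2 ≈ 0.052, pow = 200 / 1920 ≈ 0.104, and likewise posY = 0.5, h1 ≈ 0.046, poh ≈ 0.093, giving uvRect ≈ (0.448, 0.454, 0.104, 0.093). That is exactly the slice of the full-screen texture that lies underneath the RawImage, so the sampled region stays aligned with the screen as the RawImage moves.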
Source: https://blog.csdn.net/ldy597321444/article/details/79015121
