Previous post: mixed text and image layout (图文混排)

This requirement came up again recently, so I picked the old work back up and, in the process, found I now understand it better. Writing it down here for the record. Straight to the shader:
Shader "UI/EmojiText" {
Properties {
[PerRendererData] _MainTex ("Font Texture", 2D) = "white" {}
_Color ("Tint", Color) = (1,1,1,1)
_StencilComp ("Stencil Comparison", Float) = 8
_Stencil ("Stencil ID", Float) = 0
_StencilOp ("Stencil Operation", Float) = 0
_StencilWriteMask ("Stencil Write Mask", Float) = 255
_StencilReadMask ("Stencil Read Mask", Float) = 255
_ColorMask ("Color Mask", Float) = 15
[Toggle(UNITY_UI_ALPHACLIP)] _UseUIAlphaClip ("Use Alpha Clip", Float) = 0
_EmojiTex ("Emoji Texture", 2D) = "white" {}
_EmojiDataTex ("Emoji Data", 2D) = "white" {}
_EmojiSize ("Emoji count of every line",float) = 200
_FrameSpeed ("FrameSpeed",Range(0,10)) = 3
}
    SubShader
    {
        Tags
        {
            "LightMode"="UniversalForward"
            "Queue"="Transparent"
            "IgnoreProjector"="True"
            "RenderType"="Transparent"
            "PreviewType"="Plane"
            "CanUseSpriteAtlas"="True"
        }

        Stencil
        {
            Ref [_Stencil]
            Comp [_StencilComp]
            Pass [_StencilOp]
            ReadMask [_StencilReadMask]
            WriteMask [_StencilWriteMask]
        }

        Cull Off
        Lighting Off
        ZWrite Off
        ZTest [unity_GUIZTestMode]
        Blend SrcAlpha OneMinusSrcAlpha
        ColorMask [_ColorMask]
        Pass
        {
            Name "Default"
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #pragma target 2.0

            #include "UnityCG.cginc"
            #include "UnityUI.cginc"

            #pragma multi_compile __ UNITY_UI_ALPHACLIP
            struct appdata_t
            {
                float4 vertex : POSITION;
                float4 color : COLOR;
                float2 texcoord : TEXCOORD0;   // font atlas UV
                float2 texcoord1 : TEXCOORD1;  // emoji atlas UV (stays 0 for ordinary glyphs)
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                fixed4 color : COLOR;
                half2 texcoord : TEXCOORD0;
                half2 texcoord1 : TEXCOORD1;
            };

            fixed4 _Color;
            fixed4 _TextureSampleAdd;
            float4 _ClipRect; // declared for UI compatibility; RectMask2D clipping is not applied in this shader

            v2f vert(appdata_t IN)
            {
                v2f OUT;
                OUT.vertex = UnityObjectToClipPos(float4(IN.vertex.x, IN.vertex.y, IN.vertex.z, 1.0));
                OUT.texcoord = IN.texcoord;
                OUT.texcoord1 = IN.texcoord1;
                #ifdef UNITY_HALF_TEXEL_OFFSET
                OUT.vertex.xy += (_ScreenParams.zw - 1.0) * float2(-1, 1) * OUT.vertex.w;
                #endif
                OUT.color = IN.color * _Color;
                return OUT;
            }

            sampler2D _MainTex;
            sampler2D _EmojiTex;      // emoji sprite atlas generated beforehand
            sampler2D _EmojiDataTex;  // per-emoji data texture (frame count encoded in rgb)
            float _EmojiSize;         // number of emoji cells per row of the atlas
            float _FrameSpeed;
            fixed4 frag(v2f IN) : SV_Target
            {
                fixed4 color;
                // A non-zero second UV set marks this fragment as part of an emoji quad.
                if (IN.texcoord1.x > 0 && IN.texcoord1.y > 0)
                {
                    // Width of one emoji cell in UV space.
                    half size = 1 / _EmojiSize;
                    // Centre of the cell this fragment falls in, used to look up the emoji's data.
                    half2 uv = half2(floor(IN.texcoord1.x * _EmojiSize) * size + 0.5 * size,
                                     floor(IN.texcoord1.y * _EmojiSize) * size + 0.5 * size);
                    fixed4 data = tex2D(_EmojiDataTex, uv);
                    // Frame count is bit-encoded in rgb: 1 + r + 2g + 4b, with sign() turning non-zero channels into 1.
                    half frameCount = 1 + sign(data.r) + sign(data.g) * 2 + sign(data.b) * 4;
                    // Current animation frame.
                    half index = abs(fmod(floor(_Time.x * _FrameSpeed * 50), frameCount));
                    // flag becomes 1 when stepping "index" cells runs past the right edge of the atlas,
                    // in which case wrap to the next row.
                    half flag = (1 + sign(IN.texcoord1.x + index * size - 1)) * 0.5;
                    IN.texcoord1.x += index * size - flag;
                    IN.texcoord1.y += size * flag;
                    color = tex2D(_EmojiTex, IN.texcoord1);
                }
                else
                {
                    // Plain glyph: sample the font texture the same way UI/Default does.
                    color = (tex2D(_MainTex, IN.texcoord) + _TextureSampleAdd) * IN.color;
                }

                #ifdef UNITY_UI_ALPHACLIP
                clip(color.a - 0.001);
                #endif

                return color;
            }
            ENDCG
        }
    }
}
Only two parts really matter. In the fragment shader, an if statement checks whether the fragment carries a second set of UV coordinates, which is how images are told apart from text: the custom Text component from the previous post writes the second UV set onto the positions where an image needs to be rendered, while ordinary glyphs never get one and stay at the default of 0. If it is an image, the shader samples _EmojiTex (the emoji atlas generated earlier) using texcoord1, the second UV set; if it is text, it falls through to the default branch and samples with the first UV set. That is all there is to it. The code in the middle of the branch computes the frame count for frame animation; I am not using that yet, it can simply be commented out, so I will not go into it here.
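For reference, here is how the component side can stamp the second UV set onto the placeholder quads when the mesh is built. This is only a minimal sketch of the idea, not the component from the previous post: EmojiText, EmojiQuad, quads, vertexIndex and uvRect are names made up for illustration, and the corner order has to match the order in which Text actually emits its quad vertices.

using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;

public class EmojiText : Text
{
    // One entry per emoji placeholder; filled when the input string is parsed.
    public struct EmojiQuad
    {
        public int vertexIndex; // index of the first of the quad's 4 vertices
        public Rect uvRect;     // where this emoji sits inside _EmojiTex (0..1)
    }

    public readonly List<EmojiQuad> quads = new List<EmojiQuad>();

    protected override void OnPopulateMesh(VertexHelper toFill)
    {
        base.OnPopulateMesh(toFill); // let Text build the glyph mesh first

        UIVertex v = new UIVertex();
        foreach (EmojiQuad quad in quads)
        {
            // Corner order is an assumption; match it to the order Text emits.
            Vector2[] corners =
            {
                new Vector2(quad.uvRect.xMin, quad.uvRect.yMin),
                new Vector2(quad.uvRect.xMax, quad.uvRect.yMin),
                new Vector2(quad.uvRect.xMax, quad.uvRect.yMax),
                new Vector2(quad.uvRect.xMin, quad.uvRect.yMax),
            };

            for (int i = 0; i < 4; i++)
            {
                int idx = quad.vertexIndex + i;
                if (idx >= toFill.currentVertCount)
                    break;

                toFill.PopulateUIVertex(ref v, idx);
                // A non-zero uv1 is what the shader's if-branch keys off,
                // and it doubles as the sampling position inside _EmojiTex.
                v.uv1 = corners[i];
                toFill.SetUIVertex(v, idx);
            }
        }
    }
}

If uv1 never reaches the shader, also check that the Canvas has the TexCoord1 additional shader channel enabled (Canvas.additionalShaderChannels); otherwise Unity strips the extra channel before it gets to the material.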
A pitfall I ran into: after dropping this shader into the project there was no output at all; even plain text failed to render. The cause: our project uses the URP pipeline, and this CG shader was missing "LightMode" = "UniversalForward". Adding that tag to the Tags block (it is already included in the listing above) fixes it.
A second pitfall: when the input text contains spaces, tabs, line breaks and the like, the emoji fail to render and fall back to the initial "%%" placeholder. The cause: when the custom image-text component computes the vertex index of each image, whitespace characters are counted in, but the generated mesh does not render them, so they never appear in verts and the indices no longer match up. The fix: strip the whitespace from the input string before matching the emoji placeholders, as in the sketch below.
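A rough sketch of that whitespace fix, assuming the placeholders are located with a regex; the "%%" pattern and the EmojiMatcher helper are illustrative only, not the code of the actual component.

using System.Text.RegularExpressions;

public static class EmojiMatcher
{
    // "%%" stands in for the emoji placeholder mentioned above.
    static readonly Regex Placeholder = new Regex("%%");

    // Match against a copy of the text with all whitespace removed, so the
    // resulting match indices line up with the quads of the generated mesh,
    // which contains nothing for spaces, tabs or line breaks.
    public static MatchCollection MatchEmoji(string text)
    {
        string visibleOnly = Regex.Replace(text, @"\s+", "");
        return Placeholder.Matches(visibleOnly);
    }
}

Each match's Index then refers to a position in the whitespace-free string, so (assuming one quad per visible character) matchIndex * 4 points at the first vertex of the corresponding placeholder quad.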
And that's about it~