
Help with view space normals

Discussion in 'Shaders' started by IronLionZion, Feb 1, 2017.

  1. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    I am trying to create a post processing rim highlight image effect using the normals produced by a camera's DepthTextureMode.DepthNormals setting. The camera is a temporary camera created at runtime, and only renders one layer - the highlight layer. Objects to be highlighted are added to this layer. I thought I would simply do a dot product of these normals and a (0,0,1) vector, and I would be home free, but the thing is, these normals are in view space and they rotate along with the camera, thus shifting the rim highlight as the camera rotates. Therefore things look right only when the objects to be highlighted are in the middle of the screen.

    Here's a screenshot:
    highlight.png

    Here are trimmed down versions of the scripts:

    Code (CSharp):
    // HighlightPostEffect.cs
    using UnityEngine;

    [RequireComponent(typeof(Camera))]
    public class HighlightPostEffect : MonoBehaviour
    {
        public Color highlightColor = Color.blue;
        private Camera attachedCamera;
        private Camera tempCam;
        private int layerMask = -1;
        private Material material;

        void Start()
        {
            layerMask = LayerMask.GetMask("Highlight");

            attachedCamera = GetComponent<Camera>();

            // Temporary camera that only renders the "Highlight" layer and writes depth + normals.
            tempCam = new GameObject("Temp Camera").AddComponent<Camera>();
            tempCam.transform.SetParent(transform, false);
            tempCam.CopyFrom(attachedCamera);
            tempCam.cullingMask = layerMask;
            tempCam.depthTextureMode = DepthTextureMode.DepthNormals;
            tempCam.enabled = false;

            material = new Material(Shader.Find("Hidden/HighlightEffectShader"));
        }

        void OnRenderImage(RenderTexture source, RenderTexture destination)
        {
            tempCam.Render();

            material.color = highlightColor;
            Graphics.Blit(source, destination, material);
        }
    }

    // HighlightEffectShader.shader
    Shader "Hidden/HighlightEffectShader"
    {
        Properties
        {
            _MainTex ("Texture", 2D) = "white" {}
        }

        SubShader
        {
            // No culling or depth
            Cull Off ZWrite Off ZTest Always
            Lighting Off

            Pass
            {
                CGPROGRAM
                #pragma vertex vert
                #pragma fragment frag

                #include "UnityCG.cginc"

                struct appdata
                {
                    float4 vertex : POSITION;
                    float2 uv : TEXCOORD0;
                };

                struct v2f
                {
                    float2 uv : TEXCOORD0;
                    float4 vertex : SV_POSITION;
                };

                v2f vert (appdata v)
                {
                    v2f o;
                    o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
                    o.uv = v.uv;
                    return o;
                }

                sampler2D _MainTex;
                fixed4 _Color; // set from the C# script via material.color
                sampler2D _LastCameraDepthNormalsTexture;

                fixed4 frag (v2f i) : SV_Target
                {
                    fixed4 imgCol = tex2D(_MainTex, i.uv);
                    float4 tempNorms = tex2D(_LastCameraDepthNormalsTexture, i.uv);

                    float3 normals = DecodeViewNormalStereo(tempNorms);
                    float rim = pow(1 - dot(float3(0, 0, 1), normals), 1.5);

                    return lerp(imgCol, _Color, rim);
                }
                ENDCG
            }
        }
    }

    My question is: Do I have to convert the normals from view space to some other space? If so, how?
    If not, what do I have to do in order to make the rim highlight always centered?

    Note: I already made a version that correctly calculates the rim highlight from a second shader that the temp cam renders with, using the regular method (dot product of the camera direction and the mesh normals), but that approach doesn't work with meshes that have their vertices animated by their shader, e.g. swaying vegetation.
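    For reference, that "regular method" would look roughly like the sketch below: an object shader rendered by the temp cam, taking the dot product of the view direction and the mesh normal. The shader name and exact structure here are illustrative, not code from this thread, and it is indeed the approach that breaks when a shader animates the vertices, since this pass never sees that animation.

    Code (CSharp):
    // Illustrative only - a typical object-space rim shader of the kind described above.
    Shader "Hidden/ObjectRimHighlight"
    {
        SubShader
        {
            Pass
            {
                CGPROGRAM
                #pragma vertex vert
                #pragma fragment frag
                #include "UnityCG.cginc"

                struct v2f
                {
                    float4 pos : SV_POSITION;
                    float rim : TEXCOORD0;
                };

                v2f vert (appdata_base v)
                {
                    v2f o;
                    o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
                    // dot of the (object space) view direction and the mesh normal
                    float3 viewDir = normalize(ObjSpaceViewDir(v.vertex));
                    o.rim = 1 - saturate(dot(viewDir, v.normal));
                    return o;
                }

                fixed4 frag (v2f i) : SV_Target
                {
                    return fixed4(i.rim, i.rim, i.rim, 1);
                }
                ENDCG
            }
        }
    }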
     
  2. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    You need the dot product of the normalized view space position of the pixel and the view normal. In your vertex shader calculate the view space position, and pass that to the fragment shader.

    float3 viewPos : TEXCOORD1;
    ...
    o.viewPos = mul(UNITY_MATRIX_MV, v.vertex).xyz;
    ...
    dot(normalize(i.viewPos), normals)
     
  3. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    Didn't work. This is the result:
    result.PNG mask.PNG

    Shader code modifications:
    Code (CSharp):
    struct v2f
    {
        float2 uv : TEXCOORD0;
        float3 viewPos : TEXCOORD1;
        float4 vertex : SV_POSITION;
    };

    v2f vert (appdata v)
    {
        v2f o;
        o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
        o.uv = v.uv;
        o.viewPos = mul(UNITY_MATRIX_MV, v.vertex).xyz;
        return o;
    }

    sampler2D _MainTex;
    fixed4 _Color;
    sampler2D _LastCameraDepthNormalsTexture;

    fixed4 frag (v2f i) : SV_Target
    {
        fixed4 imgCol = tex2D(_MainTex, i.uv);
        float4 tempNorms = tex2D(_LastCameraDepthNormalsTexture, i.uv);

        float depth;
        float3 normals;
        DecodeDepthNormal(tempNorms, depth, normals);
        float mask = ceil(1 - depth);

        float rim = pow(1 - dot(normalize(i.viewPos), normals), 1.5);
        return lerp(imgCol, _Color, rim * mask);
    }
     
  4. FuzzyQuills

    FuzzyQuills

    Joined:
    Jun 8, 2013
    Posts:
    2,871
    Not sure how this will work out, but try this:

    Code (CSharp):
    struct v2f
    {
        float2 uv : TEXCOORD0;
        float3 viewPos : TEXCOORD1;
        float4 vertex : SV_POSITION;
    };

    v2f vert (appdata v)
    {
        v2f o;
        o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
        o.uv = v.uv;
        o.viewPos = WorldSpaceViewDir(v.vertex);
        return o;
    }

    sampler2D _MainTex;
    fixed4 _Color;
    sampler2D _LastCameraDepthNormalsTexture;

    fixed4 frag (v2f i) : SV_Target
    {
        fixed4 imgCol = tex2D(_MainTex, i.uv);
        float4 tempNorms = tex2D(_LastCameraDepthNormalsTexture, i.uv);

        float depth;
        float3 normals;
        DecodeDepthNormal(tempNorms, depth, normals);
        float mask = ceil(1 - depth);

        float rim = pow(1 - dot(normalize(i.viewPos), normals), 1.5);
        return lerp(imgCol, _Color, rim * mask);
    }
    Basically, use the world-space view direction. Since view-space vectors rotate along with the camera while world-space ones don't, this may correct it.

    (Normally, if you were using the object's actual normals instead of view-space ones, ObjSpaceViewDir would work.)

    EDIT: If you get the opposite effect, i.e. it looks like a light is shining from the camera, just invert the result. :)
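    A quick sketch (not part of the original post) of what "inverting the result" could look like, reusing the rim and mask values from the snippet above:

    Code (CSharp):
    // hypothetical tweak: if the highlight looks like a light shining from the camera,
    // flip the falloff before blending
    rim = 1.0 - rim;
    return lerp(imgCol, _Color, rim * mask);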
     
    Last edited: Feb 2, 2017
  5. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    Ah, because it's an image effect, which has some weird extra stuff you have to do.

    Try:
    o.viewPos *= float3(1,-1,-1);
     
  6. FuzzyQuills

    FuzzyQuills

    Joined:
    Jun 8, 2013
    Posts:
    2,871
    Yep... View-space normals in a texture are strange things to deal with. :)
     
  7. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    Sorry to say, neither of the last two suggestions works. :(
     
  8. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    I've figured out a way that kinda works. It's not perfect, but it's much better than before. The trick was to calculate the view direction's xy coords from the UVs remapped to a (-1..1) range.

    Screenshots:
    result1.PNG result2.PNG mask1.PNG mask2.PNG

    Here's the code:
    Code (CSharp):
    fixed4 frag (v2f i) : SV_Target
    {
        fixed4 imgCol = tex2D(_MainTex, i.uv);
        float4 tempNorms = tex2D(_LastCameraDepthNormalsTexture, i.uv);

        float depth;
        float3 normals;
        DecodeDepthNormal(tempNorms, depth, normals);
        float mask = ceil(1 - depth);

        // remap x,y uv coords from 0..1 to -1..1
        float3 viewDir = -normalize(float3((i.uv * 2 - 1), -1));
        float rim = pow(1 - dot(viewDir, normals), 1.5);

        return lerp(imgCol, _Color, rim * mask);
    }
    If anyone has a better way to do this, feel free to share.
     
    Last edited: Feb 2, 2017
  9. FuzzyQuills

    FuzzyQuills

    Joined:
    Jun 8, 2013
    Posts:
    2,871
    Since the normals are in view-space, that actually makes perfect sense, good job. :)
     
  10. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    That's a good approximation if your FOV is close to 90 degrees (in fact it's exactly the same at the center horizontal line). I'm a little confused as to why it's not working for you, though, since that's exactly what Unity's own shaders do. With the last modification I suggested, when you say "that didn't work", was it just solid white, or a similar sideways angle?
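    (Side note, not part of the original reply: in Unity's perspective projection matrix, unity_CameraProjection._11 is cot(fovY/2)/aspect and unity_CameraProjection._22 is cot(fovY/2), so at a 90° vertical FOV the _22 term is exactly 1 and (uv * 2 - 1) already lines up with the view-space ray along that axis. At other FOVs the remapped UVs need to be divided by those terms, which is the correction shown in the full shader further down:)

    Code (CSharp):
    // correction sketch: undo the projection scale before building the view ray
    float2 p11_22 = float2(unity_CameraProjection._11, unity_CameraProjection._22);
    float3 viewDir = -normalize(float3((i.uv * 2 - 1) / p11_22, -1));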
     
  11. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    It was a similar sideways angle, but rotated 90 deg and with a little more white.

    I might be wrong, but I think the reason your method didn't work is that screen effect shaders are essentially shaders for flat 2D planes, with the origin in the bottom left. So v.vertex is actually a 2D vector (in object space) whose z component is 0, e.g. (x, y, 0, w). If I use that in my dot product, it will always be a vector parallel to the screen (perpendicular to the camera's forward axis). What I needed was a vector pointing out of the screen to compare with the normal vectors, which also point out of the screen. Multiplying o.vertex by (1,-1,-1) only seemed to mirror that point vertically.
     
  12. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    Your reasoning is both right and wrong. The problem isn't that it's a 2D plane, rendering on a GPU is still fundamentally in 3D. The real reason is something I hadn't realized about how Unity constructs its image effect view and projection matrices.

    Basically during an image effect pass all of the "extra" matrices usually around for converting things from world or model space into projection space aren't really "useful" in the same way. Essentially they just have enough data to get a known quad to cover the camera's view and no more. The result is the "view position" as calculated from the UNITY_MATRIX_MV (or any of the built in functions for getting view or world position) will basically be the same as the uvs. In effect, they are "2D".

    Luckily there is still a matrix that we can use to get a valid view projection, unity_CameraProjection. Credit should go mainly to Keijiro Takahashi (of Unity Japan) for this solution.

    Code (CSharp):
    1. Shader "Hidden/ViewNormalsImageEffectShader"
    2. {
    3.     Properties
    4.     {
    5.         _MainTex ("Texture", 2D) = "white" {}
    6.     }
    7.     SubShader
    8.     {
    9.         // No culling or depth
    10.         Cull Off ZWrite Off ZTest Always
    11.  
    12.         Pass
    13.         {
    14.             CGPROGRAM
    15.             #pragma vertex vert_img
    16.             #pragma fragment frag
    17.          
    18.             #include "UnityCG.cginc"
    19.          
    20.             sampler2D _MainTex;
    21.             sampler2D _CameraDepthNormalsTexture;
    22.  
    23.             fixed4 frag (v2f_img i) : SV_Target
    24.             {
    25.                 fixed4 packedDepthNormals = tex2D(_CameraDepthNormalsTexture, i.uv);
    26.                 float depth;
    27.                 float3 normals;
    28.                 DecodeDepthNormal(packedDepthNormals, depth, normals);
    29.  
    30.                 // get the perspective projection
    31.                 float2 p11_22 = float2(unity_CameraProjection._11, unity_CameraProjection._22);
    32.                 // conver the uvs into view space by "undoing" projection
    33.                 float3 viewDir = -normalize(float3((i.uv * 2 - 1) / p11_22, -1));
    34.  
    35.                 float fresnel = 1.0 - dot(viewDir.xyz, normals);
    36.  
    37.                 return float4(fresnel, fresnel, fresnel, 1);
    38.             }
    39.             ENDCG
    40.         }
    41.     }
    42. }
    43.  
     
  13. IronLionZion

    IronLionZion

    Joined:
    Dec 15, 2015
    Posts:
    78
    Thanks, that fixed the minor issue with my solution.
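    For completeness, the fixed frag function would presumably end up looking something like this (a reconstruction combining the earlier solution with bgolus's projection correction, not code actually posted in the thread):

    Code (CSharp):
    fixed4 frag (v2f i) : SV_Target
    {
        fixed4 imgCol = tex2D(_MainTex, i.uv);
        float4 tempNorms = tex2D(_LastCameraDepthNormalsTexture, i.uv);

        float depth;
        float3 normals;
        DecodeDepthNormal(tempNorms, depth, normals);
        float mask = ceil(1 - depth);

        // remap the uvs to -1..1 and undo the projection scale so the
        // reconstructed view direction is correct at any field of view
        float2 p11_22 = float2(unity_CameraProjection._11, unity_CameraProjection._22);
        float3 viewDir = -normalize(float3((i.uv * 2 - 1) / p11_22, -1));
        float rim = pow(1 - dot(viewDir, normals), 1.5);

        return lerp(imgCol, _Color, rim * mask);
    }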