I'm trying to replicate the smudging effect as seen here: The current effect I've got works well against the camera's far plane provided the way I interpolate depth. However the closer two objects are the less of an impact the effect has. The only method that immediately comes to mind is to split the 32 bit float alpha value to contain both the depth and the time that the smudge will fade into the background. Here's the current code:

PostProcessEffect.cs - Code (CSharp):

using UnityEngine;
using System.Collections;

/// <summary>
/// Full-screen "smudge" post effect. Each frame is blended against a history
/// buffer (<see cref="lastFrame"/>) whose RGB holds last frame's processed
/// color and whose alpha holds an encoded depth, letting the shader decide
/// per-pixel whether to keep smearing the old image or snap to the new one.
/// </summary>
[RequireComponent(typeof(Camera))]
public class PostProcessEffect : MonoBehaviour
{
    private Shader shader = null;
    private Material material = null;

    // View-projection matrix from the PREVIOUS frame, used by the shader to
    // reproject the current pixel into last frame's screen space.
    private Matrix4x4 viewProj = Matrix4x4.identity;

    // Renamed from `camera`: that identifier shadows the deprecated
    // Component.camera property inherited via MonoBehaviour.
    private Camera cam;

    // History/accumulation buffer; alpha channel stores depth.
    private RenderTexture lastFrame;

    public void Awake()
    {
        shader = Shader.Find("PostProcessEffect");
        // BUG FIX: Shader.Find returns null when the shader is missing from
        // the build; guard before dereferencing isSupported.
        if (shader == null || !shader.isSupported)
        {
            enabled = false;
            return;
        }

        material = new Material(shader);
        material.hideFlags = HideFlags.DontSave;

        cam = GetComponent<Camera>();
        cam.depthTextureMode = DepthTextureMode.DepthNormals;

        // BUG FIX: the history buffer must be allocated BEFORE it is cleared.
        // The original code called Graphics.Blit(blackTexture, lastFrame)
        // while lastFrame was still null.
        //
        // PRECISION FIX (the artifact in the question): GetTemporary(w, h)
        // defaults to ARGB32, which quantizes the depth stored in alpha to
        // 8 bits — nearby surfaces whose depths differ by less than 1/255
        // become indistinguishable, killing the effect up close. A half-float
        // target gives the alpha channel 16-bit float precision without
        // changing anything else.
        lastFrame = RenderTexture.GetTemporary(
            Screen.width, Screen.height, 0, RenderTextureFormat.ARGBHalf);
        Graphics.Blit(Texture2D.blackTexture, lastFrame);

        viewProj = cam.worldToCameraMatrix.inverse * cam.projectionMatrix;
    }

    public void OnDestroy()
    {
        // Guard: Awake may have disabled the component before allocating.
        if (lastFrame != null)
        {
            RenderTexture.ReleaseTemporary(lastFrame);
        }
    }

    public void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Same composition as in Awake; kept identical so the shader's
        // mul(vector, matrix) row-vector convention stays consistent between
        // _UNITY_VP_INV and _UNITY_VP_prev.
        Matrix4x4 VP = cam.worldToCameraMatrix.inverse * cam.projectionMatrix;
        material.SetMatrix("_UNITY_VP_INV", VP.inverse);
        material.SetMatrix("_UNITY_VP_prev", viewProj);
        viewProj = VP;
        material.SetTexture("_LastFrame", lastFrame);

        // Render into destination while the shader reads lastFrame (no
        // read/write hazard), then copy the result back into the history
        // buffer for the next frame.
        // NOTE(review): if this camera renders straight to the screen,
        // `destination` is null and the second Blit reads from null — route
        // through a temporary RT in that case. TODO confirm the setup.
        Graphics.Blit(source, destination, material);
        Graphics.Blit(destination, lastFrame);
    }
}

PostProcessEffect.shader - Code (Shader):

Shader "PostProcessEffect"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white" {}
    }
    SubShader
    {
        ZTest Always Cull Off ZWrite Off
        Fog { Mode off }

        Pass
        {
            CGPROGRAM
            #include "UnityCG.cginc"
            #pragma target 3.0
            #pragma vertex vert_img
            #pragma fragment frag

            uniform sampler2D _MainTex;
            uniform float4 _MainTex_TexelSize;
            uniform sampler2D _CameraDepthNormalsTexture;
            uniform sampler2D _LastFrame;      // history: rgb = color, a = depth
            uniform float4x4 _UNITY_VP_INV;    // inverse of current view-projection
            uniform float4x4 _UNITY_VP_prev;   // previous frame's view-projection

            float4 frag(v2f_img i) : SV_Target
            {
                float3 currentColor = tex2D(_MainTex, i.uv).rgb;
                float3 previousColor = float3(0, 0, 0);
                float3 norm;
                float depth[3];

                DecodeDepthNormal(
                    tex2D(_CameraDepthNormalsTexture, float2(i.uv.x, 1 - i.uv.y)),
                    depth[0], norm);

                // Depth values, 1 = near, 0 = far
                depth[0] = 1.0 - depth[0];
                depth[1] = tex2D(_LastFrame, i.uv).a;

                // depth[2] = { min, max }, overwrite with new depths if greater,
                // or if distance is less than epsilon
                if (depth[0] > depth[1] || depth[1] - depth[0] < 0.01)
                {
                    // New surface is in front of (or coincident with) the stored
                    // one: take the fresh color and swap the depths.
                    float tmp = depth[0];
                    depth[0] = depth[1];
                    depth[1] = tmp;
                    previousColor = currentColor;
                }
                else
                {
                    // Inverse transform from view space to world space, then from
                    // world space back to the previous frame's view space.
                    float4 currentPos = float4(i.uv.x * 2.0 - 1.0,
                                               i.uv.y * 2.0 - 1.0,
                                               1.0 - depth[0], 1.0);
                    float4 worldPos = mul(currentPos, _UNITY_VP_INV);
                    worldPos /= worldPos.w;
                    float4 previousPos = mul(worldPos, _UNITY_VP_prev);
                    previousPos /= previousPos.w;

                    // Smear the history buffer along the reprojection velocity
                    // (5-tap average).
                    float2 velocity = (previousPos.xy - currentPos.xy) * 0.1;
                    previousColor += tex2D(_LastFrame, i.uv + velocity).rgb;
                    previousColor += tex2D(_LastFrame, i.uv + velocity * 0.75).rgb;
                    previousColor += tex2D(_LastFrame, i.uv + velocity * 0.5).rgb;
                    previousColor += tex2D(_LastFrame, i.uv + velocity * 0.25).rgb;
                    previousColor += tex2D(_LastFrame, i.uv).rgb;
                    previousColor *= 0.2;
                }

                // Fade speed grows with the depth gap between the two surfaces.
                float delta = unity_DeltaTime.x * exp(abs(depth[0] - depth[1])) * 3.0;
                return float4(lerp(previousColor.rgb, currentColor.rgb, delta),
                              lerp(depth[1], depth[0], delta));
            }
            ENDCG
        }
    }
}

And video that shows how it functions against the far plane vs closer objects: How do you figure to get rid of these artifacts?