Hey, I’m running some HLSL that uses View.ScreenPositionScaleBias to cast a ray from the camera position, but it reads as (0.0, 0.0) every time.
I’m using the code implemented in Temaran’s compute shader plugin example to render out to a texture, based on a raycast from the camera into world space. My HLSL looks like:
#include "Common.usf"
// Pixel shader entry point for the full-screen quad.
// Projects the interpolated quad coordinate into world space
// (via uv_projection) and shades it with world_space_color.
void MainPixelShader( in float4 uv : TEXCOORD0,
out float4 OutColor : SV_Target0 )
{
	float2 projected_xy = uv_projection( uv );
	OutColor = world_space_color( projected_xy );
}
// Reconstructs the world-space position under this pixel from the scene
// depth buffer, then casts a camera ray through it onto the z=0 ground
// plane, returning the plane-space xy of the hit.
float2 uv_projection( float4 uv )
{
// Clip-space xy -> buffer UV. NOTE(review): UE4 packs the bias in .wz
// (not .zw), so the .wz swizzle here matches engine convention — but both
// scale and bias come from the View uniform buffer, which must actually be
// bound to this shader or they read as zero (the symptom described below).
float2 screen_uv = uv.xy / uv.w * View.ScreenPositionScaleBias.xy + View.ScreenPositionScaleBias.wz;
float scene_depth = CalcSceneDepth(screen_uv);
// NOTE(review): the engine's own reconstruction uses
// float4(ScreenPosition.xy * SceneDepth, SceneDepth, 1) where
// ScreenPosition = uv.xy / uv.w. Multiplying by uv.w here is only
// equivalent when uv.w == 1 (true for a full-screen quad fed w=1) —
// confirm, or divide by uv.w to be safe.
float4 h_world_pos = mul(float4(uv.xy * uv.w * scene_depth, scene_depth, 1), View.ScreenToWorld);
// Perspective divide back to 3D world position.
float3 world_pos = h_world_pos.xyz / h_world_pos.w;
// Direction from the camera through this pixel's world-space point.
float3 camera_dir = normalize(world_pos - View.ViewOrigin.xyz);
return intersect_ray_plane( world_pos, camera_dir);
}
// Intersects the ray (origin, direction) with the ground plane z = 0
// (normal +z) and returns the xy coordinates of the hit point.
// A ray (nearly) parallel to the plane yields (0, 0).
float2 intersect_ray_plane(float3 origin, float3 direction)
{
	const float3 plane_point  = float3( 0, 0, 0 );
	const float3 plane_normal = float3( 0, 0, 1 );

	// Cosine between ray direction and plane normal; ~0 means parallel.
	const float denom = dot(direction, plane_normal);
	if (abs(denom) < 0.0000001 )
	{
		// No usable intersection — return the plane origin's xy.
		return float2(0.0, 0.0);
	}

	// Signed distance along the ray to the plane.
	const float t = dot(plane_point - origin, plane_normal) / denom;
	const float3 hit_point = origin + t * direction;
	return hit_point.xy;
}
but View.ScreenPositionScaleBias.xy is always (0.0, 0.0) at which point everything messes up.
I’m using ENQUEUE_UNIQUE_RENDER_COMMAND_ONEPARAMETER to add the shader to the render thread. Does this mean I happen to be rendering before FViewUniformShaderParameters has been populated, or do I need to bind the data to my pixel shader somehow when it executes within its threaded function? At the moment that is:
// Render-thread body: draws a full-screen quad into our render target
// with a custom global vertex/pixel shader pair.
FRHICommandListImmediate& RHICmdList = GRHICommandList.GetImmediateCommandList();
// Bind our UTextureRenderTarget2D as the color target (no depth target).
SetRenderTarget( RHICmdList, current_render_target_->GetRenderTargetResource()->GetRenderTargetTexture(), FTextureRHIRef() );
// Opaque, no culling state, depth test/write disabled — plain full-screen pass.
RHICmdList.SetBlendState( TStaticBlendState<>::GetRHI() );
RHICmdList.SetRasterizerState( TStaticRasterizerState<>::GetRHI() );
RHICmdList.SetDepthStencilState( TStaticDepthStencilState<false, CF_Always>::GetRHI() );
static FGlobalBoundShaderState BoundShaderState;
TShaderMapRef<FBasicVertexShader> VertexShader( GetGlobalShaderMap( feature_level_ ) );
TShaderMapRef<FTerrainShader> PixelShader( GetGlobalShaderMap( feature_level_ ) );
SetGlobalBoundShaderState( RHICmdList, feature_level_, BoundShaderState, GTextureVertexDeclaration.VertexDeclarationRHI, *VertexShader, *PixelShader );
// Only our own constant/variable uniform buffers are bound here.
// NOTE(review): nothing binds the View uniform buffer
// (FViewUniformShaderParameters) to this pixel shader. Global shaders
// drawn manually outside the scene renderer do NOT get View.* filled in
// automatically — the scene renderer binds View.ViewUniformBuffer per
// view when it issues its own draws. That is almost certainly why
// View.ScreenPositionScaleBias reads (0, 0): the shader's view uniform
// buffer slot is unbound (zero-filled), not "populated too late".
// Fix: capture a valid FSceneView's ViewUniformBuffer (TUniformBufferRef
// of FViewUniformShaderParameters) when enqueuing this command, and bind
// it here via the shader's FShaderUniformBufferParameter for the View
// struct — or pass the few needed values in your own uniform buffer.
PixelShader->SetUniformBuffers( RHICmdList, constant_parameters_, variable_parameters_ );
// Full-screen quad as a 2-triangle strip in clip space, UVs in [0,1].
FTextureVertex verts[ 4 ];
verts[ 0 ].Position = FVector4( -1.0f, 1.0f, 0, 1.0f );
verts[ 1 ].Position = FVector4( 1.0f, 1.0f, 0, 1.0f );
verts[ 2 ].Position = FVector4( -1.0f, -1.0f, 0, 1.0f );
verts[ 3 ].Position = FVector4( 1.0f, -1.0f, 0, 1.0f );
verts[ 0 ].UV = FVector2D( 0, 0 );
verts[ 1 ].UV = FVector2D( 1, 0 );
verts[ 2 ].UV = FVector2D( 0, 1 );
verts[ 3 ].UV = FVector2D( 1, 1 );
// User-pointer draw: copies verts each call (fine for a single quad).
DrawPrimitiveUP( RHICmdList, PT_TriangleStrip, 2, verts, sizeof( verts[ 0 ] ) );
PixelShader->UnbindBuffers( RHICmdList );
The result is then used in a material, so that my procedural texture is projected onto our terrain with the material system.
I can see that it is the View structure that doesn’t have any data: if I debug the shader by outputting different colors for different issues, everything traces back to this structure not having the data I need. Do I need to populate some of the data myself, or is this a render-order issue (like how the decal system is deferred)?