phong shader help

235b371d8fea8dbf60a687b31f8b5f67
0
staticVoid2 101 Aug 20, 2009 at 18:47

hello. I’ve just started with shader programming but I’m having problems getting a basic phong shader to work, I’m using RenderMonkey and the shader in question is one of the examples that comes with RenderMonkey (Textured Phong).

here is the vertex shader:

// Shader constants set by the application (see register table below).
// NOTE(review): the app sets these positions in WORLD space (c7/c8), but
// the shader compares them against a view-space vertex position -- this
// space mismatch is the bug discussed in the replies.
float3 fvLightPosition;     // light position (world space, per the app code)
float3 fvEyePosition;       // camera position (world space, per the app code)
float4x4 matView;           // view matrix (only 3 rows uploaded: c4-c6)
float4x4 matViewProjection; // combined view-projection matrix (c0-c3)

// Per-vertex input fetched from the vertex buffer.
struct VS_INPUT 
{
   float4 Position : POSITION0; // object-space position
   float2 Texcoord : TEXCOORD0; // base-map UV
   float3 Normal :   NORMAL0;   // object-space normal
   
};

// Vertex shader output; TEXCOORD1-3 are interpolated and consumed by
// the matching PS_INPUT in the pixel shader below.
struct VS_OUTPUT 
{
   float4 Position :        POSITION0; // clip-space position
   float2 Texcoord :        TEXCOORD0; // base-map UV, passed through
   float3 ViewDirection :   TEXCOORD1; // vertex-to-eye vector (unnormalized)
   float3 LightDirection :  TEXCOORD2; // vertex-to-light vector (unnormalized)
   float3 Normal :          TEXCOORD3; // normal transformed by matView
   
};

// Vertex shader: transforms the vertex to clip space and computes the
// per-vertex view and light vectors that are interpolated for the Phong
// pixel shader.
VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output;

   // Row-vector convention: position is multiplied on the left.
   Output.Position         = mul( Input.Position, matViewProjection );
   Output.Texcoord         = Input.Texcoord;
   
   // Vertex position transformed by matView -- i.e. VIEW space.
   float3 fvObjectPosition = mul( Input.Position, matView );
   
   // NOTE(review): fvEyePosition/fvLightPosition are uploaded in WORLD
   // space while fvObjectPosition is in view space; subtracting vectors
   // from different spaces is the defect discussed in this thread. All
   // three must be expressed in the same space.
   Output.ViewDirection    = fvEyePosition - fvObjectPosition;
   Output.LightDirection   = fvLightPosition - fvObjectPosition;
   // NOTE(review): normals should be transformed by the inverse-transpose
   // ("normal matrix"); using matView directly is only equivalent when the
   // matrix has no non-uniform scale (see Reedbeta's reply).
   Output.Normal           = mul( Input.Normal, matView );
      
   return( Output );
   
}

// Registers:
//
//   Name              Reg   Size
//   ----------------- ----- ----
//   matViewProjection c0       4
//   matView           c4       3
//   fvLightPosition   c7       1
//   fvEyePosition     c8       1
//

    // Compiled vs_2_0 listing of the vertex shader above
    // (register assignments per the table: c0-c3 = matViewProjection,
    //  c4-c6 = matView rows, c7 = light pos, c8 = eye pos).
    vs_2_0
    dcl_position v0
    dcl_texcoord v1
    dcl_normal v2
    // oPos = v0 * matViewProjection, one dp4 per component
    dp4 oPos.x, v0, c0
    dp4 oPos.y, v0, c1
    dp4 oPos.z, v0, c2
    dp4 oPos.w, v0, c3
    // r0.xyz = v0 * matView  (view-space position)
    dp4 r0.x, v0, c4
    dp4 r0.y, v0, c5
    dp4 r0.z, v0, c6
    // oT1 = eye - position, oT2 = light - position (mixed spaces -- see note above)
    add oT1.xyz, -r0, c8
    add oT2.xyz, -r0, c7
    // oT3 = normal * matView (3x3 part only, hence dp3)
    dp3 oT3.x, v2, c4
    dp3 oT3.y, v2, c5
    dp3 oT3.z, v2, c6
    // pass UV through
    mov oT0.xy, v1

the first thing I dont understand is:
float3 fvObjectPosition = mul( Input.Position, matView ).
If matView is the view matrix, then this line transforms the vertex into view space, whereas the light and eye vectors are both in world space. I’ve tried converting these vectors into view space as well, and also leaving the vertex in world space, but neither of these works.

here’s the pixel shader:

// Pixel shader constants (registers per the table below).
float4 fvAmbient;      // ambient material/light color (c0)
float4 fvSpecular;     // specular color (c1)
float4 fvDiffuse;      // diffuse color (c2)
float fSpecularPower;  // Phong specular exponent (c3.x)
sampler2D baseMap;     // diffuse texture (s0)

// Interpolated inputs; must match VS_OUTPUT's TEXCOORD0-3 layout.
struct PS_INPUT 
{
   float2 Texcoord :        TEXCOORD0; // base-map UV
   float3 ViewDirection :   TEXCOORD1; // vertex-to-eye vector (unnormalized)
   float3 LightDirection:   TEXCOORD2; // vertex-to-light vector (unnormalized)
   float3 Normal :          TEXCOORD3; // interpolated normal (unnormalized)
   
};

// Pixel shader: classic Phong -- ambient + diffuse (N.L) + specular (R.V^n),
// all modulated per-material and clamped to [0,1].
float4 ps_main( PS_INPUT Input ) : COLOR0
{      
   // Re-normalize after interpolation.
   float3 fvLightDirection = normalize( Input.LightDirection );
   float3 fvNormal         = normalize( Input.Normal );
   // NOTE(review): N.L is not clamped to 0 here, so back-facing diffuse
   // goes negative; only the final saturate masks it.
   float  fNDotL           = dot( fvNormal, fvLightDirection ); 
   
   // Reflection of the light direction about the normal: R = 2(N.L)N - L.
   float3 fvReflection     = normalize( ( ( 2.0f * fvNormal ) * ( fNDotL ) ) - fvLightDirection ); 
   float3 fvViewDirection  = normalize( Input.ViewDirection );
   // Specular term base, clamped so pow() never sees a negative.
   float  fRDotV           = max( 0.0f, dot( fvReflection, fvViewDirection ) );
   
   float4 fvBaseColor      = tex2D( baseMap, Input.Texcoord );
   
   float4 fvTotalAmbient   = fvAmbient * fvBaseColor; 
   float4 fvTotalDiffuse   = fvDiffuse * fNDotL * fvBaseColor; 
   float4 fvTotalSpecular  = fvSpecular * pow( fRDotV, fSpecularPower );
   
   // Clamp the sum to [0,1] before writing to the render target.
   return( saturate( fvTotalAmbient + fvTotalDiffuse + fvTotalSpecular ) );
      
}

// Registers:
//
//   Name           Reg   Size
//   -------------- ----- ----
//   fvAmbient      c0       1
//   fvSpecular     c1       1
//   fvDiffuse      c2       1
//   fSpecularPower c3       1
//   baseMap        s0       1
//

    // Compiled ps_2_0 listing of the pixel shader above
    // (c0 = ambient, c1 = specular, c2 = diffuse, c3.x = specular power).
    ps_2_0
    def c4, 2, 0, 0, 0          // literals: 2.0 (for R = 2(N.L)N - L) and 0.0
    dcl t0.xy                   // UV
    dcl t1.xyz                  // view direction
    dcl t2.xyz                  // light direction
    dcl t3.xyz                  // normal
    dcl_2d s0
    texld r0, t0, s0            // r0 = base color
    nrm r1.xyz, t3              // r1 = N (normalized)
    nrm r2.xyz, t2              // r2 = L (normalized)
    dp3 r1.w, r1, r2            // r1.w = N.L
    mul r1.xyz, r1, r1.w        // r1 = (N.L)N
    mul r3, r1.w, c2            // r3 = diffuse * N.L
    mad r1.xyz, r1, c4.x, -r2   // r1 = 2(N.L)N - L = R
    nrm r2.xyz, r1              // r2 = normalize(R)
    nrm r1.xyz, t1              // r1 = V (normalized)
    dp3 r1.w, r2, r1            // r1.w = R.V
    max r2.w, r1.w, c4.y        // clamp R.V to >= 0
    pow r1.w, r2.w, c3.x        // specular term = (R.V)^power
    mul r2, r0, r3              // diffuse * base color
    mad r0, c0, r0, r2          // + ambient * base color
    mad_sat r0, c1, r1.w, r0    // + specular, saturated
    mov oC0, r0

I convert this into the asm code and then place this in my application, I then set one vertex shader constant every time the world matrix changes, i.e.

// Upload World*View*Projection to c0-c3 (4 registers), transposed because
// the dp4-per-component asm above expects one matrix ROW per register.
pDevice->SetVertexShaderConstantF(0, (float*)&(getWorldMatrix() * getViewMatrix() * getProjectionMatrix()).transpose(), 4);

and for every frame I set these constants:

// pos == light position
// eye == camera position
// (both in world space)
pDevice->SetVertexShaderConstantF(7, pos, 1); // fvLightPosition (c7) -- WORLD space
pDevice->SetVertexShaderConstantF(8, eye, 1); // fvEyePosition  (c8) -- WORLD space
// NOTE(review): c4-c6 get the view matrix, so the shader's position ends up
// in view space while c7/c8 stay in world space -- the spaces must agree
// (see the replies below).
pDevice->SetVertexShaderConstantF(4, (float*)&getViewMatrix().transpose(), 3);

// Placeholder material values: same grey for all four PS constants.
float test[4] = { 0.3f, 0.3f, 0.3f, 0.3f };
pDevice->SetPixelShaderConstantF(0, test, 1); // fvAmbient
pDevice->SetPixelShaderConstantF(1, test, 1); // fvSpecular
pDevice->SetPixelShaderConstantF(2, test, 1); // fvDiffuse
pDevice->SetPixelShaderConstantF(3, test, 1); // fSpecularPower (x component used)

but the lighting seems way off.

thanks in advance.

4 Replies

Please log in or register to post a reply.

B20d81438814b6ba7da7ff8eb502d039
0
Vilem_Otte 117 Aug 21, 2009 at 11:14

Your vertex shader has one issue — your eye and light vectors aren’t in view space, but in world space (if you aren’t transforming them on the CPU). You need to transform them into view space by multiplying them with the view matrix.

Also, we don’t transform normals by the model-view matrix, but by the so-called normal matrix (the inverse-transpose of the upper-left three rows and columns of the model-view matrix).

235b371d8fea8dbf60a687b31f8b5f67
0
staticVoid2 101 Aug 21, 2009 at 16:35

Thanks, the main problem I was having was because the normals were not correct for each vertex, I have sorted that and it looks a lot better

I have also replaced matView with the inverse world matrix so that the vertices are in the same space (which is what I think matView should have been in the shader), which gives more correct lighting. However, any surface that is backfacing still reflects light, because the normals are not orthogonal to the triangle surface at each point but rather interpolated to get a curving effect. Is there any way to get the triangle normal at the vertex shader level?

Also, we don’t transform normals by the model-view matrix, but by the so-called normal matrix (the inverse-transpose of the upper-left three rows and columns of the model-view matrix).

Isn’t the inverse-transpose of an orthogonal view matrix just going to leave you with what you started with? I.e., isn’t the inverse equal to the transpose when you are dealing with a rotation matrix?

A8433b04cb41dd57113740b779f61acb
0
Reedbeta 167 Aug 21, 2009 at 16:45

Yes, the inverse-transpose is the same as the original matrix when it’s orthogonal. It only makes a difference when there’s scaling involved.

As for the light getting on the backside of surfaces due to vertex normals being interpolated, that’s just how averaged normals work (they’re designed to give the illusion of a curved surface). You can calculate the face normals and store them in a separate vertex attribute if you wish (of course this will lead to duplication of any vertices shared by multiple triangles, since they will all need their own face normals). If you have geometry shaders you can also calculate the face normals there since they operate on a triangle at a time.

C4b4ac681e11772d2e07ed9a84cffe3f
0
kusma 101 Aug 23, 2009 at 15:48

@Reedbeta

You can calculate the face normals and store them in a separate vertex attribute if you wish (of course this will lead to duplication of any vertices shared by multiple triangles, since they will all need their own face normals). If you have geometry shaders you can also calculate the face normals there since they operate on a triangle at a time.

The face-normal can also be calculated in the fragment-shader without vertex-replication, by crossing the result of the derivatives of the fragment-position.