
c++ - OpenGL lighting illuminates the wrong surfaces


I am using OpenGL to display simple objects with a light above them. The problem is that the faces of my objects are not lit the right way. Here is my result:

result

The light should be above the object. I load the object from a Wavefront file like this:

    if ( strcmp( lineHeader, "v" ) == 0 ){
        glm::vec3 vertex;
        fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z );
        vertices.push_back(vertex);
    }else if ( strcmp( lineHeader, "vt" ) == 0 ){
        glm::vec2 uv;
        fscanf(file, "%f %f\n", &uv.x, &uv.y );
        uv.y = uv.y;
        // Invert V coordinate since we will only use DDS texture, which are inverted. Remove if you want to use TGA or BMP loaders.
        temp_uvs.push_back(uv);
    }else if ( strcmp( lineHeader, "vn" ) == 0 ){
        glm::vec3 normal;
        fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z );
        temp_normals.push_back(normal);
    }else if ( strcmp( lineHeader, "f" ) == 0 ){
        std::string vertex1, vertex2, vertex3;
        unsigned int vertexIndex[3], uvIndex[3], normalIndex[3];
        int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex[1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2] );
        if (matches != 9){
            printf("File can't be read by our simple parser :-( Try exporting with other options\n");
            return false;
        }
        indices.push_back(vertexIndex[0]-1);
        indices.push_back(vertexIndex[1]-1);
        indices.push_back(vertexIndex[2]-1);
        uvIndices.push_back(uvIndex[0]);
        uvIndices.push_back(uvIndex[1]);
        uvIndices.push_back(uvIndex[2]);
        normalIndices.push_back(normalIndex[0]);
        normalIndices.push_back(normalIndex[1]);
        normalIndices.push_back(normalIndex[2]);
    }else{
        // Probably a comment, eat up the rest of the line
        char stupidBuffer[1000];
        fgets(stupidBuffer, 1000, file);
    }
}

normals.reserve(indices.size());
uvs.reserve(indices.size());

for( unsigned int i=0; i<indices.size(); i++ ){

    // Get the indices of its attributes
    unsigned int uvIndex = uvIndices[i];
    unsigned int normalIndex = normalIndices[i];
    normals[indices[i]] = temp_normals[normalIndex-1];
    uvs[indices[i]] = temp_uvs[uvIndex-1];
}

Vertex shader:

#version 150 core

in vec2 color;
in vec3 position;
in vec3 normal;


out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;

uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;



void main() {

// Position of the vertex, in worldspace : M * position
Position_worldspace = (M * vec4(position.x , position.y , position.z ,1.0)).xyz;

// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( V * M * vec4(position,1)).xyz;
EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;

// Vector that goes from the vertex to the light, in camera space. M is ommited because it's identity.
vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;

// Normal of the the vertex, in camera space
Normal_cameraspace = ( V * M * vec4(normal,0)).xyz; // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.

// UV of the vertex. No special space for this one.
UV = color;
gl_Position = MVP*vec4(position.x , position.y , position.z , 1.0);

};

My fragment shader is:

#version 150 core

// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;

out vec4 outColor

// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
uniform vec3 LightPosition_worldspace;

void main(){

vec3 LightColor = vec3(1,1,1);
float LightPower = 20.0f;

// Material properties
vec3 MaterialDiffuseColor = texture2D( myTextureSampler, UV ).rgb;
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3);

// Distance to the light
float distance = length( LightPosition_worldspace - Position_worldspace );

// Normal of the computed fragment, in camera space
vec3 n = normalize( Normal_cameraspace );
// Direction of the light (from the fragment to the light)
vec3 l = normalize( LightDirection_cameraspace );
// Cosine of the angle between the normal and the light direction,
// clamped above 0
// - light is at the vertical of the triangle -> 1
// - light is perpendicular to the triangle -> 0
// - light is behind the triangle -> 0
float cosTheta = clamp( dot( n,l ), 0,1 );

// Eye vector (towards the camera)
vec3 E = normalize(EyeDirection_cameraspace);
// Direction in which the triangle reflects the light
vec3 R = reflect(-l,n);
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E,R ), 0,1 );

outColor.rgb =
// Ambient : simulates indirect lighting
MaterialAmbientColor +
// Diffuse : "color" of the object
MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) +
// Specular : reflective highlight, like a mirror
MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance);

}

Here is the cube that gets loaded:

# cube.obj
#

o cube

v 0.0 0.0 0.0
v 0.0 0.0 1.0
v 0.0 1.0 0.0
v 0.0 1.0 1.0
v 1.0 0.0 0.0
v 1.0 0.0 1.0
v 1.0 1.0 0.0
v 1.0 1.0 1.0

vn 0.0 0.0 1.0
vn 0.0 0.0 -1.0
vn 0.0 1.0 0.0
vn 0.0 -1.0 0.0
vn 1.0 0.0 0.0
vn -1.0 0.0 0.0

vt 0.25 0.0
vt 0.5 0.0
vt 0 0.25
vt 0.25 0.25
vt 0.5 0.25
vt 0.75 0.25
vt 0.0 0.5
vt 0.25 0.5
vt 0.5 0.5
vt 0.75 0.5
vt 0.25 0.75
vt 0.5 0.75
vt 0.25 1.0
vt 0.5 1.0

f 1/11/2 7/14/2 5/12/2
f 1/11/2 3/13/2 7/14/2
f 1/7/6 4/4/6 3/3/6
f 1/7/6 2/8/6 4/4/6
f 3/1/3 8/5/3 7/2/3
f 3/1/3 4/4/3 8/5/3
f 5/10/5 7/6/5 8/5/5
f 5/10/5 8/5/5 6/9/5
f 1/11/4 5/12/4 6/9/4
f 1/11/4 6/9/4 2/8/4
f 2/8/1 6/9/1 8/5/1
f 2/8/1 8/5/1 4/4/1

And here is how I load my VBOs:

glGenVertexArrays(1, &vao);
glBindVertexArray(vao);

// Create a Vertex Buffer Object and copy the vertex data to it
glGenBuffers(1, &position_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);


// Create an element array
glGenBuffers(1, &elements_array_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size()*sizeof(GLuint), &indices[0], GL_STATIC_DRAW);

glGenBuffers(1, &normal_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);

if (textured) {
    texture = new sf::Texture();
    if(!texture->loadFromFile("textures/uv.jpeg"/*,sf::IntRect(0, 0, 128, 128)*/))
        std::cout << "Error loading texture !!" << std::endl;
    glGenBuffers(1, &color_array_buffer);
    glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
    glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec3), &uvs[0], GL_STATIC_DRAW);
}

Here is the code that renders the scene:

// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
    cout << "TextureID not found ..." << endl;
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UVs
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID==-1)
    cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);

// 3rd attribute buffer : normals
GLuint vertexNormal_modelspaceID = glGetAttribLocation(shaderProgram, "normal");
if(!vertexNormal_modelspaceID)
    cout << "vertexNormal_modelspaceID not found ..." << endl;
glEnableVertexAttribArray(vertexNormal_modelspaceID);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glVertexAttribPointer(vertexNormal_modelspaceID, 3, GL_FLOAT, GL_FALSE, 0, 0 );


// Specify the layout of the vertex data
GLint posAttrib;
posAttrib = glGetAttribLocation(shaderProgram, "position");
// glBindAttribLocation(shaderProgram,posAttrib,"position");
if(!posAttrib)
    cout << "posAttrib not found ..." << endl;

glEnableVertexAttribArray(posAttrib);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, GL_FALSE, 0, 0);

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
// Draw a rectangle from the indices_size/3 triangles using indices_size indices
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
// glDrawArrays(GL_TRIANGLES,posAttrib,indices.size());

while ((error = glGetError()) != GL_NO_ERROR) {
cerr << "OpenGL error: " << error << endl;
}

I think my normals are not being loaded correctly. I am also wondering whether I have to put the normal and UV information into my element array as well, or whether those attributes are simply fetched the classic way, without indices.

Edit: I changed the parser and the vertices now load fine, but the lighting and the texture are still not applied correctly.

Best answer

  1. Here:

    normals.reserve(indices.size()); 
    uvs.reserve(indices.size());

    does not change the size, only the capacity (try it yourself: http://ideone.com/FbXtbm), so for example this call

    glBufferData(GL_ARRAY_BUFFER, /*->*/normals.size() /*<-*/ * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);

    receives a buffer size of zero as its argument.
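    To see the difference, here is a small standalone sketch (my own illustration, not from the original post) that prints size and capacity. Note that writing through operator[] into a vector that has only been reserved, as the original loop does, is also undefined behaviour:

        #include <cstdio>
        #include <vector>
        #include <glm/glm.hpp>

        int main() {
            std::vector<glm::vec3> normals;

            normals.reserve(36);   // capacity grows, size stays 0
            std::printf("after reserve: size=%zu capacity=%zu\n",
                        normals.size(), normals.capacity());

            // this is exactly what gets passed to glBufferData: 0 bytes
            std::printf("bytes passed: %zu\n", normals.size() * sizeof(glm::vec3));

            normals.resize(36);    // size becomes 36, so normals[i] and normals.size() are now meaningful
            std::printf("after resize: size=%zu\n", normals.size());
            return 0;
        }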

  2. There is a syntax error in the fragment shader:

    in vec3 LightDirection_cameraspace;

    /*->*/ out vec4 outColor /*<-*/

    // Values that stay constant for the whole mesh.

    Add a ";" after outColor.

  3. Your arrays are not set up correctly for your glDrawElements call. I will add some example code after my breakfast coffee.

    Edit 11:02

    The cube has 8 vertices, but to draw it correctly each vertex needs 3 different normals (one per adjacent face), so the attributes have to be duplicated. (For simplicity I did the same for the uvs):

            }
            indices.push_back(vertexIndex[0]-1);
            indices.push_back(vertexIndex[1]-1);
            indices.push_back(vertexIndex[2]-1);
            uvIndices.push_back(uvIndex[0]-1);
            uvIndices.push_back(uvIndex[1]-1);
            uvIndices.push_back(uvIndex[2]-1);
            normalIndices.push_back(normalIndex[0]-1);
            normalIndices.push_back(normalIndex[1]-1);
            normalIndices.push_back(normalIndex[2]-1);
        }else{
            // Probably a comment, eat up the rest of the line
            char stupidBuffer[1000];
            fgets(stupidBuffer, 1000, file);
        }
    }



    #if 1 // EITHER
    vertices.resize(indices.size());
    normals.resize(indices.size());
    uvs.resize(indices.size());

    for( unsigned int i=0; i<indices.size(); ++i){
        vertices[i] = temp_vertices[indices[i]];
        normals[i]  = temp_normals[normalIndices[i]];
        uvs[i]      = temp_uvs[uvIndices[i]];
    }
    #else // OR
    vertices.reserve(indices.size());
    normals.reserve(indices.size());
    uvs.reserve(indices.size());

    for( unsigned int i=0; i<indices.size(); ++i){
        vertices.push_back(temp_vertices[indices[i]]);
        normals.push_back(temp_normals[normalIndices[i]]);
        uvs.push_back(temp_uvs[uvIndices[i]]);
    }
    #endif

    struct yield {
        int i;
        yield() : i(0) {}
        int operator() (){ return i++; }
    };

    std::generate(indices.begin(), indices.end(), yield());



    std::clog << "num vertices: " << vertices.size() << std::endl
    << "num normals: " << normals.size() << std::endl
    << "num uvs: " << uvs.size() << std::endl
    << "num indices: " << indices.size() << std::endl;

    Note that I also changed something in the parsing loop: all the indices are decremented there now. It is not strictly necessary to unroll all the indices for every triangle, but it is the simplest way.
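    For completeness, here is a rough sketch of the non-unrolled alternative mentioned above (my own addition, assuming C++11 and the same indices/uvIndices/normalIndices and temp_* vectors as in the code above; the out* names are placeholders): deduplicate each v/vt/vn combination with a map so identical combinations share a single index and the element array stays small.

        #include <map>
        #include <tuple>
        #include <vector>

        std::map<std::tuple<unsigned, unsigned, unsigned>, unsigned> cache;
        std::vector<glm::vec3> outVertices, outNormals;
        std::vector<glm::vec2> outUvs;
        std::vector<unsigned>  outIndices;

        for (unsigned i = 0; i < indices.size(); ++i) {
            auto key = std::make_tuple(indices[i], uvIndices[i], normalIndices[i]);
            auto it  = cache.find(key);
            if (it == cache.end()) {
                // first time this v/vt/vn combination appears: emit a new vertex
                unsigned newIndex = static_cast<unsigned>(outVertices.size());
                outVertices.push_back(temp_vertices[indices[i]]);
                outUvs.push_back(temp_uvs[uvIndices[i]]);
                outNormals.push_back(temp_normals[normalIndices[i]]);
                cache[key] = newIndex;
                outIndices.push_back(newIndex);
            } else {
                // combination already emitted: reuse its index
                outIndices.push_back(it->second);
            }
        }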

  4. I also refactored your shaders:

    #version 150 core

    in vec2 color;
    in vec3 position;
    in vec3 normal;


    out vec2 UV;
    out vec3 Normal_cameraspace;
    out vec3 EyeDirection_cameraspace;
    out vec3 LightDirection_cameraspace;

    uniform mat4 MVP;
    uniform mat4 V;
    uniform mat4 M;
    uniform vec3 LightPosition_worldspace;


    void main() {

        // Position of the vertex, in worldspace : M * position
        vec3 wPos = (M * vec4(position, 1.0)).xyz;

        // Vector that goes from the vertex to the camera, in camera space.
        // In camera space, the camera is at the origin (0,0,0).
        vec3 vertexPosition_cameraspace = ( V * M * vec4(position,1)).xyz;
        EyeDirection_cameraspace = -vertexPosition_cameraspace;

        // Vector that goes from the vertex to the light, in camera space.
        // M is omitted because the light has no model matrix (it's identity).
        vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
        LightDirection_cameraspace = LightPosition_cameraspace - vertexPosition_cameraspace;

        // Normal of the vertex, in camera space
    #if 0
        // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
        Normal_cameraspace = (V * M * vec4(normal,0)).xyz;
    #else
        Normal_cameraspace = mat3(V) * inverse(transpose(mat3(M))) * normal;
    #endif
        Normal_cameraspace = normalize(Normal_cameraspace);

        // UV of the vertex. No special space for this one.
        UV = color;
        gl_Position = MVP * vec4(position, 1.0);
    } // void main()
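    The GLSL inverse() call used above is available since GLSL 1.40, so it works in these #version 150 shaders. An alternative that avoids the per-vertex matrix inversion is to build the normal matrix once per draw on the CPU with GLM and upload it as an extra uniform. The sketch below is my own; the uploadNormalMatrix helper and the "NormalMatrix" uniform name are assumptions, not part of the original shaders:

        // Assumes the usual OpenGL headers are already included, as elsewhere in this code.
        #include <glm/glm.hpp>
        #include <glm/gtc/matrix_inverse.hpp>   // glm::inverseTranspose
        #include <glm/gtc/type_ptr.hpp>         // glm::value_ptr

        // Hypothetical helper: uploads mat3(V) * inverseTranspose(mat3(M)) so the
        // vertex shader could read a "NormalMatrix" uniform instead of calling
        // inverse(transpose(...)) for every vertex.
        void uploadNormalMatrix(GLuint shaderProgram, const glm::mat4& V, const glm::mat4& M) {
            glm::mat3 normalMatrix = glm::mat3(V) * glm::inverseTranspose(glm::mat3(M));
            GLint loc = glGetUniformLocation(shaderProgram, "NormalMatrix");
            if (loc != -1)
                glUniformMatrix3fv(loc, 1, GL_FALSE, glm::value_ptr(normalMatrix));
        }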

    #version 150 core

    // Interpolated values from the vertex shaders
    in vec2 UV;
    in vec3 Normal_cameraspace;
    in vec3 EyeDirection_cameraspace;
    in vec3 LightDirection_cameraspace;

    out vec4 outColor;

    const float SHININESS = 5.0;
    const float AMBIENCE = 0.1;
    const float SPECULARITY = 0.3;
    const vec3 LIGHT_COLOR = vec3(1.0, 1.0, 1.0);
    const float LIGHT_INTENSITY = 300.0;


    //uniform sampler2D myTextureSampler;
    //uniform vec3 LightPosition_worldspace;

    float lambert_fac(vec3 lightPos, vec3 normal) {
        vec3 l_ = normalize(lightPos);
        vec3 n_ = normalize(normal);

        return max(dot(l_, n_), 0.0);
    }

    float phong_fac(vec3 eyePos, vec3 lightPos, vec3 normal, float shiny) {
        vec3 e_ = normalize(eyePos);
        vec3 l_ = normalize(lightPos);
        vec3 n_ = normalize(normal);
        vec3 r_ = normalize(reflect(-l_, n_));

        return pow(max(dot(r_, e_), 0.0), shiny);
    }

    float attenuate(float d/*distance*/, float c/*constant*/,
                    float l/*linear*/, float q/*quadratic*/) {
        return 1.0/(c + l * d + q * d * d);
    }

    struct Material {
        vec3 ambient, diffuse, specular;
    };

    void main(){
        Material mat;

        /*texture2D( myTextureSampler, UV ).rgb;*/
        // placeholder base color derived from the UVs, just to keep the UV
        // attribute contributing so the optimizer does not strip it away
        vec3 baseColor = vec3(UV.s, UV.t, clamp(UV.s + UV.t, 0., 1.));
        mat.ambient = mat.diffuse = mat.specular = baseColor;
        mat.ambient *= AMBIENCE; mat.specular *= SPECULARITY;

        // attenuation
        float att = attenuate(length(LightDirection_cameraspace), 0., 0., 1.);

        // light
        vec3 l_ = LIGHT_COLOR * LIGHT_INTENSITY * att;

        // Diffuse contribution
        float dc = lambert_fac(LightDirection_cameraspace, Normal_cameraspace);

        // Specular contribution
        float sc = phong_fac(EyeDirection_cameraspace,
                             LightDirection_cameraspace,
                             Normal_cameraspace,
                             SHININESS);

        outColor = vec4(mat.ambient
                        + mat.diffuse * dc * l_
                        + mat.specular * sc * l_, 1.0);
    } // void main()

    And this:

    screenshot

    is how it looks now.

The original question, "c++ - OpenGL lighting illuminates the wrong surfaces", can be found on Stack Overflow: https://stackoverflow.com/questions/24627242/
