c++ - glGenTextures works but returns the same texture name every time

I have a class called Texture, which is responsible for managing textures. At program startup the OpenGL context is initialized correctly (which is what sets this question apart from most questions about unexpected glGenTextures behaviour). Texture obtains a texture name from glGenTextures(), then loads the image data into memory and binds it to that texture name via the function texGLInit(). This works, and the texture displays exactly as expected.
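
For context, the member functions below rely on a class declaration along these lines (a rough sketch inferred from how the members are used; point and checkPOT are helpers not shown, and the exact layout is a reconstruction):

//Sketch of the Texture class as implied by the member functions below.
//The vector types follow lodepng's std::vector-based API; the ANCHOR
//values are taken from the switch in draw().
enum ANCHOR { CENTER, BOTTOMLEFT, TOPLEFT, TOPRIGHT, BOTTOMRIGHT };

class Texture
{
public:
    Texture(std::string filename);
    void reloadTexture(std::string filename);
    void draw(point* origin, ANCHOR drawFrom);

private:
    void texGLInit();

    std::vector<unsigned char> buffer; //raw bytes of the PNG file
    std::vector<unsigned char> image;  //decoded RGBA pixel data
    unsigned w, h;                     //image dimensions in pixels
    GLuint textureID[1];               //texture name from glGenTextures
    ANCHOR Draw_From_Corner;           //which corner draw() anchors at
};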

However, I also want my texture to be able to change the image it displays when the user clicks a button and picks a new file from the HDD through an OpenFileDialog. This function is called reloadTexture(). It tries to delete the old image/pixel data from memory and replace it with new data from the file the user selected. To do this it deletes the texture name with glDeleteTextures, then allocates a new texture name and loads the new pixel data into memory using texGLInit(). But 100% of the time the new texture name is exactly the same as the old one (usually "1").

The image displayed after this happens is strange. It has the new image's dimensions, but still the old image's pixels. In short, it stretches the old image to the new image's size: it is still drawing with pixel data that has supposedly been deleted. What should happen is that the screen now shows the new image file. I believe this has to do with the texture name not being unique.
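
To illustrate, a minimal check of that suspicion (a sketch, assuming a valid current OpenGL context) looks like this:

//Minimal sketch: does glGenTextures hand back the name I just deleted?
GLuint name = 0;
glGenTextures(1, &name);
printf("first name: %u\n", name);   //typically prints 1

glDeleteTextures(1, &name);         //the name becomes unused again

glGenTextures(1, &name);
printf("second name: %u\n", name);  //usually prints 1 again

(Note that reuse like this is actually allowed: glGenTextures only guarantees names that are unused at the time of the call, so a deleted name may legitimately be handed out again.)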

The code follows:

Texture::Texture(string filename)//---Constructor loads in the initial image. Works fine!
{
    textureID[0] = 0;

    const char* fnPtr = filename.c_str(); //our image loader accepts a ptr to a char, not a string
    //printf(fnPtr);

    lodepng::load_file(buffer, fnPtr);//load the file into a buffer

    unsigned error = lodepng::decode(image, w, h, buffer);//lodepng's decode function will load the pixel data into the image vector from the buffer
    //display any errors with the texture
    if (error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) << endl;
    }
    //execute the code that'll throw exceptions to do with the image's size
    checkPOT(w);
    checkPOT(h);

    //image now contains our pixel data. All ready for OpenGL to do its thing

    //let's get this texture up in the video memory
    texGLInit();

    Draw_From_Corner = CENTER;
}

void Texture::reloadTexture(string filename)//Reload texture replaces the texture name and image/pixel data bound to this texture
{
    //first and foremost, clear the image and buffer vectors back down to nothing so we can start afresh
    buffer.clear();
    image.clear();
    w = 0;
    h = 0;
    //also delete the texture name we were using before
    glDeleteTextures(1, &textureID[0]);

    const char* fnPtr = filename.c_str(); //our image loader accepts a ptr to a char, not a string
    //printf(fnPtr);

    lodepng::load_file(buffer, fnPtr);//load the file into a buffer

    unsigned error = lodepng::decode(image, w, h, buffer);//lodepng's decode function will load the pixel data into the image vector from the buffer
    //display any errors with the texture
    if (error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) << endl;
    }
    //execute the code that'll throw exceptions to do with the image's size
    checkPOT(w);
    checkPOT(h);

    //image now contains our pixel data. All ready for OpenGL to do its thing

    //let's get this texture up in the video memory
    texGLInit();

    Draw_From_Corner = CENTER;
}

void Texture::texGLInit()//Actually gets the new texture name and loads the pixel data into OpenGL
{
    glGenTextures(1, &textureID[0]);
    ////printf("\ntextureID = %u", textureID[0]);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);//everything we're about to do is about this texture
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    //glDisable(GL_COLOR_MATERIAL);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, &image[0]);
    //we COULD free the image vector's memory right about now, but we'll do it when there's a need to.
    //Doing it at the beginning of the reloadTexture func makes sure it happens when we need it to.
}

The draw function in the Texture class is also worth showing here:

void Texture::draw(point* origin, ANCHOR drawFrom)
{
    //let us set the DFC enum here.
    Draw_From_Corner = drawFrom;

    glEnable(GL_TEXTURE_2D);
    //printf("\nDrawing texture at (%f, %f)", centerPoint.x, centerPoint.y);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);//bind the texture
    //create a quick vertex array for the primitive we're going to bind the texture to
    ////printf("TexID = %u", textureID[0]);
    GLfloat vArray[8];

#pragma region anchor switch
    switch (Draw_From_Corner)
    {
    case CENTER:
        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2);//bottom left i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2);//top left i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2);//top right i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2);//bottom right i3
        break;

    case BOTTOMLEFT:
        vArray[0] = origin->x; vArray[1] = origin->y;//bottom left i0
        vArray[2] = origin->x; vArray[3] = origin->y+h;//top left i1
        vArray[4] = origin->x+w; vArray[5] = origin->y+h;//top right i2
        vArray[6] = origin->x+w; vArray[7] = origin->y;//bottom right i3
        break;

    case TOPLEFT:
        vArray[0] = origin->x; vArray[1] = origin->y-h;//bottom left i0
        vArray[2] = origin->x; vArray[3] = origin->y;//top left i1
        vArray[4] = origin->x+w; vArray[5] = origin->y;//top right i2
        vArray[6] = origin->x+w; vArray[7] = origin->y-h;//bottom right i3
        break;

    case TOPRIGHT:
        vArray[0] = origin->x-w; vArray[1] = origin->y-h;//bottom left i0
        vArray[2] = origin->x-w; vArray[3] = origin->y;//top left i1
        vArray[4] = origin->x; vArray[5] = origin->y;//top right i2
        vArray[6] = origin->x; vArray[7] = origin->y-h;//bottom right i3
        break;

    case BOTTOMRIGHT:
        vArray[0] = origin->x-w; vArray[1] = origin->y;//bottom left i0
        vArray[2] = origin->x-w; vArray[3] = origin->y+h;//top left i1
        vArray[4] = origin->x; vArray[5] = origin->y+h;//top right i2
        vArray[6] = origin->x; vArray[7] = origin->y;//bottom right i3
        break;

    default: //same as center
        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2);//bottom left i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2);//top left i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2);//top right i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2);//bottom right i3
        break;
    }
#pragma endregion

    //create a quick texture array (we COULD create this on the heap rather than creating/destroying it every cycle)
    GLfloat tArray[8] =
    {
        //this order has been tinkered with from my normal one. I think LodePNG is bringing the pixel data in upside down, so a quick fix was necessary here.
        0.0f, 1.0f,//0
        0.0f, 0.0f,//1
        1.0f, 0.0f,//2
        1.0f, 1.0f //3
    };

    //and finally... the index array... remember, we draw in triangles (and we'll go CW)
    GLubyte iArray[6] =
    {
        0, 1, 2,
        0, 2, 3
    };

    //Activate arrays
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    //Give OpenGL a pointer to our vArray and tArray
    glVertexPointer(2, GL_FLOAT, 0, &vArray[0]);
    glTexCoordPointer(2, GL_FLOAT, 0, &tArray[0]);

    //Draw it all
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, &iArray[0]);

    //glDrawArrays(GL_TRIANGLES, 0, 6);

    //Disable the vertex arrays
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_TEXTURE_2D);
}

Can anyone tell me why OpenGL won't draw from the new pixel data I load into it? As I said, I suspect it has something to do with glGenTextures not giving me a new texture name.

Best Answer

You are calling glTexImage2D and passing a pointer to client memory. Beware, the documentation says:

If a non-zero named buffer object is bound to the GL_PIXEL_UNPACK_BUFFER target (see glBindBuffer) while a texture image is specified, data is treated as a byte offset into the buffer object's data store.

To be on the safe side, you may want to call glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0) to unbind any buffer object.
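
Applied to the code in the question, that means making sure no pixel-unpack buffer is bound when texGLInit() uploads the pixels; if one were bound, glTexImage2D would read from that buffer object's stale data store at the new w and h, which would fit the "old pixels stretched to the new size" symptom described above. A minimal sketch of the change, with the glBindBuffer call as the only addition:

void Texture::texGLInit()
{
    //make sure no buffer object is bound to GL_PIXEL_UNPACK_BUFFER, so that
    //glTexImage2D treats &image[0] as a client-memory pointer rather than
    //as an offset into a buffer object's data store
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

    glGenTextures(1, &textureID[0]);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, &image[0]);
}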

Regarding "c++ - glGenTextures works but returns the same texture name every time", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/17729088/
