I need to render a 3D model from multiple viewpoints, and the images are much larger than the screen resolution, so I cannot open a window that big, and I do not need a window to look at the results anyway. I just want to render the 3D model into many images taken from different viewpoints.
I found this tutorial, http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/ , and I tried to read the pixel values back from the FBO with glGetTexImage and glReadPixels.
The problem is that everything works as long as the window is not hidden. If I hide the window with
glfwWindowHint(GLFW_VISIBLE, GL_FALSE);
then glGetTexImage and glReadPixels only return the background color from the FBO, and the model drawn with glDrawElements is gone.
Can anyone help me or show me an example? Thanks!
Here is the code that reads back the FBO (it works fine when the window is not hidden; the rest of my code is the same as http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/ ).
Here is my code:
if (!glfwInit())
{
    fprintf(stderr, "Failed to initialize GLFW\n");
    getchar();
    return -1;
}
glfwWindowHint(GLFW_SAMPLES, 1);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_VISIBLE, GL_TRUE);
// Open a window and create its OpenGL context
int windowWidth = 4000;
int windowHeight = 3000;
window = glfwCreateWindow(windowWidth, windowHeight, "Tutorial 14 - Render To Texture", NULL, NULL);
if (window == NULL) {
    fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
    getchar();
    glfwTerminate();
    return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
    fprintf(stderr, "Failed to initialize GLEW\n");
    getchar();
    glfwTerminate();
    return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// Hide the mouse and enable unlimited movement
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
// Set the mouse at the center of the screen
glfwPollEvents();
glfwSetCursorPos(window, windowWidth / 2, windowHeight / 2);
// Dark blue background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
// Enable depth test
glEnable(GL_DEPTH_TEST);
// Accept fragment if it closer to the camera than the former one
glDepthFunc(GL_LESS);
// Cull triangles whose normal is not facing the camera
glEnable(GL_CULL_FACE);
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Create and compile our GLSL program from the shaders
GLuint programID = LoadShaders("StandardShadingRTT.vertexshader", "StandardShadingRTT.fragmentshader");
// Get a handle for our "MVP" uniform
GLuint MatrixID = glGetUniformLocation(programID, "MVP");
GLuint ViewMatrixID = glGetUniformLocation(programID, "V");
GLuint ModelMatrixID = glGetUniformLocation(programID, "M");
// Load the texture
GLuint Texture = loadDDS("uvmap.DDS");
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(programID, "myTextureSampler");
// Read our .obj file
std::vector<glm::vec3> vertices;
std::vector<glm::vec2> uvs;
std::vector<glm::vec3> normals;
bool res = loadOBJ("suzanne.obj", vertices, uvs, normals);
std::vector<unsigned short> indices;
std::vector<glm::vec3> indexed_vertices;
std::vector<glm::vec2> indexed_uvs;
std::vector<glm::vec3> indexed_normals;
indexVBO(vertices, uvs, normals, indices, indexed_vertices, indexed_uvs, indexed_normals);
// Load it into a VBO
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_vertices.size() * sizeof(glm::vec3), &indexed_vertices[0], GL_STATIC_DRAW);
GLuint uvbuffer;
glGenBuffers(1, &uvbuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_uvs.size() * sizeof(glm::vec2), &indexed_uvs[0], GL_STATIC_DRAW);
GLuint normalbuffer;
glGenBuffers(1, &normalbuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_normals.size() * sizeof(glm::vec3), &indexed_normals[0], GL_STATIC_DRAW);
// Generate a buffer for the indices as well
GLuint elementbuffer;
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW);
// Get a handle for our "LightPosition" uniform
glUseProgram(programID);
GLuint LightID = glGetUniformLocation(programID, "LightPosition_worldspace");
// ---------------------------------------------
// Render to Texture - specific code begins here
// ---------------------------------------------
// The framebuffer, which groups 0 or more textures and 0 or 1 depth buffer.
GLuint FramebufferName = 0;
glGenFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
// The texture we're going to render to
GLuint renderedTexture;
glGenTextures(1, &renderedTexture);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, renderedTexture);
// Give an empty image to OpenGL ( the last "0" means "empty" )
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, windowWidth, windowHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
// Poor filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Set "renderedTexture" as our colour attachement #0
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, renderedTexture, 0);
// Set the list of draw buffers.
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, DrawBuffers); // "1" is the size of DrawBuffers
// Always check that our framebuffer is ok
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    return -1;
// Render to our framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
glViewport(0, 0, windowWidth, windowHeight); // Render on the whole framebuffer, complete from the lower left corner to the upper right
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use our shader
glUseProgram(programID);
// Compute the MVP matrix from keyboard and mouse input
computeMatricesFromInputs();
glm::mat4 ProjectionMatrix = getProjectionMatrix();
glm::mat4 ViewMatrix = getViewMatrix();
glm::mat4 ModelMatrix = glm::mat4(1.0);
glm::mat4 MVP = ProjectionMatrix * ViewMatrix * ModelMatrix;
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &ModelMatrix[0][0]);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glm::vec3 lightPos = glm::vec3(4, 4, 4);
glUniform3f(LightID, lightPos.x, lightPos.y, lightPos.z);
// Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, Texture);
// Set our "myTextureSampler" sampler to user Texture Unit 0
glUniform1i(TextureID, 0);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
    0,                  // attribute
    3,                  // size
    GL_FLOAT,           // type
    GL_FALSE,           // normalized?
    0,                  // stride
    (void*)0            // array buffer offset
);
// 2nd attribute buffer : UVs
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glVertexAttribPointer(
    1,                  // attribute
    2,                  // size
    GL_FLOAT,           // type
    GL_FALSE,           // normalized?
    0,                  // stride
    (void*)0            // array buffer offset
);
// 3rd attribute buffer : normals
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
    2,                  // attribute
    3,                  // size
    GL_FLOAT,           // type
    GL_FALSE,           // normalized?
    0,                  // stride
    (void*)0            // array buffer offset
);
// Index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
// Draw the triangles !
glDrawElements(
    GL_TRIANGLES,       // mode
    indices.size(),     // count
    GL_UNSIGNED_SHORT,  // type
    (void*)0            // element array buffer offset
);
unsigned char *image = new unsigned char[windowWidth * windowHeight * 3];
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
glReadBuffer(GL_COLOR_ATTACHMENT0);
glReadPixels(0, 0, windowWidth, windowHeight, GL_RGB, GL_UNSIGNED_BYTE, image);
cv::Mat img;
img = cv::Mat::zeros(windowHeight, windowWidth, CV_8UC3);
for (int i = 0; i < windowHeight; i++) {
    for (int j = 0; j < windowWidth; j++) {
        int index = (i * windowWidth + j) * 3;
        cv::Vec3b px;
        px.val[0] = image[index + 0];
        px.val[1] = image[index + 1];
        px.val[2] = image[index + 2];
        img.at<cv::Vec3b>(i, j) = px;
    }
}
cv::imwrite("test.png", img);
This works fine while the window is visible, but as soon as I set
glfwWindowHint(GLFW_VISIBLE, GL_FALSE);
I can no longer get the rendered result.
Can anyone help me? Thanks a lot!
Best Answer
Have you checked this example? It uses a framebuffer object:
https://www.opengl.org/wiki/Framebuffer_Object_Examples#Quick_example.2C_render_to_texture_.282D.29
The code is reproduced below, in case the link breaks in the future:
//RGBA8 2D texture, 24 bit depth texture, 256x256
glGenTextures(1, &color_tex);
glBindTexture(GL_TEXTURE_2D, color_tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
//NULL means reserve texture memory, but texels are undefined
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
//-------------------------
glGenFramebuffersEXT(1, &fb);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb);
//Attach 2D texture to this FBO
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, color_tex, 0);
//-------------------------
glGenRenderbuffersEXT(1, &depth_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, 256, 256);
//-------------------------
//Attach depth buffer to FBO
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_rb);
//-------------------------
//Does the GPU support current FBO configuration?
GLenum status;
status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
switch (status)
{
    case GL_FRAMEBUFFER_COMPLETE_EXT:
        cout << "good";
        break;
    default:
        HANDLE_THE_ERROR;
}
//-------------------------
//and now you can render to GL_TEXTURE_2D
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//-------------------------
glViewport(0, 0, 256, 256);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 256.0, 0.0, 256.0, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//-------------------------
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
//-------------------------
//**************************
//RenderATriangle, {0.0, 0.0}, {256.0, 0.0}, {256.0, 256.0}
//Read http://www.opengl.org/wiki/VBO_-_just_examples
RenderATriangle();
//-------------------------
GLubyte pixels[4*4*4];
glReadPixels(0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
//pixels 0, 1, 2 should be white
//pixel 4 should be black
//----------------
//Bind 0, which means render to back buffer
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
Cleanup code:
//Delete resources
glDeleteTextures(1, &color_tex);
glDeleteRenderbuffersEXT(1, &depth_rb);
//Bind 0, which means render to back buffer, as a result, fb is unbound
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fb);
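The wiki snippet above uses the legacy EXT entry points and fixed-function matrix calls, while the question creates a core-profile 4.5 context through GLFW. Purely as an illustrative, untested sketch of how the same render-to-texture idea can be combined with a hidden GLFW window (this is not code from the question or from the wiki page, and drawScene() is a hypothetical stand-in for the existing shader/VAO/glDrawElements calls), it might look roughly like this:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <cstdio>
#include <vector>

// Hypothetical placeholder for the existing draw code
// (glUseProgram, set uniforms, bind VAO, glDrawElements, ...).
static void drawScene() {}

int main()
{
    if (!glfwInit()) return -1;

    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);            // the window is never shown

    // A tiny 1x1 window is enough; the FBO below has its own size.
    GLFWwindow* window = glfwCreateWindow(1, 1, "offscreen", NULL, NULL);
    if (!window) { glfwTerminate(); return -1; }
    glfwMakeContextCurrent(window);

    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK) { glfwTerminate(); return -1; }

    // Off-screen image size (check GL_MAX_TEXTURE_SIZE for very large targets).
    const int width = 4000, height = 3000;

    // Color texture attachment.
    GLuint colorTex = 0;
    glGenTextures(1, &colorTex);
    glBindTexture(GL_TEXTURE_2D, colorTex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    // Depth renderbuffer attachment.
    GLuint depthRb = 0;
    glGenRenderbuffers(1, &depthRb);
    glBindRenderbuffer(GL_RENDERBUFFER, depthRb);
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);

    // The FBO itself.
    GLuint fbo = 0;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRb);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
        fprintf(stderr, "FBO incomplete\n");
        return -1;
    }

    // Render into the FBO, not into the (hidden) window.
    glViewport(0, 0, width, height);
    glEnable(GL_DEPTH_TEST);
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    drawScene();
    glFinish();                                          // make sure rendering has completed

    // Read the pixels back from the FBO's color attachment.
    std::vector<unsigned char> pixels(width * height * 3);
    glReadBuffer(GL_COLOR_ATTACHMENT0);
    glPixelStorei(GL_PACK_ALIGNMENT, 1);                 // avoid row-padding surprises
    glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels.data());

    // ... save 'pixels' (rows come back bottom-up in OpenGL) ...

    glfwTerminate();
    return 0;
}
The main points in this sketch are that the FBO's size is independent of the hidden 1x1 window, that glReadBuffer/glReadPixels are issued while the FBO is still bound, and that GL_PACK_ALIGNMENT is set to 1 so arbitrary widths do not introduce row padding. Also note that glReadPixels returns rows bottom-up and in RGB order, so before cv::imwrite the image usually needs a vertical flip and an RGB to BGR swap.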
Regarding "c++ - OpenGL off-screen rendering and reading pixel values with a hidden window using glfw", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/37558512/