I downloaded the tutorials from OpenGL-tutorial.org, the OpenGL 2.1 port. I compiled them following the instructions (using CMake). Everything worked fine until I tried to run the lesson 8 tutorial. When I try to run the executable from the command line, the terminal prints the following message:
$ ./tutorial08_basic_shading
Compiling shader : StandardShading.vertexshader
Compiling shader : StandardShading.fragmentshader
Linking program
Loading OBJ file suzanne.obj...
r300 FP: Compiler Error:
Too many hardware temporaries used.
Using a dummy shader instead.
The resulting program runs, but it displays a completely black object:
This is what it should look like:
The program tutorial08_basic_shading is built from tutorial08.cpp:
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <vector>
// Include GLEW
#include <GL/glew.h>
// Include GLFW
#include <GL/glfw.h>
// Include GLM
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
using namespace glm;
#include <common/shader.hpp>
#include <common/texture.hpp>
#include <common/controls.hpp>
#include <common/objloader.hpp>
#include <common/vboindexer.hpp>
int main( void )
{
    // Initialise GLFW
    if( !glfwInit() )
    {
        fprintf( stderr, "Failed to initialize GLFW\n" );
        return -1;
    }

    glfwOpenWindowHint(GLFW_FSAA_SAMPLES, 4);
    glfwOpenWindowHint(GLFW_OPENGL_VERSION_MAJOR, 2);
    glfwOpenWindowHint(GLFW_OPENGL_VERSION_MINOR, 1);

    // Open a window and create its OpenGL context
    if( !glfwOpenWindow( 1024, 768, 0,0,0,0, 32,0, GLFW_WINDOW ) )
    {
        fprintf( stderr, "Failed to open GLFW window.\n" );
        glfwTerminate();
        return -1;
    }

    // Initialize GLEW
    if (glewInit() != GLEW_OK) {
        fprintf(stderr, "Failed to initialize GLEW\n");
        return -1;
    }

    glfwSetWindowTitle( "Tutorial 08" );

    // Ensure we can capture the escape key being pressed below
    glfwEnable( GLFW_STICKY_KEYS );
    glfwSetMousePos(1024/2, 768/2);

    // Dark blue background
    glClearColor(0.0f, 0.0f, 0.4f, 0.0f);

    // Enable depth test
    glEnable(GL_DEPTH_TEST);
    // Accept fragment if it is closer to the camera than the former one
    glDepthFunc(GL_LESS);

    // Cull triangles whose normal is not towards the camera
    glEnable(GL_CULL_FACE);

    // Create and compile our GLSL program from the shaders
    GLuint programID = LoadShaders( "StandardShading.vertexshader", "StandardShading.fragmentshader" );

    // Get a handle for our "MVP" uniform
    GLuint MatrixID = glGetUniformLocation(programID, "MVP");
    GLuint ViewMatrixID = glGetUniformLocation(programID, "V");
    GLuint ModelMatrixID = glGetUniformLocation(programID, "M");

    // Get a handle for our buffers
    GLuint vertexPosition_modelspaceID = glGetAttribLocation(programID, "vertexPosition_modelspace");
    GLuint vertexUVID = glGetAttribLocation(programID, "vertexUV");
    GLuint vertexNormal_modelspaceID = glGetAttribLocation(programID, "vertexNormal_modelspace");

    // Load the texture
    GLuint Texture = loadDDS("uvmap.DDS");

    // Get a handle for our "myTextureSampler" uniform
    GLuint TextureID = glGetUniformLocation(programID, "myTextureSampler");

    // Read our .obj file
    std::vector<glm::vec3> vertices;
    std::vector<glm::vec2> uvs;
    std::vector<glm::vec3> normals;
    bool res = loadOBJ("suzanne.obj", vertices, uvs, normals);

    // Load it into a VBO
    GLuint vertexbuffer;
    glGenBuffers(1, &vertexbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);

    GLuint uvbuffer;
    glGenBuffers(1, &uvbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
    glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec2), &uvs[0], GL_STATIC_DRAW);

    GLuint normalbuffer;
    glGenBuffers(1, &normalbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
    glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);

    // Get a handle for our "LightPosition" uniform
    glUseProgram(programID);
    GLuint LightID = glGetUniformLocation(programID, "LightPosition_worldspace");

    do{
        // Clear the screen
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        // Use our shader
        glUseProgram(programID);

        // Compute the MVP matrix from keyboard and mouse input
        computeMatricesFromInputs();
        glm::mat4 ProjectionMatrix = getProjectionMatrix();
        glm::mat4 ViewMatrix = getViewMatrix();
        glm::mat4 ModelMatrix = glm::mat4(1.0);
        glm::mat4 MVP = ProjectionMatrix * ViewMatrix * ModelMatrix;

        // Send our transformation to the currently bound shader,
        // in the "MVP" uniform
        glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
        glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &ModelMatrix[0][0]);
        glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);

        glm::vec3 lightPos = glm::vec3(4,4,4);
        glUniform3f(LightID, lightPos.x, lightPos.y, lightPos.z);

        // Bind our texture in Texture Unit 0
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, Texture);
        // Set our "myTextureSampler" sampler to use Texture Unit 0
        glUniform1i(TextureID, 0);

        // 1st attribute buffer : vertices
        glEnableVertexAttribArray(vertexPosition_modelspaceID);
        glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
        glVertexAttribPointer(
            vertexPosition_modelspaceID,  // The attribute we want to configure
            3,                            // size
            GL_FLOAT,                     // type
            GL_FALSE,                     // normalized?
            0,                            // stride
            (void*)0                      // array buffer offset
        );

        // 2nd attribute buffer : UVs
        glEnableVertexAttribArray(vertexUVID);
        glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
        glVertexAttribPointer(
            vertexUVID,                   // The attribute we want to configure
            2,                            // size : U+V => 2
            GL_FLOAT,                     // type
            GL_FALSE,                     // normalized?
            0,                            // stride
            (void*)0                      // array buffer offset
        );

        // 3rd attribute buffer : normals
        glEnableVertexAttribArray(vertexNormal_modelspaceID);
        glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
        glVertexAttribPointer(
            vertexNormal_modelspaceID,    // The attribute we want to configure
            3,                            // size
            GL_FLOAT,                     // type
            GL_FALSE,                     // normalized?
            0,                            // stride
            (void*)0                      // array buffer offset
        );

        // Draw the triangles !
        glDrawArrays(GL_TRIANGLES, 0, vertices.size() );

        glDisableVertexAttribArray(vertexPosition_modelspaceID);
        glDisableVertexAttribArray(vertexUVID);
        glDisableVertexAttribArray(vertexNormal_modelspaceID);

        // Swap buffers
        glfwSwapBuffers();

    } // Check if the ESC key was pressed or the window was closed
    while( glfwGetKey( GLFW_KEY_ESC ) != GLFW_PRESS &&
           glfwGetWindowParam( GLFW_OPENED ) );

    // Cleanup VBO and shader
    glDeleteBuffers(1, &vertexbuffer);
    glDeleteBuffers(1, &uvbuffer);
    glDeleteBuffers(1, &normalbuffer);
    glDeleteProgram(programID);
    glDeleteTextures(1, &Texture);

    // Close OpenGL window and terminate GLFW
    glfwTerminate();

    return 0;
}
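As a side note (an addition to the listing, not part of the tutorial code): since the driver message says it substituted a dummy shader, it can help to see what the GL side reports right after the shaders are loaded. The r300 fallback happens inside the Mesa driver, so the program may still claim a successful link, but the info log is still worth printing. A minimal sketch, assuming it is placed in main immediately after the LoadShaders call, with programID as above and GLEW already initialised:

// Extra diagnostic sketch, not in the tutorial: query link status and info log.
GLint linkStatus = GL_FALSE;
glGetProgramiv(programID, GL_LINK_STATUS, &linkStatus);

GLint logLength = 0;
glGetProgramiv(programID, GL_INFO_LOG_LENGTH, &logLength);
if (logLength > 1) {
    std::vector<char> log(logLength);
    glGetProgramInfoLog(programID, logLength, NULL, &log[0]);
    fprintf(stderr, "Link status: %d\nProgram info log:\n%s\n", linkStatus, &log[0]);
}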
The system it was compiled on is running Ubuntu 13.04 Raring Ringtail. I believe the compiler is g++, and I am using the OpenGL driver for an ATI Radeon Xpress 1100 laptop graphics card (the proprietary driver is not compatible).
So far I have been able to edit the previous examples and compile them with g++ without any problems. The only new function in this tutorial is in objloader.cpp:
#include <vector>
#include <stdio.h>
#include <string>
#include <cstring>
#include <glm/glm.hpp>
#include "objloader.hpp"
// Very, VERY simple OBJ loader.
// Here is a short list of features a real function would provide :
// - Binary files. Reading a model should be just a few memcpy's away, not parsing a file at runtime. In short : OBJ is not very great.
// - Animations & bones (includes bones weights)
// - Multiple UVs
// - All attributes should be optional, not "forced"
// - More stable. Change a line in the OBJ file and it crashes.
// - More secure. Change another line and you can inject code.
// - Loading from memory, stream, etc
bool loadOBJ(
    const char * path,
    std::vector<glm::vec3> & out_vertices,
    std::vector<glm::vec2> & out_uvs,
    std::vector<glm::vec3> & out_normals
){
    printf("Loading OBJ file %s...\n", path);

    std::vector<unsigned int> vertexIndices, uvIndices, normalIndices;
    std::vector<glm::vec3> temp_vertices;
    std::vector<glm::vec2> temp_uvs;
    std::vector<glm::vec3> temp_normals;

    FILE * file = fopen(path, "r");
    if( file == NULL ){
        printf("Impossible to open the file ! Are you in the right path ? See Tutorial 1 for details\n");
        return false;
    }

    while( 1 ){

        char lineHeader[128];
        // read the first word of the line
        int res = fscanf(file, "%s", lineHeader);
        if (res == EOF)
            break; // EOF = End Of File. Quit the loop.

        // else : parse lineHeader
        if ( strcmp( lineHeader, "v" ) == 0 ){
            glm::vec3 vertex;
            fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z );
            temp_vertices.push_back(vertex);
        }else if ( strcmp( lineHeader, "vt" ) == 0 ){
            glm::vec2 uv;
            fscanf(file, "%f %f\n", &uv.x, &uv.y );
            uv.y = -uv.y; // Invert V coordinate since we will only use DDS texture, which are inverted. Remove if you want to use TGA or BMP loaders.
            temp_uvs.push_back(uv);
        }else if ( strcmp( lineHeader, "vn" ) == 0 ){
            glm::vec3 normal;
            fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z );
            temp_normals.push_back(normal);
        }else if ( strcmp( lineHeader, "f" ) == 0 ){
            std::string vertex1, vertex2, vertex3;
            unsigned int vertexIndex[3], uvIndex[3], normalIndex[3];
            int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex[1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2] );
            if (matches != 9){
                printf("File can't be read by our simple parser :-( Try exporting with other options\n");
                return false;
            }
            vertexIndices.push_back(vertexIndex[0]);
            vertexIndices.push_back(vertexIndex[1]);
            vertexIndices.push_back(vertexIndex[2]);
            uvIndices    .push_back(uvIndex[0]);
            uvIndices    .push_back(uvIndex[1]);
            uvIndices    .push_back(uvIndex[2]);
            normalIndices.push_back(normalIndex[0]);
            normalIndices.push_back(normalIndex[1]);
            normalIndices.push_back(normalIndex[2]);
        }else{
            // Probably a comment, eat up the rest of the line
            char stupidBuffer[1000];
            fgets(stupidBuffer, 1000, file);
        }

    }

    // For each vertex of each triangle
    for( unsigned int i=0; i<vertexIndices.size(); i++ ){

        // Get the indices of its attributes
        unsigned int vertexIndex = vertexIndices[i];
        unsigned int uvIndex = uvIndices[i];
        unsigned int normalIndex = normalIndices[i];

        // Get the attributes thanks to the index
        glm::vec3 vertex = temp_vertices[ vertexIndex-1 ];
        glm::vec2 uv = temp_uvs[ uvIndex-1 ];
        glm::vec3 normal = temp_normals[ normalIndex-1 ];

        // Put the attributes in buffers
        out_vertices.push_back(vertex);
        out_uvs     .push_back(uv);
        out_normals .push_back(normal);

    }

    return true;
}
#ifdef USE_ASSIMP // don't use this #define, it's only for me (if AssImp fails to compile on your machine, at least all the other tutorials still work)
// Include AssImp
#include <assimp/Importer.hpp> // C++ importer interface
#include <assimp/scene.h> // Output data structure
#include <assimp/postprocess.h> // Post processing flags
bool loadAssImp(
    const char * path,
    std::vector<unsigned short> & indices,
    std::vector<glm::vec3> & vertices,
    std::vector<glm::vec2> & uvs,
    std::vector<glm::vec3> & normals
){
    Assimp::Importer importer;

    const aiScene* scene = importer.ReadFile(path, 0/*aiProcess_JoinIdenticalVertices | aiProcess_SortByPType*/);
    if( !scene) {
        fprintf( stderr, "%s", importer.GetErrorString());
        return false;
    }
    const aiMesh* mesh = scene->mMeshes[0]; // In this simple example code we always use the 1st mesh (in OBJ files there is often only one anyway)

    // Fill vertices positions
    vertices.reserve(mesh->mNumVertices);
    for(unsigned int i=0; i<mesh->mNumVertices; i++){
        aiVector3D pos = mesh->mVertices[i];
        vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
    }

    // Fill vertices texture coordinates
    uvs.reserve(mesh->mNumVertices);
    for(unsigned int i=0; i<mesh->mNumVertices; i++){
        aiVector3D UVW = mesh->mTextureCoords[0][i]; // Assume only 1 set of UV coords; AssImp supports 8 UV sets.
        uvs.push_back(glm::vec2(UVW.x, UVW.y));
    }

    // Fill vertices normals
    normals.reserve(mesh->mNumVertices);
    for(unsigned int i=0; i<mesh->mNumVertices; i++){
        aiVector3D n = mesh->mNormals[i];
        normals.push_back(glm::vec3(n.x, n.y, n.z));
    }

    // Fill face indices
    indices.reserve(3*mesh->mNumFaces);
    for (unsigned int i=0; i<mesh->mNumFaces; i++){
        // Assume the model has only triangles.
        indices.push_back(mesh->mFaces[i].mIndices[0]);
        indices.push_back(mesh->mFaces[i].mIndices[1]);
        indices.push_back(mesh->mFaces[i].mIndices[2]);
    }

    // The "scene" pointer will be deleted automatically by "importer"
    return true;
}
#endif
suzanne.obj is supplied with the tutorial, along with tutorial08.cpp.
Best Answer
The R300 architecture is one of the earliest shader model 2 (SM2) GPUs out there; they were introduced to the market about 10 years ago. SM2 is a rather limited programming model with very few hardware resources: only 4 texture indirections (i.e. texture operations that depend on the results of other texture operations) are the required minimum, and there are hard instruction-count limits.
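To get a feel for how tight those limits are on a given driver, one can query them through the ARB_fragment_program interface (this sketch is an editorial addition, not part of the original answer; it assumes a current GL context with GLEW initialised, and that the extension is exposed, which it is on R300-class chips):

#include <GL/glew.h>
#include <stdio.h>

// Print the native fragment-program resource limits the driver reports.
static void printFragmentProgramLimits(void)
{
    if (!GLEW_ARB_fragment_program) {
        fprintf(stderr, "ARB_fragment_program not supported\n");
        return;
    }
    GLint temps = 0, alu = 0, tex = 0, indirections = 0;
    glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB,
                      GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &temps);
    glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB,
                      GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB, &alu);
    glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB,
                      GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB, &tex);
    glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB,
                      GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB, &indirections);
    printf("native temporaries: %d, ALU instructions: %d, "
           "TEX instructions: %d, TEX indirections: %d\n",
           temps, alu, tex, indirections);
}

The "Too many hardware temporaries used" message from the r300 compiler is exactly this kind of limit being exceeded.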
All in all, this means it takes an excellent GLSL compiler to squeeze as much as possible out of the GPU. Unfortunately, GLSL compilers were never really optimized for SM2 hardware; in fact, when it comes to the R300, the proprietary driver's GLSL compiler produces even worse code than the open-source one. Most people programmed SM2 hardware with some kind of assembly, and GLSL compilers only became useful once the next generation of GPUs was on the market, so nobody bothered to work on optimizations targeting SM2 hardware.
What does this mean for you? Well, your GPU is simply too old for GLSL development. You can still get very good results with the assembly interface; I have fond memories of squeezing out the last spare cycle, texture indirection and temporary register to get the desired result. For example, I was able to implement improved Perlin noise on a Radeon 9800 GPU when (almost) everyone else claimed it was impossible on SM2-class hardware.
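For completeness, here is a rough idea of what that assembly route looks like (again an editorial addition, not from the answer). ARB_fragment_program is the standard assembly-level interface on this hardware; the tiny program below only modulates a texture sample with the interpolated vertex color, nothing like the tutorial's full lighting, but it shows the mechanics of loading such a program instead of a GLSL one:

#include <GL/glew.h>
#include <stdio.h>
#include <string.h>

// A deliberately tiny ARB fragment program: one texture fetch, one multiply.
static const char *kFragmentProgram =
    "!!ARBfp1.0\n"
    "TEMP texel;\n"
    "TEX texel, fragment.texcoord[0], texture[0], 2D;\n"
    "MUL result.color, texel, fragment.color;\n"
    "END\n";

// Load the assembly program; assumes a current GL context, GLEW initialised,
// and ARB_fragment_program available (true on R300-class chips).
static GLuint loadArbFragmentProgram(void)
{
    GLuint prog = 0;
    glGenProgramsARB(1, &prog);
    glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, prog);
    glProgramStringARB(GL_FRAGMENT_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
                       (GLsizei)strlen(kFragmentProgram), kFragmentProgram);

    // Syntax and resource errors are reported through these queries.
    if (glGetError() == GL_INVALID_OPERATION) {
        GLint errPos = -1;
        glGetIntegerv(GL_PROGRAM_ERROR_POSITION_ARB, &errPos);
        fprintf(stderr, "ARB fragment program error at %d: %s\n", errPos,
                (const char *)glGetString(GL_PROGRAM_ERROR_STRING_ARB));
        glDeleteProgramsARB(1, &prog);
        return 0;
    }
    glEnable(GL_FRAGMENT_PROGRAM_ARB); // use it for subsequent draws
    return prog;
}

Whether rewriting the tutorial's shading in assembly is worth the effort is debatable; trimming the GLSL fragment shader until it fits the R300's temporary budget, or simply running the tutorials on a newer GPU, may be the more practical fix.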
About c++ - Why do I get "r300 FP: Compiler Error:" when trying this OpenGL tutorial?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/18666432/