- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
警告:总的 OpenGL/TK 新手,请善待。我可能咬得太多了。
规范:
我正在尝试从 learnopengl.com(我设法开始工作)复制 Cubemap 教程。我对 C# 更熟悉,所以最好获得一个使用 OpenTK 的解决方案。我得到的只是一个空白屏幕。也许是图像的背面(太乐观了?)。任何帮助,将不胜感激。
如果我遗漏了什么,请告诉我。
这是代码:(我觉得我很接近。)
MainWindow.xaml.cs
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Windows;
using System.Windows.Forms;
using Path = System.IO.Path;
namespace OpenTKTesting
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
private int _vertexBufferObject;
private int _vertexArrayObject;
private Shader shader;
// For documentation on this, check Texture.cs
private TextureCubemap cubemap;
private Camera camera;
GLControl glControl; // the winforms opentk control
public MainWindow()
{
InitializeComponent();
glControl = new GLControl(new GraphicsMode(32, 24, 0, 8)) { VSync = true };
windowsFormsHost.Child = glControl;
System.Windows.Forms.Integration.WindowsFormsHost.EnableWindowsFormsInterop();
Toolkit.Init();
}
private List<string> ImageFaces
{
get;
set;
}
public void GetImageFaces()
{
ImageFaces = new List<string>();
string dir = @"C:\\Development\\ScratchDev\\OpenTKTesting\\OpenTKTesting\\Resources";
ImageFaces.Add(Path.Combine(dir, "f.jpg"));
ImageFaces.Add(Path.Combine(dir, "b.jpg"));
ImageFaces.Add(Path.Combine(dir, "u.jpg"));
ImageFaces.Add(Path.Combine(dir, "d.jpg"));
ImageFaces.Add(Path.Combine(dir, "r.jpg"));
ImageFaces.Add(Path.Combine(dir, "l.jpg"));
}
private void SetupGLControl()
{
if (glControl == null)
{
return;
}
GetImageFaces();
foreach (var item in ImageFaces)
{
Debug.WriteLine(item);
}
glControl.MakeCurrent();
glControl.VSync = true;
glControl.Resize += GlControl_Resize;
glControl.Paint += GlControl_Paint;
shader = new Shader("Shaders/shader.vert", "Shaders/shader.frag");
shader.SetInt("cubeMapArray", 0);
// We initialize the camera so that it is 3 units back from where the rectangle is
// and give it the proper aspect ratio
camera = new Camera(Vector3.UnitZ, glControl.AspectRatio);
}
private void GlControl_Paint(object sender, PaintEventArgs e)
{
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
_vertexBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, _vertices.Length * sizeof(float), _vertices, BufferUsageHint.StaticDraw);
cubemap = new TextureCubemap(ImageFaces);
cubemap.UseCubemap();
_vertexArrayObject = GL.GenVertexArray();
GL.BindVertexArray(_vertexArrayObject);
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
//GL.BindBuffer(BufferTarget.ElementArrayBuffer, _elementBufferObject);
var vertexLocation = shader.GetAttribLocation("aPos");
GL.EnableVertexAttribArray(vertexLocation);
GL.VertexAttribPointer(vertexLocation, 3, VertexAttribPointerType.Float, false, 3 * sizeof(float), 0);
//// Next, we also setup texture coordinates. It works in much the same way.
//// We add an offset of 3, since the first vertex coordinate comes after the first vertex
//// and change the amount of data to 2 because there's only 2 floats for vertex coordinates
//var texCoordLocation = shader.GetAttribLocation("TexCoords");
//GL.EnableVertexAttribArray(texCoordLocation);
//GL.VertexAttribPointer(texCoordLocation, 2, VertexAttribPointerType.Float, false, 5 * sizeof(float), 3 * sizeof(float));
GL.Clear(ClearBufferMask.ColorBufferBit);
cubemap.UseCubemap();
shader.Use();
shader.SetMatrix4("view", camera.GetViewMatrix());
shader.SetMatrix4("projection", camera.GetProjectionMatrix());
GL.DrawArrays(PrimitiveType.Triangles, 0, 36);
glControl.SwapBuffers();
}
private readonly float[] _vertices =
{
// positions
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
-1.0f, -1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, -1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
-1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f
};
private void GlControl_Resize(object sender, EventArgs e)
{
InitializeView();
Debug.WriteLine("Resizing...");
}
private void WindowsFormsHost_Loaded(object sender, RoutedEventArgs e)
{
Debug.WriteLine("WFH Loaded...");
SetupGLControl();
}
public void InitializeView()
{
double newWidth = glControl.ClientSize.Width;
double newHeight = glControl.ClientSize.Height;
GL.Viewport(0, 0, (int)newWidth, (int)newHeight);
// We enable depth testing here. If you try to draw something more complex than one plane without this,
// you'll notice that polygons further in the background will occasionally be drawn over the top of the ones in the foreground.
// Obviously, we don't want this, so we enable depth testing. We also clear the depth buffer in GL.Clear over in OnRenderFrame.
GL.Enable(EnableCap.DepthTest);
glControl.SwapBuffers();
}
}
}
TextureCubemap.cs
using OpenTK.Graphics.OpenGL;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using PixelFormat = OpenTK.Graphics.OpenGL.PixelFormat;
namespace OpenTKTesting
{
class TextureCubemap
{
public readonly int Handle;
// Create texture from path.
public TextureCubemap(List<string> imagePaths)
{
// Generate handle
Handle = GL.GenTexture();
// Bind the handle
UseCubemap();
for (int i = 0; i < imagePaths.Count; i++)
{
// Load the image
using (var image = new Bitmap(imagePaths[i]))
{
Debug.WriteLine(imagePaths[i]);
var data = image.LockBits(
new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadOnly,
System.Drawing.Imaging.PixelFormat.Format32bppRgb);
GL.TexImage2D(TextureTarget.TextureCubeMap,
0,
PixelInternalFormat.Rgb,
image.Width,
image.Height,
0,
PixelFormat.Rgb,
PixelType.UnsignedByte,
data.Scan0);
}
}
GL.TexParameter(TextureTarget.TextureCubeMap, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TexParameter(TextureTarget.TextureCubeMap, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.TextureCubeMap, TextureParameterName.TextureWrapS, (int)TextureParameterName.ClampToEdge);
GL.TexParameter(TextureTarget.TextureCubeMap, TextureParameterName.TextureWrapT, (int)TextureParameterName.ClampToEdge);
GL.TexParameter(TextureTarget.TextureCubeMap, TextureParameterName.TextureWrapR, (int)TextureParameterName.ClampToEdge);
}
public void UseCubemap(TextureUnit unit = TextureUnit.Texture0)
{
GL.ActiveTexture(unit);
GL.BindTexture(TextureTarget.TextureCubeMap, Handle);
}
}
}
Shader.cs
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
namespace OpenTKTesting
{
// A simple class meant to help create shaders.
public class Shader
{
public readonly int Handle;
private readonly Dictionary<string, int> _uniformLocations;
// This is how you create a simple shader.
// Shaders are written in GLSL, which is a language very similar to C in its semantics.
// The GLSL source is compiled *at runtime*, so it can optimize itself for the graphics card it's currently being used on.
// A commented example of GLSL can be found in shader.vert
public Shader(string vertPath, string fragPath)
{
// There are several different types of shaders, but the only two you need for basic rendering are the vertex and fragment shaders.
// The vertex shader is responsible for moving around vertices, and uploading that data to the fragment shader.
// The vertex shader won't be too important here, but they'll be more important later.
// The fragment shader is responsible for then converting the vertices to "fragments", which represent all the data OpenGL needs to draw a pixel.
// The fragment shader is what we'll be using the most here.
// Load vertex shader and compile
// LoadSource is a simple function that just loads all text from the file whose path is given.
var shaderSource = LoadSource(vertPath);
// GL.CreateShader will create an empty shader (obviously). The ShaderType enum denotes which type of shader will be created.
var vertexShader = GL.CreateShader(ShaderType.VertexShader);
// Now, bind the GLSL source code
GL.ShaderSource(vertexShader, shaderSource);
// And then compile
CompileShader(vertexShader);
// We do the same for the fragment shader
shaderSource = LoadSource(fragPath);
var fragmentShader = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(fragmentShader, shaderSource);
CompileShader(fragmentShader);
// These two shaders must then be merged into a shader program, which can then be used by OpenGL.
// To do this, create a program...
Handle = GL.CreateProgram();
// Attach both shaders...
GL.AttachShader(Handle, vertexShader);
GL.AttachShader(Handle, fragmentShader);
// And then link them together.
LinkProgram(Handle);
// When the shader program is linked, it no longer needs the individual shaders attacked to it; the compiled code is copied into the shader program.
// Detach them, and then delete them.
GL.DetachShader(Handle, vertexShader);
GL.DetachShader(Handle, fragmentShader);
GL.DeleteShader(fragmentShader);
GL.DeleteShader(vertexShader);
// The shader is now ready to go, but first, we're going to cache all the shader uniform locations.
// Querying this from the shader is very slow, so we do it once on initialization and reuse those values
// later.
// First, we have to get the number of active uniforms in the shader.
GL.GetProgram(Handle, GetProgramParameterName.ActiveUniforms, out var numberOfUniforms);
// Next, allocate the dictionary to hold the locations.
_uniformLocations = new Dictionary<string, int>();
// Loop over all the uniforms,
for (var i = 0; i < numberOfUniforms; i++)
{
// get the name of this uniform,
var key = GL.GetActiveUniform(Handle, i, out _, out _);
// get the location,
var location = GL.GetUniformLocation(Handle, key);
// and then add it to the dictionary.
_uniformLocations.Add(key, location);
}
}
private static void CompileShader(int shader)
{
// Try to compile the shader
GL.CompileShader(shader);
// Check for compilation errors
GL.GetShader(shader, ShaderParameter.CompileStatus, out var code);
if (code != (int)All.True)
{
// We can use `GL.GetShaderInfoLog(shader)` to get information about the error.
throw new Exception($"Error occurred whilst compiling Shader({shader})");
}
}
private static void LinkProgram(int program)
{
// We link the program
GL.LinkProgram(program);
// Check for linking errors
GL.GetProgram(program, GetProgramParameterName.LinkStatus, out var code);
if (code != (int)All.True)
{
// We can use `GL.GetProgramInfoLog(program)` to get information about the error.
throw new Exception($"Error occurred whilst linking Program({program})");
}
}
// A wrapper function that enables the shader program.
public void Use()
{
GL.UseProgram(Handle);
}
// The shader sources provided with this project use hardcoded layout(location)-s. If you want to do it dynamically,
// you can omit the layout(location=X) lines in the vertex shader, and use this in VertexAttribPointer instead of the hardcoded values.
public int GetAttribLocation(string attribName)
{
return GL.GetAttribLocation(Handle, attribName);
}
// Just loads the entire file into a string.
private static string LoadSource(string path)
{
using (var sr = new StreamReader(path, Encoding.UTF8))
{
return sr.ReadToEnd();
}
}
// Uniform setters
// Uniforms are variables that can be set by user code, instead of reading them from the VBO.
// You use VBOs for vertex-related data, and uniforms for almost everything else.
// Setting a uniform is almost always the exact same, so I'll explain it here once, instead of in every method:
// 1. Bind the program you want to set the uniform on
// 2. Get a handle to the location of the uniform with GL.GetUniformLocation.
// 3. Use the appropriate GL.Uniform* function to set the uniform.
/// <summary>
/// Set a uniform int on this shader.
/// </summary>
/// <param name="name">The name of the uniform</param>
/// <param name="data">The data to set</param>
public void SetInt(string name, int data)
{
GL.UseProgram(Handle);
GL.Uniform1(_uniformLocations[name], data);
}
/// <summary>
/// Set a uniform float on this shader.
/// </summary>
/// <param name="name">The name of the uniform</param>
/// <param name="data">The data to set</param>
public void SetFloat(string name, float data)
{
GL.UseProgram(Handle);
GL.Uniform1(_uniformLocations[name], data);
}
/// <summary>
/// Set a uniform Matrix4 on this shader
/// </summary>
/// <param name="name">The name of the uniform</param>
/// <param name="data">The data to set</param>
/// <remarks>
/// <para>
/// The matrix is transposed before being sent to the shader.
/// </para>
/// </remarks>
public void SetMatrix4(string name, Matrix4 data)
{
GL.UseProgram(Handle);
GL.UniformMatrix4(_uniformLocations[name], true, ref data);
}
/// <summary>
/// Set a uniform Vector3 on this shader.
/// </summary>
/// <param name="name">The name of the uniform</param>
/// <param name="data">The data to set</param>
public void SetVector3(string name, Vector3 data)
{
GL.UseProgram(Handle);
GL.Uniform3(_uniformLocations[name], data);
}
}
}
Camera.cs
using OpenTK;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace OpenTKTesting
{
// This is the camera class as it could be set up after the tutorials on the website
// It is important to note there are a few ways you could have set up this camera, for example
// you could have also managed the player input inside the camera class, and a lot of the properties could have
// been made into functions.
// TL;DR: This is just one of many ways in which we could have set up the camera
// Check out the web version if you don't know why we are doing a specific thing or want to know more about the code
public class Camera
{
// Those vectors are directions pointing outwards from the camera to define how it rotated
private Vector3 _front = -Vector3.UnitZ;
private Vector3 _up = Vector3.UnitY;
private Vector3 _right = Vector3.UnitX;
// Rotation around the X axis (radians)
private float _pitch;
// Rotation around the Y axis (radians)
private float _yaw = -MathHelper.PiOver2; // Without this you would be started rotated 90 degrees right
// The field of view of the camera (radians)
private float _fov = MathHelper.PiOver2;
public Camera(Vector3 position, float aspectRatio)
{
Position = position;
AspectRatio = aspectRatio;
}
// The position of the camera
public Vector3 Position { get; set; }
// This is simply the aspect ratio of the viewport, used for the projection matrix
public float AspectRatio { private get; set; }
public Vector3 Front => _front;
public Vector3 Up => _up;
public Vector3 Right => _right;
// We convert from degrees to radians as soon as the property is set to improve performance
public float Pitch
{
get => MathHelper.RadiansToDegrees(_pitch);
set
{
// We clamp the pitch value between -89 and 89 to prevent the camera from going upside down, and a bunch
// of weird "bugs" when you are using euler angles for rotation.
// If you want to read more about this you can try researching a topic called gimbal lock
var angle = MathHelper.Clamp(value, -89f, 89f);
_pitch = MathHelper.DegreesToRadians(angle);
UpdateVectors();
}
}
// We convert from degrees to radians as soon as the property is set to improve performance
public float Yaw
{
get => MathHelper.RadiansToDegrees(_yaw);
set
{
_yaw = MathHelper.DegreesToRadians(value);
UpdateVectors();
}
}
// The field of view (FOV) is the vertical angle of the camera view, this has been discussed more in depth in a
// previous tutorial, but in this tutorial you have also learned how we can use this to simulate a zoom feature.
// We convert from degrees to radians as soon as the property is set to improve performance
public float Fov
{
get => MathHelper.RadiansToDegrees(_fov);
set
{
var angle = MathHelper.Clamp(value, 1f, 45f);
_fov = MathHelper.DegreesToRadians(angle);
}
}
// Get the view matrix using the amazing LookAt function described more in depth on the web tutorials
public Matrix4 GetViewMatrix()
{
return Matrix4.LookAt(Position, Position + _front, _up);
}
// Get the projection matrix using the same method we have used up until this point
public Matrix4 GetProjectionMatrix()
{
return Matrix4.CreatePerspectiveFieldOfView(_fov, AspectRatio, 0.01f, 100f);
}
// This function is going to update the direction vertices using some of the math learned in the web tutorials
private void UpdateVectors()
{
// First the front matrix is calculated using some basic trigonometry
_front.X = (float)Math.Cos(_pitch) * (float)Math.Cos(_yaw);
_front.Y = (float)Math.Sin(_pitch);
_front.Z = (float)Math.Cos(_pitch) * (float)Math.Sin(_yaw);
// We need to make sure the vectors are all normalized, as otherwise we would get some funky results
_front = Vector3.Normalize(_front);
// Calculate both the right and the up vector using cross product
// Note that we are calculating the right from the global up, this behaviour might
// not be what you need for all cameras so keep this in mind if you do not want a FPS camera
_right = Vector3.Normalize(Vector3.Cross(_front, Vector3.UnitY));
_up = Vector3.Normalize(Vector3.Cross(_right, _front));
}
}
}
shader.vert
#version 330 core
layout (location = 0) in vec3 aPos;
out vec3 TexCoords;
uniform mat4 projection;
uniform mat4 view;
void main()
{
TexCoords = aPos;
gl_Position = projection * view * vec4(aPos, 1.0);
}
shader.frag
#version 330 core
out vec4 FragColor;
in vec3 texDir;
in vec3 TexCoords;
uniform samplerCube cubeMapArray;
void main()
{
FragColor = texture(cubeMapArray, TexCoords);
}
编辑:
@Rabbid76 - 我已经按照你的建议进行了更新。不幸的是,我仍然遇到黑屏。我或多或少地听从了你的建议。您能否提出其他任何建议,可能与着色器或相机有关?
private void SetupGLControl()
{
if (glControl == null)
{
return;
}
GetImageFaces();
foreach (var item in ImageFaces)
{
Debug.WriteLine(item);
}
glControl.MakeCurrent();
glControl.VSync = true;
glControl.Resize += GlControl_Resize;
glControl.Paint += GlControl_Paint;
shader = new Shader("Shaders/shader.vert", "Shaders/shader.frag");
shader.SetInt("cubeMapArray", 0);
// We initialize the camera so that it is 3 units back from where the rectangle is
// and give it the proper aspect ratio
camera = new Camera(Vector3.UnitZ, glControl.AspectRatio);
cubemap = new TextureCubemap(ImageFaces);
_vertexBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, _vertices.Length * sizeof(float), _vertices, BufferUsageHint.StaticDraw);
_vertexArrayObject = GL.GenVertexArray();
GL.BindVertexArray(_vertexArrayObject);
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
var vertexLocation = shader.GetAttribLocation("aPos");
GL.EnableVertexAttribArray(vertexLocation);
GL.VertexAttribPointer(vertexLocation, 3, VertexAttribPointerType.Float, false, 3 * sizeof(float), 0);
}
private void GlControl_Paint(object sender, PaintEventArgs e)
{
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
cubemap.UseCubemap();
shader.Use();
shader.SetMatrix4("view", camera.GetViewMatrix());
shader.SetMatrix4("projection", camera.GetProjectionMatrix());
GL.BindVertexArray(_vertexArrayObject);
GL.DrawArrays(PrimitiveType.Triangles, 0, 36);
glControl.SwapBuffers();
}
最佳答案
当您指定立方体贴图纹理的二维纹理图像时,您指定立方体贴图的单边,目标必须是其中之一TextureCubeMapNegativeX
、TextureCubeMapNegativeY
、TextureCubeMapNegativeZ
、TextureCubeMapPositiveX
、TextureCubeMapPositiveY
或 TextureCubeMapPositiveZ
:
TextureTarget[] targets =
{
TextureTarget.TextureCubeMapNegativeX, TextureTarget.TextureCubeMapNegativeY,
TextureTarget.TextureCubeMapNegativeZ, TextureTarget.TextureCubeMapPositiveX,
TextureTarget.TextureCubeMapPositiveY, TextureTarget.TextureCubeMapPositiveZ
}
for (int i = 0; i < imagePaths.Count; i++)
{
// Load the image
using (var image = new Bitmap(imagePaths[i]))
{
Debug.WriteLine(imagePaths[i]);
var data = image.LockBits(
new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadOnly,
System.Drawing.Imaging.PixelFormat.Format32bppRgb);
GL.TexImage2D(targets[i], // <------
0,
PixelInternalFormat.Rgb,
image.Width,
image.Height,
0,
PixelFormat.Rgb,
PixelType.UnsignedByte,
data.Scan0);
}
}
缓冲区对象和顶点数组对象在每一帧中创建。在初始化时创建一次对象并在每一帧中使用它。请注意,您不删除对象,GPU 没有“垃圾收集”。执行 Vertex Specification在 MainWindow.SetupGLControl
中。绑定(bind) Vertex Array Object 就足够了在抽签之前:
public partial class MainWindow : Window
{
private TextureCubemap cubemap;
// [...]
private void SetupGLControl()
{
// [...]
cubemap = new TextureCubemap(ImageFaces);
_vertexBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, _vertices.Length * sizeof(float), _vertices, BufferUsageHint.StaticDraw);
_vertexArrayObject = GL.GenVertexArray();
GL.BindVertexArray(_vertexArrayObject);
GL.BindBuffer(BufferTarget.ArrayBuffer, _vertexBufferObject);
var vertexLocation = shader.GetAttribLocation("aPos");
GL.EnableVertexAttribArray(vertexLocation);
GL.VertexAttribPointer(vertexLocation, 3, VertexAttribPointerType.Float, false, 3 * sizeof(float), 0);
//GL.BindBuffer(BufferTarget.ElementArrayBuffer, _elementBufferObject);
// [...]
}
private void GlControl_Paint(object sender, PaintEventArgs e)
{
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
cubemap.UseCubemap();
shader.Use();
shader.SetMatrix4("view", camera.GetViewMatrix());
shader.SetMatrix4("projection", camera.GetProjectionMatrix());
GL.BindVertexArray(_vertexArrayObject); // <------ bind vertex array object
GL.DrawArrays(PrimitiveType.Triangles, 0, 36);
glControl.SwapBuffers();
}
// [...]
}
``´
关于c# - OpenTK 中的立方体贴图,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/56680252/
这是顶点着色器: uniform mat4 projection; uniform mat4 view; uniform mat4 model; void main(void) { gl_Po
我有一个问题,我是需要二次幂 (POT) 纹理并且似乎无法使用 NPOT 的幸运者之一。 通常,这不会让我太烦恼,因为我只会确保我需要使用的任何纹理都是 POT。但是,我在尝试将光标 X 和 Y 位置
我最近在我的游戏中实现了视锥体剔除,并且为了从渲染周期中挤出最后一滴,我还决定实现遮挡剔除。它工作得很好,但是,我很痛苦地发现,如果我不看游戏中的任何东西(如果我向虚空看,远离我的游戏对象),我的显卡
我正在寻找一种用颜色“填充”三维几何体的方法,并且很可能在稍后的某个时间填充纹理。 假设你可以将头撞到混凝土墙上,从逻辑上讲你只会看到黑暗。然而,在 OpenGL 中,当您执行此操作时,由于剔除以及几
警告:总的 OpenGL/TK 新手,请善待。我可能咬得太多了。 规范: 2017 年 Visual Studio 社区 C# .Net 4.6.1(使用 WPF、WindowsFormsHost 控
我正在寻找一种跨平台(Win 和 MacOS)方法来检测 C# 中 OpenGL 应用程序的按键。 以下有效,但仅适用于字母数字字符。 protected override void OnKeyPre
我在 http://viewport3d.com/trackball.htm 上使用教程编程轨迹球。我计算了轴和角度,但因为我只是 OpenTK 和 OpenGL 的初学者,所以我不知道如何将旋转应用
我有两张图片。我必须找到第一张图像中强度大于 0.8 的点。同时,我必须在相同点上找到第二张图像的强度,并且需要使用阈值/ slider 值(范围从 0 到 1)调整相同点上第二张图像上的光线。我做了
有没有人知道如何使用 OpenTK 框架滚动(动画化)2D 文本,或者在哪里可以找到一些好的示例。我需要新闻电视 channel 底部(顶部)滚动代码之类的东西。 谢谢。 最佳答案 有许多可能的文本渲
尝试制作游戏,但纹理从未出现在屏幕上。 只为四边形使用颜色是可行的,但是当我尝试为四边形添加纹理时,它只是显示为黑色方 block 。 基本片段着色器: #version 300 es precisi
有没有办法在另一个线程中使用 OpenTK/OpenGL 绘制到屏幕上?我尝试使用的代码是 GL.Clear(ClearBufferMask.ColorBufferBit); GL.ClearColo
它是 tao 框架中已弃用的 OpenTK.Graphics.Glu 的替代品吗?尤其是对几何渲染的需求。 最佳答案 OpenTK 是 Tao 的演变。而且,老实说,我没有听说过 glut 被贬低了。
我是 opengl 的新手,我正在努力让 stencilbuffer 为一个简单的案例工作;我有两种纹理,一种是位图,另一种是“ mask ”,我试图用它来隐藏位图中的某些部分。 我似乎无法让它工作,
我正在尝试在我的 OpenTK 项目中旋转 2D 对象。 发生的事情是旋转对象之后的对象正在旋转,我不希望它们这样做。 我已经旋转了一个对象后如何重置旋转? protected overr
我正在为 OpenGL 和 C# 使用 OpenTK。 我需要使用模板缓冲区,目前想知道如何设置 StencilBuffer 深度。 我之前知道的是 TaoFramework,它的控件具有可以设置此深
我在进行视锥体剔除的方式上遇到了一些问题。当前的方法确实解决了剔除问题,但效果确实很奇怪。当我离我的主要父对象太近时,我正在使用场景图来渲染所有内容,对象开始非常快速地闪烁。当我离开时,它就消失了。我
我正在使用 OpenTK 和 C#,我在 3D 空间中定义了一个平面,如下所示: GL.Begin(BeginMode.Quads); GL.Color3(Color.Magenta); GL.Ver
我不确定为什么这段代码不是简单地在屏幕上绘制一个三角形(正交)。我正在使用与 OpenGL 1.1 相同的 OpenTK 1.1。 List simpleVertices = new List();
我正在尝试找到一种方法来使窗口始终位于顶部。 (必须是窗口模式)我目前正在使用 OpenTk.NetCore 库在 .net 核心中制作一个窗口。是否可以使用 OpenTk 将此窗口保持在顶部或是否有
我目前正在开发一款使用 OpenTK 进行渲染的游戏; C#、.Net 4.0、OpenGL 2。 在 1280x1024 分辨率下最大化运行(不是全屏,而是占用所有可用的屏幕空间),我看到平均帧率为
我是一名优秀的程序员,十分优秀!