
opengl - Rendering compute shader output to the screen


I am trying to set up a pipeline to do some ray-based rendering.

First I set up a vertex shader and a geometry shader that take just one arbitrary float and emit a quad, so that I can do all the work in the fragment shader and feed data in through uniforms.

But then I came across compute shaders and found some tutorials, and they are almost all the same: make a quad and render the compute shader output onto it. That seems silly to me if a compute shader lets you render however you want but you still need a trick like that just to get the result onto the screen.

After some more research I found the function glFramebufferTexture2D, which as I understand it attaches an image texture (in my case the one my compute shader writes to) to a framebuffer (the buffer that gets displayed, as far as I understand), so I wouldn't have to do the quad-generation trick. But the code I wrote only shows a black screen. Did I get something wrong, or am I missing something in my code?
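When narrowing down a black screen like this, it also helps to rule out a silent shader failure. A minimal status check, sketched with the same gl crate bindings as the code below (the 1024-byte log buffer is an arbitrary choice):

// right after gl::CompileShader(shader): query the compile status and, on failure, print the driver's log
let mut ok : gl::types::GLint = 0;
unsafe { gl::GetShaderiv(shader, gl::COMPILE_STATUS, &mut ok); }
if ok == 0 {
    let mut log = vec![0u8; 1024];          // arbitrary log buffer size
    let mut len : gl::types::GLsizei = 0;
    unsafe { gl::GetShaderInfoLog(shader, log.len() as gl::types::GLsizei, &mut len, log.as_mut_ptr() as *mut gl::types::GLchar); }
    println!("compile failed: {}", String::from_utf8_lossy(&log[..len as usize]));
}
// the same pattern with gl::GetProgramiv(program, gl::LINK_STATUS, ...) and gl::GetProgramInfoLog covers linking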

Here is my code. (I'm not worrying about warnings or detaching/deleting the shaders yet; I just want to test the concept.)

main.rs

extern crate sdl2;
extern crate gl;

use sdl2::pixels::Color;
use std::ffi::{CStr, CString};


fn main() {

    let width = 512;
    let height = 512;

    let sdl = sdl2::init().unwrap();
    let video_sys = sdl.video().unwrap();

    let gl_attr = video_sys.gl_attr();
    gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
    gl_attr.set_context_version(4, 1);

    let window = video_sys.window("test", width, height).opengl().build().unwrap();
    let gl_ctx = window.gl_create_context().unwrap();

    let gl = gl::load_with(|s| video_sys.gl_get_proc_address(s) as *const std::os::raw::c_void);

    unsafe {
        gl::Viewport(0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei);
        gl::ClearColor(0.0, 0.0, 0.0, 1.0);
    }

    let shader = unsafe { gl::CreateShader(gl::COMPUTE_SHADER) };
    unsafe {
        gl::ShaderSource(shader, 1, &CString::new(include_str!("screen.comp")).unwrap().as_ptr(), std::ptr::null());
        gl::CompileShader(shader);
    }

    let program = unsafe { gl::CreateProgram() };
    unsafe {
        gl::AttachShader(program, shader);
        gl::LinkProgram(program);
        gl::UseProgram(program);
    }

    let mut tex_out : gl::types::GLuint = 0;
    unsafe {
        gl::GenTextures(1, &mut tex_out);
        gl::ActiveTexture(gl::TEXTURE0);
        gl::BindTexture(gl::TEXTURE_2D, tex_out);
        gl::TexImage2D(
            gl::TEXTURE_2D,
            0,
            gl::RGBA32F as gl::types::GLint,
            width as gl::types::GLsizei,
            height as gl::types::GLsizei,
            0,
            gl::RGBA,
            gl::FLOAT,
            std::ptr::null()
        );
        gl::BindImageTexture(0, tex_out, 0, gl::FALSE, 0, gl::WRITE_ONLY, gl::RGBA32F);
    }

    let mut event_pump = sdl.event_pump().unwrap();

    'main: loop {

        for event in event_pump.poll_iter() {
            match event {
                sdl2::event::Event::Quit {..} => break 'main,
                _ => {},
            }
        }

        unsafe {
            gl::DispatchCompute(width as gl::types::GLuint, height as gl::types::GLuint, 1);
            gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
            gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
        }

        window.gl_swap_window();

    }

}

screen.comp

#version 430
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img_output;

void main() {

    vec4 pixel = vec4(1.0, 1.0, 1.0, 1.0);

    ivec2 pixel_coords = ivec2(gl_GlobalInvocationID.xy);

    imageStore(img_output, pixel_coords, pixel);

}

Edit: working code (with a few test colors):

main.rs:

extern crate sdl2;
extern crate gl;

use std::ffi::{CStr, CString};


struct Renderer {
    width : u32,
    height : u32,
    shader : gl::types::GLuint,
    program : gl::types::GLuint,
}

impl Renderer {

    fn new(width : u32, height : u32, shader_name : &CStr) -> Self {

        unsafe {
            gl::Viewport(0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei);
            gl::ClearColor(0.0, 0.0, 0.0, 1.0);
        }

        let shader = unsafe { gl::CreateShader(gl::COMPUTE_SHADER) };
        unsafe {
            gl::ShaderSource(shader, 1, &shader_name.as_ptr(), std::ptr::null());
            gl::CompileShader(shader);
        }

        let program = unsafe { gl::CreateProgram() };
        unsafe {
            gl::AttachShader(program, shader);
            gl::LinkProgram(program);
            gl::UseProgram(program);
        }

        let mut tex_out : gl::types::GLuint = 0;
        unsafe {
            gl::GenTextures(1, &mut tex_out);
            gl::ActiveTexture(gl::TEXTURE0);
            gl::BindTexture(gl::TEXTURE_2D, tex_out);
            gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::types::GLint);
            gl::TexImage2D(
                gl::TEXTURE_2D,
                0,
                gl::RGBA32F as gl::types::GLint,
                width as gl::types::GLsizei,
                height as gl::types::GLsizei,
                0,
                gl::RGBA,
                gl::FLOAT,
                std::ptr::null()
            );
            gl::BindImageTexture(0, tex_out, 0, gl::FALSE, 0, gl::WRITE_ONLY, gl::RGBA32F);
        }

        let mut fbo : gl::types::GLuint = 0;
        unsafe {
            gl::GenFramebuffers(1, &mut fbo);
            gl::BindFramebuffer(gl::FRAMEBUFFER, fbo);
            gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
            gl::BindFramebuffer(gl::READ_FRAMEBUFFER, fbo);
            gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);
        }

        let resolution = unsafe { gl::GetUniformLocation(program, CString::new("iResolution").unwrap().as_ptr()) };
        unsafe { gl::Uniform2i(resolution, width as gl::types::GLint, height as gl::types::GLint); }

        Self {
            width : width,
            height : height,
            shader : shader,
            program : program,
        }

    }

    fn get_input(&self, name : &str) -> gl::types::GLint {
        unsafe { gl::GetUniformLocation(self.program, CString::new(name).unwrap().as_ptr()) }
    }

    fn input1f(&self, ptr : gl::types::GLint, f : gl::types::GLfloat) {
        unsafe { gl::Uniform1f(ptr, f) };
    }

    fn draw(&self) {

        unsafe {
            gl::DispatchCompute(self.width as gl::types::GLuint, self.height as gl::types::GLuint, 1);
            gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
            gl::BlitFramebuffer(0, 0, self.width as gl::types::GLint, self.height as gl::types::GLint, 0, 0, self.width as gl::types::GLint, self.height as gl::types::GLint, gl::COLOR_BUFFER_BIT, gl::LINEAR);
        }

    }

}

impl Drop for Renderer {

    fn drop(&mut self) {
        unsafe {
            gl::DeleteShader(self.shader);
            gl::DeleteProgram(self.program);
        }
    }

}


fn main() {

    let width = 512;
    let height = 512;

    let sdl = sdl2::init().unwrap();
    let video_sys = sdl.video().unwrap();

    let gl_attr = video_sys.gl_attr();
    gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
    gl_attr.set_context_version(4, 1);

    let window = video_sys.window("test", width, height).opengl().build().unwrap();
    let _gl_ctx = window.gl_create_context().unwrap();

    let _gl = gl::load_with(|s| video_sys.gl_get_proc_address(s) as *const std::os::raw::c_void);

    let render = Renderer::new(width, height, &CString::new(include_str!("screen.comp")).unwrap());
    let time_attr = render.get_input("time");

    let mut event_pump = sdl.event_pump().unwrap();

    let mut time = 0.0;

    'main: loop {

        for event in event_pump.poll_iter() {
            match event {
                sdl2::event::Event::Quit {..} => break 'main,
                _ => {},
            }
        }

        time += 0.01;
        if time > 1.0 {
            time = 0.0;
        }

        render.input1f(time_attr, time);
        render.draw();
        window.gl_swap_window();

    }

}

screen.comp:

#version 430
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img;
uniform ivec2 iResolution;

uniform float time;

void main() {

    ivec2 iCoords = ivec2(gl_GlobalInvocationID.xy);

    vec2 uv = vec2(iCoords) / vec2(iResolution);

    vec4 pixel = vec4(uv, time, 1.0);

    imageStore(img, iCoords, pixel);

}

Best Answer

There is an issue when you generate the texture. The initial value of TEXTURE_MIN_FILTER is NEAREST_MIPMAP_LINEAR. If you don't change it and don't create mipmaps, the texture is not "complete".
Set TEXTURE_MIN_FILTER to either NEAREST or LINEAR to solve the issue:

gl::BindTexture(gl::TEXTURE_2D, tex_out);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::types::GLint);

You have already rendered the image from the compute shader into a texture object, but to blit that texture to the screen you have to copy the texture image to the color plane of the default framebuffer. The color plane of the default framebuffer cannot be changed simply by attaching a texture to it.

Instead, you need to create a named Framebuffer Object (glGenFramebuffers, glBindFramebuffer) and attach the texture object to the framebuffer's color plane (glFramebufferTexture2D):

let mut fbo : gl::types::GLuint = 0;
gl::GenFramebuffers(1, &mut fbo);
gl::BindFramebuffer(gl::FRAMEBUFFER, fbo);
gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
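As an optional sanity check (not part of the fix itself), glCheckFramebufferStatus can confirm that the attachment left the framebuffer complete; a minimal sketch:

// with the FBO still bound to GL_FRAMEBUFFER from the snippet above
let status = unsafe { gl::CheckFramebufferStatus(gl::FRAMEBUFFER) };
if status != gl::FRAMEBUFFER_COMPLETE {
    println!("framebuffer incomplete: 0x{:x}", status);
}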

Bind this framebuffer to the GL_READ_FRAMEBUFFER target (glBindFramebuffer) and bind the default framebuffer (zero) to the GL_DRAW_FRAMEBUFFER target:

gl::BindFramebuffer(gl::READ_FRAMEBUFFER, fbo);
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);

Finally, in the render loop, use glBlitFramebuffer to copy the contents of the named framebuffer object (the texture) to the color plane of the default framebuffer:

gl::DispatchCompute(width as gl::types::GLuint, height as gl::types::GLuint, 1);
gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
gl::BlitFramebuffer(
    0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
    0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
    gl::COLOR_BUFFER_BIT, gl::LINEAR);

Note that with glBlitFramebuffer the size of the viewport can be different from the size of the texture image.
Note also that this would not work if the texture had an integral (integer) format; it works here because the texture image has a floating-point format while the default framebuffer is fixed point.
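For instance, a sketch of that size mismatch, assuming the SDL2 window from the question (drawable_size is rust-sdl2's query for the window's current pixel size): the source rectangle stays at the texture's 512×512 size while the destination rectangle uses whatever the window currently measures, and the blit scales with the chosen filter.

// source: the full 512x512 texture attached to the read framebuffer
// destination: the default framebuffer at the window's current pixel size
let (win_w, win_h) = window.drawable_size();
unsafe {
    gl::BlitFramebuffer(
        0, 0, width as gl::types::GLint, height as gl::types::GLint,
        0, 0, win_w as gl::types::GLint, win_h as gl::types::GLint,
        gl::COLOR_BUFFER_BIT, gl::LINEAR);
}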

For opengl - rendering compute shader output to the screen, see the original question on Stack Overflow: https://stackoverflow.com/questions/58042393/
