
javascript - WebGL: drawing a 2D image with a depth map for a pseudo-3D effect


I am learning WebGL with the help of the WebGLFundamentals pages, which helped me understand how buffers, shaders and all of that work. Now I want to achieve the kind of effect shown here: https://tympanus.net/Tutorials/HeatDistortionEffect/index3.html. I know how to do the heat-distortion part; what I want to achieve is the sense of depth in the image. The demo has a tutorial, but it doesn't really explain how to do it; it only says I need a grayscale map where the white parts are the nearest and the black parts are the farthest, and I can't really understand how that works. This is my shader code:

var vertexShaderText = [
  "attribute vec2 a_position;",
  "attribute vec2 a_texCoord;",
  "uniform vec2 u_resolution;",
  "varying vec2 v_texCoord;",
  "void main() {",
  "  vec2 zeroToOne = a_position / u_resolution;",
  "  vec2 zeroToTwo = zeroToOne * 2.0;",
  "  vec2 clipSpace = zeroToTwo - 1.0;",
  "  gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);",
  "  v_texCoord = a_texCoord;",
  "}"
].join("\n");

var fragShaderText = [
  "precision mediump float;",
  "uniform sampler2D u_image;",
  "uniform sampler2D u_depthMap;",
  "uniform vec2 mouse;",
  "varying vec2 v_texCoord;",
  "void main() {",
  "  float frequency = 100.0;",
  "  float amplitude = 0.010;",
  "  float distortion = sin(v_texCoord.y * frequency) * amplitude;",
  "  float map = texture2D(u_depthMap, v_texCoord).r;",
  "  vec4 color = texture2D(u_image, vec2(v_texCoord.x + distortion * map, v_texCoord.y));",
  "  gl_FragColor = color;",
  "}"
].join("\n");

What I want is that when I move the mouse the image gets distorted through the shader, as in the link above, but I really don't know how to do that on the JavaScript side. Thanks.

Best Answer

The image-processing tutorials that follow on that same site show how to load and use multiple images. The sample you linked to and the tutorial are pretty clear about how it works.

First you need the original image:

[original image]

Then a sine-wave distortion is applied to it.

"use strict";

function main() {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl");
if (!gl) {
return;
}

let originalImage = { width: 1, height: 1 }; // replaced after loading
const originalTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/xKYRSwu.jpg", crossOrigin: '',
}, (err, texture, source) => {
originalImage = source;
});

// compile shaders, link program, lookup location
const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

requestAnimationFrame(render);

function render(time) {
time *= 0.001; // seconds

twgl.resizeCanvasToDisplaySize(gl.canvas);

gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);

gl.useProgram(programInfo.program);

// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const imageAspect = originalImage.width / originalImage.height;
const mat = m3.scaling(imageAspect / canvasAspect, -1);

// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_originalImage: originalTexture,
u_distortionAmount: 0.003, // .3%
u_distortionRange: 100,
u_time: time * 10,
});

// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);

requestAnimationFrame(render);
}
}

main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas id="canvas"></canvas>

<!-- vertex shader -->
<script id="vs" type="f">
attribute vec2 position;
attribute vec2 texcoord;

uniform mat3 u_matrix;

varying vec2 v_texcoord;

void main() {
gl_Position = vec4((u_matrix * vec3(position, 1)).xy, 0, 1);
v_texcoord = texcoord;
}
</script>
<!-- fragment shader -->
<script id="fs" type="x-shader/x-fragment">
precision mediump float;

uniform float u_time;
uniform float u_distortionAmount;
uniform float u_distortionRange;

// our textures
uniform sampler2D u_originalImage;

// the texcoords passed in from the vertex shader.
varying vec2 v_texcoord;

void main() {
vec2 distortion = vec2(
sin(u_time + v_texcoord.y * u_distortionRange), 0) * u_distortionAmount;
vec4 original = texture2D(u_originalImage, v_texcoord + distortion);
gl_FragColor = original;
}
</script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>

Then they also load a map texture. This texture was created by hand in Photoshop (or some other image-editing program). Its green channel says how much to multiply the distortion by: the greener it is, the more distortion.

"use strict";

function main() {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl");
if (!gl) {
return;
}

let originalImage = { width: 1, height: 1 }; // replaced after loading
const originalTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/xKYRSwu.jpg",
crossOrigin: '',
}, (err, texture, source) => {
originalImage = source;
});

const mapTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/W9QazjL.jpg", crossOrigin: '',
});

// compile shaders, link program, lookup location
const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

requestAnimationFrame(render);

function render(time) {
time *= 0.001; // seconds

twgl.resizeCanvasToDisplaySize(gl.canvas);

gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);

gl.useProgram(programInfo.program);

// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const imageAspect = originalImage.width / originalImage.height;
const mat = m3.scaling(imageAspect / canvasAspect, -1);

// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_originalImage: originalTexture,
u_mapImage: mapTexture,
u_distortionAmount: 0.003, // .3%
u_distortionRange: 100,
u_time: time * 10,
});

// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);

requestAnimationFrame(render);
}
}

main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="canvas"></canvas>

<script id="vs" type="f">
attribute vec2 position;
attribute vec2 texcoord;

uniform mat3 u_matrix;

varying vec2 v_texcoord;

void main() {
gl_Position = vec4(u_matrix * vec3(position, 1), 1);
v_texcoord = texcoord;
}
</script>
<script id="fs" type="f">
precision mediump float;

uniform float u_time;
uniform float u_distortionAmount;
uniform float u_distortionRange;

// our textures
uniform sampler2D u_originalImage;
uniform sampler2D u_mapImage;

// the texcoords passed in from the vertex shader.
varying vec2 v_texcoord;

void main() {
vec4 depthDistortion = texture2D(u_mapImage, v_texcoord);
float distortionMult = depthDistortion.g; // just green channel

vec2 distortion = vec2(
sin(u_time + v_texcoord.y * u_distortionRange), 0) * u_distortionAmount;
vec4 color0 = texture2D(u_originalImage, v_texcoord + distortion * distortionMult);
gl_FragColor = color0;
}
</script>
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>

Next, an offset based on the mouse is multiplied by another hand-drawn map. This map is the red channel of the image above: the redder it is, the larger the mouse offset that gets applied. The map more or less represents depth. Since things in front need to move the opposite way from things in back, the shader converts that channel from the 0 → 1 range to a -0.5 → +0.5 range (the code below uses 0.5 - red, so red = 0 becomes +0.5 and red = 1 becomes -0.5).

"use strict";

function main() {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl");
if (!gl) {
return;
}

let originalImage = { width: 1, height: 1 }; // replaced after loading
const originalTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/xKYRSwu.jpg",
crossOrigin: '',
}, (err, texture, source) => {
originalImage = source;
});

const mapTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/W9QazjL.jpg", crossOrigin: '',
});

// compile shaders, link program, lookup location
const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

const mouse = [0, 0];
document.addEventListener('mousemove', (event) => {
mouse[0] = (event.clientX / gl.canvas.clientWidth * 2 - 1) * 0.05;
mouse[1] = (event.clientY / gl.canvas.clientHeight * 2 - 1) * 0.05;
});

requestAnimationFrame(render);

function render(time) {
time *= 0.001; // seconds

twgl.resizeCanvasToDisplaySize(gl.canvas);

gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);

gl.useProgram(programInfo.program);

// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const imageAspect = originalImage.width / originalImage.height;
const mat = m3.scaling(imageAspect / canvasAspect, -1);

// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_originalImage: originalTexture,
u_mapImage: mapTexture,
u_distortionAmount: 0.003, // .3%
u_distortionRange: 100,
u_time: time * 10,
u_mouse: mouse,
});

// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);

requestAnimationFrame(render);
}
}

main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="canvas"></canvas>

<!-- vertex shader -->
<script id="vs" type="f">
attribute vec2 position;
attribute vec2 texcoord;

uniform mat3 u_matrix;

varying vec2 v_texcoord;

void main() {
gl_Position = vec4(u_matrix * vec3(position, 1), 1);
v_texcoord = texcoord;
}
</script>
<!-- fragment shader -->
<script id="fs" type="f">
precision mediump float;

uniform float u_time;
uniform float u_distortionAmount;
uniform float u_distortionRange;
uniform vec2 u_mouse;

// our textures
uniform sampler2D u_originalImage;
uniform sampler2D u_mapImage;

// the texcoords passed in from the vertex shader.
varying vec2 v_texcoord;

void main() {
vec4 depthDistortion = texture2D(u_mapImage, v_texcoord);
float distortionMult = depthDistortion.g; // just green channel
float parallaxMult = 0.5 - depthDistortion.r; // just red channel

vec2 distortion = vec2(
sin(u_time + v_texcoord.y * u_distortionRange), 0) * u_distortionAmount * distortionMult;
vec2 parallax = u_mouse * parallaxMult;

vec4 color0 = texture2D(u_originalImage, v_texcoord + distortion + parallax);
gl_FragColor = color0;
}
</script>
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>

Finally (this is not in the tutorial, but it is in the sample), a blurred version of the original image is loaded (blurred beforehand in an image-editing program such as Photoshop).

It may be hard to tell it is blurred because the blur is subtle.

The sample then mixes in the blurred image: the more distortion there is, the more of the blurred image is used.

"use strict";

function main() {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl");
if (!gl) {
return;
}

let originalImage = { width: 1, height: 1 }; // replaced after loading
const originalTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/xKYRSwu.jpg",
crossOrigin: '',
}, (err, texture, source) => {
originalImage = source;
});

const mapTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/W9QazjL.jpg", crossOrigin: '',
});

const blurredTexture = twgl.createTexture(gl, {
src: "https://i.imgur.com/Zw7mMLX.jpg", crossOrigin: '',
});

// compile shaders, link program, lookup location
const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

const mouse = [0, 0];
document.addEventListener('mousemove', (event) => {
mouse[0] = (event.clientX / gl.canvas.clientWidth * 2 - 1) * 0.05;
mouse[1] = (event.clientY / gl.canvas.clientHeight * 2 - 1) * 0.05;
});

requestAnimationFrame(render);

function render(time) {
time *= 0.001; // seconds

twgl.resizeCanvasToDisplaySize(gl.canvas);

gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);

gl.useProgram(programInfo.program);

// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const imageAspect = originalImage.width / originalImage.height;
const mat = m3.scaling(imageAspect / canvasAspect, -1);

// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_originalImage: originalTexture,
u_mapImage: mapTexture,
u_blurredImage: blurredTexture,
u_distortionAmount: 0.003, // .3%
u_distortionRange: 100,
u_time: time * 10,
u_mouse: mouse,
});

// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);

requestAnimationFrame(render);
}
}

main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="canvas"></canvas>

<!-- vertex shader -->
<script id="vs" type="f">
attribute vec2 position;
attribute vec2 texcoord;

uniform mat3 u_matrix;

varying vec2 v_texcoord;

void main() {
gl_Position = vec4(u_matrix * vec3(position, 1), 1);
v_texcoord = texcoord;
}
</script>
<!-- fragment shader -->
<script id="fs" type="f">
precision mediump float;

uniform float u_time;
uniform float u_distortionAmount;
uniform float u_distortionRange;
uniform vec2 u_mouse;

// our textures
uniform sampler2D u_originalImage;
uniform sampler2D u_blurredImage;
uniform sampler2D u_mapImage;

// the texcoords passed in from the vertex shader.
varying vec2 v_texcoord;

void main() {
vec4 depthDistortion = texture2D(u_mapImage, v_texcoord);
float distortionMult = depthDistortion.g; // just green channel
float parallaxMult = 0.5 - depthDistortion.r; // just red channel

vec2 distortion = vec2(
sin(u_time + v_texcoord.y * u_distortionRange), 0) * u_distortionAmount * distortionMult;
vec2 parallax = u_mouse * parallaxMult;

vec2 uv = v_texcoord + distortion + parallax;
vec4 original = texture2D(u_originalImage, uv);
vec4 blurred = texture2D(u_blurredImage, uv);
gl_FragColor = mix(original, blurred, length(distortion) / u_distortionAmount);
}
</script>
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>

One last big difference: rather than distorting with a simple sine wave, the shader in that sample computes something slightly more involved.
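For illustration only, and not the linked sample's actual formula, a less regular ripple can be built by summing a couple of sine waves with different frequencies and directions. Something along these lines could replace the single sin() in the fragment shaders above (fancyDistortion is a made-up helper name):

// Hypothetical sketch, not the sample's code: two sine waves at different
// frequencies and speeds summed together give a less uniform ripple.
vec2 fancyDistortion(vec2 uv, float time, float amount) {
  float wave1 = sin(time * 1.3 + uv.y * 80.0);
  float wave2 = sin(time * 2.7 + uv.y * 23.0 + uv.x * 11.0);
  return vec2(wave1 * 0.6 + wave2 * 0.4, 0.0) * amount;
}

// usage inside main(), in place of the existing distortion line:
// vec2 distortion = fancyDistortion(v_texcoord, u_time, u_distortionAmount) * distortionMult;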

Cover

The code above uses a 2-unit quad that goes from -1 to +1 in X and Y, so if you passed in an identity matrix (or a 1,1 scale matrix, which is the same thing) it would cover the whole canvas. Instead we want the image not to be stretched out of shape. For that we have this code:

const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const imageAspect = originalImage.width / originalImage.height;
const mat = m3.scaling(imageAspect / canvasAspect, -1);

That effectively says: fill the canvas vertically, and scale horizontally by whatever is needed to keep the original image's aspect ratio. The -1 flips the quad vertically; otherwise the image comes out upside down. For example, a 3000x2000 image (aspect 1.5) on a 1920x1080 canvas (aspect about 1.78) gives a horizontal scale of 1.5 / 1.78, roughly 0.84, so the quad ends up narrower than the canvas.

To get "cover" behavior instead, we just check whether that horizontal scale is < 1. If it is, the image would not fill the canvas horizontally, so we set the horizontal scale to 1 and adjust the vertical scale to match:

// this assumes we want to fill vertically
let horizontalDrawAspect = imageAspect / canvasAspect;
let verticalDrawAspect = -1;
// does it fill horizontally?
if (horizontalDrawAspect < 1) {
  // no it does not so scale so we fill horizontally and
  // adjust vertical to match
  verticalDrawAspect /= horizontalDrawAspect;
  horizontalDrawAspect = 1;
}
const mat = m3.scaling(horizontalDrawAspect, verticalDrawAspect);

Regarding javascript - WebGL: drawing a 2D image with a depth map for a pseudo-3D effect, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/44372487/
