textures - How do I map different textures to different faces of a cube in WebGL?

I have the following cube vertex coordinates:

var vertices = [
  // Front face
  -1.0, -1.0,  1.0,
   1.0, -1.0,  1.0,
   1.0,  1.0,  1.0,
  -1.0,  1.0,  1.0,

  // Back face
  -1.0, -1.0, -1.0,
  -1.0,  1.0, -1.0,
   1.0,  1.0, -1.0,
   1.0, -1.0, -1.0,

  // Top face
  -1.0,  1.0, -1.0,
  -1.0,  1.0,  1.0,
   1.0,  1.0,  1.0,
   1.0,  1.0, -1.0,

  // Bottom face
  -1.0, -1.0, -1.0,
   1.0, -1.0, -1.0,
   1.0, -1.0,  1.0,
  -1.0, -1.0,  1.0,

  // Right face
   1.0, -1.0, -1.0,
   1.0,  1.0, -1.0,
   1.0,  1.0,  1.0,
   1.0, -1.0,  1.0,

  // Left face
  -1.0, -1.0, -1.0,
  -1.0, -1.0,  1.0,
  -1.0,  1.0,  1.0,
  -1.0,  1.0, -1.0
];

And these are the texture coordinates:

var textureCoordinates = [
  // Front
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0,

  // Back
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0,

  // Top
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0,

  // Bottom
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0,

  // Right
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0,

  // Left
  0.0, 0.0,
  1.0, 0.0,
  1.0, 1.0,
  0.0, 1.0
];

Right now, with the code below, it only loads a single texture image:

gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cubeTexture);
gl.uniform1i(gl.getUniformLocation(shaderProgram, "uSampler"), 0);

I got this code example from the MDN web docs. I would like to know what changes I need to make so that I can set a different texture for each face of the cube. I have read about changing the active texture, but I don't understand how to do that without it affecting the whole cube. I'm new to WebGL and hope someone can help me understand this.

Best Answer

Arguably the best way to put different images on a cube is to put all of the images into a single texture and use the UV coordinates to select a different part of that texture for each face of the cube.
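
As a rough illustration of that UV selection, here is a minimal sketch of a helper (not part of the answer's code; the function name and grid layout are placeholders) that computes the texture coordinates selecting one cell of an atlas laid out as an evenly spaced grid:

// Hypothetical helper: return the 4 UV pairs that select one cell of an
// atlas arranged as a grid of equally sized images. col/row are 0-based.
function atlasCellTexcoords(col, row, cols, rows) {
  var u0 = col / cols;        // left edge of the cell in texture space
  var v0 = row / rows;        // bottom edge of the cell
  var u1 = (col + 1) / cols;  // right edge
  var v1 = (row + 1) / rows;  // top edge
  // one quad's worth of texture coordinates; the order of these pairs
  // must match the vertex order of the face they are applied to
  return [
    u0, v0,
    u1, v0,
    u1, v1,
    u0, v1,
  ];
}

// The snippets below use a 4x2 grid (6 of the 8 cells hold images), so the
// bottom-left image corresponds to atlasCellTexcoords(0, 0, 4, 2), i.e. the
// rectangle from (0, 0) to (0.25, 0.5).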

See example on this page with lots of explanation

Or here:

"use strict";
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

var arrays = {
position: [1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1],
texcoord: [
// select the bottom left image
0 , 0 ,
0 , 0.5,
0.25, 0.5,
0.25, 0 ,
// select the bottom middle image
0.25, 0 ,
0.5 , 0 ,
0.5 , 0.5,
0.25, 0.5,
// select the bottom right image
0.5 , 0 ,
0.5 , 0.5,
0.75, 0.5,
0.75, 0 ,
// select the top left image
0 , 0.5,
0.25, 0.5,
0.25, 1 ,
0 , 1 ,
// select the top middle image
0.25, 0.5,
0.25, 1 ,
0.5 , 1 ,
0.5 , 0.5,
// select the top right image
0.5 , 0.5,
0.75, 0.5,
0.75, 1 ,
0.5 , 1 ,
],
indices: [0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23],
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
var tex = twgl.createTexture(gl, {
src: "https://webglfundamentals.org/webgl/resources/noodles.jpg",
crossOrigin: "",
});

var uniforms = {
u_texture: tex,
};

function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

var projection = m4.perspective(30 * Math.PI / 180, gl.canvas.clientWidth / gl.canvas.clientHeight, 0.5, 10);
var eye = [1, 4, -6];
var target = [0, 0, 0];
var up = [0, 1, 0];

var camera = m4.lookAt(eye, target, up);
var view = m4.inverse(camera);
var viewProjection = m4.multiply(view, projection);
var world = m4.rotationY(time);

uniforms.u_worldViewProjection = m4.multiply(world, viewProjection);

gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);

requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0px; }
canvas { width: 100vw; height: 100vh; display: block; }
<script id="vs" type="notjs">
uniform mat4 u_worldViewProjection;

attribute vec4 position;
attribute vec2 texcoord;

varying vec2 v_texCoord;

void main() {
v_texCoord = texcoord;
gl_Position = u_worldViewProjection * position;
}
</script>
<script id="fs" type="notjs">
precision mediump float;

varying vec2 v_texCoord;

uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texCoord);
}
</script>
<script src="https://twgljs.org/dist/twgl-full.min.js"></script>
<canvas id="c"></canvas>

There are many reasons this is considered the best approach. As a simple example of why: if you actually wrote a shader that uses 6 textures, you would need a different shader for a pyramid (3-4 faces), yet another for a dodecahedron (12 faces), and another for an icosahedron (20 faces), whereas if you put the images into a single texture the same shader works for all of them.

Even if you want to load 6 separate images, it is better to create a 2D canvas, copy all 6 images into that canvas with drawImage, and then upload that canvas into a single texture.

"use strict";
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

var arrays = {
position: [1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1],
texcoord: [
// select the bottom left image
0 , 0 ,
0 , 0.5,
0.25, 0.5,
0.25, 0 ,
// select the bottom middle image
0.25, 0 ,
0.5 , 0 ,
0.5 , 0.5,
0.25, 0.5,
// select the bottom right image
0.5 , 0 ,
0.5 , 0.5,
0.75, 0.5,
0.75, 0 ,
// select the top left image
0 , 0.5,
0.25, 0.5,
0.25, 1 ,
0 , 1 ,
// select the top middle image
0.25, 0.5,
0.25, 1 ,
0.5 , 1 ,
0.5 , 0.5,
// select the top right image
0.5 , 0.5,
0.75, 0.5,
0.75, 1 ,
0.5 , 1 ,
],
indices: [0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23],
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);


var ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = 512;
ctx.canvas.height = 256;
ctx.fillRect(0, 0, 512, 256); // black to start

var tex = gl.createTexture();
uploadCanvasToTexture();

[
"https://i.imgur.com/weklTat.gif",
"https://i.imgur.com/6AvnLa3.jpg",
"https://i.imgur.com/HkzeCU2.jpg",
"https://i.imgur.com/D9HVm6n.png",
"https://i.imgur.com/7MlmkJr.jpg",
"https://i.imgur.com/v38pV.jpg",
].forEach(function(url, ndx) {
var img = new Image();
img.onload = function() {
addFaceToCanvasAndUploadToTexture(img, ndx);
};
img.crossOrigin = "";
img.src = url;
});

function addFaceToCanvasAndUploadToTexture(img, ndx) {
var x = ndx % 3;
var y = ndx / 3 | 0;
ctx.drawImage(img, 0, 0, img.width, img.height, x * 128, y * 128, 128, 128);
uploadCanvasToTexture();
}

function uploadCanvasToTexture() {
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, ctx.canvas);
gl.generateMipmap(gl.TEXTURE_2D);
}

var uniforms = {
u_texture: tex,
};

function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

var projection = m4.perspective(30 * Math.PI / 180, gl.canvas.clientWidth / gl.canvas.clientHeight, 0.5, 10);
var eye = [1, 4, -6];
var target = [0, 0, 0];
var up = [0, 1, 0];

var camera = m4.lookAt(eye, target, up);
var view = m4.inverse(camera);
var viewProjection = m4.multiply(view, projection);
var world = m4.rotationY(time);

uniforms.u_worldViewProjection = m4.multiply(world, viewProjection);

gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);

requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0px; }
canvas { width: 100vw; height: 100vh; display: block; }
<script id="vs" type="notjs">
uniform mat4 u_worldViewProjection;

attribute vec4 position;
attribute vec2 texcoord;

varying vec2 v_texCoord;

void main() {
v_texCoord = texcoord;
gl_Position = u_worldViewProjection * position;
}
</script>
<script id="fs" type="notjs">
precision mediump float;

varying vec2 v_texCoord;

uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texCoord);
}
</script>
<script src="https://twgljs.org/dist/twgl-full.min.js"></script>
<canvas id="c"></canvas>
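
One design note on the canvas approach above: the snippet re-uploads the whole canvas each time an image finishes loading, so the faces appear as the images arrive. If you would rather upload the texture once after all 6 images are ready, a minimal sketch using Promise.all could look like this (urls is assumed to be the same array of 6 image URLs, and uploadCanvasToTexture is the helper from the snippet above):

// Hypothetical variation: load all 6 images first, then draw them into the
// 2d canvas and upload the canvas to the texture a single time.
function loadImage(url) {
  return new Promise(function(resolve, reject) {
    var img = new Image();
    img.onload = function() { resolve(img); };
    img.onerror = reject;
    img.crossOrigin = "";
    img.src = url;
  });
}

Promise.all(urls.map(loadImage)).then(function(images) {
  images.forEach(function(img, ndx) {
    var x = ndx % 3;               // 3 images per row in the atlas
    var y = ndx / 3 | 0;
    ctx.drawImage(img, 0, 0, img.width, img.height, x * 128, y * 128, 128, 128);
  });
  uploadCanvasToTexture();         // one upload instead of one per image
});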

If you really need full resolution for each face, then the second most common approach is to make 6 planes and position them so that they form a cube. Put one texture on each plane and draw with 6 draw calls, one per plane.

"use strict";
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);

var arrays = {
position: [
-1, -1, 0,
1, -1, 0,
-1, 1, 0,
1, 1, 0,
],
texcoord: [
0, 0,
1, 0,
0, 1,
1, 1,
],
indices: [
0, 1, 2, 2, 1, 3,
],
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
var textures = twgl.createTextures(gl, {
face0: { src:"https://i.imgur.com/weklTat.gif", crossOrigin: "", },
face1: { src:"https://i.imgur.com/6AvnLa3.jpg", crossOrigin: "", },
face2: { src:"https://i.imgur.com/HkzeCU2.jpg", crossOrigin: "", },
face3: { src:"https://i.imgur.com/D9HVm6n.png", crossOrigin: "", },
face4: { src:"https://i.imgur.com/7MlmkJr.jpg", crossOrigin: "", },
face5: { src:"https://i.imgur.com/v38pV.jpg", crossOrigin: "", },
});

var models = [
{ tex: textures.face0, local: m4.translate(m4.rotationY(Math.PI * 0.0), [0, 0, 1]), },
{ tex: textures.face1, local: m4.translate(m4.rotationY(Math.PI * 0.5), [0, 0, 1]), },
{ tex: textures.face2, local: m4.translate(m4.rotationY(Math.PI * 1.0), [0, 0, 1]), },
{ tex: textures.face3, local: m4.translate(m4.rotationY(Math.PI * 1.5), [0, 0, 1]), },
{ tex: textures.face4, local: m4.translate(m4.rotationX(Math.PI * 0.5), [0, 0, 1]), },
{ tex: textures.face5, local: m4.translate(m4.rotationX(Math.PI * 1.5), [0, 0, 1]), },
];

var uniforms = {
};

function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

var projection = m4.perspective(30 * Math.PI / 180, gl.canvas.clientWidth / gl.canvas.clientHeight, 0.5, 10);
var eye = [1, 4, -6];
var target = [0, 0, 0];
var up = [0, 1, 0];

var camera = m4.lookAt(eye, target, up);
var view = m4.inverse(camera);
var viewProjection = m4.multiply(view, projection);

gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

models.forEach(function(model) {
var world = m4.rotationY(time);
m4.multiply(model.local, world, world);
uniforms.u_texture = model.tex;
uniforms.u_worldViewProjection = m4.multiply(world, viewProjection);

// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);
});

requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0px; }
canvas { width: 100vw; height: 100vh; display: block; }
<script id="vs" type="notjs">
uniform mat4 u_worldViewProjection;

attribute vec4 position;
attribute vec2 texcoord;

varying vec2 v_texCoord;

void main() {
v_texCoord = texcoord;
gl_Position = u_worldViewProjection * position;
}
</script>
<script id="fs" type="notjs">
precision mediump float;

varying vec2 v_texCoord;

uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texCoord);
}
</script>
<script src="https://twgljs.org/dist/twgl-full.min.js"></script>
<canvas id="c"></canvas>
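
For completeness, the same 6-draw-call idea also works with the 24-vertex cube and texture coordinates from the question, without twgl: bind a different texture before drawing each face's 6 indices. This is only a rough sketch under assumptions not in the original code: faceTextures is a hypothetical array of 6 WebGLTexture objects, and the cube's index buffer (bound as ELEMENT_ARRAY_BUFFER, UNSIGNED_SHORT) is assumed to store each face's 6 indices contiguously.

// Hypothetical per-face draw loop for the question's cube geometry.
var samplerLocation = gl.getUniformLocation(shaderProgram, "uSampler");
gl.activeTexture(gl.TEXTURE0);
gl.uniform1i(samplerLocation, 0);   // the sampler always reads texture unit 0

for (var face = 0; face < 6; ++face) {
  gl.bindTexture(gl.TEXTURE_2D, faceTextures[face]);   // swap the texture
  // 6 indices per face, 2 bytes per UNSIGNED_SHORT index -> byte offset
  gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, face * 6 * 2);
}

Note this costs 6 texture binds and 6 draw calls per cube, which is the main reason the single-atlas approach above is usually preferred.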

The original question, "textures - How do I map different textures to different faces of a cube in WebGL?", can be found on Stack Overflow: https://stackoverflow.com/questions/37116831/
