WebGL, how to transition from 3D to 2D and back


How do I transition between a 3D view and a 2D view in WebGL?

I have a 3D view of a scene, and I would also like to show a 2D view, like a map view. How do I switch between the two types of view?

Best Answer

Generally, to switch from 3D to 2D you just use an orthographic projection instead of a perspective projection.

If you want to animate the transition between the two, something like this seems to work:

const ortho = someOrthoFunc(left, right, top, bottom, orthoZNear, orthoZFar);
const persp = somePerspFunc(fov, aspect, perspZNear, perspZFar);
const projection = [];
for (let i = 0; i < 16; ++i) {
  projection[i] = lerp(ortho[i], persp[i], mixAmount);
}

function lerp(a, b, l) {
  return a + (b - a) * l;
}

Use a mixAmount of 0 when you want the orthographic (2D-ish) view and a mixAmount of 1 when you want the perspective (3D) view, and you can animate that value between 0 and 1.
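For example, here is a minimal sketch of one way to drive mixAmount from elapsed time (transitionStart and transitionDuration are just illustrative names, not part of the code above):

// Hypothetical helper: ramps mixAmount from 0 (orthographic/2D)
// to 1 (perspective/3D) over `transitionDuration` seconds.
const transitionStart = 0;     // seconds at which the blend began
const transitionDuration = 1;  // seconds the blend should take

function getMixAmount(timeInSeconds) {
  const t = (timeInSeconds - transitionStart) / transitionDuration;
  return Math.min(Math.max(t, 0), 1);  // clamp to the 0..1 range
}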

Note that if you want the orthographic view to match the perspective view, you need to choose values for top, bottom, left, and right that fit your application. For transitioning between two different views (say, a first-person view on the ground versus a view looking straight down), you can pick whatever settings you want. But say you are already looking down and just want to switch from 3D to 2D with the same view. In that case you need to pick left, right, top, and bottom values that match the perspective view for a given number of units. For top and bottom, that is probably however many units fit vertically on screen at the "ground" distance from the camera.

See this answer, where distance is the distance to the ground. The formula there gives half the number of units visible at that distance, which you can plug in for top and bottom. For left and right, just multiply by the aspect ratio of the canvas's display size.
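As a rough sketch of that math (assuming fieldOfView is the vertical field of view in radians, aspect is the canvas width divided by its height, and distanceToGround is an illustrative name for the camera-to-ground distance):

// Half the number of vertical units visible at `distanceToGround`
// with a perspective projection of vertical field of view `fieldOfView`.
const halfVerticalUnits = distanceToGround * Math.tan(fieldOfView / 2);

// Feed the same extents into the orthographic projection so the 2D view lines up.
const top = -halfVerticalUnits;
const bottom = +halfVerticalUnits;
const left = top * aspect;
const right = bottom * aspect;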

The other thing that changes is the camera. A common way to position a camera is to use a lookAt function which, depending on the library, might generate either a view matrix or a camera matrix.

To look down:

const cameraPosition = [x, groundHeight + distanceAboveGround, z];
const target = [x, groundHeight, z];
const up = [0, 0, 1];
const camera = someLookAtFunction(cameraPosition, target, up);

For a 3D camera you would use a different set of cameraPosition, target, and up values, and you can animate the transition between the two cameras by lerping those three variables.
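As a condensed sketch of that camera blend (the positions match the full example further down; camMixAmount is the 0-to-1 blend factor):

// Blend from the straight-down (2D) camera to the angled (3D) camera.
const orthoEye = [0, 7, 0];    // directly above the scene
const orthoUp  = [0, 0, 1];    // up can't be +Y when looking straight down
const perspEye = [1, 4, -6];   // angled 3D view
const perspUp  = [0, 1, 0];
const target   = [0, 0, 0];    // both cameras look at the origin

const eye = v3.lerp(orthoEye, perspEye, camMixAmount);
const up  = v3.lerp(orthoUp, perspUp, camMixAmount);
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);   // view matrix from the camera matrix

The complete runnable example below (using twgl) animates both the projection and the camera this way.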

const vs = `
uniform mat4 u_worldViewProjection;

attribute vec4 a_position;
attribute vec2 a_texcoord;

varying vec4 v_position;
varying vec2 v_texcoord;

void main() {
  v_texcoord = a_texcoord;
  gl_Position = u_worldViewProjection * a_position;
}
`;
const fs = `
precision mediump float;

varying vec2 v_texcoord;

uniform sampler2D u_texture;

void main() {
  gl_FragColor = texture2D(u_texture, v_texcoord);
}
`;


"use strict";
twgl.setDefaults({attribPrefix: "a_"});
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.getElementById("c").getContext("webgl");

// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for positions, texcoords
const bufferInfo = twgl.primitives.createCubeBufferInfo(gl);

// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
const tex = twgl.createTexture(gl, {
  min: gl.NEAREST,
  mag: gl.NEAREST,
  src: [
    255, 0, 0, 255,
    0, 192, 0, 255,
    0, 0, 255, 255,
    255, 224, 0, 255,
  ],
});

const settings = {
  projectionMode: 2,
  cameraMode: 2,
  fov: 30,
};

function render(time) {
  time *= 0.001;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  const fov = settings.fov * Math.PI / 180;
  const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const perspZNear = 0.5;
  const perspZFar = 10;
  const persp = m4.perspective(fov, aspect, perspZNear, perspZFar);

  // the size to make the orthographic view is arbitrary.
  // here we're choosing the number of units at ground level
  // away from the top perspective camera
  const heightAboveGroundInTopView = 7;
  const halfSizeToFitOnScreen = heightAboveGroundInTopView * Math.tan(fov / 2);

  const top = -halfSizeToFitOnScreen;
  const bottom = +halfSizeToFitOnScreen;
  const left = top * aspect;
  const right = bottom * aspect;
  const orthoZNear = 0.5;
  const orthoZFar = 10;
  const ortho = m4.ortho(left, right, top, bottom, orthoZNear, orthoZFar);

  let perspMixAmount;
  let camMixAmount;
  switch (settings.projectionMode) {
    case 0: // 2d
      perspMixAmount = 0;
      break;
    case 1: // 3d
      perspMixAmount = 1;
      break;
    case 2: // animated
      perspMixAmount = Math.sin(time) * .5 + .5;
      break;
  }

  switch (settings.cameraMode) {
    case 0: // top
      camMixAmount = 0;
      break;
    case 1: // angle
      camMixAmount = 1;
      break;
    case 2: // animated
      camMixAmount = Math.sin(time) * .5 + .5;
      break;
  }

  const projection = [];
  for (let i = 0; i < 16; ++i) {
    projection[i] = lerp(ortho[i], persp[i], perspMixAmount);
  }

  const perspEye = [1, 4, -6];
  const perspTarget = [0, 0, 0];
  const perspUp = [0, 1, 0];

  const orthoEye = [0, heightAboveGroundInTopView, 0];
  const orthoTarget = [0, 0, 0];
  const orthoUp = [0, 0, 1];

  const eye = v3.lerp(orthoEye, perspEye, camMixAmount);
  const target = v3.lerp(orthoTarget, perspTarget, camMixAmount);
  const up = v3.lerp(orthoUp, perspUp, camMixAmount);

  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);
  const viewProjection = m4.multiply(projection, view);

  gl.useProgram(programInfo.program);

  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  const t = time * .1;
  for (let z = -1; z <= 1; ++z) {
    for (let x = -1; x <= 1; ++x) {
      const world = m4.translation([x * 1.4, 0, z * 1.4]);
      m4.rotateY(world, t + z + x, world);

      // calls gl.uniformXXX
      twgl.setUniforms(programInfo, {
        u_texture: tex,
        u_worldViewProjection: m4.multiply(viewProjection, world),
      });

      // calls gl.drawArrays or gl.drawElements
      twgl.drawBufferInfo(gl, bufferInfo);
    }
  }

  requestAnimationFrame(render);
}
requestAnimationFrame(render);

setupRadioButtons("proj", "projectionMode");
setupRadioButtons("cam", "cameraMode");
setupSlider("#fovSlider", "#fov", "fov");

function setupSlider(sliderId, labelId, property) {
  const slider = document.querySelector(sliderId);
  const label = document.querySelector(labelId);

  function updateLabel() {
    label.textContent = settings[property];
  }

  slider.addEventListener('input', e => {
    settings[property] = parseInt(slider.value);
    updateLabel();
  });

  updateLabel();
  slider.value = settings[property];
}

function setupRadioButtons(name, property) {
  document.querySelectorAll(`input[name=${name}]`).forEach(elem => {
    elem.addEventListener('change', e => {
      if (e.target.checked) {
        settings[property] = parseInt(e.target.value);
      }
    });
  });
}

function lerp(a, b, l) {
  return a + (b - a) * l;
}
body { margin: 0; }
canvas { display: block; width: 100vw; height: 100vh; }
#ui {
  position: absolute;
  left: 10px;
  top: 10px;
  z-index: 2;
  background: rgba(255, 255, 255, 0.9);
  padding: .5em;
}
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas id="c"></canvas>
<div id="ui">
<div>projection:</div>
<div><input type="radio" name="proj" value="0" /><label for="2d">orthographic</label></div>
<div><input type="radio" name="proj" value="1" /><label for="3d">perspective</label></div>
<div><input type="radio" name="proj" value="2" checked/><label for="animated">animated</label></div>
<div>&nbsp;</div>
<div>camera:</div>
<div><input type="radio" name="cam" value="0" /><label for="top">top</label></div>
<div><input type="radio" name="cam" value="1" /><label for="angle">angle</label></div>
<div><input type="radio" name="cam" value="2" checked/><label for="animated">animated</label></div>
<div>&nbsp;</div>
<div>field of view[<span id="fov"></span>]</div>
<div><input id="fovSlider" type="range" min="10" max="90" value="60"/></div>
</div>

Regarding "WebGL, how to transition from 3D to 2D and back", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/44044944/
