I'm trying to compute the 8 (4 + 4) vertices of the view volume's planes: near and far.
I need these vertices to draw the camera's view volume in WebGL.
So far I've managed to compute them using trigonometry from each angle, but somehow, when I draw the vertices, the result doesn't look accurate. These are the vertex equations I've come up with so far:
y = sqrt(hypotenuse^2 - plane^2)
x = sqrt(hypotenuse^2 - plane^2)
z = plane (near or far)
Can anyone help? Thanks in advance.
Best Answer
You can project a standard cube through the inverse of the projection matrix.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
  gl_Position = u_worldViewProjection * position;
}
`;
const fs = `
precision mediump float;
void main() {
  gl_FragColor = vec4(1, 0, 0, 1);
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const arrays = {
  position: [
    -1,  1, -1,
     1,  1, -1,
     1, -1, -1,
    -1, -1, -1,
    -1,  1,  1,
     1,  1,  1,
     1, -1,  1,
    -1, -1,  1,
  ],
  indices: [
    0, 1, 1, 2, 2, 3, 3, 0,
    4, 5, 5, 6, 6, 7, 7, 4,
    0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

function render(time) {
  time *= 0.001;

  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  // the projection used to look at the scene
  let projectionToViewWith;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
  }

  // the projection whose frustum we want to draw
  let projectionToBeViewed;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 2;
    const zFar = 10;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);

  // orbit the viewing camera around the origin
  const radius = 20;
  const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);
  const viewProjection = m4.multiply(projectionToViewWith, view);

  // draw the -1 to +1 cube transformed by the inverse projection matrix
  const worldViewProjection = m4.multiply(
      viewProjection,
      inverseProjectionToBeViewed);

  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
  });
  twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
To get the points for the 8 corners you just do the projection in reverse. A projection matrix takes the frustum space, which is fovy tall and fovy * aspect wide, starting at -zNear and ending at -zFar, and converts that space into a -1 to +1 box after the perspective divide.
To compute the points of that box going backward, we just project a -1 to +1 box through the inverse projection matrix and do the perspective divide again (which is exactly what's happening in the example above; we're just doing it all on the GPU).
So, taking it off the GPU and doing it in JavaScript:
// the 8 corners of the -1 to +1 clip-space box;
// inverseProjectionMatrix is the inverse of the projection whose corners we want
[
  [-1,  1, -1],
  [ 1,  1, -1],
  [ 1, -1, -1],
  [-1, -1, -1],
  [-1,  1,  1],
  [ 1,  1,  1],
  [ 1, -1,  1],
  [-1, -1,  1],
].forEach((point) => {
  // m4.transformPoint applies the matrix and the perspective divide
  console.log(m4.transformPoint(inverseProjectionMatrix, point));
});
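Under the hood, m4.transformPoint multiplies the point by the 4x4 matrix and then divides by the resulting w, i.e. it performs the perspective divide. Here's a minimal sketch of that math, assuming twgl's flat 16-element matrix layout (translation in elements 12-14); it's only for illustration, in practice just call m4.transformPoint:
// illustrative stand-in for m4.transformPoint: multiply [x, y, z, 1]
// by the matrix, then divide x, y, z by the resulting w
function transformPoint(m, p) {
  const [x, y, z] = p;
  const w = m[3] * x + m[7] * y + m[11] * z + m[15];
  return [
    (m[0] * x + m[4] * y + m[8] * z + m[12]) / w,
    (m[1] * x + m[5] * y + m[9] * z + m[13]) / w,
    (m[2] * x + m[6] * y + m[10] * z + m[14]) / w,
  ];
}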
Here's a working example:
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
  gl_Position = u_worldViewProjection * position;
  gl_PointSize = 10.;
}
`;
const fs = `
precision mediump float;
uniform vec4 u_color;
void main() {
  gl_FragColor = u_color;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const positions = [
  -1,  1, -1,
   1,  1, -1,
   1, -1, -1,
  -1, -1, -1,
  -1,  1,  1,
   1,  1,  1,
   1, -1,  1,
  -1, -1,  1,
];
const arrays = {
  position: positions,
  indices: [
    0, 1, 1, 2, 2, 3, 3, 0,
    4, 5, 5, 6, 6, 7, 7, 4,
    0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

function render(time) {
  time *= 0.001;

  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  let projectionToViewWith;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
  }
  let projectionToBeViewed;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 2;
    const zFar = 10;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);

  const radius = 20;
  const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);
  const viewProjection = m4.multiply(projectionToViewWith, view);

  const worldViewProjection = m4.multiply(
      viewProjection,
      inverseProjectionToBeViewed);

  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
    u_color: [1, 0, 0, 1],
  });
  twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);

  // just because I'm lazy let's draw each point one at a time
  // note: since in our case the frustum is not moving we
  // could have computed these at init time.
  const positionLoc = programInfo.attribSetters.position.location;
  gl.disableVertexAttribArray(positionLoc);
  for (let i = 0; i < positions.length; i += 3) {
    const point = positions.slice(i, i + 3);
    const worldPosition = m4.transformPoint(
        inverseProjectionToBeViewed, point);
    gl.vertexAttrib3f(positionLoc, ...worldPosition);
    twgl.setUniforms(programInfo, {
      u_color: [0, 1, 0, 1],
      u_worldViewProjection: viewProjection,
    });
    gl.drawArrays(gl.POINTS, 0, 1);
  }

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
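As the comment in the loop above notes, when the frustum isn't changing, the unprojected corner points could be computed once at init time instead of every frame. Here is a minimal sketch of that, reusing the positions array and inverseProjectionToBeViewed from the example (the frustumCorners name is just for illustration):
// unproject each clip-space corner once, up front
const frustumCorners = [];
for (let i = 0; i < positions.length; i += 3) {
  const clipCorner = positions.slice(i, i + 3);
  frustumCorners.push(m4.transformPoint(inverseProjectionToBeViewed, clipCorner));
}
// frustumCorners now holds the 8 corner points in the frustum camera's view
// space: the first 4 are on the near plane, the last 4 on the far plane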
You mentioned in the comments that you want to show, in one canvas, the view frustum of the camera from another canvas.
That's effectively what's already happening above, except the camera in canvasWhosFrustumWeWantToRender isn't moving. Instead it just sits at the origin, looking down the -Z axis with +Y up. To let the frustum move, so you can show where it is relative to its camera, just add in that camera's matrix.
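Concretely, the world matrix for the frustum wireframe is the in-scene camera's matrix multiplied by the inverse of its projection matrix; these are the relevant lines from the full example below:

  const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
  const worldViewProjection = m4.multiply(viewProjection, world);

Here's the full example: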
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const ext = gl.getExtension("OES_standard_derivatives");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
varying vec3 v_position;
void main() {
  gl_Position = u_worldViewProjection * position;
  v_position = position.xyz;  // for fake lighting
}
`;
const fs = `
#extension GL_OES_standard_derivatives : enable
precision mediump float;
varying vec3 v_position;
uniform vec4 u_color;
void main() {
  vec3 fdx = dFdx(v_position);
  vec3 fdy = dFdy(v_position);
  vec3 n = normalize(cross(fdx, fdy));
  float l = dot(n, normalize(vec3(1, 2, -3))) * .5 + .5;
  gl_FragColor = u_color;
  gl_FragColor.rgb *= l;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const arrays = {
  position: [
    -1,  1, -1,
     1,  1, -1,
     1, -1, -1,
    -1, -1, -1,
    -1,  1,  1,
     1,  1,  1,
     1, -1,  1,
    -1, -1,  1,
  ],
  indices: [
    0, 1, 1, 2, 2, 3, 3, 0,
    4, 5, 5, 6, 6, 7, 7, 4,
    0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const concat = twgl.primitives.concatVertices;
const reorient = twgl.primitives.reorientVertices;
const wireCubeBufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const solidCubeBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 2);
const cameraBufferInfo = twgl.createBufferInfoFromArrays(gl,
  concat([
    reorient(twgl.primitives.createCubeVertices(2),
             m4.translation([0, 0, 1])),
    reorient(twgl.primitives.createTruncatedConeVertices(0, 1, 2, 12, 1),
             m4.rotationX(Math.PI * -.5)),
  ])
);

const black = [0, 0, 0, 1];
const blue = [0, 0, 1, 1];

function drawScene(viewProjection, clearColor) {
  gl.clearColor(...clearColor);
  gl.clear(gl.COLOR_BUFFER_BIT);

  const numCubes = 10;
  for (let i = 0; i < numCubes; ++i) {
    const u = i / numCubes;
    let mat = m4.rotationY(u * Math.PI * 2);
    mat = m4.translate(mat, [0, 0, 10]);
    mat = m4.scale(mat, [1, 1 + u * 23 % 1, 1]);
    mat = m4.translate(mat, [0, .5, 0]);
    mat = m4.multiply(viewProjection, mat);
    drawModel(solidCubeBufferInfo, mat, [u, u * 3 % 1, u * 7 % 1, 1]);
  }
}

function drawModel(bufferInfo, worldViewProjection, color, mode) {
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
    u_color: color,
  });
  twgl.drawBufferInfo(gl, bufferInfo, mode);
}

function render(time) {
  time *= 0.001;

  twgl.resizeCanvasToDisplaySize(gl.canvas);
  const width = gl.canvas.width;
  const height = gl.canvas.height;
  const halfWidth = width / 2;
  gl.viewport(0, 0, width, height);
  gl.disable(gl.SCISSOR_TEST);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
  gl.enable(gl.DEPTH_TEST);

  let projectionToViewWith;  // the projection on the right
  {
    const fov = 60 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
  }
  let projectionToBeViewed;  // the projection on the left
  {
    const fov = 60 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
    const zNear = 1.5;
    const zFar = 15;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);

  let cameraViewingScene;  // camera for right view
  {
    const t1 = 0;
    const radius = 30;
    const eye = [Math.sin(t1) * radius, 4, Math.cos(t1) * radius];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    cameraViewingScene = m4.lookAt(eye, target, up);
  }
  let cameraInScene;  // camera for left view
  {
    const t1 = time;
    const t2 = time + .4;
    const r1 = 10 + Math.sin(t1);
    const r2 = 10 + Math.sin(t2) * 2;
    const eye = [Math.sin(t1) * r1, 0 + Math.sin(t1) * 4, Math.cos(t1) * r1];
    const target = [Math.sin(t2) * r2, 1 + Math.sin(t2), Math.cos(t2) * r2];
    const up = [0, 1, 0];
    cameraInScene = m4.lookAt(eye, target, up);
  }

  // there's only one shader program so just set it once
  gl.useProgram(programInfo.program);

  // draw only on left half of canvas
  gl.enable(gl.SCISSOR_TEST);
  gl.scissor(0, 0, halfWidth, height);
  gl.viewport(0, 0, halfWidth, height);

  // draw the scene on the left using the camera inside the scene
  {
    const view = m4.inverse(cameraInScene);
    const viewProjection = m4.multiply(projectionToBeViewed, view);
    drawScene(viewProjection, [.9, 1, .9, 1]);
  }

  // draw only on right half of canvas
  gl.scissor(halfWidth, 0, halfWidth, height);
  gl.viewport(halfWidth, 0, halfWidth, height);

  // draw the same scene on the right using the camera outside the scene
  {
    const view = m4.inverse(cameraViewingScene);
    const viewProjection = m4.multiply(projectionToViewWith, view);
    drawScene(viewProjection, [.9, 1, 1, 1]);

    // draw the in scene camera's frustum
    {
      const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
      const worldViewProjection = m4.multiply(viewProjection, world);
      drawModel(wireCubeBufferInfo, worldViewProjection, black, gl.LINES);
    }

    // draw the in scene camera's camera model
    {
      const worldViewProjection = m4.multiply(viewProjection, cameraInScene);
      drawModel(cameraBufferInfo, worldViewProjection, blue);
    }
  }

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
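Putting it together for the original question: to get the 8 (4 near + 4 far) frustum vertices in world space directly in JavaScript, unproject the corners of the -1 to +1 box and transform them by the camera's matrix. Here is a minimal sketch using the twgl m4 helpers from above; the fov/aspect/zNear/zFar values are just placeholders:

const fov = 60 * Math.PI / 180;
const aspect = 16 / 9;
const zNear = 1.5;
const zFar = 15;
const projection = m4.perspective(fov, aspect, zNear, zFar);

// camera matrix for the camera that owns the frustum
// (identity = sitting at the origin looking down -Z)
const camera = m4.identity();
const cameraTimesInvProjection = m4.multiply(camera, m4.inverse(projection));

const clipCorners = [
  [-1,  1, -1], [ 1,  1, -1], [ 1, -1, -1], [-1, -1, -1],  // near plane
  [-1,  1,  1], [ 1,  1,  1], [ 1, -1,  1], [-1, -1,  1],  // far plane
];
const worldCorners = clipCorners.map(
    (p) => m4.transformPoint(cameraTimesInvProjection, p));
console.log(worldCorners);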
Regarding "graphics - WebGL draw perspective view volume", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/47425723/