
javascript - Three.js zoom to fit with offset


I am trying to come up with a zoom-to-fit function that ensures a list of points is fully contained in the drawing area, while also adding a configurable offset on all sides of the image, i.e. zooming to fit a framed region rather than the whole viewer area:

image with offsets applied
(Note that the offsets in this image are not accurate.)

I am using a perspective camera here. The function must update the camera position, not its parameters or view direction.

I have found a zoom-to-fit function that works well*, but I am struggling to implement the offsets.

My first approach of simply offsetting the point coordinates (in the camera's coordinate system) did not work. More of the scene was shown, but my selected points did not end up at the edges of the target area. In retrospect this makes sense, because perspective distortion moves the points away from their intended positions.

Can anyone suggest a possible solution for how to correctly calculate the camera distance and position?


* Three.js does not come with a zoom-to-fit function, but there are plenty of examples and questions online about how to implement this logic. Probably the best one for this use case is CameraViewBox. I adapted their example to my use case in this fiddle:

import * as THREE from 'https://cdn.skypack.dev/three@0.130.1';
import { OrbitControls } from 'https://cdn.skypack.dev/three@0.130.1/examples/jsm/controls/OrbitControls.js';

let camera, controls, scene, renderer, material;
let isDragging = false;
let cameraViewBox;
const raycaster = new THREE.Raycaster();
const mouse = new THREE.Vector2();
const meshes = [];
const selection = new Set();
const selectedMaterial = new THREE.MeshPhongMaterial({ color: 0xff0000, flatShading: true });
const floorPlane = new THREE.Plane(new THREE.Vector3(0, 1, 0));

init();
animate();

function init() {
  scene = new THREE.Scene();
  scene.background = new THREE.Color(0xcccccc);
  scene.fog = new THREE.FogExp2(0xcccccc, 0.002);

  renderer = new THREE.WebGLRenderer({
    antialias: true
  });
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  document.body.appendChild(renderer.domElement);

  camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
  camera.position.set(400, 200, 0);

  // Create the cameraViewBox
  cameraViewBox = new THREE.CameraViewBox();
  cameraViewBox.setViewFromCamera(camera);

  // controls
  controls = new OrbitControls(camera, renderer.domElement);
  controls.minDistance = 100;
  controls.maxDistance = 500;
  controls.maxPolarAngle = Math.PI / 2;

  // world
  const geometry = new THREE.BoxGeometry(1, 1, 1);
  geometry.translate(0, 0.5, 0);
  material = new THREE.MeshPhongMaterial({
    color: 0xffffff,
    flatShading: true
  });

  for (let i = 0; i < 500; i++) {
    const mesh = new THREE.Mesh(geometry, material);
    mesh.position.x = Math.random() * 1600 - 800;
    mesh.position.y = 0;
    mesh.position.z = Math.random() * 1600 - 800;
    mesh.scale.x = 20;
    mesh.scale.y = Math.random() * 80 + 10;
    mesh.scale.z = 20;
    mesh.updateMatrix();
    mesh.matrixAutoUpdate = false;
    scene.add(mesh);
    meshes.push(mesh);
  }

  // lights
  const dirLight1 = new THREE.DirectionalLight(0xffffff);
  dirLight1.position.set(1, 1, 1);
  scene.add(dirLight1);

  const dirLight2 = new THREE.DirectionalLight(0x002288);
  dirLight2.position.set(-1, -1, -1);
  scene.add(dirLight2);

  const ambientLight = new THREE.AmbientLight(0x222222);
  scene.add(ambientLight);

  window.addEventListener('resize', onWindowResize);

  // Add DOM events
  renderer.domElement.addEventListener('mousedown', onMouseDown, false);
  window.addEventListener('mousemove', onMouseMove, false);
  renderer.domElement.addEventListener('mouseup', onMouseUp, false);
}

function onWindowResize() {
  camera.aspect = window.innerWidth / window.innerHeight;
  camera.updateProjectionMatrix();

  renderer.setSize(window.innerWidth, window.innerHeight);
}

function animate() {
  requestAnimationFrame(animate);
  renderer.render(scene, camera);
}

// Add selection support
function onMouseDown() {
  isDragging = false;
}

function onMouseMove() {
  isDragging = true;
}

function onMouseUp(event) {
  if (isDragging) {
    isDragging = false;
    return;
  } else {
    isDragging = false;
  }

  mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
  mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
  raycaster.setFromCamera(mouse, camera);

  var intersects = raycaster.intersectObjects(meshes);
  if (intersects.length > 0) {
    var mesh = intersects[0].object;

    if (selection.has(mesh)) {
      mesh.material = material;
      selection.delete(mesh);
    } else {
      mesh.material = selectedMaterial;
      selection.add(mesh);
    }
  }
}

function centerOnSelection() {
  if (selection.size === 0) {
    return;
  }

  cameraViewBox.setViewFromCamera(camera);
  cameraViewBox.setFromObjects(Array.from(selection));
  cameraViewBox.getCameraPositionAndTarget(camera.position, controls.target, floorPlane);
  controls.update();
}
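
For reference, centerOnSelection() is defined above but its UI trigger is not part of this excerpt. A minimal hook for testing could look like the following; the keyboard shortcut is purely an assumption for illustration, not taken from the fiddle:

// Hypothetical trigger: press "f" to frame the current selection.
window.addEventListener('keydown', (event) => {
  if (event.key === 'f') {
    centerOnSelection();
  }
});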

Best Answer

I am now able to solve this myself, at least to some extent. It is quite straightforward if we start with symmetric offsets:

PerspectiveView

Calculating the camera position with a narrower FOV angle (green) shifts the projected points in the final image by a certain amount. If we find the right angle, the points end up at exactly the offset we are looking for.

We can calculate this angle using basic trigonometry. We compute the distance to the normalized-device-coordinate plane (i.e. height/width from -1 to 1; blue in the image), then apply the offset (a percentage value in the range 0.0 to 1.0) and derive a new angle:

tan(FOV/2) = 1/dist => dist = 1/tan(FOV/2)

tan(FOVg/2) = (1 - offset)/dist => FOVg = atan((1 - offset)/dist) * 2

Repeat this for the horizontal FOV (modified by the aspect ratio), using the same or a different offset value. Then apply the existing zoom-to-fit logic given these new angles.
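
As a concrete illustration, here is a minimal sketch of that calculation (my own, not code from the answer). It assumes symmetric offsets expressed as fractions of the half-frame (0.0 to 1.0) and that THREE is imported as in the question's snippet; the function name and parameters are placeholders:

// Sketch: derive narrower vertical/horizontal FOV angles that leave a margin of
// offsetY / offsetX (0.0–1.0) around the fitted points. Names are illustrative only.
function computeOffsetFov(camera, offsetX, offsetY) {
  const fovY = THREE.MathUtils.degToRad(camera.fov); // vertical FOV in radians

  // Distance to the NDC plane, where the half-height is 1: tan(FOV/2) = 1/dist
  const distY = 1 / Math.tan(fovY / 2);
  // Narrower vertical angle: tan(FOVg/2) = (1 - offset)/dist
  const fovYOffset = Math.atan((1 - offsetY) / distY) * 2;

  // Horizontal FOV follows from the aspect ratio; apply the horizontal offset the same way.
  const fovX = Math.atan(Math.tan(fovY / 2) * camera.aspect) * 2;
  const distX = 1 / Math.tan(fovX / 2);
  const fovXOffset = Math.atan((1 - offsetX) / distX) * 2;

  return { fovXOffset, fovYOffset };
}

The camera keeps its real fov; the reduced angles are only substituted into the zoom-to-fit computation when deriving the camera distance and position.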


This approach works for symmetric offsets. It might also work for asymmetric offsets by calculating 4 separate new angles. The tricky part is using these to calculate the correct camera position and zoom...
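
Computing the four angles themselves follows the same pattern as the symmetric case; the sketch below is my own illustration under that assumption and deliberately stops there, since deriving the camera position and zoom from four asymmetric angles is the part left open above:

// Sketch (assumption): one reduced half-angle per edge, offsets given as 0.0–1.0.
function computeAsymmetricHalfAngles(camera, offsets /* { top, bottom, left, right } */) {
  const fovY = THREE.MathUtils.degToRad(camera.fov);
  const fovX = Math.atan(Math.tan(fovY / 2) * camera.aspect) * 2;
  const distY = 1 / Math.tan(fovY / 2);
  const distX = 1 / Math.tan(fovX / 2);

  return {
    top: Math.atan((1 - offsets.top) / distY),
    bottom: Math.atan((1 - offsets.bottom) / distY),
    left: Math.atan((1 - offsets.left) / distX),
    right: Math.atan((1 - offsets.right) / distX)
  };
}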

Regarding javascript - Three.js zoom to fit with offset, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/68519550/
