
javascript - How to access imageData from a renderTarget?


I'm a master's student in computer graphics, and I'm having trouble accessing the image data (pixels) of a texture created with an EffectComposer in three.js.

The first composer (composer) uses a line-detection shader to find the road lines in the lane and puts the result into a render target (rt_Binary). My second composer (fcomposer2) uses a shader that paints a certain area green if it lies within a given region.

The plan is to render composer first and, after analyzing the rt_Binary image, determine the limits.
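For orientation, here is a rough per-frame sketch of that plan (not from the original post; the names come from the code in Edit 1 below):

// pass 1: line detection, intended to end up in rt_Binary
composer.render();
// ... read the binary image back and collect the detected line pixels ...
// derive the yMaxL/xMinL/... limits from those pixels
lineEquation(left_x, left_y, right_x, right_y);
// pass 2: draw the green lane area using those limits
fcomposer2.render();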

I found some functions that let me get the image data (getImageData(image) and getPixel(imagedata, x, y)), but they only work in these cases:

// before image
var imagedata = getImageData(videoTexture.image);
// processed image
var imagedata2 = getImageData(renderer.domElement);

If I render the first composer to the screen, I get the correct limit values, but when I put the second composer in, I get wrong limit values. Is there any way to get the image data from a renderTarget? If so, how?

Edit 1:

Here is the script/HTML code I'm using:

<html xmlns="http://www.w3.org/1999/xhtml"><head>
<title>Tests WebGL</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script src="three.js/build/three.js"></script>
<script src="js/CopyShader.js"></script>
<script src="js/EffectComposer.js"></script>
<script src="js/MaskPass.js" ></script>
<script src="js/RenderPass.js" ></script>
<script src="js/ShaderPass.js"></script>
<script src="js/stats.min.js" ></script>
<!-- Shaders -->
<script src="js/shaders/KernelShader.js" ></script>
<script src="js/shaders/SimpleShader.js"></script>
<script src="js/shaders/MyShader.js"></script>
<script src="js/shaders/BinaryShader.js"></script>
<script type="text/javascript">

var scene, fscene, sceneF;
var camera;
var renderer, rt_Binary;
var composer;
var stats;
var fmaterial;

var videoTexture;
var videoWidth = 480;
var videoHeight = 270;

var rendererWidth = videoWidth;
var rendererHeight = videoHeight;

var x_max = 345;//videoWidth*0.72; //
var x_min = 120;//videoWidth*0.25; //
var y_max = 189;//videoHeight*0.7 ;
var y_min = 148;//videoHeight*0.55;

// var ml=0.0, mr=0.0, mm=0.0;
// var bl=0.0, br=0.0, bm=0.0;

var yMaxL = 0, yMinL = 0, yMaxR = 0, yMinR = 0;
var xMaxL = 0, xMinL = 0, xMaxR = 0, xMinR = 0;

var frame = 0;
// init the scene
window.onload = function() {
renderer = new THREE.WebGLRenderer(
{
antialias: true, // to get smoother output
preserveDrawingBuffer: true // to allow screenshot
});
renderer.setClearColor(0xffffff, 1);
renderer.autoClear = false;
renderer.setSize(rendererWidth, rendererHeight);
document.getElementById('container').appendChild(renderer.domElement);

//add stats
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
document.getElementById('container').appendChild(stats.domElement);

// create Main scene
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(35, rendererWidth / rendererHeight, 1, 10000);
camera.position.set(0, 1, 6);
scene.add(camera);

// define video element
video = document.createElement('video');
// video.src = 'GOPR0007.webm';
video.src = 'output.webm';
video.width = videoWidth;
video.height = videoHeight;
video.autoplay = true;
video.loop = true;

//create 3d object and apply video texture to it
var videoMesh = new THREE.Object3D();
scene.add(videoMesh);

videoTexture = new THREE.Texture(video);

var geom = new THREE.PlaneGeometry(1, 1);
material = new THREE.MeshBasicMaterial({map: videoTexture});

var mesh = new THREE.Mesh(geom, material);
videoMesh.add(mesh);

var renderTargetParameters = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBFormat, stencilBuffer: false };
rt_Binary = new THREE.WebGLRenderTarget( videoWidth, videoHeight, renderTargetParameters );

// Composers

// composer = new THREE.EffectComposer(renderer, renderTarget2);
composer = new THREE.EffectComposer(renderer, rt_Binary );
composer.addPass(new THREE.RenderPass(scene, camera));


var simple = new SimpleShader.Class(videoWidth, videoHeight);
var simEffect = new THREE.ShaderPass(simple.shader);
composer.addPass(simEffect);

var ef = new BinaryShader.Class(videoWidth, videoHeight, 1.1, [-2,-2,-2,0,0,0,2,2,2]);
var effect = new THREE.ShaderPass(ef.shader);
composer.addPass(effect);

var copyPass = new THREE.ShaderPass(THREE.CopyShader);
// copyPass.renderToScreen = true;
composer.addPass(copyPass);


//New scene
sceneF = new THREE.Scene();
sceneF.add(camera);

var videoMesh2 = new THREE.Object3D();
sceneF.add(videoMesh2);

var geomF = new THREE.PlaneGeometry(1, 1);
var materialF = new THREE.MeshBasicMaterial({map: videoTexture});

var meshF = new THREE.Mesh(geomF, materialF);
sceneF.add(meshF);

fcomposer2 = new THREE.EffectComposer(renderer );
fcomposer2.addPass(new THREE.RenderPass(sceneF, camera));

fcomposer2.addPass(simEffect);

var ef1 = new MyShader.Class(videoWidth, videoHeight, [yMaxL,yMinL,xMaxL,xMinL,yMaxR,yMinR,xMaxR,xMinR], videoTexture);
var effect1 = new THREE.ShaderPass(ef1.shader);
fcomposer2.addPass(effect1);

var copyPass2 = new THREE.ShaderPass(THREE.CopyShader);
copyPass2.renderToScreen = true;
fcomposer2.addPass(copyPass2);

animate();
}

// animation loop
function animate() {
// loop on request animation loop
// - it has to be at the beginning of the function
requestAnimationFrame(animate);

// do the render
render();
stats.update();
if ((frame % 50) == 0) {
console.log("frame ", frame, " ");

console.log("yMaxL: ", yMaxL, " ");
console.log("yMinL: ", yMinL, " ");
console.log("xMaxL: ", xMaxL, " ");
console.log("xMinL: ", xMinL, " ");

console.log("yMaxR: ", yMaxR, " ");
console.log("yMinR: ", yMinR, " ");
console.log("xMaxR: ", xMaxR, " ");
console.log("xMinR: ", xMinR, " ");

manipulatePixels();
}
frame = frame + 1;
yMaxL = 0, yMinL = 0, yMaxR = 0, yMinR = 0;
xMaxL = 0, xMinL = 0, xMaxR = 0, xMinR = 0;

}

// render the scene
function render() {
if (video.readyState === video.HAVE_ENOUGH_DATA) {
videoTexture.needsUpdate = true;
}

// actually render the scene
renderer.clear();
composer.render();

var left_x = new Array();
var left_y = new Array();
var l = 0;

var right_x = new Array();
var right_y = new Array();
var r = 0;

if (frame == 200) {
var imagedata2 = getImageData(renderer.domElement);

var middle = imagedata2.width / 2;

for (var x=x_min; x < x_max; x=x+1) {
for (var y=y_min; y < y_max; y=y+1) {
var pixel = getPixel(imagedata2, x, y);
if (pixel.g > 0)
{
//console.log(pixel);
if (x < middle) {
left_x[l] = x;
left_y[l] = y;
l++;
}
else {
right_x[r] = x;
right_y[r] = y;
r++;
}
}
}
}

lineEquation(left_x, left_y, right_x, right_y);


}

fcomposer2.render();
}

function lineEquation(left_x,left_y,right_x,right_y) {
var newYMAX = left_y[0];
var newYMIN = left_y[0];

var maximosL = new Array();
var minimosL = new Array();

//left
for (var i=1; i < left_y.length; i++) {
if (left_y[i]>newYMAX) newYMAX = left_y[i];
else {
if (left_y[i]<newYMIN) newYMIN = left_y[i];
}
}

yMaxL = newYMAX;
yMinL = newYMIN;
// yMaxL = ymaxL/videoHeight;
// yMinL = yminL/videoHeight;


var pmin=0, pmax=0;
for (var i=0; i < left_y.length; i++) {
if (left_y[i] === newYMAX) {
// console.log(left_y[i]);
// console.log(left_x[i]);
maximosL[pmax] = left_x[i];
pmax++;
}

}
for (var j=0; j < left_y.length; j++) {
if (left_y[j] === newYMIN) {
// console.log(left_y[j]);
// console.log(left_x[j]);
minimosL[pmin] = left_x[j];
pmin++;
}
}

// console.log(maximosL);
// console.log(minimosL);

var sumMAX = 0, sumMIN = 0;
for (var i=0; i< maximosL.length; i++) {
sumMAX = sumMAX + maximosL[i];
}

for (var j=0; j< minimosL.length; j++) {
sumMIN = sumMIN + minimosL[j];
}

xMaxL = sumMAX/maximosL.length;
xMinL = sumMIN/minimosL.length;

// xMaxL /= videoWidth;
// xMinL /= videoWidth;

//right
var maximosR = new Array();
var minimosR = new Array();

newYMAX = right_y[0];
newYMIN = right_y[0];

pmin=0; pmax=0;
for (var i=0; i < right_y.length; i++) {
if (right_y[i]> newYMAX) newYMAX = right_y[i];
else {
if (right_y[i]< newYMIN) newYMIN = right_y[i];
}
}

yMaxR = newYMAX;
yMinR = newYMIN;
// yMaxR = ymaxR/videoHeight;
// yMinR = yminR/videoHeight;


for (var i=0; i < right_y.length; i++) {
if (right_y[i] === newYMAX)
{maximosR[pmax] = right_x[i]; pmax++;}
if (right_y[i] === newYMIN)
{minimosR[pmin] = right_x[i]; pmin++;}
}

// console.log(maximosR);
// console.log(minimosR);

xMaxR=0;
for (var i=0; i< maximosR.length; i++) {
xMaxR += maximosR[i];
}
xMinR=0;
for (var i=0; i< minimosR.length; i++) {
xMinR += minimosR[i];
}

// console.log(xMaxR);
// console.log(xMinR);

xMaxR /= maximosR.length;
xMinR /= minimosR.length;

// console.log(xMaxR);
// console.log(xMinR);

// xMinR /= videoWidth;
// xMaxR /= videoWidth;



}

function manipulatePixels() {
// image before processing
var imagedata = getImageData(videoTexture.image);
// processed image
var imagedata2 = getImageData(renderer.domElement);

// console.log(getPixel(imagedata, 480 - 1, 270 - 1));
// console.log(getPixel(imagedata2, 480 - 1, 270 - 1));

}

function getImageData(image) {
var canvas = document.createElement('canvas');
canvas.width = image.width;
canvas.height = image.height;

var context = canvas.getContext('2d');
context.drawImage(image, 0, 0);

return context.getImageData(0, 0, image.width, image.height);
}

function getPixel(imagedata, x, y) {
var position = (x + imagedata.width * y) * 4, data = imagedata.data;
return {r: data[ position ], g: data[ position + 1 ], b: data[ position + 2 ], a: data[ position + 3 ]};
}

function findLineByLeastSquares(values_x, values_y) {
var sum_x = 0;
var sum_y = 0;
var sum_xy = 0;
var sum_xx = 0;

/*
* We'll use those variables for faster read/write access.
*/
var x = 0;
var y = 0;
var values_length = values_x.length;

if (values_length != values_y.length) {
throw new Error('The parameters values_x and values_y need to have same size!');
}

/*
* Nothing to do.
*/
if (values_length === 0) {
return [ [], [] ];
}

/*
* Calculate the sum for each of the parts necessary.
*/
for (var v = 0; v < values_length; v++) {
x = values_x[v];
y = values_y[v];
sum_x += x;
sum_y += y;
sum_xx += (x*x);
sum_xy += (x*y);
}

console.log (sum_x);
console.log(sum_y);
console.log(sum_xx);
console.log(sum_xy);
console.log(values_length);
/*
* Calculate m and b for the formula:
* y = x * m + b
*/
var m = (sum_x*sum_y - values_length*sum_xy) / (sum_x*sum_x - values_length*sum_xx);
var b = (sum_y - (m*sum_x))/values_length;

//console.log([m,b]);

return [m, b];
}
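
// Note (not part of the original post): findLineByLeastSquares is defined here but never
// called in the code above; lineEquation() derives the limits directly instead. If it were
// wired in, a natural spot would be inside render(), once the left/right arrays are filled:
//
// var leftLine = findLineByLeastSquares(left_x, left_y); // [m, b] of the left lane line
// var rightLine = findLineByLeastSquares(right_x, right_y); // [m, b] of the right lane line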

//resize method
/**window.addEventListener('resize', onWindowResize, false);
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();

renderer.setSize(window.innerWidth, window.innerHeight);
} */
</script>

Edit 2: Some pictures of what I'm trying to do: picture 1 shows the result of composer together with the console output; the limits I get from the lineEquation function are correct for what I intend to do. Picture 2 shows fcomposer2 (the fixed area) and the console output; there the limits are wrong.

Picture 1: http://prntscr.com/1ays73
Picture 2: http://prntscr.com/1ays0j

Edit 3: By "access" I mean being able to read the pixel values from the texture created by the binaryShader. For example, in picture 1 the lines are drawn in blue/green tones, and I want to search the image held by the renderTarget for the (x, y) positions of those pixels. If I can find those pixels, I can adjust the green area in picture 2 to fit the road lines.

This processing is needed so that the green area overlaps the driving lane the user is currently in; if I can't get those points, I can't identify the lane.
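For reference: newer three.js releases (later than the build this post appears to use) expose renderer.readRenderTargetPixels, which reads pixels straight out of a WebGLRenderTarget without going through a 2D canvas. A minimal sketch, assuming the rt_Binary target and video dimensions from the code above, and an unsigned-byte RGBA-style target (the case readRenderTargetPixels handles most reliably):

// read the whole render target back into a flat RGBA byte array
var buffer = new Uint8Array(videoWidth * videoHeight * 4);
renderer.readRenderTargetPixels(rt_Binary, 0, 0, videoWidth, videoHeight, buffer);

// same layout as getPixel() above; note that the read-back uses a bottom-left
// origin, so y may be flipped relative to the canvas-based getImageData result
function getTargetPixel(x, y) {
    var i = (x + videoWidth * y) * 4;
    return { r: buffer[i], g: buffer[i + 1], b: buffer[i + 2], a: buffer[i + 3] };
}

One caveat: EffectComposer ping-pongs between two internal render targets, so after several passes the final image may live in composer.readBuffer rather than in rt_Binary itself; it is worth checking both.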

Best answer

I got it working. Apparently I had forgotten to declare fcomposer2 at the beginning of the script. Thanks for the replies/comments, and sorry for the inconvenience.
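In other words, the fix appears to have been simply declaring fcomposer2 with the other globals at the top of the script, roughly:

var scene, fscene, sceneF;
var camera;
var renderer, rt_Binary;
var composer, fcomposer2; // fcomposer2 now declared up front instead of leaking as an implicit global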

Regarding "javascript - How to access imageData from a renderTarget?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/17217936/
