gpt4 book ai didi

2d - 如何在 webgpu 中创建 2d 剪贴蒙版?

转载 作者:行者123 更新时间:2023-12-03 07:56:16 26 4
gpt4 key购买 nike

我一直在研究一种 webgpu 方法来创建剪贴蒙版。

这是我尝试过的:

const pipeline1 = device.createRenderPipeline({
vertex: {
module: basicShaderModule,
entryPoint: 'vertex_main',
buffers: [{
attributes: [{
shaderLocation: 0,
offset: 0,
format: 'float32x2'
}],
arrayStride: 8,
stepMode: 'vertex'
}],
},
fragment: {
module: basicShaderModule,
entryPoint: 'fragment_main',
targets: [{ format }]
},
primitive: {
topology: 'triangle-strip',
},
layout: 'auto',
})
passEncoder.setPipeline(pipeline1);
const uniformValues1 = new Float32Array(4)
uniformValues1.set([1, 0, 0, 1], 0)
const uniformBuffer1 = device.createBuffer({
size: uniformValues1.byteLength,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(uniformBuffer1, 0, uniformValues1)
passEncoder.setBindGroup(0, device.createBindGroup({
layout: pipeline1.getBindGroupLayout(0),
entries: [
{
binding: 0, resource: {
buffer: uniformBuffer1
}
},
],
}));
const vertices1 = new Float32Array([-1, -1, 1, -1, 1, 1])
const verticesBuffer1 = device.createBuffer({
size: vertices1.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
})
device.queue.writeBuffer(verticesBuffer1, 0, vertices1, 0, vertices1.length)
passEncoder.setVertexBuffer(0, verticesBuffer1);
passEncoder.draw(3);

const pipeline2 = device.createRenderPipeline({
vertex: {
module: basicShaderModule,
entryPoint: 'vertex_main',
buffers: [{
attributes: [{
shaderLocation: 0,
offset: 0,
format: 'float32x2'
}],
arrayStride: 8,
stepMode: 'vertex'
}],
},
fragment: {
module: basicShaderModule,
entryPoint: 'fragment_main',
targets: [{ format }]
},
primitive: {
topology: 'line-strip',
},
layout: 'auto',
})
passEncoder.setPipeline(pipeline2);
const uniformValues2 = new Float32Array(4)
uniformValues2.set([0, 1, 0, 1], 0)
const uniformBuffer2 = device.createBuffer({
size: uniformValues2.byteLength,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(uniformBuffer2, 0, uniformValues2)
passEncoder.setBindGroup(0, device.createBindGroup({
layout: pipeline2.getBindGroupLayout(0),
entries: [
{
binding: 0, resource: {
buffer: uniformBuffer2
}
},
],
}));
const vertices2 = new Float32Array([0, -1, 1, -1, -1, 1, 0, -1])
const verticesBuffer2 = device.createBuffer({
size: vertices2.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
})
device.queue.writeBuffer(verticesBuffer2, 0, vertices2, 0, vertices2.length)
passEncoder.setVertexBuffer(0, verticesBuffer2);
passEncoder.draw(4);

passEncoder.end();
device.queue.submit([commandEncoder.finish()]);

上面的代码绘制了一个路径和一个内容,我期望内容被路径剪切。

这是当前结果:

enter image description here

这是预期结果:

enter image description here

我忽略了一些常见代码,因为 stackoverflow 提示“看起来您的帖子主要是代码;请添加更多详细信息。”

最佳答案

裁剪(clip)的方式有无数种。以下是我随手能想到的几种:

  • 通过顶点数学求交进行裁剪
  • 使用深度纹理进行裁剪
  • 使用模板(stencil)纹理进行裁剪
  • 使用 Alpha 蒙版进行裁剪
  • 通过区域求交进行裁剪(例如 SDF 或 CSG)

使用 Alpha 蒙版裁剪的优点是蒙版可以带有混合(半透明)效果。

无论如何,通过模板纹理裁剪的做法是:创建一个模板纹理,将蒙版渲染到它上面,然后在渲染其他内容时设置为仅在蒙版覆盖的位置绘制。

特别是,设置掩码的管道将设置为类似的内容

  const maskMakingPipeline = device.createRenderPipeline({
...
fragment: {
module,
entryPoint: 'fs',
targets: [],
},
// replace the stencil value when we draw
depthStencil: {
format: 'stencil8',
depthCompare: 'always',
depthWriteEnabled: false,
stencilFront: {
passOp:'replace',
},
},
});

fragment 阶段中没有 targets,因为我们只绘制到模板纹理。我们设置了:当正面三角形绘制到此纹理并通过深度测试(设置为 'always',始终通过)时,就用模板引用值(稍后设置)'replace'(替换)模板值。

绘制第二个三角形(被 mask 的三角形)的管道如下所示

  const maskedPipeline = device.createRenderPipeline({
...
fragment: {
module,
entryPoint: 'fs',
targets: [{format: presentationFormat}],
},
// draw only where stencil value matches
depthStencil: {
depthCompare: 'always',
depthWriteEnabled: false,
format: 'stencil8',
stencilFront: {
compare: 'equal',
},
},
});

现在设置了fragment.targets,因为我们想要渲染颜色。 depthStencil 已设置,因此只有当模板“等于”模板引用值时,才会绘制正面三角形中的像素。

在绘制时,我们首先将蒙版渲染到模板纹理

  {
const pass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: stencilTexture.createView(),
stencilClearValue: 0,
stencilLoadOp: 'clear',
stencilStoreOp: 'store',
}
});
// draw the mask
pass.setPipeline(maskMakingPipeline);
pass.setVertexBuffer(0, maskVertexBuffer);
pass.setStencilReference(1);
pass.draw(3);
pass.end();
}

模板纹理被清除为 0,而模板引用值被设置为 1,因此当此渲染通道(render pass)完成后,模板值为 1 的位置就是之后允许渲染的区域。

然后我们渲染第二个被 mask 的三角形

  {
const pass = encoder.beginRenderPass({
colorAttachments: [{
view: context.getCurrentTexture().createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
}],
depthStencilAttachment: {
view: stencilTexture.createView(),
stencilLoadOp: 'load',
stencilStoreOp: 'store',
}
});
// draw only the mask is
pass.setPipeline(maskedPipeline);
pass.setStencilReference(1);
pass.setVertexBuffer(0, toBeMaskedVertexBuffer);
pass.draw(3);

pass.end();
}

在这里,我们在渲染之前“加载”模板纹理,并将模板引用设置为 1,这样我们就只在模板纹理中有 1 的地方绘制。

const code = `
struct VSIn {
@location(0) pos: vec4f,
};

struct VSOut {
@builtin(position) pos: vec4f,
};

@vertex fn vs(vsIn: VSIn) -> VSOut {
var vsOut: VSOut;
vsOut.pos = vsIn.pos;
return vsOut;
}

@fragment fn fs(vin: VSOut) -> @location(0) vec4f {
return vec4f(1, 0, 0, 1);
}
`;

(async() => {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
alert('need webgpu');
return;
}

const canvas = document.querySelector("canvas")
const context = canvas.getContext('webgpu');
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
alphaMode: 'opaque',
});

const module = device.createShaderModule({code});
const maskMakingPipeline = device.createRenderPipeline({
label: 'pipeline for rendering the mask',
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
// position
{
arrayStride: 2 * 4, // 2 floats, 4 bytes each
attributes: [
{shaderLocation: 0, offset: 0, format: 'float32x2'},
],
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [],
},
// replace the stencil value when we draw
depthStencil: {
format: 'stencil8',
depthCompare: 'always',
depthWriteEnabled: false,
stencilFront: {
passOp:'replace',
},
},
});

const maskedPipeline = device.createRenderPipeline({
label: 'pipeline for rendering only where the mask is',
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
// position
{
arrayStride: 2 * 4, // 2 floats, 4 bytes each
attributes: [
{shaderLocation: 0, offset: 0, format: 'float32x2'},
],
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [{format: presentationFormat}],
},
// draw only where stencil value matches
depthStencil: {
depthCompare: 'always',
depthWriteEnabled: false,
format: 'stencil8',
stencilFront: {
compare: 'equal',
},
},
});

const maskVerts = new Float32Array([-1, -1, 1, -1, 1, 1]);
const toBeMaskedVerts = new Float32Array([0, -1, 1, -1, -1, 1]);

const maskVertexBuffer = device.createBuffer({
size: maskVerts.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(maskVertexBuffer, 0, maskVerts);
const toBeMaskedVertexBuffer = device.createBuffer({
size: toBeMaskedVerts.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(toBeMaskedVertexBuffer, 0, toBeMaskedVerts);

const stencilTexture = device.createTexture({
format: 'stencil8',
size: [canvas.width, canvas.height],
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});

const encoder = device.createCommandEncoder();
{
const pass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: stencilTexture.createView(),
stencilClearValue: 0,
stencilLoadOp: 'clear',
stencilStoreOp: 'store',
}
});
// draw the mask
pass.setPipeline(maskMakingPipeline);
pass.setVertexBuffer(0, maskVertexBuffer);
pass.setStencilReference(1);
pass.draw(3);
pass.end();
}
{
const pass = encoder.beginRenderPass({
colorAttachments: [{
view: context.getCurrentTexture().createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
}],
depthStencilAttachment: {
view: stencilTexture.createView(),
stencilLoadOp: 'load',
stencilStoreOp: 'store',
}
});
// draw only the mask is
pass.setPipeline(maskedPipeline);
pass.setStencilReference(1);
pass.setVertexBuffer(0, toBeMaskedVertexBuffer);
pass.draw(3);

pass.end();
}

device.queue.submit([encoder.finish()]);


})();
<canvas></canvas>

正如我们可以将模板比较设置为 'equal' 一样,我们也可以使用深度比较和深度纹理来实现蒙版。

步骤:

  1. 将深度纹理清除为 1.0。

  2. 将蒙版绘制到深度纹理,并将其 Z 值设置为某个值,例如 0.0(这正是我们已经在做的事情)。

    这样,深度纹理中第一次绘制覆盖到的位置最终为 0,其余位置仍为 1。

  3. 绘制我们想要 mask 的物体,深度比较设置为“等于”,其 Z 值也为 0.0(同样,我们已经在做的事情)。

    最终我们只会在深度纹理值为 0.0 的位置进行绘制。

const code = `
struct VSIn {
@location(0) pos: vec4f,
};

struct VSOut {
@builtin(position) pos: vec4f,
};

@vertex fn vs(vsIn: VSIn) -> VSOut {
var vsOut: VSOut;
vsOut.pos = vsIn.pos;
return vsOut;
}

@fragment fn fs(vin: VSOut) -> @location(0) vec4f {
return vec4f(1, 0, 0, 1);
}
`;

(async() => {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
alert('need webgpu');
return;
}

const canvas = document.querySelector("canvas")
const context = canvas.getContext('webgpu');
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
alphaMode: 'opaque',
});

const module = device.createShaderModule({code});
const maskMakingPipeline = device.createRenderPipeline({
label: 'pipeline for rendering the mask',
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
// position
{
arrayStride: 2 * 4, // 2 floats, 4 bytes each
attributes: [
{shaderLocation: 0, offset: 0, format: 'float32x2'},
],
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [],
},
// replace the depth value when we draw
depthStencil: {
format: 'depth24plus',
depthCompare: 'always',
depthWriteEnabled: true,
},
});

const maskedPipeline = device.createRenderPipeline({
label: 'pipeline for rendering only where the mask is',
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
buffers: [
// position
{
arrayStride: 2 * 4, // 2 floats, 4 bytes each
attributes: [
{shaderLocation: 0, offset: 0, format: 'float32x2'},
],
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [{format: presentationFormat}],
},
// draw only where stencil value matches
depthStencil: {
format: 'depth24plus',
depthCompare: 'equal',
depthWriteEnabled: false,
},
});

const maskVerts = new Float32Array([-1, -1, 1, -1, 1, 1]);
const toBeMaskedVerts = new Float32Array([0, -1, 1, -1, -1, 1]);

const maskVertexBuffer = device.createBuffer({
size: maskVerts.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(maskVertexBuffer, 0, maskVerts);
const toBeMaskedVertexBuffer = device.createBuffer({
size: toBeMaskedVerts.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(toBeMaskedVertexBuffer, 0, toBeMaskedVerts);

const depthTexture = device.createTexture({
format: 'depth24plus',
size: [canvas.width, canvas.height],
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});

const encoder = device.createCommandEncoder();
{
const pass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: depthTexture.createView(),
depthClearValue: 1,
depthLoadOp: 'clear',
depthStoreOp: 'store',
}
});
// draw the mask
pass.setPipeline(maskMakingPipeline);
pass.setVertexBuffer(0, maskVertexBuffer);
pass.draw(3);
pass.end();
}
{
const pass = encoder.beginRenderPass({
colorAttachments: [{
view: context.getCurrentTexture().createView(),
clearValue: [0, 0, 0, 1],
loadOp: 'clear',
storeOp: 'store',
}],
depthStencilAttachment: {
view: depthTexture.createView(),
depthLoadOp: 'load',
depthStoreOp: 'store',
}
});
// draw only the mask is
pass.setPipeline(maskedPipeline);
pass.setVertexBuffer(0, toBeMaskedVertexBuffer);
pass.draw(3);

pass.end();
}

device.queue.submit([encoder.finish()]);


})();
<canvas></canvas>

关于2d - 如何在 webgpu 中创建 2d 剪贴蒙版?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/75986461/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com