c# - How to do real-time ray tracing in Unity with C#


I am making a video game in Unity and decided to use ray tracing. I have the code, but as you will see in a second, it isn't exactly rendering frame by frame. Here is my ray tracing code; this is the main script, attached to the main camera.

using UnityEngine;
using System.Collections;

public class RayTracer : MonoBehaviour
{
    public Color backgroundColor = Color.black;
    public float RenderResolution = 1f;
    public float maxDist = 100f;
    public int maxRecursion = 4;

    private Light[] lights;
    private Texture2D renderTexture;

    void Awake()
    {
        renderTexture = new Texture2D((int)(Screen.width * RenderResolution), (int)(Screen.height * RenderResolution));
        lights = FindObjectsOfType(typeof(Light)) as Light[];
    }

    void Start()
    {
        RayTrace();
    }

    void OnGUI()
    {
        GUI.DrawTexture(new Rect(0, 0, Screen.width, Screen.height), renderTexture);
    }

    void RayTrace()
    {
        for (int x = 0; x < renderTexture.width; x++)
        {
            for (int y = 0; y < renderTexture.height; y++)
            {
                Color color = Color.black;
                Ray ray = GetComponent<Camera>().ScreenPointToRay(new Vector3(x / RenderResolution, y / RenderResolution, 0));

                renderTexture.SetPixel(x, y, TraceRay(ray, color, 0));
            }
        }

        renderTexture.Apply();
    }

    Color TraceRay(Ray ray, Color color, int recursiveLevel)
    {
        if (recursiveLevel < maxRecursion)
        {
            RaycastHit hit;
            if (Physics.Raycast(ray, out hit, maxDist))
            {
                Vector3 viewVector = ray.direction;
                Vector3 pos = hit.point + hit.normal * 0.0001f; // nudge off the surface to avoid self-intersection
                Vector3 normal = hit.normal;

                RayTracerObject rto = hit.collider.gameObject.GetComponent<RayTracerObject>();
                // Does the object we hit have that script?
                if (rto == null)
                {
                    var GO = hit.collider.gameObject;
                    Debug.Log("Raycast hit failure! On " + GO.name + " position " + GO.transform.position.ToString());
                    return color; // exit out
                }

                Material mat = hit.collider.GetComponent<Renderer>().material;
                if (mat.mainTexture)
                {
                    color += (mat.mainTexture as Texture2D).GetPixelBilinear(hit.textureCoord.x, hit.textureCoord.y);
                }
                else
                {
                    color += mat.color;
                }

                color *= TraceLight(rto, viewVector, pos, normal);

                // Reflection: bounce a new ray about the surface normal
                if (rto.reflectiveCoeff > 0)
                {
                    float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                    Ray newRay = new Ray(pos, viewVector - reflet * normal);
                    color += rto.reflectiveCoeff * TraceRay(newRay, color, recursiveLevel + 1);
                }

                // Transparency: continue the ray through the surface
                if (rto.transparentCoeff > 0)
                {
                    Ray newRay = new Ray(hit.point - hit.normal * 0.0001f, viewVector);
                    color += rto.transparentCoeff * TraceRay(newRay, color, recursiveLevel + 1);
                }
            }
        }

        return color;
    }

    Color TraceLight(RayTracerObject rto, Vector3 viewVector, Vector3 pos, Vector3 normal)
    {
        Color c = RenderSettings.ambientLight;

        foreach (Light light in lights)
        {
            if (light.enabled)
            {
                c += LightTrace(rto, light, viewVector, pos, normal);
            }
        }
        return c;
    }

    Color LightTrace(RayTracerObject rto, Light light, Vector3 viewVector, Vector3 pos, Vector3 normal)
    {
        float dot, distance, contribution;
        Vector3 direction;
        switch (light.type)
        {
            case LightType.Directional:
                contribution = 0;
                direction = -light.transform.forward;
                dot = Vector3.Dot(direction, normal);
                if (dot > 0)
                {
                    if (Physics.Raycast(pos, direction, maxDist))
                    {
                        return Color.black;
                    }

                    if (rto.lambertCoeff > 0)
                    {
                        contribution += dot * rto.lambertCoeff;
                    }
                    if (rto.reflectiveCoeff > 0)
                    {
                        if (rto.phongCoeff > 0)
                        {
                            float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                            Vector3 phongDir = viewVector - reflet * normal;
                            float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                            phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;

                            contribution += phongTerm;
                        }
                        if (rto.blinnPhongCoeff > 0)
                        {
                            Vector3 blinnDir = -light.transform.forward - viewVector;
                            float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                            if (temp != 0.0f)
                            {
                                blinnDir = (1.0f / temp) * blinnDir;
                                float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;

                                contribution += blinnTerm;
                            }
                        }
                    }
                }
                return light.color * light.intensity * contribution;
            case LightType.Point:
                contribution = 0;
                direction = (light.transform.position - pos).normalized;
                dot = Vector3.Dot(normal, direction);
                distance = Vector3.Distance(pos, light.transform.position);
                if ((distance < light.range) && (dot > 0))
                {
                    if (Physics.Raycast(pos, direction, distance))
                    {
                        return Color.black;
                    }

                    if (rto.lambertCoeff > 0)
                    {
                        contribution += dot * rto.lambertCoeff;
                    }
                    if (rto.reflectiveCoeff > 0)
                    {
                        if (rto.phongCoeff > 0)
                        {
                            float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                            Vector3 phongDir = viewVector - reflet * normal;
                            float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                            phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;

                            contribution += phongTerm;
                        }
                        if (rto.blinnPhongCoeff > 0)
                        {
                            Vector3 blinnDir = -light.transform.forward - viewVector;
                            float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                            if (temp != 0.0f)
                            {
                                blinnDir = (1.0f / temp) * blinnDir;
                                float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;

                                contribution += blinnTerm;
                            }
                        }
                    }
                }
                if (contribution == 0)
                {
                    return Color.black;
                }
                return light.color * light.intensity * contribution;
            case LightType.Spot:
                contribution = 0;
                direction = (light.transform.position - pos).normalized;
                dot = Vector3.Dot(normal, direction);
                distance = Vector3.Distance(pos, light.transform.position);
                if (distance < light.range && dot > 0)
                {
                    float dot2 = Vector3.Dot(-light.transform.forward, direction);
                    if (dot2 > (1 - light.spotAngle / 180))
                    {
                        if (Physics.Raycast(pos, direction, distance))
                        {
                            return Color.black;
                        }
                        if (rto.lambertCoeff > 0)
                        {
                            contribution += dot * rto.lambertCoeff;
                        }
                        if (rto.reflectiveCoeff > 0)
                        {
                            if (rto.phongCoeff > 0)
                            {
                                float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                                Vector3 phongDir = viewVector - reflet * normal;
                                float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                                phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;

                                contribution += phongTerm;
                            }
                            if (rto.blinnPhongCoeff > 0)
                            {
                                Vector3 blinnDir = -light.transform.forward - viewVector;
                                float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                                if (temp != 0.0f)
                                {
                                    blinnDir = (1.0f / temp) * blinnDir;
                                    float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                    blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;

                                    contribution += blinnTerm;
                                }
                            }
                        }
                    }
                }
                if (contribution == 0)
                {
                    return Color.black;
                }
                return light.color * light.intensity * contribution;
        }
        return Color.black;
    }

    float max(float x0, float x1)
    {
        return x0 > x1 ? x0 : x1;
    }
}

And here is the code attached to the objects in the scene:

using UnityEngine;
using System.Collections;

public class RayTracerObject : MonoBehaviour
{
    public float lambertCoeff = 1f;

    public float reflectiveCoeff = 0f;

    public float phongCoeff = 1f;
    public float phongPower = 2f;

    public float blinnPhongCoeff = 1f;
    public float blinnPhongPower = 2f;

    public float transparentCoeff = 0f;

    public Color baseColor = Color.gray;

    void Awake()
    {
        if (!GetComponent<Renderer>().material.mainTexture)
        {
            GetComponent<Renderer>().material.color = baseColor;
        }
    }
}

How would I go about doing this, and what would the code look like?

Best Answer

Whilst ray tracing on the main thread is a perfectly acceptable design, it is probably not what you want in Unity, as it blocks everything else until the trace finishes.
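
If you stay on the CPU, one simple way to stop that freeze is to spread the trace across frames with a coroutine. Below is a minimal sketch that reuses the renderTexture, RenderResolution and TraceRay members from the script above; rowsPerFrame is an invented tuning knob. Because coroutines run on the main thread, Physics.Raycast remains legal here:

// In RayTracer, replace the RayTrace() call in Start() with:
//     StartCoroutine(RayTraceProgressive());
IEnumerator RayTraceProgressive()
{
    const int rowsPerFrame = 8; // invented knob: rows traced before yielding
    for (int y = 0; y < renderTexture.height; y++)
    {
        for (int x = 0; x < renderTexture.width; x++)
        {
            Ray ray = GetComponent<Camera>().ScreenPointToRay(new Vector3(x / RenderResolution, y / RenderResolution, 0));
            renderTexture.SetPixel(x, y, TraceRay(ray, Color.black, 0));
        }
        if (y % rowsPerFrame == rowsPerFrame - 1)
        {
            renderTexture.Apply(); // show the rows traced so far
            yield return null;     // give the rest of Unity a frame
        }
    }
    renderTexture.Apply();
}

This keeps the whole engine responsive and shows the image building up progressively, though a full frame still takes just as long overall.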

Now you could arguably spawn a child thread to perform the ray tracing and have the main thread render the result. The trouble is that neither approach uses the GPU, which somewhat defeats the point of using Unity in the first place.
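
As a sketch of that child-thread approach (all names here are invented for illustration): the worker fills a plain color buffer, and the main thread uploads it to a texture. Note that Unity's Physics API and most other engine calls may not be used off the main thread, so a real worker would need its own ray/scene intersection code in place of the ShadePixel stub:

using System.Threading;
using UnityEngine;

public class ThreadedTracer : MonoBehaviour
{
    Texture2D tex;
    Color32[] buffer;
    int w, h;
    volatile bool frameReady;

    void Start()
    {
        w = Screen.width;
        h = Screen.height;
        tex = new Texture2D(w, h);
        buffer = new Color32[w * h];
        new Thread(TraceLoop) { IsBackground = true }.Start();
    }

    void TraceLoop()
    {
        while (true)
        {
            if (frameReady) continue; // wait until the main thread consumed the last frame
            for (int i = 0; i < buffer.Length; i++)
                buffer[i] = ShadePixel(i % w, i / w);
            frameReady = true;
        }
    }

    // Hypothetical placeholder: Physics.Raycast cannot be called here, so a real
    // implementation needs its own ray/triangle intersection against scene data
    // copied out on the main thread.
    Color32 ShadePixel(int x, int y)
    {
        return new Color32(0, 0, 0, 255);
    }

    void Update()
    {
        if (frameReady)
        {
            tex.SetPixels32(buffer); // texture upload must happen on the main thread
            tex.Apply();
            frameReady = false;
        }
    }

    void OnGUI()
    {
        GUI.DrawTexture(new Rect(0, 0, w, h), tex);
    }
}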

How to do real time Raytracing in unity with C#

It depends entirely on what your scene contains and how you intend to render it. Arguably you could render something simple in real time at a low resolution. But rendering at a reasonable screen resolution, with a reasonable level of ray bounces, that is, the number of recursive rays spawned by reflective or transmissive materials, may prove much harder.
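
To put rough, worst-case numbers on that: at 1920x1080 the script above fires about 2.07 million primary rays. With maxRecursion = 4 and every hit potentially spawning both a reflection ray and a transmission ray, that can balloon to roughly 2.07M x (2^4 - 1), about 31 million ray casts per frame, plus one shadow ray per light at every hit, all funnelled through Physics.Raycast on the CPU. That arithmetic is why the single-shot render above is nowhere near real time.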

Instead, I would urge you to watch the change occurring in ray tracing, whereby real-time ray tracing is now being performed on the GPU using techniques known as General Purpose GPU, or GPGPU. nVidia has a number of talks on this subject that can be found on YouTube. Here is my sample Unity GPGPU galaxy simulation, which may be useful as background on GPGPU.

A sample GPGPU kernel, merely to show you what GPGPU is about:

// File: Galaxy1Compute.compute

// Each #kernel tells which function to compile; you can have many kernels
#pragma kernel UpdateStars

#include "Galaxy.cginc"

// blackmagic
#define BLOCKSIZE 128

RWStructuredBuffer<Star> stars;

Texture2D HueTexture;

// refer to http://forum.unity3d.com/threads/163591-Compute-Shader-SamplerState-confusion
SamplerState samplerHueTexture;

// time elapsed since last frame
float deltaTime;

const float Softening = 3e4f;
#define Softening2 Softening * Softening

static float G = 6.67300e-11f;
static float DefaultMass = 1000000.0f;

// Do a pre-calculation assuming all the stars have the same mass
static float GMM = G * DefaultMass * DefaultMass;

[numthreads(BLOCKSIZE, 1, 1)]
void UpdateStars(uint3 id : SV_DispatchThreadID)
{
    uint i = id.x;
    uint numStars, stride;
    stars.GetDimensions(numStars, stride);

    float3 position = stars[i].position;
    float3 velocity = stars[i].velocity;

    float3 A = float3(0, 0, 0);

    [loop]
    for (uint j = 0; j < numStars; j++)
    {
        if (i != j)
        {
            float3 D = stars[j].position - stars[i].position;
            float r = length(D);
            float f = GMM / (r * r + Softening2);
            A += f * normalize(D);
        }
    }

    velocity += A * deltaTime;
    position += velocity * deltaTime;

    if (i < numStars)
    {
        stars[i].velocity = velocity;
        stars[i].position = position;
        stars[i].accelMagnitude = length(A);
    }
}
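
For context, a kernel like this is driven from a small C# script via Unity's ComputeShader API. The following is a minimal sketch, not the actual driver from the galaxy project: the Star layout is assumed from the fields the kernel reads, StarCount is an invented parameter, and the HueTexture binding is omitted.

using UnityEngine;

public class GalaxyDispatcher : MonoBehaviour
{
    // Assumed layout matching what Galaxy1Compute.compute reads
    struct Star
    {
        public Vector3 position;
        public Vector3 velocity;
        public float accelMagnitude;
    }

    public ComputeShader galaxyShader;  // assign Galaxy1Compute.compute in the Inspector
    const int StarCount = 16384;        // invented; must be a multiple of BLOCKSIZE (128)

    ComputeBuffer starBuffer;
    int kernel;

    void Start()
    {
        var stars = new Star[StarCount];
        for (int i = 0; i < StarCount; i++)
            stars[i].position = Random.insideUnitSphere * 1000f;

        starBuffer = new ComputeBuffer(StarCount, sizeof(float) * 7); // 3 + 3 + 1 floats
        starBuffer.SetData(stars);

        kernel = galaxyShader.FindKernel("UpdateStars");
        galaxyShader.SetBuffer(kernel, "stars", starBuffer);
    }

    void Update()
    {
        galaxyShader.SetFloat("deltaTime", Time.deltaTime);
        galaxyShader.Dispatch(kernel, StarCount / 128, 1, 1); // one thread group per BLOCKSIZE stars
    }

    void OnDestroy()
    {
        starBuffer.Release();
    }
}

The same dispatch pattern applies to a GPGPU ray tracer: the scene lives in buffers, each GPU thread traces one pixel, and the result is written to a texture the camera displays.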

Additionally, there are some good books on the subject. Real-time Volume Graphics, although it covers volumes, does cover casting rays, the essence of ray tracing. The hardest paradigm shift is writing for GPGPU in the first place; once you understand it, going from GPGPU volume shaders to a GPGPU ray tracer is an easy step.


Matt Pharr's Physically Based Rendering is a wonderful tome for any would-be ray tracing author (there is a second edition, but I have not read it).



Regarding "c# - How to do real-time ray tracing in Unity with C#", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/32814277/
