
c# - EMGU CV SURF image matching


I have been working with the SURF feature detection example from the EMGU CV library.

So far it has worked very well; I can detect the matched object between two given images, but I have run into a problem when the images do not match.

I have looked for support on the forums, but what I found is far from my situation. Does anyone know which parameters determine whether two images match? When I test two images that do not match, the code still proceeds as if there were a match, and it draws a blurry thick red line at a random location on the image even though no match exists.

If there is no match, I want the code to break out and not continue.

Appended:

    // Namespaces this snippet relies on (EMGU CV 2.x, inferred from the calls below):
    using System;
    using System.Diagnostics;
    using System.Drawing;
    using Emgu.CV;
    using Emgu.CV.Features2D;
    using Emgu.CV.GPU;
    using Emgu.CV.Structure;
    using Emgu.CV.UI;
    using Emgu.CV.Util;

    static void Run()
    {
        Image<Gray, Byte> modelImage = new Image<Gray, byte>("HatersGonnaHate.png");
        Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
        Stopwatch watch;
        HomographyMatrix homography = null;

        SURFDetector surfCPU = new SURFDetector(500, false);

        VectorOfKeyPoint modelKeyPoints;
        VectorOfKeyPoint observedKeyPoints;
        Matrix<int> indices;
        Matrix<float> dist;
        Matrix<byte> mask;

        if (GpuInvoke.HasCuda)
        {
            GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
            // extract features from the object (model) image
            using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
            using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
            {
                modelKeyPoints = new VectorOfKeyPoint();
                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                watch = Stopwatch.StartNew();

                // extract features from the observed image
                using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
                using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
                {
                    observedKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                    // k-nearest-neighbour match (k = 2) on the GPU
                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                    indices = new Matrix<int>(gpuMatchIndices.Size);
                    dist = new Matrix<float>(indices.Size);
                    gpuMatchIndices.Download(indices);
                    gpuMatchDist.Download(dist);

                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);

                    // keep only matches whose best distance clearly beats the second best
                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }

                    watch.Stop();
                }
            }
        }
        else
        {
            // extract features from the object (model) image
            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
            Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

            BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
            matcher.Add(modelDescriptors);

            int k = 2;
            indices = new Matrix<int>(observedDescriptors.Rows, k);
            dist = new Matrix<float>(observedDescriptors.Rows, k);
            matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

            mask = new Matrix<byte>(dist.Rows, 1);
            mask.SetValue(255);

            // keep only matches whose best distance clearly beats the second best
            Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
            }

            watch.Stop();
        }

        // Draw the matched keypoints
        Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

        #region draw the projected region on the image
        if (homography != null)
        {
            // draw a rectangle along the projected model
            Rectangle rect = modelImage.ROI;
            PointF[] pts = new PointF[] {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)};
            homography.ProjectPoints(pts);

            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
        }
        #endregion

        ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
    }
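As a note on the bail-out I am after: the draw step above only tests homography != null, and with only a handful of surviving matches a bogus homography can still be produced. A minimal sketch of one possible guard, reusing the mask and homography variables from the code above (the inlier threshold of 10 is an illustrative assumption, not a tuned value):

    // Hypothetical guard, placed just before the #region that draws the rectangle:
    // require both a homography and a minimum number of inliers in the vote mask.
    int inliers = CvInvoke.cvCountNonZero(mask);   // matches surviving the votes
    if (homography == null || inliers < 10)        // 10 is an assumed threshold
    {
        Console.WriteLine("No reliable match found - skipping the outline.");
        return;                                    // stop instead of drawing
    }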

Best Answer

I'm not sure there is a method that suits all image sequences or all cases of geometric deformation.

I suggest you compute the PSNR between the two images and work out a tolerance threshold for your image sequence.
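A minimal sketch of that idea, assuming EMGU 2.x, two 8-bit grayscale images of equal size, and an illustrative 20 dB threshold (all assumptions, not values from the original answer):

    // Hedged sketch: PSNR between two equally sized 8-bit grayscale images.
    static double Psnr(Image<Gray, Byte> a, Image<Gray, Byte> b)
    {
        // mean squared error over all pixels
        Image<Gray, float> diff = a.Convert<Gray, float>().AbsDiff(b.Convert<Gray, float>());
        double mse = diff.Pow(2).GetAverage().Intensity;
        if (mse <= double.Epsilon)
            return double.PositiveInfinity;        // identical images
        return 10.0 * Math.Log10(255.0 * 255.0 / mse);
    }

    // usage: treat the pair as "no match" when similarity is too low
    // if (Psnr(modelImage, observedImage) < 20.0) { /* reject the match */ }

A higher PSNR means the two images are more alike; the acceptance threshold has to be studied per image sequence, as suggested above.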

Regarding c# - EMGU CV SURF image matching, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/9740724/
