
c# - Emgu CV SURF: getting the coordinates of matched points


I am using Emgu CV's SURF feature to recognize similar objects in images.

The output image is drawn correctly: it shows all the keypoints found in both images, the similar points (which is what I want), and a rectangle (usually a rectangle, sometimes just a line) covering the similar points.

The problem is that the similar points are visible in the image, but they are not saved in the format I want. In fact, they are stored in a VectorOfKeyPoint object, which (as far as I can tell) just stores a pointer and other memory data, with the points themselves kept somewhere in memory. That means I can't get the similar points as pairs like this:

((img1X, img1Y), (img2X, img2Y))

That is what I am looking for, so that I can use the points afterwards. Right now I can only see the points in the result image, but I can't get them as pairs.
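For illustration, something like the following is the kind of structure I am after (a minimal sketch; the type and variable names here are just hypothetical):

// Hypothetical target: one entry per match, pairing a point in the
// model image with the corresponding point in the observed image.
List<Tuple<PointF, PointF>> matchedPairs = new List<Tuple<PointF, PointF>>();
// matchedPairs[0].Item1 -> (img1X, img1Y), matchedPairs[0].Item2 -> (img2X, img2Y)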

The code I am using is from the Emgu CV example:

//----------------------------------------------------------------------------
// Copyright (C) 2004-2016 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
#if !__IOS__
using Emgu.CV.Cuda;
#endif
using Emgu.CV.XFeatures2D;

namespace FirstEmgu
{

    public static class DrawMatches
    {
        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        private static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh = 300;

            Stopwatch watch;
            homography = null;

            modelKeyPoints = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                //extract features from the object image
                using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                {
                    surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    //using (GpuMat tmp = new GpuMat())
                    //using (Stream stream = new Stream())
                    {
                        matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                        surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                    observedKeyPoints, matches, mask, 2);
                        }
                    }
                    watch.Stop();
                }
            }
            else
#endif
            {
                using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                            matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                observedKeyPoints, matches, mask, 2);
                    }

                    watch.Stop();
                }
            }
            matchTime = watch.ElapsedMilliseconds;
        }
        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                    out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                #region draw the projected region on the image

                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                }

                #endregion

                return result;
            }
        }

        // ----------------------------------
        // WRITTEN BY MYSELF
        // ----------------------------------
        // Returns the 4 corner points (usually a rectangle) around the similar points,
        // but can't be used as-is, since sometimes the result degenerates into a line
        // (negative points)
        public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                    out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                Point[] points = null;
                if (homography != null)
                {
                    // project the corners of the model image into the observed image
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                }

                return points;
            }
        }
    }
}
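For reference, this is roughly how the two methods are called (a minimal sketch; the file names are placeholders and the loading call may need adjusting for your Emgu version):

// Load the model (object) image and the observed (scene) image, then
// draw the matches and the projected region.
long matchTime;
Mat modelImage = CvInvoke.Imread("model.png");
Mat observedImage = CvInvoke.Imread("scene.png");
Mat result = DrawMatches.Draw(modelImage, observedImage, out matchTime);
CvInvoke.Imshow("Matched features", result);
CvInvoke.WaitKey(0);

// Or get only the projected corner points:
Point[] corners = DrawMatches.FindPoints(modelImage, observedImage, out matchTime);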

EDIT

I have managed to get some points out of the matches object like this:

Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

for (int i = 0; i < matches.Size; i++)
{
    var a = matches[i].ToArray();
    foreach (var e in a)
    {
        Point p = new Point(e.TrainIdx, e.QueryIdx);
        Console.WriteLine(string.Format("Point: {0}", p));
    }
    Console.WriteLine("-----------------------");
}

I thought this should give me the points. I managed to get it working in Python, and the code there is not very different. The problem is that far too many points are returned; in fact, it returns every value of Y.

Example:

(45, 1), (67, 1)

(656, 2), (77, 2)

...

It does not give me the points I want, even though I might be close. Any suggestions are appreciated.

EDIT 2: The question Find interest point in surf Detector Algorithm is very similar to what I need. There is only one answer, and it does not explain how to get the coordinates of the matched points. That is exactly what I need: if an object appears in both images, get the coordinates of the object's points from both images.

Best answer

The coordinates are not made up of TrainIdx and QueryIdx; those are indices into the KeyPoints collections. The following gives the pixel coordinates of the matches between the model image and the observed image:

for (int i = 0; i < matches.Size; i++)
{
    var arrayOfMatches = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0) continue;
    foreach (var match in arrayOfMatches)
    {
        var matchingModelKeyPoint = modelKeyPoints[match.TrainIdx];
        var matchingObservedKeyPoint = observedKeyPoints[match.QueryIdx];
        Console.WriteLine("Model coordinate '" + matchingModelKeyPoint.Point + "' matches observed coordinate '" + matchingObservedKeyPoint.Point + "'.");
    }
}

The number of items in arrayOfMatches equals the value of K. My understanding is that the match with the lowest distance is the best one.
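If the goal is to keep only the single closest match per keypoint and collect the coordinates as pairs, a minimal sketch along these lines should work (it reuses the variables from the loop above; OrderBy requires a using System.Linq directive):

// Sketch: keep the closest of the K matches per query keypoint,
// skip masked-out rows, and store the coordinates as (model, observed) pairs.
var pairs = new List<Tuple<PointF, PointF>>();
for (int i = 0; i < matches.Size; i++)
{
    if (mask.GetData(i)[0] == 0) continue;
    var arrayOfMatches = matches[i].ToArray();
    if (arrayOfMatches.Length == 0) continue;
    // ordering by Distance explicitly picks the best of the K candidates
    var best = arrayOfMatches.OrderBy(m => m.Distance).First();
    PointF modelPoint = modelKeyPoints[best.TrainIdx].Point;
    PointF observedPoint = observedKeyPoints[best.QueryIdx].Point;
    pairs.Add(Tuple.Create(modelPoint, observedPoint));
}
// pairs[j].Item1 is the model-image coordinate, pairs[j].Item2 the observed-image coordinate.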

Regarding "c# - Emgu CV SURF: getting the coordinates of matched points", a similar question was found on Stack Overflow: https://stackoverflow.com/questions/36269038/
