gpt4 book ai didi

python - 如何拼接使用广角镜头拍摄的重叠区域较小的图像?

转载 作者:行者123 更新时间:2023-12-04 13:24:47 24 4
gpt4 key购买 nike

我有一个带有广角镜头和移动物体的固定相机的系统。当物体以恒定速度移动时,我以 10 毫米的间隔和 2064x40 像素捕获了图像。此外,我在没有恒速的情况下拍摄了 2048x40 的图像。我想拼接这些捕获的图像。
首先引用link尝试了OpenCV的拼接方法.但是,我得到了错误代码 1,并且我了解到两个图像之间没有足够的重叠区域来拼接。
在那之后,我想我可以尝试连接恒速物体的图像。我使用了下面的代码,并将 13 px 作为移位参数。
我试过的代码:

import numpy as np
import cv2
import os

from Stitching.Blending import UVSSBlendingConcate
from Stitching.DistortionCorrection import load_coefficients


def load_images_from_folder(folder):
    """Load every readable image in ``folder``.

    Files are read in sorted filename order: ``os.listdir`` returns entries
    in arbitrary order, and these frames are sequential captures whose order
    matters for stitching.

    :param folder: directory containing the captured frames
    :return: list of BGR images (numpy arrays); unreadable files are skipped
    """
    print("\nImages are reading from folder: " + folder)
    images = []
    for filename in sorted(os.listdir(folder)):
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:
            images.append(img)
    return images


def unDistortImages(images):
    """Undistort each image in-place using the saved chessboard calibration.

    :param images: list of BGR images; entries are replaced with their
        undistorted versions
    :return: the same list object, for call-chaining convenience
    """
    mtx, dist = load_coefficients('calibration_chessboard.yml')
    for index, distorted in enumerate(images):
        images[index] = cv2.undistort(distorted, mtx, dist, None, None)
    return images


def LineTriggerConcate(dx, images, blending, IsFlip, IsUnDistorted):
    """Concatenate line-trigger strips of ``dx`` rows into one tall image.

    Each frame contributes rows [2, 2+dx) (optionally vertically flipped);
    the strips are either stacked directly or blended pairwise with an
    overlap of ``dx / 2``.

    :param dx: number of image rows the object advances between captures
    :param images: list of BGR frames (the last frame is not used,
        matching the capture scheme)
    :param blending: if True, blend consecutive strips with
        ``UVSSBlendingConcate``; otherwise plain ``cv2.vconcat``
    :param IsFlip: vertically flip every strip before stacking
    :param IsUnDistorted: undistort all frames first
    :return: the stitched image
    :raises ValueError: if blending is requested with no strips available
    """
    print("\nImage LineTrigger Concate Start")

    if IsUnDistorted:
        images = unDistortImages(images)

    cropped_images = []
    for i in range(len(images) - 1):
        strip = images[i][2:2 + dx, 0:2064]
        if IsFlip is True:
            strip = cv2.flip(strip, 0)  # flip around the x-axis
        cropped_images.append(strip)

    if not blending:
        result = cv2.vconcat(cropped_images)
        return result

    # Accumulate the blend in a local variable instead of the original
    # module-level ``global blendingResult``: the global could be stale or
    # undefined (NameError) when fewer than two strips are available.
    if not cropped_images:
        raise ValueError("blending requires at least one cropped strip")
    blendingResult = cropped_images[0]
    for nxt in cropped_images[1:]:
        blendingResult = UVSSBlendingConcate(blendingResult, nxt, dx / 2)

    print("\nImage LineTrigger Concate Finish")
    return blendingResult


def concateImages(image_list):
    """Vertically stack a list of equal-width images into a single image."""
    return cv2.vconcat(image_list)


def main():
    """Stitch the 10 mm interval dataset and write the result to disk."""
    images_path = "10mm"
    image_list = load_images_from_folder(images_path)

    # LineTriggerConcate parameters.
    shiftParameter = 13   # rows the object advances between captures
    IsBlending = False
    IsFlipped = True
    IsUnDistorted = False

    result = LineTriggerConcate(shiftParameter, image_list, IsBlending, IsFlipped, IsUnDistorted)

    output_file = "{}//{}_Shift_{}_Blending_Result.bmp".format(images_path, shiftParameter, IsBlending)
    cv2.imwrite(output_file, result)
    print('Successfully saved to %s' % images_path)


if __name__ == '__main__':
    main()
输出图像:
Result for 10mm dataset
a closer look at the problem
在上面的结果中,过渡不平滑,我尝试使用混合和不失真方法修复过渡,但没有成功。
另一方面,我假设物体的速度是恒定的,但不幸的是,在实际情况中并非如此。当物体有加速度时,图像的某些部分可能会被拉长或缩短。
有人可以建议任何方法或研究吗?
我也分享了 a part of 10mm intervals datasets .

最佳答案

这是一个更“形态丰富”的解决方案:
- 将图像转换为边缘
- 扩张边缘(以提高梯度下降搜索的灵活性)
- 找到每个图像到累积图像底部的最佳匹配偏移量
- 暂存偏移量,并将当前图像追加到累积图像上
- 使用 rgb 图像和存储的偏移量重建完整图像

#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <Windows.h>
#include <string>

using namespace cv;

// Sum of squared per-channel differences between two equally sized
// 3-channel images. Lower values mean a better match.
double imDiff(Mat mat1, Mat mat2)
{
    Mat channels1[3], channels2[3];
    cv::split(mat1, channels1);
    cv::split(mat2, channels2);

    double sumSquares = 0;
    for (int c = 0; c < 3; c++)
    {
        // Subtract into a signed float matrix: subtracting two 8-bit Mats
        // saturates negative differences to 0, silently discarding half
        // of the mismatch information.
        Mat dif;
        cv::subtract(channels1[c], channels2[c], dif, cv::noArray(), CV_32F);
        // Mat::mul returns a NEW matrix; the original wrote `dif.mul(dif);`
        // and discarded the result, so the differences were never squared.
        dif = dif.mul(dif);
        sumSquares += cv::sum(dif)[0];
    }
    return sumSquares;
}

// Build a match-friendly edge map: Canny edges dilated twice with a small
// cross-shaped kernel, so the later offset search tolerates slight
// misalignment between strips.
Mat autoCanny(Mat image)
{
    Mat edged;
    cv::Canny(image, edged, 75, 125);

    const int dilation_size = 1;
    const Mat element = cv::getStructuringElement(
        MORPH_CROSS,
        Size(2 * dilation_size + 1, 2 * dilation_size + 1),
        Point(dilation_size, dilation_size));

    Mat blurred;
    cv::dilate(edged, blurred, element, cv::Point(-1, -1), 2);
    //cv::GaussianBlur(edged, blurred, cv::Size(5, 5), 0);
    return blurred;
}

int main(int argc, char** argv)
{
    // Height of the strip compared at each candidate offset.
    const int compareHeight = 25;
    // Half-width of the central comparison stripe (edges are distorted by
    // the wide-angle lens, so only the middle of each row is compared).
    const int compareWidth = 350;

    // Best offset found for each frame; -1 marks a frame that was skipped.
    std::vector<int> offsets = std::vector<int>();

    // --- Pass 1: find per-frame offsets on dilated edge images ------------
    Mat image;
    bool firstLoop = true;
    for (int i = 140; i >= 53; i--) //140
    {
        std::string fileName = "C:/Users/thoma/Downloads/stitching-main/stitching-main/dataset_10mm/" + std::to_string(i) + ".bmp";
        Mat tempImage = imread(fileName, 1);
        tempImage = autoCanny(tempImage);
        if (firstLoop) { image = tempImage; firstLoop = false; }
        else
        {
            // Slide a strip of the new frame over the bottom strip of the
            // accumulated image; keep the offset with the lowest difference.
            double lowestNorm = -1;
            int index = -1;
            Mat refSlice = image(Rect(image.cols / 2 - compareWidth, image.rows - compareHeight, 2 * compareWidth, compareHeight));
            for (int ii = 0; ii < tempImage.rows - compareHeight; ii++)
            {
                Mat testSlice = tempImage(Rect(tempImage.cols / 2 - compareWidth, ii, 2 * compareWidth, compareHeight));
                double tempNorm = imDiff(refSlice, testSlice);
                if (lowestNorm == -1 || tempNorm < lowestNorm)
                {
                    lowestNorm = tempNorm;
                    index = ii;
                }
            }
            index += compareHeight; // new content starts below the matched strip
            std::cout << tempImage.rows - index << std::endl;
            if (tempImage.rows - index < 1)
            {
                // Frame fully overlaps the mosaic: record a sentinel so
                // pass 2 stays aligned with `offsets`. (The original pushed
                // nothing here, desynchronising pass 2 from the offsets and
                // eventually indexing past the end of the vector.)
                std::cout << "skipped" << std::endl;
                offsets.push_back(-1);
                continue;
            }

            //index = 32;
            offsets.push_back(index);

            Mat crop_img = tempImage(Rect(0, index, tempImage.cols, tempImage.rows - index));
            vconcat(image, crop_img, image);
        }
    }

    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", image);

    waitKey(0);

    // --- Pass 2: rebuild the mosaic from the original RGB frames using the
    // offsets stashed in pass 1 -------------------------------------------
    firstLoop = true;
    int offsetIndex = 0;
    for (int i = 140; i >= 53; i--) //140
    {
        std::string fileName = "C:/Users/thoma/Downloads/stitching-main/stitching-main/dataset_10mm/" + std::to_string(i) + ".bmp";
        Mat tempImage = imread(fileName, 1);

        if (firstLoop) { image = tempImage; firstLoop = false; }
        else
        {
            int offset = offsets[offsetIndex];
            offsetIndex++;
            if (offset < 0) { continue; } // frame was skipped in pass 1
            Mat crop_img = tempImage(Rect(0, offset, tempImage.cols, tempImage.rows - offset));
            vconcat(image, crop_img, image);
        }
    }

    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", image);

    waitKey(0);
    system("pause");
    return 0;
}
结果图片:
https://imgur.com/9dEXonn
笔记:
此方法使用图像的中心条纹。图像上似乎仍然存在向边缘增加的非平凡失真,因此该方法试图忽略它。这种方法容易受到缺乏水平边缘的影响(很确定大多数缝合方法都会如此)。这个策略(正如我写的那样)有很多“神奇变量”,即如果您计划部署此代码或以自动化容量运行它,您可能希望拨入和/或自动化这些旋钮。

关于python - 如何拼接使用广角镜头拍摄的重叠区域较小的图像?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/69199909/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com