c++ - Image stabilization


Hey, I'm working on a project that stabilizes a video sequence using optical flow. So far the optical flow itself works well, but there are two branches in front of me for what to do with it. 1 - After obtaining the optical flow, I compute the average displacement of the image and then subtract that average from the features of the second frame; my question is what to do next.

2 - Alternatively, I could use OpenCV functions to stabilize the image: I computed the transformation matrix and then used cvPerspectiveTransform followed by cvWarpPerspective, but I get an error that says "bad flag".
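
For context, cvGetPerspectiveTransform maps four point correspondences to a 3x3 matrix, while cvPerspectiveTransform and cvWarpPerspective expect CvMat or IplImage arguments rather than raw CvPoint2D32f arrays. A minimal sketch of that call pattern, with illustrative names that are not taken from the code below:

#include <cv.h>

/* Warp "curr" so that four matched feature points line up with their
   positions in the previous (reference) frame. All names are illustrative. */
static IplImage* warp_to_reference(IplImage *curr,
                                   const CvPoint2D32f curr_pts[4],
                                   const CvPoint2D32f prev_pts[4])
{
    CvMat *H = cvCreateMat(3, 3, CV_32FC1);

    /* Four point correspondences -> 3x3 matrix; CvPoint2D32f arrays are fine here. */
    cvGetPerspectiveTransform(curr_pts, prev_pts, H);

    /* cvWarpPerspective operates on whole images (CvArr*), not on point arrays. */
    IplImage *stabilized = cvCreateImage(cvGetSize(curr), curr->depth, curr->nChannels);
    cvWarpPerspective(curr, stabilized, H,
                      CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));

    cvReleaseMat(&H);
    return stabilized;
}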

You can look at the code below. What I want is to know how to stabilize the image; what solutions can you offer?

#include <stdio.h>
#include <stdlib.h>
//#include "/usr/include/opencv/cv.h"
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <math.h>
#include <iostream>

#define PI 3.1415926535898

double rads(double degs)
{
    return (PI/180 * degs);
}

CvCapture *cap;

IplImage *img;
IplImage *frame;
IplImage *frame1;
IplImage *frame3;
IplImage *frame2;
IplImage *temp_image1;
IplImage *temp_image2;
IplImage *frame1_1C;
IplImage *frame2_1C;
IplImage *eig_image;
IplImage *temp_image;
IplImage *pyramid1 = NULL;
IplImage *pyramid2 = NULL;

char * mapx;
char * mapy;

int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;

int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
    {{   0,   0, 255, 0 }}, // red
    {{   0, 255,   0, 0 }}, // green
    {{ 255,   0,   0, 0 }}, // blue
    {{   0, 255, 255, 0 }}  // yellow
};

inline static double square(int a)
{
    return a * a;
}

char* IntToChar(int num){return NULL;}

/*{
    char* retstr = static_cast<char*>(calloc(12, sizeof(char)));

    if (sprintf(retstr, "%i", num) > 0)
    {
        return retstr;
    }
    else
    {
        return NULL;
    }
}*/

inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL )
        return;

    *img = cvCreateImage( size, depth, channels );

    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
        exit(-1);
    }
}

void clearImage (IplImage *img)
{
    for (int i=0; i<img->imageSize; i++)
        img->imageData[i] = (char) 0;
}

int main()
{
    cap = cvCaptureFromCAM(0);
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");

    CvSize frame_size;

    // Reading the video's frame size
    frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    while(true)
    {
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        // Allocating another image if it is not allocated already.
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, 0);
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, 0);

        // Get the second frame of video.
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        if(!frame)
        {
            printf("bad video \n");
            exit(0);
        }

        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, 0);
        allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame2, 0);

        CvSize optical_flow_window = cvSize(5,5);
        eig_image = cvCreateImage( frame_size, IPL_DEPTH_32F, 1 );
        temp_image = cvCreateImage( frame_size, IPL_DEPTH_32F, 1 );

        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        // Feature tracking
        CvPoint2D32f frame1_features[4];
        CvPoint2D32f frame2_features[4];

        //cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );
        corner_count = 4;

        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1);
        cvFindCornerSubPix( frame1_1C, frame1_features, corner_count, cvSize(5, 5), optical_flow_window, optical_flow_termination_criteria );

        if ( corner_count <= 0 )
            printf( "\nNo features detected.\n" );
        else
            printf( "\nNumber of features found = %d\n", corner_count );

        // Lucas-Kanade method.
        char optical_flow_found_feature[20];
        float optical_flow_feature_error[20];

        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, corner_count, optical_flow_window, 5, optical_flow_found_feature, NULL, optical_flow_termination_criteria, NULL);

        /*
        double sumOfDistancesX = 0;
        double sumOfDistancesY = 0;

        int debug = 0;

        CvFont font1, font2;
        CvScalar red, green, blue;
        IplImage* seg_in = NULL;
        IplImage *seg_out = NULL;

        allocateOnDemand( &seg_in, frame_size, IPL_DEPTH_8U, 3 );
        allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );

        clearImage(seg_in);
        clearImage(seg_in);

        for( int i=0; i <corner_count; i++ )
        {
            if ( optical_flow_found_feature[i] == 0 )
                continue;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;
            angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

            sumOfDistancesX += q.x - p.x;
            sumOfDistancesY += q.y - p.y;

            //cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
        }
        */

        /*
        int averageDistanceX = sumOfDistancesX / corner_count;
        int averageDistanceY = sumOfDistancesY / corner_count;
        l.x = averageDistanceX - q.x;
        s.y = averageDistanceY - q.y;
        */

#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform

        //CvMat* N = cvCreateMat(3,3,CV_32FC1);

        // NOTE: the "bad flag" error comes from the next two calls:
        // cvPerspectiveTransform and cvWarpPerspective expect CvMat*/IplImage*
        // arguments, not raw CvPoint2D32f arrays.
        cvGetPerspectiveTransform(frame2_features, frame1_features, M);
        cvPerspectiveTransform(frame1_features, frame2_features, M);
        cvWarpPerspective( frame2_features, frame1_features, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );

        cvShowImage("Optical Flow", frame1);
        cvWaitKey(50);
    }

    cvReleaseCapture(&cap);
    cvReleaseMat(&M);

    return 0;
}

Best answer

You don't want to subtract the average displacement from the second image; you want to transform (shift) the second image by the average displacement so that it "matches" the first one. Which "displacement" you use depends on your situation.

  • If your camera is shaking but otherwise stationary, you want to use the average displacement between two consecutive frames as the transformation vector for the second frame. With every new frame, you compute the displacement between the transformed first frame and the new frame, and transform the new frame by it (see the sketch after this list).
  • If your camera is moving as well as shaking (e.g. a camera mounted on a mountain biker's helmet), you want to first find the average displacement between frames over the last several frames, and then shift each individual frame by the difference between that average displacement and its displacement from the previous frame.
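
A minimal sketch of the first case, assuming feature points have already been tracked with cvCalcOpticalFlowPyrLK as in the question; the function and variable names are illustrative, not taken from the original code:

#include <cv.h>

/* Shift the current frame by the mean displacement of the tracked features so
   that it lines up with the previous frame (purely translational model).
   prev_pts/curr_pts/status/count are the arrays filled by cvCalcOpticalFlowPyrLK. */
static void stabilize_by_mean_shift(IplImage *curr, IplImage *stabilized,
                                    const CvPoint2D32f *prev_pts,
                                    const CvPoint2D32f *curr_pts,
                                    const char *status, int count)
{
    double dx = 0, dy = 0;
    int used = 0;

    for (int i = 0; i < count; i++)
    {
        if (!status[i])            /* feature was lost in the current frame */
            continue;
        dx += curr_pts[i].x - prev_pts[i].x;
        dy += curr_pts[i].y - prev_pts[i].y;
        used++;
    }
    if (used == 0)
        return;                    /* nothing tracked, leave the frame as is */
    dx /= used;
    dy /= used;

    /* 2x3 affine matrix for a pure translation that undoes the mean motion. */
    float m[6] = { 1.f, 0.f, (float)-dx,
                   0.f, 1.f, (float)-dy };
    CvMat T = cvMat(2, 3, CV_32FC1, m);

    cvWarpAffine(curr, stabilized, &T,
                 CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
}

Here stabilized is assumed to be an already-allocated image of the same size and type as curr; for the "shaking but stationary" case a pure translation like this is usually enough.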

EDIT: For option 2, what you basically need to do is compute a running average of the movement between the last several frames. You could do this in a number of ways, but I would suggest using something like a Kalman filter. Then, for a new frame, you compute the movement between that frame and the (corrected) previous frame, subtract the average movement up to that point from it, and shift the new frame by that difference.
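
A simplified sketch of that bookkeeping, using an exponential running average in place of a full Kalman filter; the struct, the smoothing factor, and all names are assumptions for illustration, not part of the answer:

/* Running estimate of the intended camera motion. An exponential average is
   used here as a simple stand-in for the Kalman filter suggested above. */
typedef struct
{
    double avg_dx, avg_dy;   /* smoothed (intended) per-frame motion */
    double alpha;            /* smoothing factor in (0,1]; larger = trust new measurements more */
} MotionSmoother;

static void smoother_init(MotionSmoother *s, double alpha)
{
    s->avg_dx = 0.0;
    s->avg_dy = 0.0;
    s->alpha  = alpha;
}

/* Feed the measured displacement (dx, dy) between the corrected previous frame
   and the new frame; (corr_dx, corr_dy) receives the translation to apply to
   the new frame, i.e. only the part of the motion that deviates from the
   running average (the jitter). */
static void smoother_update(MotionSmoother *s, double dx, double dy,
                            double *corr_dx, double *corr_dy)
{
    /* Update the running average of the motion. */
    s->avg_dx = s->alpha * dx + (1.0 - s->alpha) * s->avg_dx;
    s->avg_dy = s->alpha * dy + (1.0 - s->alpha) * s->avg_dy;

    /* Compensate only for the deviation from the average motion. */
    *corr_dx = s->avg_dx - dx;
    *corr_dy = s->avg_dy - dy;
}

The resulting (corr_dx, corr_dy) can then be applied with the same translational cvWarpAffine call as in the previous sketch, so intentional camera motion is preserved while the high-frequency shake is removed.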

Regarding c++ - image stabilization, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/4247700/
