
c++ - What should I expect when using Eigen's dense matrix data structures?


I need a matrix data structure in my program. C++ has raw two-dimensional arrays, which are very low level, while libraries such as Eigen provide higher-level matrix data structures. In my view, however, no matter how well a library performs on sophisticated operations such as SVD, fast basic operations, including read (access), write, sum, and dot (scaling by a value), should be a prerequisite for such a library. In real applications these basic operations may be far more frequent than the sophisticated ones, and if a library is slow at them it can become a burden, or even a bottleneck, for the whole system.

So I wrote some very simple programs using raw 2D arrays and Eigen3 dense matrices (MatrixXd) and compared their performance on the four basic operations. It turns out that most of the time the 2D array beats Eigen3, which is quite disappointing. Some of my test results are listed below (the code is in the appendix at the end):

10000X10000 matrix, compile command: g++ -o test.o test.cpp -O0 -msse2

Eigen:
[!COST] init: 6.8 sec.
[!COST] read: 14.85 sec.
[!COST] write: 23.02 sec.
[!COST] sum: 3.28 sec.
[!COST] dot: 3.12 sec.

CPP:
[!COST] init: 1.81 sec.
[!COST] read: 2.4 sec.
[!COST] write: 3.4 sec.
[!COST] sum: 0.63 sec.
[!COST] dot: 0.52 sec.

10000X10000 matrix, compile command: g++ -o test.o test.cpp -O3 -msse2

Eigen:
[!COST] init: 2.44 sec.
[!COST] read: 2.16 sec.
[!COST] write: 2.18 sec.
[!COST] sum: 0.26 sec.
[!COST] dot: 0.26 sec.

CPP:
[!COST] init: 1.71 sec.
[!COST] read: 2.06 sec.
[!COST] write: 2.24 sec.
[!COST] sum: 0.15 sec.
[!COST] dot: 0.06 sec.

However, I still have some doubts about this. Maybe I should not expect a higher-level matrix abstraction to be as fast as its raw counterpart; if that is so, what should I expect when using a library such as Eigen? Note that my program needs some sophisticated operations, such as SVD, as well as more basic ones, such as reading from and writing to a matrix.
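For reference, here is a minimal sketch (my own illustration, not part of the original post; the helper names eigen_sum and eigen_scale are hypothetical) of how the sum and scaling tests could be written with Eigen's expression API instead of element-by-element loops, which is what lets Eigen apply its internal vectorization:

#include <Eigen/Dense>
using Eigen::MatrixXf;

// Hypothetical helpers for illustration only (not from the appendix code):
// they use Eigen's whole-matrix expressions rather than explicit loops.
inline float eigen_sum(const MatrixXf &m)       // whole-matrix reduction
{
    return m.sum();                             // Eigen vectorizes this internally
}

inline void eigen_scale(MatrixXf &m, float val) // in-place scaling (the "dot" test)
{
    m *= val;                                   // expression-level scalar multiply
}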

Appendix, test.cpp:

#include <iostream>
#include <Eigen/Dense>
#include <ctime>
#include <cstdlib>   // rand(), srand()
using Eigen::MatrixXf;

inline int cpp_testor_read(float **m, const int M, const int N)
{
    float randomTmp = 0;
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            randomTmp += m[i][j];
            randomTmp -= m[j][i];
        }
    return randomTmp;
}

inline int eigen_testor_read(MatrixXf m, const int M, const int N)
{
    float randomTmp = 0;
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            randomTmp += m(i, j);
            randomTmp -= m(j, i);
        }
    return randomTmp;
}

inline int cpp_testor_write(float **m, const int M, const int N)
{
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            m[i][j] += m[j][i];
            m[j][i] -= m[i][j];
        }
    return m[rand()%10000][rand()%10000];
}

inline int eigen_testor_write(MatrixXf m, const int M, const int N)
{
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            m(i, j) += m(j, i);
            m(j, i) -= m(i, j);
        }
    return m(rand()%10000, rand()%10000);
}

inline int cpp_testor_sum(float **m, const int M, const int N, float val)
{
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            m[i][i] += m[i][j];
        }
    return m[rand()%1000][rand()%1000];
}

inline int eigen_testor_sum(MatrixXf m, const int M, const int N, float val)
{
    m += m;
    return m(0, 0);
}

inline int cpp_testor_dot(float **m, const int M, const int N, float val)
{
    float randomTmp = 0;
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            m[i][j] *= val;
        }
    return m[rand()%1000][rand()%1000];
}

inline int eigen_testor_dot(MatrixXf m, const int M, const int N, float val)
{
    m *= val;
    return m(0, 0);
}

float** cpp_generator_mtarix(const int M, const int N)
{
    float **m = new float*[M];
    for (int i = 0; i < M; i ++)
        m[i] = new float[N];
    return m;
}

MatrixXf& eigen_generator_matrix(const int M, const int N)
{
    static MatrixXf m(M,N);
    return m;
}

int main()
{
    const int M = 10000;
    const int N = M;
    int antiopt = 0;
    srand(time(NULL));
    float val1 = rand()%10000 + 1;
    float val2 = rand()%10000 + 1;
    std::cout<< M << " " << N << std::endl;

    std::cout<<"Eigen:" << std::endl;
    size_t t = clock();
    //MatrixXf m = eigen_generator_matrix(M, N);
    MatrixXf m(M,N);
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
            m(i,j) = rand()%1000 + 1;
    t = clock() - t;
    std::cout<< "[!COST] init: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += eigen_testor_read(m,M,N);
    t = clock() - t;
    std::cout<< "[!COST] read: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += eigen_testor_write(m,M,N);
    t = clock() - t;
    std::cout<< "[!COST] write: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += eigen_testor_sum(m,M,N, val1);
    t = clock() - t;
    std::cout<< "[!COST] sum: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += eigen_testor_dot(m,M,N, val2);
    t = clock() - t;
    std::cout<< "[!COST] dot: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    std::cout<<"CPP:" << std::endl;
    t = clock();
    //float **mm = cpp_generator_mtarix(M, N);
    float **mm = new float*[M];
    for (int i = 0; i < M; i ++)
        mm[i] = new float[N];
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
            mm[i][j] = rand()%1000 + 1;
    t = clock() - t;
    std::cout<< "[!COST] init: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += cpp_testor_read(mm,M,N);
    t = clock() - t;
    std::cout<< "[!COST] read: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += cpp_testor_write(mm,M,N);
    t = clock() - t;
    std::cout<< "[!COST] write: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += cpp_testor_sum(mm,M,N, val1);
    t = clock() - t;
    std::cout<< "[!COST] sum: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    t = clock();
    antiopt += cpp_testor_dot(mm,M,N, val2);
    t = clock() - t;
    std::cout<< "[!COST] dot: " << t/float(CLOCKS_PER_SEC) << " sec." <<std::endl;

    std::cout<<antiopt<<std::endl;
}

Best Answer

In the Eigen test functions you pass the matrix by value, which means it has to be copied. The time for these (large) copies is included in the benchmark.

You should instead pass the matrix by reference, which avoids the copying overhead and gives you the same semantics as the array version. With this change I get results like the following, which sound reasonably fast to me:

10000 10000
Eigen:
[!COST] init: 3.5 sec.
[!COST] read: 2.98 sec.
[!COST] write: 3.03 sec.
[!COST] sum: 0.06 sec.
[!COST] dot: 0.07 sec.
CPP:
[!COST] init: 1.46 sec.
[!COST] read: 3.41 sec.
[!COST] write: 3.57 sec.
[!COST] sum: 0.14 sec.
[!COST] dot: 0.05 sec.

(Also note that benchmarking with -O0 is pointless: you are explicitly telling the compiler not to make it fast.)
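For concreteness, here is a minimal sketch of the by-reference change for two of the test functions from the appendix (only the signatures change; the loop bodies stay exactly as posted):

// Pass the matrix by (const) reference so no 10000x10000 copy is made per call.
inline int eigen_testor_read(const MatrixXf &m, const int M, const int N)
{
    float randomTmp = 0;
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            randomTmp += m(i, j);
            randomTmp -= m(j, i);
        }
    return randomTmp;
}

// The write test mutates the matrix, so it takes a non-const reference.
inline int eigen_testor_write(MatrixXf &m, const int M, const int N)
{
    for (int i = 0; i < M; i ++)
        for (int j = 0; j < N; j ++)
        {
            m(i, j) += m(j, i);
            m(j, i) -= m(i, j);
        }
    return m(rand()%10000, rand()%10000);
}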

Regarding "c++ - What should I expect when using Eigen's dense matrix data structures?", the original question can be found on Stack Overflow: https://stackoverflow.com/questions/17438921/
