- mongodb - 在 MongoDB mapreduce 中,如何展平值对象?
- javascript - 对象传播与 Object.assign
- html - 输入类型 ="submit"Vs 按钮标签它们可以互换吗?
- sql - 使用 MongoDB 而不是 MS SQL Server 的优缺点
我发现了一篇有趣的 Gamasutra 文章,讨论 SIMD 陷阱。文中指出,使用包装类型不可能达到“纯” __m128
类型的性能。好吧,我对此持怀疑态度,所以我下载了项目文件并制作了一个类似的测试用例。
结果(出乎我的意料)包装器版本要慢得多。由于我不想只谈论稀薄的空气,因此测试用例如下:
第一种情况:Vec4 是 __m128 的简单别名,并附带一些运算符:
#include <xmmintrin.h>
#include <emmintrin.h>
using Vec4 = __m128;
// Broadcast a single float into all four lanes of an SSE register.
inline __m128 VLoad(float f)
{
    return _mm_set_ps(f, f, f, f);
}
// Arithmetic operators for the plain-alias case (Vec4 == __m128).
// NOTE(review): overloading operators on __m128 is only well-formed where
// __m128 is a class type (MSVC); on GCC/Clang __m128 is a built-in vector
// type with native operators — confirm the target compiler.
inline Vec4& operator+=(Vec4 &va, Vec4 vb)
{
    // Compound add; returns va so the expression yields the updated value.
    return (va = _mm_add_ps(va, vb));
}

inline Vec4& operator*=(Vec4 &va, Vec4 vb)
{
    // Compound lane-wise multiply.
    return (va = _mm_mul_ps(va, vb));
}

inline Vec4 operator+(Vec4 va, Vec4 vb)
{
    return _mm_add_ps(va, vb);
}

inline Vec4 operator-(Vec4 va, Vec4 vb)
{
    return _mm_sub_ps(va, vb);
}

inline Vec4 operator*(Vec4 va, Vec4 vb)
{
    return _mm_mul_ps(va, vb);
}
第二种情况:Vec4 是围绕 __m128 的轻量级包装器,并以 const 引用的方式传入函数:
#include <xmmintrin.h>
#include <emmintrin.h>
// Lightweight wrapper around __m128. All special members are defaulted, so
// the type stays trivially copyable; the implicit conversions let it be
// passed straight to the _mm_* intrinsics in place of a raw __m128.
struct Vec4
{
__m128 simd;
// Defaulted special members — no user-defined copy/move machinery.
inline Vec4() = default;
inline Vec4(const Vec4&) = default;
inline Vec4& operator=(const Vec4&) = default;
// Implicit construction from the raw SIMD type (intentionally not explicit).
inline Vec4(__m128 s)
: simd(s)
{}
// Implicit read-only conversion back to __m128.
inline operator __m128() const
{
return simd;
}
// Mutable conversion so intrinsic results can be written through the wrapper.
inline operator __m128&()
{
return simd;
}
};
// Broadcast one scalar across all four SSE lanes.
inline __m128 VLoad(float f)
{
    return _mm_set1_ps(f);
}
// Vector helpers for the wrapper case. Arguments arrive as const Vec4& and
// are handed to the intrinsics through Vec4's implicit __m128 conversion;
// the result converts back through the implicit Vec4(__m128) constructor.
inline Vec4 VAdd(const Vec4 &va, const Vec4 &vb)
{
    return _mm_add_ps(va, vb);
    // return _mm_add_ps(va.simd, vb.simd); // doesn't make difference
}

inline Vec4 VSub(const Vec4 &va, const Vec4 &vb)
{
    return _mm_sub_ps(va, vb);
    // return _mm_sub_ps(va.simd, vb.simd); // doesn't make difference
}

inline Vec4 VMul(const Vec4 &va, const Vec4 &vb)
{
    return _mm_mul_ps(va, vb);
    // return _mm_mul_ps(va.simd, vb.simd); // doesn't make difference
}
两种 Vec4 在完全相同的测试代码下,却产生了不同的性能:
#include <xmmintrin.h>
#include <emmintrin.h>
// Filter and gain state for the 3-band EQ in TestEQ.
// Every member is a 16-byte Vec4, so field offsets advance in steps of 16 —
// these are the +16/+32/... offsets visible in the assembly listings below.
// Do not reorder members: the listings reference the exact layout.
struct EQSTATE
{
// Filter #1 (Low band)
Vec4 lf; // Frequency
Vec4 f1p0; // Poles ...
Vec4 f1p1;
Vec4 f1p2;
Vec4 f1p3;
// Filter #2 (High band)
Vec4 hf; // Frequency
Vec4 f2p0; // Poles ...
Vec4 f2p1;
Vec4 f2p2;
Vec4 f2p3;
// Sample history buffer
Vec4 sdm1; // Sample data minus 1
Vec4 sdm2; // 2
Vec4 sdm3; // 3
// Gain Controls
Vec4 lg; // low gain
Vec4 mg; // mid gain
Vec4 hg; // high gain
};
// Tiny bias added into each filter stage so intermediate values never decay
// into the (slow) denormal float range.
static float vsaf = (1.0f / 4294967295.0f); // Very small amount (Denormal Fix)
// Broadcast to all four lanes once at startup (dynamic initialization).
static Vec4 vsa = VLoad(vsaf);
// 3-band equalizer kernel, SIMD form — each Vec4 lane presumably carries an
// independent sample stream (TODO confirm channel layout with caller).
// Band split: four cascaded one-pole lowpass stages produce the low band; a
// second identical chain plus a delayed-input subtraction produces the high
// band; the mid band is the remainder. Mutates the filter/history state in
// *es and returns the gain-weighted sum of the three bands.
// The commented-out VAdd/VSub/VMul lines are the equivalent calls for the
// wrapper API; both spellings compile to the same intrinsics.
Vec4 TestEQ(EQSTATE* es, Vec4& sample)
{
// Locals
Vec4 l,m,h; // Low / Mid / High - Sample Values
// Filter #1 (lowpass)
es->f1p0 += (es->lf * (sample - es->f1p0)) + vsa; // vsa keeps the pole out of denormal range
//es->f1p0 = VAdd(es->f1p0, VAdd(VMul(es->lf, VSub(sample, es->f1p0)), vsa));
es->f1p1 += (es->lf * (es->f1p0 - es->f1p1));
//es->f1p1 = VAdd(es->f1p1, VMul(es->lf, VSub(es->f1p0, es->f1p1)));
es->f1p2 += (es->lf * (es->f1p1 - es->f1p2));
//es->f1p2 = VAdd(es->f1p2, VMul(es->lf, VSub(es->f1p1, es->f1p2)));
es->f1p3 += (es->lf * (es->f1p2 - es->f1p3));
//es->f1p3 = VAdd(es->f1p3, VMul(es->lf, VSub(es->f1p2, es->f1p3)));
l = es->f1p3; // low band = output of the last lowpass pole
// Filter #2 (highpass)
es->f2p0 += (es->hf * (sample - es->f2p0)) + vsa;
//es->f2p0 = VAdd(es->f2p0, VAdd(VMul(es->hf, VSub(sample, es->f2p0)), vsa));
es->f2p1 += (es->hf * (es->f2p0 - es->f2p1));
//es->f2p1 = VAdd(es->f2p1, VMul(es->hf, VSub(es->f2p0, es->f2p1)));
es->f2p2 += (es->hf * (es->f2p1 - es->f2p2));
//es->f2p2 = VAdd(es->f2p2, VMul(es->hf, VSub(es->f2p1, es->f2p2)));
es->f2p3 += (es->hf * (es->f2p2 - es->f2p3));
//es->f2p3 = VAdd(es->f2p3, VMul(es->hf, VSub(es->f2p2, es->f2p3)));
h = es->sdm3 - es->f2p3; // high band = delayed input minus its lowpassed form
//h = VSub(es->sdm3, es->f2p3);
// Calculate midrange (signal - (low + high))
m = es->sdm3 - (h + l);
//m = VSub(es->sdm3, VAdd(h, l));
// Scale, Combine and store
l *= es->lg;
m *= es->mg;
h *= es->hg;
//l = VMul(l, es->lg);
//m = VMul(m, es->mg);
//h = VMul(h, es->hg);
// Shuffle history buffer (3-sample delay line, newest in sdm1)
es->sdm3 = es->sdm2;
es->sdm2 = es->sdm1;
es->sdm1 = sample;
// Return result
return(l + m + h);
//return(VAdd(l, VAdd(m, h)));
}
// Globals (not locals) so the optimizer cannot fold the buffers away and a
// real call to TestEQ is enforced inside the timing loop.
static Vec4 sample[1024], result[1024];
static EQSTATE es;
#include <chrono>
#include <iostream>
// Timing harness: run TestEQ across the whole sample buffer once and report
// the elapsed wall-clock time in nanoseconds.
int main()
{
    const auto start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < 1024; ++i)
    {
        result[i] = TestEQ(&es, sample[i]);
    }
    const auto stop = std::chrono::high_resolution_clock::now();
    const auto elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    std::cout << "timing: " << elapsed << '\n';
    std::cin.get(); // keep the console window open
    return 0;
}
; COMDAT ?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z
_TEXT SEGMENT
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z PROC ; TestEQ, COMDAT
; _es$dead$ = ecx
; _sample$ = edx
vmovaps xmm0, XMMWORD PTR [edx]
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
vmulps xmm0, xmm0, xmm2
vaddps xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
vmovaps xmm0, XMMWORD PTR [edx]
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
vsubps xmm2, xmm1, xmm0
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
vmovaps xmm0, XMMWORD PTR [edx]
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
vaddps xmm0, xmm4, xmm2
vsubps xmm0, xmm1, xmm0
vmulps xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
vmulps xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
vaddps xmm1, xmm1, xmm0
vmulps xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
vaddps xmm0, xmm1, xmm0
ret 0
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z ENDP ; TestEQ
?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z PROC ; TestEQ, COMDAT
; ___$ReturnUdt$ = ecx
; _es$dead$ = edx
push ebx
mov ebx, esp
sub esp, 8
and esp, -8 ; fffffff8H
add esp, 4
push ebp
mov ebp, DWORD PTR [ebx+4]
mov eax, DWORD PTR _sample$[ebx]
vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
mov DWORD PTR [esp+4], ebp
vmovaps xmm0, XMMWORD PTR [eax]
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
vmulps xmm0, xmm0, xmm2
vaddps xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
vmovaps xmm0, XMMWORD PTR [eax]
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
vsubps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
vmulps xmm0, xmm0, xmm2
vaddps xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
vsubps xmm2, xmm1, xmm0
vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
vaddps xmm0, xmm2, xmm4
vsubps xmm0, xmm1, xmm0
vmulps xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
vmovdqu xmm0, XMMWORD PTR [eax]
vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
vmulps xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
vaddps xmm1, xmm0, xmm1
vmulps xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
vaddps xmm0, xmm1, xmm0
vmovaps XMMWORD PTR [ecx], xmm0
mov eax, ecx
pop ebp
mov esp, ebx
pop ebx
ret 0
?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z ENDP ; TestEQ
"?TestEQ@@YAT__m128@@PAUEQSTATE@@AAT1@@Z": # @"\01?TestEQ@@YAT__m128@@PAUEQSTATE@@AAT1@@Z"
Lfunc_begin0:
Ltmp0:
# BB#0: # %entry
movl 8(%esp), %eax
movl 4(%esp), %ecx
vmovaps _vsa, %xmm0
vmovaps (%ecx), %xmm1
vmovaps 16(%ecx), %xmm2
vmovaps (%eax), %xmm3
vsubps %xmm2, %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm3, %xmm0, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 16(%ecx)
vmovaps 32(%ecx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 32(%ecx)
vmovaps 48(%ecx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 48(%ecx)
vmovaps 64(%ecx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 64(%ecx)
vmovaps 80(%ecx), %xmm2
vmovaps 96(%ecx), %xmm3
vmovaps (%eax), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vmulps %xmm4, %xmm2, %xmm4
vaddps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmovaps %xmm0, 96(%ecx)
vmovaps 112(%ecx), %xmm3
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmovaps %xmm0, 112(%ecx)
vmovaps 128(%ecx), %xmm3
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmovaps %xmm0, 128(%ecx)
vmovaps 144(%ecx), %xmm3
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmovaps %xmm0, 144(%ecx)
vmovaps 192(%ecx), %xmm2
vsubps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm1, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps 208(%ecx), %xmm1, %xmm1
vmulps 224(%ecx), %xmm2, %xmm2
vmulps 240(%ecx), %xmm0, %xmm0
vmovaps 176(%ecx), %xmm3
vmovaps %xmm3, 192(%ecx)
vmovaps 160(%ecx), %xmm3
vmovaps %xmm3, 176(%ecx)
vmovaps (%eax), %xmm3
vmovaps %xmm3, 160(%ecx)
vaddps %xmm2, %xmm0, %xmm0
vaddps %xmm0, %xmm1, %xmm0
retl
Lfunc_end0:
"?TestEQ@@YA?AUVec4@@PAUEQSTATE@@AAU1@@Z": # @"\01?TestEQ@@YA?AUVec4@@PAUEQSTATE@@AAU1@@Z"
Lfunc_begin0:
Ltmp0:
# BB#0: # %entry
movl 12(%esp), %ecx
movl 8(%esp), %edx
vmovaps (%edx), %xmm0
vmovaps 16(%edx), %xmm1
vmovaps (%ecx), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm0, %xmm2, %xmm2
vaddps _vsa, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 16(%edx)
vmovaps 32(%edx), %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm0, %xmm1, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 32(%edx)
vmovaps 48(%edx), %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm0, %xmm1, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 48(%edx)
vmovaps 64(%edx), %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 64(%edx)
vmovaps 80(%edx), %xmm1
vmovaps 96(%edx), %xmm2
vmovaps (%ecx), %xmm3
vsubps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm3, %xmm3
vaddps _vsa, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 96(%edx)
vmovaps 112(%edx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 112(%edx)
vmovaps 128(%edx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 128(%edx)
vmovaps 144(%edx), %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 144(%edx)
vmovaps 192(%edx), %xmm2
vsubps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm0, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps 208(%edx), %xmm0, %xmm0
vmulps 224(%edx), %xmm2, %xmm2
movl 4(%esp), %eax
vmulps 240(%edx), %xmm1, %xmm1
vmovaps 176(%edx), %xmm3
vmovaps %xmm3, 192(%edx)
vmovaps 160(%edx), %xmm3
vmovaps %xmm3, 176(%edx)
vmovaps (%ecx), %xmm3
vmovaps %xmm3, 160(%edx)
vaddps %xmm2, %xmm0, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, (%eax)
retl
Lfunc_end0:
请注意第二个 MSVC 汇编中多出的 vmovdqu 指令。构造函数、复制赋值运算符和按引用传递也会不必要地把数据从 SSE 寄存器移回内存,但我所有试图解决或准确定位问题的尝试都没有成功。
我的问题是:如何让包装器版本达到与裸 __m128 相同的性能,即无论是什么导致了开销,都把它消除掉?
最佳答案
事实证明,问题不在于用户定义的 struct Vec4
.
它与 x86 调用约定密切相关。
Visual C++ 中默认的 x86 调用约定是 __cdecl,它会:
Pushes parameters on the stack, in reverse order (right to left)
而理想情况下,Vec4 应该保存在 XMM 寄存器中并通过寄存器传递。但让我们看看实际发生了什么。
Vec4
是
__m128
的简单类型别名.
using Vec4 = __m128;
/* ... */
Vec4 TestEQ(EQSTATE* es, Vec4 &sample) { ... }
TestEQ 生成的汇编函数头是:
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z PROC ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...
Vec4
不是
__m128
的别名,它现在是用户定义的类型。
__cdecl
(这是 x86 中的默认调用约定)不允许将对齐的值传递给函数(会发出
Error C2719: 'sample': formal parameter with requested alignment of 16 won't be aligned
)我们通过
const
传递它引用。
struct Vec4{ __m128 simd; /* ... */ };
/* ... */
Vec4 TestEQ(EQSTATE* es, const Vec4 &sample) { ... }
此时 TestEQ 生成的汇编函数头是:
?TestEQ@@YA?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC ; TestEQ, COMDAT
; ___$ReturnUdt$ = ecx
; _es$ = edx
push ebx
mov ebx, esp
sub esp, 8
and esp, -8 ; fffffff8H
add esp, 4
push ebp
mov ebp, DWORD PTR [ebx+4]
mov eax, DWORD PTR _sample$[ebx]
...
同样注意散布在前几条 SSE 指令之间的 mov 指令(此处未全部列出)。总的来说,这些指令足以在一定程度上影响性能。
The x64 Application Binary Interface (ABI) is a 4 register fast-call calling convention, with stack-backing for those registers. There is a strict one-to-one correspondence between arguments in a function, and the registers for those arguments. Any argument that doesn’t fit in 8 bytes, or is not 1, 2, 4, or 8 bytes, must be passed by reference. (...) All floating point operations are done using the 16 XMM registers. The arguments are passed in registers RCX, RDX, R8, and R9. If the argumentsare float/double, they are passed in XMM0L, XMM1L, XMM2L, and XMM3L. 16 byte arguments are passed by reference.
The Microsoft x64 calling convention is followed on Windows and pre-boot UEFI (for long mode on x86-64). It uses registers RCX, RDX, R8, R9 for the first four integer or pointer arguments (in that order), and XMM0, XMM1, XMM2, XMM3 are used for floating point arguments. Additional arguments are pushed onto the stack (right to left). Integer return values (similar to x86) are returned in RAX if 64 bits or less. Floating point return values are returned in XMM0.
此时 TestEQ 生成的汇编函数头是:
?TestEQ@@YQ?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...
inline
函数。
__vectorcall
Visual Studio 2013 及更高版本中的约定(适用于 x86 和 x64 模式)。这与默认的 Windows x64 调用约定非常相似,但具有更多可利用的寄存器。
__vectorcall
重写第二个案例:
Vec4 __vectorcall TestEQ(EQSTATE* es, const Vec4 &sample) { ... }
此时 TestEQ 生成的汇编函数头是:
?TestEQ@@YQ?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...
__vectorcall
,
Vec4
参数应该按值传递,而不是常量引用。为此,传递的类型应该满足一些要求,比如它必须是可简单复制构造的(没有用户定义的复制构造函数)并且不应该包含任何 union 。更多信息在下面的评论和
here .
据推测,MSVC 在检测到 __m128 参数时,会把 __vectorcall 约定当作一种优化来使用;否则它使用默认的调用约定 __cdecl(您可以通过编译器选项更改此行为)。
另注:GCC 和 Clang 在 -O2 下会直接把 TestEQ 函数内联进测试循环体(见链接)。也有可能它们会比 MSVC 更聪明,并且它们会更好地优化函数调用。
关于c++ - 与裸 __m128 相比,SSE vector 包装器类型的性能,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/36833462/
我是一名优秀的程序员,十分优秀!