I'm working with some C++ code that has an optimized version written using inline assembly. The optimized version exhibits behavior that is not thread safe, which can be traced back to 3 global variables that are accessed extensively from inside the assembly:
__attribute__ ((aligned (16))) unsigned int SHAVITE_MESS[16];
__attribute__ ((aligned (16))) thread_local unsigned char SHAVITE_PTXT[8*4];
__attribute__ ((aligned (16))) unsigned int SHAVITE_CNTS[4] = {0,0,0,0};
asm ("movaps xmm0, SHAVITE_PTXT[rip]");
asm ("movaps xmm1, SHAVITE_PTXT[rip+16]");
asm ("movaps xmm3, SHAVITE_CNTS[rip]");
asm ("movaps xmm4, SHAVITE256_XOR2[rip]");
asm ("pxor xmm2, xmm2");
Looking at how the compiler itself accesses a thread_local variable, the generated code looks like:
mov eax, DWORD PTR fs:num1@tpoff
so I tried modifying the code to do the same:
asm ("movaps xmm0, fs:SHAVITE_PTXT@tpoff");
asm ("movaps xmm1, fs:SHAVITE_PTXT@tpoff+16");
asm ("movaps xmm3, fs:SHAVITE_CNTS@tpoff");
asm ("movaps xmm4, fs:SHAVITE256_XOR2@tpoff");
asm ("pxor xmm2, xmm2");
But if I look at the output compiled with -m32, I get:
mov eax, DWORD PTR gs:num1@ntpoff
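One way to make this portable across -m64 and -m32 is to stop hard-coding the TLS offset altogether and instead pass the variable to the asm as a memory operand, letting the compiler form the address. A minimal sketch, assuming GCC extended asm (AT&T syntax, so the source operand comes first):

__attribute__((aligned(16))) thread_local unsigned char SHAVITE_PTXT[8*4];

void load_ptxt(void)
{
    /* The "m" constraint makes the compiler emit the correct TLS addressing
       mode itself: fs:...@tpoff under -m64, gs:...@ntpoff under -m32. */
    asm volatile ("movaps %0, %%xmm0"
                  : /* no outputs */
                  : "m" (*(const unsigned char (*)[16])SHAVITE_PTXT)
                  : "xmm0");
}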
Best Answer
As the other answer says, the inline assembly is a mess and is being misused. Rewriting it with intrinsics should be just as good, and it lets you compile with or without -mavx (or -march=haswell, or -march=znver1, or whatever), letting the compiler save a bunch of register-copy instructions. It also allows the compiler to optimize (vector) register allocation and decide when to load/store, which is something compilers are very good at.
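As a small illustration (mine, not from the original code): the hand-written asm has to spell out a scratch copy before every shift, while the intrinsic version leaves that decision to the register allocator:

#include <emmintrin.h>

/* The asm pattern
       movaps xmm6, xmm11
       psrldq xmm6, 4
   forces an explicit copy into xmm6. With intrinsics the copy is implicit,
   and gcc can drop it or use a non-destructive AVX encoding instead: */
static inline __m128i shifted_copy(__m128i x11)
{
    return _mm_srli_si128(x11, 4); /* byte-shift right by 4 bytes */
}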
Now, I wasn't able to use the test data you supplied. It uses several other routines that weren't provided here, and I'm too lazy to go hunting for them. That said, I was able to throw together some test data of my own, and my E256() returns the same values as yours. That doesn't mean I've got it 100% right (you'll want to do your own testing), but given all the xor/aesenc of everything against everything over and over, if something were wrong, I'd expect it to show.
Converting to intrinsics wasn't particularly difficult. Mostly you just need to find the equivalent _mm_ function for a given asm instruction. That, and catching the places where you typed x12 when you meant x13 (grrr).
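For reference, here is the rough instruction-to-intrinsic correspondence this conversion leans on (my own summary; consult the Intel intrinsics guide for exact semantics):

movaps xmmA, [mem]      ->  _mm_load_si128()   (_mm_loadu_si128() if unaligned)
movaps [mem], xmmA      ->  _mm_store_si128()  (_mm_storeu_si128() if unaligned)
movaps xmmA, xmmB       ->  plain C assignment: a = b;
pxor xmmA, xmmA         ->  _mm_setzero_si128()
pxor xmmA, xmmB         ->  _mm_xor_si128()
psrldq xmmA, imm        ->  _mm_srli_si128()
pslldq xmmA, imm        ->  _mm_slli_si128()
pshufd xmmA, xmmB, imm  ->  _mm_shuffle_epi32()
pshufb xmmA, xmmB       ->  _mm_shuffle_epi8()   (SSSE3)
aesenc xmmA, xmmB       ->  _mm_aesenc_si128()   (AES-NI)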
Note that while this code uses variables named x0-x15, that's only because it made the translation easier. There is no correlation between these C variable names and the registers gcc will actually use when compiling the code. Also, gcc applies a lot of knowledge about SSE to reorder instructions, so the output (especially at -O3) looks very different from the original asm. If you think you can compare the two to check for correctness (like I did), expect to be frustrated.
This code contains both the original routines (prefixed with "old") and the new ones, and calls both from main() to see whether they produce the same output. I made no effort to tweak the intrinsics to try to optimize anything; once it worked, I stopped. I'll leave any further improvements to you, since it's all plain C code now.
That said, gcc is able to optimize intrinsics (something it cannot do for asm). That means if you recompile this code with -mavx2, the generated code will be quite different.
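For instance (my own illustration, not verified output from this exact source), one and the same intrinsic call can be encoded either way depending on the flags:

/* Same source line: */
x7 = _mm_xor_si128(x8, x6);
/* -msse4.2 must copy first, then do the destructive 2-operand XOR:
       movaps xmm7, xmm8
       pxor   xmm7, xmm6
   -mavx2 gets it in one non-destructive 3-operand instruction:
       vpxor  xmm7, xmm8, xmm6
   which is where the saved register-copy instructions come from. */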
Some stats:
// Compile with -O3 -msse4.2 -maes
// or -O3 -msse4.2 -maes -mavx2
#include <wmmintrin.h>
#include <x86intrin.h>
#include <stdio.h>
///////////////////////////
#define tos(a) #a
#define tostr(a) tos(a)
#define rev_reg_0321(j){ asm ("pshufb xmm" tostr(j)", [oldSHAVITE_REVERSE]"); }
#define replace_aes(i, j){ asm ("aesenc xmm" tostr(i)", xmm" tostr(j)""); }
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_MESS[16];
__attribute__ ((aligned (16))) unsigned char oldSHAVITE_PTXT[8*4];
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_CNTS[4] = {0,0,0,0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_REVERSE[4] = {0x07060504, 0x0b0a0908, 0x0f0e0d0c, 0x03020100 };
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR2[4] = {0x0, 0xFFFFFFFF, 0x0, 0x0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR3[4] = {0x0, 0x0, 0xFFFFFFFF, 0x0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR4[4] = {0x0, 0x0, 0x0, 0xFFFFFFFF};
#define oldmixing() do {\
asm("movaps xmm11, xmm15");\
asm("movaps xmm10, xmm14");\
asm("movaps xmm9, xmm13");\
asm("movaps xmm8, xmm12");\
\
asm("movaps xmm6, xmm11");\
asm("psrldq xmm6, 4");\
asm("pxor xmm8, xmm6");\
asm("movaps xmm6, xmm8");\
asm("pslldq xmm6, 12");\
asm("pxor xmm8, xmm6");\
\
asm("movaps xmm7, xmm8");\
asm("psrldq xmm7, 4");\
asm("pxor xmm9, xmm7");\
asm("movaps xmm7, xmm9");\
asm("pslldq xmm7, 12");\
asm("pxor xmm9, xmm7");\
\
asm("movaps xmm6, xmm9");\
asm("psrldq xmm6, 4");\
asm("pxor xmm10, xmm6");\
asm("movaps xmm6, xmm10");\
asm("pslldq xmm6, 12");\
asm("pxor xmm10, xmm6");\
\
asm("movaps xmm7, xmm10");\
asm("psrldq xmm7, 4");\
asm("pxor xmm11, xmm7");\
asm("movaps xmm7, xmm11");\
asm("pslldq xmm7, 12");\
asm("pxor xmm11, xmm7");\
} while(0);
void oldE256()
{
asm (".intel_syntax noprefix");
/* (L,R) = (xmm0,xmm1) */
asm ("movaps xmm0, [oldSHAVITE_PTXT]");
asm ("movaps xmm1, [oldSHAVITE_PTXT+16]");
asm ("movaps xmm3, [oldSHAVITE_CNTS]");
asm ("movaps xmm4, [oldSHAVITE256_XOR2]");
asm ("pxor xmm2, xmm2");
/* init key schedule */
asm ("movaps xmm8, [oldSHAVITE_MESS]");
asm ("movaps xmm9, [oldSHAVITE_MESS+16]");
asm ("movaps xmm10, [oldSHAVITE_MESS+32]");
asm ("movaps xmm11, [oldSHAVITE_MESS+48]");
/* xmm8..xmm11 = rk[0..15] */
/* start key schedule */
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm3");
asm ("pxor xmm12, xmm4");
asm ("movaps xmm4, [oldSHAVITE256_XOR3]");
asm ("pxor xmm12, xmm11");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
/* xmm12..xmm15 = rk[16..31] */
/* F3 - first round */
asm ("movaps xmm6, xmm8");
asm ("pxor xmm8, xmm1");
replace_aes(8, 9);
replace_aes(8, 10);
replace_aes(8, 2);
asm ("pxor xmm0, xmm8");
asm ("movaps xmm8, xmm6");
/* F3 - second round */
asm ("movaps xmm6, xmm11");
asm ("pxor xmm11, xmm0");
replace_aes(11, 12);
replace_aes(11, 13);
replace_aes(11, 2);
asm ("pxor xmm1, xmm11");
asm ("movaps xmm11, xmm6");
/* key schedule */
oldmixing();
/* xmm8..xmm11 - rk[32..47] */
/* F3 - third round */
asm ("movaps xmm6, xmm14");
asm ("pxor xmm14, xmm1");
replace_aes(14, 15);
replace_aes(14, 8);
replace_aes(14, 2);
asm ("pxor xmm0, xmm14");
asm ("movaps xmm14, xmm6");
/* key schedule */
asm ("pshufd xmm3, xmm3,135");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm14, xmm3");
asm ("pxor xmm14, xmm4");
asm ("movaps xmm4, [oldSHAVITE256_XOR4]");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
/* xmm12..xmm15 - rk[48..63] */
/* F3 - fourth round */
asm ("movaps xmm6, xmm9");
asm ("pxor xmm9, xmm0");
replace_aes(9, 10);
replace_aes(9, 11);
replace_aes(9, 2);
asm ("pxor xmm1, xmm9");
asm ("movaps xmm9, xmm6");
/* key schedule */
oldmixing();
/* xmm8..xmm11 = rk[64..79] */
/* F3 - fifth round */
asm ("movaps xmm6, xmm12");
asm ("pxor xmm12, xmm1");
replace_aes(12, 13);
replace_aes(12, 14);
replace_aes(12, 2);
asm ("pxor xmm0, xmm12");
asm ("movaps xmm12, xmm6");
/* F3 - sixth round */
asm ("movaps xmm6, xmm15");
asm ("pxor xmm15, xmm0");
replace_aes(15, 8);
replace_aes(15, 9);
replace_aes(15, 2);
asm ("pxor xmm1, xmm15");
asm ("movaps xmm15, xmm6");
/* key schedule */
asm ("pshufd xmm3, xmm3, 147");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm13, xmm3");
asm ("pxor xmm13, xmm4");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
/* xmm12..xmm15 = rk[80..95] */
/* F3 - seventh round */
asm ("movaps xmm6, xmm10");
asm ("pxor xmm10, xmm1");
replace_aes(10, 11);
replace_aes(10, 12);
replace_aes(10, 2);
asm ("pxor xmm0, xmm10");
asm ("movaps xmm10, xmm6");
/* key schedule */
oldmixing();
/* xmm8..xmm11 = rk[96..111] */
/* F3 - eigth round */
asm ("movaps xmm6, xmm13");
asm ("pxor xmm13, xmm0");
replace_aes(13, 14);
replace_aes(13, 15);
replace_aes(13, 2);
asm ("pxor xmm1, xmm13");
asm ("movaps xmm13, xmm6");
/* key schedule */
asm ("pshufd xmm3, xmm3, 135");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm15, xmm3");
asm ("pxor xmm15, xmm4");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
/* xmm12..xmm15 = rk[112..127] */
/* F3 - ninth round */
asm ("movaps xmm6, xmm8");
asm ("pxor xmm8, xmm1");
replace_aes(8, 9);
replace_aes(8, 10);
replace_aes(8, 2);
asm ("pxor xmm0, xmm8");
asm ("movaps xmm8, xmm6");
/* F3 - tenth round */
asm ("movaps xmm6, xmm11");
asm ("pxor xmm11, xmm0");
replace_aes(11, 12);
replace_aes(11, 13);
replace_aes(11, 2);
asm ("pxor xmm1, xmm11");
asm ("movaps xmm11, xmm6");
/* key schedule */
oldmixing();
/* xmm8..xmm11 = rk[128..143] */
/* F3 - eleventh round */
asm ("movaps xmm6, xmm14");
asm ("pxor xmm14, xmm1");
replace_aes(14, 15);
replace_aes(14, 8);
replace_aes(14, 2);
asm ("pxor xmm0, xmm14");
asm ("movaps xmm14, xmm6");
/* F3 - twelfth round */
asm ("movaps xmm6, xmm9");
asm ("pxor xmm9, xmm0");
replace_aes(9, 10);
replace_aes(9, 11);
replace_aes(9, 2);
asm ("pxor xmm1, xmm9");
asm ("movaps xmm9, xmm6");
/* feedforward */
asm ("pxor xmm0, [oldSHAVITE_PTXT]");
asm ("pxor xmm1, [oldSHAVITE_PTXT+16]");
asm ("movaps [oldSHAVITE_PTXT], xmm0");
asm ("movaps [oldSHAVITE_PTXT+16], xmm1");
asm (".att_syntax noprefix");
return;
}
void oldCompress256(const unsigned char *message_block, unsigned char *chaining_value, unsigned long long counter,
const unsigned char salt[32])
{
int i, j;
for (i=0;i<8*4;i++)
oldSHAVITE_PTXT[i]=chaining_value[i];
for (i=0;i<16;i++)
oldSHAVITE_MESS[i] = *((unsigned int*)(message_block+4*i));
oldSHAVITE_CNTS[0] = (unsigned int)(counter & 0xFFFFFFFFULL);
oldSHAVITE_CNTS[1] = (unsigned int)(counter>>32);
/* encryption + Davies-Meyer transform */
oldE256();
for (i=0; i<4*8; i++)
chaining_value[i]=oldSHAVITE_PTXT[i];
return;
}
////////////////////////////////
__attribute__ ((aligned (16))) unsigned int SHAVITE_MESS[16];
__attribute__ ((aligned (16))) unsigned char SHAVITE_PTXT[8*4];
__attribute__ ((aligned (16))) unsigned int SHAVITE_CNTS[4] = {0,0,0,0};
__attribute__ ((aligned (16))) unsigned int SHAVITE_REVERSE[4] = {0x07060504, 0x0b0a0908, 0x0f0e0d0c, 0x03020100 };
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR2[4] = {0x0, 0xFFFFFFFF, 0x0, 0x0};
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR3[4] = {0x0, 0x0, 0xFFFFFFFF, 0x0};
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR4[4] = {0x0, 0x0, 0x0, 0xFFFFFFFF};
#define mixing() do {\
x11 = x15; \
x10 = x14; \
x9 = x13;\
x8 = x12;\
\
x6 = x11;\
x6 = _mm_srli_si128(x6, 4);\
x8 = _mm_xor_si128(x8, x6);\
x6 = x8;\
x6 = _mm_slli_si128(x6, 12);\
x8 = _mm_xor_si128(x8, x6);\
\
x7 = x8;\
x7 = _mm_srli_si128(x7, 4);\
x9 = _mm_xor_si128(x9, x7);\
x7 = x9;\
x7 = _mm_slli_si128(x7, 12);\
x9 = _mm_xor_si128(x9, x7);\
\
x6 = x9;\
x6 = _mm_srli_si128(x6, 4);\
x10 = _mm_xor_si128(x10, x6);\
x6 = x10;\
x6 = _mm_slli_si128(x6, 12);\
x10 = _mm_xor_si128(x10, x6);\
\
x7 = x10;\
x7 = _mm_srli_si128(x7, 4);\
x11 = _mm_xor_si128(x11, x7);\
x7 = x11;\
x7 = _mm_slli_si128(x7, 12);\
x11 = _mm_xor_si128(x11, x7);\
} while(0);
void E256()
{
__m128i x0;
__m128i x1;
__m128i x2;
__m128i x3;
__m128i x4;
__m128i x5;
__m128i x6;
__m128i x7;
__m128i x8;
__m128i x9;
__m128i x10;
__m128i x11;
__m128i x12;
__m128i x13;
__m128i x14;
__m128i x15;
/* (L,R) = (xmm0,xmm1) */
const __m128i ptxt1 = _mm_loadu_si128((const __m128i*)SHAVITE_PTXT);
const __m128i ptxt2 = _mm_loadu_si128((const __m128i*)(SHAVITE_PTXT+16));
x0 = ptxt1;
x1 = ptxt2;
x3 = _mm_loadu_si128((__m128i*)SHAVITE_CNTS);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR2);
x2 = _mm_setzero_si128();
/* init key schedule */
x8 = _mm_loadu_si128((__m128i*)SHAVITE_MESS);
x9 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+4));
x10 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+8));
x11 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+12));
/* xmm8..xmm11 = rk[0..15] */
/* start key schedule */
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
const __m128i xtemp = _mm_loadu_si128((__m128i*)SHAVITE_REVERSE);
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x3);
x12 = _mm_xor_si128(x12, x4);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR3);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[16..31] */
/* F3 - first round */
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
/* F3 - second round */
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 - rk[32..47] */
/* F3 - third round */
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x14 = _mm_xor_si128(x14, x3);
x14 = _mm_xor_si128(x14, x4);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 - rk[48..63] */
/* F3 - fourth round */
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[64..79] */
/* F3 - fifth round */
x6 = x12;
x12 = _mm_xor_si128(x12, x1);
x12 = _mm_aesenc_si128(x12, x13);
x12 = _mm_aesenc_si128(x12, x14);
x12 = _mm_aesenc_si128(x12, x2);
x0 = _mm_xor_si128(x0, x12);
x12 = x6;
/* F3 - sixth round */
x6 = x15;
x15 = _mm_xor_si128(x15, x0);
x15 = _mm_aesenc_si128(x15, x8);
x15 = _mm_aesenc_si128(x15, x9);
x15 = _mm_aesenc_si128(x15, x2);
x1 = _mm_xor_si128(x1, x15);
x15 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 147);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x3);
x13 = _mm_xor_si128(x13, x4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[80..95] */
/* F3 - seventh round */
x6 = x10;
x10 = _mm_xor_si128(x10, x1);
x10 = _mm_aesenc_si128(x10, x11);
x10 = _mm_aesenc_si128(x10, x12);
x10 = _mm_aesenc_si128(x10, x2);
x0 = _mm_xor_si128(x0, x10);
x10 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[96..111] */
/* F3 - eigth round */
x6 = x13;
x13 = _mm_xor_si128(x13, x0);
x13 = _mm_aesenc_si128(x13, x14);
x13 = _mm_aesenc_si128(x13, x15);
x13 = _mm_aesenc_si128(x13, x2);
x1 = _mm_xor_si128(x1, x13);
x13 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x15 = _mm_xor_si128(x15, x3);
x15 = _mm_xor_si128(x15, x4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[112..127] */
/* F3 - ninth round */
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
/* F3 - tenth round */
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[128..143] */
/* F3 - eleventh round */
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
/* F3 - twelfth round */
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
/* feedforward */
x0 = _mm_xor_si128(x0, ptxt1);
x1 = _mm_xor_si128(x1, ptxt2);
_mm_storeu_si128((__m128i *)SHAVITE_PTXT, x0);
_mm_storeu_si128((__m128i *)(SHAVITE_PTXT + 16), x1);
return;
}
void Compress256(const unsigned char *message_block, unsigned char *chaining_value, unsigned long long counter,
const unsigned char salt[32])
{
int i, j;
for (i=0;i<8*4;i++)
SHAVITE_PTXT[i]=chaining_value[i];
for (i=0;i<16;i++)
SHAVITE_MESS[i] = *((unsigned int*)(message_block+4*i));
SHAVITE_CNTS[0] = (unsigned int)(counter & 0xFFFFFFFFULL);
SHAVITE_CNTS[1] = (unsigned int)(counter>>32);
/* encryption + Davies-Meyer transform */
E256();
for (i=0; i<4*8; i++)
chaining_value[i]=SHAVITE_PTXT[i];
return;
}
int main(int argc, char *argv[])
{
const int cvlen = 32;
unsigned char *cv = (unsigned char *)malloc(cvlen);
for (int x=0; x < cvlen; x++)
cv[x] = x + argc;
const int mblen = 64;
unsigned char *mb = (unsigned char *)malloc(mblen);
for (int x=0; x < mblen; x++)
mb[x] = x + argc;
unsigned long long counter = 0x1234567812345678ull;
unsigned char s[32] = {0};
oldCompress256(mb, cv, counter, s);
printf("old: ");
for (int x=0; x < cvlen; x++)
printf("%2x ", cv[x]);
printf("\n");
for (int x=0; x < cvlen; x++)
cv[x] = x + argc;
Compress256(mb, cv, counter, s);
printf("new: ");
for (int x=0; x < cvlen; x++)
printf("%2x ", cv[x]);
printf("\n");
}
#include <x86intrin.h>
#include <stdio.h>
#include <time.h>
#define mixing() \
x11 = x15;\
x10 = x14;\
x9 = x13;\
x8 = x12;\
\
x6 = x11;\
x6 = _mm_srli_si128(x6, 4);\
x8 = _mm_xor_si128(x8, x6);\
x6 = x8;\
x6 = _mm_slli_si128(x6, 12);\
x8 = _mm_xor_si128(x8, x6);\
\
x7 = x8;\
x7 = _mm_srli_si128(x7, 4);\
x9 = _mm_xor_si128(x9, x7);\
x7 = x9;\
x7 = _mm_slli_si128(x7, 12);\
x9 = _mm_xor_si128(x9, x7);\
\
x6 = x9;\
x6 = _mm_srli_si128(x6, 4);\
x10 = _mm_xor_si128(x10, x6);\
x6 = x10;\
x6 = _mm_slli_si128(x6, 12);\
x10 = _mm_xor_si128(x10, x6);\
\
x7 = x10;\
x7 = _mm_srli_si128(x7, 4);\
x11 = _mm_xor_si128(x11, x7);\
x7 = x11;\
x7 = _mm_slli_si128(x7, 12);\
x11 = _mm_xor_si128(x11, x7);
// If mess & chain won't be 16byte aligned, change _mm_load to _mm_loadu and
// _mm_store to _mm_storeu
void Compress256(const __m128i *mess, __m128i *chain, unsigned long long counter, const unsigned char salt[32])
{
// note: _mm_set_epi32 uses (int e3, int e2, int e1, int e0)
const __m128i SHAVITE_REVERSE = _mm_set_epi32(0x03020100, 0x0f0e0d0c, 0x0b0a0908, 0x07060504);
const __m128i SHAVITE256_XOR2 = _mm_set_epi32(0x0, 0x0, 0xFFFFFFFF, 0x0);
const __m128i SHAVITE256_XOR3 = _mm_set_epi32(0x0, 0xFFFFFFFF, 0x0, 0x0);
const __m128i SHAVITE256_XOR4 = _mm_set_epi32(0xFFFFFFFF, 0x0, 0x0, 0x0);
const __m128i SHAVITE_CNTS =
_mm_set_epi32(0, 0, (unsigned int)(counter>>32), (unsigned int)(counter & 0xFFFFFFFFULL));
__m128i x0, x1, x2, x3, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
/* (L,R) = (xmm0,xmm1) */
const __m128i ptxt1 = _mm_load_si128(chain);
const __m128i ptxt2 = _mm_load_si128(chain+1);
x0 = ptxt1;
x1 = ptxt2;
x3 = SHAVITE_CNTS;
x2 = _mm_setzero_si128();
/* init key schedule */
x8 = _mm_load_si128(mess);
x9 = _mm_load_si128(mess+1);
x10 = _mm_load_si128(mess+2);
x11 = _mm_load_si128(mess+3);
/* xmm8..xmm11 = rk[0..15] */
/* start key schedule */
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x3);
x12 = _mm_xor_si128(x12, SHAVITE256_XOR2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[16..31] */
/* F3 - first round */
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
/* F3 - second round */
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 - rk[32..47] */
/* F3 - third round */
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x14 = _mm_xor_si128(x14, x3);
x14 = _mm_xor_si128(x14, SHAVITE256_XOR3);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 - rk[48..63] */
/* F3 - fourth round */
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[64..79] */
/* F3 - fifth round */
x6 = x12;
x12 = _mm_xor_si128(x12, x1);
x12 = _mm_aesenc_si128(x12, x13);
x12 = _mm_aesenc_si128(x12, x14);
x12 = _mm_aesenc_si128(x12, x2);
x0 = _mm_xor_si128(x0, x12);
x12 = x6;
/* F3 - sixth round */
x6 = x15;
x15 = _mm_xor_si128(x15, x0);
x15 = _mm_aesenc_si128(x15, x8);
x15 = _mm_aesenc_si128(x15, x9);
x15 = _mm_aesenc_si128(x15, x2);
x1 = _mm_xor_si128(x1, x15);
x15 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 147);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x3);
x13 = _mm_xor_si128(x13, SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[80..95] */
/* F3 - seventh round */
x6 = x10;
x10 = _mm_xor_si128(x10, x1);
x10 = _mm_aesenc_si128(x10, x11);
x10 = _mm_aesenc_si128(x10, x12);
x10 = _mm_aesenc_si128(x10, x2);
x0 = _mm_xor_si128(x0, x10);
x10 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[96..111] */
/* F3 - eigth round */
x6 = x13;
x13 = _mm_xor_si128(x13, x0);
x13 = _mm_aesenc_si128(x13, x14);
x13 = _mm_aesenc_si128(x13, x15);
x13 = _mm_aesenc_si128(x13, x2);
x1 = _mm_xor_si128(x1, x13);
x13 = x6;
/* key schedule */
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x15 = _mm_xor_si128(x15, x3);
x15 = _mm_xor_si128(x15, SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
/* xmm12..xmm15 = rk[112..127] */
/* F3 - ninth round */
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
/* F3 - tenth round */
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
/* key schedule */
mixing();
/* xmm8..xmm11 = rk[128..143] */
/* F3 - eleventh round */
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
/* F3 - twelfth round */
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
/* feedforward */
x0 = _mm_xor_si128(x0, ptxt1);
x1 = _mm_xor_si128(x1, ptxt2);
_mm_store_si128(chain, x0);
_mm_store_si128(chain + 1, x1);
}
int main(int argc, char *argv[])
{
__m128i chain[2], mess[4];
unsigned char *p;
// argc prevents compiler from precalculating results
p = (unsigned char *)mess;
for (int x=0; x < 64; x++)
p[x] = x + argc;
p = (unsigned char *)chain;
for (int x=0; x < 32; x++)
p[x] = x + argc;
unsigned long long counter = 0x1234567812345678ull + argc;
// Unused, but prototype requires it.
unsigned char s[32] = {0};
Compress256(mess, chain, counter, s);
for (int x=0; x < 32; x++)
printf("%02x ", p[x]);
printf("\n");
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
unsigned char res = 0;
for (int x=0; x < 400000; x++)
{
Compress256(mess, chain, counter, s);
// Ensure optimizer doesn't omit the calc
res ^= *p;
}
clock_gettime(CLOCK_MONOTONIC, &end);
unsigned long long delta_us = (end.tv_sec - start.tv_sec) * 1000000ull + (end.tv_nsec - start.tv_nsec) / 1000ull;
printf("%x: %llu\n", res, delta_us);
}
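One note on the alignment comment in the code above: callers can guarantee the 16-byte alignment that _mm_load_si128/_mm_store_si128 require by declaring their buffers accordingly. A sketch (the byte-buffer names here are made up):

#include <stdalign.h>

alignas(16) unsigned char mess_bytes[64];   /* C11 alignment specifier */
alignas(16) unsigned char chain_bytes[32];
/* ... fill the buffers, then: */
Compress256((const __m128i *)mess_bytes, (__m128i *)chain_bytes, counter, s);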
For this question about c++ - accessing thread_local variables in inline assembly, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/57664080/